author    Steve French <sfrench@us.ibm.com>    2008-04-28 00:01:34 -0400
committer Steve French <sfrench@us.ibm.com>    2008-04-28 00:01:34 -0400
commit    1dbbb6077426f8ce63d6a59c5ac6613e1689cbde (patch)
tree      6141d4d7a8eb7c557705bdfa764137d4fd2e4924
parent    d09e860cf07e7c9ee12920a09f5080e30a12a23a (diff)
parent    064922a805ec7aadfafdd27aa6b4908d737c3c1d (diff)
Merge branch 'master' of /pub/scm/linux/kernel/git/torvalds/linux-2.6
-rw-r--r--  Documentation/00-INDEX | 2
-rw-r--r--  Documentation/i386/boot.txt | 26
-rw-r--r--  Documentation/ia64/kvm.txt | 82
-rw-r--r--  Documentation/ide/ide-tape.txt | 211
-rw-r--r--  Documentation/ide/ide.txt | 107
-rw-r--r--  Documentation/ioctl-number.txt | 2
-rw-r--r--  Documentation/kernel-parameters.txt | 4
-rw-r--r--  Documentation/mips/AU1xxx_IDE.README | 46
-rw-r--r--  Documentation/powerpc/kvm_440.txt | 41
-rw-r--r--  Documentation/s390/kvm.txt | 125
-rw-r--r--  Documentation/smart-config.txt | 98
-rw-r--r--  MAINTAINERS | 17
-rw-r--r--  arch/ia64/Kconfig | 3
-rw-r--r--  arch/ia64/Makefile | 1
-rw-r--r--  arch/ia64/kvm/Kconfig | 49
-rw-r--r--  arch/ia64/kvm/Makefile | 61
-rw-r--r--  arch/ia64/kvm/asm-offsets.c | 251
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c | 1806
-rw-r--r--  arch/ia64/kvm/kvm_fw.c | 500
-rw-r--r--  arch/ia64/kvm/kvm_minstate.h | 273
-rw-r--r--  arch/ia64/kvm/lapic.h | 25
-rw-r--r--  arch/ia64/kvm/misc.h | 93
-rw-r--r--  arch/ia64/kvm/mmio.c | 341
-rw-r--r--  arch/ia64/kvm/optvfault.S | 918
-rw-r--r--  arch/ia64/kvm/process.c | 970
-rw-r--r--  arch/ia64/kvm/trampoline.S | 1038
-rw-r--r--  arch/ia64/kvm/vcpu.c | 2163
-rw-r--r--  arch/ia64/kvm/vcpu.h | 740
-rw-r--r--  arch/ia64/kvm/vmm.c | 66
-rw-r--r--  arch/ia64/kvm/vmm_ivt.S | 1424
-rw-r--r--  arch/ia64/kvm/vti.h | 290
-rw-r--r--  arch/ia64/kvm/vtlb.c | 636
-rw-r--r--  arch/powerpc/Kconfig | 1
-rw-r--r--  arch/powerpc/Kconfig.debug | 3
-rw-r--r--  arch/powerpc/Makefile | 1
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 28
-rw-r--r--  arch/powerpc/kvm/44x_tlb.c | 224
-rw-r--r--  arch/powerpc/kvm/44x_tlb.h | 91
-rw-r--r--  arch/powerpc/kvm/Kconfig | 42
-rw-r--r--  arch/powerpc/kvm/Makefile | 15
-rw-r--r--  arch/powerpc/kvm/booke_guest.c | 615
-rw-r--r--  arch/powerpc/kvm/booke_host.c | 83
-rw-r--r--  arch/powerpc/kvm/booke_interrupts.S | 436
-rw-r--r--  arch/powerpc/kvm/emulate.c | 760
-rw-r--r--  arch/powerpc/kvm/powerpc.c | 436
-rw-r--r--  arch/s390/Kconfig | 14
-rw-r--r--  arch/s390/Makefile | 2
-rw-r--r--  arch/s390/kernel/early.c | 4
-rw-r--r--  arch/s390/kernel/setup.c | 14
-rw-r--r--  arch/s390/kernel/vtime.c | 1
-rw-r--r--  arch/s390/kvm/Kconfig | 46
-rw-r--r--  arch/s390/kvm/Makefile | 14
-rw-r--r--  arch/s390/kvm/diag.c | 67
-rw-r--r--  arch/s390/kvm/gaccess.h | 274
-rw-r--r--  arch/s390/kvm/intercept.c | 216
-rw-r--r--  arch/s390/kvm/interrupt.c | 592
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 685
-rw-r--r--  arch/s390/kvm/kvm-s390.h | 64
-rw-r--r--  arch/s390/kvm/priv.c | 323
-rw-r--r--  arch/s390/kvm/sie64a.S | 47
-rw-r--r--  arch/s390/kvm/sigp.c | 288
-rw-r--r--  arch/s390/mm/pgtable.c | 65
-rw-r--r--  arch/um/Kconfig.x86_64 | 7
-rw-r--r--  arch/um/os-Linux/helper.c | 1
-rw-r--r--  arch/um/sys-i386/Makefile | 2
-rw-r--r--  arch/um/sys-x86_64/Makefile | 2
-rw-r--r--  arch/x86/Kconfig | 26
-rw-r--r--  arch/x86/Kconfig.cpu | 11
-rw-r--r--  arch/x86/Kconfig.debug | 13
-rw-r--r--  arch/x86/boot/header.S | 6
-rw-r--r--  arch/x86/configs/i386_defconfig | 1
-rw-r--r--  arch/x86/configs/x86_64_defconfig | 1
-rw-r--r--  arch/x86/ia32/ia32_signal.c | 10
-rw-r--r--  arch/x86/ia32/ia32entry.S | 2
-rw-r--r--  arch/x86/kernel/Makefile | 2
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 4
-rw-r--r--  arch/x86/kernel/apic_32.c | 3
-rw-r--r--  arch/x86/kernel/apic_64.c | 3
-rw-r--r--  arch/x86/kernel/apm_32.c | 3
-rw-r--r--  arch/x86/kernel/cpu/Makefile | 1
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 6
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_64.c | 18
-rw-r--r--  arch/x86/kernel/cpu/nexgen.c | 59
-rw-r--r--  arch/x86/kernel/cpu/perfctr-watchdog.c | 14
-rw-r--r--  arch/x86/kernel/crash.c | 3
-rw-r--r--  arch/x86/kernel/e820_64.c | 35
-rw-r--r--  arch/x86/kernel/genapic_64.c | 2
-rw-r--r--  arch/x86/kernel/head64.c | 25
-rw-r--r--  arch/x86/kernel/hpet.c | 2
-rw-r--r--  arch/x86/kernel/i8253.c | 6
-rw-r--r--  arch/x86/kernel/io_apic_32.c | 2
-rw-r--r--  arch/x86/kernel/io_apic_64.c | 2
-rw-r--r--  arch/x86/kernel/irq_32.c | 2
-rw-r--r--  arch/x86/kernel/kdebugfs.c | 163
-rw-r--r--  arch/x86/kernel/kvm.c | 248
-rw-r--r--  arch/x86/kernel/kvmclock.c | 187
-rw-r--r--  arch/x86/kernel/mfgpt_32.c | 3
-rw-r--r--  arch/x86/kernel/mpparse.c | 39
-rw-r--r--  arch/x86/kernel/pci-calgary_64.c | 1
-rw-r--r--  arch/x86/kernel/process.c | 117
-rw-r--r--  arch/x86/kernel/process_32.c | 118
-rw-r--r--  arch/x86/kernel/process_64.c | 123
-rw-r--r--  arch/x86/kernel/ptrace.c | 95
-rw-r--r--  arch/x86/kernel/reboot.c | 13
-rw-r--r--  arch/x86/kernel/setup_32.c | 10
-rw-r--r--  arch/x86/kernel/setup_64.c | 36
-rw-r--r--  arch/x86/kernel/signal_32.c | 35
-rw-r--r--  arch/x86/kernel/signal_64.c | 30
-rw-r--r--  arch/x86/kernel/smpboot.c | 4
-rw-r--r--  arch/x86/kernel/summit_32.c | 5
-rw-r--r--  arch/x86/kernel/tlb_64.c | 4
-rw-r--r--  arch/x86/kernel/trampoline_32.S | 2
-rw-r--r--  arch/x86/kernel/traps_32.c | 2
-rw-r--r--  arch/x86/kvm/Kconfig | 13
-rw-r--r--  arch/x86/kvm/Makefile | 6
-rw-r--r--  arch/x86/kvm/i8254.c | 611
-rw-r--r--  arch/x86/kvm/i8254.h | 63
-rw-r--r--  arch/x86/kvm/irq.c | 18
-rw-r--r--  arch/x86/kvm/irq.h | 3
-rw-r--r--  arch/x86/kvm/kvm_svm.h | 2
-rw-r--r--  arch/x86/kvm/lapic.c | 35
-rw-r--r--  arch/x86/kvm/mmu.c | 672
-rw-r--r--  arch/x86/kvm/mmu.h | 6
-rw-r--r--  arch/x86/kvm/paging_tmpl.h | 86
-rw-r--r--  arch/x86/kvm/segment_descriptor.h | 29
-rw-r--r--  arch/x86/kvm/svm.c | 352
-rw-r--r--  arch/x86/kvm/svm.h | 3
-rw-r--r--  arch/x86/kvm/tss.h | 59
-rw-r--r--  arch/x86/kvm/vmx.c | 278
-rw-r--r--  arch/x86/kvm/vmx.h | 10
-rw-r--r--  arch/x86/kvm/x86.c | 897
-rw-r--r--  arch/x86/kvm/x86_emulate.c | 285
-rw-r--r--  arch/x86/lib/Makefile | 3
-rw-r--r--  arch/x86/lib/bitops_32.c | 70
-rw-r--r--  arch/x86/lib/bitops_64.c | 175
-rw-r--r--  arch/x86/mach-voyager/voyager_smp.c | 10
-rw-r--r--  arch/x86/mm/init_32.c | 6
-rw-r--r--  arch/x86/mm/init_64.c | 38
-rw-r--r--  arch/x86/mm/numa_64.c | 42
-rw-r--r--  arch/x86/mm/pat.c | 33
-rw-r--r--  arch/x86/xen/smp.c | 2
-rw-r--r--  block/bsg.c | 43
-rw-r--r--  drivers/acpi/processor_idle.c | 19
-rw-r--r--  drivers/char/agp/amd-k7-agp.c | 3
-rw-r--r--  drivers/char/agp/frontend.c | 4
-rw-r--r--  drivers/char/drm/ati_pcigart.c | 7
-rw-r--r--  drivers/char/drm/drm.h | 17
-rw-r--r--  drivers/char/drm/drmP.h | 133
-rw-r--r--  drivers/char/drm/drm_agpsupport.c | 2
-rw-r--r--  drivers/char/drm/drm_drv.c | 60
-rw-r--r--  drivers/char/drm/drm_fops.c | 41
-rw-r--r--  drivers/char/drm/drm_irq.c | 381
-rw-r--r--  drivers/char/drm/drm_proc.c | 61
-rw-r--r--  drivers/char/drm/drm_stub.c | 138
-rw-r--r--  drivers/char/drm/drm_sysfs.c | 46
-rw-r--r--  drivers/char/drm/drm_vm.c | 22
-rw-r--r--  drivers/char/drm/i810_dma.c | 4
-rw-r--r--  drivers/char/drm/i830_dma.c | 4
-rw-r--r--  drivers/char/drm/i915_dma.c | 160
-rw-r--r--  drivers/char/drm/i915_drm.h | 45
-rw-r--r--  drivers/char/drm/i915_drv.c | 8
-rw-r--r--  drivers/char/drm/i915_drv.h | 103
-rw-r--r--  drivers/char/drm/i915_irq.c | 605
-rw-r--r--  drivers/char/drm/mga_drv.c | 7
-rw-r--r--  drivers/char/drm/mga_drv.h | 6
-rw-r--r--  drivers/char/drm/mga_irq.c | 69
-rw-r--r--  drivers/char/drm/r128_drv.c | 7
-rw-r--r--  drivers/char/drm/r128_drv.h | 9
-rw-r--r--  drivers/char/drm/r128_irq.c | 55
-rw-r--r--  drivers/char/drm/radeon_drv.c | 8
-rw-r--r--  drivers/char/drm/radeon_drv.h | 19
-rw-r--r--  drivers/char/drm/radeon_irq.c | 171
-rw-r--r--  drivers/char/drm/via_drv.c | 6
-rw-r--r--  drivers/char/drm/via_drv.h | 7
-rw-r--r--  drivers/char/drm/via_irq.c | 81
-rw-r--r--  drivers/ide/Kconfig | 2
-rw-r--r--  drivers/ide/arm/bast-ide.c | 25
-rw-r--r--  drivers/ide/arm/icside.c | 69
-rw-r--r--  drivers/ide/arm/ide_arm.c | 20
-rw-r--r--  drivers/ide/arm/palm_bk3710.c | 64
-rw-r--r--  drivers/ide/arm/rapide.c | 11
-rw-r--r--  drivers/ide/cris/ide-cris.c | 53
-rw-r--r--  drivers/ide/h8300/ide-h8300.c | 10
-rw-r--r--  drivers/ide/ide-acpi.c | 30
-rw-r--r--  drivers/ide/ide-cd.c | 917
-rw-r--r--  drivers/ide/ide-cd.h | 4
-rw-r--r--  drivers/ide/ide-disk.c | 159
-rw-r--r--  drivers/ide/ide-dma.c | 153
-rw-r--r--  drivers/ide/ide-floppy.c | 34
-rw-r--r--  drivers/ide/ide-generic.c | 36
-rw-r--r--  drivers/ide/ide-io.c | 59
-rw-r--r--  drivers/ide/ide-iops.c | 110
-rw-r--r--  drivers/ide/ide-lib.c | 44
-rw-r--r--  drivers/ide/ide-pnp.c | 45
-rw-r--r--  drivers/ide/ide-probe.c | 285
-rw-r--r--  drivers/ide/ide-proc.c | 169
-rw-r--r--  drivers/ide/ide-scan-pci.c | 2
-rw-r--r--  drivers/ide/ide-tape.c | 1204
-rw-r--r--  drivers/ide/ide-taskfile.c | 48
-rw-r--r--  drivers/ide/ide.c | 491
-rw-r--r--  drivers/ide/legacy/ali14xx.c | 44
-rw-r--r--  drivers/ide/legacy/buddha.c | 18
-rw-r--r--  drivers/ide/legacy/dtc2278.c | 39
-rw-r--r--  drivers/ide/legacy/falconide.c | 14
-rw-r--r--  drivers/ide/legacy/gayle.c | 22
-rw-r--r--  drivers/ide/legacy/hd.c | 78
-rw-r--r--  drivers/ide/legacy/ht6560b.c | 57
-rw-r--r--  drivers/ide/legacy/ide-4drives.c | 52
-rw-r--r--  drivers/ide/legacy/ide-cs.c | 84
-rw-r--r--  drivers/ide/legacy/ide_platform.c | 16
-rw-r--r--  drivers/ide/legacy/macide.c | 8
-rw-r--r--  drivers/ide/legacy/q40ide.c | 9
-rw-r--r--  drivers/ide/legacy/qd65xx.c | 238
-rw-r--r--  drivers/ide/legacy/qd65xx.h | 1
-rw-r--r--  drivers/ide/legacy/umc8672.c | 92
-rw-r--r--  drivers/ide/mips/au1xxx-ide.c | 130
-rw-r--r--  drivers/ide/mips/swarm.c | 19
-rw-r--r--  drivers/ide/pci/aec62xx.c | 39
-rw-r--r--  drivers/ide/pci/alim15x3.c | 332
-rw-r--r--  drivers/ide/pci/amd74xx.c | 19
-rw-r--r--  drivers/ide/pci/atiixp.c | 29
-rw-r--r--  drivers/ide/pci/cmd640.c | 294
-rw-r--r--  drivers/ide/pci/cmd64x.c | 153
-rw-r--r--  drivers/ide/pci/cs5520.c | 29
-rw-r--r--  drivers/ide/pci/cs5530.c | 18
-rw-r--r--  drivers/ide/pci/cs5535.c | 24
-rw-r--r--  drivers/ide/pci/cy82c693.c | 97
-rw-r--r--  drivers/ide/pci/delkin_cb.c | 20
-rw-r--r--  drivers/ide/pci/generic.c | 10
-rw-r--r--  drivers/ide/pci/hpt34x.c | 17
-rw-r--r--  drivers/ide/pci/hpt366.c | 132
-rw-r--r--  drivers/ide/pci/it8213.c | 34
-rw-r--r--  drivers/ide/pci/it821x.c | 52
-rw-r--r--  drivers/ide/pci/jmicron.c | 29
-rw-r--r--  drivers/ide/pci/ns87415.c | 40
-rw-r--r--  drivers/ide/pci/opti621.c | 82
-rw-r--r--  drivers/ide/pci/pdc202xx_new.c | 23
-rw-r--r--  drivers/ide/pci/pdc202xx_old.c | 126
-rw-r--r--  drivers/ide/pci/piix.c | 17
-rw-r--r--  drivers/ide/pci/rz1000.c | 2
-rw-r--r--  drivers/ide/pci/sc1200.c | 39
-rw-r--r--  drivers/ide/pci/scc_pata.c | 95
-rw-r--r--  drivers/ide/pci/serverworks.c | 38
-rw-r--r--  drivers/ide/pci/sgiioc4.c | 140
-rw-r--r--  drivers/ide/pci/siimage.c | 142
-rw-r--r--  drivers/ide/pci/sis5513.c | 253
-rw-r--r--  drivers/ide/pci/sl82c105.c | 83
-rw-r--r--  drivers/ide/pci/slc90e66.c | 22
-rw-r--r--  drivers/ide/pci/tc86c001.c | 54
-rw-r--r--  drivers/ide/pci/triflex.c | 12
-rw-r--r--  drivers/ide/pci/trm290.c | 47
-rw-r--r--  drivers/ide/pci/via82cxxx.c | 20
-rw-r--r--  drivers/ide/ppc/mpc8xx.c | 70
-rw-r--r--  drivers/ide/ppc/pmac.c | 183
-rw-r--r--  drivers/ide/setup-pci.c | 226
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_classes.h | 1
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c | 2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_main.c | 75
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_mrmw.c | 16
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_qp.c | 15
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_reqs.c | 51
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_uverbs.c | 6
-rw-r--r--  drivers/infiniband/hw/ehca/hcp_if.c | 23
-rw-r--r--  drivers/infiniband/hw/mlx4/cq.c | 6
-rw-r--r--  drivers/infiniband/hw/mlx4/doorbell.c | 122
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 3
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h | 33
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c | 6
-rw-r--r--  drivers/infiniband/hw/mlx4/srq.c | 6
-rw-r--r--  drivers/infiniband/hw/nes/nes.c | 15
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.c | 27
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.c | 20
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.h | 2
-rw-r--r--  drivers/infiniband/hw/nes/nes_nic.c | 18
-rw-r--r--  drivers/infiniband/hw/nes/nes_utils.c | 4
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.c | 8
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h | 20
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c | 125
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 19
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 3
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_verbs.c | 15
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_vlan.c | 1
-rw-r--r--  drivers/input/joystick/xpad.c | 34
-rw-r--r--  drivers/macintosh/mac_hid.c | 4
-rw-r--r--  drivers/media/dvb/dvb-usb/dib0700_devices.c | 4
-rw-r--r--  drivers/media/dvb/frontends/Kconfig | 8
-rw-r--r--  drivers/media/dvb/frontends/Makefile | 1
-rw-r--r--  drivers/media/dvb/frontends/mt312.h | 2
-rw-r--r--  drivers/media/dvb/frontends/s5h1411.c | 888
-rw-r--r--  drivers/media/dvb/frontends/s5h1411.h | 90
-rw-r--r--  drivers/media/video/au0828/Kconfig | 2
-rw-r--r--  drivers/media/video/au0828/au0828-cards.c | 1
-rw-r--r--  drivers/media/video/au0828/au0828-core.c | 26
-rw-r--r--  drivers/media/video/au0828/au0828-dvb.c | 2
-rw-r--r--  drivers/media/video/au0828/au0828-i2c.c | 6
-rw-r--r--  drivers/media/video/au0828/au0828.h | 8
-rw-r--r--  drivers/media/video/cx23885/cx23885-dvb.c | 4
-rw-r--r--  drivers/media/video/cx88/Kconfig | 1
-rw-r--r--  drivers/media/video/cx88/cx88-blackbird.c | 6
-rw-r--r--  drivers/media/video/cx88/cx88-cards.c | 1
-rw-r--r--  drivers/media/video/cx88/cx88-dvb.c | 32
-rw-r--r--  drivers/media/video/em28xx/em28xx-core.c | 2
-rw-r--r--  drivers/media/video/ir-kbd-i2c.c | 21
-rw-r--r--  drivers/media/video/pvrusb2/Kconfig | 1
-rw-r--r--  drivers/media/video/pvrusb2/pvrusb2-devattr.c | 28
-rw-r--r--  drivers/media/video/pvrusb2/pvrusb2-devattr.h | 22
-rw-r--r--  drivers/media/video/tuner-core.c | 92
-rw-r--r--  drivers/media/video/tuner-xc2028.c | 2
-rw-r--r--  drivers/media/video/vivi.c | 2
-rw-r--r--  drivers/misc/enclosure.c | 100
-rw-r--r--  drivers/net/Kconfig | 2
-rw-r--r--  drivers/net/mlx4/alloc.c | 157
-rw-r--r--  drivers/net/mlx4/cq.c | 2
-rw-r--r--  drivers/net/mlx4/main.c | 3
-rw-r--r--  drivers/net/mlx4/mlx4.h | 3
-rw-r--r--  drivers/net/mlx4/qp.c | 31
-rw-r--r--  drivers/s390/Makefile | 2
-rw-r--r--  drivers/s390/kvm/Makefile | 9
-rw-r--r--  drivers/s390/kvm/kvm_virtio.c | 338
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 39
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.h | 18
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c | 114
-rw-r--r--  drivers/scsi/FlashPoint.c | 2
-rw-r--r--  drivers/scsi/Kconfig | 10
-rw-r--r--  drivers/scsi/Makefile | 1
-rw-r--r--  drivers/scsi/aha152x.c | 7
-rw-r--r--  drivers/scsi/aha1542.c | 26
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx.h | 23
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx.reg | 115
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_core.c | 835
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_inline.h | 859
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.c | 181
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.h | 177
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm_pci.c | 33
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_pci.c | 8
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_proc.c | 2
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_reg.h_shipped | 1145
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped | 1555
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_seq.h_shipped | 6
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx.h | 55
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx.reg | 45
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_93cx6.c | 16
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_core.c | 676
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_inline.h | 616
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm.c | 95
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm.h | 142
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm_pci.c | 73
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_pci.c | 9
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_proc.c | 4
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped | 233
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped | 6
-rw-r--r--  drivers/scsi/aic7xxx/aicasm/aicasm.c | 6
-rw-r--r--  drivers/scsi/aic7xxx/aicasm/aicasm_gram.y | 105
-rw-r--r--  drivers/scsi/aic7xxx/aicasm/aicasm_scan.l | 19
-rw-r--r--  drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c | 25
-rw-r--r--  drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h | 1
-rw-r--r--  drivers/scsi/eata.c | 11
-rw-r--r--  drivers/scsi/esp_scsi.c | 35
-rw-r--r--  drivers/scsi/esp_scsi.h | 13
-rw-r--r--  drivers/scsi/hosts.c | 29
-rw-r--r--  drivers/scsi/ide-scsi.c | 19
-rw-r--r--  drivers/scsi/jazz_esp.c | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 10
-rw-r--r--  drivers/scsi/mac_esp.c | 657
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 8
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 394
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h | 26
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 19
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 12
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 2
-rw-r--r--  drivers/scsi/scsi_priv.h | 1
-rw-r--r--  drivers/scsi/scsi_proc.c | 7
-rw-r--r--  drivers/scsi/scsi_scan.c | 84
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 142
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 60
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 22
-rw-r--r--  drivers/scsi/scsi_transport_spi.c | 33
-rw-r--r--  drivers/scsi/sgiwd93.c | 4
-rw-r--r--  drivers/scsi/sni_53c710.c | 2
-rw-r--r--  drivers/scsi/st.c | 10
-rw-r--r--  drivers/scsi/sun3x_esp.c | 2
-rw-r--r--  drivers/scsi/u14-34f.c | 9
-rw-r--r--  fs/9p/vfs_super.c | 7
-rw-r--r--  fs/binfmt_elf.c | 23
-rw-r--r--  fs/binfmt_misc.c | 18
-rw-r--r--  fs/binfmt_som.c | 10
-rw-r--r--  fs/cifs/cifs_dfs_ref.c | 29
-rw-r--r--  fs/cifs/cifsfs.c | 10
-rw-r--r--  fs/cifs/cifsproto.h | 8
-rw-r--r--  fs/exec.c | 28
-rw-r--r--  fs/fcntl.c | 40
-rw-r--r--  fs/fuse/inode.c | 5
-rw-r--r--  fs/locks.c | 1
-rw-r--r--  fs/namespace.c | 9
-rw-r--r--  fs/nfs/super.c | 8
-rw-r--r--  fs/sysfs/file.c | 14
-rw-r--r--  fs/sysfs/group.c | 83
-rw-r--r--  fs/sysfs/sysfs.h | 2
-rw-r--r--  include/asm-alpha/bitops.h | 5
-rw-r--r--  include/asm-arm/arch-sa1100/ide.h | 6
-rw-r--r--  include/asm-cris/arch-v10/ide.h | 11
-rw-r--r--  include/asm-generic/bitops/__fls.h | 43
-rw-r--r--  include/asm-generic/bitops/find.h | 2
-rw-r--r--  include/asm-generic/bitops/fls64.h | 22
-rw-r--r--  include/asm-ia64/bitops.h | 16
-rw-r--r--  include/asm-ia64/gcc_intrin.h | 12
-rw-r--r--  include/asm-ia64/kvm.h | 205
-rw-r--r--  include/asm-ia64/kvm_host.h | 524
-rw-r--r--  include/asm-ia64/kvm_para.h | 29
-rw-r--r--  include/asm-ia64/processor.h | 63
-rw-r--r--  include/asm-mips/bitops.h | 5
-rw-r--r--  include/asm-mips/mach-au1x00/au1xxx_ide.h | 42
-rw-r--r--  include/asm-parisc/bitops.h | 1
-rw-r--r--  include/asm-powerpc/bitops.h | 5
-rw-r--r--  include/asm-powerpc/kvm.h | 53
-rw-r--r--  include/asm-powerpc/kvm_asm.h | 55
-rw-r--r--  include/asm-powerpc/kvm_host.h | 152
-rw-r--r--  include/asm-powerpc/kvm_para.h | 37
-rw-r--r--  include/asm-powerpc/kvm_ppc.h | 88
-rw-r--r--  include/asm-powerpc/mmu-44x.h | 2
-rw-r--r--  include/asm-s390/Kbuild | 1
-rw-r--r--  include/asm-s390/bitops.h | 1
-rw-r--r--  include/asm-s390/kvm.h | 41
-rw-r--r--  include/asm-s390/kvm_host.h | 234
-rw-r--r--  include/asm-s390/kvm_para.h | 150
-rw-r--r--  include/asm-s390/kvm_virtio.h | 53
-rw-r--r--  include/asm-s390/lowcore.h | 15
-rw-r--r--  include/asm-s390/mmu.h | 1
-rw-r--r--  include/asm-s390/mmu_context.h | 8
-rw-r--r--  include/asm-s390/pgtable.h | 93
-rw-r--r--  include/asm-s390/setup.h | 1
-rw-r--r--  include/asm-sh/bitops.h | 1
-rw-r--r--  include/asm-sparc64/bitops.h | 1
-rw-r--r--  include/asm-x86/bios_ebda.h | 2
-rw-r--r--  include/asm-x86/bitops.h | 149
-rw-r--r--  include/asm-x86/bitops_32.h | 166
-rw-r--r--  include/asm-x86/bitops_64.h | 162
-rw-r--r--  include/asm-x86/bootparam.h | 14
-rw-r--r--  include/asm-x86/e820_64.h | 3
-rw-r--r--  include/asm-x86/io_apic.h | 6
-rw-r--r--  include/asm-x86/kvm.h | 41
-rw-r--r--  include/asm-x86/kvm_host.h | 99
-rw-r--r--  include/asm-x86/kvm_para.h | 55
-rw-r--r--  include/asm-x86/mach-default/smpboot_hooks.h | 2
-rw-r--r--  include/asm-x86/pgtable_32.h | 8
-rw-r--r--  include/asm-x86/posix_types.h | 8
-rw-r--r--  include/asm-x86/processor.h | 2
-rw-r--r--  include/asm-x86/ptrace.h | 2
-rw-r--r--  include/asm-x86/reboot.h | 2
-rw-r--r--  include/asm-x86/rio.h | 11
-rw-r--r--  include/asm-x86/unistd.h | 8
-rw-r--r--  include/linux/Kbuild | 1
-rw-r--r--  include/linux/bitops.h | 140
-rw-r--r--  include/linux/bsg.h | 14
-rw-r--r--  include/linux/compiler-gcc.h | 13
-rw-r--r--  include/linux/file.h | 3
-rw-r--r--  include/linux/fs.h | 5
-rw-r--r--  include/linux/hdsmart.h | 126
-rw-r--r--  include/linux/ide.h | 227
-rw-r--r--  include/linux/kvm.h | 130
-rw-r--r--  include/linux/kvm_host.h | 59
-rw-r--r--  include/linux/kvm_para.h | 11
-rw-r--r--  include/linux/kvm_types.h | 2
-rw-r--r--  include/linux/mlx4/device.h | 40
-rw-r--r--  include/linux/mlx4/qp.h | 4
-rw-r--r--  include/linux/mm.h | 1
-rw-r--r--  include/linux/sched.h | 2
-rw-r--r--  include/linux/sysfs.h | 4
-rw-r--r--  include/scsi/scsi_device.h | 3
-rw-r--r--  kernel/exit.c | 6
-rw-r--r--  kernel/fork.c | 60
-rw-r--r--  lib/Kconfig | 6
-rw-r--r--  lib/Makefile | 1
-rw-r--r--  lib/find_next_bit.c | 77
-rw-r--r--  mm/bootmem.c | 164
-rw-r--r--  mm/rmap.c | 7
-rw-r--r--  mm/sparse.c | 37
-rw-r--r--  net/mac80211/mesh.h | 1
-rw-r--r--  net/mac80211/mesh_hwmp.c | 1
-rw-r--r--  scripts/Makefile.modpost | 2
-rw-r--r--  scripts/mod/modpost.c | 8
-rw-r--r--  virt/kvm/kvm_main.c | 230
-rw-r--r--  virt/kvm/kvm_trace.c | 276
487 files changed, 36118 insertions, 13580 deletions
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index a82a113b4a4b..1977fab38656 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -329,8 +329,6 @@ sgi-visws.txt
 	- short blurb on the SGI Visual Workstations.
 sh/
 	- directory with info on porting Linux to a new architecture.
-smart-config.txt
-	- description of the Smart Config makefile feature.
 sound/
 	- directory with info on sound card support.
 sparc/
diff --git a/Documentation/i386/boot.txt b/Documentation/i386/boot.txt
index 2eb16100bb3f..0fac3465f2e3 100644
--- a/Documentation/i386/boot.txt
+++ b/Documentation/i386/boot.txt
@@ -42,6 +42,8 @@ Protocol 2.05: (Kernel 2.6.20) Make protected mode kernel relocatable.
 Protocol 2.06:	(Kernel 2.6.22) Added a field that contains the size of
 		the boot command line
 
+Protocol 2.09:	(kernel 2.6.26) Added a field of 64-bit physical
+		pointer to single linked list of struct setup_data.
 
 **** MEMORY LAYOUT
 
@@ -172,6 +174,8 @@ Offset Proto Name Meaning
 0240/8	2.07+	hardware_subarch_data	Subarchitecture-specific data
 0248/4	2.08+	payload_offset		Offset of kernel payload
 024C/4	2.08+	payload_length		Length of kernel payload
+0250/8	2.09+	setup_data		64-bit physical pointer to linked list
+					of struct setup_data
 
 (1) For backwards compatibility, if the setup_sects field contains 0, the
     real value is 4.
@@ -572,6 +576,28 @@ command line is entered using the following protocol:
 	covered by setup_move_size, so you may need to adjust this
 	field.
 
+Field name:	setup_data
+Type:		write (obligatory)
+Offset/size:	0x250/8
+Protocol:	2.09+
+
+  The 64-bit physical pointer to a NULL terminated single linked list of
+  struct setup_data. This is used to define a more extensible boot
+  parameters passing mechanism. The definition of struct setup_data is
+  as follows:
+
+	struct setup_data {
+		u64 next;
+		u32 type;
+		u32 len;
+		u8  data[0];
+	};
+
+  Here next is a 64-bit physical pointer to the next node of the linked
+  list (the next field of the last node is 0), type identifies the
+  contents of data, len is the length of the data field, and data holds
+  the real payload.
+
 
 **** MEMORY LAYOUT OF THE REAL-MODE CODE
 
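The setup_data field above is just the head of a singly linked list that lives
in physical memory. As a minimal illustration (not part of this patch), code
running where physical memory is identity-mapped could walk the list as below;
the helper name and the direct cast from a physical address are assumptions
made for the example:

	#include <stdint.h>
	#include <stddef.h>

	struct setup_data {
		uint64_t next;	/* physical address of the next node, 0 ends the list */
		uint32_t type;	/* identifies the contents of data[] */
		uint32_t len;	/* length of data[] in bytes */
		uint8_t  data[];
	};

	/* Hypothetical helper: return the first node of a given type. */
	static const struct setup_data *find_setup_data(uint64_t head, uint32_t type)
	{
		uint64_t pa = head;	/* value read from the 0x250 header field */

		while (pa) {
			/* assumes identity-mapped physical memory */
			const struct setup_data *sd =
				(const struct setup_data *)(uintptr_t)pa;

			if (sd->type == type)
				return sd;
			pa = sd->next;
		}
		return NULL;	/* no node of this type in the list */
	}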
diff --git a/Documentation/ia64/kvm.txt b/Documentation/ia64/kvm.txt
new file mode 100644
index 000000000000..bec9d815da33
--- /dev/null
+++ b/Documentation/ia64/kvm.txt
@@ -0,0 +1,82 @@
+Currently, the kvm module is in EXPERIMENTAL stage on IA64. This means that
+interfaces are not stable enough to use, so you had better not run critical
+applications in a virtual machine. We will try our best to make it strong in
+future versions!
+			Guide: How to boot up guests on kvm/ia64
+
+This guide describes how to enable kvm support for IA-64 systems.
+
+1. Get the kvm source from git.kernel.org.
+	Userspace source:
+		git clone git://git.kernel.org/pub/scm/virt/kvm/kvm-userspace.git
+	Kernel source:
+		git clone git://git.kernel.org/pub/scm/linux/kernel/git/xiantao/kvm-ia64.git
+
+2. Compile the source code.
+	2.1 Compile the userspace code:
+		(1) cd ./kvm-userspace
+		(2) ./configure
+		(3) cd kernel
+		(4) make sync LINUX= $kernel_dir ($kernel_dir is the directory of the kernel source)
+		(5) cd ..
+		(6) make qemu
+		(7) cd qemu; make install
+
+	2.2 Compile the kernel source code:
+		(1) cd ./$kernel_dir
+		(2) make menuconfig
+		(3) Enter the virtualization option, and choose kvm.
+		(4) make
+		(5) Once (4) is done, make modules_install
+		(6) Make an initrd, and use the new kernel to reboot the host machine.
+		(7) Once (6) is done, cd $kernel_dir/arch/ia64/kvm
+		(8) insmod kvm.ko; insmod kvm-intel.ko
+
+Note: For step 2, please make sure that the host page size == TARGET_PAGE_SIZE
+of qemu, otherwise it may fail.
+
+3. Get the guest firmware named Flash.fd, and put it in the right place:
+	(1) If you have the guest firmware (binary) released by Intel Corp for
+	    Xen, use it directly.
+
+	(2) If you have no firmware at hand, please download its source with
+		hg clone http://xenbits.xensource.com/ext/efi-vfirmware.hg
+	    You can find the firmware's binary in the directory
+	    efi-vfirmware.hg/binaries.
+
+	(3) Rename the firmware you own to Flash.fd, and copy it to
+	    /usr/local/share/qemu
+
+4. Boot up Linux or Windows guests:
+	4.1 Create or install an image for the guest to boot. If you have xen
+	    experience, it should be easy.
+
+	4.2 Boot up guests using the following command:
+		/usr/local/bin/qemu-system-ia64 -smp xx -m 512 -hda $your_image
+	    (xx is the number of virtual processors for the guest; the current
+	    maximum value is 4)
+
+5. Known possible issues on some platforms with old firmware.
+
+If you meet strange host crash issues, try to solve them in either of the
+following ways:
+
+(1) Upgrade your firmware to the latest one.
+
+(2) Apply the below patch to the kernel source.
+diff --git a/arch/ia64/kernel/pal.S b/arch/ia64/kernel/pal.S
+index 0b53344..f02b0f7 100644
+--- a/arch/ia64/kernel/pal.S
++++ b/arch/ia64/kernel/pal.S
+@@ -84,7 +84,8 @@ GLOBAL_ENTRY(ia64_pal_call_static)
+ 	mov ar.pfs = loc1
+ 	mov rp = loc0
+ 	;;
+-	srlz.d				// seralize restoration of psr.l
++	srlz.i				// seralize restoration of psr.l
++	;;
+ 	br.ret.sptk.many b0
+ END(ia64_pal_call_static)
+
+6. Bug report:
+	If you find any issue when using kvm/ia64, please post the bug info
+	to the kvm-ia64-devel mailing list:
+	https://lists.sourceforge.net/lists/listinfo/kvm-ia64-devel/
+
+Thanks for your interest! Let's work together, and make kvm/ia64 stronger
+and stronger!
+
+
+					Xiantao Zhang <xiantao.zhang@intel.com>
+					2008.3.10
diff --git a/Documentation/ide/ide-tape.txt b/Documentation/ide/ide-tape.txt
index 658f271a373f..3f348a0b21d8 100644
--- a/Documentation/ide/ide-tape.txt
+++ b/Documentation/ide/ide-tape.txt
@@ -1,146 +1,65 @@
-/*
- * IDE ATAPI streaming tape driver.
- *
- * This driver is a part of the Linux ide driver.
- *
- * The driver, in co-operation with ide.c, basically traverses the
- * request-list for the block device interface. The character device
- * interface, on the other hand, creates new requests, adds them
- * to the request-list of the block device, and waits for their completion.
- *
- * Pipelined operation mode is now supported on both reads and writes.
- *
- * The block device major and minor numbers are determined from the
- * tape's relative position in the ide interfaces, as explained in ide.c.
- *
- * The character device interface consists of the following devices:
- *
- * ht0		major 37, minor 0	first IDE tape, rewind on close.
- * ht1		major 37, minor 1	second IDE tape, rewind on close.
- * ...
- * nht0		major 37, minor 128	first IDE tape, no rewind on close.
- * nht1		major 37, minor 129	second IDE tape, no rewind on close.
- * ...
- *
- * The general magnetic tape commands compatible interface, as defined by
- * include/linux/mtio.h, is accessible through the character device.
- *
- * General ide driver configuration options, such as the interrupt-unmask
- * flag, can be configured by issuing an ioctl to the block device interface,
- * as any other ide device.
- *
- * Our own ide-tape ioctl's can be issued to either the block device or
- * the character device interface.
- *
- * Maximal throughput with minimal bus load will usually be achieved in the
- * following scenario:
- *
- *	1.	ide-tape is operating in the pipelined operation mode.
- *	2.	No buffering is performed by the user backup program.
- *
- * Testing was done with a 2 GB CONNER CTMA 4000 IDE ATAPI Streaming Tape Drive.
- *
- * Here are some words from the first releases of hd.c, which are quoted
- * in ide.c and apply here as well:
- *
- * | Special care is recommended. Have Fun!
- *
- *
- * An overview of the pipelined operation mode.
- *
- * In the pipelined write mode, we will usually just add requests to our
- * pipeline and return immediately, before we even start to service them. The
- * user program will then have enough time to prepare the next request while
- * we are still busy servicing previous requests. In the pipelined read mode,
- * the situation is similar - we add read-ahead requests into the pipeline,
- * before the user even requested them.
- *
- * The pipeline can be viewed as a "safety net" which will be activated when
- * the system load is high and prevents the user backup program from keeping up
- * with the current tape speed. At this point, the pipeline will get
- * shorter and shorter but the tape will still be streaming at the same speed.
- * Assuming we have enough pipeline stages, the system load will hopefully
- * decrease before the pipeline is completely empty, and the backup program
- * will be able to "catch up" and refill the pipeline again.
- *
- * When using the pipelined mode, it would be best to disable any type of
- * buffering done by the user program, as ide-tape already provides all the
- * benefits in the kernel, where it can be done in a more efficient way.
- * As we will usually not block the user program on a request, the most
- * efficient user code will then be a simple read-write-read-... cycle.
- * Any additional logic will usually just slow down the backup process.
- *
- * Using the pipelined mode, I get a constant over 400 KBps throughput,
- * which seems to be the maximum throughput supported by my tape.
- *
- * However, there are some downfalls:
- *
- *	1.	We use memory (for data buffers) in proportional to the number
- *		of pipeline stages (each stage is about 26 KB with my tape).
- *	2.	In the pipelined write mode, we cheat and postpone error codes
- *		to the user task. In read mode, the actual tape position
- *		will be a bit further than the last requested block.
- *
- * Concerning (1):
- *
- *	1.	We allocate stages dynamically only when we need them. When
- *		we don't need them, we don't consume additional memory. In
- *		case we can't allocate stages, we just manage without them
- *		(at the expense of decreased throughput) so when Linux is
- *		tight in memory, we will not pose additional difficulties.
- *
- *	2.	The maximum number of stages (which is, in fact, the maximum
- *		amount of memory) which we allocate is limited by the compile
- *		time parameter IDETAPE_MAX_PIPELINE_STAGES.
- *
- *	3.	The maximum number of stages is a controlled parameter - We
- *		don't start from the user defined maximum number of stages
- *		but from the lower IDETAPE_MIN_PIPELINE_STAGES (again, we
- *		will not even allocate this amount of stages if the user
- *		program can't handle the speed). We then implement a feedback
- *		loop which checks if the pipeline is empty, and if it is, we
- *		increase the maximum number of stages as necessary until we
- *		reach the optimum value which just manages to keep the tape
- *		busy with minimum allocated memory or until we reach
- *		IDETAPE_MAX_PIPELINE_STAGES.
- *
- * Concerning (2):
- *
- * In pipelined write mode, ide-tape can not return accurate error codes
- * to the user program since we usually just add the request to the
- * pipeline without waiting for it to be serviced. In case an error
- * occurs, I will report it on the next user request.
- *
- * In the pipelined read mode, subsequent read requests or forward
- * filemark spacing will perform correctly, as we preserve all blocks
- * and filemarks which we encountered during our excess read-ahead.
- *
- * For accurate tape positioning and error reporting, disabling
- * pipelined mode might be the best option.
- *
- * You can enable/disable/tune the pipelined operation mode by adjusting
- * the compile time parameters below.
- *
- *
- * Possible improvements.
- *
- *	1.	Support for the ATAPI overlap protocol.
- *
- *		In order to maximize bus throughput, we currently use the DSC
- *		overlap method which enables ide.c to service requests from the
- *		other device while the tape is busy executing a command. The
- *		DSC overlap method involves polling the tape's status register
- *		for the DSC bit, and servicing the other device while the tape
- *		isn't ready.
- *
- *		In the current QIC development standard (December 1995),
- *		it is recommended that new tape drives will *in addition*
- *		implement the ATAPI overlap protocol, which is used for the
- *		same purpose - efficient use of the IDE bus, but is interrupt
- *		driven and thus has much less CPU overhead.
- *
- *		ATAPI overlap is likely to be supported in most new ATAPI
- *		devices, including new ATAPI cdroms, and thus provides us
- *		a method by which we can achieve higher throughput when
- *		sharing a (fast) ATA-2 disk with any (slow) new ATAPI device.
- */
+IDE ATAPI streaming tape driver.
+
+This driver is a part of the Linux ide driver.
+
+The driver, in co-operation with ide.c, basically traverses the
+request-list for the block device interface. The character device
+interface, on the other hand, creates new requests, adds them
+to the request-list of the block device, and waits for their completion.
+
+The block device major and minor numbers are determined from the
+tape's relative position in the ide interfaces, as explained in ide.c.
+
+The character device interface consists of the following devices:
+
+ht0		major 37, minor 0	first IDE tape, rewind on close.
+ht1		major 37, minor 1	second IDE tape, rewind on close.
+...
+nht0		major 37, minor 128	first IDE tape, no rewind on close.
+nht1		major 37, minor 129	second IDE tape, no rewind on close.
+...
+
+The general magnetic tape commands compatible interface, as defined by
+include/linux/mtio.h, is accessible through the character device.
+
+General ide driver configuration options, such as the interrupt-unmask
+flag, can be configured by issuing an ioctl to the block device interface,
+as any other ide device.
+
+Our own ide-tape ioctl's can be issued to either the block device or
+the character device interface.
+
+Maximal throughput with minimal bus load will usually be achieved in the
+following scenario:
+
+	1.	ide-tape is operating in the pipelined operation mode.
+	2.	No buffering is performed by the user backup program.
+
+Testing was done with a 2 GB CONNER CTMA 4000 IDE ATAPI Streaming Tape Drive.
+
+Here are some words from the first releases of hd.c, which are quoted
+in ide.c and apply here as well:
+
+| Special care is recommended. Have Fun!
+
+Possible improvements:
+
+1. Support for the ATAPI overlap protocol.
+
+In order to maximize bus throughput, we currently use the DSC
+overlap method which enables ide.c to service requests from the
+other device while the tape is busy executing a command. The
+DSC overlap method involves polling the tape's status register
+for the DSC bit, and servicing the other device while the tape
+isn't ready.
+
+In the current QIC development standard (December 1995),
+it is recommended that new tape drives will *in addition*
+implement the ATAPI overlap protocol, which is used for the
+same purpose - efficient use of the IDE bus, but is interrupt
+driven and thus has much less CPU overhead.
+
+ATAPI overlap is likely to be supported in most new ATAPI
+devices, including new ATAPI cdroms, and thus provides us
+a method by which we can achieve higher throughput when
+sharing a (fast) ATA-2 disk with any (slow) new ATAPI device.
diff --git a/Documentation/ide/ide.txt b/Documentation/ide/ide.txt
index 486c699f4aea..0c78f4b1d9d9 100644
--- a/Documentation/ide/ide.txt
+++ b/Documentation/ide/ide.txt
@@ -82,27 +82,26 @@ Drives are normally found by auto-probing and/or examining the CMOS/BIOS data.
 For really weird situations, the apparent (fdisk) geometry can also be specified
 on the kernel "command line" using LILO. The format of such lines is:
 
-	hdx=cyls,heads,sects
-or	hdx=cdrom
+	ide_core.chs=[interface_number.device_number]:cyls,heads,sects
+or	ide_core.cdrom=[interface_number.device_number]
 
-where hdx can be any of hda through hdh, Three values are required
-(cyls,heads,sects). For example:
+For example:
 
-	hdc=1050,32,64 hdd=cdrom
+	ide_core.chs=1.0:1050,32,64 ide_core.cdrom=1.1
 
-either {hda,hdb} or {hdc,hdd}. The results of successful auto-probing may
-override the physical geometry/irq specified, though the "original" geometry
-may be retained as the "logical" geometry for partitioning purposes (fdisk).
+The results of successful auto-probing may override the physical geometry/irq
+specified, though the "original" geometry may be retained as the "logical"
+geometry for partitioning purposes (fdisk).
 
 If the auto-probing during boot time confuses a drive (ie. the drive works
 with hd.c but not with ide.c), then an command line option may be specified
 for each drive for which you'd like the drive to skip the hardware
 probe/identification sequence. For example:
 
-	hdb=noprobe
+	ide_core.noprobe=0.1
 or
-	hdc=768,16,32
-	hdc=noprobe
+	ide_core.chs=1.0:768,16,32
+	ide_core.noprobe=1.0
 
 Note that when only one IDE device is attached to an interface, it should be
 jumpered as "single" or "master", *not* "slave". Many folks have had
@@ -118,9 +117,9 @@ If for some reason your cdrom drive is *not* found at boot time, you can force
 the probe to look harder by supplying a kernel command line parameter
 via LILO, such as:
 
-	hdc=cdrom	/* hdc = "master" on second interface */
+	ide_core.cdrom=1.0	/* "master" on second interface (hdc) */
 or
-	hdd=cdrom	/* hdd = "slave" on second interface */
+	ide_core.cdrom=1.1	/* "slave" on second interface (hdd) */
 
 For example, a GW2000 system might have a hard drive on the primary
 interface (/dev/hda) and an IDE cdrom drive on the secondary interface
@@ -174,9 +173,7 @@ to /etc/modprobe.conf.
 
 When ide.c is used as a module, you can pass command line parameters to the
 driver using the "options=" keyword to insmod, while replacing any ',' with
-';'. For example:
-
-	insmod ide.o options="hda=nodma hdb=nodma"
+';'.
 
 
 ================================================================================
@@ -184,57 +181,6 @@ driver using the "options=" keyword to insmod, while replacing any ',' with
 Summary of ide driver parameters for kernel command line
 --------------------------------------------------------
 
- "hdx="  is recognized for all "x" from "a" to "u", such as "hdc".
-
- "idex=" is recognized for all "x" from "0" to "9", such as "ide1".
-
- "hdx=noprobe"		: drive may be present, but do not probe for it
-
- "hdx=none"		: drive is NOT present, ignore cmos and do not probe
-
- "hdx=nowerr"		: ignore the WRERR_STAT bit on this drive
-
- "hdx=cdrom"		: drive is present, and is a cdrom drive
-
- "hdx=cyl,head,sect"	: disk drive is present, with specified geometry
-
- "hdx=autotune"		: driver will attempt to tune interface speed
-			  to the fastest PIO mode supported,
-			  if possible for this drive only.
-			  Not fully supported by all chipset types,
-			  and quite likely to cause trouble with
-			  older/odd IDE drives.
-
- "hdx=nodma"		: disallow DMA
-
- "idebus=xx"		: inform IDE driver of VESA/PCI bus speed in MHz,
-			  where "xx" is between 20 and 66 inclusive,
-			  used when tuning chipset PIO modes.
-			  For PCI bus, 25 is correct for a P75 system,
-			  30 is correct for P90,P120,P180 systems,
-			  and 33 is used for P100,P133,P166 systems.
-			  If in doubt, use idebus=33 for PCI.
-			  As for VLB, it is safest to not specify it.
-			  Bigger values are safer than smaller ones.
-
- "idex=serialize"	: do not overlap operations on idex. Please note
-			  that you will have to specify this option for
-			  both the respective primary and secondary channel
-			  to take effect.
-
- "idex=reset"		: reset interface after probe
-
- "idex=ata66"		: informs the interface that it has an 80c cable
-			  for chipsets that are ATA-66 capable, but the
-			  ability to bit test for detection is currently
-			  unknown.
-
- "ide=doubler"		: probe/support IDE doublers on Amiga
-
-There may be more options than shown -- use the source, Luke!
-
-Everything else is rejected with a "BAD OPTION" message.
-
 For legacy IDE VLB host drivers (ali14xx/dtc2278/ht6560b/qd65xx/umc8672)
 you need to explicitly enable probing by using "probe" kernel parameter,
 i.e. to enable probing for ALI M14xx chipsets (ali14xx host driver) use:
@@ -251,6 +197,33 @@ are detected automatically).
 You also need to use "probe" kernel parameter for ide-4drives driver
 (support for IDE generic chipset with four drives on one port).
 
+To enable support for IDE doublers on Amiga use "doubler" kernel parameter
+for gayle host driver (i.e. "gayle.doubler" if the driver is built-in).
+
+To force ignoring cable detection (this should be needed only if you're using
+short 40-wires cable which cannot be automatically detected - if this is not
+a case please report it as a bug instead) use "ignore_cable" kernel parameter:
+
+* "ide_core.ignore_cable=[interface_number]" boot option if IDE is built-in
+  (i.e. "ide_core.ignore_cable=1" to force ignoring cable for "ide1")
+
+* "ignore_cable=[interface_number]" module parameter (for ide_core module)
+  if IDE is compiled as module
+
+Other kernel parameters for ide_core are:
+
+* "nodma=[interface_number.device_number]" to disallow DMA for a device
+
+* "noflush=[interface_number.device_number]" to disable flush requests
+
+* "noprobe=[interface_number.device_number]" to skip probing
+
+* "nowerr=[interface_number.device_number]" to ignore the WRERR_STAT bit
+
+* "cdrom=[interface_number.device_number]" to force device as a CD-ROM
+
+* "chs=[interface_number.device_number]" to force device as a disk (using CHS)
+
 ================================================================================
255 228
256Some Terminology 229Some Terminology
diff --git a/Documentation/ioctl-number.txt b/Documentation/ioctl-number.txt
index c18363bd8d11..240ce7a56c40 100644
--- a/Documentation/ioctl-number.txt
+++ b/Documentation/ioctl-number.txt
@@ -183,6 +183,8 @@ Code Seq# Include File Comments
 0xAC	00-1F	linux/raw.h
 0xAD	00	Netfilter device in development:
 		<mailto:rusty@rustcorp.com.au>
+0xAE	all	linux/kvm.h		Kernel-based Virtual Machine
+		<mailto:kvm-devel@lists.sourceforge.net>
 0xB0	all	RATIO devices in development:
 		<mailto:vgo@ratio.de>
 0xB1	00-1F	PPPoX	<mailto:mostrows@styx.uwaterloo.ca>
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index bf6303ec0bde..e5f3d918316f 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -772,10 +772,6 @@ and is between 256 and 4096 characters. It is defined in the file
 			Format: ide=nodma or ide=doubler
 			See Documentation/ide/ide.txt.
 
-	ide?=		[HW] (E)IDE subsystem
-			Format: ide?=ata66 or chipset specific parameters.
-			See Documentation/ide/ide.txt.
-
 	idebus=		[HW] (E)IDE subsystem - VLB/PCI bus speed
 			See Documentation/ide/ide.txt.
 
diff --git a/Documentation/mips/AU1xxx_IDE.README b/Documentation/mips/AU1xxx_IDE.README
index 5c8334123f4f..25a6ed1aaa5b 100644
--- a/Documentation/mips/AU1xxx_IDE.README
+++ b/Documentation/mips/AU1xxx_IDE.README
@@ -46,8 +46,6 @@ Two files are introduced:
 
  a) 'include/asm-mips/mach-au1x00/au1xxx_ide.h'
     containes  : struct _auide_hwif
-                 struct drive_list_entry dma_white_list
-                 struct drive_list_entry dma_black_list
                  timing parameters for PIO mode 0/1/2/3/4
                  timing parameters for MWDMA 0/1/2
 
@@ -63,12 +61,6 @@ Four configs variables are introduced:
 CONFIG_BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ - maximum transfer size
                                          per descriptor
 
-If MWDMA is enabled and the connected hard disc is not on the white list, the
-kernel switches to a "safe mwdma mode" at boot time. In this mode the IDE
-performance is substantial slower then in full speed mwdma. In this case
-please add your hard disc to the white list (follow instruction from 'ADD NEW
-HARD DISC TO WHITE OR BLACK LIST' section).
-
 
 SUPPORTED IDE MODES
 -------------------
@@ -120,44 +112,6 @@ CONFIG_IDEDMA_AUTO=y
 Also undefine 'IDE_AU1XXX_BURSTMODE' in 'drivers/ide/mips/au1xxx-ide.c' to
 disable the burst support on DBDMA controller.
 
-ADD NEW HARD DISC TO WHITE OR BLACK LIST
-----------------------------------------
-
-Step 1 : detect the model name of your hard disc
-
-	a) connect your hard disc to the AU1XXX
-
-	b) boot your kernel and get the hard disc model.
-
-	   Example boot log:
-
-	   --snipped--
-	   Uniform Multi-Platform E-IDE driver Revision: 7.00alpha2
-	   ide: Assuming 50MHz system bus speed for PIO modes; override with idebus=xx
-	   Au1xxx IDE(builtin) configured for MWDMA2
-	   Probing IDE interface ide0...
-	   hda: Maxtor 6E040L0, ATA DISK drive
-	   ide0 at 0xac800000-0xac800007,0xac8001c0 on irq 64
-	   hda: max request size: 64KiB
-	   hda: 80293248 sectors (41110 MB) w/2048KiB Cache, CHS=65535/16/63, (U)DMA
-	   --snipped--
-
-	   In this example 'Maxtor 6E040L0'.
-
-Step 2 : edit 'include/asm-mips/mach-au1x00/au1xxx_ide.h'
-
-	Add your hard disc to the dma_white_list or dma_black_list structur.
-
-Step 3 : Recompile the kernel
-
-	Enable MWDMA support in the kernel configuration. Recompile the kernel and
-	reboot.
-
-Step 4 : Tests
-
-	If you have add a hard disc to the white list, please run some stress tests
-	for verification.
-
 
 ACKNOWLEDGMENTS
 ---------------
diff --git a/Documentation/powerpc/kvm_440.txt b/Documentation/powerpc/kvm_440.txt
new file mode 100644
index 000000000000..c02a003fa03a
--- /dev/null
+++ b/Documentation/powerpc/kvm_440.txt
@@ -0,0 +1,41 @@
+Hollis Blanchard <hollisb@us.ibm.com>
+15 Apr 2008
+
+Various notes on the implementation of KVM for PowerPC 440:
+
+To enforce isolation, host userspace, guest kernel, and guest userspace all
+run at user privilege level. Only the host kernel runs in supervisor mode.
+Executing privileged instructions in the guest traps into KVM (in the host
+kernel), where we decode and emulate them. Through this technique, unmodified
+440 Linux kernels can be run (slowly) as guests. Future performance work will
+focus on reducing the overhead and frequency of these traps.
+
+The usual code flow starts from userspace invoking a "run" ioctl, which
+causes KVM to switch into guest context. We use IVPR to hijack the host
+interrupt vectors while running the guest, which allows us to direct all
+interrupts to kvmppc_handle_interrupt(). At this point, we could either
+- handle the interrupt completely (e.g. emulate "mtspr SPRG0"), or
+- let the host interrupt handler run (e.g. when the decrementer fires), or
+- return to host userspace (e.g. when the guest performs device MMIO)
+
+Address spaces: We take advantage of the fact that Linux doesn't use the AS=1
+address space (in host or guest), which gives us virtual address space to use
+for guest mappings. While the guest is running, the host kernel remains mapped
+in AS=0, but the guest can only use AS=1 mappings.
+
+TLB entries: The TLB entries covering the host linear mapping remain
+present while running the guest. This reduces the overhead of lightweight
+exits, which are handled by KVM running in the host kernel. We keep three
+copies of the TLB:
+ - guest TLB: contents of the TLB as the guest sees it
+ - shadow TLB: the TLB that is actually in hardware while the guest is running
+ - host TLB: to restore TLB state when context switching guest -> host
+When a TLB miss occurs because a mapping was not present in the shadow TLB,
+but was present in the guest TLB, KVM handles the fault without invoking the
+guest. Large guest pages are backed by multiple 4KB shadow pages through this
+mechanism.
+
+IO: MMIO and DCR accesses are emulated by userspace. We use virtio for network
+and block IO, so those drivers must be enabled in the guest. It's possible
+that some qemu device emulation (e.g. e1000 or rtl8139) may also work with
+little effort.
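As a rough sketch of the three-way exit dispatch described above (illustrative
only; the names, constants, and return codes below are invented for the
example and are not the actual kvmppc_handle_interrupt() code):

	enum exit_action {
		EXIT_HANDLED,		/* emulated in the host kernel, resume the guest */
		EXIT_HOST_IRQ,		/* replay the event through the host handler */
		EXIT_TO_USERSPACE,	/* e.g. MMIO: let userspace device emulation run */
	};

	enum exit_cause { CAUSE_PRIV_INSN, CAUSE_DECREMENTER, CAUSE_MMIO };

	static enum exit_action dispatch_guest_exit(enum exit_cause cause)
	{
		switch (cause) {
		case CAUSE_MMIO:
			return EXIT_TO_USERSPACE;	/* device models live in qemu */
		case CAUSE_DECREMENTER:
			return EXIT_HOST_IRQ;		/* the host still owns the timer */
		case CAUSE_PRIV_INSN:
		default:
			return EXIT_HANDLED;		/* e.g. emulate "mtspr SPRG0" */
		}
	}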
diff --git a/Documentation/s390/kvm.txt b/Documentation/s390/kvm.txt
new file mode 100644
index 000000000000..6f5ceb0f09fc
--- /dev/null
+++ b/Documentation/s390/kvm.txt
@@ -0,0 +1,125 @@
1*** BIG FAT WARNING ***
2The kvm module is currently in EXPERIMENTAL state for s390. This means that
3the interface to the module is not yet considered to remain stable. Thus, be
4prepared that we keep breaking your userspace application and guest
5compatibility over and over again until we feel happy with the result. Make sure
6your guest kernel, your host kernel, and your userspace launcher are in a
7consistent state.
8
9This Documentation describes the unique ioctl calls to /dev/kvm, the resulting
10kvm-vm file descriptors, and the kvm-vcpu file descriptors that differ from x86.
11
121. ioctl calls to /dev/kvm
13KVM does support the following ioctls on s390 that are common with other
14architectures and do behave the same:
15KVM_GET_API_VERSION
16KVM_CREATE_VM (*) see note
17KVM_CHECK_EXTENSION
18KVM_GET_VCPU_MMAP_SIZE
19
20Notes:
21* KVM_CREATE_VM may fail on s390, if the calling process has multiple
22threads and has not called KVM_S390_ENABLE_SIE before.
23
24In addition, on s390 the following architecture specific ioctls are supported:
25ioctl: KVM_S390_ENABLE_SIE
26args: none
27see also: include/linux/kvm.h
28This call causes the kernel to switch on PGSTE in the user page table. This
29operation is needed in order to run a virtual machine, and it requires the
30calling process to be single-threaded. Note that the first call to KVM_CREATE_VM
31will implicitly try to switch on PGSTE if the user process has not called
32KVM_S390_ENABLE_SIE before. User processes that want to launch multiple threads
33before creating a virtual machine have to call KVM_S390_ENABLE_SIE, or will
34observe an error calling KVM_CREATE_VM. Switching on PGSTE is a one-time
35operation, is not reversible, and will persist over the entire lifetime of
36the calling process. It does not have any user-visible effect other than a small
37performance penalty.
38
392. ioctl calls to the kvm-vm file descriptor
40KVM does support the following ioctls on s390 that are common with other
41architectures and do behave the same:
42KVM_CREATE_VCPU
43KVM_SET_USER_MEMORY_REGION (*) see note
44KVM_GET_DIRTY_LOG (**) see note
45
46Notes:
47* kvm does only allow exactly one memory slot on s390, which has to start
48 at guest absolute address zero and at a user address that is aligned on any
49 page boundary. This hardware "limitation" allows us to have a few unique
50 optimizations. The memory slot doesn't have to be filled
51 with memory actually, it may contain sparse holes. That said, with different
52 user memory layout this does still allow a large flexibility when
53 doing the guest memory setup.
54** KVM_GET_DIRTY_LOG doesn't work properly yet. The user will receive an empty
55log. This ioctl call is only needed for guest migration, and we intend to
56implement this one in the future.
57
58In addition, on s390 the following architecture specific ioctls for the kvm-vm
59file descriptor are supported:
60ioctl: KVM_S390_INTERRUPT
61args: struct kvm_s390_interrupt *
62see also: include/linux/kvm.h
63This ioctl is used to submit a floating interrupt for a virtual machine.
64Floating interrupts may be delivered to any virtual cpu in the configuration.
65Only some interrupt types defined in include/linux/kvm.h make sense when
66submitted as floating interrupts. The following interrupts are not considered
67to be useful as floating interrupts, and a call to inject them will result in
68-EINVAL error code: program interrupts and interprocessor signals. Valid
69floating interrupts are:
70KVM_S390_INT_VIRTIO
71KVM_S390_INT_SERVICE
72
733. ioctl calls to the kvm-vcpu file descriptor
74KVM does support the following ioctls on s390 that are common with other
75architectures and do behave the same:
76KVM_RUN
77KVM_GET_REGS
78KVM_SET_REGS
79KVM_GET_SREGS
80KVM_SET_SREGS
81KVM_GET_FPU
82KVM_SET_FPU
83
84In addition, on s390 the following architecture specific ioctls for the
85kvm-vcpu file descriptor are supported:
86ioctl: KVM_S390_INTERRUPT
87args: struct kvm_s390_interrupt *
88see also: include/linux/kvm.h
89This ioctl is used to submit an interrupt for a specific virtual cpu.
90Only some interrupt types defined in include/linux/kvm.h make sense when
91submitted for a specific cpu. The following interrupts are not considered
92to be useful, and a call to inject them will result in -EINVAL error code:
93service processor calls and virtio interrupts. Valid interrupt types are:
94KVM_S390_PROGRAM_INT
95KVM_S390_SIGP_STOP
96KVM_S390_RESTART
97KVM_S390_SIGP_SET_PREFIX
98KVM_S390_INT_EMERGENCY
99
100ioctl: KVM_S390_STORE_STATUS
101args: unsigned long
102see also: include/linux/kvm.h
103This ioctl stores the state of the cpu at the guest real address given as
104argument, unless one of the following values defined in include/linux/kvm.h
105is given as arguement:
106KVM_S390_STORE_STATUS_NOADDR - the CPU stores its status to the save area in
107absolute lowcore as defined by the principles of operation
108KVM_S390_STORE_STATUS_PREFIXED - the CPU stores its status to the save area in
109its prefix page just like the dump tool that comes with zipl. This is useful
110to create a system dump for use with lkcdutils or crash.
111
112ioctl: KVM_S390_SET_INITIAL_PSW
113args: struct kvm_s390_psw *
114see also: include/linux/kvm.h
115This ioctl can be used to set the processor status word (psw) of a stopped cpu
116prior to running it with KVM_RUN. Note that this call is not required to modify
117the psw during sie intercepts that fall back to userspace: struct kvm_run
118contains the psw, and that value is evaluated on reentry to KVM_RUN
119after the intercept exit was recognized.
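
A sketch, again with vcpu_fd assumed and purely illustrative psw values (a
64-bit addressing mode mask and an arbitrary entry address):

	/* Point a stopped vcpu at its entry psw before the first KVM_RUN. */
	struct kvm_s390_psw psw = {
		.mask = 0x0000000180000000UL,	/* illustrative mask bits */
		.addr = 0x10000,		/* illustrative entry address */
	};

	if (ioctl(vcpu_fd, KVM_S390_SET_INITIAL_PSW, &psw) < 0)
		perror("KVM_S390_SET_INITIAL_PSW");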
120
121ioctl: KVM_S390_INITIAL_RESET
122args: none
123see also: include/linux/kvm.h
124This ioctl can be used to perform an initial cpu reset as defined by the
125principles of operation. The target cpu has to be in stopped state.
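
A call sketch under the same assumptions as above:

	/* Perform an initial cpu reset on a stopped vcpu (no arguments). */
	if (ioctl(vcpu_fd, KVM_S390_INITIAL_RESET, 0) < 0)
		perror("KVM_S390_INITIAL_RESET");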
diff --git a/Documentation/smart-config.txt b/Documentation/smart-config.txt
deleted file mode 100644
index 8467447b5a87..000000000000
--- a/Documentation/smart-config.txt
+++ /dev/null
@@ -1,98 +0,0 @@
1Smart CONFIG_* Dependencies
21 August 1999
3
4Michael Chastain <mec@shout.net>
5Werner Almesberger <almesber@lrc.di.epfl.ch>
6Martin von Loewis <martin@mira.isdn.cs.tu-berlin.de>
7
8Here is the problem:
9
10 Suppose that drivers/net/foo.c has the following lines:
11
12 #include <linux/config.h>
13
14 ...
15
16 #ifdef CONFIG_FOO_AUTOFROB
17 /* Code for auto-frobbing */
18 #else
19 /* Manual frobbing only */
20 #endif
21
22 ...
23
24 #ifdef CONFIG_FOO_MODEL_TWO
25 /* Code for model two */
26 #endif
27
28 Now suppose the user (the person building kernels) reconfigures the
29 kernel to change some unrelated setting. This will regenerate the
30 file include/linux/autoconf.h, which will cause include/linux/config.h
31 to be out of date, which will cause drivers/net/foo.c to be recompiled.
32
33 Most kernel sources, perhaps 80% of them, have at least one CONFIG_*
34 dependency somewhere. So changing _any_ CONFIG_* setting requires
35 almost _all_ of the kernel to be recompiled.
36
37Here is the solution:
38
39 We've made the dependency generator, mkdep.c, smarter. Instead of
40 generating this dependency:
41
42 drivers/net/foo.c: include/linux/config.h
43
44 It now generates these dependencies:
45
46 drivers/net/foo.c: \
47 include/config/foo/autofrob.h \
48 include/config/foo/model/two.h
49
50 So drivers/net/foo.c depends only on the CONFIG_* lines that
51 it actually uses.
52
53 A new program, split-include.c, runs at the beginning of
54 compilation (make bzImage or make zImage). split-include reads
55 include/linux/autoconf.h and updates the include/config/ tree,
56 writing one file per option. It updates only the files for options
57 that have changed.
58
59Flag Dependencies
60
61 Martin Von Loewis contributed another feature to this patch:
62 'flag dependencies'. The idea is that a .o file depends on
63 the compilation flags used to build it. The file foo.o has
64 its flags stored in .flags.foo.o.
65
66 Suppose the user changes the foo driver from resident to modular.
67 'make' will notice that the current foo.o was not compiled with
68 -DMODULE and will recompile foo.c.
69
70 All .o files made from C source have flag dependencies. So do .o
71 files made with ld, and .a files made with ar. However, .o files
72 made from assembly source do not have flag dependencies (nobody
73 needs this yet, but it would be good to fix).
74
75Per-source-file Flags
76
77 Flag dependencies also work with per-source-file flags.
78 You can specify compilation flags for individual source files
79 like this:
80
81 CFLAGS_foo.o = -DSPECIAL_FOO_DEFINE
82
83 This helps clean up drivers/net/Makefile, drivers/scsi/Makefile,
84 and several other Makefiles.
85
86Credit
87
88 Werner Almesberger had the original idea and wrote the first
89 version of this patch.
90
91 Michael Chastain picked it up and continued development. He is
92 now the principal author and maintainer. Please report any bugs
93 to him.
94
95 Martin von Loewis wrote flag dependencies, with some modifications
96 by Michael Chastain.
97
98 Thanks to all of the beta testers.
diff --git a/MAINTAINERS b/MAINTAINERS
index a942f3852499..c1dd1ae7b133 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2329,6 +2329,13 @@ L: kvm-devel@lists.sourceforge.net
2329W: kvm.sourceforge.net 2329W: kvm.sourceforge.net
2330S: Supported 2330S: Supported
2331 2331
2332KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC
2333P: Hollis Blanchard
2334M: hollisb@us.ibm.com
2335L: kvm-ppc-devel@lists.sourceforge.net
2336W: kvm.sourceforge.net
2337S: Supported
2338
2332KERNEL VIRTUAL MACHINE For Itanium(KVM/IA64) 2339KERNEL VIRTUAL MACHINE For Itanium(KVM/IA64)
2333P: Anthony Xu 2340P: Anthony Xu
2334M: anthony.xu@intel.com 2341M: anthony.xu@intel.com
@@ -2338,6 +2345,16 @@ L: kvm-ia64-devel@lists.sourceforge.net
2338W: kvm.sourceforge.net 2345W: kvm.sourceforge.net
2339S: Supported 2346S: Supported
2340 2347
2348KERNEL VIRTUAL MACHINE for s390 (KVM/s390)
2349P: Carsten Otte
2350M: cotte@de.ibm.com
2351P: Christian Borntraeger
2352M: borntraeger@de.ibm.com
2353M: linux390@de.ibm.com
2354L: linux-s390@vger.kernel.org
2355W: http://www.ibm.com/developerworks/linux/linux390/
2356S: Supported
2357
2341KEXEC 2358KEXEC
2342P: Eric Biederman 2359P: Eric Biederman
2343M: ebiederm@xmission.com 2360M: ebiederm@xmission.com
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index cd13e138bd03..3aa6c821449a 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -19,6 +19,7 @@ config IA64
19 select HAVE_OPROFILE 19 select HAVE_OPROFILE
20 select HAVE_KPROBES 20 select HAVE_KPROBES
21 select HAVE_KRETPROBES 21 select HAVE_KRETPROBES
22 select HAVE_KVM
22 default y 23 default y
23 help 24 help
24 The Itanium Processor Family is Intel's 64-bit successor to 25 The Itanium Processor Family is Intel's 64-bit successor to
@@ -589,6 +590,8 @@ config MSPEC
589 590
590source "fs/Kconfig" 591source "fs/Kconfig"
591 592
593source "arch/ia64/kvm/Kconfig"
594
592source "lib/Kconfig" 595source "lib/Kconfig"
593 596
594# 597#
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index f1645c4f7039..ec4cca477f49 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -57,6 +57,7 @@ core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/
57core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/ 57core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/
58core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/ 58core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/
59core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/ 59core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/
60core-$(CONFIG_KVM) += arch/ia64/kvm/
60 61
61drivers-$(CONFIG_PCI) += arch/ia64/pci/ 62drivers-$(CONFIG_PCI) += arch/ia64/pci/
62drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/ 63drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
new file mode 100644
index 000000000000..7914e4828504
--- /dev/null
+++ b/arch/ia64/kvm/Kconfig
@@ -0,0 +1,49 @@
1#
2# KVM configuration
3#
4config HAVE_KVM
5 bool
6
7menuconfig VIRTUALIZATION
8 bool "Virtualization"
9 depends on HAVE_KVM || IA64
10 default y
11 ---help---
12 Say Y here to get to see options for using your Linux host to run other
13 operating systems inside virtual machines (guests).
14 This option alone does not add any kernel code.
15
16 If you say N, all options in this submenu will be skipped and disabled.
17
18if VIRTUALIZATION
19
20config KVM
21 tristate "Kernel-based Virtual Machine (KVM) support"
22 depends on HAVE_KVM && EXPERIMENTAL
23 select PREEMPT_NOTIFIERS
24 select ANON_INODES
25 ---help---
26 Support hosting fully virtualized guest machines using hardware
27 virtualization extensions. You will need a fairly recent
28 processor equipped with virtualization extensions. You will also
29 need to select one or more of the processor modules below.
30
31 This module provides access to the hardware capabilities through
32 a character device node named /dev/kvm.
33
34 To compile this as a module, choose M here: the module
35 will be called kvm.
36
37 If unsure, say N.
38
39config KVM_INTEL
40 tristate "KVM for Intel Itanium 2 processors support"
41 depends on KVM && m
42 ---help---
43 Provides support for KVM on Itanium 2 processors equipped with the VT
44 extensions.
45
46config KVM_TRACE
47 bool
48
49endif # VIRTUALIZATION
diff --git a/arch/ia64/kvm/Makefile b/arch/ia64/kvm/Makefile
new file mode 100644
index 000000000000..41b034ffa73b
--- /dev/null
+++ b/arch/ia64/kvm/Makefile
@@ -0,0 +1,61 @@
1#This Makefile is used to generate asm-offsets.h and to build the source.
2#
3
4#Generate asm-offsets.h for vmm module build
5offsets-file := asm-offsets.h
6
7always := $(offsets-file)
8targets := $(offsets-file)
9targets += arch/ia64/kvm/asm-offsets.s
10clean-files := $(addprefix $(objtree)/,$(targets) $(obj)/memcpy.S $(obj)/memset.S)
11
12# Default sed regexp - multiline due to syntax constraints
13define sed-y
14 "/^->/{s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; s:->::; p;}"
15endef
16
17quiet_cmd_offsets = GEN $@
18define cmd_offsets
19 (set -e; \
20 echo "#ifndef __ASM_KVM_OFFSETS_H__"; \
21 echo "#define __ASM_KVM_OFFSETS_H__"; \
22 echo "/*"; \
23 echo " * DO NOT MODIFY."; \
24 echo " *"; \
25 echo " * This file was generated by Makefile"; \
26 echo " *"; \
27 echo " */"; \
28 echo ""; \
29 sed -ne $(sed-y) $<; \
30 echo ""; \
31 echo "#endif" ) > $@
32endef
33# We use internal rules to avoid the "is up to date" message from make
34arch/ia64/kvm/asm-offsets.s: arch/ia64/kvm/asm-offsets.c
35 $(call if_changed_dep,cc_s_c)
36
37$(obj)/$(offsets-file): arch/ia64/kvm/asm-offsets.s
38 $(call cmd,offsets)
39
40#
41# Makefile for Kernel-based Virtual Machine module
42#
43
44EXTRA_CFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/
45
46$(addprefix $(objtree)/,$(obj)/memcpy.S $(obj)/memset.S):
47 $(shell ln -snf ../lib/memcpy.S $(src)/memcpy.S)
48 $(shell ln -snf ../lib/memset.S $(src)/memset.S)
49
50common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o)
51
52kvm-objs := $(common-objs) kvm-ia64.o kvm_fw.o
53obj-$(CONFIG_KVM) += kvm.o
54
55FORCE : $(obj)/$(offsets-file)
56EXTRA_CFLAGS_vcpu.o += -mfixed-range=f2-f5,f12-f127
57kvm-intel-objs = vmm.o vmm_ivt.o trampoline.o vcpu.o optvfault.o mmio.o \
58 vtlb.o process.o
59#Link in memcpy and memset to avoid possible structure assignment errors
60kvm-intel-objs += memset.o memcpy.o
61obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
diff --git a/arch/ia64/kvm/asm-offsets.c b/arch/ia64/kvm/asm-offsets.c
new file mode 100644
index 000000000000..4e3dc13a619c
--- /dev/null
+++ b/arch/ia64/kvm/asm-offsets.c
@@ -0,0 +1,251 @@
1/*
2 * asm-offsets.c Generate definitions needed by assembly language modules.
3 * This code generates raw asm output which is post-processed
4 * to extract and format the required data.
5 *
6 * Anthony Xu <anthony.xu@intel.com>
7 * Xiantao Zhang <xiantao.zhang@intel.com>
8 * Copyright (c) 2007 Intel Corporation KVM support.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms and conditions of the GNU General Public License,
12 * version 2, as published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License along with
20 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
21 * Place - Suite 330, Boston, MA 02111-1307 USA.
22 *
23 */
24
25#include <linux/autoconf.h>
26#include <linux/kvm_host.h>
27
28#include "vcpu.h"
29
30#define task_struct kvm_vcpu
31
32#define DEFINE(sym, val) \
33 asm volatile("\n->" #sym " (%0) " #val : : "i" (val))
34
35#define BLANK() asm volatile("\n->" : :)
36
37#define OFFSET(_sym, _str, _mem) \
38 DEFINE(_sym, offsetof(_str, _mem));
39
40void foo(void)
41{
42 DEFINE(VMM_TASK_SIZE, sizeof(struct kvm_vcpu));
43 DEFINE(VMM_PT_REGS_SIZE, sizeof(struct kvm_pt_regs));
44
45 BLANK();
46
47 DEFINE(VMM_VCPU_META_RR0_OFFSET,
48 offsetof(struct kvm_vcpu, arch.metaphysical_rr0));
49 DEFINE(VMM_VCPU_META_SAVED_RR0_OFFSET,
50 offsetof(struct kvm_vcpu,
51 arch.metaphysical_saved_rr0));
52 DEFINE(VMM_VCPU_VRR0_OFFSET,
53 offsetof(struct kvm_vcpu, arch.vrr[0]));
54 DEFINE(VMM_VPD_IRR0_OFFSET,
55 offsetof(struct vpd, irr[0]));
56 DEFINE(VMM_VCPU_ITC_CHECK_OFFSET,
57 offsetof(struct kvm_vcpu, arch.itc_check));
58 DEFINE(VMM_VCPU_IRQ_CHECK_OFFSET,
59 offsetof(struct kvm_vcpu, arch.irq_check));
60 DEFINE(VMM_VPD_VHPI_OFFSET,
61 offsetof(struct vpd, vhpi));
62 DEFINE(VMM_VCPU_VSA_BASE_OFFSET,
63 offsetof(struct kvm_vcpu, arch.vsa_base));
64 DEFINE(VMM_VCPU_VPD_OFFSET,
65 offsetof(struct kvm_vcpu, arch.vpd));
66 DEFINE(VMM_VCPU_IRQ_CHECK,
67 offsetof(struct kvm_vcpu, arch.irq_check));
68 DEFINE(VMM_VCPU_TIMER_PENDING,
69 offsetof(struct kvm_vcpu, arch.timer_pending));
70 DEFINE(VMM_VCPU_META_SAVED_RR0_OFFSET,
71 offsetof(struct kvm_vcpu, arch.metaphysical_saved_rr0));
72 DEFINE(VMM_VCPU_MODE_FLAGS_OFFSET,
73 offsetof(struct kvm_vcpu, arch.mode_flags));
74 DEFINE(VMM_VCPU_ITC_OFS_OFFSET,
75 offsetof(struct kvm_vcpu, arch.itc_offset));
76 DEFINE(VMM_VCPU_LAST_ITC_OFFSET,
77 offsetof(struct kvm_vcpu, arch.last_itc));
78 DEFINE(VMM_VCPU_SAVED_GP_OFFSET,
79 offsetof(struct kvm_vcpu, arch.saved_gp));
80
81 BLANK();
82
83 DEFINE(VMM_PT_REGS_B6_OFFSET,
84 offsetof(struct kvm_pt_regs, b6));
85 DEFINE(VMM_PT_REGS_B7_OFFSET,
86 offsetof(struct kvm_pt_regs, b7));
87 DEFINE(VMM_PT_REGS_AR_CSD_OFFSET,
88 offsetof(struct kvm_pt_regs, ar_csd));
89 DEFINE(VMM_PT_REGS_AR_SSD_OFFSET,
90 offsetof(struct kvm_pt_regs, ar_ssd));
91 DEFINE(VMM_PT_REGS_R8_OFFSET,
92 offsetof(struct kvm_pt_regs, r8));
93 DEFINE(VMM_PT_REGS_R9_OFFSET,
94 offsetof(struct kvm_pt_regs, r9));
95 DEFINE(VMM_PT_REGS_R10_OFFSET,
96 offsetof(struct kvm_pt_regs, r10));
97 DEFINE(VMM_PT_REGS_R11_OFFSET,
98 offsetof(struct kvm_pt_regs, r11));
99 DEFINE(VMM_PT_REGS_CR_IPSR_OFFSET,
100 offsetof(struct kvm_pt_regs, cr_ipsr));
101 DEFINE(VMM_PT_REGS_CR_IIP_OFFSET,
102 offsetof(struct kvm_pt_regs, cr_iip));
103 DEFINE(VMM_PT_REGS_CR_IFS_OFFSET,
104 offsetof(struct kvm_pt_regs, cr_ifs));
105 DEFINE(VMM_PT_REGS_AR_UNAT_OFFSET,
106 offsetof(struct kvm_pt_regs, ar_unat));
107 DEFINE(VMM_PT_REGS_AR_PFS_OFFSET,
108 offsetof(struct kvm_pt_regs, ar_pfs));
109 DEFINE(VMM_PT_REGS_AR_RSC_OFFSET,
110 offsetof(struct kvm_pt_regs, ar_rsc));
111 DEFINE(VMM_PT_REGS_AR_RNAT_OFFSET,
112 offsetof(struct kvm_pt_regs, ar_rnat));
113
114 DEFINE(VMM_PT_REGS_AR_BSPSTORE_OFFSET,
115 offsetof(struct kvm_pt_regs, ar_bspstore));
116 DEFINE(VMM_PT_REGS_PR_OFFSET,
117 offsetof(struct kvm_pt_regs, pr));
118 DEFINE(VMM_PT_REGS_B0_OFFSET,
119 offsetof(struct kvm_pt_regs, b0));
120 DEFINE(VMM_PT_REGS_LOADRS_OFFSET,
121 offsetof(struct kvm_pt_regs, loadrs));
122 DEFINE(VMM_PT_REGS_R1_OFFSET,
123 offsetof(struct kvm_pt_regs, r1));
124 DEFINE(VMM_PT_REGS_R12_OFFSET,
125 offsetof(struct kvm_pt_regs, r12));
126 DEFINE(VMM_PT_REGS_R13_OFFSET,
127 offsetof(struct kvm_pt_regs, r13));
128 DEFINE(VMM_PT_REGS_AR_FPSR_OFFSET,
129 offsetof(struct kvm_pt_regs, ar_fpsr));
130 DEFINE(VMM_PT_REGS_R15_OFFSET,
131 offsetof(struct kvm_pt_regs, r15));
132 DEFINE(VMM_PT_REGS_R14_OFFSET,
133 offsetof(struct kvm_pt_regs, r14));
134 DEFINE(VMM_PT_REGS_R2_OFFSET,
135 offsetof(struct kvm_pt_regs, r2));
136 DEFINE(VMM_PT_REGS_R3_OFFSET,
137 offsetof(struct kvm_pt_regs, r3));
138 DEFINE(VMM_PT_REGS_R16_OFFSET,
139 offsetof(struct kvm_pt_regs, r16));
140 DEFINE(VMM_PT_REGS_R17_OFFSET,
141 offsetof(struct kvm_pt_regs, r17));
142 DEFINE(VMM_PT_REGS_R18_OFFSET,
143 offsetof(struct kvm_pt_regs, r18));
144 DEFINE(VMM_PT_REGS_R19_OFFSET,
145 offsetof(struct kvm_pt_regs, r19));
146 DEFINE(VMM_PT_REGS_R20_OFFSET,
147 offsetof(struct kvm_pt_regs, r20));
148 DEFINE(VMM_PT_REGS_R21_OFFSET,
149 offsetof(struct kvm_pt_regs, r21));
150 DEFINE(VMM_PT_REGS_R22_OFFSET,
151 offsetof(struct kvm_pt_regs, r22));
152 DEFINE(VMM_PT_REGS_R23_OFFSET,
153 offsetof(struct kvm_pt_regs, r23));
154 DEFINE(VMM_PT_REGS_R24_OFFSET,
155 offsetof(struct kvm_pt_regs, r24));
156 DEFINE(VMM_PT_REGS_R25_OFFSET,
157 offsetof(struct kvm_pt_regs, r25));
158 DEFINE(VMM_PT_REGS_R26_OFFSET,
159 offsetof(struct kvm_pt_regs, r26));
160 DEFINE(VMM_PT_REGS_R27_OFFSET,
161 offsetof(struct kvm_pt_regs, r27));
162 DEFINE(VMM_PT_REGS_R28_OFFSET,
163 offsetof(struct kvm_pt_regs, r28));
164 DEFINE(VMM_PT_REGS_R29_OFFSET,
165 offsetof(struct kvm_pt_regs, r29));
166 DEFINE(VMM_PT_REGS_R30_OFFSET,
167 offsetof(struct kvm_pt_regs, r30));
168 DEFINE(VMM_PT_REGS_R31_OFFSET,
169 offsetof(struct kvm_pt_regs, r31));
170 DEFINE(VMM_PT_REGS_AR_CCV_OFFSET,
171 offsetof(struct kvm_pt_regs, ar_ccv));
172 DEFINE(VMM_PT_REGS_F6_OFFSET,
173 offsetof(struct kvm_pt_regs, f6));
174 DEFINE(VMM_PT_REGS_F7_OFFSET,
175 offsetof(struct kvm_pt_regs, f7));
176 DEFINE(VMM_PT_REGS_F8_OFFSET,
177 offsetof(struct kvm_pt_regs, f8));
178 DEFINE(VMM_PT_REGS_F9_OFFSET,
179 offsetof(struct kvm_pt_regs, f9));
180 DEFINE(VMM_PT_REGS_F10_OFFSET,
181 offsetof(struct kvm_pt_regs, f10));
182 DEFINE(VMM_PT_REGS_F11_OFFSET,
183 offsetof(struct kvm_pt_regs, f11));
184 DEFINE(VMM_PT_REGS_R4_OFFSET,
185 offsetof(struct kvm_pt_regs, r4));
186 DEFINE(VMM_PT_REGS_R5_OFFSET,
187 offsetof(struct kvm_pt_regs, r5));
188 DEFINE(VMM_PT_REGS_R6_OFFSET,
189 offsetof(struct kvm_pt_regs, r6));
190 DEFINE(VMM_PT_REGS_R7_OFFSET,
191 offsetof(struct kvm_pt_regs, r7));
192 DEFINE(VMM_PT_REGS_EML_UNAT_OFFSET,
193 offsetof(struct kvm_pt_regs, eml_unat));
194 DEFINE(VMM_VCPU_IIPA_OFFSET,
195 offsetof(struct kvm_vcpu, arch.cr_iipa));
196 DEFINE(VMM_VCPU_OPCODE_OFFSET,
197 offsetof(struct kvm_vcpu, arch.opcode));
198 DEFINE(VMM_VCPU_CAUSE_OFFSET, offsetof(struct kvm_vcpu, arch.cause));
199 DEFINE(VMM_VCPU_ISR_OFFSET,
200 offsetof(struct kvm_vcpu, arch.cr_isr));
201 DEFINE(VMM_PT_REGS_R16_SLOT,
202 (((offsetof(struct kvm_pt_regs, r16)
203 - sizeof(struct kvm_pt_regs)) >> 3) & 0x3f));
204 DEFINE(VMM_VCPU_MODE_FLAGS_OFFSET,
205 offsetof(struct kvm_vcpu, arch.mode_flags));
206 DEFINE(VMM_VCPU_GP_OFFSET, offsetof(struct kvm_vcpu, arch.__gp));
207 BLANK();
208
209 DEFINE(VMM_VPD_BASE_OFFSET, offsetof(struct kvm_vcpu, arch.vpd));
210 DEFINE(VMM_VPD_VIFS_OFFSET, offsetof(struct vpd, ifs));
211 DEFINE(VMM_VLSAPIC_INSVC_BASE_OFFSET,
212 offsetof(struct kvm_vcpu, arch.insvc[0]));
213 DEFINE(VMM_VPD_VPTA_OFFSET, offsetof(struct vpd, pta));
214 DEFINE(VMM_VPD_VPSR_OFFSET, offsetof(struct vpd, vpsr));
215
216 DEFINE(VMM_CTX_R4_OFFSET, offsetof(union context, gr[4]));
217 DEFINE(VMM_CTX_R5_OFFSET, offsetof(union context, gr[5]));
218 DEFINE(VMM_CTX_R12_OFFSET, offsetof(union context, gr[12]));
219 DEFINE(VMM_CTX_R13_OFFSET, offsetof(union context, gr[13]));
220 DEFINE(VMM_CTX_KR0_OFFSET, offsetof(union context, ar[0]));
221 DEFINE(VMM_CTX_KR1_OFFSET, offsetof(union context, ar[1]));
222 DEFINE(VMM_CTX_B0_OFFSET, offsetof(union context, br[0]));
223 DEFINE(VMM_CTX_B1_OFFSET, offsetof(union context, br[1]));
224 DEFINE(VMM_CTX_B2_OFFSET, offsetof(union context, br[2]));
225 DEFINE(VMM_CTX_RR0_OFFSET, offsetof(union context, rr[0]));
226 DEFINE(VMM_CTX_RSC_OFFSET, offsetof(union context, ar[16]));
227 DEFINE(VMM_CTX_BSPSTORE_OFFSET, offsetof(union context, ar[18]));
228 DEFINE(VMM_CTX_RNAT_OFFSET, offsetof(union context, ar[19]));
229 DEFINE(VMM_CTX_FCR_OFFSET, offsetof(union context, ar[21]));
230 DEFINE(VMM_CTX_EFLAG_OFFSET, offsetof(union context, ar[24]));
231 DEFINE(VMM_CTX_CFLG_OFFSET, offsetof(union context, ar[27]));
232 DEFINE(VMM_CTX_FSR_OFFSET, offsetof(union context, ar[28]));
233 DEFINE(VMM_CTX_FIR_OFFSET, offsetof(union context, ar[29]));
234 DEFINE(VMM_CTX_FDR_OFFSET, offsetof(union context, ar[30]));
235 DEFINE(VMM_CTX_UNAT_OFFSET, offsetof(union context, ar[36]));
236 DEFINE(VMM_CTX_FPSR_OFFSET, offsetof(union context, ar[40]));
237 DEFINE(VMM_CTX_PFS_OFFSET, offsetof(union context, ar[64]));
238 DEFINE(VMM_CTX_LC_OFFSET, offsetof(union context, ar[65]));
239 DEFINE(VMM_CTX_DCR_OFFSET, offsetof(union context, cr[0]));
240 DEFINE(VMM_CTX_IVA_OFFSET, offsetof(union context, cr[2]));
241 DEFINE(VMM_CTX_PTA_OFFSET, offsetof(union context, cr[8]));
242 DEFINE(VMM_CTX_IBR0_OFFSET, offsetof(union context, ibr[0]));
243 DEFINE(VMM_CTX_DBR0_OFFSET, offsetof(union context, dbr[0]));
244 DEFINE(VMM_CTX_F2_OFFSET, offsetof(union context, fr[2]));
245 DEFINE(VMM_CTX_F3_OFFSET, offsetof(union context, fr[3]));
246 DEFINE(VMM_CTX_F32_OFFSET, offsetof(union context, fr[32]));
247 DEFINE(VMM_CTX_F33_OFFSET, offsetof(union context, fr[33]));
248 DEFINE(VMM_CTX_PKR0_OFFSET, offsetof(union context, pkr[0]));
249 DEFINE(VMM_CTX_PSR_OFFSET, offsetof(union context, psr));
250 BLANK();
251}
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
new file mode 100644
index 000000000000..6df073240135
--- /dev/null
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -0,0 +1,1806 @@
1
2/*
3 * kvm_ia64.c: Basic KVM support on Itanium series processors
4 *
5 *
6 * Copyright (C) 2007, Intel Corporation.
7 * Xiantao Zhang (xiantao.zhang@intel.com)
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms and conditions of the GNU General Public License,
11 * version 2, as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
20 * Place - Suite 330, Boston, MA 02111-1307 USA.
21 *
22 */
23
24#include <linux/module.h>
25#include <linux/errno.h>
26#include <linux/percpu.h>
27#include <linux/gfp.h>
28#include <linux/fs.h>
29#include <linux/smp.h>
30#include <linux/kvm_host.h>
31#include <linux/kvm.h>
32#include <linux/bitops.h>
33#include <linux/hrtimer.h>
34#include <linux/uaccess.h>
35
36#include <asm/pgtable.h>
37#include <asm/gcc_intrin.h>
38#include <asm/pal.h>
39#include <asm/cacheflush.h>
40#include <asm/div64.h>
41#include <asm/tlb.h>
42
43#include "misc.h"
44#include "vti.h"
45#include "iodev.h"
46#include "ioapic.h"
47#include "lapic.h"
48
49static unsigned long kvm_vmm_base;
50static unsigned long kvm_vsa_base;
51static unsigned long kvm_vm_buffer;
52static unsigned long kvm_vm_buffer_size;
53unsigned long kvm_vmm_gp;
54
55static long vp_env_info;
56
57static struct kvm_vmm_info *kvm_vmm_info;
58
59static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu);
60
61struct kvm_stats_debugfs_item debugfs_entries[] = {
62 { NULL }
63};
64
65
66struct fdesc{
67 unsigned long ip;
68 unsigned long gp;
69};
70
71static void kvm_flush_icache(unsigned long start, unsigned long len)
72{
73 int l;
74
75 for (l = 0; l < (len + 32); l += 32)
76 ia64_fc(start + l);
77
78 ia64_sync_i();
79 ia64_srlz_i();
80}
81
82static void kvm_flush_tlb_all(void)
83{
84 unsigned long i, j, count0, count1, stride0, stride1, addr;
85 long flags;
86
87 addr = local_cpu_data->ptce_base;
88 count0 = local_cpu_data->ptce_count[0];
89 count1 = local_cpu_data->ptce_count[1];
90 stride0 = local_cpu_data->ptce_stride[0];
91 stride1 = local_cpu_data->ptce_stride[1];
92
93 local_irq_save(flags);
94 for (i = 0; i < count0; ++i) {
95 for (j = 0; j < count1; ++j) {
96 ia64_ptce(addr);
97 addr += stride1;
98 }
99 addr += stride0;
100 }
101 local_irq_restore(flags);
102 ia64_srlz_i(); /* srlz.i implies srlz.d */
103}
104
105long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler)
106{
107 struct ia64_pal_retval iprv;
108
109 PAL_CALL_STK(iprv, PAL_VP_CREATE, (u64)vpd, (u64)host_iva,
110 (u64)opt_handler);
111
112 return iprv.status;
113}
114
115static DEFINE_SPINLOCK(vp_lock);
116
117void kvm_arch_hardware_enable(void *garbage)
118{
119 long status;
120 long tmp_base;
121 unsigned long pte;
122 unsigned long saved_psr;
123 int slot;
124
125 pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base),
126 PAGE_KERNEL));
127 local_irq_save(saved_psr);
128 slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
129 if (slot < 0)
130 return;
131 local_irq_restore(saved_psr);
132
133 spin_lock(&vp_lock);
134 status = ia64_pal_vp_init_env(kvm_vsa_base ?
135 VP_INIT_ENV : VP_INIT_ENV_INITALIZE,
136 __pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base);
137 if (status != 0) {
138 printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n");
139 spin_unlock(&vp_lock); /* don't return with vp_lock held */
140 return ;
141 }
141
142 if (!kvm_vsa_base) {
143 kvm_vsa_base = tmp_base;
144 printk(KERN_INFO"kvm: kvm_vsa_base:0x%lx\n", kvm_vsa_base);
145 }
146 spin_unlock(&vp_lock);
147 ia64_ptr_entry(0x3, slot);
148}
149
150void kvm_arch_hardware_disable(void *garbage)
151{
152
153 long status;
154 int slot;
155 unsigned long pte;
156 unsigned long saved_psr;
157 unsigned long host_iva = ia64_getreg(_IA64_REG_CR_IVA);
158
159 pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base),
160 PAGE_KERNEL));
161
162 local_irq_save(saved_psr);
163 slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
164 if (slot < 0)
165 return;
166 local_irq_restore(saved_psr);
167
168 status = ia64_pal_vp_exit_env(host_iva);
169 if (status)
170 printk(KERN_DEBUG"kvm: Failed to disable VT support! :%ld\n",
171 status);
172 ia64_ptr_entry(0x3, slot);
173}
174
175void kvm_arch_check_processor_compat(void *rtn)
176{
177 *(int *)rtn = 0;
178}
179
180int kvm_dev_ioctl_check_extension(long ext)
181{
182
183 int r;
184
185 switch (ext) {
186 case KVM_CAP_IRQCHIP:
187 case KVM_CAP_USER_MEMORY:
188
189 r = 1;
190 break;
191 default:
192 r = 0;
193 }
194 return r;
195
196}
197
198static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
199 gpa_t addr)
200{
201 struct kvm_io_device *dev;
202
203 dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
204
205 return dev;
206}
207
208static int handle_vm_error(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
209{
210 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
211 kvm_run->hw.hardware_exit_reason = 1;
212 return 0;
213}
214
215static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
216{
217 struct kvm_mmio_req *p;
218 struct kvm_io_device *mmio_dev;
219
220 p = kvm_get_vcpu_ioreq(vcpu);
221
222 if ((p->addr & PAGE_MASK) == IOAPIC_DEFAULT_BASE_ADDRESS)
223 goto mmio;
224 vcpu->mmio_needed = 1;
225 vcpu->mmio_phys_addr = kvm_run->mmio.phys_addr = p->addr;
226 vcpu->mmio_size = kvm_run->mmio.len = p->size;
227 vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir;
228
229 if (vcpu->mmio_is_write)
230 memcpy(vcpu->mmio_data, &p->data, p->size);
231 memcpy(kvm_run->mmio.data, &p->data, p->size);
232 kvm_run->exit_reason = KVM_EXIT_MMIO;
233 return 0;
234mmio:
235 mmio_dev = vcpu_find_mmio_dev(vcpu, p->addr);
236 if (mmio_dev) {
237 if (!p->dir)
238 kvm_iodevice_write(mmio_dev, p->addr, p->size,
239 &p->data);
240 else
241 kvm_iodevice_read(mmio_dev, p->addr, p->size,
242 &p->data);
243
244 } else
245 printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr);
246 p->state = STATE_IORESP_READY;
247
248 return 1;
249}
250
251static int handle_pal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
252{
253 struct exit_ctl_data *p;
254
255 p = kvm_get_exit_data(vcpu);
256
257 if (p->exit_reason == EXIT_REASON_PAL_CALL)
258 return kvm_pal_emul(vcpu, kvm_run);
259 else {
260 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
261 kvm_run->hw.hardware_exit_reason = 2;
262 return 0;
263 }
264}
265
266static int handle_sal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
267{
268 struct exit_ctl_data *p;
269
270 p = kvm_get_exit_data(vcpu);
271
272 if (p->exit_reason == EXIT_REASON_SAL_CALL) {
273 kvm_sal_emul(vcpu);
274 return 1;
275 } else {
276 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
277 kvm_run->hw.hardware_exit_reason = 3;
278 return 0;
279 }
280
281}
282
283/*
284 * offset: address offset to IPI space.
285 * value: deliver value.
286 */
287static void vcpu_deliver_ipi(struct kvm_vcpu *vcpu, uint64_t dm,
288 uint64_t vector)
289{
290 switch (dm) {
291 case SAPIC_FIXED:
292 kvm_apic_set_irq(vcpu, vector, 0);
293 break;
294 case SAPIC_NMI:
295 kvm_apic_set_irq(vcpu, 2, 0);
296 break;
297 case SAPIC_EXTINT:
298 kvm_apic_set_irq(vcpu, 0, 0);
299 break;
300 case SAPIC_INIT:
301 case SAPIC_PMI:
302 default:
303 printk(KERN_ERR"kvm: Unimplemented Deliver reserved IPI!\n");
304 break;
305 }
306}
307
308static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
309 unsigned long eid)
310{
311 union ia64_lid lid;
312 int i;
313
314 for (i = 0; i < KVM_MAX_VCPUS; i++) {
315 if (kvm->vcpus[i]) {
316 lid.val = VCPU_LID(kvm->vcpus[i]);
317 if (lid.id == id && lid.eid == eid)
318 return kvm->vcpus[i];
319 }
320 }
321
322 return NULL;
323}
324
325static int handle_ipi(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
326{
327 struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
328 struct kvm_vcpu *target_vcpu;
329 struct kvm_pt_regs *regs;
330 union ia64_ipi_a addr = p->u.ipi_data.addr;
331 union ia64_ipi_d data = p->u.ipi_data.data;
332
333 target_vcpu = lid_to_vcpu(vcpu->kvm, addr.id, addr.eid);
334 if (!target_vcpu)
335 return handle_vm_error(vcpu, kvm_run);
336
337 if (!target_vcpu->arch.launched) {
338 regs = vcpu_regs(target_vcpu);
339
340 regs->cr_iip = vcpu->kvm->arch.rdv_sal_data.boot_ip;
341 regs->r1 = vcpu->kvm->arch.rdv_sal_data.boot_gp;
342
343 target_vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
344 if (waitqueue_active(&target_vcpu->wq))
345 wake_up_interruptible(&target_vcpu->wq);
346 } else {
347 vcpu_deliver_ipi(target_vcpu, data.dm, data.vector);
348 if (target_vcpu != vcpu)
349 kvm_vcpu_kick(target_vcpu);
350 }
351
352 return 1;
353}
354
355struct call_data {
356 struct kvm_ptc_g ptc_g_data;
357 struct kvm_vcpu *vcpu;
358};
359
360static void vcpu_global_purge(void *info)
361{
362 struct call_data *p = (struct call_data *)info;
363 struct kvm_vcpu *vcpu = p->vcpu;
364
365 if (test_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
366 return;
367
368 set_bit(KVM_REQ_PTC_G, &vcpu->requests);
369 if (vcpu->arch.ptc_g_count < MAX_PTC_G_NUM) {
370 vcpu->arch.ptc_g_data[vcpu->arch.ptc_g_count++] =
371 p->ptc_g_data;
372 } else {
373 clear_bit(KVM_REQ_PTC_G, &vcpu->requests);
374 vcpu->arch.ptc_g_count = 0;
375 set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
376 }
377}
378
379static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
380{
381 struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
382 struct kvm *kvm = vcpu->kvm;
383 struct call_data call_data;
384 int i;
385 call_data.ptc_g_data = p->u.ptc_g_data;
386
387 for (i = 0; i < KVM_MAX_VCPUS; i++) {
388 if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state ==
389 KVM_MP_STATE_UNINITIALIZED ||
390 vcpu == kvm->vcpus[i])
391 continue;
392
393 if (waitqueue_active(&kvm->vcpus[i]->wq))
394 wake_up_interruptible(&kvm->vcpus[i]->wq);
395
396 if (kvm->vcpus[i]->cpu != -1) {
397 call_data.vcpu = kvm->vcpus[i];
398 smp_call_function_single(kvm->vcpus[i]->cpu,
399 vcpu_global_purge, &call_data, 0, 1);
400 } else
401 printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n");
402
403 }
404 return 1;
405}
406
407static int handle_switch_rr6(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
408{
409 return 1;
410}
411
412int kvm_emulate_halt(struct kvm_vcpu *vcpu)
413{
414
415 ktime_t kt;
416 long itc_diff;
417 unsigned long vcpu_now_itc;
418
419 unsigned long expires;
420 struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
421 unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec;
422 struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
423
424 vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset;
425
426 if (time_after(vcpu_now_itc, vpd->itm)) {
427 vcpu->arch.timer_check = 1;
428 return 1;
429 }
430 itc_diff = vpd->itm - vcpu_now_itc;
431 if (itc_diff < 0)
432 itc_diff = -itc_diff;
433
434 expires = div64_64(itc_diff, cyc_per_usec);
435 kt = ktime_set(0, 1000 * expires);
436 vcpu->arch.ht_active = 1;
437 hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
438
439 if (irqchip_in_kernel(vcpu->kvm)) {
440 vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
441 kvm_vcpu_block(vcpu);
442 hrtimer_cancel(p_ht);
443 vcpu->arch.ht_active = 0;
444
445 if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
446 return -EINTR;
447 return 1;
448 } else {
449 printk(KERN_ERR"kvm: Unsupported userspace halt!");
450 return 0;
451 }
452}
453
454static int handle_vm_shutdown(struct kvm_vcpu *vcpu,
455 struct kvm_run *kvm_run)
456{
457 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
458 return 0;
459}
460
461static int handle_external_interrupt(struct kvm_vcpu *vcpu,
462 struct kvm_run *kvm_run)
463{
464 return 1;
465}
466
467static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
468 struct kvm_run *kvm_run) = {
469 [EXIT_REASON_VM_PANIC] = handle_vm_error,
470 [EXIT_REASON_MMIO_INSTRUCTION] = handle_mmio,
471 [EXIT_REASON_PAL_CALL] = handle_pal_call,
472 [EXIT_REASON_SAL_CALL] = handle_sal_call,
473 [EXIT_REASON_SWITCH_RR6] = handle_switch_rr6,
474 [EXIT_REASON_VM_DESTROY] = handle_vm_shutdown,
475 [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt,
476 [EXIT_REASON_IPI] = handle_ipi,
477 [EXIT_REASON_PTC_G] = handle_global_purge,
478
479};
480
481static const int kvm_vti_max_exit_handlers =
482 sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers);
483
484static void kvm_prepare_guest_switch(struct kvm_vcpu *vcpu)
485{
486}
487
488static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu)
489{
490 struct exit_ctl_data *p_exit_data;
491
492 p_exit_data = kvm_get_exit_data(vcpu);
493 return p_exit_data->exit_reason;
494}
495
496/*
497 * The guest has exited. See if we can fix it or if we need userspace
498 * assistance.
499 */
500static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
501{
502 u32 exit_reason = kvm_get_exit_reason(vcpu);
503 vcpu->arch.last_exit = exit_reason;
504
505 if (exit_reason < kvm_vti_max_exit_handlers
506 && kvm_vti_exit_handlers[exit_reason])
507 return kvm_vti_exit_handlers[exit_reason](vcpu, kvm_run);
508 else {
509 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
510 kvm_run->hw.hardware_exit_reason = exit_reason;
511 }
512 return 0;
513}
514
515static inline void vti_set_rr6(unsigned long rr6)
516{
517 ia64_set_rr(RR6, rr6);
518 ia64_srlz_i();
519}
520
521static int kvm_insert_vmm_mapping(struct kvm_vcpu *vcpu)
522{
523 unsigned long pte;
524 struct kvm *kvm = vcpu->kvm;
525 int r;
526
527 /*Insert a pair of tr to map vmm*/
528 pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
529 r = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
530 if (r < 0)
531 goto out;
532 vcpu->arch.vmm_tr_slot = r;
533 /*Insert a pair of tr to map data of vm*/
534 pte = pte_val(mk_pte_phys(__pa(kvm->arch.vm_base), PAGE_KERNEL));
535 r = ia64_itr_entry(0x3, KVM_VM_DATA_BASE,
536 pte, KVM_VM_DATA_SHIFT);
537 if (r < 0)
538 goto out;
539 vcpu->arch.vm_tr_slot = r;
540 r = 0;
541out:
542 return r;
543
544}
545
546static void kvm_purge_vmm_mapping(struct kvm_vcpu *vcpu)
547{
548
549 ia64_ptr_entry(0x3, vcpu->arch.vmm_tr_slot);
550 ia64_ptr_entry(0x3, vcpu->arch.vm_tr_slot);
551
552}
553
554static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu)
555{
556 int cpu = smp_processor_id();
557
558 if (vcpu->arch.last_run_cpu != cpu ||
559 per_cpu(last_vcpu, cpu) != vcpu) {
560 per_cpu(last_vcpu, cpu) = vcpu;
561 vcpu->arch.last_run_cpu = cpu;
562 kvm_flush_tlb_all();
563 }
564
565 vcpu->arch.host_rr6 = ia64_get_rr(RR6);
566 vti_set_rr6(vcpu->arch.vmm_rr);
567 return kvm_insert_vmm_mapping(vcpu);
568}
569static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu)
570{
571 kvm_purge_vmm_mapping(vcpu);
572 vti_set_rr6(vcpu->arch.host_rr6);
573}
574
575static int vti_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
576{
577 union context *host_ctx, *guest_ctx;
578 int r;
579
580 /*Get host and guest context with guest address space.*/
581 host_ctx = kvm_get_host_context(vcpu);
582 guest_ctx = kvm_get_guest_context(vcpu);
583
584 r = kvm_vcpu_pre_transition(vcpu);
585 if (r < 0)
586 goto out;
587 kvm_vmm_info->tramp_entry(host_ctx, guest_ctx);
588 kvm_vcpu_post_transition(vcpu);
589 r = 0;
590out:
591 return r;
592}
593
594static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
595{
596 int r;
597
598again:
599 preempt_disable();
600
601 kvm_prepare_guest_switch(vcpu);
602 local_irq_disable();
603
604 if (signal_pending(current)) {
605 local_irq_enable();
606 preempt_enable();
607 r = -EINTR;
608 kvm_run->exit_reason = KVM_EXIT_INTR;
609 goto out;
610 }
611
612 vcpu->guest_mode = 1;
613 kvm_guest_enter();
614
615 r = vti_vcpu_run(vcpu, kvm_run);
616 if (r < 0) {
617 local_irq_enable();
618 preempt_enable();
619 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
620 goto out;
621 }
622
623 vcpu->arch.launched = 1;
624 vcpu->guest_mode = 0;
625 local_irq_enable();
626
627 /*
628 * We must have an instruction between local_irq_enable() and
629 * kvm_guest_exit(), so the timer interrupt isn't delayed by
630 * the interrupt shadow. The stat.exits increment will do nicely.
631 * But we need to prevent reordering, hence this barrier():
632 */
633 barrier();
634
635 kvm_guest_exit();
636
637 preempt_enable();
638
639 r = kvm_handle_exit(kvm_run, vcpu);
640
641 if (r > 0) {
642 if (!need_resched())
643 goto again;
644 }
645
646out:
647 if (r > 0) {
648 kvm_resched(vcpu);
649 goto again;
650 }
651
652 return r;
653}
654
655static void kvm_set_mmio_data(struct kvm_vcpu *vcpu)
656{
657 struct kvm_mmio_req *p = kvm_get_vcpu_ioreq(vcpu);
658
659 if (!vcpu->mmio_is_write)
660 memcpy(&p->data, vcpu->mmio_data, 8);
661 p->state = STATE_IORESP_READY;
662}
663
664int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
665{
666 int r;
667 sigset_t sigsaved;
668
669 vcpu_load(vcpu);
670
671 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
672 kvm_vcpu_block(vcpu);
673 vcpu_put(vcpu);
674 return -EAGAIN;
675 }
676
677 if (vcpu->sigset_active)
678 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
679
680 if (vcpu->mmio_needed) {
681 memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
682 kvm_set_mmio_data(vcpu);
683 vcpu->mmio_read_completed = 1;
684 vcpu->mmio_needed = 0;
685 }
686 r = __vcpu_run(vcpu, kvm_run);
687
688 if (vcpu->sigset_active)
689 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
690
691 vcpu_put(vcpu);
692 return r;
693}
694
695/*
696 * Allocate 16M memory for every vm to hold its specific data.
697 * Its memory map is defined in kvm_host.h.
698 */
699static struct kvm *kvm_alloc_kvm(void)
700{
701
702 struct kvm *kvm;
703 uint64_t vm_base;
704
705 vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE));
706
707 if (!vm_base)
708 return ERR_PTR(-ENOMEM);
709 printk(KERN_DEBUG"kvm: VM data's base Address:0x%lx\n", vm_base);
710
711 /* Zero all pages before use! */
712 memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
713
714 kvm = (struct kvm *)(vm_base + KVM_VM_OFS);
715 kvm->arch.vm_base = vm_base;
716
717 return kvm;
718}
719
720struct kvm_io_range {
721 unsigned long start;
722 unsigned long size;
723 unsigned long type;
724};
725
726static const struct kvm_io_range io_ranges[] = {
727 {VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER},
728 {MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO},
729 {LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO},
730 {IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC},
731 {PIB_START, PIB_SIZE, GPFN_PIB},
732};
733
734static void kvm_build_io_pmt(struct kvm *kvm)
735{
736 unsigned long i, j;
737
738 /* Mark I/O ranges */
739 for (i = 0; i < (sizeof(io_ranges) / sizeof(struct kvm_io_range));
740 i++) {
741 for (j = io_ranges[i].start;
742 j < io_ranges[i].start + io_ranges[i].size;
743 j += PAGE_SIZE)
744 kvm_set_pmt_entry(kvm, j >> PAGE_SHIFT,
745 io_ranges[i].type, 0);
746 }
747
748}
749
750/*Use unused rids to virtualize guest rid.*/
751#define GUEST_PHYSICAL_RR0 0x1739
752#define GUEST_PHYSICAL_RR4 0x2739
753#define VMM_INIT_RR 0x1660
754
755static void kvm_init_vm(struct kvm *kvm)
756{
757 long vm_base;
758
759 BUG_ON(!kvm);
760
761 kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0;
762 kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4;
763 kvm->arch.vmm_init_rr = VMM_INIT_RR;
764
765 vm_base = kvm->arch.vm_base;
766 if (vm_base) {
767 kvm->arch.vhpt_base = vm_base + KVM_VHPT_OFS;
768 kvm->arch.vtlb_base = vm_base + KVM_VTLB_OFS;
769 kvm->arch.vpd_base = vm_base + KVM_VPD_OFS;
770 }
771
772 /*
773 *Fill P2M entries for MMIO/IO ranges
774 */
775 kvm_build_io_pmt(kvm);
776
777}
778
779struct kvm *kvm_arch_create_vm(void)
780{
781 struct kvm *kvm = kvm_alloc_kvm();
782
783 if (IS_ERR(kvm))
784 return ERR_PTR(-ENOMEM);
785 kvm_init_vm(kvm);
786
787 return kvm;
788
789}
790
791static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm,
792 struct kvm_irqchip *chip)
793{
794 int r;
795
796 r = 0;
797 switch (chip->chip_id) {
798 case KVM_IRQCHIP_IOAPIC:
799 memcpy(&chip->chip.ioapic, ioapic_irqchip(kvm),
800 sizeof(struct kvm_ioapic_state));
801 break;
802 default:
803 r = -EINVAL;
804 break;
805 }
806 return r;
807}
808
809static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
810{
811 int r;
812
813 r = 0;
814 switch (chip->chip_id) {
815 case KVM_IRQCHIP_IOAPIC:
816 memcpy(ioapic_irqchip(kvm),
817 &chip->chip.ioapic,
818 sizeof(struct kvm_ioapic_state));
819 break;
820 default:
821 r = -EINVAL;
822 break;
823 }
824 return r;
825}
826
827#define RESTORE_REGS(_x) vcpu->arch._x = regs->_x
828
829int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
830{
831 int i;
832 struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
833 int r;
834
835 vcpu_load(vcpu);
836
837 for (i = 0; i < 16; i++) {
838 vpd->vgr[i] = regs->vpd.vgr[i];
839 vpd->vbgr[i] = regs->vpd.vbgr[i];
840 }
841 for (i = 0; i < 128; i++)
842 vpd->vcr[i] = regs->vpd.vcr[i];
843 vpd->vhpi = regs->vpd.vhpi;
844 vpd->vnat = regs->vpd.vnat;
845 vpd->vbnat = regs->vpd.vbnat;
846 vpd->vpsr = regs->vpd.vpsr;
847
848 vpd->vpr = regs->vpd.vpr;
849
850 r = -EFAULT; /* copy_from_user() returns bytes not copied, not -errno */
851 if (copy_from_user(&vcpu->arch.guest, regs->saved_guest,
852 sizeof(union context)))
853 goto out;
854 if (copy_from_user(vcpu + 1, regs->saved_stack +
855 sizeof(struct kvm_vcpu),
856 IA64_STK_OFFSET - sizeof(struct kvm_vcpu)))
857 goto out;
860 vcpu->arch.exit_data =
861 ((struct kvm_vcpu *)(regs->saved_stack))->arch.exit_data;
862
863 RESTORE_REGS(mp_state);
864 RESTORE_REGS(vmm_rr);
865 memcpy(vcpu->arch.itrs, regs->itrs, sizeof(struct thash_data) * NITRS);
866 memcpy(vcpu->arch.dtrs, regs->dtrs, sizeof(struct thash_data) * NDTRS);
867 RESTORE_REGS(itr_regions);
868 RESTORE_REGS(dtr_regions);
869 RESTORE_REGS(tc_regions);
870 RESTORE_REGS(irq_check);
871 RESTORE_REGS(itc_check);
872 RESTORE_REGS(timer_check);
873 RESTORE_REGS(timer_pending);
874 RESTORE_REGS(last_itc);
875 for (i = 0; i < 8; i++) {
876 vcpu->arch.vrr[i] = regs->vrr[i];
877 vcpu->arch.ibr[i] = regs->ibr[i];
878 vcpu->arch.dbr[i] = regs->dbr[i];
879 }
880 for (i = 0; i < 4; i++)
881 vcpu->arch.insvc[i] = regs->insvc[i];
882 RESTORE_REGS(xtp);
883 RESTORE_REGS(metaphysical_rr0);
884 RESTORE_REGS(metaphysical_rr4);
885 RESTORE_REGS(metaphysical_saved_rr0);
886 RESTORE_REGS(metaphysical_saved_rr4);
887 RESTORE_REGS(fp_psr);
888 RESTORE_REGS(saved_gp);
889
890 vcpu->arch.irq_new_pending = 1;
891 vcpu->arch.itc_offset = regs->saved_itc - ia64_getreg(_IA64_REG_AR_ITC);
892 set_bit(KVM_REQ_RESUME, &vcpu->requests);
893
894 vcpu_put(vcpu);
895 r = 0;
896out:
897 return r;
898}
899
900long kvm_arch_vm_ioctl(struct file *filp,
901 unsigned int ioctl, unsigned long arg)
902{
903 struct kvm *kvm = filp->private_data;
904 void __user *argp = (void __user *)arg;
905 int r = -EINVAL;
906
907 switch (ioctl) {
908 case KVM_SET_MEMORY_REGION: {
909 struct kvm_memory_region kvm_mem;
910 struct kvm_userspace_memory_region kvm_userspace_mem;
911
912 r = -EFAULT;
913 if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
914 goto out;
915 kvm_userspace_mem.slot = kvm_mem.slot;
916 kvm_userspace_mem.flags = kvm_mem.flags;
917 kvm_userspace_mem.guest_phys_addr =
918 kvm_mem.guest_phys_addr;
919 kvm_userspace_mem.memory_size = kvm_mem.memory_size;
920 r = kvm_vm_ioctl_set_memory_region(kvm,
921 &kvm_userspace_mem, 0);
922 if (r)
923 goto out;
924 break;
925 }
926 case KVM_CREATE_IRQCHIP:
928 r = kvm_ioapic_init(kvm);
929 if (r)
930 goto out;
931 break;
932 case KVM_IRQ_LINE: {
933 struct kvm_irq_level irq_event;
934
935 r = -EFAULT;
936 if (copy_from_user(&irq_event, argp, sizeof irq_event))
937 goto out;
938 if (irqchip_in_kernel(kvm)) {
939 mutex_lock(&kvm->lock);
940 kvm_ioapic_set_irq(kvm->arch.vioapic,
941 irq_event.irq,
942 irq_event.level);
943 mutex_unlock(&kvm->lock);
944 r = 0;
945 }
946 break;
947 }
948 case KVM_GET_IRQCHIP: {
949 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
950 struct kvm_irqchip chip;
951
952 r = -EFAULT;
953 if (copy_from_user(&chip, argp, sizeof chip))
954 goto out;
955 r = -ENXIO;
956 if (!irqchip_in_kernel(kvm))
957 goto out;
958 r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
959 if (r)
960 goto out;
961 r = -EFAULT;
962 if (copy_to_user(argp, &chip, sizeof chip))
963 goto out;
964 r = 0;
965 break;
966 }
967 case KVM_SET_IRQCHIP: {
968 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
969 struct kvm_irqchip chip;
970
971 r = -EFAULT;
972 if (copy_from_user(&chip, argp, sizeof chip))
973 goto out;
974 r = -ENXIO;
975 if (!irqchip_in_kernel(kvm))
976 goto out;
977 r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
978 if (r)
979 goto out;
980 r = 0;
981 break;
982 }
983 default:
984 ;
985 }
986out:
987 return r;
988}
989
990int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
991 struct kvm_sregs *sregs)
992{
993 return -EINVAL;
994}
995
996int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
997 struct kvm_sregs *sregs)
998{
999 return -EINVAL;
1000
1001}
1002int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1003 struct kvm_translation *tr)
1004{
1005
1006 return -EINVAL;
1007}
1008
1009static int kvm_alloc_vmm_area(void)
1010{
1011 if (!kvm_vmm_base && (kvm_vm_buffer_size < KVM_VM_BUFFER_SIZE)) {
1012 kvm_vmm_base = __get_free_pages(GFP_KERNEL,
1013 get_order(KVM_VMM_SIZE));
1014 if (!kvm_vmm_base)
1015 return -ENOMEM;
1016
1017 memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
1018 kvm_vm_buffer = kvm_vmm_base + VMM_SIZE;
1019
1020 printk(KERN_DEBUG"kvm:VMM's Base Addr:0x%lx, vm_buffer:0x%lx\n",
1021 kvm_vmm_base, kvm_vm_buffer);
1022 }
1023
1024 return 0;
1025}
1026
1027static void kvm_free_vmm_area(void)
1028{
1029 if (kvm_vmm_base) {
1030 /*Zero this area before freeing it, to avoid leaking bits!*/
1031 memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
1032 free_pages(kvm_vmm_base, get_order(KVM_VMM_SIZE));
1033 kvm_vmm_base = 0;
1034 kvm_vm_buffer = 0;
1035 kvm_vsa_base = 0;
1036 }
1037}
1038
1039/*
1040 * Make sure that a cpu that is being hot-unplugged does not have any vcpus
1041 * cached on it. Leave it as blank for IA64.
1042 */
1043void decache_vcpus_on_cpu(int cpu)
1044{
1045}
1046
1047static void vti_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1048{
1049}
1050
1051static int vti_init_vpd(struct kvm_vcpu *vcpu)
1052{
1053 int i;
1054 union cpuid3_t cpuid3;
1055 struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
1056
1057 if (IS_ERR(vpd))
1058 return PTR_ERR(vpd);
1059
1060 /* CPUID init */
1061 for (i = 0; i < 5; i++)
1062 vpd->vcpuid[i] = ia64_get_cpuid(i);
1063
1064 /* Limit the CPUID number to 5 */
1065 cpuid3.value = vpd->vcpuid[3];
1066 cpuid3.number = 4; /* 5 - 1 */
1067 vpd->vcpuid[3] = cpuid3.value;
1068
1069 /*Set vac and vdc fields*/
1070 vpd->vac.a_from_int_cr = 1;
1071 vpd->vac.a_to_int_cr = 1;
1072 vpd->vac.a_from_psr = 1;
1073 vpd->vac.a_from_cpuid = 1;
1074 vpd->vac.a_cover = 1;
1075 vpd->vac.a_bsw = 1;
1076 vpd->vac.a_int = 1;
1077 vpd->vdc.d_vmsw = 1;
1078
1079 /*Set virtual buffer*/
1080 vpd->virt_env_vaddr = KVM_VM_BUFFER_BASE;
1081
1082 return 0;
1083}
1084
1085static int vti_create_vp(struct kvm_vcpu *vcpu)
1086{
1087 long ret;
1088 struct vpd *vpd = vcpu->arch.vpd;
1089 unsigned long vmm_ivt;
1090
1091 vmm_ivt = kvm_vmm_info->vmm_ivt;
1092
1093 printk(KERN_DEBUG "kvm: vcpu:%p,ivt: 0x%lx\n", vcpu, vmm_ivt);
1094
1095 ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)vmm_ivt, 0);
1096
1097 if (ret) {
1098 printk(KERN_ERR"kvm: ia64_pal_vp_create failed!\n");
1099 return -EINVAL;
1100 }
1101 return 0;
1102}
1103
1104static void init_ptce_info(struct kvm_vcpu *vcpu)
1105{
1106 ia64_ptce_info_t ptce = {0};
1107
1108 ia64_get_ptce(&ptce);
1109 vcpu->arch.ptce_base = ptce.base;
1110 vcpu->arch.ptce_count[0] = ptce.count[0];
1111 vcpu->arch.ptce_count[1] = ptce.count[1];
1112 vcpu->arch.ptce_stride[0] = ptce.stride[0];
1113 vcpu->arch.ptce_stride[1] = ptce.stride[1];
1114}
1115
1116static void kvm_migrate_hlt_timer(struct kvm_vcpu *vcpu)
1117{
1118 struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
1119
1120 if (hrtimer_cancel(p_ht))
1121 hrtimer_start(p_ht, p_ht->expires, HRTIMER_MODE_ABS);
1122}
1123
1124static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data)
1125{
1126 struct kvm_vcpu *vcpu;
1127 wait_queue_head_t *q;
1128
1129 vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer);
1130 if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED)
1131 goto out;
1132
1133 q = &vcpu->wq;
1134 if (waitqueue_active(q)) {
1135 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
1136 wake_up_interruptible(q);
1137 }
1138out:
1139 vcpu->arch.timer_check = 1;
1140 return HRTIMER_NORESTART;
1141}
1142
1143#define PALE_RESET_ENTRY 0x80000000ffffffb0UL
1144
1145int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1146{
1147 struct kvm_vcpu *v;
1148 int r;
1149 int i;
1150 long itc_offset;
1151 struct kvm *kvm = vcpu->kvm;
1152 struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1153
1154 union context *p_ctx = &vcpu->arch.guest;
1155 struct kvm_vcpu *vmm_vcpu = to_guest(vcpu->kvm, vcpu);
1156
1157 /*Init vcpu context for first run.*/
1158 if (IS_ERR(vmm_vcpu))
1159 return PTR_ERR(vmm_vcpu);
1160
1161 if (vcpu->vcpu_id == 0) {
1162 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
1163
1164 /*Set entry address for first run.*/
1165 regs->cr_iip = PALE_RESET_ENTRY;
1166
1167 /*Initialize itc offset for vcpus*/
1168 itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC);
1169 for (i = 0; i < MAX_VCPU_NUM; i++) {
1170 v = (struct kvm_vcpu *)((char *)vcpu + VCPU_SIZE * i);
1171 v->arch.itc_offset = itc_offset;
1172 v->arch.last_itc = 0;
1173 }
1174 } else
1175 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
1176
1177 r = -ENOMEM;
1178 vcpu->arch.apic = kzalloc(sizeof(struct kvm_lapic), GFP_KERNEL);
1179 if (!vcpu->arch.apic)
1180 goto out;
1181 vcpu->arch.apic->vcpu = vcpu;
1182
1183 p_ctx->gr[1] = 0;
1184 p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + IA64_STK_OFFSET);
1185 p_ctx->gr[13] = (unsigned long)vmm_vcpu;
1186 p_ctx->psr = 0x1008522000UL;
1187 p_ctx->ar[40] = FPSR_DEFAULT; /*fpsr*/
1188 p_ctx->caller_unat = 0;
1189 p_ctx->pr = 0x0;
1190 p_ctx->ar[36] = 0x0; /*unat*/
1191 p_ctx->ar[19] = 0x0; /*rnat*/
1192 p_ctx->ar[18] = (unsigned long)vmm_vcpu +
1193 ((sizeof(struct kvm_vcpu)+15) & ~15);
1194 p_ctx->ar[64] = 0x0; /*pfs*/
1195 p_ctx->cr[0] = 0x7e04UL;
1196 p_ctx->cr[2] = (unsigned long)kvm_vmm_info->vmm_ivt;
1197 p_ctx->cr[8] = 0x3c;
1198
1199 /*Initialize region registers*/
1200 p_ctx->rr[0] = 0x30;
1201 p_ctx->rr[1] = 0x30;
1202 p_ctx->rr[2] = 0x30;
1203 p_ctx->rr[3] = 0x30;
1204 p_ctx->rr[4] = 0x30;
1205 p_ctx->rr[5] = 0x30;
1206 p_ctx->rr[7] = 0x30;
1207
1208 /*Initialize branch register 0*/
1209 p_ctx->br[0] = *(unsigned long *)kvm_vmm_info->vmm_entry;
1210
1211 vcpu->arch.vmm_rr = kvm->arch.vmm_init_rr;
1212 vcpu->arch.metaphysical_rr0 = kvm->arch.metaphysical_rr0;
1213 vcpu->arch.metaphysical_rr4 = kvm->arch.metaphysical_rr4;
1214
1215 hrtimer_init(&vcpu->arch.hlt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1216 vcpu->arch.hlt_timer.function = hlt_timer_fn;
1217
1218 vcpu->arch.last_run_cpu = -1;
1219 vcpu->arch.vpd = (struct vpd *)VPD_ADDR(vcpu->vcpu_id);
1220 vcpu->arch.vsa_base = kvm_vsa_base;
1221 vcpu->arch.__gp = kvm_vmm_gp;
1222 vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock);
1223 vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_ADDR(vcpu->vcpu_id);
1224 vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_ADDR(vcpu->vcpu_id);
1225 init_ptce_info(vcpu);
1226
1227 r = 0;
1228out:
1229 return r;
1230}
1231
1232static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id)
1233{
1234 unsigned long psr;
1235 int r;
1236
1237 local_irq_save(psr);
1238 r = kvm_insert_vmm_mapping(vcpu);
1239 if (r)
1240 goto fail;
1241 r = kvm_vcpu_init(vcpu, vcpu->kvm, id);
1242 if (r)
1243 goto fail;
1244
1245 r = vti_init_vpd(vcpu);
1246 if (r) {
1247 printk(KERN_DEBUG"kvm: vpd init error!!\n");
1248 goto uninit;
1249 }
1250
1251 r = vti_create_vp(vcpu);
1252 if (r)
1253 goto uninit;
1254
1255 kvm_purge_vmm_mapping(vcpu);
1256 local_irq_restore(psr);
1257
1258 return 0;
1259uninit:
1260 kvm_vcpu_uninit(vcpu);
1261fail:
1262 return r;
1263}
1264
1265struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1266 unsigned int id)
1267{
1268 struct kvm_vcpu *vcpu;
1269 unsigned long vm_base = kvm->arch.vm_base;
1270 int r;
1271 int cpu;
1272
1273 r = -ENOMEM;
1274 if (!vm_base) {
1275 printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id);
1276 goto fail;
1277 }
1278 vcpu = (struct kvm_vcpu *)(vm_base + KVM_VCPU_OFS + VCPU_SIZE * id);
1279 vcpu->kvm = kvm;
1280
1281 cpu = get_cpu();
1282 vti_vcpu_load(vcpu, cpu);
1283 r = vti_vcpu_setup(vcpu, id);
1284 put_cpu();
1285
1286 if (r) {
1287 printk(KERN_DEBUG"kvm: vcpu_setup error!!\n");
1288 goto fail;
1289 }
1290
1291 return vcpu;
1292fail:
1293 return ERR_PTR(r);
1294}
1295
1296int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1297{
1298 return 0;
1299}
1300
1301int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1302{
1303 return -EINVAL;
1304}
1305
1306int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1307{
1308 return -EINVAL;
1309}
1310
1311int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
1312 struct kvm_debug_guest *dbg)
1313{
1314 return -EINVAL;
1315}
1316
1317static void free_kvm(struct kvm *kvm)
1318{
1319 unsigned long vm_base = kvm->arch.vm_base;
1320
1321 if (vm_base) {
1322 memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
1323 free_pages(vm_base, get_order(KVM_VM_DATA_SIZE));
1324 }
1325
1326}
1327
1328static void kvm_release_vm_pages(struct kvm *kvm)
1329{
1330 struct kvm_memory_slot *memslot;
1331 int i, j;
1332 unsigned long base_gfn;
1333
1334 for (i = 0; i < kvm->nmemslots; i++) {
1335 memslot = &kvm->memslots[i];
1336 base_gfn = memslot->base_gfn;
1337
1338 for (j = 0; j < memslot->npages; j++) {
1339 if (memslot->rmap[j])
1340 put_page((struct page *)memslot->rmap[j]);
1341 }
1342 }
1343}
1344
1345void kvm_arch_destroy_vm(struct kvm *kvm)
1346{
1347 kfree(kvm->arch.vioapic);
1348 kvm_release_vm_pages(kvm);
1349 kvm_free_physmem(kvm);
1350 free_kvm(kvm);
1351}
1352
1353void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1354{
1355}
1356
1357void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1358{
1359 if (cpu != vcpu->cpu) {
1360 vcpu->cpu = cpu;
1361 if (vcpu->arch.ht_active)
1362 kvm_migrate_hlt_timer(vcpu);
1363 }
1364}
1365
1366#define SAVE_REGS(_x) regs->_x = vcpu->arch._x
1367
1368int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1369{
1370 int i;
1371 int r;
1372 struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
1373 vcpu_load(vcpu);
1374
1375 for (i = 0; i < 16; i++) {
1376 regs->vpd.vgr[i] = vpd->vgr[i];
1377 regs->vpd.vbgr[i] = vpd->vbgr[i];
1378 }
1379 for (i = 0; i < 128; i++)
1380 regs->vpd.vcr[i] = vpd->vcr[i];
1381 regs->vpd.vhpi = vpd->vhpi;
1382 regs->vpd.vnat = vpd->vnat;
1383 regs->vpd.vbnat = vpd->vbnat;
1384 regs->vpd.vpsr = vpd->vpsr;
1385 regs->vpd.vpr = vpd->vpr;
1386
1387 r = -EFAULT; /* copy_to_user() returns bytes not copied, not -errno */
1388 if (copy_to_user(regs->saved_guest, &vcpu->arch.guest,
1389 sizeof(union context)))
1390 goto out;
1391 if (copy_to_user(regs->saved_stack, (void *)vcpu, IA64_STK_OFFSET))
1392 goto out;
1395 SAVE_REGS(mp_state);
1396 SAVE_REGS(vmm_rr);
1397 memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS);
1398 memcpy(regs->dtrs, vcpu->arch.dtrs, sizeof(struct thash_data) * NDTRS);
1399 SAVE_REGS(itr_regions);
1400 SAVE_REGS(dtr_regions);
1401 SAVE_REGS(tc_regions);
1402 SAVE_REGS(irq_check);
1403 SAVE_REGS(itc_check);
1404 SAVE_REGS(timer_check);
1405 SAVE_REGS(timer_pending);
1406 SAVE_REGS(last_itc);
1407 for (i = 0; i < 8; i++) {
1408 regs->vrr[i] = vcpu->arch.vrr[i];
1409 regs->ibr[i] = vcpu->arch.ibr[i];
1410 regs->dbr[i] = vcpu->arch.dbr[i];
1411 }
1412 for (i = 0; i < 4; i++)
1413 regs->insvc[i] = vcpu->arch.insvc[i];
1414 regs->saved_itc = vcpu->arch.itc_offset + ia64_getreg(_IA64_REG_AR_ITC);
1415 SAVE_REGS(xtp);
1416 SAVE_REGS(metaphysical_rr0);
1417 SAVE_REGS(metaphysical_rr4);
1418 SAVE_REGS(metaphysical_saved_rr0);
1419 SAVE_REGS(metaphysical_saved_rr4);
1420 SAVE_REGS(fp_psr);
1421 SAVE_REGS(saved_gp);
1422 vcpu_put(vcpu);
1423 r = 0;
1424out:
1425 return r;
1426}
1427
1428void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
1429{
1430
1431 hrtimer_cancel(&vcpu->arch.hlt_timer);
1432 kfree(vcpu->arch.apic);
1433}
1434
1435
1436long kvm_arch_vcpu_ioctl(struct file *filp,
1437 unsigned int ioctl, unsigned long arg)
1438{
1439 return -EINVAL;
1440}
1441
1442int kvm_arch_set_memory_region(struct kvm *kvm,
1443 struct kvm_userspace_memory_region *mem,
1444 struct kvm_memory_slot old,
1445 int user_alloc)
1446{
1447 unsigned long i;
1448 struct page *page;
1449 int npages = mem->memory_size >> PAGE_SHIFT;
1450 struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
1451 unsigned long base_gfn = memslot->base_gfn;
1452
1453 for (i = 0; i < npages; i++) {
1454 page = gfn_to_page(kvm, base_gfn + i);
1455 kvm_set_pmt_entry(kvm, base_gfn + i,
1456 page_to_pfn(page) << PAGE_SHIFT,
1457 _PAGE_AR_RWX|_PAGE_MA_WB);
1458 memslot->rmap[i] = (unsigned long)page;
1459 }
1460
1461 return 0;
1462}
1463
1464
1465long kvm_arch_dev_ioctl(struct file *filp,
1466 unsigned int ioctl, unsigned long arg)
1467{
1468 return -EINVAL;
1469}
1470
1471void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1472{
1473 kvm_vcpu_uninit(vcpu);
1474}
1475
1476static int vti_cpu_has_kvm_support(void)
1477{
1478 long avail = 1, status = 1, control = 1;
1479 long ret;
1480
1481 ret = ia64_pal_proc_get_features(&avail, &status, &control, 0);
1482 if (ret)
1483 goto out;
1484
1485 if (!(avail & PAL_PROC_VM_BIT))
1486 goto out;
1487
1488 printk(KERN_DEBUG"kvm: Hardware Supports VT\n");
1489
1490 ret = ia64_pal_vp_env_info(&kvm_vm_buffer_size, &vp_env_info);
1491 if (ret)
1492 goto out;
1493 printk(KERN_DEBUG"kvm: VM Buffer Size:0x%lx\n", kvm_vm_buffer_size);
1494
1495 if (!(vp_env_info & VP_OPCODE)) {
 1496 printk(KERN_WARNING"kvm: No VP_OPCODE support on this hardware, "
 1497 "vp_env_info:0x%lx\n", vp_env_info);
1498 }
1499
1500 return 1;
1501out:
1502 return 0;
1503}
1504
1505static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info,
1506 struct module *module)
1507{
1508 unsigned long module_base;
1509 unsigned long vmm_size;
1510
1511 unsigned long vmm_offset, func_offset, fdesc_offset;
1512 struct fdesc *p_fdesc;
1513
1514 BUG_ON(!module);
1515
1516 if (!kvm_vmm_base) {
 517 printk(KERN_ERR"kvm: kvm area hasn't been initialized yet!\n");
1518 return -EFAULT;
1519 }
1520
1521 /*Calculate new position of relocated vmm module.*/
1522 module_base = (unsigned long)module->module_core;
1523 vmm_size = module->core_size;
1524 if (unlikely(vmm_size > KVM_VMM_SIZE))
1525 return -EFAULT;
1526
1527 memcpy((void *)kvm_vmm_base, (void *)module_base, vmm_size);
1528 kvm_flush_icache(kvm_vmm_base, vmm_size);
1529
1530 /*Recalculate kvm_vmm_info based on new VMM*/
1531 vmm_offset = vmm_info->vmm_ivt - module_base;
1532 kvm_vmm_info->vmm_ivt = KVM_VMM_BASE + vmm_offset;
1533 printk(KERN_DEBUG"kvm: Relocated VMM's IVT Base Addr:%lx\n",
1534 kvm_vmm_info->vmm_ivt);
1535
1536 fdesc_offset = (unsigned long)vmm_info->vmm_entry - module_base;
1537 kvm_vmm_info->vmm_entry = (kvm_vmm_entry *)(KVM_VMM_BASE +
1538 fdesc_offset);
1539 func_offset = *(unsigned long *)vmm_info->vmm_entry - module_base;
1540 p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
1541 p_fdesc->ip = KVM_VMM_BASE + func_offset;
1542 p_fdesc->gp = KVM_VMM_BASE+(p_fdesc->gp - module_base);
1543
1544 printk(KERN_DEBUG"kvm: Relocated VMM's Init Entry Addr:%lx\n",
1545 KVM_VMM_BASE+func_offset);
1546
1547 fdesc_offset = (unsigned long)vmm_info->tramp_entry - module_base;
1548 kvm_vmm_info->tramp_entry = (kvm_tramp_entry *)(KVM_VMM_BASE +
1549 fdesc_offset);
1550 func_offset = *(unsigned long *)vmm_info->tramp_entry - module_base;
1551 p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
1552 p_fdesc->ip = KVM_VMM_BASE + func_offset;
1553 p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base);
1554
1555 kvm_vmm_gp = p_fdesc->gp;
1556
1557 printk(KERN_DEBUG"kvm: Relocated VMM's Entry IP:%p\n",
1558 kvm_vmm_info->vmm_entry);
1559 printk(KERN_DEBUG"kvm: Relocated VMM's Trampoline Entry IP:0x%lx\n",
1560 KVM_VMM_BASE + func_offset);
1561
1562 return 0;
1563}
1564
1565int kvm_arch_init(void *opaque)
1566{
1567 int r;
1568 struct kvm_vmm_info *vmm_info = (struct kvm_vmm_info *)opaque;
1569
1570 if (!vti_cpu_has_kvm_support()) {
1571 printk(KERN_ERR "kvm: No Hardware Virtualization Support!\n");
1572 r = -EOPNOTSUPP;
1573 goto out;
1574 }
1575
1576 if (kvm_vmm_info) {
 1577 printk(KERN_ERR "kvm: VMM module already loaded!\n");
1578 r = -EEXIST;
1579 goto out;
1580 }
1581
1582 r = -ENOMEM;
1583 kvm_vmm_info = kzalloc(sizeof(struct kvm_vmm_info), GFP_KERNEL);
1584 if (!kvm_vmm_info)
1585 goto out;
1586
1587 if (kvm_alloc_vmm_area())
1588 goto out_free0;
1589
1590 r = kvm_relocate_vmm(vmm_info, vmm_info->module);
1591 if (r)
1592 goto out_free1;
1593
1594 return 0;
1595
1596out_free1:
1597 kvm_free_vmm_area();
1598out_free0:
1599 kfree(kvm_vmm_info);
1600out:
1601 return r;
1602}
1603
1604void kvm_arch_exit(void)
1605{
1606 kvm_free_vmm_area();
1607 kfree(kvm_vmm_info);
1608 kvm_vmm_info = NULL;
1609}
1610
1611static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
1612 struct kvm_dirty_log *log)
1613{
1614 struct kvm_memory_slot *memslot;
1615 int r, i;
1616 long n, base;
1617 unsigned long *dirty_bitmap = (unsigned long *)((void *)kvm - KVM_VM_OFS
1618 + KVM_MEM_DIRTY_LOG_OFS);
1619
1620 r = -EINVAL;
1621 if (log->slot >= KVM_MEMORY_SLOTS)
1622 goto out;
1623
1624 memslot = &kvm->memslots[log->slot];
1625 r = -ENOENT;
1626 if (!memslot->dirty_bitmap)
1627 goto out;
1628
1629 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
1630 base = memslot->base_gfn / BITS_PER_LONG;
1631
1632 for (i = 0; i < n/sizeof(long); ++i) {
1633 memslot->dirty_bitmap[i] = dirty_bitmap[base + i];
1634 dirty_bitmap[base + i] = 0;
1635 }
1636 r = 0;
1637out:
1638 return r;
1639}
1640
1641int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
1642 struct kvm_dirty_log *log)
1643{
1644 int r;
1645 int n;
1646 struct kvm_memory_slot *memslot;
1647 int is_dirty = 0;
1648
1649 spin_lock(&kvm->arch.dirty_log_lock);
1650
1651 r = kvm_ia64_sync_dirty_log(kvm, log);
1652 if (r)
1653 goto out;
1654
1655 r = kvm_get_dirty_log(kvm, log, &is_dirty);
1656 if (r)
1657 goto out;
1658
1659 /* If nothing is dirty, don't bother messing with page tables. */
1660 if (is_dirty) {
1661 kvm_flush_remote_tlbs(kvm);
1662 memslot = &kvm->memslots[log->slot];
1663 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
1664 memset(memslot->dirty_bitmap, 0, n);
1665 }
1666 r = 0;
1667out:
1668 spin_unlock(&kvm->arch.dirty_log_lock);
1669 return r;
1670}
1671
1672int kvm_arch_hardware_setup(void)
1673{
1674 return 0;
1675}
1676
1677void kvm_arch_hardware_unsetup(void)
1678{
1679}
1680
1681static void vcpu_kick_intr(void *info)
1682{
1683#ifdef DEBUG
1684 struct kvm_vcpu *vcpu = (struct kvm_vcpu *)info;
 1685 printk(KERN_DEBUG"vcpu_kick_intr %p\n", vcpu);
1686#endif
1687}
1688
1689void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
1690{
1691 int ipi_pcpu = vcpu->cpu;
1692
1693 if (waitqueue_active(&vcpu->wq))
1694 wake_up_interruptible(&vcpu->wq);
1695
1696 if (vcpu->guest_mode)
1697 smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0, 0);
1698}
1699
1700int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
1701{
1702
1703 struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
1704
1705 if (!test_and_set_bit(vec, &vpd->irr[0])) {
1706 vcpu->arch.irq_new_pending = 1;
1707 if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
1708 kvm_vcpu_kick(vcpu);
1709 else if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) {
1710 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
1711 if (waitqueue_active(&vcpu->wq))
1712 wake_up_interruptible(&vcpu->wq);
1713 }
1714 return 1;
1715 }
1716 return 0;
1717}
1718
1719int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
1720{
1721 return apic->vcpu->vcpu_id == dest;
1722}
1723
1724int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
1725{
1726 return 0;
1727}
1728
1729struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
1730 unsigned long bitmap)
1731{
1732 struct kvm_vcpu *lvcpu = kvm->vcpus[0];
1733 int i;
1734
1735 for (i = 1; i < KVM_MAX_VCPUS; i++) {
1736 if (!kvm->vcpus[i])
1737 continue;
1738 if (lvcpu->arch.xtp > kvm->vcpus[i]->arch.xtp)
1739 lvcpu = kvm->vcpus[i];
1740 }
1741
1742 return lvcpu;
1743}
1744
1745static int find_highest_bits(int *dat)
1746{
1747 u32 bits, bitnum;
1748 int i;
1749
1750 /* loop for all 256 bits */
 1751 for (i = 7; i >= 0; i--) {
1752 bits = dat[i];
1753 if (bits) {
1754 bitnum = fls(bits);
1755 return i * 32 + bitnum - 1;
1756 }
1757 }
1758
1759 return -1;
1760}
1761
1762int kvm_highest_pending_irq(struct kvm_vcpu *vcpu)
1763{
1764 struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
1765
1766 if (vpd->irr[0] & (1UL << NMI_VECTOR))
1767 return NMI_VECTOR;
1768 if (vpd->irr[0] & (1UL << ExtINT_VECTOR))
1769 return ExtINT_VECTOR;
1770
1771 return find_highest_bits((int *)&vpd->irr[0]);
1772}
1773
1774int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
1775{
1776 if (kvm_highest_pending_irq(vcpu) != -1)
1777 return 1;
1778 return 0;
1779}
1780
1781int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
1782{
1783 return 0;
1784}
1785
1786gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
1787{
1788 return gfn;
1789}
1790
1791int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
1792{
1793 return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE;
1794}
1795
1796int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
1797 struct kvm_mp_state *mp_state)
1798{
1799 return -EINVAL;
1800}
1801
1802int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
1803 struct kvm_mp_state *mp_state)
1804{
1805 return -EINVAL;
1806}
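
find_highest_bits() above scans the 256-bit IRR as eight 32-bit words from the top word down, using fls() to pick the most significant set bit, so the highest-numbered pending vector wins. A minimal user-space sketch of the same logic; the demo_* names and the portable fls() stand-in are hypothetical:

#include <stdio.h>

/* Portable stand-in for the kernel's fls(): 1-based index of the
 * most significant set bit, 0 if no bit is set. */
static int demo_fls(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

/* Mirrors find_highest_bits() above: scan a 256-bit vector stored
 * as eight 32-bit words, from the top word down. */
static int demo_find_highest_bits(const unsigned int *dat)
{
	int i;

	for (i = 7; i >= 0; i--) {
		if (dat[i])
			return i * 32 + demo_fls(dat[i]) - 1;
	}
	return -1;	/* nothing pending */
}

int main(void)
{
	unsigned int irr[8] = { 0 };

	irr[2] = 1u << 5;	/* mark vector 69 pending */
	printf("highest pending: %d\n", demo_find_highest_bits(irr));
	return 0;
}
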
diff --git a/arch/ia64/kvm/kvm_fw.c b/arch/ia64/kvm/kvm_fw.c
new file mode 100644
index 000000000000..091f936c4485
--- /dev/null
+++ b/arch/ia64/kvm/kvm_fw.c
@@ -0,0 +1,500 @@
1/*
2 * PAL/SAL call delegation
3 *
4 * Copyright (c) 2004 Li Susie <susie.li@intel.com>
5 * Copyright (c) 2005 Yu Ke <ke.yu@intel.com>
6 * Copyright (c) 2007 Xiantao Zhang <xiantao.zhang@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
19 * Place - Suite 330, Boston, MA 02111-1307 USA.
20 */
21
22#include <linux/kvm_host.h>
23#include <linux/smp.h>
24
25#include "vti.h"
26#include "misc.h"
27
28#include <asm/pal.h>
29#include <asm/sal.h>
30#include <asm/tlb.h>
31
32/*
33 * Handy macros to make sure that the PAL return values start out
34 * as something meaningful.
35 */
36#define INIT_PAL_STATUS_UNIMPLEMENTED(x) \
37 { \
38 x.status = PAL_STATUS_UNIMPLEMENTED; \
39 x.v0 = 0; \
40 x.v1 = 0; \
41 x.v2 = 0; \
42 }
43
44#define INIT_PAL_STATUS_SUCCESS(x) \
45 { \
46 x.status = PAL_STATUS_SUCCESS; \
47 x.v0 = 0; \
48 x.v1 = 0; \
49 x.v2 = 0; \
50 }
51
52static void kvm_get_pal_call_data(struct kvm_vcpu *vcpu,
53 u64 *gr28, u64 *gr29, u64 *gr30, u64 *gr31) {
54 struct exit_ctl_data *p;
55
56 if (vcpu) {
57 p = &vcpu->arch.exit_data;
58 if (p->exit_reason == EXIT_REASON_PAL_CALL) {
59 *gr28 = p->u.pal_data.gr28;
60 *gr29 = p->u.pal_data.gr29;
61 *gr30 = p->u.pal_data.gr30;
62 *gr31 = p->u.pal_data.gr31;
 63 return;
64 }
65 }
 66 printk(KERN_DEBUG"kvm: failed to get vcpu pal data!\n");
67}
68
69static void set_pal_result(struct kvm_vcpu *vcpu,
 70 struct ia64_pal_retval result)
 71{
72 struct exit_ctl_data *p;
73
74 p = kvm_get_exit_data(vcpu);
75 if (p && p->exit_reason == EXIT_REASON_PAL_CALL) {
76 p->u.pal_data.ret = result;
 77 return;
78 }
79 INIT_PAL_STATUS_UNIMPLEMENTED(p->u.pal_data.ret);
80}
81
82static void set_sal_result(struct kvm_vcpu *vcpu,
83 struct sal_ret_values result) {
84 struct exit_ctl_data *p;
85
86 p = kvm_get_exit_data(vcpu);
87 if (p && p->exit_reason == EXIT_REASON_SAL_CALL) {
88 p->u.sal_data.ret = result;
 89 return;
90 }
 91 printk(KERN_WARNING"kvm: failed to set sal result!\n");
92}
93
94struct cache_flush_args {
95 u64 cache_type;
96 u64 operation;
97 u64 progress;
98 long status;
99};
100
101cpumask_t cpu_cache_coherent_map;
102
103static void remote_pal_cache_flush(void *data)
104{
105 struct cache_flush_args *args = data;
106 long status;
107 u64 progress = args->progress;
108
109 status = ia64_pal_cache_flush(args->cache_type, args->operation,
110 &progress, NULL);
111 if (status != 0)
112 args->status = status;
113}
114
115static struct ia64_pal_retval pal_cache_flush(struct kvm_vcpu *vcpu)
116{
117 u64 gr28, gr29, gr30, gr31;
118 struct ia64_pal_retval result = {0, 0, 0, 0};
119 struct cache_flush_args args = {0, 0, 0, 0};
120 long psr;
121
122 gr28 = gr29 = gr30 = gr31 = 0;
123 kvm_get_pal_call_data(vcpu, &gr28, &gr29, &gr30, &gr31);
124
125 if (gr31 != 0)
 126 printk(KERN_ERR"vcpu:%p called cache_flush with non-zero gr31!\n", vcpu);
 127
 128 /* Always call host PAL with int=1 */
129 gr30 &= ~PAL_CACHE_FLUSH_CHK_INTRS;
130 args.cache_type = gr29;
131 args.operation = gr30;
132 smp_call_function(remote_pal_cache_flush,
133 (void *)&args, 1, 1);
134 if (args.status != 0)
 135 printk(KERN_ERR"pal_cache_flush error, "
 136 "status:0x%lx\n", args.status);
137 /*
 138 * Call host PAL cache flush.
 139 * Clear psr.ic when calling PAL_CACHE_FLUSH.
140 */
141 local_irq_save(psr);
142 result.status = ia64_pal_cache_flush(gr29, gr30, &result.v1,
143 &result.v0);
144 local_irq_restore(psr);
145 if (result.status != 0)
146 printk(KERN_ERR"vcpu:%p crashed due to cache_flush err:%ld"
147 "in1:%lx,in2:%lx\n",
148 vcpu, result.status, gr29, gr30);
149
150#if 0
151 if (gr29 == PAL_CACHE_TYPE_COHERENT) {
152 cpus_setall(vcpu->arch.cache_coherent_map);
153 cpu_clear(vcpu->cpu, vcpu->arch.cache_coherent_map);
154 cpus_setall(cpu_cache_coherent_map);
155 cpu_clear(vcpu->cpu, cpu_cache_coherent_map);
156 }
157#endif
158 return result;
159}
160
161struct ia64_pal_retval pal_cache_summary(struct kvm_vcpu *vcpu)
162{
163
164 struct ia64_pal_retval result;
165
166 PAL_CALL(result, PAL_CACHE_SUMMARY, 0, 0, 0);
167 return result;
168}
169
170static struct ia64_pal_retval pal_freq_base(struct kvm_vcpu *vcpu)
171{
172
173 struct ia64_pal_retval result;
174
175 PAL_CALL(result, PAL_FREQ_BASE, 0, 0, 0);
176
177 /*
 178 * PAL_FREQ_BASE may not be implemented on some platforms;
179 * call SAL instead.
180 */
181 if (result.v0 == 0) {
182 result.status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
183 &result.v0,
184 &result.v1);
185 result.v2 = 0;
186 }
187
188 return result;
189}
190
191static struct ia64_pal_retval pal_freq_ratios(struct kvm_vcpu *vcpu)
192{
193
194 struct ia64_pal_retval result;
195
196 PAL_CALL(result, PAL_FREQ_RATIOS, 0, 0, 0);
197 return result;
198}
199
200static struct ia64_pal_retval pal_logical_to_physica(struct kvm_vcpu *vcpu)
201{
202 struct ia64_pal_retval result;
203
204 INIT_PAL_STATUS_UNIMPLEMENTED(result);
205 return result;
206}
207
208static struct ia64_pal_retval pal_platform_addr(struct kvm_vcpu *vcpu)
209{
210
211 struct ia64_pal_retval result;
212
213 INIT_PAL_STATUS_SUCCESS(result);
214 return result;
215}
216
217static struct ia64_pal_retval pal_proc_get_features(struct kvm_vcpu *vcpu)
218{
219
220 struct ia64_pal_retval result = {0, 0, 0, 0};
 221 u64 in0, in1, in2, in3;
222
223 kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
224 result.status = ia64_pal_proc_get_features(&result.v0, &result.v1,
225 &result.v2, in2);
226
227 return result;
228}
229
230static struct ia64_pal_retval pal_cache_info(struct kvm_vcpu *vcpu)
231{
232
233 pal_cache_config_info_t ci;
234 long status;
235 unsigned long in0, in1, in2, in3, r9, r10;
236
237 kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
238 status = ia64_pal_cache_config_info(in1, in2, &ci);
239 r9 = ci.pcci_info_1.pcci1_data;
240 r10 = ci.pcci_info_2.pcci2_data;
241 return ((struct ia64_pal_retval){status, r9, r10, 0});
242}
243
244#define GUEST_IMPL_VA_MSB 59
245#define GUEST_RID_BITS 18
246
247static struct ia64_pal_retval pal_vm_summary(struct kvm_vcpu *vcpu)
248{
249
250 pal_vm_info_1_u_t vminfo1;
251 pal_vm_info_2_u_t vminfo2;
252 struct ia64_pal_retval result;
253
254 PAL_CALL(result, PAL_VM_SUMMARY, 0, 0, 0);
255 if (!result.status) {
256 vminfo1.pvi1_val = result.v0;
257 vminfo1.pal_vm_info_1_s.max_itr_entry = 8;
258 vminfo1.pal_vm_info_1_s.max_dtr_entry = 8;
259 result.v0 = vminfo1.pvi1_val;
260 vminfo2.pal_vm_info_2_s.impl_va_msb = GUEST_IMPL_VA_MSB;
261 vminfo2.pal_vm_info_2_s.rid_size = GUEST_RID_BITS;
262 result.v1 = vminfo2.pvi2_val;
263 }
264
265 return result;
266}
267
268static struct ia64_pal_retval pal_vm_info(struct kvm_vcpu *vcpu)
269{
270 struct ia64_pal_retval result;
271
272 INIT_PAL_STATUS_UNIMPLEMENTED(result);
273
274 return result;
275}
276
277static u64 kvm_get_pal_call_index(struct kvm_vcpu *vcpu)
278{
279 u64 index = 0;
280 struct exit_ctl_data *p;
281
282 p = kvm_get_exit_data(vcpu);
283 if (p && (p->exit_reason == EXIT_REASON_PAL_CALL))
284 index = p->u.pal_data.gr28;
285
286 return index;
287}
288
289int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
290{
291
292 u64 gr28;
293 struct ia64_pal_retval result;
294 int ret = 1;
295
296 gr28 = kvm_get_pal_call_index(vcpu);
297 /*printk("pal_call index:%lx\n",gr28);*/
298 switch (gr28) {
299 case PAL_CACHE_FLUSH:
300 result = pal_cache_flush(vcpu);
301 break;
302 case PAL_CACHE_SUMMARY:
303 result = pal_cache_summary(vcpu);
304 break;
305 case PAL_HALT_LIGHT:
306 {
307 vcpu->arch.timer_pending = 1;
308 INIT_PAL_STATUS_SUCCESS(result);
309 if (kvm_highest_pending_irq(vcpu) == -1)
310 ret = kvm_emulate_halt(vcpu);
311
312 }
313 break;
314
315 case PAL_FREQ_RATIOS:
316 result = pal_freq_ratios(vcpu);
317 break;
318
319 case PAL_FREQ_BASE:
320 result = pal_freq_base(vcpu);
321 break;
322
323 case PAL_LOGICAL_TO_PHYSICAL :
324 result = pal_logical_to_physica(vcpu);
325 break;
326
327 case PAL_VM_SUMMARY :
328 result = pal_vm_summary(vcpu);
329 break;
330
331 case PAL_VM_INFO :
332 result = pal_vm_info(vcpu);
333 break;
334 case PAL_PLATFORM_ADDR :
335 result = pal_platform_addr(vcpu);
336 break;
337 case PAL_CACHE_INFO:
338 result = pal_cache_info(vcpu);
339 break;
340 case PAL_PTCE_INFO:
341 INIT_PAL_STATUS_SUCCESS(result);
342 result.v1 = (1L << 32) | 1L;
343 break;
344 case PAL_VM_PAGE_SIZE:
345 result.status = ia64_pal_vm_page_size(&result.v0,
346 &result.v1);
347 break;
348 case PAL_RSE_INFO:
349 result.status = ia64_pal_rse_info(&result.v0,
350 (pal_hints_u_t *)&result.v1);
351 break;
352 case PAL_PROC_GET_FEATURES:
353 result = pal_proc_get_features(vcpu);
354 break;
355 case PAL_DEBUG_INFO:
356 result.status = ia64_pal_debug_info(&result.v0,
357 &result.v1);
358 break;
359 case PAL_VERSION:
360 result.status = ia64_pal_version(
361 (pal_version_u_t *)&result.v0,
362 (pal_version_u_t *)&result.v1);
363
364 break;
365 case PAL_FIXED_ADDR:
366 result.status = PAL_STATUS_SUCCESS;
367 result.v0 = vcpu->vcpu_id;
368 break;
369 default:
370 INIT_PAL_STATUS_UNIMPLEMENTED(result);
371 printk(KERN_WARNING"kvm: Unsupported pal call,"
372 " index:0x%lx\n", gr28);
373 }
374 set_pal_result(vcpu, result);
375 return ret;
376}
377
378static struct sal_ret_values sal_emulator(struct kvm *kvm,
379 long index, unsigned long in1,
380 unsigned long in2, unsigned long in3,
381 unsigned long in4, unsigned long in5,
382 unsigned long in6, unsigned long in7)
383{
384 unsigned long r9 = 0;
385 unsigned long r10 = 0;
386 long r11 = 0;
387 long status;
388
389 status = 0;
390 switch (index) {
391 case SAL_FREQ_BASE:
392 status = ia64_sal_freq_base(in1, &r9, &r10);
393 break;
394 case SAL_PCI_CONFIG_READ:
 395 printk(KERN_WARNING"kvm: SAL_PCI_CONFIG_READ is not"
 396 " allowed here!\n");
397 break;
398 case SAL_PCI_CONFIG_WRITE:
 399 printk(KERN_WARNING"kvm: SAL_PCI_CONFIG_WRITE is not"
 400 " allowed here!\n");
401 break;
402 case SAL_SET_VECTORS:
403 if (in1 == SAL_VECTOR_OS_BOOT_RENDEZ) {
404 if (in4 != 0 || in5 != 0 || in6 != 0 || in7 != 0) {
405 status = -2;
406 } else {
407 kvm->arch.rdv_sal_data.boot_ip = in2;
408 kvm->arch.rdv_sal_data.boot_gp = in3;
409 }
410 printk("Rendvous called! iip:%lx\n\n", in2);
411 } else
 412 printk(KERN_WARNING"kvm: called SAL_SET_VECTORS %lu, "
 413 "ignored...\n", in1);
414 break;
415 case SAL_GET_STATE_INFO:
416 /* No more info. */
417 status = -5;
418 r9 = 0;
419 break;
420 case SAL_GET_STATE_INFO_SIZE:
421 /* Return a dummy size. */
422 status = 0;
423 r9 = 128;
424 break;
425 case SAL_CLEAR_STATE_INFO:
426 /* Noop. */
427 break;
428 case SAL_MC_RENDEZ:
429 printk(KERN_WARNING
430 "kvm: called SAL_MC_RENDEZ. ignored...\n");
431 break;
432 case SAL_MC_SET_PARAMS:
433 printk(KERN_WARNING
434 "kvm: called SAL_MC_SET_PARAMS.ignored!\n");
435 break;
436 case SAL_CACHE_FLUSH:
437 if (1) {
 438 /* Flush using SAL.
 439 This method is faster but has a side
 440 effect on other vcpus running on
 441 this cpu. */
442 status = ia64_sal_cache_flush(in1);
443 } else {
 444 /* May need to implement a method
 445 without this side effect! */
446 status = 0;
447 }
448 break;
449 case SAL_CACHE_INIT:
450 printk(KERN_WARNING
451 "kvm: called SAL_CACHE_INIT. ignored...\n");
452 break;
453 case SAL_UPDATE_PAL:
454 printk(KERN_WARNING
455 "kvm: CALLED SAL_UPDATE_PAL. ignored...\n");
456 break;
457 default:
 458 printk(KERN_WARNING"kvm: called SAL_CALL with unknown"
 459 " index:%ld\n", index);
460 status = -1;
461 break;
462 }
463 return ((struct sal_ret_values) {status, r9, r10, r11});
464}
465
466static void kvm_get_sal_call_data(struct kvm_vcpu *vcpu, u64 *in0, u64 *in1,
 467 u64 *in2, u64 *in3, u64 *in4, u64 *in5, u64 *in6, u64 *in7)
 468{
469 struct exit_ctl_data *p;
470
471 p = kvm_get_exit_data(vcpu);
472
473 if (p) {
474 if (p->exit_reason == EXIT_REASON_SAL_CALL) {
475 *in0 = p->u.sal_data.in0;
476 *in1 = p->u.sal_data.in1;
477 *in2 = p->u.sal_data.in2;
478 *in3 = p->u.sal_data.in3;
479 *in4 = p->u.sal_data.in4;
480 *in5 = p->u.sal_data.in5;
481 *in6 = p->u.sal_data.in6;
482 *in7 = p->u.sal_data.in7;
 483 return;
484 }
485 }
486 *in0 = 0;
487}
488
489void kvm_sal_emul(struct kvm_vcpu *vcpu)
490{
491
492 struct sal_ret_values result;
493 u64 index, in1, in2, in3, in4, in5, in6, in7;
494
495 kvm_get_sal_call_data(vcpu, &index, &in1, &in2,
496 &in3, &in4, &in5, &in6, &in7);
497 result = sal_emulator(vcpu->kvm, index, in1, in2, in3,
498 in4, in5, in6, in7);
499 set_sal_result(vcpu, result);
500}
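
kvm_pal_emul() above is a plain index dispatch: the PAL index the guest placed in gr28 arrives through the exit data, a handler builds an ia64_pal_retval, and set_pal_result() stores it back for the VMM to hand to the guest. A trimmed user-space sketch of that handshake; the demo_* structs and the index value are placeholders, not the real asm/pal.h definitions:

#include <stdio.h>

struct demo_pal_retval {
	long status;
	unsigned long v0, v1, v2;
};

struct demo_exit_data {
	unsigned long gr28;		/* PAL call index from the guest */
	struct demo_pal_retval ret;	/* result handed back to the VMM */
};

#define DEMO_PAL_FREQ_BASE	13UL	/* placeholder index, not the SDM value */
#define DEMO_PAL_STATUS_UNIMPL	-1L

static void demo_pal_emul(struct demo_exit_data *p)
{
	switch (p->gr28) {
	case DEMO_PAL_FREQ_BASE:
		/* Would delegate to the host; here, a canned answer. */
		p->ret = (struct demo_pal_retval){ 0, 1000000000UL, 0, 0 };
		break;
	default:
		p->ret = (struct demo_pal_retval){ DEMO_PAL_STATUS_UNIMPL, 0, 0, 0 };
		break;
	}
}

int main(void)
{
	struct demo_exit_data d = { .gr28 = DEMO_PAL_FREQ_BASE };

	demo_pal_emul(&d);
	printf("status=%ld v0=%lu\n", d.ret.status, d.ret.v0);
	return 0;
}
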
diff --git a/arch/ia64/kvm/kvm_minstate.h b/arch/ia64/kvm/kvm_minstate.h
new file mode 100644
index 000000000000..13980d9b8bcf
--- /dev/null
+++ b/arch/ia64/kvm/kvm_minstate.h
@@ -0,0 +1,273 @@
1/*
2 * kvm_minstate.h: min save macros
3 * Copyright (c) 2007, Intel Corporation.
4 *
5 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
6 * Xiantao Zhang (xiantao.zhang@intel.com)
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
19 * Place - Suite 330, Boston, MA 02111-1307 USA.
20 *
21 */
22
23
24#include <asm/asmmacro.h>
25#include <asm/types.h>
26#include <asm/kregs.h>
27#include "asm-offsets.h"
28
29#define KVM_MINSTATE_START_SAVE_MIN \
30 mov ar.rsc = 0;/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */\
31 ;; \
32 mov.m r28 = ar.rnat; \
33 addl r22 = VMM_RBS_OFFSET,r1; /* compute base of RBS */ \
34 ;; \
35 lfetch.fault.excl.nt1 [r22]; \
36 addl r1 = IA64_STK_OFFSET-VMM_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
37 mov r23 = ar.bspstore; /* save ar.bspstore */ \
38 ;; \
39 mov ar.bspstore = r22; /* switch to kernel RBS */\
40 ;; \
41 mov r18 = ar.bsp; \
42 mov ar.rsc = 0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */
43
44
45
46#define KVM_MINSTATE_END_SAVE_MIN \
47 bsw.1; /* switch back to bank 1 (must be last in insn group) */\
48 ;;
49
50
51#define PAL_VSA_SYNC_READ \
52 /* begin to call pal vps sync_read */ \
53 add r25 = VMM_VPD_BASE_OFFSET, r21; \
54 adds r20 = VMM_VCPU_VSA_BASE_OFFSET, r21; /* entry point */ \
55 ;; \
56 ld8 r25 = [r25]; /* read vpd base */ \
57 ld8 r20 = [r20]; \
58 ;; \
59 add r20 = PAL_VPS_SYNC_READ,r20; \
60 ;; \
61{ .mii; \
62 nop 0x0; \
63 mov r24 = ip; \
64 mov b0 = r20; \
65 ;; \
66}; \
67{ .mmb; \
68 add r24 = 0x20, r24; \
69 nop 0x0; \
70 br.cond.sptk b0; /* call the service */ \
71 ;; \
72};
73
74
75
76#define KVM_MINSTATE_GET_CURRENT(reg) mov reg=r21
77
78/*
79 * KVM_DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
80 * the minimum state necessary that allows us to turn psr.ic back
81 * on.
82 *
83 * Assumed state upon entry:
84 * psr.ic: off
85 * r31: contains saved predicates (pr)
86 *
87 * Upon exit, the state is as follows:
88 * psr.ic: off
89 * r2 = points to &pt_regs.r16
90 * r8 = contents of ar.ccv
91 * r9 = contents of ar.csd
92 * r10 = contents of ar.ssd
93 * r11 = FPSR_DEFAULT
94 * r12 = kernel sp (kernel virtual address)
95 * r13 = points to current task_struct (kernel virtual address)
96 * p15 = TRUE if psr.i is set in cr.ipsr
97 * predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
98 * preserved
99 *
100 * Note that psr.ic is NOT turned on by this macro. This is so that
101 * we can pass interruption state as arguments to a handler.
102 */
103
104
105#define PT(f) (VMM_PT_REGS_##f##_OFFSET)
106
107#define KVM_DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \
108 KVM_MINSTATE_GET_CURRENT(r16); /* M (or M;;I) */ \
109 mov r27 = ar.rsc; /* M */ \
110 mov r20 = r1; /* A */ \
111 mov r25 = ar.unat; /* M */ \
112 mov r29 = cr.ipsr; /* M */ \
113 mov r26 = ar.pfs; /* I */ \
114 mov r18 = cr.isr; \
115 COVER; /* B;; (or nothing) */ \
116 ;; \
117 tbit.z p0,p15 = r29,IA64_PSR_I_BIT; \
118 mov r1 = r16; \
119/* mov r21=r16; */ \
120 /* switch from user to kernel RBS: */ \
121 ;; \
122 invala; /* M */ \
123 SAVE_IFS; \
124 ;; \
125 KVM_MINSTATE_START_SAVE_MIN \
126 adds r17 = 2*L1_CACHE_BYTES,r1;/* cache-line size */ \
127 adds r16 = PT(CR_IPSR),r1; \
128 ;; \
129 lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
130 st8 [r16] = r29; /* save cr.ipsr */ \
131 ;; \
132 lfetch.fault.excl.nt1 [r17]; \
133 tbit.nz p15,p0 = r29,IA64_PSR_I_BIT; \
134 mov r29 = b0 \
135 ;; \
136 adds r16 = PT(R8),r1; /* initialize first base pointer */\
137 adds r17 = PT(R9),r1; /* initialize second base pointer */\
138 ;; \
139.mem.offset 0,0; st8.spill [r16] = r8,16; \
140.mem.offset 8,0; st8.spill [r17] = r9,16; \
141 ;; \
142.mem.offset 0,0; st8.spill [r16] = r10,24; \
143.mem.offset 8,0; st8.spill [r17] = r11,24; \
144 ;; \
145 mov r9 = cr.iip; /* M */ \
146 mov r10 = ar.fpsr; /* M */ \
147 ;; \
148 st8 [r16] = r9,16; /* save cr.iip */ \
149 st8 [r17] = r30,16; /* save cr.ifs */ \
150 sub r18 = r18,r22; /* r18=RSE.ndirty*8 */ \
151 ;; \
152 st8 [r16] = r25,16; /* save ar.unat */ \
153 st8 [r17] = r26,16; /* save ar.pfs */ \
 154 shl r18 = r18,16; /* calc ar.rsc used for "loadrs" */\
155 ;; \
156 st8 [r16] = r27,16; /* save ar.rsc */ \
157 st8 [r17] = r28,16; /* save ar.rnat */ \
158 ;; /* avoid RAW on r16 & r17 */ \
159 st8 [r16] = r23,16; /* save ar.bspstore */ \
160 st8 [r17] = r31,16; /* save predicates */ \
161 ;; \
162 st8 [r16] = r29,16; /* save b0 */ \
163 st8 [r17] = r18,16; /* save ar.rsc value for "loadrs" */\
164 ;; \
165.mem.offset 0,0; st8.spill [r16] = r20,16;/* save original r1 */ \
166.mem.offset 8,0; st8.spill [r17] = r12,16; \
167 adds r12 = -16,r1; /* switch to kernel memory stack */ \
168 ;; \
169.mem.offset 0,0; st8.spill [r16] = r13,16; \
170.mem.offset 8,0; st8.spill [r17] = r10,16; /* save ar.fpsr */\
171 mov r13 = r21; /* establish `current' */ \
172 ;; \
173.mem.offset 0,0; st8.spill [r16] = r15,16; \
174.mem.offset 8,0; st8.spill [r17] = r14,16; \
175 ;; \
176.mem.offset 0,0; st8.spill [r16] = r2,16; \
177.mem.offset 8,0; st8.spill [r17] = r3,16; \
178 adds r2 = VMM_PT_REGS_R16_OFFSET,r1; \
179 ;; \
180 adds r16 = VMM_VCPU_IIPA_OFFSET,r13; \
181 adds r17 = VMM_VCPU_ISR_OFFSET,r13; \
182 mov r26 = cr.iipa; \
183 mov r27 = cr.isr; \
184 ;; \
185 st8 [r16] = r26; \
186 st8 [r17] = r27; \
187 ;; \
188 EXTRA; \
189 mov r8 = ar.ccv; \
190 mov r9 = ar.csd; \
191 mov r10 = ar.ssd; \
192 movl r11 = FPSR_DEFAULT; /* L-unit */ \
193 adds r17 = VMM_VCPU_GP_OFFSET,r13; \
194 ;; \
195 ld8 r1 = [r17];/* establish kernel global pointer */ \
196 ;; \
197 PAL_VSA_SYNC_READ \
198 KVM_MINSTATE_END_SAVE_MIN
199
200/*
201 * SAVE_REST saves the remainder of pt_regs (with psr.ic on).
202 *
203 * Assumed state upon entry:
204 * psr.ic: on
205 * r2: points to &pt_regs.f6
206 * r3: points to &pt_regs.f7
207 * r8: contents of ar.ccv
208 * r9: contents of ar.csd
209 * r10: contents of ar.ssd
210 * r11: FPSR_DEFAULT
211 *
212 * Registers r14 and r15 are guaranteed not to be touched by SAVE_REST.
213 */
214#define KVM_SAVE_REST \
215.mem.offset 0,0; st8.spill [r2] = r16,16; \
216.mem.offset 8,0; st8.spill [r3] = r17,16; \
217 ;; \
218.mem.offset 0,0; st8.spill [r2] = r18,16; \
219.mem.offset 8,0; st8.spill [r3] = r19,16; \
220 ;; \
221.mem.offset 0,0; st8.spill [r2] = r20,16; \
222.mem.offset 8,0; st8.spill [r3] = r21,16; \
223 mov r18=b6; \
224 ;; \
225.mem.offset 0,0; st8.spill [r2] = r22,16; \
226.mem.offset 8,0; st8.spill [r3] = r23,16; \
227 mov r19 = b7; \
228 ;; \
229.mem.offset 0,0; st8.spill [r2] = r24,16; \
230.mem.offset 8,0; st8.spill [r3] = r25,16; \
231 ;; \
232.mem.offset 0,0; st8.spill [r2] = r26,16; \
233.mem.offset 8,0; st8.spill [r3] = r27,16; \
234 ;; \
235.mem.offset 0,0; st8.spill [r2] = r28,16; \
236.mem.offset 8,0; st8.spill [r3] = r29,16; \
237 ;; \
238.mem.offset 0,0; st8.spill [r2] = r30,16; \
239.mem.offset 8,0; st8.spill [r3] = r31,32; \
240 ;; \
241 mov ar.fpsr = r11; \
242 st8 [r2] = r8,8; \
243 adds r24 = PT(B6)-PT(F7),r3; \
244 adds r25 = PT(B7)-PT(F7),r3; \
245 ;; \
246 st8 [r24] = r18,16; /* b6 */ \
247 st8 [r25] = r19,16; /* b7 */ \
248 adds r2 = PT(R4)-PT(F6),r2; \
249 adds r3 = PT(R5)-PT(F7),r3; \
250 ;; \
251 st8 [r24] = r9; /* ar.csd */ \
252 st8 [r25] = r10; /* ar.ssd */ \
253 ;; \
254 mov r18 = ar.unat; \
255 adds r19 = PT(EML_UNAT)-PT(R4),r2; \
256 ;; \
257 st8 [r19] = r18; /* eml_unat */ \
258
259
260#define KVM_SAVE_EXTRA \
261.mem.offset 0,0; st8.spill [r2] = r4,16; \
262.mem.offset 8,0; st8.spill [r3] = r5,16; \
263 ;; \
264.mem.offset 0,0; st8.spill [r2] = r6,16; \
265.mem.offset 8,0; st8.spill [r3] = r7; \
266 ;; \
267 mov r26 = ar.unat; \
268 ;; \
269 st8 [r2] = r26;/* eml_unat */ \
270
271#define KVM_SAVE_MIN_WITH_COVER KVM_DO_SAVE_MIN(cover, mov r30 = cr.ifs,)
272#define KVM_SAVE_MIN_WITH_COVER_R19 KVM_DO_SAVE_MIN(cover, mov r30 = cr.ifs, mov r15 = r19)
273#define KVM_SAVE_MIN KVM_DO_SAVE_MIN( , mov r30 = r0, )
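
The VMM_*_OFFSET constants and the PT() helper above come out of asm-offsets.h, which the build generates from the C structure layouts (this merge adds arch/ia64/kvm/asm-offsets.c for that), so the assembly offsets cannot drift from the C side. The mechanism boils down to offsetof(); a stand-alone sketch with a hypothetical, heavily trimmed struct:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in; the real layout is the ia64 struct
 * kvm_pt_regs. */
struct demo_pt_regs {
	unsigned long cr_ipsr;
	unsigned long cr_iip;
	unsigned long r8;
	unsigned long r9;
};

/* Analogue of PT(f): a field name becomes a byte offset the
 * assembler can add to a base register. */
#define DEMO_PT(f)	offsetof(struct demo_pt_regs, f)

int main(void)
{
	printf("PT(CR_IPSR) = %zu\n", DEMO_PT(cr_ipsr));
	printf("PT(R8)      = %zu\n", DEMO_PT(r8));
	return 0;
}
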
diff --git a/arch/ia64/kvm/lapic.h b/arch/ia64/kvm/lapic.h
new file mode 100644
index 000000000000..6d6cbcb14893
--- /dev/null
+++ b/arch/ia64/kvm/lapic.h
@@ -0,0 +1,25 @@
1#ifndef __KVM_IA64_LAPIC_H
2#define __KVM_IA64_LAPIC_H
3
4#include <linux/kvm_host.h>
5
6/*
7 * vlsapic
8 */
 9struct kvm_lapic {
10 struct kvm_vcpu *vcpu;
11 uint64_t insvc[4];
12 uint64_t vhpi;
13 uint8_t xtp;
14 uint8_t pal_init_pending;
15 uint8_t pad[2];
16};
17
18int kvm_create_lapic(struct kvm_vcpu *vcpu);
19void kvm_free_lapic(struct kvm_vcpu *vcpu);
20
21int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
22int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
23int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig);
24
25#endif
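
Behind this interface, kvm_apic_set_irq() in kvm-ia64.c marks a vector pending by atomically setting its bit in the 256-bit IRR and kicks the vcpu only when the bit was newly set. A non-atomic user-space sketch of that newly-pending test; the demo_* names are hypothetical, and it assumes 64-bit longs as on ia64:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the test_and_set_bit() pattern in kvm_apic_set_irq():
 * returns true only when the vector was not already pending, i.e.
 * when the caller should kick the vcpu. The kernel version is
 * atomic; this sketch is not. */
static bool demo_set_irq(unsigned long irr[4], unsigned int vec)
{
	unsigned long mask = 1UL << (vec % 64);	/* 64-bit longs assumed */
	unsigned int word = vec / 64;
	bool was_set = irr[word] & mask;

	irr[word] |= mask;
	return !was_set;
}

int main(void)
{
	unsigned long irr[4] = { 0 };

	printf("%d\n", demo_set_irq(irr, 69));	/* 1: newly pending */
	printf("%d\n", demo_set_irq(irr, 69));	/* 0: already pending */
	return 0;
}
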
diff --git a/arch/ia64/kvm/misc.h b/arch/ia64/kvm/misc.h
new file mode 100644
index 000000000000..e585c4607344
--- /dev/null
+++ b/arch/ia64/kvm/misc.h
@@ -0,0 +1,93 @@
1#ifndef __KVM_IA64_MISC_H
2#define __KVM_IA64_MISC_H
3
4#include <linux/kvm_host.h>
5/*
6 * misc.h
7 * Copyright (C) 2007, Intel Corporation.
8 * Xiantao Zhang (xiantao.zhang@intel.com)
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms and conditions of the GNU General Public License,
12 * version 2, as published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License along with
20 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
21 * Place - Suite 330, Boston, MA 02111-1307 USA.
22 *
23 */
24
25/*
 26 * Return the p2m base address on the host side.
27 */
28static inline uint64_t *kvm_host_get_pmt(struct kvm *kvm)
29{
30 return (uint64_t *)(kvm->arch.vm_base + KVM_P2M_OFS);
31}
32
33static inline void kvm_set_pmt_entry(struct kvm *kvm, gfn_t gfn,
34 u64 paddr, u64 mem_flags)
35{
36 uint64_t *pmt_base = kvm_host_get_pmt(kvm);
37 unsigned long pte;
38
39 pte = PAGE_ALIGN(paddr) | mem_flags;
40 pmt_base[gfn] = pte;
41}
42
 43/* Translate a host-side address to its guest-side address */
44
45static inline void *to_guest(struct kvm *kvm, void *addr)
46{
47 return (void *)((unsigned long)(addr) - kvm->arch.vm_base +
48 KVM_VM_DATA_BASE);
49}
50
 51/* Translate a guest-side address to its host-side address */
52
53static inline void *to_host(struct kvm *kvm, void *addr)
54{
55 return (void *)((unsigned long)addr - KVM_VM_DATA_BASE
56 + kvm->arch.vm_base);
57}
58
59/* Get host context of the vcpu */
60static inline union context *kvm_get_host_context(struct kvm_vcpu *vcpu)
61{
62 union context *ctx = &vcpu->arch.host;
63 return to_guest(vcpu->kvm, ctx);
64}
65
66/* Get guest context of the vcpu */
67static inline union context *kvm_get_guest_context(struct kvm_vcpu *vcpu)
68{
69 union context *ctx = &vcpu->arch.guest;
70 return to_guest(vcpu->kvm, ctx);
71}
72
 73/* Get the exit data recorded by the GVMM */
74static inline struct exit_ctl_data *kvm_get_exit_data(struct kvm_vcpu *vcpu)
75{
76 return &vcpu->arch.exit_data;
77}
78
 79/* Get the vcpu's ioreq for the kvm module */
80static inline struct kvm_mmio_req *kvm_get_vcpu_ioreq(struct kvm_vcpu *vcpu)
81{
82 struct exit_ctl_data *p_ctl_data;
83
84 if (vcpu) {
85 p_ctl_data = kvm_get_exit_data(vcpu);
86 if (p_ctl_data->exit_reason == EXIT_REASON_MMIO_INSTRUCTION)
87 return &p_ctl_data->u.ioreq;
88 }
89
90 return NULL;
91}
92
93#endif
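
to_guest() and to_host() above are inverse linear remappings between the per-VM host allocation at kvm->arch.vm_base and the fixed guest-side window at KVM_VM_DATA_BASE. A sketch with made-up base constants that checks the round-trip invariant:

#include <assert.h>
#include <stdio.h>

/* Placeholder bases; the real ones are KVM_VM_DATA_BASE and the
 * per-VM kvm->arch.vm_base. */
#define DEMO_GUEST_BASE	0x6000000000000000UL
#define DEMO_HOST_BASE	0x0000001080000000UL

static unsigned long demo_to_guest(unsigned long host_addr)
{
	return host_addr - DEMO_HOST_BASE + DEMO_GUEST_BASE;
}

static unsigned long demo_to_host(unsigned long guest_addr)
{
	return guest_addr - DEMO_GUEST_BASE + DEMO_HOST_BASE;
}

int main(void)
{
	unsigned long host = DEMO_HOST_BASE + 0x1234;

	assert(demo_to_host(demo_to_guest(host)) == host);	/* round trip */
	printf("host %#lx <-> guest %#lx\n", host, demo_to_guest(host));
	return 0;
}
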
diff --git a/arch/ia64/kvm/mmio.c b/arch/ia64/kvm/mmio.c
new file mode 100644
index 000000000000..351bf70da463
--- /dev/null
+++ b/arch/ia64/kvm/mmio.c
@@ -0,0 +1,341 @@
1/*
2 * mmio.c: MMIO emulation components.
3 * Copyright (c) 2004, Intel Corporation.
4 * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
5 * Kun Tian (Kevin Tian) (Kevin.tian@intel.com)
6 *
7 * Copyright (c) 2007 Intel Corporation KVM support.
8 * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
9 * Xiantao Zhang (xiantao.zhang@intel.com)
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms and conditions of the GNU General Public License,
13 * version 2, as published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * more details.
19 *
20 * You should have received a copy of the GNU General Public License along with
21 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
22 * Place - Suite 330, Boston, MA 02111-1307 USA.
23 *
24 */
25
26#include <linux/kvm_host.h>
27
28#include "vcpu.h"
29
30static void vlsapic_write_xtp(struct kvm_vcpu *v, uint8_t val)
31{
32 VLSAPIC_XTP(v) = val;
33}
34
35/*
36 * LSAPIC OFFSET
37 */
38#define PIB_LOW_HALF(ofst) !(ofst & (1 << 20))
39#define PIB_OFST_INTA 0x1E0000
40#define PIB_OFST_XTP 0x1E0008
41
42/*
43 * execute write IPI op.
44 */
45static void vlsapic_write_ipi(struct kvm_vcpu *vcpu,
46 uint64_t addr, uint64_t data)
47{
48 struct exit_ctl_data *p = &current_vcpu->arch.exit_data;
49 unsigned long psr;
50
51 local_irq_save(psr);
52
53 p->exit_reason = EXIT_REASON_IPI;
54 p->u.ipi_data.addr.val = addr;
55 p->u.ipi_data.data.val = data;
56 vmm_transition(current_vcpu);
57
58 local_irq_restore(psr);
59
60}
61
62void lsapic_write(struct kvm_vcpu *v, unsigned long addr,
63 unsigned long length, unsigned long val)
64{
65 addr &= (PIB_SIZE - 1);
66
67 switch (addr) {
68 case PIB_OFST_INTA:
69 /*panic_domain(NULL, "Undefined write on PIB INTA\n");*/
70 panic_vm(v);
71 break;
72 case PIB_OFST_XTP:
73 if (length == 1) {
74 vlsapic_write_xtp(v, val);
75 } else {
76 /*panic_domain(NULL,
77 "Undefined write on PIB XTP\n");*/
78 panic_vm(v);
79 }
80 break;
81 default:
82 if (PIB_LOW_HALF(addr)) {
 83 /* lower half */
84 if (length != 8)
85 /*panic_domain(NULL,
86 "Can't LHF write with size %ld!\n",
87 length);*/
88 panic_vm(v);
89 else
90 vlsapic_write_ipi(v, addr, val);
 91 } else { /* upper half */
 92 /* printk("IPI-UHF write %lx\n", addr); */
93 panic_vm(v);
94 }
95 break;
96 }
97}
98
99unsigned long lsapic_read(struct kvm_vcpu *v, unsigned long addr,
100 unsigned long length)
101{
102 uint64_t result = 0;
103
104 addr &= (PIB_SIZE - 1);
105
106 switch (addr) {
107 case PIB_OFST_INTA:
108 if (length == 1) /* 1 byte load */
109 ; /* There is no i8259, there is no INTA access*/
110 else
111 /*panic_domain(NULL,"Undefined read on PIB INTA\n"); */
112 panic_vm(v);
113
114 break;
115 case PIB_OFST_XTP:
116 if (length == 1) {
117 result = VLSAPIC_XTP(v);
118 /* printk("read xtp %lx\n", result); */
119 } else {
120 /*panic_domain(NULL,
121 "Undefined read on PIB XTP\n");*/
122 panic_vm(v);
123 }
124 break;
125 default:
126 panic_vm(v);
127 break;
128 }
129 return result;
130}
131
132static void mmio_access(struct kvm_vcpu *vcpu, u64 src_pa, u64 *dest,
133 u16 s, int ma, int dir)
134{
135 unsigned long iot;
136 struct exit_ctl_data *p = &vcpu->arch.exit_data;
137 unsigned long psr;
138
139 iot = __gpfn_is_io(src_pa >> PAGE_SHIFT);
140
141 local_irq_save(psr);
142
 143 /* Intercept accesses to the PIB range */
144 if (iot == GPFN_PIB) {
145 if (!dir)
146 lsapic_write(vcpu, src_pa, s, *dest);
147 else
148 *dest = lsapic_read(vcpu, src_pa, s);
149 goto out;
150 }
151 p->exit_reason = EXIT_REASON_MMIO_INSTRUCTION;
152 p->u.ioreq.addr = src_pa;
153 p->u.ioreq.size = s;
154 p->u.ioreq.dir = dir;
155 if (dir == IOREQ_WRITE)
156 p->u.ioreq.data = *dest;
157 p->u.ioreq.state = STATE_IOREQ_READY;
158 vmm_transition(vcpu);
159
160 if (p->u.ioreq.state == STATE_IORESP_READY) {
161 if (dir == IOREQ_READ)
162 *dest = p->u.ioreq.data;
163 } else
164 panic_vm(vcpu);
165out:
166 local_irq_restore(psr);
 167 return;
168}
169
 170/*
 171 * dir 1: read, 0: write
 172 * inst_type 0: integer, 1: floating point
 173 */
 174#define SL_INTEGER 0 /* store/load integer */
 175#define SL_FLOATING 1 /* store/load floating */
176
177void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma)
178{
179 struct kvm_pt_regs *regs;
180 IA64_BUNDLE bundle;
181 int slot, dir = 0;
182 int inst_type = -1;
183 u16 size = 0;
184 u64 data, slot1a, slot1b, temp, update_reg;
185 s32 imm;
186 INST64 inst;
187
188 regs = vcpu_regs(vcpu);
189
190 if (fetch_code(vcpu, regs->cr_iip, &bundle)) {
 191 /* if fetch_code() fails, return and try again */
192 return;
193 }
194 slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
195 if (!slot)
196 inst.inst = bundle.slot0;
197 else if (slot == 1) {
198 slot1a = bundle.slot1a;
199 slot1b = bundle.slot1b;
200 inst.inst = slot1a + (slot1b << 18);
201 } else if (slot == 2)
202 inst.inst = bundle.slot2;
203
204 /* Integer Load/Store */
205 if (inst.M1.major == 4 && inst.M1.m == 0 && inst.M1.x == 0) {
206 inst_type = SL_INTEGER;
207 size = (inst.M1.x6 & 0x3);
208 if ((inst.M1.x6 >> 2) > 0xb) {
209 /*write*/
210 dir = IOREQ_WRITE;
211 data = vcpu_get_gr(vcpu, inst.M4.r2);
212 } else if ((inst.M1.x6 >> 2) < 0xb) {
213 /*read*/
214 dir = IOREQ_READ;
215 }
216 } else if (inst.M2.major == 4 && inst.M2.m == 1 && inst.M2.x == 0) {
217 /* Integer Load + Reg update */
218 inst_type = SL_INTEGER;
219 dir = IOREQ_READ;
220 size = (inst.M2.x6 & 0x3);
221 temp = vcpu_get_gr(vcpu, inst.M2.r3);
222 update_reg = vcpu_get_gr(vcpu, inst.M2.r2);
223 temp += update_reg;
224 vcpu_set_gr(vcpu, inst.M2.r3, temp, 0);
225 } else if (inst.M3.major == 5) {
226 /*Integer Load/Store + Imm update*/
227 inst_type = SL_INTEGER;
228 size = (inst.M3.x6&0x3);
229 if ((inst.M5.x6 >> 2) > 0xb) {
230 /*write*/
231 dir = IOREQ_WRITE;
232 data = vcpu_get_gr(vcpu, inst.M5.r2);
233 temp = vcpu_get_gr(vcpu, inst.M5.r3);
234 imm = (inst.M5.s << 31) | (inst.M5.i << 30) |
235 (inst.M5.imm7 << 23);
236 temp += imm >> 23;
237 vcpu_set_gr(vcpu, inst.M5.r3, temp, 0);
238
239 } else if ((inst.M3.x6 >> 2) < 0xb) {
240 /*read*/
241 dir = IOREQ_READ;
242 temp = vcpu_get_gr(vcpu, inst.M3.r3);
243 imm = (inst.M3.s << 31) | (inst.M3.i << 30) |
244 (inst.M3.imm7 << 23);
245 temp += imm >> 23;
246 vcpu_set_gr(vcpu, inst.M3.r3, temp, 0);
247
248 }
249 } else if (inst.M9.major == 6 && inst.M9.x6 == 0x3B
250 && inst.M9.m == 0 && inst.M9.x == 0) {
251 /* Floating-point spill*/
252 struct ia64_fpreg v;
253
254 inst_type = SL_FLOATING;
255 dir = IOREQ_WRITE;
256 vcpu_get_fpreg(vcpu, inst.M9.f2, &v);
257 /* Write high word. FIXME: this is a kludge! */
258 v.u.bits[1] &= 0x3ffff;
259 mmio_access(vcpu, padr + 8, &v.u.bits[1], 8, ma, IOREQ_WRITE);
260 data = v.u.bits[0];
261 size = 3;
262 } else if (inst.M10.major == 7 && inst.M10.x6 == 0x3B) {
263 /* Floating-point spill + Imm update */
264 struct ia64_fpreg v;
265
266 inst_type = SL_FLOATING;
267 dir = IOREQ_WRITE;
268 vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
269 temp = vcpu_get_gr(vcpu, inst.M10.r3);
270 imm = (inst.M10.s << 31) | (inst.M10.i << 30) |
271 (inst.M10.imm7 << 23);
272 temp += imm >> 23;
273 vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);
274
 275 /* Write high word. FIXME: this is a kludge! */
276 v.u.bits[1] &= 0x3ffff;
277 mmio_access(vcpu, padr + 8, &v.u.bits[1], 8, ma, IOREQ_WRITE);
278 data = v.u.bits[0];
279 size = 3;
280 } else if (inst.M10.major == 7 && inst.M10.x6 == 0x31) {
281 /* Floating-point stf8 + Imm update */
282 struct ia64_fpreg v;
283 inst_type = SL_FLOATING;
284 dir = IOREQ_WRITE;
285 size = 3;
286 vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
287 data = v.u.bits[0]; /* Significand. */
288 temp = vcpu_get_gr(vcpu, inst.M10.r3);
289 imm = (inst.M10.s << 31) | (inst.M10.i << 30) |
290 (inst.M10.imm7 << 23);
291 temp += imm >> 23;
292 vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);
293 } else if (inst.M15.major == 7 && inst.M15.x6 >= 0x2c
294 && inst.M15.x6 <= 0x2f) {
295 temp = vcpu_get_gr(vcpu, inst.M15.r3);
296 imm = (inst.M15.s << 31) | (inst.M15.i << 30) |
297 (inst.M15.imm7 << 23);
298 temp += imm >> 23;
299 vcpu_set_gr(vcpu, inst.M15.r3, temp, 0);
300
301 vcpu_increment_iip(vcpu);
302 return;
303 } else if (inst.M12.major == 6 && inst.M12.m == 1
304 && inst.M12.x == 1 && inst.M12.x6 == 1) {
305 /* Floating-point Load Pair + Imm ldfp8 M12*/
306 struct ia64_fpreg v;
307
308 inst_type = SL_FLOATING;
309 dir = IOREQ_READ;
310 size = 8; /*ldfd*/
311 mmio_access(vcpu, padr, &data, size, ma, dir);
312 v.u.bits[0] = data;
313 v.u.bits[1] = 0x1003E;
314 vcpu_set_fpreg(vcpu, inst.M12.f1, &v);
315 padr += 8;
316 mmio_access(vcpu, padr, &data, size, ma, dir);
317 v.u.bits[0] = data;
318 v.u.bits[1] = 0x1003E;
319 vcpu_set_fpreg(vcpu, inst.M12.f2, &v);
320 padr += 8;
321 vcpu_set_gr(vcpu, inst.M12.r3, padr, 0);
322 vcpu_increment_iip(vcpu);
323 return;
324 } else {
325 inst_type = -1;
326 panic_vm(vcpu);
327 }
328
329 size = 1 << size;
330 if (dir == IOREQ_WRITE) {
331 mmio_access(vcpu, padr, &data, size, ma, dir);
332 } else {
333 mmio_access(vcpu, padr, &data, size, ma, dir);
334 if (inst_type == SL_INTEGER)
335 vcpu_set_gr(vcpu, inst.M1.r1, data, 0);
336 else
337 panic_vm(vcpu);
338
339 }
340 vcpu_increment_iip(vcpu);
341}
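
Several decode paths above rebuild the 9-bit immediate {s, i, imm7} by packing it into the top of a signed 32-bit value and shifting back down by 23; the arithmetic right shift sign-extends it for free. A self-contained check of that trick (right-shifting a negative int is implementation-defined in ISO C, but arithmetic on the compilers Linux supports; demo names hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Same packing as the M3/M5/M10 decode above:
 * imm = (s << 31) | (i << 30) | (imm7 << 23); then imm >> 23. */
static int32_t demo_sext_imm9(uint32_t s, uint32_t i, uint32_t imm7)
{
	int32_t imm = (int32_t)((s << 31) | (i << 30) | (imm7 << 23));

	return imm >> 23;	/* arithmetic shift sign-extends */
}

int main(void)
{
	printf("%d\n", demo_sext_imm9(0, 0, 5));	/* +5 */
	printf("%d\n", demo_sext_imm9(1, 1, 127));	/* -1 */
	printf("%d\n", demo_sext_imm9(1, 0, 0));	/* -256 */
	return 0;
}
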
diff --git a/arch/ia64/kvm/optvfault.S b/arch/ia64/kvm/optvfault.S
new file mode 100644
index 000000000000..e4f15d641b22
--- /dev/null
+++ b/arch/ia64/kvm/optvfault.S
@@ -0,0 +1,918 @@
1/*
 2 * arch/ia64/kvm/optvfault.S
 3 * optimized virtualization fault handlers
 4 *
 5 * Copyright (C) 2006 Intel Corporation
6 * Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
7 */
8
9#include <asm/asmmacro.h>
10#include <asm/processor.h>
11
12#include "vti.h"
13#include "asm-offsets.h"
14
15#define ACCE_MOV_FROM_AR
16#define ACCE_MOV_FROM_RR
17#define ACCE_MOV_TO_RR
18#define ACCE_RSM
19#define ACCE_SSM
20#define ACCE_MOV_TO_PSR
21#define ACCE_THASH
22
23//mov r1=ar3
24GLOBAL_ENTRY(kvm_asm_mov_from_ar)
25#ifndef ACCE_MOV_FROM_AR
26 br.many kvm_virtualization_fault_back
27#endif
28 add r18=VMM_VCPU_ITC_OFS_OFFSET, r21
29 add r16=VMM_VCPU_LAST_ITC_OFFSET,r21
30 extr.u r17=r25,6,7
31 ;;
32 ld8 r18=[r18]
33 mov r19=ar.itc
34 mov r24=b0
35 ;;
36 add r19=r19,r18
37 addl r20=@gprel(asm_mov_to_reg),gp
38 ;;
39 st8 [r16] = r19
40 adds r30=kvm_resume_to_guest-asm_mov_to_reg,r20
41 shladd r17=r17,4,r20
42 ;;
43 mov b0=r17
44 br.sptk.few b0
45 ;;
46END(kvm_asm_mov_from_ar)
47
48
49// mov r1=rr[r3]
50GLOBAL_ENTRY(kvm_asm_mov_from_rr)
51#ifndef ACCE_MOV_FROM_RR
52 br.many kvm_virtualization_fault_back
53#endif
54 extr.u r16=r25,20,7
55 extr.u r17=r25,6,7
56 addl r20=@gprel(asm_mov_from_reg),gp
57 ;;
58 adds r30=kvm_asm_mov_from_rr_back_1-asm_mov_from_reg,r20
59 shladd r16=r16,4,r20
60 mov r24=b0
61 ;;
62 add r27=VMM_VCPU_VRR0_OFFSET,r21
63 mov b0=r16
64 br.many b0
65 ;;
66kvm_asm_mov_from_rr_back_1:
67 adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
68 adds r22=asm_mov_to_reg-asm_mov_from_reg,r20
69 shr.u r26=r19,61
70 ;;
71 shladd r17=r17,4,r22
72 shladd r27=r26,3,r27
73 ;;
74 ld8 r19=[r27]
75 mov b0=r17
76 br.many b0
77END(kvm_asm_mov_from_rr)
78
79
80// mov rr[r3]=r2
81GLOBAL_ENTRY(kvm_asm_mov_to_rr)
82#ifndef ACCE_MOV_TO_RR
83 br.many kvm_virtualization_fault_back
84#endif
85 extr.u r16=r25,20,7
86 extr.u r17=r25,13,7
87 addl r20=@gprel(asm_mov_from_reg),gp
88 ;;
89 adds r30=kvm_asm_mov_to_rr_back_1-asm_mov_from_reg,r20
90 shladd r16=r16,4,r20
91 mov r22=b0
92 ;;
93 add r27=VMM_VCPU_VRR0_OFFSET,r21
94 mov b0=r16
95 br.many b0
96 ;;
97kvm_asm_mov_to_rr_back_1:
98 adds r30=kvm_asm_mov_to_rr_back_2-asm_mov_from_reg,r20
99 shr.u r23=r19,61
100 shladd r17=r17,4,r20
101 ;;
102 //if rr6, go back
103 cmp.eq p6,p0=6,r23
104 mov b0=r22
105 (p6) br.cond.dpnt.many kvm_virtualization_fault_back
106 ;;
107 mov r28=r19
108 mov b0=r17
109 br.many b0
110kvm_asm_mov_to_rr_back_2:
111 adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
112 shladd r27=r23,3,r27
113 ;; // vrr.rid<<4 |0xe
114 st8 [r27]=r19
115 mov b0=r30
116 ;;
117 extr.u r16=r19,8,26
118 extr.u r18 =r19,2,6
119 mov r17 =0xe
120 ;;
121 shladd r16 = r16, 4, r17
122 extr.u r19 =r19,0,8
123 ;;
124 shl r16 = r16,8
125 ;;
126 add r19 = r19, r16
127 ;; //set ve 1
128 dep r19=-1,r19,0,1
129 cmp.lt p6,p0=14,r18
130 ;;
131 (p6) mov r18=14
132 ;;
133 (p6) dep r19=r18,r19,2,6
134 ;;
135 cmp.eq p6,p0=0,r23
136 ;;
137 cmp.eq.or p6,p0=4,r23
138 ;;
139 adds r16=VMM_VCPU_MODE_FLAGS_OFFSET,r21
140 (p6) adds r17=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
141 ;;
142 ld4 r16=[r16]
143 cmp.eq p7,p0=r0,r0
144 (p6) shladd r17=r23,1,r17
145 ;;
146 (p6) st8 [r17]=r19
147 (p6) tbit.nz p6,p7=r16,0
148 ;;
149 (p7) mov rr[r28]=r19
150 mov r24=r22
151 br.many b0
152END(kvm_asm_mov_to_rr)
153
154
155//rsm
156GLOBAL_ENTRY(kvm_asm_rsm)
157#ifndef ACCE_RSM
158 br.many kvm_virtualization_fault_back
159#endif
160 add r16=VMM_VPD_BASE_OFFSET,r21
161 extr.u r26=r25,6,21
162 extr.u r27=r25,31,2
163 ;;
164 ld8 r16=[r16]
165 extr.u r28=r25,36,1
166 dep r26=r27,r26,21,2
167 ;;
168 add r17=VPD_VPSR_START_OFFSET,r16
169 add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
170 //r26 is imm24
171 dep r26=r28,r26,23,1
172 ;;
173 ld8 r18=[r17]
174 movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI
175 ld4 r23=[r22]
176 sub r27=-1,r26
177 mov r24=b0
178 ;;
179 mov r20=cr.ipsr
180 or r28=r27,r28
181 and r19=r18,r27
182 ;;
183 st8 [r17]=r19
184 and r20=r20,r28
 185 /* Commented out due to lack of fp lazy algorithm support
186 adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
187 ;;
188 ld8 r27=[r27]
189 ;;
190 tbit.nz p8,p0= r27,IA64_PSR_DFH_BIT
191 ;;
192 (p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1
193 */
194 ;;
195 mov cr.ipsr=r20
196 tbit.nz p6,p0=r23,0
197 ;;
198 tbit.z.or p6,p0=r26,IA64_PSR_DT_BIT
199 (p6) br.dptk kvm_resume_to_guest
200 ;;
201 add r26=VMM_VCPU_META_RR0_OFFSET,r21
202 add r27=VMM_VCPU_META_RR0_OFFSET+8,r21
203 dep r23=-1,r23,0,1
204 ;;
205 ld8 r26=[r26]
206 ld8 r27=[r27]
207 st4 [r22]=r23
208 dep.z r28=4,61,3
209 ;;
210 mov rr[r0]=r26
211 ;;
212 mov rr[r28]=r27
213 ;;
214 srlz.d
215 br.many kvm_resume_to_guest
216END(kvm_asm_rsm)
217
218
219//ssm
220GLOBAL_ENTRY(kvm_asm_ssm)
221#ifndef ACCE_SSM
222 br.many kvm_virtualization_fault_back
223#endif
224 add r16=VMM_VPD_BASE_OFFSET,r21
225 extr.u r26=r25,6,21
226 extr.u r27=r25,31,2
227 ;;
228 ld8 r16=[r16]
229 extr.u r28=r25,36,1
230 dep r26=r27,r26,21,2
231 ;; //r26 is imm24
232 add r27=VPD_VPSR_START_OFFSET,r16
233 dep r26=r28,r26,23,1
234 ;; //r19 vpsr
235 ld8 r29=[r27]
236 mov r24=b0
237 ;;
238 add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
239 mov r20=cr.ipsr
240 or r19=r29,r26
241 ;;
242 ld4 r23=[r22]
243 st8 [r27]=r19
244 or r20=r20,r26
245 ;;
246 mov cr.ipsr=r20
247 movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
248 ;;
249 and r19=r28,r19
250 tbit.z p6,p0=r23,0
251 ;;
252 cmp.ne.or p6,p0=r28,r19
253 (p6) br.dptk kvm_asm_ssm_1
254 ;;
255 add r26=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
256 add r27=VMM_VCPU_META_SAVED_RR0_OFFSET+8,r21
257 dep r23=0,r23,0,1
258 ;;
259 ld8 r26=[r26]
260 ld8 r27=[r27]
261 st4 [r22]=r23
262 dep.z r28=4,61,3
263 ;;
264 mov rr[r0]=r26
265 ;;
266 mov rr[r28]=r27
267 ;;
268 srlz.d
269 ;;
270kvm_asm_ssm_1:
271 tbit.nz p6,p0=r29,IA64_PSR_I_BIT
272 ;;
273 tbit.z.or p6,p0=r19,IA64_PSR_I_BIT
274 (p6) br.dptk kvm_resume_to_guest
275 ;;
276 add r29=VPD_VTPR_START_OFFSET,r16
277 add r30=VPD_VHPI_START_OFFSET,r16
278 ;;
279 ld8 r29=[r29]
280 ld8 r30=[r30]
281 ;;
282 extr.u r17=r29,4,4
283 extr.u r18=r29,16,1
284 ;;
285 dep r17=r18,r17,4,1
286 ;;
287 cmp.gt p6,p0=r30,r17
288 (p6) br.dpnt.few kvm_asm_dispatch_vexirq
289 br.many kvm_resume_to_guest
290END(kvm_asm_ssm)
291
292
293//mov psr.l=r2
294GLOBAL_ENTRY(kvm_asm_mov_to_psr)
295#ifndef ACCE_MOV_TO_PSR
296 br.many kvm_virtualization_fault_back
297#endif
298 add r16=VMM_VPD_BASE_OFFSET,r21
299 extr.u r26=r25,13,7 //r2
300 ;;
301 ld8 r16=[r16]
302 addl r20=@gprel(asm_mov_from_reg),gp
303 ;;
304 adds r30=kvm_asm_mov_to_psr_back-asm_mov_from_reg,r20
305 shladd r26=r26,4,r20
306 mov r24=b0
307 ;;
308 add r27=VPD_VPSR_START_OFFSET,r16
309 mov b0=r26
310 br.many b0
311 ;;
312kvm_asm_mov_to_psr_back:
313 ld8 r17=[r27]
314 add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
315 dep r19=0,r19,32,32
316 ;;
317 ld4 r23=[r22]
318 dep r18=0,r17,0,32
319 ;;
320 add r30=r18,r19
321 movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
322 ;;
323 st8 [r27]=r30
324 and r27=r28,r30
325 and r29=r28,r17
326 ;;
327 cmp.eq p5,p0=r29,r27
328 cmp.eq p6,p7=r28,r27
329 (p5) br.many kvm_asm_mov_to_psr_1
330 ;;
331 //virtual to physical
332 (p7) add r26=VMM_VCPU_META_RR0_OFFSET,r21
333 (p7) add r27=VMM_VCPU_META_RR0_OFFSET+8,r21
334 (p7) dep r23=-1,r23,0,1
335 ;;
336 //physical to virtual
337 (p6) add r26=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
338 (p6) add r27=VMM_VCPU_META_SAVED_RR0_OFFSET+8,r21
339 (p6) dep r23=0,r23,0,1
340 ;;
341 ld8 r26=[r26]
342 ld8 r27=[r27]
343 st4 [r22]=r23
344 dep.z r28=4,61,3
345 ;;
346 mov rr[r0]=r26
347 ;;
348 mov rr[r28]=r27
349 ;;
350 srlz.d
351 ;;
352kvm_asm_mov_to_psr_1:
353 mov r20=cr.ipsr
354 movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI+IA64_PSR_RT
355 ;;
356 or r19=r19,r28
357 dep r20=0,r20,0,32
358 ;;
359 add r20=r19,r20
360 mov b0=r24
361 ;;
 362 /* Commented out due to lack of fp lazy algorithm support
363 adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
364 ;;
365 ld8 r27=[r27]
366 ;;
367 tbit.nz p8,p0=r27,IA64_PSR_DFH_BIT
368 ;;
369 (p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1
370 ;;
371 */
372 mov cr.ipsr=r20
373 cmp.ne p6,p0=r0,r0
374 ;;
375 tbit.nz.or p6,p0=r17,IA64_PSR_I_BIT
376 tbit.z.or p6,p0=r30,IA64_PSR_I_BIT
377 (p6) br.dpnt.few kvm_resume_to_guest
378 ;;
379 add r29=VPD_VTPR_START_OFFSET,r16
380 add r30=VPD_VHPI_START_OFFSET,r16
381 ;;
382 ld8 r29=[r29]
383 ld8 r30=[r30]
384 ;;
385 extr.u r17=r29,4,4
386 extr.u r18=r29,16,1
387 ;;
388 dep r17=r18,r17,4,1
389 ;;
390 cmp.gt p6,p0=r30,r17
391 (p6) br.dpnt.few kvm_asm_dispatch_vexirq
392 br.many kvm_resume_to_guest
393END(kvm_asm_mov_to_psr)
394
395
396ENTRY(kvm_asm_dispatch_vexirq)
397//increment iip
398 mov r16=cr.ipsr
399 ;;
400 extr.u r17=r16,IA64_PSR_RI_BIT,2
401 tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
402 ;;
403 (p6) mov r18=cr.iip
404 (p6) mov r17=r0
405 (p7) add r17=1,r17
406 ;;
407 (p6) add r18=0x10,r18
408 dep r16=r17,r16,IA64_PSR_RI_BIT,2
409 ;;
410 (p6) mov cr.iip=r18
411 mov cr.ipsr=r16
412 mov r30 =1
413 br.many kvm_dispatch_vexirq
414END(kvm_asm_dispatch_vexirq)
415
416// thash
417// TODO: add support when pta.vf = 1
418GLOBAL_ENTRY(kvm_asm_thash)
419#ifndef ACCE_THASH
420 br.many kvm_virtualization_fault_back
421#endif
422 extr.u r17=r25,20,7 // get r3 from opcode in r25
423 extr.u r18=r25,6,7 // get r1 from opcode in r25
424 addl r20=@gprel(asm_mov_from_reg),gp
425 ;;
426 adds r30=kvm_asm_thash_back1-asm_mov_from_reg,r20
427 shladd r17=r17,4,r20 // get addr of MOVE_FROM_REG(r17)
428 adds r16=VMM_VPD_BASE_OFFSET,r21 // get vcpu.arch.priveregs
429 ;;
430 mov r24=b0
431 ;;
432 ld8 r16=[r16] // get VPD addr
433 mov b0=r17
434 br.many b0 // r19 return value
435 ;;
436kvm_asm_thash_back1:
437 shr.u r23=r19,61 // get RR number
438 adds r25=VMM_VCPU_VRR0_OFFSET,r21 // get vcpu->arch.vrr[0]'s addr
439 adds r16=VMM_VPD_VPTA_OFFSET,r16 // get vpta
440 ;;
441 shladd r27=r23,3,r25 // get vcpu->arch.vrr[r23]'s addr
442 ld8 r17=[r16] // get PTA
443 mov r26=1
444 ;;
445 extr.u r29=r17,2,6 // get pta.size
446 ld8 r25=[r27] // get vcpu->arch.vrr[r23]'s value
447 ;;
448 extr.u r25=r25,2,6 // get rr.ps
449 shl r22=r26,r29 // 1UL << pta.size
450 ;;
451 shr.u r23=r19,r25 // vaddr >> rr.ps
452 adds r26=3,r29 // pta.size + 3
453 shl r27=r17,3 // pta << 3
454 ;;
455 shl r23=r23,3 // (vaddr >> rr.ps) << 3
456 shr.u r27=r27,r26 // (pta << 3) >> (pta.size+3)
457 movl r16=7<<61
458 ;;
459 adds r22=-1,r22 // (1UL << pta.size) - 1
460 shl r27=r27,r29 // ((pta<<3)>>(pta.size+3))<<pta.size
461 and r19=r19,r16 // vaddr & VRN_MASK
462 ;;
463 and r22=r22,r23 // vhpt_offset
464 or r19=r19,r27 // (vadr&VRN_MASK)|(((pta<<3)>>(pta.size + 3))<<pta.size)
465 adds r26=asm_mov_to_reg-asm_mov_from_reg,r20
466 ;;
467 or r19=r19,r22 // calc pval
468 shladd r17=r18,4,r26
469 adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
470 ;;
471 mov b0=r17
472 br.many b0
473END(kvm_asm_thash)
474
475#define MOV_TO_REG0 \
476{; \
477 nop.b 0x0; \
478 nop.b 0x0; \
479 nop.b 0x0; \
480 ;; \
481};
482
483
484#define MOV_TO_REG(n) \
485{; \
486 mov r##n##=r19; \
487 mov b0=r30; \
488 br.sptk.many b0; \
489 ;; \
490};
491
492
493#define MOV_FROM_REG(n) \
494{; \
495 mov r19=r##n##; \
496 mov b0=r30; \
497 br.sptk.many b0; \
498 ;; \
499};
500
501
502#define MOV_TO_BANK0_REG(n) \
503ENTRY_MIN_ALIGN(asm_mov_to_bank0_reg##n##); \
504{; \
505 mov r26=r2; \
506 mov r2=r19; \
507 bsw.1; \
508 ;; \
509}; \
510{; \
511 mov r##n##=r2; \
512 nop.b 0x0; \
513 bsw.0; \
514 ;; \
515}; \
516{; \
517 mov r2=r26; \
518 mov b0=r30; \
519 br.sptk.many b0; \
520 ;; \
521}; \
522END(asm_mov_to_bank0_reg##n##)
523
524
525#define MOV_FROM_BANK0_REG(n) \
526ENTRY_MIN_ALIGN(asm_mov_from_bank0_reg##n##); \
527{; \
528 mov r26=r2; \
529 nop.b 0x0; \
530 bsw.1; \
531 ;; \
532}; \
533{; \
534 mov r2=r##n##; \
535 nop.b 0x0; \
536 bsw.0; \
537 ;; \
538}; \
539{; \
540 mov r19=r2; \
541 mov r2=r26; \
542 mov b0=r30; \
543}; \
544{; \
545 nop.b 0x0; \
546 nop.b 0x0; \
547 br.sptk.many b0; \
548 ;; \
549}; \
550END(asm_mov_from_bank0_reg##n##)
551
552
553#define JMP_TO_MOV_TO_BANK0_REG(n) \
554{; \
555 nop.b 0x0; \
556 nop.b 0x0; \
557 br.sptk.many asm_mov_to_bank0_reg##n##; \
558 ;; \
559}
560
561
562#define JMP_TO_MOV_FROM_BANK0_REG(n) \
563{; \
564 nop.b 0x0; \
565 nop.b 0x0; \
566 br.sptk.many asm_mov_from_bank0_reg##n##; \
567 ;; \
568}
569
570
571MOV_FROM_BANK0_REG(16)
572MOV_FROM_BANK0_REG(17)
573MOV_FROM_BANK0_REG(18)
574MOV_FROM_BANK0_REG(19)
575MOV_FROM_BANK0_REG(20)
576MOV_FROM_BANK0_REG(21)
577MOV_FROM_BANK0_REG(22)
578MOV_FROM_BANK0_REG(23)
579MOV_FROM_BANK0_REG(24)
580MOV_FROM_BANK0_REG(25)
581MOV_FROM_BANK0_REG(26)
582MOV_FROM_BANK0_REG(27)
583MOV_FROM_BANK0_REG(28)
584MOV_FROM_BANK0_REG(29)
585MOV_FROM_BANK0_REG(30)
586MOV_FROM_BANK0_REG(31)
587
588
589// mov from reg table
590ENTRY(asm_mov_from_reg)
591 MOV_FROM_REG(0)
592 MOV_FROM_REG(1)
593 MOV_FROM_REG(2)
594 MOV_FROM_REG(3)
595 MOV_FROM_REG(4)
596 MOV_FROM_REG(5)
597 MOV_FROM_REG(6)
598 MOV_FROM_REG(7)
599 MOV_FROM_REG(8)
600 MOV_FROM_REG(9)
601 MOV_FROM_REG(10)
602 MOV_FROM_REG(11)
603 MOV_FROM_REG(12)
604 MOV_FROM_REG(13)
605 MOV_FROM_REG(14)
606 MOV_FROM_REG(15)
607 JMP_TO_MOV_FROM_BANK0_REG(16)
608 JMP_TO_MOV_FROM_BANK0_REG(17)
609 JMP_TO_MOV_FROM_BANK0_REG(18)
610 JMP_TO_MOV_FROM_BANK0_REG(19)
611 JMP_TO_MOV_FROM_BANK0_REG(20)
612 JMP_TO_MOV_FROM_BANK0_REG(21)
613 JMP_TO_MOV_FROM_BANK0_REG(22)
614 JMP_TO_MOV_FROM_BANK0_REG(23)
615 JMP_TO_MOV_FROM_BANK0_REG(24)
616 JMP_TO_MOV_FROM_BANK0_REG(25)
617 JMP_TO_MOV_FROM_BANK0_REG(26)
618 JMP_TO_MOV_FROM_BANK0_REG(27)
619 JMP_TO_MOV_FROM_BANK0_REG(28)
620 JMP_TO_MOV_FROM_BANK0_REG(29)
621 JMP_TO_MOV_FROM_BANK0_REG(30)
622 JMP_TO_MOV_FROM_BANK0_REG(31)
623 MOV_FROM_REG(32)
624 MOV_FROM_REG(33)
625 MOV_FROM_REG(34)
626 MOV_FROM_REG(35)
627 MOV_FROM_REG(36)
628 MOV_FROM_REG(37)
629 MOV_FROM_REG(38)
630 MOV_FROM_REG(39)
631 MOV_FROM_REG(40)
632 MOV_FROM_REG(41)
633 MOV_FROM_REG(42)
634 MOV_FROM_REG(43)
635 MOV_FROM_REG(44)
636 MOV_FROM_REG(45)
637 MOV_FROM_REG(46)
638 MOV_FROM_REG(47)
639 MOV_FROM_REG(48)
640 MOV_FROM_REG(49)
641 MOV_FROM_REG(50)
642 MOV_FROM_REG(51)
643 MOV_FROM_REG(52)
644 MOV_FROM_REG(53)
645 MOV_FROM_REG(54)
646 MOV_FROM_REG(55)
647 MOV_FROM_REG(56)
648 MOV_FROM_REG(57)
649 MOV_FROM_REG(58)
650 MOV_FROM_REG(59)
651 MOV_FROM_REG(60)
652 MOV_FROM_REG(61)
653 MOV_FROM_REG(62)
654 MOV_FROM_REG(63)
655 MOV_FROM_REG(64)
656 MOV_FROM_REG(65)
657 MOV_FROM_REG(66)
658 MOV_FROM_REG(67)
659 MOV_FROM_REG(68)
660 MOV_FROM_REG(69)
661 MOV_FROM_REG(70)
662 MOV_FROM_REG(71)
663 MOV_FROM_REG(72)
664 MOV_FROM_REG(73)
665 MOV_FROM_REG(74)
666 MOV_FROM_REG(75)
667 MOV_FROM_REG(76)
668 MOV_FROM_REG(77)
669 MOV_FROM_REG(78)
670 MOV_FROM_REG(79)
671 MOV_FROM_REG(80)
672 MOV_FROM_REG(81)
673 MOV_FROM_REG(82)
674 MOV_FROM_REG(83)
675 MOV_FROM_REG(84)
676 MOV_FROM_REG(85)
677 MOV_FROM_REG(86)
678 MOV_FROM_REG(87)
679 MOV_FROM_REG(88)
680 MOV_FROM_REG(89)
681 MOV_FROM_REG(90)
682 MOV_FROM_REG(91)
683 MOV_FROM_REG(92)
684 MOV_FROM_REG(93)
685 MOV_FROM_REG(94)
686 MOV_FROM_REG(95)
687 MOV_FROM_REG(96)
688 MOV_FROM_REG(97)
689 MOV_FROM_REG(98)
690 MOV_FROM_REG(99)
691 MOV_FROM_REG(100)
692 MOV_FROM_REG(101)
693 MOV_FROM_REG(102)
694 MOV_FROM_REG(103)
695 MOV_FROM_REG(104)
696 MOV_FROM_REG(105)
697 MOV_FROM_REG(106)
698 MOV_FROM_REG(107)
699 MOV_FROM_REG(108)
700 MOV_FROM_REG(109)
701 MOV_FROM_REG(110)
702 MOV_FROM_REG(111)
703 MOV_FROM_REG(112)
704 MOV_FROM_REG(113)
705 MOV_FROM_REG(114)
706 MOV_FROM_REG(115)
707 MOV_FROM_REG(116)
708 MOV_FROM_REG(117)
709 MOV_FROM_REG(118)
710 MOV_FROM_REG(119)
711 MOV_FROM_REG(120)
712 MOV_FROM_REG(121)
713 MOV_FROM_REG(122)
714 MOV_FROM_REG(123)
715 MOV_FROM_REG(124)
716 MOV_FROM_REG(125)
717 MOV_FROM_REG(126)
718 MOV_FROM_REG(127)
719END(asm_mov_from_reg)
720
721
722/* must be in bank 0
723 * parameter:
724 * r31: pr
725 * r24: b0
726 */
727ENTRY(kvm_resume_to_guest)
728 adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
729 ;;
730 ld8 r1 =[r16]
731 adds r20 = VMM_VCPU_VSA_BASE_OFFSET,r21
732 ;;
733 mov r16=cr.ipsr
734 ;;
735 ld8 r20 = [r20]
736 adds r19=VMM_VPD_BASE_OFFSET,r21
737 ;;
738 ld8 r25=[r19]
739 extr.u r17=r16,IA64_PSR_RI_BIT,2
740 tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
741 ;;
742 (p6) mov r18=cr.iip
743 (p6) mov r17=r0
744 ;;
745 (p6) add r18=0x10,r18
746 (p7) add r17=1,r17
747 ;;
748 (p6) mov cr.iip=r18
749 dep r16=r17,r16,IA64_PSR_RI_BIT,2
750 ;;
751 mov cr.ipsr=r16
752 adds r19= VPD_VPSR_START_OFFSET,r25
753 add r28=PAL_VPS_RESUME_NORMAL,r20
754 add r29=PAL_VPS_RESUME_HANDLER,r20
755 ;;
756 ld8 r19=[r19]
757 mov b0=r29
758 cmp.ne p6,p7 = r0,r0
759 ;;
460 tbit.z p6,p7 = r19,IA64_PSR_IC_BIT // p6 = !vpsr.ic, p7 = vpsr.ic
761 ;;
762 (p6) ld8 r26=[r25]
763 (p7) mov b0=r28
764 mov pr=r31,-2
765 br.sptk.many b0 // call pal service
766 ;;
767END(kvm_resume_to_guest)
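/*
 * The cr.ipsr/cr.iip update above advances the guest one instruction
 * slot; as a C sketch (illustrative only):
 *
 *	if (ipsr.ri == 2) {		// last slot in the bundle
 *		iip += 16;		// step to the next bundle
 *		ipsr.ri = 0;
 *	} else {
 *		ipsr.ri++;		// step to the next slot
 *	}
 */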
768
769
770MOV_TO_BANK0_REG(16)
771MOV_TO_BANK0_REG(17)
772MOV_TO_BANK0_REG(18)
773MOV_TO_BANK0_REG(19)
774MOV_TO_BANK0_REG(20)
775MOV_TO_BANK0_REG(21)
776MOV_TO_BANK0_REG(22)
777MOV_TO_BANK0_REG(23)
778MOV_TO_BANK0_REG(24)
779MOV_TO_BANK0_REG(25)
780MOV_TO_BANK0_REG(26)
781MOV_TO_BANK0_REG(27)
782MOV_TO_BANK0_REG(28)
783MOV_TO_BANK0_REG(29)
784MOV_TO_BANK0_REG(30)
785MOV_TO_BANK0_REG(31)
786
787
788// mov to reg table
789ENTRY(asm_mov_to_reg)
790 MOV_TO_REG0
791 MOV_TO_REG(1)
792 MOV_TO_REG(2)
793 MOV_TO_REG(3)
794 MOV_TO_REG(4)
795 MOV_TO_REG(5)
796 MOV_TO_REG(6)
797 MOV_TO_REG(7)
798 MOV_TO_REG(8)
799 MOV_TO_REG(9)
800 MOV_TO_REG(10)
801 MOV_TO_REG(11)
802 MOV_TO_REG(12)
803 MOV_TO_REG(13)
804 MOV_TO_REG(14)
805 MOV_TO_REG(15)
806 JMP_TO_MOV_TO_BANK0_REG(16)
807 JMP_TO_MOV_TO_BANK0_REG(17)
808 JMP_TO_MOV_TO_BANK0_REG(18)
809 JMP_TO_MOV_TO_BANK0_REG(19)
810 JMP_TO_MOV_TO_BANK0_REG(20)
811 JMP_TO_MOV_TO_BANK0_REG(21)
812 JMP_TO_MOV_TO_BANK0_REG(22)
813 JMP_TO_MOV_TO_BANK0_REG(23)
814 JMP_TO_MOV_TO_BANK0_REG(24)
815 JMP_TO_MOV_TO_BANK0_REG(25)
816 JMP_TO_MOV_TO_BANK0_REG(26)
817 JMP_TO_MOV_TO_BANK0_REG(27)
818 JMP_TO_MOV_TO_BANK0_REG(28)
819 JMP_TO_MOV_TO_BANK0_REG(29)
820 JMP_TO_MOV_TO_BANK0_REG(30)
821 JMP_TO_MOV_TO_BANK0_REG(31)
822 MOV_TO_REG(32)
823 MOV_TO_REG(33)
824 MOV_TO_REG(34)
825 MOV_TO_REG(35)
826 MOV_TO_REG(36)
827 MOV_TO_REG(37)
828 MOV_TO_REG(38)
829 MOV_TO_REG(39)
830 MOV_TO_REG(40)
831 MOV_TO_REG(41)
832 MOV_TO_REG(42)
833 MOV_TO_REG(43)
834 MOV_TO_REG(44)
835 MOV_TO_REG(45)
836 MOV_TO_REG(46)
837 MOV_TO_REG(47)
838 MOV_TO_REG(48)
839 MOV_TO_REG(49)
840 MOV_TO_REG(50)
841 MOV_TO_REG(51)
842 MOV_TO_REG(52)
843 MOV_TO_REG(53)
844 MOV_TO_REG(54)
845 MOV_TO_REG(55)
846 MOV_TO_REG(56)
847 MOV_TO_REG(57)
848 MOV_TO_REG(58)
849 MOV_TO_REG(59)
850 MOV_TO_REG(60)
851 MOV_TO_REG(61)
852 MOV_TO_REG(62)
853 MOV_TO_REG(63)
854 MOV_TO_REG(64)
855 MOV_TO_REG(65)
856 MOV_TO_REG(66)
857 MOV_TO_REG(67)
858 MOV_TO_REG(68)
859 MOV_TO_REG(69)
860 MOV_TO_REG(70)
861 MOV_TO_REG(71)
862 MOV_TO_REG(72)
863 MOV_TO_REG(73)
864 MOV_TO_REG(74)
865 MOV_TO_REG(75)
866 MOV_TO_REG(76)
867 MOV_TO_REG(77)
868 MOV_TO_REG(78)
869 MOV_TO_REG(79)
870 MOV_TO_REG(80)
871 MOV_TO_REG(81)
872 MOV_TO_REG(82)
873 MOV_TO_REG(83)
874 MOV_TO_REG(84)
875 MOV_TO_REG(85)
876 MOV_TO_REG(86)
877 MOV_TO_REG(87)
878 MOV_TO_REG(88)
879 MOV_TO_REG(89)
880 MOV_TO_REG(90)
881 MOV_TO_REG(91)
882 MOV_TO_REG(92)
883 MOV_TO_REG(93)
884 MOV_TO_REG(94)
885 MOV_TO_REG(95)
886 MOV_TO_REG(96)
887 MOV_TO_REG(97)
888 MOV_TO_REG(98)
889 MOV_TO_REG(99)
890 MOV_TO_REG(100)
891 MOV_TO_REG(101)
892 MOV_TO_REG(102)
893 MOV_TO_REG(103)
894 MOV_TO_REG(104)
895 MOV_TO_REG(105)
896 MOV_TO_REG(106)
897 MOV_TO_REG(107)
898 MOV_TO_REG(108)
899 MOV_TO_REG(109)
900 MOV_TO_REG(110)
901 MOV_TO_REG(111)
902 MOV_TO_REG(112)
903 MOV_TO_REG(113)
904 MOV_TO_REG(114)
905 MOV_TO_REG(115)
906 MOV_TO_REG(116)
907 MOV_TO_REG(117)
908 MOV_TO_REG(118)
909 MOV_TO_REG(119)
910 MOV_TO_REG(120)
911 MOV_TO_REG(121)
912 MOV_TO_REG(122)
913 MOV_TO_REG(123)
914 MOV_TO_REG(124)
915 MOV_TO_REG(125)
916 MOV_TO_REG(126)
917 MOV_TO_REG(127)
918END(asm_mov_to_reg)
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c
new file mode 100644
index 000000000000..5a33f7ed29a0
--- /dev/null
+++ b/arch/ia64/kvm/process.c
@@ -0,0 +1,970 @@
1/*
2 * process.c: handle interruption inject for guests.
3 * Copyright (c) 2005, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 *
18 * Shaofan Li (Susue Li) <susie.li@intel.com>
19 * Xiaoyan Feng (Fleming Feng) <fleming.feng@intel.com>
20 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
21 * Xiantao Zhang (xiantao.zhang@intel.com)
22 */
23#include "vcpu.h"
24
25#include <asm/pal.h>
26#include <asm/sal.h>
27#include <asm/fpswa.h>
28#include <asm/kregs.h>
29#include <asm/tlb.h>
30
31fpswa_interface_t *vmm_fpswa_interface;
32
33#define IA64_VHPT_TRANS_VECTOR 0x0000
34#define IA64_INST_TLB_VECTOR 0x0400
35#define IA64_DATA_TLB_VECTOR 0x0800
36#define IA64_ALT_INST_TLB_VECTOR 0x0c00
37#define IA64_ALT_DATA_TLB_VECTOR 0x1000
38#define IA64_DATA_NESTED_TLB_VECTOR 0x1400
39#define IA64_INST_KEY_MISS_VECTOR 0x1800
40#define IA64_DATA_KEY_MISS_VECTOR 0x1c00
41#define IA64_DIRTY_BIT_VECTOR 0x2000
42#define IA64_INST_ACCESS_BIT_VECTOR 0x2400
43#define IA64_DATA_ACCESS_BIT_VECTOR 0x2800
44#define IA64_BREAK_VECTOR 0x2c00
45#define IA64_EXTINT_VECTOR 0x3000
46#define IA64_PAGE_NOT_PRESENT_VECTOR 0x5000
47#define IA64_KEY_PERMISSION_VECTOR 0x5100
48#define IA64_INST_ACCESS_RIGHTS_VECTOR 0x5200
49#define IA64_DATA_ACCESS_RIGHTS_VECTOR 0x5300
50#define IA64_GENEX_VECTOR 0x5400
51#define IA64_DISABLED_FPREG_VECTOR 0x5500
52#define IA64_NAT_CONSUMPTION_VECTOR 0x5600
53#define IA64_SPECULATION_VECTOR 0x5700 /* UNUSED */
54#define IA64_DEBUG_VECTOR 0x5900
55#define IA64_UNALIGNED_REF_VECTOR 0x5a00
56#define IA64_UNSUPPORTED_DATA_REF_VECTOR 0x5b00
57#define IA64_FP_FAULT_VECTOR 0x5c00
58#define IA64_FP_TRAP_VECTOR 0x5d00
59#define IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR 0x5e00
60#define IA64_TAKEN_BRANCH_TRAP_VECTOR 0x5f00
61#define IA64_SINGLE_STEP_TRAP_VECTOR 0x6000
62
63/* SDM vol2 5.5 - IVA based interruption handling */
64#define INITIAL_PSR_VALUE_AT_INTERRUPTION (IA64_PSR_UP | IA64_PSR_MFL |\
65 IA64_PSR_MFH | IA64_PSR_PK | IA64_PSR_DT | \
66 IA64_PSR_RT | IA64_PSR_MC|IA64_PSR_IT)
67
68#define DOMN_PAL_REQUEST 0x110000
69#define DOMN_SAL_REQUEST 0x110001
70
71static u64 vec2off[68] = {0x0, 0x400, 0x800, 0xc00, 0x1000, 0x1400, 0x1800,
72 0x1c00, 0x2000, 0x2400, 0x2800, 0x2c00, 0x3000, 0x3400, 0x3800, 0x3c00,
73 0x4000, 0x4400, 0x4800, 0x4c00, 0x5000, 0x5100, 0x5200, 0x5300, 0x5400,
74 0x5500, 0x5600, 0x5700, 0x5800, 0x5900, 0x5a00, 0x5b00, 0x5c00, 0x5d00,
75 0x5e00, 0x5f00, 0x6000, 0x6100, 0x6200, 0x6300, 0x6400, 0x6500, 0x6600,
76 0x6700, 0x6800, 0x6900, 0x6a00, 0x6b00, 0x6c00, 0x6d00, 0x6e00, 0x6f00,
77 0x7000, 0x7100, 0x7200, 0x7300, 0x7400, 0x7500, 0x7600, 0x7700, 0x7800,
78 0x7900, 0x7a00, 0x7b00, 0x7c00, 0x7d00, 0x7e00, 0x7f00
79};
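/*
 * vec2off[] maps the small vector numbers passed around internally
 * (e.g. to reflect_interruption()) back to architected IVT offsets:
 * index 11 is 0x2c00 (Break Instruction) and index 12 is 0x3000
 * (External Interrupt), which is why callers below pass the literals
 * 11 and 12.
 */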
80
81static void collect_interruption(struct kvm_vcpu *vcpu)
82{
83 u64 ipsr;
84 u64 vdcr;
85 u64 vifs;
86 unsigned long vpsr;
87 struct kvm_pt_regs *regs = vcpu_regs(vcpu);
88
89 vpsr = vcpu_get_psr(vcpu);
90 vcpu_bsw0(vcpu);
91 if (vpsr & IA64_PSR_IC) {
92
 93 /* Sync mpsr id/da/dd/ss/ed bits to vipsr,
 94 * since after the guest does rfi, we still want these
 95 * bits set in mpsr.
 96 */
97
98 ipsr = regs->cr_ipsr;
99 vpsr = vpsr | (ipsr & (IA64_PSR_ID | IA64_PSR_DA
100 | IA64_PSR_DD | IA64_PSR_SS
101 | IA64_PSR_ED));
102 vcpu_set_ipsr(vcpu, vpsr);
103
104 /* Currently, for a trap, we do not advance IIP to the next
105 * instruction, because we assume the caller has already
106 * set up IIP correctly.
107 */
108
109 vcpu_set_iip(vcpu , regs->cr_iip);
110
111 /* set vifs.v to zero */
112 vifs = VCPU(vcpu, ifs);
113 vifs &= ~IA64_IFS_V;
114 vcpu_set_ifs(vcpu, vifs);
115
116 vcpu_set_iipa(vcpu, VMX(vcpu, cr_iipa));
117 }
118
119 vdcr = VCPU(vcpu, dcr);
120
121 /* Set guest psr
122 * up/mfl/mfh/pk/dt/rt/mc/it remain unchanged
123 * be: set to the value of dcr.be
124 * pp: set to the value of dcr.pp
125 */
126 vpsr &= INITIAL_PSR_VALUE_AT_INTERRUPTION;
127 vpsr |= (vdcr & IA64_DCR_BE);
128
129 /* VDCR pp bit position is different from VPSR pp bit */
130 if (vdcr & IA64_DCR_PP) {
131 vpsr |= IA64_PSR_PP;
132 } else {
133 vpsr &= ~IA64_PSR_PP;
134 }
135
136 vcpu_set_psr(vcpu, vpsr);
137
138}
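/*
 * Net effect of the masking above: on delivery the guest psr keeps
 * up/mfl/mfh/pk/dt/rt/mc/it, has i and ic cleared (interrupt
 * collection off), and takes be and pp from the guest dcr, matching
 * the IVA-based interruption rules cited above (SDM vol2 5.5).
 */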
139
140void inject_guest_interruption(struct kvm_vcpu *vcpu, u64 vec)
141{
142 u64 viva;
143 struct kvm_pt_regs *regs;
144 union ia64_isr pt_isr;
145
146 regs = vcpu_regs(vcpu);
147
148 /* clear cr.isr.ir (incomplete register frame)*/
149 pt_isr.val = VMX(vcpu, cr_isr);
150 pt_isr.ir = 0;
151 VMX(vcpu, cr_isr) = pt_isr.val;
152
153 collect_interruption(vcpu);
154
155 viva = vcpu_get_iva(vcpu);
156 regs->cr_iip = viva + vec;
157}
158
159static u64 vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, u64 ifa)
160{
161 union ia64_rr rr, rr1;
162
163 rr.val = vcpu_get_rr(vcpu, ifa);
164 rr1.val = 0;
165 rr1.ps = rr.ps;
166 rr1.rid = rr.rid;
167 return (rr1.val);
168}
169
170
171/*
172 * Set vIFA & vITIR & vIHA, when vPSR.ic =1
173 * Parameter:
174 * set_ifa: if true, set vIFA
175 * set_itir: if true, set vITIR
176 * set_iha: if true, set vIHA
177 */
178void set_ifa_itir_iha(struct kvm_vcpu *vcpu, u64 vadr,
179 int set_ifa, int set_itir, int set_iha)
180{
181 long vpsr;
182 u64 value;
183
184 vpsr = VCPU(vcpu, vpsr);
185 /* Vol2, Table 8-1 */
186 if (vpsr & IA64_PSR_IC) {
187 if (set_ifa)
188 vcpu_set_ifa(vcpu, vadr);
189 if (set_itir) {
190 value = vcpu_get_itir_on_fault(vcpu, vadr);
191 vcpu_set_itir(vcpu, value);
192 }
193
194 if (set_iha) {
195 value = vcpu_thash(vcpu, vadr);
196 vcpu_set_iha(vcpu, value);
197 }
198 }
199}
200
201/*
202 * Data TLB Fault
203 * @ Data TLB vector
204 * Refer to SDM Vol2 Table 5-6 & 8-1
205 */
206void dtlb_fault(struct kvm_vcpu *vcpu, u64 vadr)
207{
208 /* If vPSR.ic, IFA, ITIR, IHA */
209 set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
210 inject_guest_interruption(vcpu, IA64_DATA_TLB_VECTOR);
211}
212
213/*
214 * Instruction TLB Fault
215 * @ Instruction TLB vector
216 * Refer to SDM Vol2 Table 5-6 & 8-1
217 */
218void itlb_fault(struct kvm_vcpu *vcpu, u64 vadr)
219{
220 /* If vPSR.ic, IFA, ITIR, IHA */
221 set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
222 inject_guest_interruption(vcpu, IA64_INST_TLB_VECTOR);
223}
224
225
226
227/*
228 * Data Nested TLB Fault
229 * @ Data Nested TLB Vector
230 * Refer to SDM Vol2 Table 5-6 & 8-1
231 */
232void nested_dtlb(struct kvm_vcpu *vcpu)
233{
234 inject_guest_interruption(vcpu, IA64_DATA_NESTED_TLB_VECTOR);
235}
236
237/*
238 * Alternate Data TLB Fault
239 * @ Alternate Data TLB vector
240 * Refer to SDM Vol2 Table 5-6 & 8-1
241 */
242void alt_dtlb(struct kvm_vcpu *vcpu, u64 vadr)
243{
244 set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
245 inject_guest_interruption(vcpu, IA64_ALT_DATA_TLB_VECTOR);
246}
247
248
249/*
250 * Alternate Instruction TLB Fault
251 * @ Alternate Instruction TLB vector
252 * Refer to SDM Vol2 Table 5-6 & 8-1
253 */
254void alt_itlb(struct kvm_vcpu *vcpu, u64 vadr)
255{
256 set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
257 inject_guest_interruption(vcpu, IA64_ALT_INST_TLB_VECTOR);
258}
259
260/* Deal with:
261 * VHPT Translation Vector
262 */
263static void _vhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
264{
265 /* If vPSR.ic, IFA, ITIR, IHA*/
266 set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
267 inject_guest_interruption(vcpu, IA64_VHPT_TRANS_VECTOR);
268
269
270}
271
272/*
273 * VHPT Instruction Fault
274 * @ VHPT Translation vector
275 * Refer to SDM Vol2 Table 5-6 & 8-1
276 */
277void ivhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
278{
279 _vhpt_fault(vcpu, vadr);
280}
281
282
283/*
284 * VHPT Data Fault
285 * @ VHPT Translation vector
286 * Refer to SDM Vol2 Table 5-6 & 8-1
287 */
288void dvhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
289{
290 _vhpt_fault(vcpu, vadr);
291}
292
293
294
295/*
296 * Deal with:
297 * General Exception vector
298 */
299void _general_exception(struct kvm_vcpu *vcpu)
300{
301 inject_guest_interruption(vcpu, IA64_GENEX_VECTOR);
302}
303
304
305/*
306 * Illegal Operation Fault
307 * @ General Exception Vector
308 * Refer to SDM Vol2 Table 5-6 & 8-1
309 */
310void illegal_op(struct kvm_vcpu *vcpu)
311{
312 _general_exception(vcpu);
313}
314
315/*
316 * Illegal Dependency Fault
317 * @ General Exception Vector
318 * Refer to SDM Vol2 Table 5-6 & 8-1
319 */
320void illegal_dep(struct kvm_vcpu *vcpu)
321{
322 _general_exception(vcpu);
323}
324
325/*
326 * Reserved Register/Field Fault
327 * @ General Exception Vector
328 * Refer to SDM Vol2 Table 5-6 & 8-1
329 */
330void rsv_reg_field(struct kvm_vcpu *vcpu)
331{
332 _general_exception(vcpu);
333}
334/*
335 * Privileged Operation Fault
336 * @ General Exception Vector
337 * Refer to SDM Vol2 Table 5-6 & 8-1
338 */
339
340void privilege_op(struct kvm_vcpu *vcpu)
341{
342 _general_exception(vcpu);
343}
344
345/*
346 * Unimplemented Data Address Fault
347 * @ General Exception Vector
348 * Refer to SDM Vol2 Table 5-6 & 8-1
349 */
350void unimpl_daddr(struct kvm_vcpu *vcpu)
351{
352 _general_exception(vcpu);
353}
354
355/*
356 * Privileged Register Fault
357 * @ General Exception Vector
358 * Refer to SDM Vol2 Table 5-6 & 8-1
359 */
360void privilege_reg(struct kvm_vcpu *vcpu)
361{
362 _general_exception(vcpu);
363}
364
365/* Deal with
366 * Nat consumption vector
367 * Parameter:
368 * vadr: ignored when t == REGISTER
369 */
370static void _nat_consumption_fault(struct kvm_vcpu *vcpu, u64 vadr,
371 enum tlb_miss_type t)
372{
373 /* If vPSR.ic && t == DATA/INST, IFA */
374 if (t == DATA || t == INSTRUCTION) {
375 /* IFA */
376 set_ifa_itir_iha(vcpu, vadr, 1, 0, 0);
377 }
378
379 inject_guest_interruption(vcpu, IA64_NAT_CONSUMPTION_VECTOR);
380}
381
382/*
383 * Instruction Nat Page Consumption Fault
384 * @ Nat Consumption Vector
385 * Refer to SDM Vol2 Table 5-6 & 8-1
386 */
387void inat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr)
388{
389 _nat_consumption_fault(vcpu, vadr, INSTRUCTION);
390}
391
392/*
393 * Register Nat Consumption Fault
394 * @ Nat Consumption Vector
395 * Refer to SDM Vol2 Table 5-6 & 8-1
396 */
397void rnat_consumption(struct kvm_vcpu *vcpu)
398{
399 _nat_consumption_fault(vcpu, 0, REGISTER);
400}
401
402/*
403 * Data Nat Page Consumption Fault
404 * @ Nat Consumption Vector
405 * Refer to SDM Vol2 Table 5-6 & 8-1
406 */
407void dnat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr)
408{
409 _nat_consumption_fault(vcpu, vadr, DATA);
410}
411
412/* Deal with
413 * Page not present vector
414 */
415static void __page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
416{
417 /* If vPSR.ic, IFA, ITIR */
418 set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
419 inject_guest_interruption(vcpu, IA64_PAGE_NOT_PRESENT_VECTOR);
420}
421
422
423void data_page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
424{
425 __page_not_present(vcpu, vadr);
426}
427
428
429void inst_page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
430{
431 __page_not_present(vcpu, vadr);
432}
433
434
435/* Deal with
436 * Data access rights vector
437 */
438void data_access_rights(struct kvm_vcpu *vcpu, u64 vadr)
439{
440 /* If vPSR.ic, IFA, ITIR */
441 set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
442 inject_guest_interruption(vcpu, IA64_DATA_ACCESS_RIGHTS_VECTOR);
443}
444
445fpswa_ret_t vmm_fp_emulate(int fp_fault, void *bundle, unsigned long *ipsr,
446 unsigned long *fpsr, unsigned long *isr, unsigned long *pr,
447 unsigned long *ifs, struct kvm_pt_regs *regs)
448{
449 fp_state_t fp_state;
450 fpswa_ret_t ret;
451 struct kvm_vcpu *vcpu = current_vcpu;
452
453 uint64_t old_rr7 = ia64_get_rr(7UL<<61);
454
455 if (!vmm_fpswa_interface)
456 return (fpswa_ret_t) {-1, 0, 0, 0};
457
458 /*
459 * Just let the fpswa driver use the hardware fp registers.
460 * No fp register state is valid in memory.
461 */
462 memset(&fp_state, 0, sizeof(fp_state_t));
463
464 /*
465 * unsigned long (*EFI_FPSWA) (
466 * unsigned long trap_type,
467 * void *Bundle,
468 * unsigned long *pipsr,
469 * unsigned long *pfsr,
470 * unsigned long *pisr,
471 * unsigned long *ppreds,
472 * unsigned long *pifs,
473 * void *fp_state);
474 */
475 /* Call the host fpswa interface directly to virtualize
476 * the guest fpswa request.
477 */
478 ia64_set_rr(7UL << 61, vcpu->arch.host.rr[7]);
479 ia64_srlz_d();
480
481 ret = (*vmm_fpswa_interface->fpswa) (fp_fault, bundle,
482 ipsr, fpsr, isr, pr, ifs, &fp_state);
483 ia64_set_rr(7UL << 61, old_rr7);
484 ia64_srlz_d();
485 return ret;
486}
487
488/*
489 * Handle floating-point assist faults and traps for domain.
490 */
491unsigned long vmm_handle_fpu_swa(int fp_fault, struct kvm_pt_regs *regs,
492 unsigned long isr)
493{
494 struct kvm_vcpu *v = current_vcpu;
495 IA64_BUNDLE bundle;
496 unsigned long fault_ip;
497 fpswa_ret_t ret;
498
499 fault_ip = regs->cr_iip;
500 /*
501 * When an FP trap occurs, the trapping instruction has completed.
502 * If ipsr.ri == 0, the trapping instruction is in the previous
503 * bundle.
504 */
505 if (!fp_fault && (ia64_psr(regs)->ri == 0))
506 fault_ip -= 16;
507
508 if (fetch_code(v, fault_ip, &bundle))
509 return -EAGAIN;
510
511 if (!bundle.i64[0] && !bundle.i64[1])
512 return -EACCES;
513
514 ret = vmm_fp_emulate(fp_fault, &bundle, &regs->cr_ipsr, &regs->ar_fpsr,
515 &isr, &regs->pr, &regs->cr_ifs, regs);
516 return ret.status;
517}
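/*
 * Return convention used by the callers in reflect_interruption():
 * 0 means the fpswa handler emulated the instruction, -EAGAIN means
 * the guest bundle could not be fetched (retry after adjusting iip),
 * -EACCES means an empty bundle was read; anything else is the raw
 * fpswa status.
 */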
518
519void reflect_interruption(u64 ifa, u64 isr, u64 iim,
520 u64 vec, struct kvm_pt_regs *regs)
521{
522 u64 vector;
523 int status;
524 struct kvm_vcpu *vcpu = current_vcpu;
525 u64 vpsr = VCPU(vcpu, vpsr);
526
527 vector = vec2off[vec];
528
529 if (!(vpsr & IA64_PSR_IC) && (vector != IA64_DATA_NESTED_TLB_VECTOR)) {
530 panic_vm(vcpu);
531 return;
532 }
533
534 switch (vec) {
535 case 32: /*IA64_FP_FAULT_VECTOR*/
536 status = vmm_handle_fpu_swa(1, regs, isr);
537 if (!status) {
538 vcpu_increment_iip(vcpu);
539 return;
540 } else if (-EAGAIN == status)
541 return;
542 break;
543 case 33: /*IA64_FP_TRAP_VECTOR*/
544 status = vmm_handle_fpu_swa(0, regs, isr);
545 if (!status)
546 return;
547 else if (-EAGAIN == status) {
548 vcpu_decrement_iip(vcpu);
549 return;
550 }
551 break;
552 }
553
554 VCPU(vcpu, isr) = isr;
555 VCPU(vcpu, iipa) = regs->cr_iip;
556 if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR)
557 VCPU(vcpu, iim) = iim;
558 else
559 set_ifa_itir_iha(vcpu, ifa, 1, 1, 1);
560
561 inject_guest_interruption(vcpu, vector);
562}
563
564static void set_pal_call_data(struct kvm_vcpu *vcpu)
565{
566 struct exit_ctl_data *p = &vcpu->arch.exit_data;
567
568 /* FIXME: for both the static and stacked conventions, the
569 * firmware has already put the parameters in gr28-gr31
570 * before breaking to the vmm! */
571
572 p->u.pal_data.gr28 = vcpu_get_gr(vcpu, 28);
573 p->u.pal_data.gr29 = vcpu_get_gr(vcpu, 29);
574 p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30);
575 p->u.pal_data.gr31 = vcpu_get_gr(vcpu, 31);
576 p->exit_reason = EXIT_REASON_PAL_CALL;
577}
578
579static void set_pal_call_result(struct kvm_vcpu *vcpu)
580{
581 struct exit_ctl_data *p = &vcpu->arch.exit_data;
582
583 if (p->exit_reason == EXIT_REASON_PAL_CALL) {
584 vcpu_set_gr(vcpu, 8, p->u.pal_data.ret.status, 0);
585 vcpu_set_gr(vcpu, 9, p->u.pal_data.ret.v0, 0);
586 vcpu_set_gr(vcpu, 10, p->u.pal_data.ret.v1, 0);
587 vcpu_set_gr(vcpu, 11, p->u.pal_data.ret.v2, 0);
588 } else
589 panic_vm(vcpu);
590}
591
592static void set_sal_call_data(struct kvm_vcpu *vcpu)
593{
594 struct exit_ctl_data *p = &vcpu->arch.exit_data;
595
596 p->u.sal_data.in0 = vcpu_get_gr(vcpu, 32);
597 p->u.sal_data.in1 = vcpu_get_gr(vcpu, 33);
598 p->u.sal_data.in2 = vcpu_get_gr(vcpu, 34);
599 p->u.sal_data.in3 = vcpu_get_gr(vcpu, 35);
600 p->u.sal_data.in4 = vcpu_get_gr(vcpu, 36);
601 p->u.sal_data.in5 = vcpu_get_gr(vcpu, 37);
602 p->u.sal_data.in6 = vcpu_get_gr(vcpu, 38);
603 p->u.sal_data.in7 = vcpu_get_gr(vcpu, 39);
604 p->exit_reason = EXIT_REASON_SAL_CALL;
605}
606
607static void set_sal_call_result(struct kvm_vcpu *vcpu)
608{
609 struct exit_ctl_data *p = &vcpu->arch.exit_data;
610
611 if (p->exit_reason == EXIT_REASON_SAL_CALL) {
612 vcpu_set_gr(vcpu, 8, p->u.sal_data.ret.r8, 0);
613 vcpu_set_gr(vcpu, 9, p->u.sal_data.ret.r9, 0);
614 vcpu_set_gr(vcpu, 10, p->u.sal_data.ret.r10, 0);
615 vcpu_set_gr(vcpu, 11, p->u.sal_data.ret.r11, 0);
616 } else
617 panic_vm(vcpu);
618}
619
620void kvm_ia64_handle_break(unsigned long ifa, struct kvm_pt_regs *regs,
621 unsigned long isr, unsigned long iim)
622{
623 struct kvm_vcpu *v = current_vcpu;
624
625 if (ia64_psr(regs)->cpl == 0) {
626 /* Allow hypercalls only when cpl = 0. */
627 if (iim == DOMN_PAL_REQUEST) {
628 set_pal_call_data(v);
629 vmm_transition(v);
630 set_pal_call_result(v);
631 vcpu_increment_iip(v);
632 return;
633 } else if (iim == DOMN_SAL_REQUEST) {
634 set_sal_call_data(v);
635 vmm_transition(v);
636 set_sal_call_result(v);
637 vcpu_increment_iip(v);
638 return;
639 }
640 }
641 reflect_interruption(ifa, isr, iim, 11, regs);
642}
643
644void check_pending_irq(struct kvm_vcpu *vcpu)
645{
646 int mask, h_pending, h_inservice;
647 u64 isr;
648 unsigned long vpsr;
649 struct kvm_pt_regs *regs = vcpu_regs(vcpu);
650
651 h_pending = highest_pending_irq(vcpu);
652 if (h_pending == NULL_VECTOR) {
653 update_vhpi(vcpu, NULL_VECTOR);
654 return;
655 }
656 h_inservice = highest_inservice_irq(vcpu);
657
658 vpsr = VCPU(vcpu, vpsr);
659 mask = irq_masked(vcpu, h_pending, h_inservice);
660 if ((vpsr & IA64_PSR_I) && IRQ_NO_MASKED == mask) {
661 isr = vpsr & IA64_PSR_RI;
662 update_vhpi(vcpu, h_pending);
663 reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */
664 } else if (mask == IRQ_MASKED_BY_INSVC) {
665 if (VCPU(vcpu, vhpi))
666 update_vhpi(vcpu, NULL_VECTOR);
667 } else {
668 /* masked by vpsr.i or vtpr.*/
669 update_vhpi(vcpu, h_pending);
670 }
671}
672
673static void generate_exirq(struct kvm_vcpu *vcpu)
674{
675 unsigned long vpsr; /* 64-bit: IA64_PSR_RI lives above bit 32 */
676 uint64_t isr;
677
678 struct kvm_pt_regs *regs = vcpu_regs(vcpu);
679
680 vpsr = VCPU(vcpu, vpsr);
681 isr = vpsr & IA64_PSR_RI;
682 if (!(vpsr & IA64_PSR_IC))
683 panic_vm(vcpu);
684 reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */
685}
686
687void vhpi_detection(struct kvm_vcpu *vcpu)
688{
689 uint64_t threshold, vhpi;
690 union ia64_tpr vtpr;
691 struct ia64_psr vpsr;
692
693 vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
694 vtpr.val = VCPU(vcpu, tpr);
695
696 threshold = ((!vpsr.i) << 5) | (vtpr.mmi << 4) | vtpr.mic;
697 vhpi = VCPU(vcpu, vhpi);
698 if (vhpi > threshold) {
699 /* interrupt activated */
700 generate_exirq(vcpu);
701 }
702}
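/*
 * Worked example for the threshold above (illustrative): with
 * vpsr.i = 1, vtpr.mmi = 0 and vtpr.mic = 3, the threshold is 3 and a
 * pending vhpi of 4 or more raises the external interrupt; with
 * vpsr.i = 0 the (!vpsr.i) << 5 term pushes the threshold to 32 or
 * more, holding delivery off until the guest re-enables interrupts.
 */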
703
704
705void leave_hypervisor_tail(void)
706{
707 struct kvm_vcpu *v = current_vcpu;
708
709 if (VMX(v, timer_check)) {
710 VMX(v, timer_check) = 0;
711 if (VMX(v, itc_check)) {
712 if (vcpu_get_itc(v) > VCPU(v, itm)) {
713 if (!(VCPU(v, itv) & (1 << 16))) {
714 vcpu_pend_interrupt(v, VCPU(v, itv)
715 & 0xff);
716 VMX(v, itc_check) = 0;
717 } else {
718 v->arch.timer_pending = 1;
719 }
720 VMX(v, last_itc) = VCPU(v, itm) + 1;
721 }
722 }
723 }
724
725 rmb();
726 if (v->arch.irq_new_pending) {
727 v->arch.irq_new_pending = 0;
728 VMX(v, irq_check) = 0;
729 check_pending_irq(v);
730 return;
731 }
732 if (VMX(v, irq_check)) {
733 VMX(v, irq_check) = 0;
734 vhpi_detection(v);
735 }
736}
737
738
739static inline void handle_lds(struct kvm_pt_regs *regs)
740{
741 regs->cr_ipsr |= IA64_PSR_ED;
742}
743
744void physical_tlb_miss(struct kvm_vcpu *vcpu, unsigned long vadr, int type)
745{
746 unsigned long pte;
747 union ia64_rr rr;
748
749 rr.val = ia64_get_rr(vadr);
750 pte = vadr & _PAGE_PPN_MASK;
751 pte = pte | PHY_PAGE_WB;
752 thash_vhpt_insert(vcpu, pte, (u64)(rr.ps << 2), vadr, type);
753 return;
754}
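/*
 * The pte built above is an identity mapping: the guest "physical"
 * address supplies the ppn directly (vadr & _PAGE_PPN_MASK), and
 * PHY_PAGE_WB presumably contributes the present/attribute bits with
 * writeback memory semantics, so physical-mode accesses are served
 * straight out of the VHPT.
 */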
755
756void kvm_page_fault(u64 vadr , u64 vec, struct kvm_pt_regs *regs)
757{
758 unsigned long vpsr;
759 int type;
760
761 u64 vhpt_adr, gppa, pteval, rr, itir;
762 union ia64_isr misr;
763 union ia64_pta vpta;
764 struct thash_data *data;
765 struct kvm_vcpu *v = current_vcpu;
766
767 vpsr = VCPU(v, vpsr);
768 misr.val = VMX(v, cr_isr);
769
770 type = vec;
771
772 if (is_physical_mode(v) && (!(vadr << 1 >> 62))) {
773 if (vec == 2) {
774 if (__gpfn_is_io((vadr << 1) >> (PAGE_SHIFT + 1))) {
775 emulate_io_inst(v, ((vadr << 1) >> 1), 4);
776 return;
777 }
778 }
779 physical_tlb_miss(v, vadr, type);
780 return;
781 }
782 data = vtlb_lookup(v, vadr, type);
783 if (data != 0) {
784 if (type == D_TLB) {
785 gppa = (vadr & ((1UL << data->ps) - 1))
786 + (data->ppn >> (data->ps - 12) << data->ps);
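			/*
			 * Illustrative: with data->ps = 14 (16KB pages) this is
			 * gppa = (vadr & 0x3fff) + ((data->ppn >> 2) << 14);
			 * ppn is kept in 4KB units, hence the (ps - 12) rescale
			 * before shifting back up to a byte address.
			 */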
787 if (__gpfn_is_io(gppa >> PAGE_SHIFT)) {
788 if (data->pl >= ((regs->cr_ipsr >>
789 IA64_PSR_CPL0_BIT) & 3))
790 emulate_io_inst(v, gppa, data->ma);
791 else {
792 vcpu_set_isr(v, misr.val);
793 data_access_rights(v, vadr);
794 }
795 return ;
796 }
797 }
798 thash_vhpt_insert(v, data->page_flags, data->itir, vadr, type);
799
800 } else if (type == D_TLB) {
801 if (misr.sp) {
802 handle_lds(regs);
803 return;
804 }
805
806 rr = vcpu_get_rr(v, vadr);
807 itir = rr & (RR_RID_MASK | RR_PS_MASK);
808
809 if (!vhpt_enabled(v, vadr, misr.rs ? RSE_REF : DATA_REF)) {
810 if (vpsr & IA64_PSR_IC) {
811 vcpu_set_isr(v, misr.val);
812 alt_dtlb(v, vadr);
813 } else {
814 nested_dtlb(v);
815 }
816 return ;
817 }
818
819 vpta.val = vcpu_get_pta(v);
820 /* avoid recursively walking (short format) VHPT */
821
822 vhpt_adr = vcpu_thash(v, vadr);
823 if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
824 /* VHPT successfully read. */
825 if (!(pteval & _PAGE_P)) {
826 if (vpsr & IA64_PSR_IC) {
827 vcpu_set_isr(v, misr.val);
828 dtlb_fault(v, vadr);
829 } else {
830 nested_dtlb(v);
831 }
832 } else if ((pteval & _PAGE_MA_MASK) != _PAGE_MA_ST) {
833 thash_purge_and_insert(v, pteval, itir,
834 vadr, D_TLB);
835 } else if (vpsr & IA64_PSR_IC) {
836 vcpu_set_isr(v, misr.val);
837 dtlb_fault(v, vadr);
838 } else {
839 nested_dtlb(v);
840 }
841 } else {
842 /* Can't read VHPT. */
843 if (vpsr & IA64_PSR_IC) {
844 vcpu_set_isr(v, misr.val);
845 dvhpt_fault(v, vadr);
846 } else {
847 nested_dtlb(v);
848 }
849 }
850 } else if (type == I_TLB) {
851 if (!(vpsr & IA64_PSR_IC))
852 misr.ni = 1;
853 if (!vhpt_enabled(v, vadr, INST_REF)) {
854 vcpu_set_isr(v, misr.val);
855 alt_itlb(v, vadr);
856 return;
857 }
858
859 vpta.val = vcpu_get_pta(v);
860
861 vhpt_adr = vcpu_thash(v, vadr);
862 if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
863 /* VHPT successfully read. */
864 if (pteval & _PAGE_P) {
865 if ((pteval & _PAGE_MA_MASK) == _PAGE_MA_ST) {
866 vcpu_set_isr(v, misr.val);
867 itlb_fault(v, vadr);
868 return ;
869 }
870 rr = vcpu_get_rr(v, vadr);
871 itir = rr & (RR_RID_MASK | RR_PS_MASK);
872 thash_purge_and_insert(v, pteval, itir,
873 vadr, I_TLB);
874 } else {
875 vcpu_set_isr(v, misr.val);
876 inst_page_not_present(v, vadr);
877 }
878 } else {
879 vcpu_set_isr(v, misr.val);
880 ivhpt_fault(v, vadr);
881 }
882 }
883}
884
885void kvm_vexirq(struct kvm_vcpu *vcpu)
886{
887 u64 vpsr, isr;
888 struct kvm_pt_regs *regs;
889
890 regs = vcpu_regs(vcpu);
891 vpsr = VCPU(vcpu, vpsr);
892 isr = vpsr & IA64_PSR_RI;
893 reflect_interruption(0, isr, 0, 12, regs); /*EXT IRQ*/
894}
895
896void kvm_ia64_handle_irq(struct kvm_vcpu *v)
897{
898 struct exit_ctl_data *p = &v->arch.exit_data;
899 long psr;
900
901 local_irq_save(psr);
902 p->exit_reason = EXIT_REASON_EXTERNAL_INTERRUPT;
903 vmm_transition(v);
904 local_irq_restore(psr);
905
906 VMX(v, timer_check) = 1;
907
908}
909
910static void ptc_ga_remote_func(struct kvm_vcpu *v, int pos)
911{
912 u64 oldrid, moldrid, oldpsbits, vaddr;
913 struct kvm_ptc_g *p = &v->arch.ptc_g_data[pos];
914 vaddr = p->vaddr;
915
916 oldrid = VMX(v, vrr[0]);
917 VMX(v, vrr[0]) = p->rr;
918 oldpsbits = VMX(v, psbits[0]);
919 VMX(v, psbits[0]) = VMX(v, psbits[REGION_NUMBER(vaddr)]);
920 moldrid = ia64_get_rr(0x0);
921 ia64_set_rr(0x0, vrrtomrr(p->rr));
922 ia64_srlz_d();
923
924 vaddr = PAGEALIGN(vaddr, p->ps);
925 thash_purge_entries_remote(v, vaddr, p->ps);
926
927 VMX(v, vrr[0]) = oldrid;
928 VMX(v, psbits[0]) = oldpsbits;
929 ia64_set_rr(0x0, moldrid);
930 ia64_dv_serialize_data();
931}
932
933static void vcpu_do_resume(struct kvm_vcpu *vcpu)
934{
935 /* Re-initialize VHPT and VTLB on resume */
936 vcpu->arch.vhpt.num = VHPT_NUM_ENTRIES;
937 thash_init(&vcpu->arch.vhpt, VHPT_SHIFT);
938 vcpu->arch.vtlb.num = VTLB_NUM_ENTRIES;
939 thash_init(&vcpu->arch.vtlb, VTLB_SHIFT);
940
941 ia64_set_pta(vcpu->arch.vhpt.pta.val);
942}
943
944static void kvm_do_resume_op(struct kvm_vcpu *vcpu)
945{
946 if (test_and_clear_bit(KVM_REQ_RESUME, &vcpu->requests)) {
947 vcpu_do_resume(vcpu);
948 return;
949 }
950
951 if (unlikely(test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))) {
952 thash_purge_all(vcpu);
953 return;
954 }
955
956 if (test_and_clear_bit(KVM_REQ_PTC_G, &vcpu->requests)) {
957 while (vcpu->arch.ptc_g_count > 0)
958 ptc_ga_remote_func(vcpu, --vcpu->arch.ptc_g_count);
959 }
960}
961
962void vmm_transition(struct kvm_vcpu *vcpu)
963{
964 ia64_call_vsa(PAL_VPS_SAVE, (unsigned long)vcpu->arch.vpd,
965 0, 0, 0, 0, 0, 0);
966 vmm_trampoline(&vcpu->arch.guest, &vcpu->arch.host);
967 ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)vcpu->arch.vpd,
968 0, 0, 0, 0, 0, 0);
969 kvm_do_resume_op(vcpu);
970}
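/*
 * Transition summary: PAL_VPS_SAVE has the VSA stash the guest VPD
 * state, vmm_trampoline() (trampoline.S) swaps the full register
 * context from the guest-side vmm to the host, and PAL_VPS_RESTORE
 * brings the guest VPD state back on return; kvm_do_resume_op() then
 * services whatever the host side requested (resume, TLB flush, or
 * remote ptc.g).
 */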
diff --git a/arch/ia64/kvm/trampoline.S b/arch/ia64/kvm/trampoline.S
new file mode 100644
index 000000000000..30897d44d61e
--- /dev/null
+++ b/arch/ia64/kvm/trampoline.S
@@ -0,0 +1,1038 @@
1/* Save all processor states
2 *
3 * Copyright (c) 2007 Fleming Feng <fleming.feng@intel.com>
4 * Copyright (c) 2007 Anthony Xu <anthony.xu@intel.com>
5 */
6
7#include <asm/asmmacro.h>
8#include "asm-offsets.h"
9
10
11#define CTX(name) VMM_CTX_##name##_OFFSET
12
13 /*
14 * r32: context_t base address
15 */
16#define SAVE_BRANCH_REGS \
17 add r2 = CTX(B0),r32; \
18 add r3 = CTX(B1),r32; \
19 mov r16 = b0; \
20 mov r17 = b1; \
21 ;; \
22 st8 [r2]=r16,16; \
23 st8 [r3]=r17,16; \
24 ;; \
25 mov r16 = b2; \
26 mov r17 = b3; \
27 ;; \
28 st8 [r2]=r16,16; \
29 st8 [r3]=r17,16; \
30 ;; \
31 mov r16 = b4; \
32 mov r17 = b5; \
33 ;; \
34 st8 [r2]=r16; \
35 st8 [r3]=r17; \
36 ;;
37
38 /*
39 * r33: context_t base address
40 */
41#define RESTORE_BRANCH_REGS \
42 add r2 = CTX(B0),r33; \
43 add r3 = CTX(B1),r33; \
44 ;; \
45 ld8 r16=[r2],16; \
46 ld8 r17=[r3],16; \
47 ;; \
48 mov b0 = r16; \
49 mov b1 = r17; \
50 ;; \
51 ld8 r16=[r2],16; \
52 ld8 r17=[r3],16; \
53 ;; \
54 mov b2 = r16; \
55 mov b3 = r17; \
56 ;; \
57 ld8 r16=[r2]; \
58 ld8 r17=[r3]; \
59 ;; \
60 mov b4=r16; \
61 mov b5=r17; \
62 ;;
63
64
65 /*
66 * r32: context_t base address
67 * bsw == 1
68 * Save all bank1 general registers, r4 ~ r7
69 */
70#define SAVE_GENERAL_REGS \
71 add r2=CTX(R4),r32; \
72 add r3=CTX(R5),r32; \
73 ;; \
74.mem.offset 0,0; \
75 st8.spill [r2]=r4,16; \
76.mem.offset 8,0; \
77 st8.spill [r3]=r5,16; \
78 ;; \
79.mem.offset 0,0; \
80 st8.spill [r2]=r6,48; \
81.mem.offset 8,0; \
82 st8.spill [r3]=r7,48; \
83 ;; \
84.mem.offset 0,0; \
85 st8.spill [r2]=r12; \
86.mem.offset 8,0; \
87 st8.spill [r3]=r13; \
88 ;;
89
90 /*
91 * r33: context_t base address
92 * bsw == 1
93 */
94#define RESTORE_GENERAL_REGS \
95 add r2=CTX(R4),r33; \
96 add r3=CTX(R5),r33; \
97 ;; \
98 ld8.fill r4=[r2],16; \
99 ld8.fill r5=[r3],16; \
100 ;; \
101 ld8.fill r6=[r2],48; \
102 ld8.fill r7=[r3],48; \
103 ;; \
104 ld8.fill r12=[r2]; \
105 ld8.fill r13 =[r3]; \
106 ;;
107
108
109
110
111 /*
112 * r32: context_t base address
113 */
114#define SAVE_KERNEL_REGS \
115 add r2 = CTX(KR0),r32; \
116 add r3 = CTX(KR1),r32; \
117 mov r16 = ar.k0; \
118 mov r17 = ar.k1; \
119 ;; \
120 st8 [r2] = r16,16; \
121 st8 [r3] = r17,16; \
122 ;; \
123 mov r16 = ar.k2; \
124 mov r17 = ar.k3; \
125 ;; \
126 st8 [r2] = r16,16; \
127 st8 [r3] = r17,16; \
128 ;; \
129 mov r16 = ar.k4; \
130 mov r17 = ar.k5; \
131 ;; \
132 st8 [r2] = r16,16; \
133 st8 [r3] = r17,16; \
134 ;; \
135 mov r16 = ar.k6; \
136 mov r17 = ar.k7; \
137 ;; \
138 st8 [r2] = r16; \
139 st8 [r3] = r17; \
140 ;;
141
142
143
144 /*
145 * r33: context_t base address
146 */
147#define RESTORE_KERNEL_REGS \
148 add r2 = CTX(KR0),r33; \
149 add r3 = CTX(KR1),r33; \
150 ;; \
151 ld8 r16=[r2],16; \
152 ld8 r17=[r3],16; \
153 ;; \
154 mov ar.k0=r16; \
155 mov ar.k1=r17; \
156 ;; \
157 ld8 r16=[r2],16; \
158 ld8 r17=[r3],16; \
159 ;; \
160 mov ar.k2=r16; \
161 mov ar.k3=r17; \
162 ;; \
163 ld8 r16=[r2],16; \
164 ld8 r17=[r3],16; \
165 ;; \
166 mov ar.k4=r16; \
167 mov ar.k5=r17; \
168 ;; \
169 ld8 r16=[r2],16; \
170 ld8 r17=[r3],16; \
171 ;; \
172 mov ar.k6=r16; \
173 mov ar.k7=r17; \
174 ;;
175
176
177
178 /*
179 * r32: context_t base address
180 */
181#define SAVE_APP_REGS \
182 add r2 = CTX(BSPSTORE),r32; \
183 mov r16 = ar.bspstore; \
184 ;; \
185 st8 [r2] = r16,CTX(RNAT)-CTX(BSPSTORE);\
186 mov r16 = ar.rnat; \
187 ;; \
188 st8 [r2] = r16,CTX(FCR)-CTX(RNAT); \
189 mov r16 = ar.fcr; \
190 ;; \
191 st8 [r2] = r16,CTX(EFLAG)-CTX(FCR); \
192 mov r16 = ar.eflag; \
193 ;; \
194 st8 [r2] = r16,CTX(CFLG)-CTX(EFLAG); \
195 mov r16 = ar.cflg; \
196 ;; \
197 st8 [r2] = r16,CTX(FSR)-CTX(CFLG); \
198 mov r16 = ar.fsr; \
199 ;; \
200 st8 [r2] = r16,CTX(FIR)-CTX(FSR); \
201 mov r16 = ar.fir; \
202 ;; \
203 st8 [r2] = r16,CTX(FDR)-CTX(FIR); \
204 mov r16 = ar.fdr; \
205 ;; \
206 st8 [r2] = r16,CTX(UNAT)-CTX(FDR); \
207 mov r16 = ar.unat; \
208 ;; \
209 st8 [r2] = r16,CTX(FPSR)-CTX(UNAT); \
210 mov r16 = ar.fpsr; \
211 ;; \
212 st8 [r2] = r16,CTX(PFS)-CTX(FPSR); \
213 mov r16 = ar.pfs; \
214 ;; \
215 st8 [r2] = r16,CTX(LC)-CTX(PFS); \
216 mov r16 = ar.lc; \
217 ;; \
218 st8 [r2] = r16; \
219 ;;
220
221 /*
222 * r33: context_t base address
223 */
224#define RESTORE_APP_REGS \
225 add r2=CTX(BSPSTORE),r33; \
226 ;; \
227 ld8 r16=[r2],CTX(RNAT)-CTX(BSPSTORE); \
228 ;; \
229 mov ar.bspstore=r16; \
230 ld8 r16=[r2],CTX(FCR)-CTX(RNAT); \
231 ;; \
232 mov ar.rnat=r16; \
233 ld8 r16=[r2],CTX(EFLAG)-CTX(FCR); \
234 ;; \
235 mov ar.fcr=r16; \
236 ld8 r16=[r2],CTX(CFLG)-CTX(EFLAG); \
237 ;; \
238 mov ar.eflag=r16; \
239 ld8 r16=[r2],CTX(FSR)-CTX(CFLG); \
240 ;; \
241 mov ar.cflg=r16; \
242 ld8 r16=[r2],CTX(FIR)-CTX(FSR); \
243 ;; \
244 mov ar.fsr=r16; \
245 ld8 r16=[r2],CTX(FDR)-CTX(FIR); \
246 ;; \
247 mov ar.fir=r16; \
248 ld8 r16=[r2],CTX(UNAT)-CTX(FDR); \
249 ;; \
250 mov ar.fdr=r16; \
251 ld8 r16=[r2],CTX(FPSR)-CTX(UNAT); \
252 ;; \
253 mov ar.unat=r16; \
254 ld8 r16=[r2],CTX(PFS)-CTX(FPSR); \
255 ;; \
256 mov ar.fpsr=r16; \
257 ld8 r16=[r2],CTX(LC)-CTX(PFS); \
258 ;; \
259 mov ar.pfs=r16; \
260 ld8 r16=[r2]; \
261 ;; \
262 mov ar.lc=r16; \
263 ;;
264
265 /*
266 * r32: context_t base address
267 */
268#define SAVE_CTL_REGS \
269 add r2 = CTX(DCR),r32; \
270 mov r16 = cr.dcr; \
271 ;; \
272 st8 [r2] = r16,CTX(IVA)-CTX(DCR); \
273 ;; \
274 mov r16 = cr.iva; \
275 ;; \
276 st8 [r2] = r16,CTX(PTA)-CTX(IVA); \
277 ;; \
278 mov r16 = cr.pta; \
279 ;; \
280 st8 [r2] = r16 ; \
281 ;;
282
283 /*
284 * r33: context_t base address
285 */
286#define RESTORE_CTL_REGS \
287 add r2 = CTX(DCR),r33; \
288 ;; \
289 ld8 r16 = [r2],CTX(IVA)-CTX(DCR); \
290 ;; \
291 mov cr.dcr = r16; \
292 dv_serialize_data; \
293 ;; \
294 ld8 r16 = [r2],CTX(PTA)-CTX(IVA); \
295 ;; \
296 mov cr.iva = r16; \
297 dv_serialize_data; \
298 ;; \
299 ld8 r16 = [r2]; \
300 ;; \
301 mov cr.pta = r16; \
302 dv_serialize_data; \
303 ;;
304
305
306 /*
307 * r32: context_t base address
308 */
309#define SAVE_REGION_REGS \
310 add r2=CTX(RR0),r32; \
311 mov r16=rr[r0]; \
312 dep.z r18=1,61,3; \
313 ;; \
314 st8 [r2]=r16,8; \
315 mov r17=rr[r18]; \
316 dep.z r18=2,61,3; \
317 ;; \
318 st8 [r2]=r17,8; \
319 mov r16=rr[r18]; \
320 dep.z r18=3,61,3; \
321 ;; \
322 st8 [r2]=r16,8; \
323 mov r17=rr[r18]; \
324 dep.z r18=4,61,3; \
325 ;; \
326 st8 [r2]=r17,8; \
327 mov r16=rr[r18]; \
328 dep.z r18=5,61,3; \
329 ;; \
330 st8 [r2]=r16,8; \
331 mov r17=rr[r18]; \
332 dep.z r18=7,61,3; \
333 ;; \
334 st8 [r2]=r17,16; \
335 mov r16=rr[r18]; \
336 ;; \
337 st8 [r2]=r16,8; \
338 ;;
339
340 /*
341 * r33:context_t base address
342 */
343#define RESTORE_REGION_REGS \
344 add r2=CTX(RR0),r33;\
345 mov r18=r0; \
346 ;; \
347 ld8 r20=[r2],8; \
348 ;; /* rr0 */ \
349 ld8 r21=[r2],8; \
350 ;; /* rr1 */ \
351 ld8 r22=[r2],8; \
352 ;; /* rr2 */ \
353 ld8 r23=[r2],8; \
354 ;; /* rr3 */ \
355 ld8 r24=[r2],8; \
356 ;; /* rr4 */ \
357 ld8 r25=[r2],16; \
358 ;; /* rr5 */ \
359 ld8 r27=[r2]; \
360 ;; /* rr7 */ \
361 mov rr[r18]=r20; \
362 dep.z r18=1,61,3; \
363 ;; /* rr1 */ \
364 mov rr[r18]=r21; \
365 dep.z r18=2,61,3; \
366 ;; /* rr2 */ \
367 mov rr[r18]=r22; \
368 dep.z r18=3,61,3; \
369 ;; /* rr3 */ \
370 mov rr[r18]=r23; \
371 dep.z r18=4,61,3; \
372 ;; /* rr4 */ \
373 mov rr[r18]=r24; \
374 dep.z r18=5,61,3; \
375 ;; /* rr5 */ \
376 mov rr[r18]=r25; \
377 dep.z r18=7,61,3; \
378 ;; /* rr7 */ \
379 mov rr[r18]=r27; \
380 ;; \
381 srlz.i; \
382 ;;
383
384
385
386 /*
387 * r32: context_t base address
388 * r2, r3, r16-r18: scratch registers
389 */
390#define SAVE_DEBUG_REGS \
391 add r2=CTX(IBR0),r32; \
392 add r3=CTX(DBR0),r32; \
393 mov r16=ibr[r0]; \
394 mov r17=dbr[r0]; \
395 ;; \
396 st8 [r2]=r16,8; \
397 st8 [r3]=r17,8; \
398 add r18=1,r0; \
399 ;; \
400 mov r16=ibr[r18]; \
401 mov r17=dbr[r18]; \
402 ;; \
403 st8 [r2]=r16,8; \
404 st8 [r3]=r17,8; \
405 add r18=2,r0; \
406 ;; \
407 mov r16=ibr[r18]; \
408 mov r17=dbr[r18]; \
409 ;; \
410 st8 [r2]=r16,8; \
411 st8 [r3]=r17,8; \
419 add r18=3,r0; \
420 ;; \
421 mov r16=ibr[r18]; \
422 mov r17=dbr[r18]; \
423 ;; \
424 st8 [r2]=r16,8; \
425 st8 [r3]=r17,8; \
426 add r18=4,r0; \
427 ;; \
428 mov r16=ibr[r18]; \
429 mov r17=dbr[r18]; \
430 ;; \
431 st8 [r2]=r16,8; \
432 st8 [r3]=r17,8; \
433 add r18=5,r0; \
434 ;; \
435 mov r16=ibr[r18]; \
436 mov r17=dbr[r18]; \
437 ;; \
438 st8 [r2]=r16,8; \
439 st8 [r3]=r17,8; \
440 add r18=6,r0; \
441 ;; \
442 mov r16=ibr[r18]; \
443 mov r17=dbr[r18]; \
444 ;; \
445 st8 [r2]=r16,8; \
446 st8 [r3]=r17,8; \
447 add r18=7,r0; \
448 ;; \
449 mov r16=ibr[r18]; \
450 mov r17=dbr[r18]; \
451 ;; \
452 st8 [r2]=r16,8; \
453 st8 [r3]=r17,8; \
454 ;;
455
456
457/*
458 * r33: points to the context_t structure
459 * ar.lc is clobbered.
460 */
461#define RESTORE_DEBUG_REGS \
462 add r2=CTX(IBR0),r33; \
463 add r3=CTX(DBR0),r33; \
464 mov r16=7; \
465 mov r17=r0; \
466 ;; \
467 mov ar.lc = r16; \
468 ;; \
4691: \
470 ld8 r18=[r2],8; \
471 ld8 r19=[r3],8; \
472 ;; \
473 mov ibr[r17]=r18; \
474 mov dbr[r17]=r19; \
475 ;; \
476 srlz.i; \
477 ;; \
478 add r17=1,r17; \
479 br.cloop.sptk 1b; \
480 ;;
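/*
 * Loop note: with ar.lc = 7, br.cloop executes the body eight times
 * (lc counts down to zero), restoring ibr[0]-ibr[7] and dbr[0]-dbr[7].
 * SAVE_PTK_REGS and RESTORE_PTK_REGS below reuse the same counted-loop
 * pattern for the eight protection key registers.
 */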
481
482
483 /*
484 * r32: context_t base address
485 */
486#define SAVE_FPU_LOW \
487 add r2=CTX(F2),r32; \
488 add r3=CTX(F3),r32; \
489 ;; \
490 stf.spill.nta [r2]=f2,32; \
491 stf.spill.nta [r3]=f3,32; \
492 ;; \
493 stf.spill.nta [r2]=f4,32; \
494 stf.spill.nta [r3]=f5,32; \
495 ;; \
496 stf.spill.nta [r2]=f6,32; \
497 stf.spill.nta [r3]=f7,32; \
498 ;; \
499 stf.spill.nta [r2]=f8,32; \
500 stf.spill.nta [r3]=f9,32; \
501 ;; \
502 stf.spill.nta [r2]=f10,32; \
503 stf.spill.nta [r3]=f11,32; \
504 ;; \
505 stf.spill.nta [r2]=f12,32; \
506 stf.spill.nta [r3]=f13,32; \
507 ;; \
508 stf.spill.nta [r2]=f14,32; \
509 stf.spill.nta [r3]=f15,32; \
510 ;; \
511 stf.spill.nta [r2]=f16,32; \
512 stf.spill.nta [r3]=f17,32; \
513 ;; \
514 stf.spill.nta [r2]=f18,32; \
515 stf.spill.nta [r3]=f19,32; \
516 ;; \
517 stf.spill.nta [r2]=f20,32; \
518 stf.spill.nta [r3]=f21,32; \
519 ;; \
520 stf.spill.nta [r2]=f22,32; \
521 stf.spill.nta [r3]=f23,32; \
522 ;; \
523 stf.spill.nta [r2]=f24,32; \
524 stf.spill.nta [r3]=f25,32; \
525 ;; \
526 stf.spill.nta [r2]=f26,32; \
527 stf.spill.nta [r3]=f27,32; \
528 ;; \
529 stf.spill.nta [r2]=f28,32; \
530 stf.spill.nta [r3]=f29,32; \
531 ;; \
532 stf.spill.nta [r2]=f30; \
533 stf.spill.nta [r3]=f31; \
534 ;;
535
536 /*
537 * r32: context_t base address
538 */
539#define SAVE_FPU_HIGH \
540 add r2=CTX(F32),r32; \
541 add r3=CTX(F33),r32; \
542 ;; \
543 stf.spill.nta [r2]=f32,32; \
544 stf.spill.nta [r3]=f33,32; \
545 ;; \
546 stf.spill.nta [r2]=f34,32; \
547 stf.spill.nta [r3]=f35,32; \
548 ;; \
549 stf.spill.nta [r2]=f36,32; \
550 stf.spill.nta [r3]=f37,32; \
551 ;; \
552 stf.spill.nta [r2]=f38,32; \
553 stf.spill.nta [r3]=f39,32; \
554 ;; \
555 stf.spill.nta [r2]=f40,32; \
556 stf.spill.nta [r3]=f41,32; \
557 ;; \
558 stf.spill.nta [r2]=f42,32; \
559 stf.spill.nta [r3]=f43,32; \
560 ;; \
561 stf.spill.nta [r2]=f44,32; \
562 stf.spill.nta [r3]=f45,32; \
563 ;; \
564 stf.spill.nta [r2]=f46,32; \
565 stf.spill.nta [r3]=f47,32; \
566 ;; \
567 stf.spill.nta [r2]=f48,32; \
568 stf.spill.nta [r3]=f49,32; \
569 ;; \
570 stf.spill.nta [r2]=f50,32; \
571 stf.spill.nta [r3]=f51,32; \
572 ;; \
573 stf.spill.nta [r2]=f52,32; \
574 stf.spill.nta [r3]=f53,32; \
575 ;; \
576 stf.spill.nta [r2]=f54,32; \
577 stf.spill.nta [r3]=f55,32; \
578 ;; \
579 stf.spill.nta [r2]=f56,32; \
580 stf.spill.nta [r3]=f57,32; \
581 ;; \
582 stf.spill.nta [r2]=f58,32; \
583 stf.spill.nta [r3]=f59,32; \
584 ;; \
585 stf.spill.nta [r2]=f60,32; \
586 stf.spill.nta [r3]=f61,32; \
587 ;; \
588 stf.spill.nta [r2]=f62,32; \
589 stf.spill.nta [r3]=f63,32; \
590 ;; \
591 stf.spill.nta [r2]=f64,32; \
592 stf.spill.nta [r3]=f65,32; \
593 ;; \
594 stf.spill.nta [r2]=f66,32; \
595 stf.spill.nta [r3]=f67,32; \
596 ;; \
597 stf.spill.nta [r2]=f68,32; \
598 stf.spill.nta [r3]=f69,32; \
599 ;; \
600 stf.spill.nta [r2]=f70,32; \
601 stf.spill.nta [r3]=f71,32; \
602 ;; \
603 stf.spill.nta [r2]=f72,32; \
604 stf.spill.nta [r3]=f73,32; \
605 ;; \
606 stf.spill.nta [r2]=f74,32; \
607 stf.spill.nta [r3]=f75,32; \
608 ;; \
609 stf.spill.nta [r2]=f76,32; \
610 stf.spill.nta [r3]=f77,32; \
611 ;; \
612 stf.spill.nta [r2]=f78,32; \
613 stf.spill.nta [r3]=f79,32; \
614 ;; \
615 stf.spill.nta [r2]=f80,32; \
616 stf.spill.nta [r3]=f81,32; \
617 ;; \
618 stf.spill.nta [r2]=f82,32; \
619 stf.spill.nta [r3]=f83,32; \
620 ;; \
621 stf.spill.nta [r2]=f84,32; \
622 stf.spill.nta [r3]=f85,32; \
623 ;; \
624 stf.spill.nta [r2]=f86,32; \
625 stf.spill.nta [r3]=f87,32; \
626 ;; \
627 stf.spill.nta [r2]=f88,32; \
628 stf.spill.nta [r3]=f89,32; \
629 ;; \
630 stf.spill.nta [r2]=f90,32; \
631 stf.spill.nta [r3]=f91,32; \
632 ;; \
633 stf.spill.nta [r2]=f92,32; \
634 stf.spill.nta [r3]=f93,32; \
635 ;; \
636 stf.spill.nta [r2]=f94,32; \
637 stf.spill.nta [r3]=f95,32; \
638 ;; \
639 stf.spill.nta [r2]=f96,32; \
640 stf.spill.nta [r3]=f97,32; \
641 ;; \
642 stf.spill.nta [r2]=f98,32; \
643 stf.spill.nta [r3]=f99,32; \
644 ;; \
645 stf.spill.nta [r2]=f100,32; \
646 stf.spill.nta [r3]=f101,32; \
647 ;; \
648 stf.spill.nta [r2]=f102,32; \
649 stf.spill.nta [r3]=f103,32; \
650 ;; \
651 stf.spill.nta [r2]=f104,32; \
652 stf.spill.nta [r3]=f105,32; \
653 ;; \
654 stf.spill.nta [r2]=f106,32; \
655 stf.spill.nta [r3]=f107,32; \
656 ;; \
657 stf.spill.nta [r2]=f108,32; \
658 stf.spill.nta [r3]=f109,32; \
659 ;; \
660 stf.spill.nta [r2]=f110,32; \
661 stf.spill.nta [r3]=f111,32; \
662 ;; \
663 stf.spill.nta [r2]=f112,32; \
664 stf.spill.nta [r3]=f113,32; \
665 ;; \
666 stf.spill.nta [r2]=f114,32; \
667 stf.spill.nta [r3]=f115,32; \
668 ;; \
669 stf.spill.nta [r2]=f116,32; \
670 stf.spill.nta [r3]=f117,32; \
671 ;; \
672 stf.spill.nta [r2]=f118,32; \
673 stf.spill.nta [r3]=f119,32; \
674 ;; \
675 stf.spill.nta [r2]=f120,32; \
676 stf.spill.nta [r3]=f121,32; \
677 ;; \
678 stf.spill.nta [r2]=f122,32; \
679 stf.spill.nta [r3]=f123,32; \
680 ;; \
681 stf.spill.nta [r2]=f124,32; \
682 stf.spill.nta [r3]=f125,32; \
683 ;; \
684 stf.spill.nta [r2]=f126; \
685 stf.spill.nta [r3]=f127; \
686 ;;
687
688 /*
689 * r33: point to context_t structure
690 */
691#define RESTORE_FPU_LOW \
692 add r2 = CTX(F2), r33; \
693 add r3 = CTX(F3), r33; \
694 ;; \
695 ldf.fill.nta f2 = [r2], 32; \
696 ldf.fill.nta f3 = [r3], 32; \
697 ;; \
698 ldf.fill.nta f4 = [r2], 32; \
699 ldf.fill.nta f5 = [r3], 32; \
700 ;; \
701 ldf.fill.nta f6 = [r2], 32; \
702 ldf.fill.nta f7 = [r3], 32; \
703 ;; \
704 ldf.fill.nta f8 = [r2], 32; \
705 ldf.fill.nta f9 = [r3], 32; \
706 ;; \
707 ldf.fill.nta f10 = [r2], 32; \
708 ldf.fill.nta f11 = [r3], 32; \
709 ;; \
710 ldf.fill.nta f12 = [r2], 32; \
711 ldf.fill.nta f13 = [r3], 32; \
712 ;; \
713 ldf.fill.nta f14 = [r2], 32; \
714 ldf.fill.nta f15 = [r3], 32; \
715 ;; \
716 ldf.fill.nta f16 = [r2], 32; \
717 ldf.fill.nta f17 = [r3], 32; \
718 ;; \
719 ldf.fill.nta f18 = [r2], 32; \
720 ldf.fill.nta f19 = [r3], 32; \
721 ;; \
722 ldf.fill.nta f20 = [r2], 32; \
723 ldf.fill.nta f21 = [r3], 32; \
724 ;; \
725 ldf.fill.nta f22 = [r2], 32; \
726 ldf.fill.nta f23 = [r3], 32; \
727 ;; \
728 ldf.fill.nta f24 = [r2], 32; \
729 ldf.fill.nta f25 = [r3], 32; \
730 ;; \
731 ldf.fill.nta f26 = [r2], 32; \
732 ldf.fill.nta f27 = [r3], 32; \
733 ;; \
734 ldf.fill.nta f28 = [r2], 32; \
735 ldf.fill.nta f29 = [r3], 32; \
736 ;; \
737 ldf.fill.nta f30 = [r2], 32; \
738 ldf.fill.nta f31 = [r3], 32; \
739 ;;
740
741
742
743 /*
744 * r33: point to context_t structure
745 */
746#define RESTORE_FPU_HIGH \
747 add r2 = CTX(F32), r33; \
748 add r3 = CTX(F33), r33; \
749 ;; \
750 ldf.fill.nta f32 = [r2], 32; \
751 ldf.fill.nta f33 = [r3], 32; \
752 ;; \
753 ldf.fill.nta f34 = [r2], 32; \
754 ldf.fill.nta f35 = [r3], 32; \
755 ;; \
756 ldf.fill.nta f36 = [r2], 32; \
757 ldf.fill.nta f37 = [r3], 32; \
758 ;; \
759 ldf.fill.nta f38 = [r2], 32; \
760 ldf.fill.nta f39 = [r3], 32; \
761 ;; \
762 ldf.fill.nta f40 = [r2], 32; \
763 ldf.fill.nta f41 = [r3], 32; \
764 ;; \
765 ldf.fill.nta f42 = [r2], 32; \
766 ldf.fill.nta f43 = [r3], 32; \
767 ;; \
768 ldf.fill.nta f44 = [r2], 32; \
769 ldf.fill.nta f45 = [r3], 32; \
770 ;; \
771 ldf.fill.nta f46 = [r2], 32; \
772 ldf.fill.nta f47 = [r3], 32; \
773 ;; \
774 ldf.fill.nta f48 = [r2], 32; \
775 ldf.fill.nta f49 = [r3], 32; \
776 ;; \
777 ldf.fill.nta f50 = [r2], 32; \
778 ldf.fill.nta f51 = [r3], 32; \
779 ;; \
780 ldf.fill.nta f52 = [r2], 32; \
781 ldf.fill.nta f53 = [r3], 32; \
782 ;; \
783 ldf.fill.nta f54 = [r2], 32; \
784 ldf.fill.nta f55 = [r3], 32; \
785 ;; \
786 ldf.fill.nta f56 = [r2], 32; \
787 ldf.fill.nta f57 = [r3], 32; \
788 ;; \
789 ldf.fill.nta f58 = [r2], 32; \
790 ldf.fill.nta f59 = [r3], 32; \
791 ;; \
792 ldf.fill.nta f60 = [r2], 32; \
793 ldf.fill.nta f61 = [r3], 32; \
794 ;; \
795 ldf.fill.nta f62 = [r2], 32; \
796 ldf.fill.nta f63 = [r3], 32; \
797 ;; \
798 ldf.fill.nta f64 = [r2], 32; \
799 ldf.fill.nta f65 = [r3], 32; \
800 ;; \
801 ldf.fill.nta f66 = [r2], 32; \
802 ldf.fill.nta f67 = [r3], 32; \
803 ;; \
804 ldf.fill.nta f68 = [r2], 32; \
805 ldf.fill.nta f69 = [r3], 32; \
806 ;; \
807 ldf.fill.nta f70 = [r2], 32; \
808 ldf.fill.nta f71 = [r3], 32; \
809 ;; \
810 ldf.fill.nta f72 = [r2], 32; \
811 ldf.fill.nta f73 = [r3], 32; \
812 ;; \
813 ldf.fill.nta f74 = [r2], 32; \
814 ldf.fill.nta f75 = [r3], 32; \
815 ;; \
816 ldf.fill.nta f76 = [r2], 32; \
817 ldf.fill.nta f77 = [r3], 32; \
818 ;; \
819 ldf.fill.nta f78 = [r2], 32; \
820 ldf.fill.nta f79 = [r3], 32; \
821 ;; \
822 ldf.fill.nta f80 = [r2], 32; \
823 ldf.fill.nta f81 = [r3], 32; \
824 ;; \
825 ldf.fill.nta f82 = [r2], 32; \
826 ldf.fill.nta f83 = [r3], 32; \
827 ;; \
828 ldf.fill.nta f84 = [r2], 32; \
829 ldf.fill.nta f85 = [r3], 32; \
830 ;; \
831 ldf.fill.nta f86 = [r2], 32; \
832 ldf.fill.nta f87 = [r3], 32; \
833 ;; \
834 ldf.fill.nta f88 = [r2], 32; \
835 ldf.fill.nta f89 = [r3], 32; \
836 ;; \
837 ldf.fill.nta f90 = [r2], 32; \
838 ldf.fill.nta f91 = [r3], 32; \
839 ;; \
840 ldf.fill.nta f92 = [r2], 32; \
841 ldf.fill.nta f93 = [r3], 32; \
842 ;; \
843 ldf.fill.nta f94 = [r2], 32; \
844 ldf.fill.nta f95 = [r3], 32; \
845 ;; \
846 ldf.fill.nta f96 = [r2], 32; \
847 ldf.fill.nta f97 = [r3], 32; \
848 ;; \
849 ldf.fill.nta f98 = [r2], 32; \
850 ldf.fill.nta f99 = [r3], 32; \
851 ;; \
852 ldf.fill.nta f100 = [r2], 32; \
853 ldf.fill.nta f101 = [r3], 32; \
854 ;; \
855 ldf.fill.nta f102 = [r2], 32; \
856 ldf.fill.nta f103 = [r3], 32; \
857 ;; \
858 ldf.fill.nta f104 = [r2], 32; \
859 ldf.fill.nta f105 = [r3], 32; \
860 ;; \
861 ldf.fill.nta f106 = [r2], 32; \
862 ldf.fill.nta f107 = [r3], 32; \
863 ;; \
864 ldf.fill.nta f108 = [r2], 32; \
865 ldf.fill.nta f109 = [r3], 32; \
866 ;; \
867 ldf.fill.nta f110 = [r2], 32; \
868 ldf.fill.nta f111 = [r3], 32; \
869 ;; \
870 ldf.fill.nta f112 = [r2], 32; \
871 ldf.fill.nta f113 = [r3], 32; \
872 ;; \
873 ldf.fill.nta f114 = [r2], 32; \
874 ldf.fill.nta f115 = [r3], 32; \
875 ;; \
876 ldf.fill.nta f116 = [r2], 32; \
877 ldf.fill.nta f117 = [r3], 32; \
878 ;; \
879 ldf.fill.nta f118 = [r2], 32; \
880 ldf.fill.nta f119 = [r3], 32; \
881 ;; \
882 ldf.fill.nta f120 = [r2], 32; \
883 ldf.fill.nta f121 = [r3], 32; \
884 ;; \
885 ldf.fill.nta f122 = [r2], 32; \
886 ldf.fill.nta f123 = [r3], 32; \
887 ;; \
888 ldf.fill.nta f124 = [r2], 32; \
889 ldf.fill.nta f125 = [r3], 32; \
890 ;; \
891 ldf.fill.nta f126 = [r2], 32; \
892 ldf.fill.nta f127 = [r3], 32; \
893 ;;
894
895 /*
 896 * r32: context_t base address; ar.lc is clobbered.
897 */
898#define SAVE_PTK_REGS \
899 add r2=CTX(PKR0), r32; \
900 mov r16=7; \
901 ;; \
902 mov ar.lc=r16; \
903 mov r17=r0; \
904 ;; \
9051: \
906 mov r18=pkr[r17]; \
907 ;; \
908 srlz.i; \
909 ;; \
910 st8 [r2]=r18, 8; \
911 ;; \
912 add r17 =1,r17; \
913 ;; \
914 br.cloop.sptk 1b; \
915 ;;
916
917/*
918 * r33: points to the context_t structure
919 * ar.lc is clobbered.
920 */
921#define RESTORE_PTK_REGS \
922 add r2=CTX(PKR0), r33; \
923 mov r16=7; \
924 ;; \
925 mov ar.lc=r16; \
926 mov r17=r0; \
927 ;; \
9281: \
929 ld8 r18=[r2], 8; \
930 ;; \
931 mov pkr[r17]=r18; \
932 ;; \
933 srlz.i; \
934 ;; \
935 add r17 =1,r17; \
936 ;; \
937 br.cloop.sptk 1b; \
938 ;;
939
940
941/*
942 * void vmm_trampoline( context_t * from,
943 * context_t * to)
944 *
945 * from: r32
946 * to: r33
947 * note: interrupts must be disabled before calling this function.
948 */
949GLOBAL_ENTRY(vmm_trampoline)
950 mov r16 = psr
951 adds r2 = CTX(PSR), r32
952 ;;
953 st8 [r2] = r16, 8 // psr
954 mov r17 = pr
955 ;;
956 st8 [r2] = r17, 8 // pr
957 mov r18 = ar.unat
958 ;;
959 st8 [r2] = r18
960 mov r17 = ar.rsc
961 ;;
962 adds r2 = CTX(RSC),r32
963 ;;
964 st8 [r2]= r17
965 mov ar.rsc =0
966 flushrs
967 ;;
968 SAVE_GENERAL_REGS
969 ;;
970 SAVE_KERNEL_REGS
971 ;;
972 SAVE_APP_REGS
973 ;;
974 SAVE_BRANCH_REGS
975 ;;
976 SAVE_CTL_REGS
977 ;;
978 SAVE_REGION_REGS
979 ;;
980 //SAVE_DEBUG_REGS
981 ;;
982 rsm psr.dfl
983 ;;
984 srlz.d
985 ;;
986 SAVE_FPU_LOW
987 ;;
988 rsm psr.dfh
989 ;;
990 srlz.d
991 ;;
992 SAVE_FPU_HIGH
993 ;;
994 SAVE_PTK_REGS
995 ;;
996 RESTORE_PTK_REGS
997 ;;
998 RESTORE_FPU_HIGH
999 ;;
1000 RESTORE_FPU_LOW
1001 ;;
1002 //RESTORE_DEBUG_REGS
1003 ;;
1004 RESTORE_REGION_REGS
1005 ;;
1006 RESTORE_CTL_REGS
1007 ;;
1008 RESTORE_BRANCH_REGS
1009 ;;
1010 RESTORE_APP_REGS
1011 ;;
1012 RESTORE_KERNEL_REGS
1013 ;;
1014 RESTORE_GENERAL_REGS
1015 ;;
1016 adds r2=CTX(PSR), r33
1017 ;;
1018 ld8 r16=[r2], 8 // psr
1019 ;;
1020 mov psr.l=r16
1021 ;;
1022 srlz.d
1023 ;;
1024 ld8 r16=[r2], 8 // pr
1025 ;;
1026 mov pr =r16,-1
1027 ld8 r16=[r2] // unat
1028 ;;
1029 mov ar.unat=r16
1030 ;;
1031 adds r2=CTX(RSC),r33
1032 ;;
1033 ld8 r16 =[r2]
1034 ;;
1035 mov ar.rsc = r16
1036 ;;
1037 br.ret.sptk.few b0
1038END(vmm_trampoline)
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c
new file mode 100644
index 000000000000..e44027ce5667
--- /dev/null
+++ b/arch/ia64/kvm/vcpu.c
@@ -0,0 +1,2163 @@
1/*
 2 * vcpu.c: handles all virtual cpu related things.
3 * Copyright (c) 2005, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 *
18 * Shaofan Li (Susue Li) <susie.li@intel.com>
19 * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
20 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
21 * Xiantao Zhang <xiantao.zhang@intel.com>
22 */
23
24#include <linux/kvm_host.h>
25#include <linux/types.h>
26
27#include <asm/processor.h>
28#include <asm/ia64regs.h>
29#include <asm/gcc_intrin.h>
30#include <asm/kregs.h>
31#include <asm/pgtable.h>
32#include <asm/tlb.h>
33
34#include "asm-offsets.h"
35#include "vcpu.h"
36
37/*
38 * Special notes:
39 * - Indexed by the (it,dt,rt) sequence
40 * - Only existing mode transitions are allowed in this table
41 * - RSE is placed in lazy mode when emulating guest partial mode
42 * - If gva happens to be in rr0 or rr4, the only allowed case is
43 * identity mapping (gva=gpa), or panic! (How?)
44 */
45int mm_switch_table[8][8] = {
46 /* 2004/09/12(Kevin): Allow switch to self */
47 /*
48 * (it,dt,rt): (0,0,0) -> (1,1,1)
49 * This kind of transition usually occurs in the very early
50 * stage of Linux boot up procedure. Another case is in efi
51 * and pal calls. (see "arch/ia64/kernel/head.S")
52 *
53 * (it,dt,rt): (0,0,0) -> (0,1,1)
54 * This kind of transition is found when OSYa exits efi boot
55 * service. Since gva = gpa in this case (same region),
56 * data accesses can be satisfied even though the itlb entry
57 * for physical emulation is hit.
58 */
59 {SW_SELF, 0, 0, SW_NOP, 0, 0, 0, SW_P2V},
60 {0, 0, 0, 0, 0, 0, 0, 0},
61 {0, 0, 0, 0, 0, 0, 0, 0},
62 /*
63 * (it,dt,rt): (0,1,1) -> (1,1,1)
64 * This kind of transition is found in OSYa.
65 *
66 * (it,dt,rt): (0,1,1) -> (0,0,0)
67 * This kind of transition is found in OSYa.
68 */
69 {SW_NOP, 0, 0, SW_SELF, 0, 0, 0, SW_P2V},
70 /* (1,0,0)->(1,1,1) */
71 {0, 0, 0, 0, 0, 0, 0, SW_P2V},
72 /*
73 * (it,dt,rt): (1,0,1) -> (1,1,1)
74 * This kind of transition usually occurs when Linux returns
75 * from the low level TLB miss handlers.
76 * (see "arch/ia64/kernel/ivt.S")
77 */
78 {0, 0, 0, 0, 0, SW_SELF, 0, SW_P2V},
79 {0, 0, 0, 0, 0, 0, 0, 0},
80 /*
81 * (it,dt,rt): (1,1,1) -> (1,0,1)
82 * This kind of transition usually occurs in Linux low level
83 * TLB miss handler. (see "arch/ia64/kernel/ivt.S")
84 *
85 * (it,dt,rt): (1,1,1) -> (0,0,0)
86 * This kind of transition usually occurs in pal and efi calls,
87 * which requires running in physical mode.
88 * (see "arch/ia64/kernel/head.S")
89 * (1,1,1)->(1,0,0)
90 */
91
92 {SW_V2P, 0, 0, 0, SW_V2P, SW_V2P, 0, SW_SELF},
93};
94
95void physical_mode_init(struct kvm_vcpu *vcpu)
96{
97 vcpu->arch.mode_flags = GUEST_IN_PHY;
98}
99
100void switch_to_physical_rid(struct kvm_vcpu *vcpu)
101{
102 unsigned long psr;
103
104 /* Save original virtual mode rr[0] and rr[4] */
105 psr = ia64_clear_ic();
106 ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_rr0);
107 ia64_srlz_d();
108 ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_rr4);
109 ia64_srlz_d();
110
111 ia64_set_psr(psr);
112 return;
113}
114
115
116void switch_to_virtual_rid(struct kvm_vcpu *vcpu)
117{
118 unsigned long psr;
119
120 psr = ia64_clear_ic();
121 ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
122 ia64_srlz_d();
123 ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
124 ia64_srlz_d();
125 ia64_set_psr(psr);
126 return;
127}
128
129static int mm_switch_action(struct ia64_psr opsr, struct ia64_psr npsr)
130{
131 return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)];
132}
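/*
 * A sketch of how the table is indexed, assuming MODE_IND() packs the
 * psr it/dt/rt bits into a 3-bit value (consistent with the 8x8 layout
 * above):
 *
 *	index = (psr.it << 2) | (psr.dt << 1) | psr.rt;
 *
 * e.g. mm_switch_table[7][5] covers (1,1,1) -> (1,0,1) and yields
 * SW_V2P, the transition taken around the low level TLB miss handlers.
 */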
133
134void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
135 struct ia64_psr new_psr)
136{
137 int act;
138 act = mm_switch_action(old_psr, new_psr);
139 switch (act) {
140 case SW_V2P:
141 /*printk("V -> P mode transition: (0x%lx -> 0x%lx)\n",
142 old_psr.val, new_psr.val);*/
143 switch_to_physical_rid(vcpu);
144 /*
145 * Set RSE to enforced lazy mode, to prevent active RSE
146 * save/restore while in guest physical mode.
147 */
148 vcpu->arch.mode_flags |= GUEST_IN_PHY;
149 break;
150 case SW_P2V:
151 switch_to_virtual_rid(vcpu);
152 /*
153 * Recover the old mode, which was saved when entering
154 * guest physical mode.
155 */
156 vcpu->arch.mode_flags &= ~GUEST_IN_PHY;
157 break;
158 case SW_SELF:
159 break;
160 case SW_NOP:
161 break;
162 default:
163 /* Sanity check */
164 break;
165 }
166 return;
167}
168
169
170
171/*
172 * In physical mode, inserting tc/tr entries for regions 0 and 4
173 * uses RID[0] and RID[4], which are reserved for physical mode
174 * emulation. However, what those inserted tc/tr entries want is
175 * the rid for virtual mode, so the original virtual rid needs to
176 * be restored before the insert.
177 *
178 * Operations which require such a switch include:
179 * - insertions (itc.*, itr.*)
180 * - purges (ptc.* and ptr.*)
181 * - tpa
182 * - tak
183 * - thash?, ttag?
184 * All of the above need the actual virtual rid for the destination entry.
185 */
186
187void check_mm_mode_switch(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
188 struct ia64_psr new_psr)
189{
190
191 if ((old_psr.dt != new_psr.dt)
192 || (old_psr.it != new_psr.it)
193 || (old_psr.rt != new_psr.rt))
194 switch_mm_mode(vcpu, old_psr, new_psr);
195
196 return;
197}
198
216void prepare_if_physical_mode(struct kvm_vcpu *vcpu)
217{
218 if (is_physical_mode(vcpu)) {
219 vcpu->arch.mode_flags |= GUEST_PHY_EMUL;
220 switch_to_virtual_rid(vcpu);
221 }
222 return;
223}
224
225/* Recover always follows prepare */
226void recover_if_physical_mode(struct kvm_vcpu *vcpu)
227{
228 if (is_physical_mode(vcpu))
229 switch_to_physical_rid(vcpu);
230 vcpu->arch.mode_flags &= ~GUEST_PHY_EMUL;
231 return;
232}
233
234#define RPT(x) ((u16) &((struct kvm_pt_regs *)0)->x)
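/*
 * RPT(x) is the classic null-pointer offsetof trick: the byte offset of
 * field x within struct kvm_pt_regs, truncated to u16.  An equivalent
 * formulation (sketch) would be:
 *
 *	#define RPT(x)	((u16) offsetof(struct kvm_pt_regs, x))
 */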
235
236static u16 gr_info[32] = {
237 0, /* r0 is read-only : WE SHOULD NEVER GET THIS */
238 RPT(r1), RPT(r2), RPT(r3),
239 RPT(r4), RPT(r5), RPT(r6), RPT(r7),
240 RPT(r8), RPT(r9), RPT(r10), RPT(r11),
241 RPT(r12), RPT(r13), RPT(r14), RPT(r15),
242 RPT(r16), RPT(r17), RPT(r18), RPT(r19),
243 RPT(r20), RPT(r21), RPT(r22), RPT(r23),
244 RPT(r24), RPT(r25), RPT(r26), RPT(r27),
245 RPT(r28), RPT(r29), RPT(r30), RPT(r31)
246};
247
248#define IA64_FIRST_STACKED_GR 32
249#define IA64_FIRST_ROTATING_FR 32
250
251static inline unsigned long
252rotate_reg(unsigned long sor, unsigned long rrb, unsigned long reg)
253{
254 reg += rrb;
255 if (reg >= sor)
256 reg -= sor;
257 return reg;
258}
259
260/*
261 * Return the (rotated) index for the floating point register
262 * REGNUM (REGNUM must be in the range 32-127; the
263 * result is in the range 0-95).
264 */
265static inline unsigned long fph_index(struct kvm_pt_regs *regs,
266 long regnum)
267{
268 unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f;
269 return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
270}
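/*
 * f32-f127 form the 96-register rotating floating point partition
 * (hence rotate_reg(96, ...)); rrb.fr sits in cr.ifs bits 31:25, which
 * is what the shift and mask above extract.
 */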
271
272
273/*
274 * Given a backing store address and a register count, calculate the
275 * resulting address, accounting for intervening RNAT collection slots.
276 */
277static inline unsigned long *kvm_rse_skip_regs(unsigned long *addr,
278 long num_regs)
279{
280 long delta = ia64_rse_slot_num(addr) + num_regs;
281 int i = 0;
282
283 if (num_regs < 0)
284 delta -= 0x3e;
285 if (delta < 0) {
286 while (delta <= -0x3f) {
287 i--;
288 delta += 0x3f;
289 }
290 } else {
291 while (delta >= 0x3f) {
292 i++;
293 delta -= 0x3f;
294 }
295 }
296
297 return addr + num_regs + i;
298}
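/*
 * The 0x3f adjustments above account for RNAT collection slots: in the
 * RSE backing store every 64th doubleword (slot number 63) holds NaT
 * collection bits rather than a register, so each 63 registers crossed
 * adds (or, going backwards, removes) one extra slot.
 */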
299
300static void get_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
301 unsigned long *val, int *nat)
302{
303 unsigned long *bsp, *addr, *rnat_addr, *bspstore;
304 unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
305 unsigned long nat_mask;
306 unsigned long old_rsc, new_rsc;
307 long sof = (regs->cr_ifs) & 0x7f;
308 long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
309 long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
310 long ridx = r1 - 32;
311
312 if (ridx < sor)
313 ridx = rotate_reg(sor, rrb_gr, ridx);
314
315 old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
316 new_rsc = old_rsc&(~(0x3));
317 ia64_setreg(_IA64_REG_AR_RSC, new_rsc);
318
319 bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
320 bsp = kbs + (regs->loadrs >> 19);
321
322 addr = kvm_rse_skip_regs(bsp, -sof + ridx);
323 nat_mask = 1UL << ia64_rse_slot_num(addr);
324 rnat_addr = ia64_rse_rnat_addr(addr);
325
326 if (addr >= bspstore) {
327 ia64_flushrs();
328 ia64_mf();
329 bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
330 }
331 *val = *addr;
332 if (nat) {
333 if (bspstore < rnat_addr)
334 *nat = (int)!!(ia64_getreg(_IA64_REG_AR_RNAT)
335 & nat_mask);
336 else
337 *nat = (int)!!((*rnat_addr) & nat_mask);
338	}
339	ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
340}
341
342void set_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
343 unsigned long val, unsigned long nat)
344{
345 unsigned long *bsp, *bspstore, *addr, *rnat_addr;
346 unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
347 unsigned long nat_mask;
348 unsigned long old_rsc, new_rsc, psr;
349 unsigned long rnat;
350 long sof = (regs->cr_ifs) & 0x7f;
351 long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
352 long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
353 long ridx = r1 - 32;
354
355 if (ridx < sor)
356 ridx = rotate_reg(sor, rrb_gr, ridx);
357
358 old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
359 /* put RSC to lazy mode, and set loadrs 0 */
360 new_rsc = old_rsc & (~0x3fff0003);
361 ia64_setreg(_IA64_REG_AR_RSC, new_rsc);
362 bsp = kbs + (regs->loadrs >> 19); /* 16 + 3 */
363
364 addr = kvm_rse_skip_regs(bsp, -sof + ridx);
365 nat_mask = 1UL << ia64_rse_slot_num(addr);
366 rnat_addr = ia64_rse_rnat_addr(addr);
367
368 local_irq_save(psr);
369 bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
370 if (addr >= bspstore) {
371
372 ia64_flushrs();
373 ia64_mf();
374 *addr = val;
375 bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
376 rnat = ia64_getreg(_IA64_REG_AR_RNAT);
377 if (bspstore < rnat_addr)
378 rnat = rnat & (~nat_mask);
379 else
380 *rnat_addr = (*rnat_addr)&(~nat_mask);
381
382 ia64_mf();
383 ia64_loadrs();
384 ia64_setreg(_IA64_REG_AR_RNAT, rnat);
385 } else {
386 rnat = ia64_getreg(_IA64_REG_AR_RNAT);
387 *addr = val;
388 if (bspstore < rnat_addr)
389 rnat = rnat&(~nat_mask);
390 else
391 *rnat_addr = (*rnat_addr) & (~nat_mask);
392
393 ia64_setreg(_IA64_REG_AR_BSPSTORE, bspstore);
394 ia64_setreg(_IA64_REG_AR_RNAT, rnat);
395 }
396 local_irq_restore(psr);
397 ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
398}
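/*
 * In both routines above regs->loadrs is kept in ar.rsc format: the
 * loadrs byte count occupies bits 29:16, so ">> 16" yields bytes and a
 * further ">> 3" converts bytes to 8-byte slots -- the combined ">> 19"
 * (the "16 + 3" noted above) used to locate bsp from the kernel backing
 * store base.
 */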
399
400void getreg(unsigned long regnum, unsigned long *val,
401 int *nat, struct kvm_pt_regs *regs)
402{
403 unsigned long addr, *unat;
404 if (regnum >= IA64_FIRST_STACKED_GR) {
405 get_rse_reg(regs, regnum, val, nat);
406 return;
407 }
408
409 /*
410 * Now look at registers in [0-31] range and init correct UNAT
411 */
412 addr = (unsigned long)regs;
413	unat = &regs->eml_unat;
414
415 addr += gr_info[regnum];
416
417 *val = *(unsigned long *)addr;
418 /*
419 * do it only when requested
420 */
421 if (nat)
422 *nat = (*unat >> ((addr >> 3) & 0x3f)) & 0x1UL;
423}
424
425void setreg(unsigned long regnum, unsigned long val,
426 int nat, struct kvm_pt_regs *regs)
427{
428 unsigned long addr;
429 unsigned long bitmask;
430 unsigned long *unat;
431
432 /*
433 * First takes care of stacked registers
434 */
435 if (regnum >= IA64_FIRST_STACKED_GR) {
436 set_rse_reg(regs, regnum, val, nat);
437 return;
438 }
439
440 /*
441 * Now look at registers in [0-31] range and init correct UNAT
442 */
443 addr = (unsigned long)regs;
444 unat = &regs->eml_unat;
445 /*
446 * add offset from base of struct
447 * and do it !
448 */
449 addr += gr_info[regnum];
450
451 *(unsigned long *)addr = val;
452
453 /*
454	 * We need to set or clear the corresponding UNAT bit to fully
455	 * emulate the load: UNAT bit_pos = GR[r3]{8:3} from EAS-2.4
456 */
457 bitmask = 1UL << ((addr >> 3) & 0x3f);
458 if (nat)
459 *unat |= bitmask;
460 else
461 *unat &= ~bitmask;
462
463}
464
465u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg)
466{
467 struct kvm_pt_regs *regs = vcpu_regs(vcpu);
468 u64 val;
469
470 if (!reg)
471 return 0;
472 getreg(reg, &val, 0, regs);
473 return val;
474}
475
476void vcpu_set_gr(struct kvm_vcpu *vcpu, u64 reg, u64 value, int nat)
477{
478 struct kvm_pt_regs *regs = vcpu_regs(vcpu);
479 long sof = (regs->cr_ifs) & 0x7f;
480
481 if (!reg)
482 return;
483 if (reg >= sof + 32)
484 return;
485 setreg(reg, value, nat, regs); /* FIXME: handle NATs later*/
486}
487
488void getfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
489 struct kvm_pt_regs *regs)
490{
491 /* Take floating register rotation into consideration*/
492 if (regnum >= IA64_FIRST_ROTATING_FR)
493 regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);
494#define CASE_FIXED_FP(reg) \
495 case (reg) : \
496 ia64_stf_spill(fpval, reg); \
497 break
498
499 switch (regnum) {
500 CASE_FIXED_FP(0);
501 CASE_FIXED_FP(1);
502 CASE_FIXED_FP(2);
503 CASE_FIXED_FP(3);
504 CASE_FIXED_FP(4);
505 CASE_FIXED_FP(5);
506
507 CASE_FIXED_FP(6);
508 CASE_FIXED_FP(7);
509 CASE_FIXED_FP(8);
510 CASE_FIXED_FP(9);
511 CASE_FIXED_FP(10);
512 CASE_FIXED_FP(11);
513
514 CASE_FIXED_FP(12);
515 CASE_FIXED_FP(13);
516 CASE_FIXED_FP(14);
517 CASE_FIXED_FP(15);
518 CASE_FIXED_FP(16);
519 CASE_FIXED_FP(17);
520 CASE_FIXED_FP(18);
521 CASE_FIXED_FP(19);
522 CASE_FIXED_FP(20);
523 CASE_FIXED_FP(21);
524 CASE_FIXED_FP(22);
525 CASE_FIXED_FP(23);
526 CASE_FIXED_FP(24);
527 CASE_FIXED_FP(25);
528 CASE_FIXED_FP(26);
529 CASE_FIXED_FP(27);
530 CASE_FIXED_FP(28);
531 CASE_FIXED_FP(29);
532 CASE_FIXED_FP(30);
533 CASE_FIXED_FP(31);
534 CASE_FIXED_FP(32);
535 CASE_FIXED_FP(33);
536 CASE_FIXED_FP(34);
537 CASE_FIXED_FP(35);
538 CASE_FIXED_FP(36);
539 CASE_FIXED_FP(37);
540 CASE_FIXED_FP(38);
541 CASE_FIXED_FP(39);
542 CASE_FIXED_FP(40);
543 CASE_FIXED_FP(41);
544 CASE_FIXED_FP(42);
545 CASE_FIXED_FP(43);
546 CASE_FIXED_FP(44);
547 CASE_FIXED_FP(45);
548 CASE_FIXED_FP(46);
549 CASE_FIXED_FP(47);
550 CASE_FIXED_FP(48);
551 CASE_FIXED_FP(49);
552 CASE_FIXED_FP(50);
553 CASE_FIXED_FP(51);
554 CASE_FIXED_FP(52);
555 CASE_FIXED_FP(53);
556 CASE_FIXED_FP(54);
557 CASE_FIXED_FP(55);
558 CASE_FIXED_FP(56);
559 CASE_FIXED_FP(57);
560 CASE_FIXED_FP(58);
561 CASE_FIXED_FP(59);
562 CASE_FIXED_FP(60);
563 CASE_FIXED_FP(61);
564 CASE_FIXED_FP(62);
565 CASE_FIXED_FP(63);
566 CASE_FIXED_FP(64);
567 CASE_FIXED_FP(65);
568 CASE_FIXED_FP(66);
569 CASE_FIXED_FP(67);
570 CASE_FIXED_FP(68);
571 CASE_FIXED_FP(69);
572 CASE_FIXED_FP(70);
573 CASE_FIXED_FP(71);
574 CASE_FIXED_FP(72);
575 CASE_FIXED_FP(73);
576 CASE_FIXED_FP(74);
577 CASE_FIXED_FP(75);
578 CASE_FIXED_FP(76);
579 CASE_FIXED_FP(77);
580 CASE_FIXED_FP(78);
581 CASE_FIXED_FP(79);
582 CASE_FIXED_FP(80);
583 CASE_FIXED_FP(81);
584 CASE_FIXED_FP(82);
585 CASE_FIXED_FP(83);
586 CASE_FIXED_FP(84);
587 CASE_FIXED_FP(85);
588 CASE_FIXED_FP(86);
589 CASE_FIXED_FP(87);
590 CASE_FIXED_FP(88);
591 CASE_FIXED_FP(89);
592 CASE_FIXED_FP(90);
593 CASE_FIXED_FP(91);
594 CASE_FIXED_FP(92);
595 CASE_FIXED_FP(93);
596 CASE_FIXED_FP(94);
597 CASE_FIXED_FP(95);
598 CASE_FIXED_FP(96);
599 CASE_FIXED_FP(97);
600 CASE_FIXED_FP(98);
601 CASE_FIXED_FP(99);
602 CASE_FIXED_FP(100);
603 CASE_FIXED_FP(101);
604 CASE_FIXED_FP(102);
605 CASE_FIXED_FP(103);
606 CASE_FIXED_FP(104);
607 CASE_FIXED_FP(105);
608 CASE_FIXED_FP(106);
609 CASE_FIXED_FP(107);
610 CASE_FIXED_FP(108);
611 CASE_FIXED_FP(109);
612 CASE_FIXED_FP(110);
613 CASE_FIXED_FP(111);
614 CASE_FIXED_FP(112);
615 CASE_FIXED_FP(113);
616 CASE_FIXED_FP(114);
617 CASE_FIXED_FP(115);
618 CASE_FIXED_FP(116);
619 CASE_FIXED_FP(117);
620 CASE_FIXED_FP(118);
621 CASE_FIXED_FP(119);
622 CASE_FIXED_FP(120);
623 CASE_FIXED_FP(121);
624 CASE_FIXED_FP(122);
625 CASE_FIXED_FP(123);
626 CASE_FIXED_FP(124);
627 CASE_FIXED_FP(125);
628 CASE_FIXED_FP(126);
629 CASE_FIXED_FP(127);
630 }
631#undef CASE_FIXED_FP
632}
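/*
 * The 128-way switch above (and its twin in setfpreg() below) exists
 * because ia64_stf_spill()/ia64_ldf_fill() need the floating point
 * register number as a compile-time constant, so the register cannot
 * be selected through a runtime variable.
 */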
633
634void setfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
635 struct kvm_pt_regs *regs)
636{
637 /* Take floating register rotation into consideration*/
638 if (regnum >= IA64_FIRST_ROTATING_FR)
639 regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);
640
641#define CASE_FIXED_FP(reg) \
642 case (reg) : \
643 ia64_ldf_fill(reg, fpval); \
644 break
645
646 switch (regnum) {
647 CASE_FIXED_FP(2);
648 CASE_FIXED_FP(3);
649 CASE_FIXED_FP(4);
650 CASE_FIXED_FP(5);
651
652 CASE_FIXED_FP(6);
653 CASE_FIXED_FP(7);
654 CASE_FIXED_FP(8);
655 CASE_FIXED_FP(9);
656 CASE_FIXED_FP(10);
657 CASE_FIXED_FP(11);
658
659 CASE_FIXED_FP(12);
660 CASE_FIXED_FP(13);
661 CASE_FIXED_FP(14);
662 CASE_FIXED_FP(15);
663 CASE_FIXED_FP(16);
664 CASE_FIXED_FP(17);
665 CASE_FIXED_FP(18);
666 CASE_FIXED_FP(19);
667 CASE_FIXED_FP(20);
668 CASE_FIXED_FP(21);
669 CASE_FIXED_FP(22);
670 CASE_FIXED_FP(23);
671 CASE_FIXED_FP(24);
672 CASE_FIXED_FP(25);
673 CASE_FIXED_FP(26);
674 CASE_FIXED_FP(27);
675 CASE_FIXED_FP(28);
676 CASE_FIXED_FP(29);
677 CASE_FIXED_FP(30);
678 CASE_FIXED_FP(31);
679 CASE_FIXED_FP(32);
680 CASE_FIXED_FP(33);
681 CASE_FIXED_FP(34);
682 CASE_FIXED_FP(35);
683 CASE_FIXED_FP(36);
684 CASE_FIXED_FP(37);
685 CASE_FIXED_FP(38);
686 CASE_FIXED_FP(39);
687 CASE_FIXED_FP(40);
688 CASE_FIXED_FP(41);
689 CASE_FIXED_FP(42);
690 CASE_FIXED_FP(43);
691 CASE_FIXED_FP(44);
692 CASE_FIXED_FP(45);
693 CASE_FIXED_FP(46);
694 CASE_FIXED_FP(47);
695 CASE_FIXED_FP(48);
696 CASE_FIXED_FP(49);
697 CASE_FIXED_FP(50);
698 CASE_FIXED_FP(51);
699 CASE_FIXED_FP(52);
700 CASE_FIXED_FP(53);
701 CASE_FIXED_FP(54);
702 CASE_FIXED_FP(55);
703 CASE_FIXED_FP(56);
704 CASE_FIXED_FP(57);
705 CASE_FIXED_FP(58);
706 CASE_FIXED_FP(59);
707 CASE_FIXED_FP(60);
708 CASE_FIXED_FP(61);
709 CASE_FIXED_FP(62);
710 CASE_FIXED_FP(63);
711 CASE_FIXED_FP(64);
712 CASE_FIXED_FP(65);
713 CASE_FIXED_FP(66);
714 CASE_FIXED_FP(67);
715 CASE_FIXED_FP(68);
716 CASE_FIXED_FP(69);
717 CASE_FIXED_FP(70);
718 CASE_FIXED_FP(71);
719 CASE_FIXED_FP(72);
720 CASE_FIXED_FP(73);
721 CASE_FIXED_FP(74);
722 CASE_FIXED_FP(75);
723 CASE_FIXED_FP(76);
724 CASE_FIXED_FP(77);
725 CASE_FIXED_FP(78);
726 CASE_FIXED_FP(79);
727 CASE_FIXED_FP(80);
728 CASE_FIXED_FP(81);
729 CASE_FIXED_FP(82);
730 CASE_FIXED_FP(83);
731 CASE_FIXED_FP(84);
732 CASE_FIXED_FP(85);
733 CASE_FIXED_FP(86);
734 CASE_FIXED_FP(87);
735 CASE_FIXED_FP(88);
736 CASE_FIXED_FP(89);
737 CASE_FIXED_FP(90);
738 CASE_FIXED_FP(91);
739 CASE_FIXED_FP(92);
740 CASE_FIXED_FP(93);
741 CASE_FIXED_FP(94);
742 CASE_FIXED_FP(95);
743 CASE_FIXED_FP(96);
744 CASE_FIXED_FP(97);
745 CASE_FIXED_FP(98);
746 CASE_FIXED_FP(99);
747 CASE_FIXED_FP(100);
748 CASE_FIXED_FP(101);
749 CASE_FIXED_FP(102);
750 CASE_FIXED_FP(103);
751 CASE_FIXED_FP(104);
752 CASE_FIXED_FP(105);
753 CASE_FIXED_FP(106);
754 CASE_FIXED_FP(107);
755 CASE_FIXED_FP(108);
756 CASE_FIXED_FP(109);
757 CASE_FIXED_FP(110);
758 CASE_FIXED_FP(111);
759 CASE_FIXED_FP(112);
760 CASE_FIXED_FP(113);
761 CASE_FIXED_FP(114);
762 CASE_FIXED_FP(115);
763 CASE_FIXED_FP(116);
764 CASE_FIXED_FP(117);
765 CASE_FIXED_FP(118);
766 CASE_FIXED_FP(119);
767 CASE_FIXED_FP(120);
768 CASE_FIXED_FP(121);
769 CASE_FIXED_FP(122);
770 CASE_FIXED_FP(123);
771 CASE_FIXED_FP(124);
772 CASE_FIXED_FP(125);
773 CASE_FIXED_FP(126);
774 CASE_FIXED_FP(127);
775 }
776}
777
778void vcpu_get_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
779 struct ia64_fpreg *val)
780{
781 struct kvm_pt_regs *regs = vcpu_regs(vcpu);
782
783 getfpreg(reg, val, regs); /* FIXME: handle NATs later*/
784}
785
786void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
787 struct ia64_fpreg *val)
788{
789 struct kvm_pt_regs *regs = vcpu_regs(vcpu);
790
791 if (reg > 1)
792 setfpreg(reg, val, regs); /* FIXME: handle NATs later*/
793}
794
795/************************************************************************
796 * lsapic timer
797 ***********************************************************************/
798u64 vcpu_get_itc(struct kvm_vcpu *vcpu)
799{
800 unsigned long guest_itc;
801 guest_itc = VMX(vcpu, itc_offset) + ia64_getreg(_IA64_REG_AR_ITC);
802
803 if (guest_itc >= VMX(vcpu, last_itc)) {
804 VMX(vcpu, last_itc) = guest_itc;
805 return guest_itc;
806 } else
807 return VMX(vcpu, last_itc);
808}
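/*
 * last_itc keeps the guest-visible ITC monotonically non-decreasing
 * even if the offset-adjusted host ITC momentarily reads behind a
 * previously returned value (e.g. just after vcpu_set_itc() rebases
 * itc_offset).
 */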
809
810static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val);
811static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
812{
813 struct kvm_vcpu *v;
814 int i;
815 long itc_offset = val - ia64_getreg(_IA64_REG_AR_ITC);
816 unsigned long vitv = VCPU(vcpu, itv);
817
818 if (vcpu->vcpu_id == 0) {
819 for (i = 0; i < MAX_VCPU_NUM; i++) {
820 v = (struct kvm_vcpu *)((char *)vcpu + VCPU_SIZE * i);
821 VMX(v, itc_offset) = itc_offset;
822 VMX(v, last_itc) = 0;
823 }
824 }
825 VMX(vcpu, last_itc) = 0;
826 if (VCPU(vcpu, itm) <= val) {
827 VMX(vcpu, itc_check) = 0;
828 vcpu_unpend_interrupt(vcpu, vitv);
829 } else {
830 VMX(vcpu, itc_check) = 1;
831 vcpu_set_itm(vcpu, VCPU(vcpu, itm));
832 }
833
834}
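/*
 * The bootstrap loop above assumes all vcpu structures live in one
 * contiguous VMM mapping, VCPU_SIZE apart, so vcpu 0 can rebase
 * itc_offset (and reset last_itc) for every vcpu with plain pointer
 * arithmetic.
 */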
835
836static inline u64 vcpu_get_itm(struct kvm_vcpu *vcpu)
837{
838 return ((u64)VCPU(vcpu, itm));
839}
840
841static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val)
842{
843 unsigned long vitv = VCPU(vcpu, itv);
844 VCPU(vcpu, itm) = val;
845
846 if (val > vcpu_get_itc(vcpu)) {
847 VMX(vcpu, itc_check) = 1;
848 vcpu_unpend_interrupt(vcpu, vitv);
849 VMX(vcpu, timer_pending) = 0;
850 } else
851 VMX(vcpu, itc_check) = 0;
852}
853
854#define ITV_VECTOR(itv) (itv&0xff)
855#define ITV_IRQ_MASK(itv) (itv&(1<<16))
856
857static inline void vcpu_set_itv(struct kvm_vcpu *vcpu, u64 val)
858{
859 VCPU(vcpu, itv) = val;
860 if (!ITV_IRQ_MASK(val) && vcpu->arch.timer_pending) {
861 vcpu_pend_interrupt(vcpu, ITV_VECTOR(val));
862 vcpu->arch.timer_pending = 0;
863 }
864}
865
866static inline void vcpu_set_eoi(struct kvm_vcpu *vcpu, u64 val)
867{
868 int vec;
869
870 vec = highest_inservice_irq(vcpu);
871 if (vec == NULL_VECTOR)
872 return;
873 VMX(vcpu, insvc[vec >> 6]) &= ~(1UL << (vec & 63));
874 VCPU(vcpu, eoi) = 0;
875 vcpu->arch.irq_new_pending = 1;
876
877}
878
879/* See Table 5-8 in SDM vol2 for the definition */
880int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice)
881{
882 union ia64_tpr vtpr;
883
884 vtpr.val = VCPU(vcpu, tpr);
885
886 if (h_inservice == NMI_VECTOR)
887 return IRQ_MASKED_BY_INSVC;
888
889 if (h_pending == NMI_VECTOR) {
890 /* Non Maskable Interrupt */
891 return IRQ_NO_MASKED;
892 }
893
894 if (h_inservice == ExtINT_VECTOR)
895 return IRQ_MASKED_BY_INSVC;
896
897 if (h_pending == ExtINT_VECTOR) {
898 if (vtpr.mmi) {
899 /* mask all external IRQ */
900 return IRQ_MASKED_BY_VTPR;
901 } else
902 return IRQ_NO_MASKED;
903 }
904
905 if (is_higher_irq(h_pending, h_inservice)) {
906 if (is_higher_class(h_pending, vtpr.mic + (vtpr.mmi << 4)))
907 return IRQ_NO_MASKED;
908 else
909 return IRQ_MASKED_BY_VTPR;
910 } else {
911 return IRQ_MASKED_BY_INSVC;
912 }
913}
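/*
 * Sketch of the priority comparison above, assuming an interrupt's
 * class is its top four vector bits (vec >> 4, as also used by
 * update_vhpi() below): a pending external interrupt is deliverable
 * only when its class is strictly higher than vtpr.mic, and vtpr.mmi
 * masks all external interrupts outright.
 */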
914
915void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
916{
917 long spsr;
918 int ret;
919
920 local_irq_save(spsr);
921 ret = test_and_set_bit(vec, &VCPU(vcpu, irr[0]));
922 local_irq_restore(spsr);
923
924 vcpu->arch.irq_new_pending = 1;
925}
926
927void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
928{
929 long spsr;
930 int ret;
931
932 local_irq_save(spsr);
933 ret = test_and_clear_bit(vec, &VCPU(vcpu, irr[0]));
934 local_irq_restore(spsr);
935 if (ret) {
936 vcpu->arch.irq_new_pending = 1;
937 wmb();
938 }
939}
940
941void update_vhpi(struct kvm_vcpu *vcpu, int vec)
942{
943 u64 vhpi;
944
945 if (vec == NULL_VECTOR)
946 vhpi = 0;
947 else if (vec == NMI_VECTOR)
948 vhpi = 32;
949 else if (vec == ExtINT_VECTOR)
950 vhpi = 16;
951 else
952 vhpi = vec >> 4;
953
954 VCPU(vcpu, vhpi) = vhpi;
955 if (VCPU(vcpu, vac).a_int)
956 ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT,
957 (u64)vcpu->arch.vpd, 0, 0, 0, 0, 0, 0);
958}
959
960u64 vcpu_get_ivr(struct kvm_vcpu *vcpu)
961{
962 int vec, h_inservice, mask;
963
964 vec = highest_pending_irq(vcpu);
965 h_inservice = highest_inservice_irq(vcpu);
966 mask = irq_masked(vcpu, vec, h_inservice);
967 if (vec == NULL_VECTOR || mask == IRQ_MASKED_BY_INSVC) {
968 if (VCPU(vcpu, vhpi))
969 update_vhpi(vcpu, NULL_VECTOR);
970 return IA64_SPURIOUS_INT_VECTOR;
971 }
972 if (mask == IRQ_MASKED_BY_VTPR) {
973 update_vhpi(vcpu, vec);
974 return IA64_SPURIOUS_INT_VECTOR;
975 }
976 VMX(vcpu, insvc[vec >> 6]) |= (1UL << (vec & 63));
977 vcpu_unpend_interrupt(vcpu, vec);
978 return (u64)vec;
979}
980
981/**************************************************************************
982 Privileged operation emulation routines
983 **************************************************************************/
984u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr)
985{
986 union ia64_pta vpta;
987 union ia64_rr vrr;
988 u64 pval;
989 u64 vhpt_offset;
990
991 vpta.val = vcpu_get_pta(vcpu);
992 vrr.val = vcpu_get_rr(vcpu, vadr);
993 vhpt_offset = ((vadr >> vrr.ps) << 3) & ((1UL << (vpta.size)) - 1);
994 if (vpta.vf) {
995 pval = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr.val,
996 vpta.val, 0, 0, 0, 0);
997 } else {
998 pval = (vadr & VRN_MASK) | vhpt_offset |
999 (vpta.val << 3 >> (vpta.size + 3) << (vpta.size));
1000 }
1001 return pval;
1002}
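/*
 * Worked example of the non-vf fall-back above, assuming 4K guest pages
 * (vrr.ps = 12) and the minimum 32K VHPT (vpta.size = 15, as set in
 * init_vcpu() below): vhpt_offset = ((vadr >> 12) << 3) & 0x7fff, and
 * the result combines the region bits of vadr, that offset, and the
 * PTA base truncated to a 32K boundary.
 */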
1003
1004u64 vcpu_ttag(struct kvm_vcpu *vcpu, u64 vadr)
1005{
1006 union ia64_rr vrr;
1007 union ia64_pta vpta;
1008 u64 pval;
1009
1010 vpta.val = vcpu_get_pta(vcpu);
1011 vrr.val = vcpu_get_rr(vcpu, vadr);
1012 if (vpta.vf) {
1013 pval = ia64_call_vsa(PAL_VPS_TTAG, vadr, vrr.val,
1014 0, 0, 0, 0, 0);
1015 } else
1016 pval = 1;
1017
1018 return pval;
1019}
1020
1021u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr)
1022{
1023 struct thash_data *data;
1024 union ia64_pta vpta;
1025 u64 key;
1026
1027 vpta.val = vcpu_get_pta(vcpu);
1028 if (vpta.vf == 0) {
1029 key = 1;
1030 return key;
1031 }
1032 data = vtlb_lookup(vcpu, vadr, D_TLB);
1033 if (!data || !data->p)
1034 key = 1;
1035 else
1036 key = data->key;
1037
1038 return key;
1039}
1040
1041
1042
1043void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
1044{
1045 unsigned long thash, vadr;
1046
1047 vadr = vcpu_get_gr(vcpu, inst.M46.r3);
1048 thash = vcpu_thash(vcpu, vadr);
1049 vcpu_set_gr(vcpu, inst.M46.r1, thash, 0);
1050}
1051
1052
1053void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst)
1054{
1055 unsigned long tag, vadr;
1056
1057 vadr = vcpu_get_gr(vcpu, inst.M46.r3);
1058 tag = vcpu_ttag(vcpu, vadr);
1059 vcpu_set_gr(vcpu, inst.M46.r1, tag, 0);
1060}
1061
1062int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, u64 *padr)
1063{
1064 struct thash_data *data;
1065 union ia64_isr visr, pt_isr;
1066 struct kvm_pt_regs *regs;
1067 struct ia64_psr vpsr;
1068
1069 regs = vcpu_regs(vcpu);
1070 pt_isr.val = VMX(vcpu, cr_isr);
1071 visr.val = 0;
1072 visr.ei = pt_isr.ei;
1073 visr.ir = pt_isr.ir;
1074 vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
1075 visr.na = 1;
1076
1077 data = vhpt_lookup(vadr);
1078 if (data) {
1079 if (data->p == 0) {
1080 vcpu_set_isr(vcpu, visr.val);
1081 data_page_not_present(vcpu, vadr);
1082 return IA64_FAULT;
1083 } else if (data->ma == VA_MATTR_NATPAGE) {
1084 vcpu_set_isr(vcpu, visr.val);
1085 dnat_page_consumption(vcpu, vadr);
1086 return IA64_FAULT;
1087 } else {
1088 *padr = (data->gpaddr >> data->ps << data->ps) |
1089 (vadr & (PSIZE(data->ps) - 1));
1090 return IA64_NO_FAULT;
1091 }
1092 }
1093
1094 data = vtlb_lookup(vcpu, vadr, D_TLB);
1095 if (data) {
1096 if (data->p == 0) {
1097 vcpu_set_isr(vcpu, visr.val);
1098 data_page_not_present(vcpu, vadr);
1099 return IA64_FAULT;
1100 } else if (data->ma == VA_MATTR_NATPAGE) {
1101 vcpu_set_isr(vcpu, visr.val);
1102 dnat_page_consumption(vcpu, vadr);
1103 return IA64_FAULT;
1104 } else{
1105 *padr = ((data->ppn >> (data->ps - 12)) << data->ps)
1106 | (vadr & (PSIZE(data->ps) - 1));
1107 return IA64_NO_FAULT;
1108 }
1109 }
1110 if (!vhpt_enabled(vcpu, vadr, NA_REF)) {
1111 if (vpsr.ic) {
1112 vcpu_set_isr(vcpu, visr.val);
1113 alt_dtlb(vcpu, vadr);
1114 return IA64_FAULT;
1115 } else {
1116 nested_dtlb(vcpu);
1117 return IA64_FAULT;
1118 }
1119 } else {
1120 if (vpsr.ic) {
1121 vcpu_set_isr(vcpu, visr.val);
1122 dvhpt_fault(vcpu, vadr);
1123 return IA64_FAULT;
1124 } else{
1125 nested_dtlb(vcpu);
1126 return IA64_FAULT;
1127 }
1128 }
1129
1130 return IA64_NO_FAULT;
1131}
1132
1133
1134int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst)
1135{
1136 unsigned long r1, r3;
1137
1138 r3 = vcpu_get_gr(vcpu, inst.M46.r3);
1139
1140 if (vcpu_tpa(vcpu, r3, &r1))
1141 return IA64_FAULT;
1142
1143 vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
1144 return(IA64_NO_FAULT);
1145}
1146
1147void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst)
1148{
1149 unsigned long r1, r3;
1150
1151 r3 = vcpu_get_gr(vcpu, inst.M46.r3);
1152 r1 = vcpu_tak(vcpu, r3);
1153 vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
1154}
1155
1156
1157/************************************
1158 * Insert/Purge translation register/cache
1159 ************************************/
1160void vcpu_itc_i(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
1161{
1162 thash_purge_and_insert(vcpu, pte, itir, ifa, I_TLB);
1163}
1164
1165void vcpu_itc_d(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
1166{
1167 thash_purge_and_insert(vcpu, pte, itir, ifa, D_TLB);
1168}
1169
1170void vcpu_itr_i(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
1171{
1172 u64 ps, va, rid;
1173 struct thash_data *p_itr;
1174
1175 ps = itir_ps(itir);
1176 va = PAGEALIGN(ifa, ps);
1177 pte &= ~PAGE_FLAGS_RV_MASK;
1178 rid = vcpu_get_rr(vcpu, ifa);
1179 rid = rid & RR_RID_MASK;
1180 p_itr = (struct thash_data *)&vcpu->arch.itrs[slot];
1181 vcpu_set_tr(p_itr, pte, itir, va, rid);
1182 vcpu_quick_region_set(VMX(vcpu, itr_regions), va);
1183}
1184
1185
1186void vcpu_itr_d(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
1187{
1188 u64 gpfn;
1189 u64 ps, va, rid;
1190 struct thash_data *p_dtr;
1191
1192 ps = itir_ps(itir);
1193 va = PAGEALIGN(ifa, ps);
1194 pte &= ~PAGE_FLAGS_RV_MASK;
1195
1196 if (ps != _PAGE_SIZE_16M)
1197 thash_purge_entries(vcpu, va, ps);
1198 gpfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
1199 if (__gpfn_is_io(gpfn))
1200 pte |= VTLB_PTE_IO;
1201 rid = vcpu_get_rr(vcpu, va);
1202 rid = rid & RR_RID_MASK;
1203 p_dtr = (struct thash_data *)&vcpu->arch.dtrs[slot];
1204 vcpu_set_tr((struct thash_data *)&vcpu->arch.dtrs[slot],
1205 pte, itir, va, rid);
1206 vcpu_quick_region_set(VMX(vcpu, dtr_regions), va);
1207}
1208
1209void vcpu_ptr_d(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
1210{
1211 int index;
1212 u64 va;
1213
1214 va = PAGEALIGN(ifa, ps);
1215 while ((index = vtr_find_overlap(vcpu, va, ps, D_TLB)) >= 0)
1216 vcpu->arch.dtrs[index].page_flags = 0;
1217
1218 thash_purge_entries(vcpu, va, ps);
1219}
1220
1221void vcpu_ptr_i(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
1222{
1223 int index;
1224 u64 va;
1225
1226 va = PAGEALIGN(ifa, ps);
1227 while ((index = vtr_find_overlap(vcpu, va, ps, I_TLB)) >= 0)
1228 vcpu->arch.itrs[index].page_flags = 0;
1229
1230 thash_purge_entries(vcpu, va, ps);
1231}
1232
1233void vcpu_ptc_l(struct kvm_vcpu *vcpu, u64 va, u64 ps)
1234{
1235 va = PAGEALIGN(va, ps);
1236 thash_purge_entries(vcpu, va, ps);
1237}
1238
1239void vcpu_ptc_e(struct kvm_vcpu *vcpu, u64 va)
1240{
1241 thash_purge_all(vcpu);
1242}
1243
1244void vcpu_ptc_ga(struct kvm_vcpu *vcpu, u64 va, u64 ps)
1245{
1246 struct exit_ctl_data *p = &vcpu->arch.exit_data;
1247 long psr;
1248 local_irq_save(psr);
1249 p->exit_reason = EXIT_REASON_PTC_G;
1250
1251 p->u.ptc_g_data.rr = vcpu_get_rr(vcpu, va);
1252 p->u.ptc_g_data.vaddr = va;
1253 p->u.ptc_g_data.ps = ps;
1254 vmm_transition(vcpu);
1255 /* Do Local Purge Here*/
1256 vcpu_ptc_l(vcpu, va, ps);
1257 local_irq_restore(psr);
1258}
1259
1260
1261void vcpu_ptc_g(struct kvm_vcpu *vcpu, u64 va, u64 ps)
1262{
1263 vcpu_ptc_ga(vcpu, va, ps);
1264}
1265
1266void kvm_ptc_e(struct kvm_vcpu *vcpu, INST64 inst)
1267{
1268 unsigned long ifa;
1269
1270 ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1271 vcpu_ptc_e(vcpu, ifa);
1272}
1273
1274void kvm_ptc_g(struct kvm_vcpu *vcpu, INST64 inst)
1275{
1276 unsigned long ifa, itir;
1277
1278 ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1279 itir = vcpu_get_gr(vcpu, inst.M45.r2);
1280 vcpu_ptc_g(vcpu, ifa, itir_ps(itir));
1281}
1282
1283void kvm_ptc_ga(struct kvm_vcpu *vcpu, INST64 inst)
1284{
1285 unsigned long ifa, itir;
1286
1287 ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1288 itir = vcpu_get_gr(vcpu, inst.M45.r2);
1289 vcpu_ptc_ga(vcpu, ifa, itir_ps(itir));
1290}
1291
1292void kvm_ptc_l(struct kvm_vcpu *vcpu, INST64 inst)
1293{
1294 unsigned long ifa, itir;
1295
1296 ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1297 itir = vcpu_get_gr(vcpu, inst.M45.r2);
1298 vcpu_ptc_l(vcpu, ifa, itir_ps(itir));
1299}
1300
1301void kvm_ptr_d(struct kvm_vcpu *vcpu, INST64 inst)
1302{
1303 unsigned long ifa, itir;
1304
1305 ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1306 itir = vcpu_get_gr(vcpu, inst.M45.r2);
1307 vcpu_ptr_d(vcpu, ifa, itir_ps(itir));
1308}
1309
1310void kvm_ptr_i(struct kvm_vcpu *vcpu, INST64 inst)
1311{
1312 unsigned long ifa, itir;
1313
1314 ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1315 itir = vcpu_get_gr(vcpu, inst.M45.r2);
1316 vcpu_ptr_i(vcpu, ifa, itir_ps(itir));
1317}
1318
1319void kvm_itr_d(struct kvm_vcpu *vcpu, INST64 inst)
1320{
1321 unsigned long itir, ifa, pte, slot;
1322
1323 slot = vcpu_get_gr(vcpu, inst.M45.r3);
1324 pte = vcpu_get_gr(vcpu, inst.M45.r2);
1325 itir = vcpu_get_itir(vcpu);
1326 ifa = vcpu_get_ifa(vcpu);
1327 vcpu_itr_d(vcpu, slot, pte, itir, ifa);
1328}
1329
1330
1331
1332void kvm_itr_i(struct kvm_vcpu *vcpu, INST64 inst)
1333{
1334 unsigned long itir, ifa, pte, slot;
1335
1336 slot = vcpu_get_gr(vcpu, inst.M45.r3);
1337 pte = vcpu_get_gr(vcpu, inst.M45.r2);
1338 itir = vcpu_get_itir(vcpu);
1339 ifa = vcpu_get_ifa(vcpu);
1340 vcpu_itr_i(vcpu, slot, pte, itir, ifa);
1341}
1342
1343void kvm_itc_d(struct kvm_vcpu *vcpu, INST64 inst)
1344{
1345 unsigned long itir, ifa, pte;
1346
1347 itir = vcpu_get_itir(vcpu);
1348 ifa = vcpu_get_ifa(vcpu);
1349 pte = vcpu_get_gr(vcpu, inst.M45.r2);
1350 vcpu_itc_d(vcpu, pte, itir, ifa);
1351}
1352
1353void kvm_itc_i(struct kvm_vcpu *vcpu, INST64 inst)
1354{
1355 unsigned long itir, ifa, pte;
1356
1357 itir = vcpu_get_itir(vcpu);
1358 ifa = vcpu_get_ifa(vcpu);
1359 pte = vcpu_get_gr(vcpu, inst.M45.r2);
1360 vcpu_itc_i(vcpu, pte, itir, ifa);
1361}
1362
1363/*************************************
1364 * Moves to semi-privileged registers
1365 *************************************/
1366
1367void kvm_mov_to_ar_imm(struct kvm_vcpu *vcpu, INST64 inst)
1368{
1369 unsigned long imm;
1370
1371 if (inst.M30.s)
1372 imm = -inst.M30.imm;
1373 else
1374 imm = inst.M30.imm;
1375
1376 vcpu_set_itc(vcpu, imm);
1377}
1378
1379void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
1380{
1381 unsigned long r2;
1382
1383 r2 = vcpu_get_gr(vcpu, inst.M29.r2);
1384 vcpu_set_itc(vcpu, r2);
1385}
1386
1387
1388void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
1389{
1390 unsigned long r1;
1391
1392 r1 = vcpu_get_itc(vcpu);
1393 vcpu_set_gr(vcpu, inst.M31.r1, r1, 0);
1394}
1395/**************************************************************************
1396 struct kvm_vcpu protection key register access routines
1397 **************************************************************************/
1398
1399unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg)
1400{
1401 return ((unsigned long)ia64_get_pkr(reg));
1402}
1403
1404void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val)
1405{
1406 ia64_set_pkr(reg, val);
1407}
1408
1409
1410unsigned long vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, unsigned long ifa)
1411{
1412 union ia64_rr rr, rr1;
1413
1414 rr.val = vcpu_get_rr(vcpu, ifa);
1415 rr1.val = 0;
1416 rr1.ps = rr.ps;
1417 rr1.rid = rr.rid;
1418 return (rr1.val);
1419}
1420
1421
1422
1423/********************************
1424 * Moves to privileged registers
1425 ********************************/
1426unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg,
1427 unsigned long val)
1428{
1429 union ia64_rr oldrr, newrr;
1430 unsigned long rrval;
1431 struct exit_ctl_data *p = &vcpu->arch.exit_data;
1432 unsigned long psr;
1433
1434 oldrr.val = vcpu_get_rr(vcpu, reg);
1435 newrr.val = val;
1436 vcpu->arch.vrr[reg >> VRN_SHIFT] = val;
1437
1438 switch ((unsigned long)(reg >> VRN_SHIFT)) {
1439 case VRN6:
1440 vcpu->arch.vmm_rr = vrrtomrr(val);
1441 local_irq_save(psr);
1442 p->exit_reason = EXIT_REASON_SWITCH_RR6;
1443 vmm_transition(vcpu);
1444 local_irq_restore(psr);
1445 break;
1446 case VRN4:
1447 rrval = vrrtomrr(val);
1448 vcpu->arch.metaphysical_saved_rr4 = rrval;
1449 if (!is_physical_mode(vcpu))
1450 ia64_set_rr(reg, rrval);
1451 break;
1452 case VRN0:
1453 rrval = vrrtomrr(val);
1454 vcpu->arch.metaphysical_saved_rr0 = rrval;
1455 if (!is_physical_mode(vcpu))
1456 ia64_set_rr(reg, rrval);
1457 break;
1458 default:
1459 ia64_set_rr(reg, vrrtomrr(val));
1460 break;
1461 }
1462
1463 return (IA64_NO_FAULT);
1464}
1465
1466
1467
1468void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst)
1469{
1470 unsigned long r3, r2;
1471
1472 r3 = vcpu_get_gr(vcpu, inst.M42.r3);
1473 r2 = vcpu_get_gr(vcpu, inst.M42.r2);
1474 vcpu_set_rr(vcpu, r3, r2);
1475}
1476
1477void kvm_mov_to_dbr(struct kvm_vcpu *vcpu, INST64 inst)
1478{
1479}
1480
1481void kvm_mov_to_ibr(struct kvm_vcpu *vcpu, INST64 inst)
1482{
1483}
1484
1485void kvm_mov_to_pmc(struct kvm_vcpu *vcpu, INST64 inst)
1486{
1487 unsigned long r3, r2;
1488
1489 r3 = vcpu_get_gr(vcpu, inst.M42.r3);
1490 r2 = vcpu_get_gr(vcpu, inst.M42.r2);
1491 vcpu_set_pmc(vcpu, r3, r2);
1492}
1493
1494void kvm_mov_to_pmd(struct kvm_vcpu *vcpu, INST64 inst)
1495{
1496 unsigned long r3, r2;
1497
1498 r3 = vcpu_get_gr(vcpu, inst.M42.r3);
1499 r2 = vcpu_get_gr(vcpu, inst.M42.r2);
1500 vcpu_set_pmd(vcpu, r3, r2);
1501}
1502
1503void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst)
1504{
1505 u64 r3, r2;
1506
1507 r3 = vcpu_get_gr(vcpu, inst.M42.r3);
1508 r2 = vcpu_get_gr(vcpu, inst.M42.r2);
1509 vcpu_set_pkr(vcpu, r3, r2);
1510}
1511
1512
1513
1514void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst)
1515{
1516 unsigned long r3, r1;
1517
1518 r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1519 r1 = vcpu_get_rr(vcpu, r3);
1520 vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1521}
1522
1523void kvm_mov_from_pkr(struct kvm_vcpu *vcpu, INST64 inst)
1524{
1525 unsigned long r3, r1;
1526
1527 r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1528 r1 = vcpu_get_pkr(vcpu, r3);
1529 vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1530}
1531
1532void kvm_mov_from_dbr(struct kvm_vcpu *vcpu, INST64 inst)
1533{
1534 unsigned long r3, r1;
1535
1536 r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1537 r1 = vcpu_get_dbr(vcpu, r3);
1538 vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1539}
1540
1541void kvm_mov_from_ibr(struct kvm_vcpu *vcpu, INST64 inst)
1542{
1543 unsigned long r3, r1;
1544
1545 r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1546 r1 = vcpu_get_ibr(vcpu, r3);
1547 vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1548}
1549
1550void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst)
1551{
1552 unsigned long r3, r1;
1553
1554 r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1555 r1 = vcpu_get_pmc(vcpu, r3);
1556 vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1557}
1558
1559
1560unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg)
1561{
1562 /* FIXME: This could get called as a result of a rsvd-reg fault */
1563 if (reg > (ia64_get_cpuid(3) & 0xff))
1564 return 0;
1565 else
1566 return ia64_get_cpuid(reg);
1567}
1568
1569void kvm_mov_from_cpuid(struct kvm_vcpu *vcpu, INST64 inst)
1570{
1571 unsigned long r3, r1;
1572
1573 r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1574 r1 = vcpu_get_cpuid(vcpu, r3);
1575 vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1576}
1577
1578void vcpu_set_tpr(struct kvm_vcpu *vcpu, unsigned long val)
1579{
1580 VCPU(vcpu, tpr) = val;
1581 vcpu->arch.irq_check = 1;
1582}
1583
1584unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst)
1585{
1586 unsigned long r2;
1587
1588 r2 = vcpu_get_gr(vcpu, inst.M32.r2);
1589 VCPU(vcpu, vcr[inst.M32.cr3]) = r2;
1590
1591 switch (inst.M32.cr3) {
1592 case 0:
1593 vcpu_set_dcr(vcpu, r2);
1594 break;
1595 case 1:
1596 vcpu_set_itm(vcpu, r2);
1597 break;
1598 case 66:
1599 vcpu_set_tpr(vcpu, r2);
1600 break;
1601 case 67:
1602 vcpu_set_eoi(vcpu, r2);
1603 break;
1604 default:
1605 break;
1606 }
1607
1608 return 0;
1609}
1610
1611
1612unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
1613{
1614 unsigned long tgt = inst.M33.r1;
1615 unsigned long val;
1616
1617 switch (inst.M33.cr3) {
1618 case 65:
1619 val = vcpu_get_ivr(vcpu);
1620 vcpu_set_gr(vcpu, tgt, val, 0);
1621 break;
1622
1623 case 67:
1624 vcpu_set_gr(vcpu, tgt, 0L, 0);
1625 break;
1626 default:
1627 val = VCPU(vcpu, vcr[inst.M33.cr3]);
1628 vcpu_set_gr(vcpu, tgt, val, 0);
1629 break;
1630 }
1631
1632 return 0;
1633}
1634
1635
1636
1637void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
1638{
1639
1640 unsigned long mask;
1641 struct kvm_pt_regs *regs;
1642 struct ia64_psr old_psr, new_psr;
1643
1644 old_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
1645
1646 regs = vcpu_regs(vcpu);
1647	/* We only support a guest with:
1648	 *  vpsr.pk = 0
1649	 *  vpsr.is = 0
1650	 * Otherwise, panic.
1651	 */
1652 if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM))
1653 panic_vm(vcpu);
1654
1655 /*
1656	 * For the IA64_PSR bits id/da/dd/ss/ed/ia: since these bits become 0
1657	 * after successful execution of each instruction, we track them in
1658	 * the machine PSR (mIA64_PSR) rather than setting them here.
1659 */
1660 VCPU(vcpu, vpsr) = val
1661 & (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |
1662 IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA));
1663
1664 if (!old_psr.i && (val & IA64_PSR_I)) {
1665 /* vpsr.i 0->1 */
1666 vcpu->arch.irq_check = 1;
1667 }
1668 new_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
1669
1670 /*
1671	 * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr),
1672	 * except for the following bits:
1673 * ic/i/dt/si/rt/mc/it/bn/vm
1674 */
1675 mask = IA64_PSR_IC + IA64_PSR_I + IA64_PSR_DT + IA64_PSR_SI +
1676 IA64_PSR_RT + IA64_PSR_MC + IA64_PSR_IT + IA64_PSR_BN +
1677 IA64_PSR_VM;
1678
1679 regs->cr_ipsr = (regs->cr_ipsr & mask) | (val & (~mask));
1680
1681 check_mm_mode_switch(vcpu, old_psr, new_psr);
1682
1683	return;
1684}
1685
1686unsigned long vcpu_cover(struct kvm_vcpu *vcpu)
1687{
1688 struct ia64_psr vpsr;
1689
1690 struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1691 vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
1692
1693 if (!vpsr.ic)
1694 VCPU(vcpu, ifs) = regs->cr_ifs;
1695 regs->cr_ifs = IA64_IFS_V;
1696 return (IA64_NO_FAULT);
1697}
1698
1699
1700
1701/**************************************************************************
1702 VCPU banked general register access routines
1703 **************************************************************************/
1704#define vcpu_bsw0_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT) \
1705 do { \
1706 __asm__ __volatile__ ( \
1707 ";;extr.u %0 = %3,%6,16;;\n" \
1708 "dep %1 = %0, %1, 0, 16;;\n" \
1709 "st8 [%4] = %1\n" \
1710 "extr.u %0 = %2, 16, 16;;\n" \
1711 "dep %3 = %0, %3, %6, 16;;\n" \
1712 "st8 [%5] = %3\n" \
1713 ::"r"(i), "r"(*b1unat), "r"(*b0unat), \
1714 "r"(*runat), "r"(b1unat), "r"(runat), \
1715 "i"(VMM_PT_REGS_R16_SLOT) : "memory"); \
1716 } while (0)
1717
1718void vcpu_bsw0(struct kvm_vcpu *vcpu)
1719{
1720 unsigned long i;
1721
1722 struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1723 unsigned long *r = &regs->r16;
1724 unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
1725 unsigned long *b1 = &VCPU(vcpu, vgr[0]);
1726 unsigned long *runat = &regs->eml_unat;
1727 unsigned long *b0unat = &VCPU(vcpu, vbnat);
1728 unsigned long *b1unat = &VCPU(vcpu, vnat);
1729
1730
1731 if (VCPU(vcpu, vpsr) & IA64_PSR_BN) {
1732 for (i = 0; i < 16; i++) {
1733 *b1++ = *r;
1734 *r++ = *b0++;
1735 }
1736 vcpu_bsw0_unat(i, b0unat, b1unat, runat,
1737 VMM_PT_REGS_R16_SLOT);
1738 VCPU(vcpu, vpsr) &= ~IA64_PSR_BN;
1739 }
1740}
1741
1742#define vcpu_bsw1_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT) \
1743 do { \
1744 __asm__ __volatile__ (";;extr.u %0 = %3, %6, 16;;\n" \
1745 "dep %1 = %0, %1, 16, 16;;\n" \
1746 "st8 [%4] = %1\n" \
1747 "extr.u %0 = %2, 0, 16;;\n" \
1748 "dep %3 = %0, %3, %6, 16;;\n" \
1749 "st8 [%5] = %3\n" \
1750 ::"r"(i), "r"(*b0unat), "r"(*b1unat), \
1751 "r"(*runat), "r"(b0unat), "r"(runat), \
1752 "i"(VMM_PT_REGS_R16_SLOT) : "memory"); \
1753 } while (0)
1754
1755void vcpu_bsw1(struct kvm_vcpu *vcpu)
1756{
1757 unsigned long i;
1758 struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1759 unsigned long *r = &regs->r16;
1760 unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
1761 unsigned long *b1 = &VCPU(vcpu, vgr[0]);
1762 unsigned long *runat = &regs->eml_unat;
1763 unsigned long *b0unat = &VCPU(vcpu, vbnat);
1764 unsigned long *b1unat = &VCPU(vcpu, vnat);
1765
1766 if (!(VCPU(vcpu, vpsr) & IA64_PSR_BN)) {
1767 for (i = 0; i < 16; i++) {
1768 *b0++ = *r;
1769 *r++ = *b1++;
1770 }
1771 vcpu_bsw1_unat(i, b0unat, b1unat, runat,
1772 VMM_PT_REGS_R16_SLOT);
1773 VCPU(vcpu, vpsr) |= IA64_PSR_BN;
1774 }
1775}
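/*
 * bsw.0/bsw.1 emulation: the two routines above swap r16-r31 between
 * the current frame and the vbgr (bank 0) / vgr (bank 1) save areas,
 * while the vcpu_bsw0_unat()/vcpu_bsw1_unat() asm swaps the matching
 * 16 NaT bits between eml_unat and vbnat/vnat.
 */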
1776
1777
1778
1779
1780void vcpu_rfi(struct kvm_vcpu *vcpu)
1781{
1782 unsigned long ifs, psr;
1783 struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1784
1785 psr = VCPU(vcpu, ipsr);
1786 if (psr & IA64_PSR_BN)
1787 vcpu_bsw1(vcpu);
1788 else
1789 vcpu_bsw0(vcpu);
1790 vcpu_set_psr(vcpu, psr);
1791 ifs = VCPU(vcpu, ifs);
1792 if (ifs >> 63)
1793 regs->cr_ifs = ifs;
1794 regs->cr_iip = VCPU(vcpu, iip);
1795}
1796
1797
1798/*
1799    VPSR can't keep track of the guest PSR bits listed below;
1800    this function reconstructs the complete guest PSR
1801 */
1802
1803unsigned long vcpu_get_psr(struct kvm_vcpu *vcpu)
1804{
1805 unsigned long mask;
1806 struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1807
1808 mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
1809 IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI;
1810 return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask);
1811}
1812
1813void kvm_rsm(struct kvm_vcpu *vcpu, INST64 inst)
1814{
1815 unsigned long vpsr;
1816 unsigned long imm24 = (inst.M44.i<<23) | (inst.M44.i2<<21)
1817 | inst.M44.imm;
1818
1819 vpsr = vcpu_get_psr(vcpu);
1820 vpsr &= (~imm24);
1821 vcpu_set_psr(vcpu, vpsr);
1822}
1823
1824void kvm_ssm(struct kvm_vcpu *vcpu, INST64 inst)
1825{
1826 unsigned long vpsr;
1827 unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21)
1828 | inst.M44.imm;
1829
1830 vpsr = vcpu_get_psr(vcpu);
1831 vpsr |= imm24;
1832 vcpu_set_psr(vcpu, vpsr);
1833}
1834
1835/* Generate Mask
1836 * Parameter:
1837 * bit -- starting bit
1838 * len -- how many bits
1839 */
1840#define MASK(bit,len) \
1841({ \
1842 __u64 ret; \
1843 \
1844 __asm __volatile("dep %0=-1, r0, %1, %2"\
1845 : "=r" (ret): \
1846 "M" (bit), \
1847 "M" (len)); \
1848 ret; \
1849})
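/*
 * The dep instruction deposits len set bits starting at position bit,
 * so a portable C sketch of MASK(bit, len) would be:
 *
 *	((len) < 64 ? ((1UL << (len)) - 1) << (bit) : ~0UL)
 */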
1850
1851void vcpu_set_psr_l(struct kvm_vcpu *vcpu, unsigned long val)
1852{
1853 val = (val & MASK(0, 32)) | (vcpu_get_psr(vcpu) & MASK(32, 32));
1854 vcpu_set_psr(vcpu, val);
1855}
1856
1857void kvm_mov_to_psr(struct kvm_vcpu *vcpu, INST64 inst)
1858{
1859 unsigned long val;
1860
1861 val = vcpu_get_gr(vcpu, inst.M35.r2);
1862 vcpu_set_psr_l(vcpu, val);
1863}
1864
1865void kvm_mov_from_psr(struct kvm_vcpu *vcpu, INST64 inst)
1866{
1867 unsigned long val;
1868
1869 val = vcpu_get_psr(vcpu);
1870 val = (val & MASK(0, 32)) | (val & MASK(35, 2));
1871 vcpu_set_gr(vcpu, inst.M33.r1, val, 0);
1872}
1873
1874void vcpu_increment_iip(struct kvm_vcpu *vcpu)
1875{
1876 struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1877 struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
1878 if (ipsr->ri == 2) {
1879 ipsr->ri = 0;
1880 regs->cr_iip += 16;
1881 } else
1882 ipsr->ri++;
1883}
1884
1885void vcpu_decrement_iip(struct kvm_vcpu *vcpu)
1886{
1887 struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1888 struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
1889
1890 if (ipsr->ri == 0) {
1891 ipsr->ri = 2;
1892 regs->cr_iip -= 16;
1893 } else
1894 ipsr->ri--;
1895}
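/*
 * IA-64 fetches instructions in 16-byte bundles of three slots;
 * cr.ipsr.ri selects the slot (0-2) within the bundle at cr.iip, so
 * stepping past slot 2 (or back before slot 0) moves iip by a whole
 * bundle, as the two helpers above do.
 */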
1896
1897/** Emulate a privileged operation.
1898 *
1899 *
1900 * @param vcpu virtual cpu
1901 * @cause the reason that caused the virtualization fault
1902 * @opcode the instruction code that caused the virtualization fault
1903 */
1904
1905void kvm_emulate(struct kvm_vcpu *vcpu, struct kvm_pt_regs *regs)
1906{
1907	unsigned long status, cause, opcode;
1908 INST64 inst;
1909
1910 status = IA64_NO_FAULT;
1911 cause = VMX(vcpu, cause);
1912 opcode = VMX(vcpu, opcode);
1913 inst.inst = opcode;
1914 /*
1915 * Switch to actual virtual rid in rr0 and rr4,
1916 * which is required by some tlb related instructions.
1917 */
1918 prepare_if_physical_mode(vcpu);
1919
1920 switch (cause) {
1921 case EVENT_RSM:
1922 kvm_rsm(vcpu, inst);
1923 break;
1924 case EVENT_SSM:
1925 kvm_ssm(vcpu, inst);
1926 break;
1927 case EVENT_MOV_TO_PSR:
1928 kvm_mov_to_psr(vcpu, inst);
1929 break;
1930 case EVENT_MOV_FROM_PSR:
1931 kvm_mov_from_psr(vcpu, inst);
1932 break;
1933 case EVENT_MOV_FROM_CR:
1934 kvm_mov_from_cr(vcpu, inst);
1935 break;
1936 case EVENT_MOV_TO_CR:
1937 kvm_mov_to_cr(vcpu, inst);
1938 break;
1939 case EVENT_BSW_0:
1940 vcpu_bsw0(vcpu);
1941 break;
1942 case EVENT_BSW_1:
1943 vcpu_bsw1(vcpu);
1944 break;
1945 case EVENT_COVER:
1946 vcpu_cover(vcpu);
1947 break;
1948 case EVENT_RFI:
1949 vcpu_rfi(vcpu);
1950 break;
1951 case EVENT_ITR_D:
1952 kvm_itr_d(vcpu, inst);
1953 break;
1954 case EVENT_ITR_I:
1955 kvm_itr_i(vcpu, inst);
1956 break;
1957 case EVENT_PTR_D:
1958 kvm_ptr_d(vcpu, inst);
1959 break;
1960 case EVENT_PTR_I:
1961 kvm_ptr_i(vcpu, inst);
1962 break;
1963 case EVENT_ITC_D:
1964 kvm_itc_d(vcpu, inst);
1965 break;
1966 case EVENT_ITC_I:
1967 kvm_itc_i(vcpu, inst);
1968 break;
1969 case EVENT_PTC_L:
1970 kvm_ptc_l(vcpu, inst);
1971 break;
1972 case EVENT_PTC_G:
1973 kvm_ptc_g(vcpu, inst);
1974 break;
1975 case EVENT_PTC_GA:
1976 kvm_ptc_ga(vcpu, inst);
1977 break;
1978 case EVENT_PTC_E:
1979 kvm_ptc_e(vcpu, inst);
1980 break;
1981 case EVENT_MOV_TO_RR:
1982 kvm_mov_to_rr(vcpu, inst);
1983 break;
1984 case EVENT_MOV_FROM_RR:
1985 kvm_mov_from_rr(vcpu, inst);
1986 break;
1987 case EVENT_THASH:
1988 kvm_thash(vcpu, inst);
1989 break;
1990 case EVENT_TTAG:
1991 kvm_ttag(vcpu, inst);
1992 break;
1993 case EVENT_TPA:
1994 status = kvm_tpa(vcpu, inst);
1995 break;
1996 case EVENT_TAK:
1997 kvm_tak(vcpu, inst);
1998 break;
1999 case EVENT_MOV_TO_AR_IMM:
2000 kvm_mov_to_ar_imm(vcpu, inst);
2001 break;
2002 case EVENT_MOV_TO_AR:
2003 kvm_mov_to_ar_reg(vcpu, inst);
2004 break;
2005 case EVENT_MOV_FROM_AR:
2006 kvm_mov_from_ar_reg(vcpu, inst);
2007 break;
2008 case EVENT_MOV_TO_DBR:
2009 kvm_mov_to_dbr(vcpu, inst);
2010 break;
2011 case EVENT_MOV_TO_IBR:
2012 kvm_mov_to_ibr(vcpu, inst);
2013 break;
2014 case EVENT_MOV_TO_PMC:
2015 kvm_mov_to_pmc(vcpu, inst);
2016 break;
2017 case EVENT_MOV_TO_PMD:
2018 kvm_mov_to_pmd(vcpu, inst);
2019 break;
2020 case EVENT_MOV_TO_PKR:
2021 kvm_mov_to_pkr(vcpu, inst);
2022 break;
2023 case EVENT_MOV_FROM_DBR:
2024 kvm_mov_from_dbr(vcpu, inst);
2025 break;
2026 case EVENT_MOV_FROM_IBR:
2027 kvm_mov_from_ibr(vcpu, inst);
2028 break;
2029 case EVENT_MOV_FROM_PMC:
2030 kvm_mov_from_pmc(vcpu, inst);
2031 break;
2032 case EVENT_MOV_FROM_PKR:
2033 kvm_mov_from_pkr(vcpu, inst);
2034 break;
2035 case EVENT_MOV_FROM_CPUID:
2036 kvm_mov_from_cpuid(vcpu, inst);
2037 break;
2038 case EVENT_VMSW:
2039 status = IA64_FAULT;
2040 break;
2041 default:
2042 break;
2043	}
2044	/* Assume all other causes leave status == NO_FAULT? */
2045 if (status == IA64_NO_FAULT && cause != EVENT_RFI)
2046 vcpu_increment_iip(vcpu);
2047
2048 recover_if_physical_mode(vcpu);
2049}
2050
2051void init_vcpu(struct kvm_vcpu *vcpu)
2052{
2053 int i;
2054
2055 vcpu->arch.mode_flags = GUEST_IN_PHY;
2056 VMX(vcpu, vrr[0]) = 0x38;
2057 VMX(vcpu, vrr[1]) = 0x38;
2058 VMX(vcpu, vrr[2]) = 0x38;
2059 VMX(vcpu, vrr[3]) = 0x38;
2060 VMX(vcpu, vrr[4]) = 0x38;
2061 VMX(vcpu, vrr[5]) = 0x38;
2062 VMX(vcpu, vrr[6]) = 0x38;
2063 VMX(vcpu, vrr[7]) = 0x38;
2064 VCPU(vcpu, vpsr) = IA64_PSR_BN;
2065 VCPU(vcpu, dcr) = 0;
2066 /* pta.size must not be 0. The minimum is 15 (32k) */
2067 VCPU(vcpu, pta) = 15 << 2;
2068 VCPU(vcpu, itv) = 0x10000;
2069 VCPU(vcpu, itm) = 0;
2070 VMX(vcpu, last_itc) = 0;
2071
2072 VCPU(vcpu, lid) = VCPU_LID(vcpu);
2073 VCPU(vcpu, ivr) = 0;
2074 VCPU(vcpu, tpr) = 0x10000;
2075 VCPU(vcpu, eoi) = 0;
2076 VCPU(vcpu, irr[0]) = 0;
2077 VCPU(vcpu, irr[1]) = 0;
2078 VCPU(vcpu, irr[2]) = 0;
2079 VCPU(vcpu, irr[3]) = 0;
2080 VCPU(vcpu, pmv) = 0x10000;
2081 VCPU(vcpu, cmcv) = 0x10000;
2082 VCPU(vcpu, lrr0) = 0x10000; /* default reset value? */
2083 VCPU(vcpu, lrr1) = 0x10000; /* default reset value? */
2084 update_vhpi(vcpu, NULL_VECTOR);
2085 VLSAPIC_XTP(vcpu) = 0x80; /* disabled */
2086
2087 for (i = 0; i < 4; i++)
2088 VLSAPIC_INSVC(vcpu, i) = 0;
2089}
2090
2091void kvm_init_all_rr(struct kvm_vcpu *vcpu)
2092{
2093 unsigned long psr;
2094
2095 local_irq_save(psr);
2096
2097	/* WARNING: virtual mode and physical mode must not be allowed
2098	 * to co-exist in the same region
2099 */
2100
2101 vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(VMX(vcpu, vrr[VRN0]));
2102 vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(VMX(vcpu, vrr[VRN4]));
2103
2104 if (is_physical_mode(vcpu)) {
2105 if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
2106 panic_vm(vcpu);
2107
2108 ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0);
2109 ia64_dv_serialize_data();
2110 ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rr4);
2111 ia64_dv_serialize_data();
2112 } else {
2113 ia64_set_rr((VRN0 << VRN_SHIFT),
2114 vcpu->arch.metaphysical_saved_rr0);
2115 ia64_dv_serialize_data();
2116 ia64_set_rr((VRN4 << VRN_SHIFT),
2117 vcpu->arch.metaphysical_saved_rr4);
2118 ia64_dv_serialize_data();
2119 }
2120 ia64_set_rr((VRN1 << VRN_SHIFT),
2121 vrrtomrr(VMX(vcpu, vrr[VRN1])));
2122 ia64_dv_serialize_data();
2123 ia64_set_rr((VRN2 << VRN_SHIFT),
2124 vrrtomrr(VMX(vcpu, vrr[VRN2])));
2125 ia64_dv_serialize_data();
2126 ia64_set_rr((VRN3 << VRN_SHIFT),
2127 vrrtomrr(VMX(vcpu, vrr[VRN3])));
2128 ia64_dv_serialize_data();
2129 ia64_set_rr((VRN5 << VRN_SHIFT),
2130 vrrtomrr(VMX(vcpu, vrr[VRN5])));
2131 ia64_dv_serialize_data();
2132 ia64_set_rr((VRN7 << VRN_SHIFT),
2133 vrrtomrr(VMX(vcpu, vrr[VRN7])));
2134 ia64_dv_serialize_data();
2135 ia64_srlz_d();
2136 ia64_set_psr(psr);
2137}
2138
2139int vmm_entry(void)
2140{
2141 struct kvm_vcpu *v;
2142 v = current_vcpu;
2143
2144 ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)v->arch.vpd,
2145 0, 0, 0, 0, 0, 0);
2146 kvm_init_vtlb(v);
2147 kvm_init_vhpt(v);
2148 init_vcpu(v);
2149 kvm_init_all_rr(v);
2150 vmm_reset_entry();
2151
2152 return 0;
2153}
2154
2155void panic_vm(struct kvm_vcpu *v)
2156{
2157 struct exit_ctl_data *p = &v->arch.exit_data;
2158
2159 p->exit_reason = EXIT_REASON_VM_PANIC;
2160 vmm_transition(v);
2161	/* Never returns */
2162 while (1);
2163}
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h
new file mode 100644
index 000000000000..b0fcfb62c49e
--- /dev/null
+++ b/arch/ia64/kvm/vcpu.h
@@ -0,0 +1,740 @@
1/*
2 * vcpu.h: vcpu routines
3 * Copyright (c) 2005, Intel Corporation.
4 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
5 * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
6 *
7 * Copyright (c) 2007, Intel Corporation.
8 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
9 * Xiantao Zhang (xiantao.zhang@intel.com)
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms and conditions of the GNU General Public License,
13 * version 2, as published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * more details.
19 *
20 * You should have received a copy of the GNU General Public License along with
21 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
22 * Place - Suite 330, Boston, MA 02111-1307 USA.
23 *
24 */
25
26
27#ifndef __KVM_VCPU_H__
28#define __KVM_VCPU_H__
29
30#include <asm/types.h>
31#include <asm/fpu.h>
32#include <asm/processor.h>
33
34#ifndef __ASSEMBLY__
35#include "vti.h"
36
37#include <linux/kvm_host.h>
38#include <linux/spinlock.h>
39
40typedef unsigned long IA64_INST;
41
42typedef union U_IA64_BUNDLE {
43 unsigned long i64[2];
44 struct { unsigned long template:5, slot0:41, slot1a:18,
45 slot1b:23, slot2:41; };
46 /* NOTE: following doesn't work because bitfields can't cross natural
47 size boundaries
48 struct { unsigned long template:5, slot0:41, slot1:41, slot2:41; }; */
49} IA64_BUNDLE;
50
51typedef union U_INST64_A5 {
52 IA64_INST inst;
53 struct { unsigned long qp:6, r1:7, imm7b:7, r3:2, imm5c:5,
54 imm9d:9, s:1, major:4; };
55} INST64_A5;
56
57typedef union U_INST64_B4 {
58 IA64_INST inst;
59 struct { unsigned long qp:6, btype:3, un3:3, p:1, b2:3, un11:11, x6:6,
60 wh:2, d:1, un1:1, major:4; };
61} INST64_B4;
62
63typedef union U_INST64_B8 {
64 IA64_INST inst;
65 struct { unsigned long qp:6, un21:21, x6:6, un4:4, major:4; };
66} INST64_B8;
67
68typedef union U_INST64_B9 {
69 IA64_INST inst;
70 struct { unsigned long qp:6, imm20:20, :1, x6:6, :3, i:1, major:4; };
71} INST64_B9;
72
73typedef union U_INST64_I19 {
74 IA64_INST inst;
75 struct { unsigned long qp:6, imm20:20, :1, x6:6, x3:3, i:1, major:4; };
76} INST64_I19;
77
78typedef union U_INST64_I26 {
79 IA64_INST inst;
80 struct { unsigned long qp:6, :7, r2:7, ar3:7, x6:6, x3:3, :1, major:4; };
81} INST64_I26;
82
83typedef union U_INST64_I27 {
84 IA64_INST inst;
85 struct { unsigned long qp:6, :7, imm:7, ar3:7, x6:6, x3:3, s:1, major:4; };
86} INST64_I27;
87
88typedef union U_INST64_I28 { /* not privileged (mov from AR) */
89 IA64_INST inst;
90 struct { unsigned long qp:6, r1:7, :7, ar3:7, x6:6, x3:3, :1, major:4; };
91} INST64_I28;
92
93typedef union U_INST64_M28 {
94 IA64_INST inst;
95 struct { unsigned long qp:6, :14, r3:7, x6:6, x3:3, :1, major:4; };
96} INST64_M28;
97
98typedef union U_INST64_M29 {
99 IA64_INST inst;
100 struct { unsigned long qp:6, :7, r2:7, ar3:7, x6:6, x3:3, :1, major:4; };
101} INST64_M29;
102
103typedef union U_INST64_M30 {
104 IA64_INST inst;
105 struct { unsigned long qp:6, :7, imm:7, ar3:7, x4:4, x2:2,
106 x3:3, s:1, major:4; };
107} INST64_M30;
108
109typedef union U_INST64_M31 {
110 IA64_INST inst;
111 struct { unsigned long qp:6, r1:7, :7, ar3:7, x6:6, x3:3, :1, major:4; };
112} INST64_M31;
113
114typedef union U_INST64_M32 {
115 IA64_INST inst;
116 struct { unsigned long qp:6, :7, r2:7, cr3:7, x6:6, x3:3, :1, major:4; };
117} INST64_M32;
118
119typedef union U_INST64_M33 {
120 IA64_INST inst;
121 struct { unsigned long qp:6, r1:7, :7, cr3:7, x6:6, x3:3, :1, major:4; };
122} INST64_M33;
123
124typedef union U_INST64_M35 {
125 IA64_INST inst;
126 struct { unsigned long qp:6, :7, r2:7, :7, x6:6, x3:3, :1, major:4; };
127
128} INST64_M35;
129
130typedef union U_INST64_M36 {
131 IA64_INST inst;
132 struct { unsigned long qp:6, r1:7, :14, x6:6, x3:3, :1, major:4; };
133} INST64_M36;
134
135typedef union U_INST64_M37 {
136 IA64_INST inst;
137 struct { unsigned long qp:6, imm20a:20, :1, x4:4, x2:2, x3:3,
138 i:1, major:4; };
139} INST64_M37;
140
141typedef union U_INST64_M41 {
142 IA64_INST inst;
143 struct { unsigned long qp:6, :7, r2:7, :7, x6:6, x3:3, :1, major:4; };
144} INST64_M41;
145
146typedef union U_INST64_M42 {
147 IA64_INST inst;
148 struct { unsigned long qp:6, :7, r2:7, r3:7, x6:6, x3:3, :1, major:4; };
149} INST64_M42;
150
151typedef union U_INST64_M43 {
152 IA64_INST inst;
153 struct { unsigned long qp:6, r1:7, :7, r3:7, x6:6, x3:3, :1, major:4; };
154} INST64_M43;
155
156typedef union U_INST64_M44 {
157 IA64_INST inst;
158 struct { unsigned long qp:6, imm:21, x4:4, i2:2, x3:3, i:1, major:4; };
159} INST64_M44;
160
161typedef union U_INST64_M45 {
162 IA64_INST inst;
163 struct { unsigned long qp:6, :7, r2:7, r3:7, x6:6, x3:3, :1, major:4; };
164} INST64_M45;
165
166typedef union U_INST64_M46 {
167 IA64_INST inst;
168 struct { unsigned long qp:6, r1:7, un7:7, r3:7, x6:6,
169 x3:3, un1:1, major:4; };
170} INST64_M46;
171
172typedef union U_INST64_M47 {
173 IA64_INST inst;
174 struct { unsigned long qp:6, un14:14, r3:7, x6:6, x3:3, un1:1, major:4; };
175} INST64_M47;
176
177typedef union U_INST64_M1{
178 IA64_INST inst;
179 struct { unsigned long qp:6, r1:7, un7:7, r3:7, x:1, hint:2,
180 x6:6, m:1, major:4; };
181} INST64_M1;
182
183typedef union U_INST64_M2{
184 IA64_INST inst;
185 struct { unsigned long qp:6, r1:7, r2:7, r3:7, x:1, hint:2,
186 x6:6, m:1, major:4; };
187} INST64_M2;
188
189typedef union U_INST64_M3{
190 IA64_INST inst;
191 struct { unsigned long qp:6, r1:7, imm7:7, r3:7, i:1, hint:2,
192 x6:6, s:1, major:4; };
193} INST64_M3;
194
195typedef union U_INST64_M4 {
196 IA64_INST inst;
197 struct { unsigned long qp:6, un7:7, r2:7, r3:7, x:1, hint:2,
198 x6:6, m:1, major:4; };
199} INST64_M4;
200
201typedef union U_INST64_M5 {
202 IA64_INST inst;
203 struct { unsigned long qp:6, imm7:7, r2:7, r3:7, i:1, hint:2,
204 x6:6, s:1, major:4; };
205} INST64_M5;
206
207typedef union U_INST64_M6 {
208 IA64_INST inst;
209 struct { unsigned long qp:6, f1:7, un7:7, r3:7, x:1, hint:2,
210 x6:6, m:1, major:4; };
211} INST64_M6;
212
213typedef union U_INST64_M9 {
214 IA64_INST inst;
215 struct { unsigned long qp:6, :7, f2:7, r3:7, x:1, hint:2,
216 x6:6, m:1, major:4; };
217} INST64_M9;
218
219typedef union U_INST64_M10 {
220 IA64_INST inst;
221 struct { unsigned long qp:6, imm7:7, f2:7, r3:7, i:1, hint:2,
222 x6:6, s:1, major:4; };
223} INST64_M10;
224
225typedef union U_INST64_M12 {
226 IA64_INST inst;
227 struct { unsigned long qp:6, f1:7, f2:7, r3:7, x:1, hint:2,
228 x6:6, m:1, major:4; };
229} INST64_M12;
230
231typedef union U_INST64_M15 {
232 IA64_INST inst;
233 struct { unsigned long qp:6, :7, imm7:7, r3:7, i:1, hint:2,
234 x6:6, s:1, major:4; };
235} INST64_M15;
236
237typedef union U_INST64 {
238 IA64_INST inst;
239 struct { unsigned long :37, major:4; } generic;
240 INST64_A5 A5; /* used in build_hypercall_bundle only */
241 INST64_B4 B4; /* used in build_hypercall_bundle only */
242 INST64_B8 B8; /* rfi, bsw.[01] */
243 INST64_B9 B9; /* break.b */
244 INST64_I19 I19; /* used in build_hypercall_bundle only */
245 INST64_I26 I26; /* mov register to ar (I unit) */
246 INST64_I27 I27; /* mov immediate to ar (I unit) */
247 INST64_I28 I28; /* mov from ar (I unit) */
248 INST64_M1 M1; /* ld integer */
249 INST64_M2 M2;
250 INST64_M3 M3;
251 INST64_M4 M4; /* st integer */
252 INST64_M5 M5;
 253 INST64_M6 M6; /* ldfd floating point */
 254 INST64_M9 M9; /* stfd floating point */
 255 INST64_M10 M10; /* stfd floating point */
 256 INST64_M12 M12; /* ldfd pair floating point */
257 INST64_M15 M15; /* lfetch + imm update */
258 INST64_M28 M28; /* purge translation cache entry */
259 INST64_M29 M29; /* mov register to ar (M unit) */
260 INST64_M30 M30; /* mov immediate to ar (M unit) */
261 INST64_M31 M31; /* mov from ar (M unit) */
262 INST64_M32 M32; /* mov reg to cr */
263 INST64_M33 M33; /* mov from cr */
264 INST64_M35 M35; /* mov to psr */
265 INST64_M36 M36; /* mov from psr */
266 INST64_M37 M37; /* break.m */
267 INST64_M41 M41; /* translation cache insert */
268 INST64_M42 M42; /* mov to indirect reg/translation reg insert*/
269 INST64_M43 M43; /* mov from indirect reg */
270 INST64_M44 M44; /* set/reset system mask */
271 INST64_M45 M45; /* translation purge */
272 INST64_M46 M46; /* translation access (tpa,tak) */
273 INST64_M47 M47; /* purge translation entry */
274} INST64;
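
All of these formats share the qp and major fields, so the emulator can load a raw slot into the union once and dispatch on the 4-bit major opcode before touching format-specific fields. A sketch (hypothetical helper, not in this patch):

    static inline int inst_major_opcode(IA64_INST raw)
    {
            INST64 inst;

            inst.inst = raw;
            return inst.generic.major;  /* top 4 bits (37..40) of the slot */
    }
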
275
276#define MASK_41 ((unsigned long)0x1ffffffffff)
277
278/* Virtual address memory attributes encoding */
279#define VA_MATTR_WB 0x0
280#define VA_MATTR_UC 0x4
281#define VA_MATTR_UCE 0x5
282#define VA_MATTR_WC 0x6
283#define VA_MATTR_NATPAGE 0x7
284
285#define PMASK(size) (~((size) - 1))
286#define PSIZE(size) (1UL<<(size))
287#define CLEARLSB(ppn, nbits) (((ppn) >> (nbits)) << (nbits))
288#define PAGEALIGN(va, ps) CLEARLSB(va, ps)
289#define PAGE_FLAGS_RV_MASK (0x2|(0x3UL<<50)|(((1UL<<11)-1)<<53))
 290#define _PAGE_MA_ST (0x1 << 2) /* memory attribute value reserved for software use */
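
A worked example of the macros above, assuming a 16KB page (ps == 14):

    unsigned long ps   = 14;                            /* assumed     */
    unsigned long size = PSIZE(ps);                     /* 0x4000      */
    unsigned long mask = PMASK(size);                   /* ~0x3fffUL   */
    unsigned long base = PAGEALIGN(0x123456789UL, ps);  /* 0x123454000 */
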
291
292#define ARCH_PAGE_SHIFT 12
293
294#define INVALID_TI_TAG (1UL << 63)
295
296#define VTLB_PTE_P_BIT 0
297#define VTLB_PTE_IO_BIT 60
298#define VTLB_PTE_IO (1UL<<VTLB_PTE_IO_BIT)
299#define VTLB_PTE_P (1UL<<VTLB_PTE_P_BIT)
300
301#define vcpu_quick_region_check(_tr_regions,_ifa) \
302 (_tr_regions & (1 << ((unsigned long)_ifa >> 61)))
303
304#define vcpu_quick_region_set(_tr_regions,_ifa) \
305 do {_tr_regions |= (1 << ((unsigned long)_ifa >> 61)); } while (0)
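
These two macros maintain a one-bit-per-region summary (the region number is the top three bits of the faulting address), so the miss path only does a full translation-register search for regions that actually hold entries. A usage sketch with assumed local variables:

    unsigned long tr_regions = 0;              /* assumed vcpu-side bitmap */
    unsigned long ifa = 0xe000000000001000UL;  /* a region-7 address       */

    vcpu_quick_region_set(tr_regions, ifa);    /* marks bit 7 */
    if (vcpu_quick_region_check(tr_regions, ifa))
            ;                                  /* only now search the TRs */
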
306
307static inline void vcpu_set_tr(struct thash_data *trp, u64 pte, u64 itir,
308 u64 va, u64 rid)
309{
310 trp->page_flags = pte;
311 trp->itir = itir;
312 trp->vadr = va;
313 trp->rid = rid;
314}
315
316extern u64 kvm_lookup_mpa(u64 gpfn);
317extern u64 kvm_gpa_to_mpa(u64 gpa);
318
 319/* Return the I/O type if true */
320#define __gpfn_is_io(gpfn) \
321 ({ \
322 u64 pte, ret = 0; \
323 pte = kvm_lookup_mpa(gpfn); \
324 if (!(pte & GPFN_INV_MASK)) \
325 ret = pte & GPFN_IO_MASK; \
326 ret; \
327 })
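
A usage sketch (the gpa variable and the shift are assumptions of this example): frames flagged as I/O in the lookup table are diverted to MMIO emulation instead of being mapped:

    u64 gpfn = gpa >> ARCH_PAGE_SHIFT;  /* 'gpa' assumed from the caller */

    if (__gpfn_is_io(gpfn))
            ;                           /* hand off to the MMIO emulator */
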
328
329#endif
330
331#define IA64_NO_FAULT 0
332#define IA64_FAULT 1
333
334#define VMM_RBS_OFFSET ((VMM_TASK_SIZE + 15) & ~15)
335
 336#define SW_BAD 0 /* Bad mode transition */
 337#define SW_V2P 1 /* Physical mode emulation is activated */
338#define SW_P2V 2 /* Exit physical mode emulation */
339#define SW_SELF 3 /* No mode transition */
 340#define SW_NOP 4 /* Mode transition, but no action required */
341
342#define GUEST_IN_PHY 0x1
343#define GUEST_PHY_EMUL 0x2
344
345#define current_vcpu ((struct kvm_vcpu *) ia64_getreg(_IA64_REG_TP))
346
347#define VRN_SHIFT 61
348#define VRN_MASK 0xe000000000000000
349#define VRN0 0x0UL
350#define VRN1 0x1UL
351#define VRN2 0x2UL
352#define VRN3 0x3UL
353#define VRN4 0x4UL
354#define VRN5 0x5UL
355#define VRN6 0x6UL
356#define VRN7 0x7UL
357
358#define IRQ_NO_MASKED 0
359#define IRQ_MASKED_BY_VTPR 1
360#define IRQ_MASKED_BY_INSVC 2 /* masked by inservice IRQ */
361
362#define PTA_BASE_SHIFT 15
363
364#define IA64_PSR_VM_BIT 46
365#define IA64_PSR_VM (__IA64_UL(1) << IA64_PSR_VM_BIT)
366
367/* Interruption Function State */
368#define IA64_IFS_V_BIT 63
369#define IA64_IFS_V (__IA64_UL(1) << IA64_IFS_V_BIT)
370
371#define PHY_PAGE_UC (_PAGE_A|_PAGE_D|_PAGE_P|_PAGE_MA_UC|_PAGE_AR_RWX)
372#define PHY_PAGE_WB (_PAGE_A|_PAGE_D|_PAGE_P|_PAGE_MA_WB|_PAGE_AR_RWX)
373
374#ifndef __ASSEMBLY__
375
376#include <asm/gcc_intrin.h>
377
378#define is_physical_mode(v) \
379 ((v->arch.mode_flags) & GUEST_IN_PHY)
380
381#define is_virtual_mode(v) \
382 (!is_physical_mode(v))
383
384#define MODE_IND(psr) \
385 (((psr).it << 2) + ((psr).dt << 1) + (psr).rt)
386
387#define _vmm_raw_spin_lock(x) \
388 do { \
389 __u32 *ia64_spinlock_ptr = (__u32 *) (x); \
390 __u64 ia64_spinlock_val; \
391 ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);\
392 if (unlikely(ia64_spinlock_val)) { \
393 do { \
394 while (*ia64_spinlock_ptr) \
395 ia64_barrier(); \
396 ia64_spinlock_val = \
397 ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);\
398 } while (ia64_spinlock_val); \
399 } \
400 } while (0)
401
402#define _vmm_raw_spin_unlock(x) \
403 do { barrier(); \
404 ((spinlock_t *)x)->raw_lock.lock = 0; } \
405while (0)
406
407void vmm_spin_lock(spinlock_t *lock);
408void vmm_spin_unlock(spinlock_t *lock);
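
The VMM runs where the host spinlock API is unavailable, so the macro above open-codes an acquire: ia64_cmpxchg4_acq returns the previous value, so a nonzero result means the lock was held, and the inner loop spins on plain reads before retrying the cmpxchg. A hedged usage sketch (demo lock assumed zero-initialized, i.e. unlocked):

    static spinlock_t vmm_demo_lock;         /* assumption: 0 == unlocked */

    static void vmm_demo(void)
    {
            vmm_spin_lock(&vmm_demo_lock);   /* cmpxchg4.acq 0 -> 1 loop */
            /* ... critical section ... */
            vmm_spin_unlock(&vmm_demo_lock); /* barrier(), then store 0  */
    }
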
409enum {
410 I_TLB = 1,
411 D_TLB = 2
412};
413
414union kvm_va {
415 struct {
416 unsigned long off : 60; /* intra-region offset */
417 unsigned long reg : 4; /* region number */
418 } f;
419 unsigned long l;
420 void *p;
421};
422
423#define __kvm_pa(x) ({union kvm_va _v; _v.l = (long) (x); \
424 _v.f.reg = 0; _v.l; })
425#define __kvm_va(x) ({union kvm_va _v; _v.l = (long) (x); \
426 _v.f.reg = -1; _v.p; })
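
A worked example: __kvm_pa clears the top four (region) bits and __kvm_va sets them all, so VMM pointers live in the reg == 0xf window (the 1:1 layout is an assumption of this sketch):

    unsigned long pa = __kvm_pa(0xf000000000123000UL); /* 0x123000           */
    void *va = __kvm_va(pa);                           /* 0xf000000000123000 */
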
427
428#define _REGION_ID(x) ({union ia64_rr _v; _v.val = (long)(x); \
429 _v.rid; })
430#define _REGION_PAGE_SIZE(x) ({union ia64_rr _v; _v.val = (long)(x); \
431 _v.ps; })
432#define _REGION_HW_WALKER(x) ({union ia64_rr _v; _v.val = (long)(x); \
433 _v.ve; })
434
 435enum vhpt_ref { DATA_REF, NA_REF, INST_REF, RSE_REF };
436enum tlb_miss_type { INSTRUCTION, DATA, REGISTER };
437
438#define VCPU(_v, _x) ((_v)->arch.vpd->_x)
439#define VMX(_v, _x) ((_v)->arch._x)
440
441#define VLSAPIC_INSVC(vcpu, i) ((vcpu)->arch.insvc[i])
442#define VLSAPIC_XTP(_v) VMX(_v, xtp)
443
444static inline unsigned long itir_ps(unsigned long itir)
445{
446 return ((itir >> 2) & 0x3f);
447}
448
449
450/**************************************************************************
451 VCPU control register access routines
452 **************************************************************************/
453
454static inline u64 vcpu_get_itir(struct kvm_vcpu *vcpu)
455{
456 return ((u64)VCPU(vcpu, itir));
457}
458
459static inline void vcpu_set_itir(struct kvm_vcpu *vcpu, u64 val)
460{
461 VCPU(vcpu, itir) = val;
462}
463
464static inline u64 vcpu_get_ifa(struct kvm_vcpu *vcpu)
465{
466 return ((u64)VCPU(vcpu, ifa));
467}
468
469static inline void vcpu_set_ifa(struct kvm_vcpu *vcpu, u64 val)
470{
471 VCPU(vcpu, ifa) = val;
472}
473
474static inline u64 vcpu_get_iva(struct kvm_vcpu *vcpu)
475{
476 return ((u64)VCPU(vcpu, iva));
477}
478
479static inline u64 vcpu_get_pta(struct kvm_vcpu *vcpu)
480{
481 return ((u64)VCPU(vcpu, pta));
482}
483
484static inline u64 vcpu_get_lid(struct kvm_vcpu *vcpu)
485{
486 return ((u64)VCPU(vcpu, lid));
487}
488
489static inline u64 vcpu_get_tpr(struct kvm_vcpu *vcpu)
490{
491 return ((u64)VCPU(vcpu, tpr));
492}
493
494static inline u64 vcpu_get_eoi(struct kvm_vcpu *vcpu)
495{
 496 return (0UL); /* reads of eoi always return 0 */
497}
498
499static inline u64 vcpu_get_irr0(struct kvm_vcpu *vcpu)
500{
501 return ((u64)VCPU(vcpu, irr[0]));
502}
503
504static inline u64 vcpu_get_irr1(struct kvm_vcpu *vcpu)
505{
506 return ((u64)VCPU(vcpu, irr[1]));
507}
508
509static inline u64 vcpu_get_irr2(struct kvm_vcpu *vcpu)
510{
511 return ((u64)VCPU(vcpu, irr[2]));
512}
513
514static inline u64 vcpu_get_irr3(struct kvm_vcpu *vcpu)
515{
516 return ((u64)VCPU(vcpu, irr[3]));
517}
518
519static inline void vcpu_set_dcr(struct kvm_vcpu *vcpu, u64 val)
520{
521 ia64_setreg(_IA64_REG_CR_DCR, val);
522}
523
524static inline void vcpu_set_isr(struct kvm_vcpu *vcpu, u64 val)
525{
526 VCPU(vcpu, isr) = val;
527}
528
529static inline void vcpu_set_lid(struct kvm_vcpu *vcpu, u64 val)
530{
531 VCPU(vcpu, lid) = val;
532}
533
534static inline void vcpu_set_ipsr(struct kvm_vcpu *vcpu, u64 val)
535{
536 VCPU(vcpu, ipsr) = val;
537}
538
539static inline void vcpu_set_iip(struct kvm_vcpu *vcpu, u64 val)
540{
541 VCPU(vcpu, iip) = val;
542}
543
544static inline void vcpu_set_ifs(struct kvm_vcpu *vcpu, u64 val)
545{
546 VCPU(vcpu, ifs) = val;
547}
548
549static inline void vcpu_set_iipa(struct kvm_vcpu *vcpu, u64 val)
550{
551 VCPU(vcpu, iipa) = val;
552}
553
554static inline void vcpu_set_iha(struct kvm_vcpu *vcpu, u64 val)
555{
556 VCPU(vcpu, iha) = val;
557}
558
559
560static inline u64 vcpu_get_rr(struct kvm_vcpu *vcpu, u64 reg)
561{
562 return vcpu->arch.vrr[reg>>61];
563}
564
565/**************************************************************************
566 VCPU debug breakpoint register access routines
567 **************************************************************************/
568
569static inline void vcpu_set_dbr(struct kvm_vcpu *vcpu, u64 reg, u64 val)
570{
571 __ia64_set_dbr(reg, val);
572}
573
574static inline void vcpu_set_ibr(struct kvm_vcpu *vcpu, u64 reg, u64 val)
575{
576 ia64_set_ibr(reg, val);
577}
578
579static inline u64 vcpu_get_dbr(struct kvm_vcpu *vcpu, u64 reg)
580{
581 return ((u64)__ia64_get_dbr(reg));
582}
583
584static inline u64 vcpu_get_ibr(struct kvm_vcpu *vcpu, u64 reg)
585{
586 return ((u64)ia64_get_ibr(reg));
587}
588
589/**************************************************************************
590 VCPU performance monitor register access routines
591 **************************************************************************/
592static inline void vcpu_set_pmc(struct kvm_vcpu *vcpu, u64 reg, u64 val)
593{
594 /* NOTE: Writes to unimplemented PMC registers are discarded */
595 ia64_set_pmc(reg, val);
596}
597
598static inline void vcpu_set_pmd(struct kvm_vcpu *vcpu, u64 reg, u64 val)
599{
600 /* NOTE: Writes to unimplemented PMD registers are discarded */
601 ia64_set_pmd(reg, val);
602}
603
604static inline u64 vcpu_get_pmc(struct kvm_vcpu *vcpu, u64 reg)
605{
606 /* NOTE: Reads from unimplemented PMC registers return zero */
607 return ((u64)ia64_get_pmc(reg));
608}
609
610static inline u64 vcpu_get_pmd(struct kvm_vcpu *vcpu, u64 reg)
611{
612 /* NOTE: Reads from unimplemented PMD registers return zero */
613 return ((u64)ia64_get_pmd(reg));
614}
615
616static inline unsigned long vrrtomrr(unsigned long val)
617{
618 union ia64_rr rr;
619 rr.val = val;
620 rr.rid = (rr.rid << 4) | 0xe;
621 if (rr.ps > PAGE_SHIFT)
622 rr.ps = PAGE_SHIFT;
623 rr.ve = 1;
624 return rr.val;
625}
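
vrrtomrr() derives the machine region register value from the guest one: the guest rid is shifted left by four and tagged with 0xe so it cannot collide with host rids, the page size is clamped to the host PAGE_SHIFT, and the VHPT walker (ve) is forced on. A worked sketch (guest_rr assumed):

    union ia64_rr rr = { .val = vrrtomrr(guest_rr) };  /* guest_rr assumed */
    /* rr.rid == (guest_rid << 4) | 0xe; rr.ps <= PAGE_SHIFT; rr.ve == 1  */
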
626
627
628static inline int highest_bits(int *dat)
629{
630 u32 bits, bitnum;
631 int i;
632
633 /* loop for all 256 bits */
634 for (i = 7; i >= 0 ; i--) {
635 bits = dat[i];
636 if (bits) {
637 bitnum = fls(bits);
638 return i * 32 + bitnum - 1;
639 }
640 }
641 return NULL_VECTOR;
642}
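
highest_bits() scans the 256-bit vector bitmap as eight 32-bit words from the top down; since fls() is 1-based, word i, bit b yields vector i*32 + b. A worked example with an assumed bitmap:

    int irr[8] = { 0 };

    irr[7] = 0x80000000;          /* bit 31 of word 7: vector 255 pending */
    int vec = highest_bits(irr);  /* 7*32 + fls(0x80000000) - 1 == 255    */
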
643
 644/*
 645 * Return true if the pending irq has higher priority than the
 646 * in-service one.
 647 */
648static inline int is_higher_irq(int pending, int inservice)
649{
650 return ((pending > inservice)
651 || ((pending != NULL_VECTOR)
652 && (inservice == NULL_VECTOR)));
653}
654
655static inline int is_higher_class(int pending, int mic)
656{
657 return ((pending >> 4) > mic);
658}
659
660/*
661 * Return 0-255 for pending irq.
 662 * NULL_VECTOR when none is pending.
663 */
664static inline int highest_pending_irq(struct kvm_vcpu *vcpu)
665{
666 if (VCPU(vcpu, irr[0]) & (1UL<<NMI_VECTOR))
667 return NMI_VECTOR;
668 if (VCPU(vcpu, irr[0]) & (1UL<<ExtINT_VECTOR))
669 return ExtINT_VECTOR;
670
671 return highest_bits((int *)&VCPU(vcpu, irr[0]));
672}
673
674static inline int highest_inservice_irq(struct kvm_vcpu *vcpu)
675{
676 if (VMX(vcpu, insvc[0]) & (1UL<<NMI_VECTOR))
677 return NMI_VECTOR;
678 if (VMX(vcpu, insvc[0]) & (1UL<<ExtINT_VECTOR))
679 return ExtINT_VECTOR;
680
681 return highest_bits((int *)&(VMX(vcpu, insvc[0])));
682}
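
Putting the helpers together, the delivery test is roughly: the highest pending vector must beat both the highest in-service vector and the TPR's masking interrupt class (a vector's class is its top four bits, hence the >> 4 in is_higher_class). A hedged sketch, with mic assumed to come from the virtual TPR:

    int pending = highest_pending_irq(vcpu);
    int inservice = highest_inservice_irq(vcpu);

    if (is_higher_irq(pending, inservice) &&
        is_higher_class(pending, mic))   /* mic assumed from VCPU(vcpu, tpr) */
            ;                            /* deliverable: inject 'pending'    */
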
683
684extern void vcpu_get_fpreg(struct kvm_vcpu *vcpu, u64 reg,
685 struct ia64_fpreg *val);
686extern void vcpu_set_fpreg(struct kvm_vcpu *vcpu, u64 reg,
687 struct ia64_fpreg *val);
688extern u64 vcpu_get_gr(struct kvm_vcpu *vcpu, u64 reg);
689extern void vcpu_set_gr(struct kvm_vcpu *vcpu, u64 reg, u64 val, int nat);
690extern u64 vcpu_get_psr(struct kvm_vcpu *vcpu);
691extern void vcpu_set_psr(struct kvm_vcpu *vcpu, u64 val);
692extern u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr);
693extern void vcpu_bsw0(struct kvm_vcpu *vcpu);
694extern void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte,
695 u64 itir, u64 va, int type);
696extern struct thash_data *vhpt_lookup(u64 va);
697extern u64 guest_vhpt_lookup(u64 iha, u64 *pte);
698extern void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps);
699extern void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps);
700extern u64 translate_phy_pte(u64 *pte, u64 itir, u64 va);
701extern int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte,
702 u64 itir, u64 ifa, int type);
703extern void thash_purge_all(struct kvm_vcpu *v);
704extern struct thash_data *vtlb_lookup(struct kvm_vcpu *v,
705 u64 va, int is_data);
706extern int vtr_find_overlap(struct kvm_vcpu *vcpu, u64 va,
707 u64 ps, int is_data);
708
709extern void vcpu_increment_iip(struct kvm_vcpu *v);
710extern void vcpu_decrement_iip(struct kvm_vcpu *vcpu);
711extern void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec);
712extern void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec);
713extern void data_page_not_present(struct kvm_vcpu *vcpu, u64 vadr);
714extern void dnat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr);
715extern void alt_dtlb(struct kvm_vcpu *vcpu, u64 vadr);
716extern void nested_dtlb(struct kvm_vcpu *vcpu);
717extern void dvhpt_fault(struct kvm_vcpu *vcpu, u64 vadr);
718extern int vhpt_enabled(struct kvm_vcpu *vcpu, u64 vadr, enum vhpt_ref ref);
719
720extern void update_vhpi(struct kvm_vcpu *vcpu, int vec);
721extern int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice);
722
723extern int fetch_code(struct kvm_vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle);
724extern void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma);
725extern void vmm_transition(struct kvm_vcpu *vcpu);
726extern void vmm_trampoline(union context *from, union context *to);
727extern int vmm_entry(void);
728extern u64 vcpu_get_itc(struct kvm_vcpu *vcpu);
729
730extern void vmm_reset_entry(void);
731void kvm_init_vtlb(struct kvm_vcpu *v);
732void kvm_init_vhpt(struct kvm_vcpu *v);
733void thash_init(struct thash_cb *hcb, u64 sz);
734
735void panic_vm(struct kvm_vcpu *v);
736
737extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, u64 arg3,
738 u64 arg4, u64 arg5, u64 arg6, u64 arg7);
739#endif
740#endif /* __VCPU_H__ */
diff --git a/arch/ia64/kvm/vmm.c b/arch/ia64/kvm/vmm.c
new file mode 100644
index 000000000000..2275bf4e681a
--- /dev/null
+++ b/arch/ia64/kvm/vmm.c
@@ -0,0 +1,66 @@
1/*
2 * vmm.c: vmm module interface with kvm module
3 *
4 * Copyright (c) 2007, Intel Corporation.
5 *
6 * Xiantao Zhang (xiantao.zhang@intel.com)
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
19 * Place - Suite 330, Boston, MA 02111-1307 USA.
20 */
21
22
 23#include <linux/module.h>
 24#include <asm/fpswa.h>
25
26#include "vcpu.h"
27
28MODULE_AUTHOR("Intel");
29MODULE_LICENSE("GPL");
30
31extern char kvm_ia64_ivt;
32extern fpswa_interface_t *vmm_fpswa_interface;
33
34struct kvm_vmm_info vmm_info = {
35 .module = THIS_MODULE,
36 .vmm_entry = vmm_entry,
37 .tramp_entry = vmm_trampoline,
38 .vmm_ivt = (unsigned long)&kvm_ia64_ivt,
39};
40
41static int __init kvm_vmm_init(void)
42{
43
44 vmm_fpswa_interface = fpswa_interface;
45
 46 /* Register the vmm data with the kvm side */
47 return kvm_init(&vmm_info, 1024, THIS_MODULE);
48}
49
50static void __exit kvm_vmm_exit(void)
51{
52 kvm_exit();
 53 return;
54}
55
56void vmm_spin_lock(spinlock_t *lock)
57{
58 _vmm_raw_spin_lock(lock);
59}
60
61void vmm_spin_unlock(spinlock_t *lock)
62{
63 _vmm_raw_spin_unlock(lock);
64}
65module_init(kvm_vmm_init)
66module_exit(kvm_vmm_exit)
diff --git a/arch/ia64/kvm/vmm_ivt.S b/arch/ia64/kvm/vmm_ivt.S
new file mode 100644
index 000000000000..3ee5f481c06d
--- /dev/null
+++ b/arch/ia64/kvm/vmm_ivt.S
@@ -0,0 +1,1424 @@
1/*
 2 * arch/ia64/kvm/vmm_ivt.S
3 *
4 * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
5 * Stephane Eranian <eranian@hpl.hp.com>
6 * David Mosberger <davidm@hpl.hp.com>
7 * Copyright (C) 2000, 2002-2003 Intel Co
8 * Asit Mallick <asit.k.mallick@intel.com>
9 * Suresh Siddha <suresh.b.siddha@intel.com>
10 * Kenneth Chen <kenneth.w.chen@intel.com>
11 * Fenghua Yu <fenghua.yu@intel.com>
12 *
13 *
14 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling
15 * for SMP
16 * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB
17 * handler now uses virtual PT.
18 *
19 * 07/6/20 Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
20 * Supporting Intel virtualization architecture
21 *
22 */
23
24/*
25 * This file defines the interruption vector table used by the CPU.
26 * It does not include one entry per possible cause of interruption.
27 *
28 * The first 20 entries of the table contain 64 bundles each while the
29 * remaining 48 entries contain only 16 bundles each.
30 *
 31 * The 64 bundles are used to allow inlining the whole handler for
 32 * critical interruptions like TLB misses.
34 *
35 * For each entry, the comment is as follows:
36 *
 37 *   // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
 38 *   entry offset ----/     /         /                  /  /
 39 *   entry number ---------/         /                  /  /
 40 *   size of the entry -------------/                  /  /
 41 *   vector name -------------------------------------/  /
 42 *   interruptions triggering this vector ----------------/
49 *
 50 * The table is 32KB in size and must be aligned on a 32KB
 51 * boundary.
52 * (The CPU ignores the 15 lower bits of the address)
53 *
54 * Table is based upon EAS2.6 (Oct 1999)
55 */
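
For reference, the offsets implied by the 20 x 64-bundle / 48 x 16-bundle split can be computed as below (illustrative host-side C, not part of the table; one bundle is 16 bytes, so 64 bundles == 0x400 and 16 bundles == 0x100):

    unsigned long ivt_entry_offset(unsigned int n)
    {
            return n < 20 ? n * 0x400UL
                          : 20 * 0x400UL + (n - 20) * 0x100UL;
    }
    /* ivt_entry_offset(7)  == 0x1c00  (Data Key Miss, per the example above)
     * ivt_entry_offset(20) == 0x5000  (Page Not Present, below)              */
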
56
57
58#include <asm/asmmacro.h>
59#include <asm/cache.h>
60#include <asm/pgtable.h>
61
62#include "asm-offsets.h"
63#include "vcpu.h"
64#include "kvm_minstate.h"
65#include "vti.h"
66
67#if 1
68# define PSR_DEFAULT_BITS psr.ac
69#else
70# define PSR_DEFAULT_BITS 0
71#endif
72
73
74#define KVM_FAULT(n) \
75 kvm_fault_##n:; \
76 mov r19=n;; \
77 br.sptk.many kvm_fault_##n; \
78 ;; \
79
80
81#define KVM_REFLECT(n) \
82 mov r31=pr; \
83 mov r19=n; /* prepare to save predicates */ \
84 mov r29=cr.ipsr; \
85 ;; \
86 tbit.z p6,p7=r29,IA64_PSR_VM_BIT; \
87(p7)br.sptk.many kvm_dispatch_reflection; \
88 br.sptk.many kvm_panic; \
89
90
91GLOBAL_ENTRY(kvm_panic)
92 br.sptk.many kvm_panic
93 ;;
94END(kvm_panic)
95
96
97
98
99
100 .section .text.ivt,"ax"
101
102 .align 32768 // align on 32KB boundary
103 .global kvm_ia64_ivt
104kvm_ia64_ivt:
105///////////////////////////////////////////////////////////////
106// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
107ENTRY(kvm_vhpt_miss)
108 KVM_FAULT(0)
109END(kvm_vhpt_miss)
110
111
112 .org kvm_ia64_ivt+0x400
113////////////////////////////////////////////////////////////////
114// 0x0400 Entry 1 (size 64 bundles) ITLB (21)
115ENTRY(kvm_itlb_miss)
116 mov r31 = pr
117 mov r29=cr.ipsr;
118 ;;
119 tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
120 (p6) br.sptk kvm_alt_itlb_miss
121 mov r19 = 1
122 br.sptk kvm_itlb_miss_dispatch
123 KVM_FAULT(1);
124END(kvm_itlb_miss)
125
126 .org kvm_ia64_ivt+0x0800
127//////////////////////////////////////////////////////////////////
128// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
129ENTRY(kvm_dtlb_miss)
130 mov r31 = pr
131 mov r29=cr.ipsr;
132 ;;
133 tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
134(p6)br.sptk kvm_alt_dtlb_miss
135 br.sptk kvm_dtlb_miss_dispatch
136END(kvm_dtlb_miss)
137
138 .org kvm_ia64_ivt+0x0c00
139////////////////////////////////////////////////////////////////////
140// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
141ENTRY(kvm_alt_itlb_miss)
142 mov r16=cr.ifa // get address that caused the TLB miss
143 ;;
144 movl r17=PAGE_KERNEL
145 mov r24=cr.ipsr
146 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
147 ;;
148 and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
149 ;;
150 or r19=r17,r19 // insert PTE control bits into r19
151 ;;
152 movl r20=IA64_GRANULE_SHIFT<<2
153 ;;
154 mov cr.itir=r20
155 ;;
156 itc.i r19 // insert the TLB entry
157 mov pr=r31,-1
158 rfi
159END(kvm_alt_itlb_miss)
160
161 .org kvm_ia64_ivt+0x1000
162/////////////////////////////////////////////////////////////////////
163// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
164ENTRY(kvm_alt_dtlb_miss)
165 mov r16=cr.ifa // get address that caused the TLB miss
166 ;;
167 movl r17=PAGE_KERNEL
168 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
169 mov r24=cr.ipsr
170 ;;
171 and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
172 ;;
173 or r19=r19,r17 // insert PTE control bits into r19
174 ;;
175 movl r20=IA64_GRANULE_SHIFT<<2
176 ;;
177 mov cr.itir=r20
178 ;;
179 itc.d r19 // insert the TLB entry
180 mov pr=r31,-1
181 rfi
182END(kvm_alt_dtlb_miss)
183
184 .org kvm_ia64_ivt+0x1400
185//////////////////////////////////////////////////////////////////////
186// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
187ENTRY(kvm_nested_dtlb_miss)
188 KVM_FAULT(5)
189END(kvm_nested_dtlb_miss)
190
191 .org kvm_ia64_ivt+0x1800
192/////////////////////////////////////////////////////////////////////
193// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
194ENTRY(kvm_ikey_miss)
195 KVM_REFLECT(6)
196END(kvm_ikey_miss)
197
198 .org kvm_ia64_ivt+0x1c00
199/////////////////////////////////////////////////////////////////////
200// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
201ENTRY(kvm_dkey_miss)
202 KVM_REFLECT(7)
203END(kvm_dkey_miss)
204
205 .org kvm_ia64_ivt+0x2000
206////////////////////////////////////////////////////////////////////
207// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
208ENTRY(kvm_dirty_bit)
209 KVM_REFLECT(8)
210END(kvm_dirty_bit)
211
212 .org kvm_ia64_ivt+0x2400
213////////////////////////////////////////////////////////////////////
214// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
215ENTRY(kvm_iaccess_bit)
216 KVM_REFLECT(9)
217END(kvm_iaccess_bit)
218
219 .org kvm_ia64_ivt+0x2800
220///////////////////////////////////////////////////////////////////
221// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
222ENTRY(kvm_daccess_bit)
223 KVM_REFLECT(10)
224END(kvm_daccess_bit)
225
226 .org kvm_ia64_ivt+0x2c00
227/////////////////////////////////////////////////////////////////
228// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
229ENTRY(kvm_break_fault)
230 mov r31=pr
231 mov r19=11
232 mov r29=cr.ipsr
233 ;;
234 KVM_SAVE_MIN_WITH_COVER_R19
235 ;;
236 alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
237 mov out0=cr.ifa
238 mov out2=cr.isr // FIXME: pity to make this slow access twice
239 mov out3=cr.iim // FIXME: pity to make this slow access twice
240 adds r3=8,r2 // set up second base pointer
241 ;;
242 ssm psr.ic
243 ;;
244 srlz.i // guarantee that interruption collection is on
245 ;;
246 //(p15)ssm psr.i // restore psr.i
247 addl r14=@gprel(ia64_leave_hypervisor),gp
248 ;;
249 KVM_SAVE_REST
250 mov rp=r14
251 ;;
252 adds out1=16,sp
253 br.call.sptk.many b6=kvm_ia64_handle_break
254 ;;
255END(kvm_break_fault)
256
257 .org kvm_ia64_ivt+0x3000
258/////////////////////////////////////////////////////////////////
259// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
260ENTRY(kvm_interrupt)
261 mov r31=pr // prepare to save predicates
262 mov r19=12
263 mov r29=cr.ipsr
264 ;;
265 tbit.z p6,p7=r29,IA64_PSR_VM_BIT
266 tbit.z p0,p15=r29,IA64_PSR_I_BIT
267 ;;
268(p7) br.sptk kvm_dispatch_interrupt
269 ;;
270 mov r27=ar.rsc /* M */
271 mov r20=r1 /* A */
272 mov r25=ar.unat /* M */
273 mov r26=ar.pfs /* I */
274 mov r28=cr.iip /* M */
275 cover /* B (or nothing) */
276 ;;
277 mov r1=sp
278 ;;
279 invala /* M */
280 mov r30=cr.ifs
281 ;;
282 addl r1=-VMM_PT_REGS_SIZE,r1
283 ;;
284 adds r17=2*L1_CACHE_BYTES,r1 /* really: biggest cache-line size */
285 adds r16=PT(CR_IPSR),r1
286 ;;
287 lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES
288 st8 [r16]=r29 /* save cr.ipsr */
289 ;;
290 lfetch.fault.excl.nt1 [r17]
291 mov r29=b0
292 ;;
293 adds r16=PT(R8),r1 /* initialize first base pointer */
294 adds r17=PT(R9),r1 /* initialize second base pointer */
295 mov r18=r0 /* make sure r18 isn't NaT */
296 ;;
297.mem.offset 0,0; st8.spill [r16]=r8,16
298.mem.offset 8,0; st8.spill [r17]=r9,16
299 ;;
300.mem.offset 0,0; st8.spill [r16]=r10,24
301.mem.offset 8,0; st8.spill [r17]=r11,24
302 ;;
303 st8 [r16]=r28,16 /* save cr.iip */
304 st8 [r17]=r30,16 /* save cr.ifs */
305 mov r8=ar.fpsr /* M */
306 mov r9=ar.csd
307 mov r10=ar.ssd
308 movl r11=FPSR_DEFAULT /* L-unit */
309 ;;
310 st8 [r16]=r25,16 /* save ar.unat */
311 st8 [r17]=r26,16 /* save ar.pfs */
312 shl r18=r18,16 /* compute ar.rsc to be used for "loadrs" */
313 ;;
314 st8 [r16]=r27,16 /* save ar.rsc */
315 adds r17=16,r17 /* skip over ar_rnat field */
316 ;;
317 st8 [r17]=r31,16 /* save predicates */
318 adds r16=16,r16 /* skip over ar_bspstore field */
319 ;;
320 st8 [r16]=r29,16 /* save b0 */
321 st8 [r17]=r18,16 /* save ar.rsc value for "loadrs" */
322 ;;
323.mem.offset 0,0; st8.spill [r16]=r20,16 /* save original r1 */
324.mem.offset 8,0; st8.spill [r17]=r12,16
325 adds r12=-16,r1
326 /* switch to kernel memory stack (with 16 bytes of scratch) */
327 ;;
328.mem.offset 0,0; st8.spill [r16]=r13,16
329.mem.offset 8,0; st8.spill [r17]=r8,16 /* save ar.fpsr */
330 ;;
331.mem.offset 0,0; st8.spill [r16]=r15,16
332.mem.offset 8,0; st8.spill [r17]=r14,16
333 dep r14=-1,r0,60,4
334 ;;
335.mem.offset 0,0; st8.spill [r16]=r2,16
336.mem.offset 8,0; st8.spill [r17]=r3,16
337 adds r2=VMM_PT_REGS_R16_OFFSET,r1
338 adds r14 = VMM_VCPU_GP_OFFSET,r13
339 ;;
340 mov r8=ar.ccv
341 ld8 r14 = [r14]
342 ;;
343 mov r1=r14 /* establish kernel global pointer */
 344 ;;
345 bsw.1
346 ;;
347 alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group
348 mov out0=r13
349 ;;
350 ssm psr.ic
351 ;;
352 srlz.i
353 ;;
354 //(p15) ssm psr.i
355 adds r3=8,r2 // set up second base pointer for SAVE_REST
356 srlz.i // ensure everybody knows psr.ic is back on
357 ;;
358.mem.offset 0,0; st8.spill [r2]=r16,16
359.mem.offset 8,0; st8.spill [r3]=r17,16
360 ;;
361.mem.offset 0,0; st8.spill [r2]=r18,16
362.mem.offset 8,0; st8.spill [r3]=r19,16
363 ;;
364.mem.offset 0,0; st8.spill [r2]=r20,16
365.mem.offset 8,0; st8.spill [r3]=r21,16
366 mov r18=b6
367 ;;
368.mem.offset 0,0; st8.spill [r2]=r22,16
369.mem.offset 8,0; st8.spill [r3]=r23,16
370 mov r19=b7
371 ;;
372.mem.offset 0,0; st8.spill [r2]=r24,16
373.mem.offset 8,0; st8.spill [r3]=r25,16
374 ;;
375.mem.offset 0,0; st8.spill [r2]=r26,16
376.mem.offset 8,0; st8.spill [r3]=r27,16
377 ;;
378.mem.offset 0,0; st8.spill [r2]=r28,16
379.mem.offset 8,0; st8.spill [r3]=r29,16
380 ;;
381.mem.offset 0,0; st8.spill [r2]=r30,16
382.mem.offset 8,0; st8.spill [r3]=r31,32
383 ;;
384 mov ar.fpsr=r11 /* M-unit */
385 st8 [r2]=r8,8 /* ar.ccv */
386 adds r24=PT(B6)-PT(F7),r3
387 ;;
388 stf.spill [r2]=f6,32
389 stf.spill [r3]=f7,32
390 ;;
391 stf.spill [r2]=f8,32
392 stf.spill [r3]=f9,32
393 ;;
394 stf.spill [r2]=f10
395 stf.spill [r3]=f11
396 adds r25=PT(B7)-PT(F11),r3
397 ;;
398 st8 [r24]=r18,16 /* b6 */
399 st8 [r25]=r19,16 /* b7 */
400 ;;
401 st8 [r24]=r9 /* ar.csd */
402 st8 [r25]=r10 /* ar.ssd */
403 ;;
404 srlz.d // make sure we see the effect of cr.ivr
405 addl r14=@gprel(ia64_leave_nested),gp
406 ;;
407 mov rp=r14
408 br.call.sptk.many b6=kvm_ia64_handle_irq
409 ;;
410END(kvm_interrupt)
411
412 .global kvm_dispatch_vexirq
413 .org kvm_ia64_ivt+0x3400
414//////////////////////////////////////////////////////////////////////
415// 0x3400 Entry 13 (size 64 bundles) Reserved
416ENTRY(kvm_virtual_exirq)
417 mov r31=pr
418 mov r19=13
 419 mov r30=r0
420 ;;
421kvm_dispatch_vexirq:
422 cmp.eq p6,p0 = 1,r30
423 ;;
424(p6)add r29 = VMM_VCPU_SAVED_GP_OFFSET,r21
425 ;;
426(p6)ld8 r1 = [r29]
427 ;;
428 KVM_SAVE_MIN_WITH_COVER_R19
429 alloc r14=ar.pfs,0,0,1,0
430 mov out0=r13
431
432 ssm psr.ic
433 ;;
434 srlz.i // guarantee that interruption collection is on
435 ;;
436 //(p15) ssm psr.i // restore psr.i
437 adds r3=8,r2 // set up second base pointer
438 ;;
439 KVM_SAVE_REST
440 addl r14=@gprel(ia64_leave_hypervisor),gp
441 ;;
442 mov rp=r14
443 br.call.sptk.many b6=kvm_vexirq
444END(kvm_virtual_exirq)
445
446 .org kvm_ia64_ivt+0x3800
447/////////////////////////////////////////////////////////////////////
448// 0x3800 Entry 14 (size 64 bundles) Reserved
449 KVM_FAULT(14)
450 // this code segment is from 2.6.16.13
451
452
453 .org kvm_ia64_ivt+0x3c00
454///////////////////////////////////////////////////////////////////////
455// 0x3c00 Entry 15 (size 64 bundles) Reserved
456 KVM_FAULT(15)
457
458
459 .org kvm_ia64_ivt+0x4000
460///////////////////////////////////////////////////////////////////////
461// 0x4000 Entry 16 (size 64 bundles) Reserved
462 KVM_FAULT(16)
463
464 .org kvm_ia64_ivt+0x4400
465//////////////////////////////////////////////////////////////////////
466// 0x4400 Entry 17 (size 64 bundles) Reserved
467 KVM_FAULT(17)
468
469 .org kvm_ia64_ivt+0x4800
470//////////////////////////////////////////////////////////////////////
471// 0x4800 Entry 18 (size 64 bundles) Reserved
472 KVM_FAULT(18)
473
474 .org kvm_ia64_ivt+0x4c00
475//////////////////////////////////////////////////////////////////////
476// 0x4c00 Entry 19 (size 64 bundles) Reserved
477 KVM_FAULT(19)
478
479 .org kvm_ia64_ivt+0x5000
480//////////////////////////////////////////////////////////////////////
481// 0x5000 Entry 20 (size 16 bundles) Page Not Present
482ENTRY(kvm_page_not_present)
483 KVM_REFLECT(20)
484END(kvm_page_not_present)
485
486 .org kvm_ia64_ivt+0x5100
487///////////////////////////////////////////////////////////////////////
488// 0x5100 Entry 21 (size 16 bundles) Key Permission vector
489ENTRY(kvm_key_permission)
490 KVM_REFLECT(21)
491END(kvm_key_permission)
492
493 .org kvm_ia64_ivt+0x5200
494//////////////////////////////////////////////////////////////////////
495// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
496ENTRY(kvm_iaccess_rights)
497 KVM_REFLECT(22)
498END(kvm_iaccess_rights)
499
500 .org kvm_ia64_ivt+0x5300
501//////////////////////////////////////////////////////////////////////
502// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
503ENTRY(kvm_daccess_rights)
504 KVM_REFLECT(23)
505END(kvm_daccess_rights)
506
507 .org kvm_ia64_ivt+0x5400
508/////////////////////////////////////////////////////////////////////
509// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
510ENTRY(kvm_general_exception)
511 KVM_REFLECT(24)
512 KVM_FAULT(24)
513END(kvm_general_exception)
514
515 .org kvm_ia64_ivt+0x5500
516//////////////////////////////////////////////////////////////////////
517// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
518ENTRY(kvm_disabled_fp_reg)
519 KVM_REFLECT(25)
520END(kvm_disabled_fp_reg)
521
522 .org kvm_ia64_ivt+0x5600
523////////////////////////////////////////////////////////////////////
524// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
525ENTRY(kvm_nat_consumption)
526 KVM_REFLECT(26)
527END(kvm_nat_consumption)
528
529 .org kvm_ia64_ivt+0x5700
530/////////////////////////////////////////////////////////////////////
531// 0x5700 Entry 27 (size 16 bundles) Speculation (40)
532ENTRY(kvm_speculation_vector)
533 KVM_REFLECT(27)
534END(kvm_speculation_vector)
535
536 .org kvm_ia64_ivt+0x5800
537/////////////////////////////////////////////////////////////////////
538// 0x5800 Entry 28 (size 16 bundles) Reserved
539 KVM_FAULT(28)
540
541 .org kvm_ia64_ivt+0x5900
542///////////////////////////////////////////////////////////////////
543// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
544ENTRY(kvm_debug_vector)
545 KVM_FAULT(29)
546END(kvm_debug_vector)
547
548 .org kvm_ia64_ivt+0x5a00
549///////////////////////////////////////////////////////////////
550// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
551ENTRY(kvm_unaligned_access)
552 KVM_REFLECT(30)
553END(kvm_unaligned_access)
554
555 .org kvm_ia64_ivt+0x5b00
556//////////////////////////////////////////////////////////////////////
557// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
558ENTRY(kvm_unsupported_data_reference)
559 KVM_REFLECT(31)
560END(kvm_unsupported_data_reference)
561
562 .org kvm_ia64_ivt+0x5c00
563////////////////////////////////////////////////////////////////////
564// 0x5c00 Entry 32 (size 16 bundles) Floating Point FAULT (65)
565ENTRY(kvm_floating_point_fault)
566 KVM_REFLECT(32)
567END(kvm_floating_point_fault)
568
569 .org kvm_ia64_ivt+0x5d00
570/////////////////////////////////////////////////////////////////////
571// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
572ENTRY(kvm_floating_point_trap)
573 KVM_REFLECT(33)
574END(kvm_floating_point_trap)
575
576 .org kvm_ia64_ivt+0x5e00
577//////////////////////////////////////////////////////////////////////
578// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
579ENTRY(kvm_lower_privilege_trap)
580 KVM_REFLECT(34)
581END(kvm_lower_privilege_trap)
582
583 .org kvm_ia64_ivt+0x5f00
584//////////////////////////////////////////////////////////////////////
585// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
586ENTRY(kvm_taken_branch_trap)
587 KVM_REFLECT(35)
588END(kvm_taken_branch_trap)
589
590 .org kvm_ia64_ivt+0x6000
591////////////////////////////////////////////////////////////////////
592// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
593ENTRY(kvm_single_step_trap)
594 KVM_REFLECT(36)
595END(kvm_single_step_trap)
596 .global kvm_virtualization_fault_back
597 .org kvm_ia64_ivt+0x6100
598/////////////////////////////////////////////////////////////////////
599// 0x6100 Entry 37 (size 16 bundles) Virtualization Fault
600ENTRY(kvm_virtualization_fault)
601 mov r31=pr
602 adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
603 ;;
604 st8 [r16] = r1
605 adds r17 = VMM_VCPU_GP_OFFSET, r21
606 ;;
607 ld8 r1 = [r17]
608 cmp.eq p6,p0=EVENT_MOV_FROM_AR,r24
609 cmp.eq p7,p0=EVENT_MOV_FROM_RR,r24
610 cmp.eq p8,p0=EVENT_MOV_TO_RR,r24
611 cmp.eq p9,p0=EVENT_RSM,r24
612 cmp.eq p10,p0=EVENT_SSM,r24
613 cmp.eq p11,p0=EVENT_MOV_TO_PSR,r24
614 cmp.eq p12,p0=EVENT_THASH,r24
615 (p6) br.dptk.many kvm_asm_mov_from_ar
616 (p7) br.dptk.many kvm_asm_mov_from_rr
617 (p8) br.dptk.many kvm_asm_mov_to_rr
618 (p9) br.dptk.many kvm_asm_rsm
619 (p10) br.dptk.many kvm_asm_ssm
620 (p11) br.dptk.many kvm_asm_mov_to_psr
621 (p12) br.dptk.many kvm_asm_thash
622 ;;
623kvm_virtualization_fault_back:
624 adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
625 ;;
626 ld8 r1 = [r16]
627 ;;
628 mov r19=37
629 adds r16 = VMM_VCPU_CAUSE_OFFSET,r21
630 adds r17 = VMM_VCPU_OPCODE_OFFSET,r21
631 ;;
632 st8 [r16] = r24
633 st8 [r17] = r25
634 ;;
635 cmp.ne p6,p0=EVENT_RFI, r24
636 (p6) br.sptk kvm_dispatch_virtualization_fault
637 ;;
638 adds r18=VMM_VPD_BASE_OFFSET,r21
639 ;;
640 ld8 r18=[r18]
641 ;;
642 adds r18=VMM_VPD_VIFS_OFFSET,r18
643 ;;
644 ld8 r18=[r18]
645 ;;
646 tbit.z p6,p0=r18,63
647 (p6) br.sptk kvm_dispatch_virtualization_fault
648 ;;
 649 // if vifs.v == 1, discard the current register frame
650 alloc r18=ar.pfs,0,0,0,0
651 br.sptk kvm_dispatch_virtualization_fault
652END(kvm_virtualization_fault)
653
654 .org kvm_ia64_ivt+0x6200
655//////////////////////////////////////////////////////////////
656// 0x6200 Entry 38 (size 16 bundles) Reserved
657 KVM_FAULT(38)
658
659 .org kvm_ia64_ivt+0x6300
660/////////////////////////////////////////////////////////////////
661// 0x6300 Entry 39 (size 16 bundles) Reserved
662 KVM_FAULT(39)
663
664 .org kvm_ia64_ivt+0x6400
665/////////////////////////////////////////////////////////////////
666// 0x6400 Entry 40 (size 16 bundles) Reserved
667 KVM_FAULT(40)
668
669 .org kvm_ia64_ivt+0x6500
670//////////////////////////////////////////////////////////////////
671// 0x6500 Entry 41 (size 16 bundles) Reserved
672 KVM_FAULT(41)
673
674 .org kvm_ia64_ivt+0x6600
675//////////////////////////////////////////////////////////////////
676// 0x6600 Entry 42 (size 16 bundles) Reserved
677 KVM_FAULT(42)
678
679 .org kvm_ia64_ivt+0x6700
680//////////////////////////////////////////////////////////////////
681// 0x6700 Entry 43 (size 16 bundles) Reserved
682 KVM_FAULT(43)
683
684 .org kvm_ia64_ivt+0x6800
685//////////////////////////////////////////////////////////////////
686// 0x6800 Entry 44 (size 16 bundles) Reserved
687 KVM_FAULT(44)
688
689 .org kvm_ia64_ivt+0x6900
690///////////////////////////////////////////////////////////////////
 691// 0x6900 Entry 45 (size 16 bundles) IA-32 Exception
692//(17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
693ENTRY(kvm_ia32_exception)
694 KVM_FAULT(45)
695END(kvm_ia32_exception)
696
697 .org kvm_ia64_ivt+0x6a00
698////////////////////////////////////////////////////////////////////
699// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
700ENTRY(kvm_ia32_intercept)
701 KVM_FAULT(47)
702END(kvm_ia32_intercept)
703
704 .org kvm_ia64_ivt+0x6c00
705/////////////////////////////////////////////////////////////////////
706// 0x6c00 Entry 48 (size 16 bundles) Reserved
707 KVM_FAULT(48)
708
709 .org kvm_ia64_ivt+0x6d00
710//////////////////////////////////////////////////////////////////////
711// 0x6d00 Entry 49 (size 16 bundles) Reserved
712 KVM_FAULT(49)
713
714 .org kvm_ia64_ivt+0x6e00
715//////////////////////////////////////////////////////////////////////
716// 0x6e00 Entry 50 (size 16 bundles) Reserved
717 KVM_FAULT(50)
718
719 .org kvm_ia64_ivt+0x6f00
720/////////////////////////////////////////////////////////////////////
721// 0x6f00 Entry 51 (size 16 bundles) Reserved
722 KVM_FAULT(52)
723
724 .org kvm_ia64_ivt+0x7100
725////////////////////////////////////////////////////////////////////
726// 0x7100 Entry 53 (size 16 bundles) Reserved
727 KVM_FAULT(53)
728
729 .org kvm_ia64_ivt+0x7200
730/////////////////////////////////////////////////////////////////////
731// 0x7200 Entry 54 (size 16 bundles) Reserved
732 KVM_FAULT(54)
733
734 .org kvm_ia64_ivt+0x7300
735////////////////////////////////////////////////////////////////////
736// 0x7300 Entry 55 (size 16 bundles) Reserved
737 KVM_FAULT(55)
738
739 .org kvm_ia64_ivt+0x7400
740////////////////////////////////////////////////////////////////////
741// 0x7400 Entry 56 (size 16 bundles) Reserved
742 KVM_FAULT(56)
743
744 .org kvm_ia64_ivt+0x7500
745/////////////////////////////////////////////////////////////////////
746// 0x7500 Entry 57 (size 16 bundles) Reserved
747 KVM_FAULT(57)
748
749 .org kvm_ia64_ivt+0x7600
750/////////////////////////////////////////////////////////////////////
751// 0x7600 Entry 58 (size 16 bundles) Reserved
752 KVM_FAULT(58)
753
754 .org kvm_ia64_ivt+0x7700
755////////////////////////////////////////////////////////////////////
756// 0x7700 Entry 59 (size 16 bundles) Reserved
757 KVM_FAULT(59)
758
759 .org kvm_ia64_ivt+0x7800
760////////////////////////////////////////////////////////////////////
761// 0x7800 Entry 60 (size 16 bundles) Reserved
762 KVM_FAULT(60)
763
764 .org kvm_ia64_ivt+0x7900
765/////////////////////////////////////////////////////////////////////
766// 0x7900 Entry 61 (size 16 bundles) Reserved
767 KVM_FAULT(61)
768
769 .org kvm_ia64_ivt+0x7a00
770/////////////////////////////////////////////////////////////////////
771// 0x7a00 Entry 62 (size 16 bundles) Reserved
772 KVM_FAULT(62)
773
774 .org kvm_ia64_ivt+0x7b00
775/////////////////////////////////////////////////////////////////////
776// 0x7b00 Entry 63 (size 16 bundles) Reserved
777 KVM_FAULT(63)
778
779 .org kvm_ia64_ivt+0x7c00
780////////////////////////////////////////////////////////////////////
781// 0x7c00 Entry 64 (size 16 bundles) Reserved
782 KVM_FAULT(64)
783
784 .org kvm_ia64_ivt+0x7d00
785/////////////////////////////////////////////////////////////////////
786// 0x7d00 Entry 65 (size 16 bundles) Reserved
787 KVM_FAULT(65)
788
789 .org kvm_ia64_ivt+0x7e00
790/////////////////////////////////////////////////////////////////////
791// 0x7e00 Entry 66 (size 16 bundles) Reserved
792 KVM_FAULT(66)
793
794 .org kvm_ia64_ivt+0x7f00
795////////////////////////////////////////////////////////////////////
796// 0x7f00 Entry 67 (size 16 bundles) Reserved
797 KVM_FAULT(67)
798
799 .org kvm_ia64_ivt+0x8000
800// There is no particular reason for this code to be here, other than that
801// there happens to be space here that would go unused otherwise. If this
 802// fault ever gets "unreserved", simply move the following code to a more
803// suitable spot...
804
805
806ENTRY(kvm_dtlb_miss_dispatch)
807 mov r19 = 2
808 KVM_SAVE_MIN_WITH_COVER_R19
809 alloc r14=ar.pfs,0,0,3,0
810 mov out0=cr.ifa
811 mov out1=r15
812 adds r3=8,r2 // set up second base pointer
813 ;;
814 ssm psr.ic
815 ;;
816 srlz.i // guarantee that interruption collection is on
817 ;;
818 //(p15) ssm psr.i // restore psr.i
819 addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
820 ;;
821 KVM_SAVE_REST
822 KVM_SAVE_EXTRA
823 mov rp=r14
824 ;;
825 adds out2=16,r12
826 br.call.sptk.many b6=kvm_page_fault
827END(kvm_dtlb_miss_dispatch)
828
829ENTRY(kvm_itlb_miss_dispatch)
830
831 KVM_SAVE_MIN_WITH_COVER_R19
832 alloc r14=ar.pfs,0,0,3,0
833 mov out0=cr.ifa
834 mov out1=r15
835 adds r3=8,r2 // set up second base pointer
836 ;;
837 ssm psr.ic
838 ;;
839 srlz.i // guarantee that interruption collection is on
840 ;;
841 //(p15) ssm psr.i // restore psr.i
842 addl r14=@gprel(ia64_leave_hypervisor),gp
843 ;;
844 KVM_SAVE_REST
845 mov rp=r14
846 ;;
847 adds out2=16,r12
848 br.call.sptk.many b6=kvm_page_fault
849END(kvm_itlb_miss_dispatch)
850
851ENTRY(kvm_dispatch_reflection)
852 /*
853 * Input:
854 * psr.ic: off
855 * r19: intr type (offset into ivt, see ia64_int.h)
856 * r31: contains saved predicates (pr)
857 */
858 KVM_SAVE_MIN_WITH_COVER_R19
859 alloc r14=ar.pfs,0,0,5,0
860 mov out0=cr.ifa
861 mov out1=cr.isr
862 mov out2=cr.iim
863 mov out3=r15
864 adds r3=8,r2 // set up second base pointer
865 ;;
866 ssm psr.ic
867 ;;
868 srlz.i // guarantee that interruption collection is on
869 ;;
870 //(p15) ssm psr.i // restore psr.i
871 addl r14=@gprel(ia64_leave_hypervisor),gp
872 ;;
873 KVM_SAVE_REST
874 mov rp=r14
875 ;;
876 adds out4=16,r12
877 br.call.sptk.many b6=reflect_interruption
878END(kvm_dispatch_reflection)
879
880ENTRY(kvm_dispatch_virtualization_fault)
881 adds r16 = VMM_VCPU_CAUSE_OFFSET,r21
882 adds r17 = VMM_VCPU_OPCODE_OFFSET,r21
883 ;;
884 st8 [r16] = r24
885 st8 [r17] = r25
886 ;;
887 KVM_SAVE_MIN_WITH_COVER_R19
888 ;;
889 alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
890 mov out0=r13 //vcpu
891 adds r3=8,r2 // set up second base pointer
892 ;;
893 ssm psr.ic
894 ;;
895 srlz.i // guarantee that interruption collection is on
896 ;;
897 //(p15) ssm psr.i // restore psr.i
898 addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
899 ;;
900 KVM_SAVE_REST
901 KVM_SAVE_EXTRA
902 mov rp=r14
903 ;;
904 adds out1=16,sp //regs
905 br.call.sptk.many b6=kvm_emulate
906END(kvm_dispatch_virtualization_fault)
907
908
909ENTRY(kvm_dispatch_interrupt)
910 KVM_SAVE_MIN_WITH_COVER_R19 // uses r31; defines r2 and r3
911 ;;
912 alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group
913 //mov out0=cr.ivr // pass cr.ivr as first arg
914 adds r3=8,r2 // set up second base pointer for SAVE_REST
915 ;;
916 ssm psr.ic
917 ;;
918 srlz.i
919 ;;
920 //(p15) ssm psr.i
921 addl r14=@gprel(ia64_leave_hypervisor),gp
922 ;;
923 KVM_SAVE_REST
924 mov rp=r14
925 ;;
926 mov out0=r13 // pass pointer to pt_regs as second arg
927 br.call.sptk.many b6=kvm_ia64_handle_irq
928END(kvm_dispatch_interrupt)
929
930
931
932
933GLOBAL_ENTRY(ia64_leave_nested)
934 rsm psr.i
935 ;;
936 adds r21=PT(PR)+16,r12
937 ;;
938 lfetch [r21],PT(CR_IPSR)-PT(PR)
939 adds r2=PT(B6)+16,r12
940 adds r3=PT(R16)+16,r12
941 ;;
942 lfetch [r21]
943 ld8 r28=[r2],8 // load b6
944 adds r29=PT(R24)+16,r12
945
946 ld8.fill r16=[r3]
947 adds r3=PT(AR_CSD)-PT(R16),r3
948 adds r30=PT(AR_CCV)+16,r12
949 ;;
950 ld8.fill r24=[r29]
951 ld8 r15=[r30] // load ar.ccv
952 ;;
953 ld8 r29=[r2],16 // load b7
954 ld8 r30=[r3],16 // load ar.csd
955 ;;
956 ld8 r31=[r2],16 // load ar.ssd
957 ld8.fill r8=[r3],16
958 ;;
959 ld8.fill r9=[r2],16
960 ld8.fill r10=[r3],PT(R17)-PT(R10)
961 ;;
962 ld8.fill r11=[r2],PT(R18)-PT(R11)
963 ld8.fill r17=[r3],16
964 ;;
965 ld8.fill r18=[r2],16
966 ld8.fill r19=[r3],16
967 ;;
968 ld8.fill r20=[r2],16
969 ld8.fill r21=[r3],16
970 mov ar.csd=r30
971 mov ar.ssd=r31
972 ;;
973 rsm psr.i | psr.ic
974 // initiate turning off of interrupt and interruption collection
975 invala // invalidate ALAT
976 ;;
977 srlz.i
978 ;;
979 ld8.fill r22=[r2],24
980 ld8.fill r23=[r3],24
981 mov b6=r28
982 ;;
983 ld8.fill r25=[r2],16
984 ld8.fill r26=[r3],16
985 mov b7=r29
986 ;;
987 ld8.fill r27=[r2],16
988 ld8.fill r28=[r3],16
989 ;;
990 ld8.fill r29=[r2],16
991 ld8.fill r30=[r3],24
992 ;;
993 ld8.fill r31=[r2],PT(F9)-PT(R31)
994 adds r3=PT(F10)-PT(F6),r3
995 ;;
996 ldf.fill f9=[r2],PT(F6)-PT(F9)
997 ldf.fill f10=[r3],PT(F8)-PT(F10)
998 ;;
999 ldf.fill f6=[r2],PT(F7)-PT(F6)
1000 ;;
1001 ldf.fill f7=[r2],PT(F11)-PT(F7)
1002 ldf.fill f8=[r3],32
1003 ;;
1004 srlz.i // ensure interruption collection is off
1005 mov ar.ccv=r15
1006 ;;
1007 bsw.0 // switch back to bank 0 (no stop bit required beforehand...)
1008 ;;
1009 ldf.fill f11=[r2]
1010// mov r18=r13
1011// mov r21=r13
1012 adds r16=PT(CR_IPSR)+16,r12
1013 adds r17=PT(CR_IIP)+16,r12
1014 ;;
1015 ld8 r29=[r16],16 // load cr.ipsr
1016 ld8 r28=[r17],16 // load cr.iip
1017 ;;
1018 ld8 r30=[r16],16 // load cr.ifs
1019 ld8 r25=[r17],16 // load ar.unat
1020 ;;
1021 ld8 r26=[r16],16 // load ar.pfs
1022 ld8 r27=[r17],16 // load ar.rsc
1023 cmp.eq p9,p0=r0,r0
1024 // set p9 to indicate that we should restore cr.ifs
1025 ;;
1026 ld8 r24=[r16],16 // load ar.rnat (may be garbage)
1027 ld8 r23=[r17],16// load ar.bspstore (may be garbage)
1028 ;;
1029 ld8 r31=[r16],16 // load predicates
1030 ld8 r22=[r17],16 // load b0
1031 ;;
1032 ld8 r19=[r16],16 // load ar.rsc value for "loadrs"
1033 ld8.fill r1=[r17],16 // load r1
1034 ;;
1035 ld8.fill r12=[r16],16
1036 ld8.fill r13=[r17],16
1037 ;;
1038 ld8 r20=[r16],16 // ar.fpsr
1039 ld8.fill r15=[r17],16
1040 ;;
1041 ld8.fill r14=[r16],16
1042 ld8.fill r2=[r17]
1043 ;;
1044 ld8.fill r3=[r16]
1045 ;;
1046 mov r16=ar.bsp // get existing backing store pointer
1047 ;;
1048 mov b0=r22
1049 mov ar.pfs=r26
1050 mov cr.ifs=r30
1051 mov cr.ipsr=r29
1052 mov ar.fpsr=r20
1053 mov cr.iip=r28
1054 ;;
1055 mov ar.rsc=r27
1056 mov ar.unat=r25
1057 mov pr=r31,-1
1058 rfi
1059END(ia64_leave_nested)
1060
1061
1062
1063GLOBAL_ENTRY(ia64_leave_hypervisor_prepare)
1064 /*
1065 * work.need_resched etc. mustn't get changed
 1066 * by this CPU before it returns to user- or fsys-mode,
 1067 * hence we disable interrupts early on.
1069 */
1070 adds r2 = PT(R4)+16,r12
1071 adds r3 = PT(R5)+16,r12
1072 adds r8 = PT(EML_UNAT)+16,r12
1073 ;;
1074 ld8 r8 = [r8]
1075 ;;
1076 mov ar.unat=r8
1077 ;;
1078 ld8.fill r4=[r2],16 //load r4
1079 ld8.fill r5=[r3],16 //load r5
1080 ;;
1081 ld8.fill r6=[r2] //load r6
1082 ld8.fill r7=[r3] //load r7
1083 ;;
1084END(ia64_leave_hypervisor_prepare)
1085//fall through
1086GLOBAL_ENTRY(ia64_leave_hypervisor)
1087 rsm psr.i
1088 ;;
1089 br.call.sptk.many b0=leave_hypervisor_tail
1090 ;;
1091 adds r20=PT(PR)+16,r12
1092 adds r8=PT(EML_UNAT)+16,r12
1093 ;;
1094 ld8 r8=[r8]
1095 ;;
1096 mov ar.unat=r8
1097 ;;
1098 lfetch [r20],PT(CR_IPSR)-PT(PR)
1099 adds r2 = PT(B6)+16,r12
1100 adds r3 = PT(B7)+16,r12
1101 ;;
1102 lfetch [r20]
1103 ;;
1104 ld8 r24=[r2],16 /* B6 */
1105 ld8 r25=[r3],16 /* B7 */
1106 ;;
1107 ld8 r26=[r2],16 /* ar_csd */
1108 ld8 r27=[r3],16 /* ar_ssd */
1109 mov b6 = r24
1110 ;;
1111 ld8.fill r8=[r2],16
1112 ld8.fill r9=[r3],16
1113 mov b7 = r25
1114 ;;
1115 mov ar.csd = r26
1116 mov ar.ssd = r27
1117 ;;
1118 ld8.fill r10=[r2],PT(R15)-PT(R10)
1119 ld8.fill r11=[r3],PT(R14)-PT(R11)
1120 ;;
1121 ld8.fill r15=[r2],PT(R16)-PT(R15)
1122 ld8.fill r14=[r3],PT(R17)-PT(R14)
1123 ;;
1124 ld8.fill r16=[r2],16
1125 ld8.fill r17=[r3],16
1126 ;;
1127 ld8.fill r18=[r2],16
1128 ld8.fill r19=[r3],16
1129 ;;
1130 ld8.fill r20=[r2],16
1131 ld8.fill r21=[r3],16
1132 ;;
1133 ld8.fill r22=[r2],16
1134 ld8.fill r23=[r3],16
1135 ;;
1136 ld8.fill r24=[r2],16
1137 ld8.fill r25=[r3],16
1138 ;;
1139 ld8.fill r26=[r2],16
1140 ld8.fill r27=[r3],16
1141 ;;
1142 ld8.fill r28=[r2],16
1143 ld8.fill r29=[r3],16
1144 ;;
1145 ld8.fill r30=[r2],PT(F6)-PT(R30)
1146 ld8.fill r31=[r3],PT(F7)-PT(R31)
1147 ;;
1148 rsm psr.i | psr.ic
1149 // initiate turning off of interrupt and interruption collection
1150 invala // invalidate ALAT
1151 ;;
1152 srlz.i // ensure interruption collection is off
1153 ;;
1154 bsw.0
1155 ;;
1156 adds r16 = PT(CR_IPSR)+16,r12
1157 adds r17 = PT(CR_IIP)+16,r12
1158 mov r21=r13 // get current
1159 ;;
1160 ld8 r31=[r16],16 // load cr.ipsr
1161 ld8 r30=[r17],16 // load cr.iip
1162 ;;
1163 ld8 r29=[r16],16 // load cr.ifs
1164 ld8 r28=[r17],16 // load ar.unat
1165 ;;
1166 ld8 r27=[r16],16 // load ar.pfs
1167 ld8 r26=[r17],16 // load ar.rsc
1168 ;;
1169 ld8 r25=[r16],16 // load ar.rnat
1170 ld8 r24=[r17],16 // load ar.bspstore
1171 ;;
1172 ld8 r23=[r16],16 // load predicates
1173 ld8 r22=[r17],16 // load b0
1174 ;;
1175 ld8 r20=[r16],16 // load ar.rsc value for "loadrs"
1176 ld8.fill r1=[r17],16 //load r1
1177 ;;
1178 ld8.fill r12=[r16],16 //load r12
1179 ld8.fill r13=[r17],PT(R2)-PT(R13) //load r13
1180 ;;
1181 ld8 r19=[r16],PT(R3)-PT(AR_FPSR) //load ar_fpsr
1182 ld8.fill r2=[r17],PT(AR_CCV)-PT(R2) //load r2
1183 ;;
1184 ld8.fill r3=[r16] //load r3
1185 ld8 r18=[r17] //load ar_ccv
1186 ;;
1187 mov ar.fpsr=r19
1188 mov ar.ccv=r18
1189 shr.u r18=r20,16
1190 ;;
1191kvm_rbs_switch:
1192 mov r19=96
1193
1194kvm_dont_preserve_current_frame:
1195/*
1196 * To prevent leaking bits between the hypervisor and guest domain,
1197 * we must clear the stacked registers in the "invalid" partition here.
 1198 * (The clear loop below runs at roughly 5 registers/cycle on McKinley.)
1199 */
1200# define pRecurse p6
1201# define pReturn p7
1202# define Nregs 14
1203
1204 alloc loc0=ar.pfs,2,Nregs-2,2,0
1205 shr.u loc1=r18,9 // RNaTslots <= floor(dirtySize / (64*8))
1206 sub r19=r19,r18 // r19 = (physStackedSize + 8) - dirtySize
1207 ;;
1208 mov ar.rsc=r20 // load ar.rsc to be used for "loadrs"
1209 shladd in0=loc1,3,r19
1210 mov in1=0
1211 ;;
1212 TEXT_ALIGN(32)
1213kvm_rse_clear_invalid:
1214 alloc loc0=ar.pfs,2,Nregs-2,2,0
1215 cmp.lt pRecurse,p0=Nregs*8,in0
1216 // if more than Nregs regs left to clear, (re)curse
1217 add out0=-Nregs*8,in0
1218 add out1=1,in1 // increment recursion count
1219 mov loc1=0
1220 mov loc2=0
1221 ;;
1222 mov loc3=0
1223 mov loc4=0
1224 mov loc5=0
1225 mov loc6=0
1226 mov loc7=0
1227(pRecurse) br.call.dptk.few b0=kvm_rse_clear_invalid
1228 ;;
1229 mov loc8=0
1230 mov loc9=0
1231 cmp.ne pReturn,p0=r0,in1
1232 // if recursion count != 0, we need to do a br.ret
1233 mov loc10=0
1234 mov loc11=0
1235(pReturn) br.ret.dptk.many b0
1236
1237# undef pRecurse
1238# undef pReturn
1239
1240// loadrs has already been shifted
1241 alloc r16=ar.pfs,0,0,0,0 // drop current register frame
1242 ;;
1243 loadrs
1244 ;;
1245 mov ar.bspstore=r24
1246 ;;
1247 mov ar.unat=r28
1248 mov ar.rnat=r25
1249 mov ar.rsc=r26
1250 ;;
1251 mov cr.ipsr=r31
1252 mov cr.iip=r30
1253 mov cr.ifs=r29
1254 mov ar.pfs=r27
1255 adds r18=VMM_VPD_BASE_OFFSET,r21
1256 ;;
1257 ld8 r18=[r18] //vpd
1258 adds r17=VMM_VCPU_ISR_OFFSET,r21
1259 ;;
1260 ld8 r17=[r17]
1261 adds r19=VMM_VPD_VPSR_OFFSET,r18
1262 ;;
1263 ld8 r19=[r19] //vpsr
1264 adds r20=VMM_VCPU_VSA_BASE_OFFSET,r21
1265 ;;
1266 ld8 r20=[r20]
1267 ;;
1268//vsa_sync_write_start
1269 mov r25=r18
1270 adds r16= VMM_VCPU_GP_OFFSET,r21
1271 ;;
 1272 ld8 r16=[r16] // load gp; it is added into r24 below
1273 movl r24=@gprel(ia64_vmm_entry) // calculate return address
1274 ;;
1275 add r24=r24,r16
1276 ;;
1277 add r16=PAL_VPS_SYNC_WRITE,r20
1278 ;;
1279 mov b0=r16
1280 br.cond.sptk b0 // call the service
1281 ;;
1282END(ia64_leave_hypervisor)
1283// fall through
1284GLOBAL_ENTRY(ia64_vmm_entry)
1285/*
1286 * must be at bank 0
1287 * parameter:
1288 * r17:cr.isr
1289 * r18:vpd
1290 * r19:vpsr
1291 * r20:__vsa_base
1292 * r22:b0
1293 * r23:predicate
1294 */
1295 mov r24=r22
1296 mov r25=r18
1297 tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT // p1=vpsr.ic
1298 ;;
1299 (p1) add r29=PAL_VPS_RESUME_NORMAL,r20
1300 (p1) br.sptk.many ia64_vmm_entry_out
1301 ;;
1302 tbit.nz p1,p2 = r17,IA64_ISR_IR_BIT //p1=cr.isr.ir
1303 ;;
1304 (p1) add r29=PAL_VPS_RESUME_NORMAL,r20
1305 (p2) add r29=PAL_VPS_RESUME_HANDLER,r20
1306 (p2) ld8 r26=[r25]
1307 ;;
1308ia64_vmm_entry_out:
1309 mov pr=r23,-2
1310 mov b0=r29
1311 ;;
1312 br.cond.sptk b0 // call pal service
1313END(ia64_vmm_entry)
1314
1315
1316
1317/*
1318 * extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2,
1319 * u64 arg3, u64 arg4, u64 arg5,
1320 * u64 arg6, u64 arg7);
1321 *
1322 * XXX: The currently defined services use at most 4 args. The
1323 * rest are not consumed.
1324 */
1325GLOBAL_ENTRY(ia64_call_vsa)
1326 .regstk 4,4,0,0
1327
1328rpsave = loc0
1329pfssave = loc1
1330psrsave = loc2
1331entry = loc3
1332hostret = r24
1333
1334 alloc pfssave=ar.pfs,4,4,0,0
1335 mov rpsave=rp
1336 adds entry=VMM_VCPU_VSA_BASE_OFFSET, r13
1337 ;;
1338 ld8 entry=[entry]
13391: mov hostret=ip
1340 mov r25=in1 // copy arguments
1341 mov r26=in2
1342 mov r27=in3
1343 mov psrsave=psr
1344 ;;
1345 tbit.nz p6,p0=psrsave,14 // IA64_PSR_I
1346 tbit.nz p7,p0=psrsave,13 // IA64_PSR_IC
1347 ;;
1348 add hostret=2f-1b,hostret // calculate return address
1349 add entry=entry,in0
1350 ;;
1351 rsm psr.i | psr.ic
1352 ;;
1353 srlz.i
1354 mov b6=entry
1355 br.cond.sptk b6 // call the service
13562:
1357 // Architectural sequence for enabling interrupts if necessary
1358(p7) ssm psr.ic
1359 ;;
1360(p7) srlz.i
1361 ;;
1362//(p6) ssm psr.i
1363 ;;
1364 mov rp=rpsave
1365 mov ar.pfs=pfssave
1366 mov r8=r31
1367 ;;
1368 srlz.d
1369 br.ret.sptk rp
1370
1371END(ia64_call_vsa)
1372
1373#define INIT_BSPSTORE ((4<<30)-(12<<20)-0x100)
1374
1375GLOBAL_ENTRY(vmm_reset_entry)
1376 //set up ipsr, iip, vpd.vpsr, dcr
1377 // For IPSR: it/dt/rt=1, i/ic=1, si=1, vm/bn=1
1378 // For DCR: all bits 0
1379 adds r14=-VMM_PT_REGS_SIZE, r12
1380 ;;
1381 movl r6=0x501008826000 // IPSR dt/rt/it:1;i/ic:1, si:1, vm/bn:1
1382 movl r10=0x8000000000000000
1383 adds r16=PT(CR_IIP), r14
1384 adds r20=PT(R1), r14
1385 ;;
1386 rsm psr.ic | psr.i
1387 ;;
1388 srlz.i
1389 ;;
1390 bsw.0
1391 ;;
1392 mov r21 =r13
1393 ;;
1394 bsw.1
1395 ;;
1396 mov ar.rsc = 0
1397 ;;
1398 flushrs
1399 ;;
1400 mov ar.bspstore = 0
1401 // clear BSPSTORE
1402 ;;
1403 mov cr.ipsr=r6
1404 mov cr.ifs=r10
1405 ld8 r4 = [r16] // Set init iip for first run.
1406 ld8 r1 = [r20]
1407 ;;
1408 mov cr.iip=r4
1409 ;;
1410 adds r16=VMM_VPD_BASE_OFFSET,r13
1411 adds r20=VMM_VCPU_VSA_BASE_OFFSET,r13
1412 ;;
1413 ld8 r18=[r16]
1414 ld8 r20=[r20]
1415 ;;
1416 adds r19=VMM_VPD_VPSR_OFFSET,r18
1417 ;;
1418 ld8 r19=[r19]
1419 mov r17=r0
1420 mov r22=r0
1421 mov r23=r0
1422 br.cond.sptk ia64_vmm_entry
1423 br.ret.sptk b0
1424END(vmm_reset_entry)
diff --git a/arch/ia64/kvm/vti.h b/arch/ia64/kvm/vti.h
new file mode 100644
index 000000000000..f6c5617e16af
--- /dev/null
+++ b/arch/ia64/kvm/vti.h
@@ -0,0 +1,290 @@
1/*
2 * vti.h: prototypes for general VT-related interfaces
3 * Copyright (c) 2004, Intel Corporation.
4 *
5 * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
6 * Fred Yang (fred.yang@intel.com)
7 * Kun Tian (Kevin Tian) (kevin.tian@intel.com)
8 *
9 * Copyright (c) 2007, Intel Corporation.
10 * Zhang xiantao <xiantao.zhang@intel.com>
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms and conditions of the GNU General Public License,
14 * version 2, as published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
19 * more details.
20 *
21 * You should have received a copy of the GNU General Public License along with
22 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
23 * Place - Suite 330, Boston, MA 02111-1307 USA.
24 */
25#ifndef _KVM_VT_I_H
26#define _KVM_VT_I_H
27
28#ifndef __ASSEMBLY__
29#include <asm/page.h>
30
31#include <linux/kvm_host.h>
32
33/* define itr.i and itr.d in ia64_itr function */
34#define ITR 0x01
35#define DTR 0x02
36#define IaDTR 0x03
37
38#define IA64_TR_VMM 6 /*itr6, dtr6 : maps vmm code, vmbuffer*/
39#define IA64_TR_VM_DATA 7 /*dtr7 : maps current vm data*/
40
41#define RR6 (6UL<<61)
42#define RR7 (7UL<<61)
43
44
45/* config_options in pal_vp_init_env */
46#define VP_INITIALIZE 1UL
47#define VP_FR_PMC	(1UL<<1)
48#define VP_OPCODE	(1UL<<8)
49#define VP_CAUSE	(1UL<<9)
50#define VP_FW_ACC	(1UL<<63)
51
52/* init vp env with initializing vm_buffer */
53#define VP_INIT_ENV_INITALIZE (VP_INITIALIZE | VP_FR_PMC |\
54 VP_OPCODE | VP_CAUSE | VP_FW_ACC)
55/* init vp env without initializing vm_buffer */
56#define VP_INIT_ENV	(VP_FR_PMC | VP_OPCODE | VP_CAUSE | VP_FW_ACC)
57
58#define PAL_VP_CREATE 265
59/* Stacked Virt. Initializes a new VPD for the operation of
60 * a new virtual processor in the virtual environment.
61 */
62#define PAL_VP_ENV_INFO 266
63/*Stacked Virt. Returns the parameters needed to enter a virtual environment.*/
64#define PAL_VP_EXIT_ENV 267
65/*Stacked Virt. Allows a logical processor to exit a virtual environment.*/
66#define PAL_VP_INIT_ENV 268
67/*Stacked Virt. Allows a logical processor to enter a virtual environment.*/
68#define PAL_VP_REGISTER 269
69/*Stacked Virt. Register a different host IVT for the virtual processor.*/
70#define PAL_VP_RESUME 270
71/* Renamed from PAL_VP_RESUME */
72#define PAL_VP_RESTORE 270
73/*Stacked Virt. Resumes virtual processor operation on the logical processor.*/
74#define PAL_VP_SUSPEND 271
75/* Renamed from PAL_VP_SUSPEND */
76#define PAL_VP_SAVE 271
77/* Stacked Virt. Suspends operation for the specified virtual processor on
78 * the logical processor.
79 */
80#define PAL_VP_TERMINATE 272
81/* Stacked Virt. Terminates operation for the specified virtual processor.*/
82
83union vac {
84 unsigned long value;
85 struct {
86 int a_int:1;
87 int a_from_int_cr:1;
88 int a_to_int_cr:1;
89 int a_from_psr:1;
90 int a_from_cpuid:1;
91 int a_cover:1;
92 int a_bsw:1;
93 long reserved:57;
94 };
95};
96
97union vdc {
98 unsigned long value;
99 struct {
100 int d_vmsw:1;
101 int d_extint:1;
102 int d_ibr_dbr:1;
103 int d_pmc:1;
104 int d_to_pmd:1;
105 int d_itm:1;
106 long reserved:58;
107 };
108};
109
110struct vpd {
111 union vac vac;
112 union vdc vdc;
113 unsigned long virt_env_vaddr;
114 unsigned long reserved1[29];
115 unsigned long vhpi;
116 unsigned long reserved2[95];
117 unsigned long vgr[16];
118 unsigned long vbgr[16];
119 unsigned long vnat;
120 unsigned long vbnat;
121 unsigned long vcpuid[5];
122 unsigned long reserved3[11];
123 unsigned long vpsr;
124 unsigned long vpr;
125 unsigned long reserved4[76];
126 union {
127 unsigned long vcr[128];
128 struct {
129 unsigned long dcr;
130 unsigned long itm;
131 unsigned long iva;
132 unsigned long rsv1[5];
133 unsigned long pta;
134 unsigned long rsv2[7];
135 unsigned long ipsr;
136 unsigned long isr;
137 unsigned long rsv3;
138 unsigned long iip;
139 unsigned long ifa;
140 unsigned long itir;
141 unsigned long iipa;
142 unsigned long ifs;
143 unsigned long iim;
144 unsigned long iha;
145 unsigned long rsv4[38];
146 unsigned long lid;
147 unsigned long ivr;
148 unsigned long tpr;
149 unsigned long eoi;
150 unsigned long irr[4];
151 unsigned long itv;
152 unsigned long pmv;
153 unsigned long cmcv;
154 unsigned long rsv5[5];
155 unsigned long lrr0;
156 unsigned long lrr1;
157 unsigned long rsv6[46];
158 };
159 };
160 unsigned long reserved5[128];
161 unsigned long reserved6[3456];
162 unsigned long vmm_avail[128];
163 unsigned long reserved7[4096];
164};
165
166#define PAL_PROC_VM_BIT (1UL << 40)
167#define PAL_PROC_VMSW_BIT (1UL << 54)
168
169static inline s64 ia64_pal_vp_env_info(u64 *buffer_size,
170 u64 *vp_env_info)
171{
172 struct ia64_pal_retval iprv;
173 PAL_CALL_STK(iprv, PAL_VP_ENV_INFO, 0, 0, 0);
174 *buffer_size = iprv.v0;
175 *vp_env_info = iprv.v1;
176 return iprv.status;
177}
178
179static inline s64 ia64_pal_vp_exit_env(u64 iva)
180{
181 struct ia64_pal_retval iprv;
182
183 PAL_CALL_STK(iprv, PAL_VP_EXIT_ENV, (u64)iva, 0, 0);
184 return iprv.status;
185}
186
187static inline s64 ia64_pal_vp_init_env(u64 config_options, u64 pbase_addr,
188 u64 vbase_addr, u64 *vsa_base)
189{
190 struct ia64_pal_retval iprv;
191
192 PAL_CALL_STK(iprv, PAL_VP_INIT_ENV, config_options, pbase_addr,
193 vbase_addr);
194 *vsa_base = iprv.v0;
195
196 return iprv.status;
197}
198
199static inline s64 ia64_pal_vp_restore(u64 *vpd, u64 pal_proc_vector)
200{
201 struct ia64_pal_retval iprv;
202
203 PAL_CALL_STK(iprv, PAL_VP_RESTORE, (u64)vpd, pal_proc_vector, 0);
204
205 return iprv.status;
206}
207
208static inline s64 ia64_pal_vp_save(u64 *vpd, u64 pal_proc_vector)
209{
210 struct ia64_pal_retval iprv;
211
212 PAL_CALL_STK(iprv, PAL_VP_SAVE, (u64)vpd, pal_proc_vector, 0);
213
214 return iprv.status;
215}
216
217#endif
218
219/*VPD field offset*/
220#define VPD_VAC_START_OFFSET 0
221#define VPD_VDC_START_OFFSET 8
222#define VPD_VHPI_START_OFFSET 256
223#define VPD_VGR_START_OFFSET 1024
224#define VPD_VBGR_START_OFFSET 1152
225#define VPD_VNAT_START_OFFSET 1280
226#define VPD_VBNAT_START_OFFSET 1288
227#define VPD_VCPUID_START_OFFSET 1296
228#define VPD_VPSR_START_OFFSET 1424
229#define VPD_VPR_START_OFFSET 1432
230#define VPD_VRSE_CFLE_START_OFFSET 1440
231#define VPD_VCR_START_OFFSET 2048
232#define VPD_VTPR_START_OFFSET 2576
233#define VPD_VRR_START_OFFSET 3072
234#define VPD_VMM_VAIL_START_OFFSET 31744
235
236/*Virtualization faults*/
237
238#define EVENT_MOV_TO_AR 1
239#define EVENT_MOV_TO_AR_IMM 2
240#define EVENT_MOV_FROM_AR 3
241#define EVENT_MOV_TO_CR 4
242#define EVENT_MOV_FROM_CR 5
243#define EVENT_MOV_TO_PSR 6
244#define EVENT_MOV_FROM_PSR 7
245#define EVENT_ITC_D 8
246#define EVENT_ITC_I 9
247#define EVENT_MOV_TO_RR 10
248#define EVENT_MOV_TO_DBR 11
249#define EVENT_MOV_TO_IBR 12
250#define EVENT_MOV_TO_PKR 13
251#define EVENT_MOV_TO_PMC 14
252#define EVENT_MOV_TO_PMD 15
253#define EVENT_ITR_D 16
254#define EVENT_ITR_I 17
255#define EVENT_MOV_FROM_RR 18
256#define EVENT_MOV_FROM_DBR 19
257#define EVENT_MOV_FROM_IBR 20
258#define EVENT_MOV_FROM_PKR 21
259#define EVENT_MOV_FROM_PMC 22
260#define EVENT_MOV_FROM_CPUID 23
261#define EVENT_SSM 24
262#define EVENT_RSM 25
263#define EVENT_PTC_L 26
264#define EVENT_PTC_G 27
265#define EVENT_PTC_GA 28
266#define EVENT_PTR_D 29
267#define EVENT_PTR_I 30
268#define EVENT_THASH 31
269#define EVENT_TTAG 32
270#define EVENT_TPA 33
271#define EVENT_TAK 34
272#define EVENT_PTC_E 35
273#define EVENT_COVER 36
274#define EVENT_RFI 37
275#define EVENT_BSW_0 38
276#define EVENT_BSW_1 39
277#define EVENT_VMSW 40
278
279/**PAL virtual services offsets */
280#define PAL_VPS_RESUME_NORMAL 0x0000
281#define PAL_VPS_RESUME_HANDLER 0x0400
282#define PAL_VPS_SYNC_READ 0x0800
283#define PAL_VPS_SYNC_WRITE 0x0c00
284#define PAL_VPS_SET_PENDING_INTERRUPT 0x1000
285#define PAL_VPS_THASH 0x1400
286#define PAL_VPS_TTAG 0x1800
287#define PAL_VPS_RESTORE 0x1c00
288#define PAL_VPS_SAVE 0x2000
289
290#endif	/* _KVM_VT_I_H */
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
new file mode 100644
index 000000000000..def4576d22b1
--- /dev/null
+++ b/arch/ia64/kvm/vtlb.c
@@ -0,0 +1,636 @@
1/*
2 * vtlb.c: guest virtual tlb handling module.
3 * Copyright (c) 2004, Intel Corporation.
4 * Yaozu Dong (Eddie Dong) <Eddie.dong@intel.com>
5 * Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
6 *
7 * Copyright (c) 2007, Intel Corporation.
8 * Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
9 * Xiantao Zhang <xiantao.zhang@intel.com>
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms and conditions of the GNU General Public License,
13 * version 2, as published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * more details.
19 *
20 * You should have received a copy of the GNU General Public License along with
21 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
22 * Place - Suite 330, Boston, MA 02111-1307 USA.
23 *
24 */
25
26#include "vcpu.h"
27
28#include <linux/rwsem.h>
29
30#include <asm/tlb.h>
31
32/*
33 * Check to see if the address rid:va is translated by the TLB
34 */
35
36static int __is_tr_translated(struct thash_data *trp, u64 rid, u64 va)
37{
38 return ((trp->p) && (trp->rid == rid)
39 && ((va-trp->vadr) < PSIZE(trp->ps)));
40}
41
42/*
43 * Only for GUEST TR format.
44 */
45static int __is_tr_overlap(struct thash_data *trp, u64 rid, u64 sva, u64 eva)
46{
47 u64 sa1, ea1;
48
49 if (!trp->p || trp->rid != rid)
50 return 0;
51
52 sa1 = trp->vadr;
53 ea1 = sa1 + PSIZE(trp->ps) - 1;
54 eva -= 1;
55 if ((sva > ea1) || (sa1 > eva))
56 return 0;
57 else
58 return 1;
59
60}
61
62void machine_tlb_purge(u64 va, u64 ps)
63{
64 ia64_ptcl(va, ps << 2);
65}
66
67void local_flush_tlb_all(void)
68{
69 int i, j;
70 unsigned long flags, count0, count1;
71 unsigned long stride0, stride1, addr;
72
73 addr = current_vcpu->arch.ptce_base;
74 count0 = current_vcpu->arch.ptce_count[0];
75 count1 = current_vcpu->arch.ptce_count[1];
76 stride0 = current_vcpu->arch.ptce_stride[0];
77 stride1 = current_vcpu->arch.ptce_stride[1];
78
79 local_irq_save(flags);
80 for (i = 0; i < count0; ++i) {
81 for (j = 0; j < count1; ++j) {
82 ia64_ptce(addr);
83 addr += stride1;
84 }
85 addr += stride0;
86 }
87 local_irq_restore(flags);
88 ia64_srlz_i(); /* srlz.i implies srlz.d */
89}
90
91int vhpt_enabled(struct kvm_vcpu *vcpu, u64 vadr, enum vhpt_ref ref)
92{
93 union ia64_rr vrr;
94 union ia64_pta vpta;
95 struct ia64_psr vpsr;
96
97 vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
98 vrr.val = vcpu_get_rr(vcpu, vadr);
99 vpta.val = vcpu_get_pta(vcpu);
100
101 if (vrr.ve & vpta.ve) {
102 switch (ref) {
103 case DATA_REF:
104 case NA_REF:
105 return vpsr.dt;
106 case INST_REF:
107 return vpsr.dt && vpsr.it && vpsr.ic;
108 case RSE_REF:
109 return vpsr.dt && vpsr.rt;
110
111 }
112 }
113 return 0;
114}
115
116struct thash_data *vsa_thash(union ia64_pta vpta, u64 va, u64 vrr, u64 *tag)
117{
118 u64 index, pfn, rid, pfn_bits;
119
120 pfn_bits = vpta.size - 5 - 8;
121 pfn = REGION_OFFSET(va) >> _REGION_PAGE_SIZE(vrr);
122 rid = _REGION_ID(vrr);
123 index = ((rid & 0xff) << pfn_bits)|(pfn & ((1UL << pfn_bits) - 1));
124 *tag = ((rid >> 8) & 0xffff) | ((pfn >> pfn_bits) << 16);
125
126 return (struct thash_data *)((vpta.base << PTA_BASE_SHIFT) +
127 (index << 5));
128}
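
/*
 * Illustration only (user-space C, not part of this file): the same
 * index/tag split on concrete numbers. The table-size exponent "16"
 * and the rid/pfn values are made up; the 32-byte entry size accounts
 * for the "- 5" and the final "<< 5" above.
 */
#include <stdio.h>

int main(void)
{
	unsigned long size = 16;			/* 2^16-byte table */
	unsigned long pfn_bits = size - 5 - 8;		/* 3 pfn bits in index */
	unsigned long rid = 0x1234, pfn = 0xabcd;
	unsigned long index, tag;

	index = ((rid & 0xff) << pfn_bits) | (pfn & ((1UL << pfn_bits) - 1));
	tag = ((rid >> 8) & 0xffff) | ((pfn >> pfn_bits) << 16);
	printf("index=%#lx tag=%#lx\n", index, tag);	/* 0x1a5, 0x15790012 */
	return 0;
}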
129
130struct thash_data *__vtr_lookup(struct kvm_vcpu *vcpu, u64 va, int type)
131{
132
133 struct thash_data *trp;
134 int i;
135 u64 rid;
136
137 rid = vcpu_get_rr(vcpu, va);
138	rid = rid & RR_RID_MASK;
139 if (type == D_TLB) {
140 if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
141 for (trp = (struct thash_data *)&vcpu->arch.dtrs, i = 0;
142 i < NDTRS; i++, trp++) {
143 if (__is_tr_translated(trp, rid, va))
144 return trp;
145 }
146 }
147 } else {
148 if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) {
149 for (trp = (struct thash_data *)&vcpu->arch.itrs, i = 0;
150 i < NITRS; i++, trp++) {
151 if (__is_tr_translated(trp, rid, va))
152 return trp;
153 }
154 }
155 }
156
157 return NULL;
158}
159
160static void vhpt_insert(u64 pte, u64 itir, u64 ifa, u64 gpte)
161{
162 union ia64_rr rr;
163 struct thash_data *head;
164 unsigned long ps, gpaddr;
165
166 ps = itir_ps(itir);
167
168 gpaddr = ((gpte & _PAGE_PPN_MASK) >> ps << ps) |
169 (ifa & ((1UL << ps) - 1));
170
171 rr.val = ia64_get_rr(ifa);
172 head = (struct thash_data *)ia64_thash(ifa);
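	/*
	 * Publication order matters: the tag is invalidated first and
	 * ia64_mf() fences before the payload is rewritten, so a
	 * concurrent hardware VHPT walker never matches a half-written
	 * entry; storing the new tag last republishes the entry.
	 */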
173 head->etag = INVALID_TI_TAG;
174 ia64_mf();
175 head->page_flags = pte & ~PAGE_FLAGS_RV_MASK;
176 head->itir = rr.ps << 2;
177 head->etag = ia64_ttag(ifa);
178 head->gpaddr = gpaddr;
179}
180
181void mark_pages_dirty(struct kvm_vcpu *v, u64 pte, u64 ps)
182{
183 u64 i, dirty_pages = 1;
184 u64 base_gfn = (pte&_PAGE_PPN_MASK) >> PAGE_SHIFT;
185 spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
186 void *dirty_bitmap = (void *)v - (KVM_VCPU_OFS + v->vcpu_id * VCPU_SIZE)
187 + KVM_MEM_DIRTY_LOG_OFS;
188 dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT;
189
190 vmm_spin_lock(lock);
191 for (i = 0; i < dirty_pages; i++) {
192 /* avoid RMW */
193 if (!test_bit(base_gfn + i, dirty_bitmap))
194 set_bit(base_gfn + i , dirty_bitmap);
195 }
196 vmm_spin_unlock(lock);
197}
198
199void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va, int type)
200{
201 u64 phy_pte, psr;
202 union ia64_rr mrr;
203
204 mrr.val = ia64_get_rr(va);
205 phy_pte = translate_phy_pte(&pte, itir, va);
206
207 if (itir_ps(itir) >= mrr.ps) {
208 vhpt_insert(phy_pte, itir, va, pte);
209 } else {
210 phy_pte &= ~PAGE_FLAGS_RV_MASK;
211 psr = ia64_clear_ic();
212 ia64_itc(type, va, phy_pte, itir_ps(itir));
213 ia64_set_psr(psr);
214 }
215
216 if (!(pte&VTLB_PTE_IO))
217 mark_pages_dirty(v, pte, itir_ps(itir));
218}
219
220/*
221 * vhpt lookup
222 */
223struct thash_data *vhpt_lookup(u64 va)
224{
225 struct thash_data *head;
226 u64 tag;
227
228 head = (struct thash_data *)ia64_thash(va);
229 tag = ia64_ttag(va);
230 if (head->etag == tag)
231 return head;
232 return NULL;
233}
234
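/*
 * guest_vhpt_lookup() below probes the guest VHPT with a speculative
 * load: ld8.s defers any fault and instead sets the target register's
 * NaT bit, which tnat.nz then tests. That way a missing or unmapped
 * VHPT entry makes the function return 1 rather than take a nested
 * fault while psr.ic is off.
 */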
235u64 guest_vhpt_lookup(u64 iha, u64 *pte)
236{
237 u64 ret;
238 struct thash_data *data;
239
240 data = __vtr_lookup(current_vcpu, iha, D_TLB);
241 if (data != NULL)
242 thash_vhpt_insert(current_vcpu, data->page_flags,
243 data->itir, iha, D_TLB);
244
245 asm volatile ("rsm psr.ic|psr.i;;"
246 "srlz.d;;"
247 "ld8.s r9=[%1];;"
248 "tnat.nz p6,p7=r9;;"
249 "(p6) mov %0=1;"
250 "(p6) mov r9=r0;"
251 "(p7) extr.u r9=r9,0,53;;"
252 "(p7) mov %0=r0;"
253 "(p7) st8 [%2]=r9;;"
254 "ssm psr.ic;;"
255 "srlz.d;;"
256 /* "ssm psr.i;;" Once interrupts in vmm open, need fix*/
257 : "=r"(ret) : "r"(iha), "r"(pte):"memory");
258
259 return ret;
260}
261
262/*
263 * purge software guest tlb
264 */
265
266static void vtlb_purge(struct kvm_vcpu *v, u64 va, u64 ps)
267{
268 struct thash_data *cur;
269 u64 start, curadr, size, psbits, tag, rr_ps, num;
270 union ia64_rr vrr;
271 struct thash_cb *hcb = &v->arch.vtlb;
272
273 vrr.val = vcpu_get_rr(v, va);
274 psbits = VMX(v, psbits[(va >> 61)]);
275 start = va & ~((1UL << ps) - 1);
276 while (psbits) {
277 curadr = start;
278 rr_ps = __ffs(psbits);
279 psbits &= ~(1UL << rr_ps);
280 num = 1UL << ((ps < rr_ps) ? 0 : (ps - rr_ps));
281 size = PSIZE(rr_ps);
282 vrr.ps = rr_ps;
283 while (num) {
284 cur = vsa_thash(hcb->pta, curadr, vrr.val, &tag);
285 if (cur->etag == tag && cur->ps == rr_ps)
286 cur->etag = INVALID_TI_TAG;
287 curadr += size;
288 num--;
289 }
290 }
291}
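
/*
 * A minimal user-space model (not kernel code) of the psbits walk
 * above: each bit set in psbits records a page size ever inserted for
 * the region, so the purge only has to sweep those sizes.
 * __builtin_ctzl stands in for __ffs().
 */
#include <stdio.h>

int main(void)
{
	unsigned long psbits = (1UL << 14) | (1UL << 16); /* 16K + 64K used */

	while (psbits) {
		unsigned int rr_ps = __builtin_ctzl(psbits);

		psbits &= ~(1UL << rr_ps);
		printf("sweep entries of page size 2^%u\n", rr_ps);
	}
	return 0;
}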
292
293
294/*
295 * purge VHPT and machine TLB
296 */
297static void vhpt_purge(struct kvm_vcpu *v, u64 va, u64 ps)
298{
299 struct thash_data *cur;
300 u64 start, size, tag, num;
301 union ia64_rr rr;
302
303 start = va & ~((1UL << ps) - 1);
304 rr.val = ia64_get_rr(va);
305 size = PSIZE(rr.ps);
306 num = 1UL << ((ps < rr.ps) ? 0 : (ps - rr.ps));
307 while (num) {
308 cur = (struct thash_data *)ia64_thash(start);
309 tag = ia64_ttag(start);
310 if (cur->etag == tag)
311 cur->etag = INVALID_TI_TAG;
312 start += size;
313 num--;
314 }
315 machine_tlb_purge(va, ps);
316}
317
318/*
319 * Insert an entry into hash TLB or VHPT.
320 * NOTES:
321 * 1: When inserting a VHPT entry into the thash, "va" must be an
322 * address covered by the inserted machine VHPT entry.
323 * 2: The entry format is always TLB format.
324 * 3: The caller must make sure the new entry does not overlap
325 * with any existing entry.
326 */
327void vtlb_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va)
328{
329 struct thash_data *head;
330 union ia64_rr vrr;
331 u64 tag;
332 struct thash_cb *hcb = &v->arch.vtlb;
333
334 vrr.val = vcpu_get_rr(v, va);
335 vrr.ps = itir_ps(itir);
336 VMX(v, psbits[va >> 61]) |= (1UL << vrr.ps);
337 head = vsa_thash(hcb->pta, va, vrr.val, &tag);
338 head->page_flags = pte;
339 head->itir = itir;
340 head->etag = tag;
341}
342
343int vtr_find_overlap(struct kvm_vcpu *vcpu, u64 va, u64 ps, int type)
344{
345 struct thash_data *trp;
346 int i;
347 u64 end, rid;
348
349 rid = vcpu_get_rr(vcpu, va);
350 rid = rid & RR_RID_MASK;
351 end = va + PSIZE(ps);
352 if (type == D_TLB) {
353 if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
354 for (trp = (struct thash_data *)&vcpu->arch.dtrs, i = 0;
355 i < NDTRS; i++, trp++) {
356 if (__is_tr_overlap(trp, rid, va, end))
357 return i;
358 }
359 }
360 } else {
361 if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) {
362 for (trp = (struct thash_data *)&vcpu->arch.itrs, i = 0;
363 i < NITRS; i++, trp++) {
364 if (__is_tr_overlap(trp, rid, va, end))
365 return i;
366 }
367 }
368 }
369 return -1;
370}
371
372/*
373 * Purge entries in VTLB and VHPT
374 */
375void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps)
376{
377 if (vcpu_quick_region_check(v->arch.tc_regions, va))
378 vtlb_purge(v, va, ps);
379 vhpt_purge(v, va, ps);
380}
381
382void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps)
383{
384 u64 old_va = va;
385 va = REGION_OFFSET(va);
386 if (vcpu_quick_region_check(v->arch.tc_regions, old_va))
387 vtlb_purge(v, va, ps);
388 vhpt_purge(v, va, ps);
389}
390
391u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
392{
393 u64 ps, ps_mask, paddr, maddr;
394 union pte_flags phy_pte;
395
396 ps = itir_ps(itir);
397 ps_mask = ~((1UL << ps) - 1);
398 phy_pte.val = *pte;
399 paddr = *pte;
400 paddr = ((paddr & _PAGE_PPN_MASK) & ps_mask) | (va & ~ps_mask);
401 maddr = kvm_lookup_mpa(paddr >> PAGE_SHIFT);
402 if (maddr & GPFN_IO_MASK) {
403 *pte |= VTLB_PTE_IO;
404 return -1;
405 }
406 maddr = ((maddr & _PAGE_PPN_MASK) & PAGE_MASK) |
407 (paddr & ~PAGE_MASK);
408 phy_pte.ppn = maddr >> ARCH_PAGE_SHIFT;
409 return phy_pte.val;
410}
411
412/*
413 * Purge overlap TCs and then insert the new entry to emulate itc ops.
414 * Notes: Only TC entries can be purged and inserted this way.
415 * A return value of 1 indicates the address is MMIO.
416 */
417int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
418 u64 ifa, int type)
419{
420 u64 ps;
421 u64 phy_pte;
422 union ia64_rr vrr, mrr;
423 int ret = 0;
424
425 ps = itir_ps(itir);
426 vrr.val = vcpu_get_rr(v, ifa);
427 mrr.val = ia64_get_rr(ifa);
428
429 phy_pte = translate_phy_pte(&pte, itir, ifa);
430
431 /* Ensure WB attribute if pte is related to a normal mem page,
432 * which is required by vga acceleration since qemu maps shared
433 * vram buffer with WB.
434 */
435 if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT)) {
436 pte &= ~_PAGE_MA_MASK;
437 phy_pte &= ~_PAGE_MA_MASK;
438 }
439
440 if (pte & VTLB_PTE_IO)
441 ret = 1;
442
443 vtlb_purge(v, ifa, ps);
444 vhpt_purge(v, ifa, ps);
445
446 if (ps == mrr.ps) {
447 if (!(pte&VTLB_PTE_IO)) {
448 vhpt_insert(phy_pte, itir, ifa, pte);
449 } else {
450 vtlb_insert(v, pte, itir, ifa);
451 vcpu_quick_region_set(VMX(v, tc_regions), ifa);
452 }
453 } else if (ps > mrr.ps) {
454 vtlb_insert(v, pte, itir, ifa);
455 vcpu_quick_region_set(VMX(v, tc_regions), ifa);
456 if (!(pte&VTLB_PTE_IO))
457 vhpt_insert(phy_pte, itir, ifa, pte);
458 } else {
459 u64 psr;
460 phy_pte &= ~PAGE_FLAGS_RV_MASK;
461 psr = ia64_clear_ic();
462 ia64_itc(type, ifa, phy_pte, ps);
463 ia64_set_psr(psr);
464 }
465 if (!(pte&VTLB_PTE_IO))
466 mark_pages_dirty(v, pte, ps);
467
468 return ret;
469}
470
471/*
472 * Purge all TC and VHPT entries, including those in the hash table.
473 *
474 */
475
476void thash_purge_all(struct kvm_vcpu *v)
477{
478 int i;
479 struct thash_data *head;
480 struct thash_cb *vtlb, *vhpt;
481 vtlb = &v->arch.vtlb;
482 vhpt = &v->arch.vhpt;
483
484 for (i = 0; i < 8; i++)
485 VMX(v, psbits[i]) = 0;
486
487 head = vtlb->hash;
488 for (i = 0; i < vtlb->num; i++) {
489 head->page_flags = 0;
490 head->etag = INVALID_TI_TAG;
491 head->itir = 0;
492 head->next = 0;
493 head++;
494	}
495
496 head = vhpt->hash;
497 for (i = 0; i < vhpt->num; i++) {
498 head->page_flags = 0;
499 head->etag = INVALID_TI_TAG;
500 head->itir = 0;
501 head->next = 0;
502 head++;
503	}
504
505 local_flush_tlb_all();
506}
507
508
509/*
510 * Look up the hash table and its collision chain to find an entry
511 * covering this address rid:va.
512 *
513 * INPUT:
514 * Entries are in TLB format for both VHPT & TLB.
515 */
516
517struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data)
518{
519 struct thash_data *cch;
520 u64 psbits, ps, tag;
521 union ia64_rr vrr;
522
523 struct thash_cb *hcb = &v->arch.vtlb;
524
525	cch = __vtr_lookup(v, va, is_data);
526 if (cch)
527 return cch;
528
529 if (vcpu_quick_region_check(v->arch.tc_regions, va) == 0)
530 return NULL;
531
532 psbits = VMX(v, psbits[(va >> 61)]);
533 vrr.val = vcpu_get_rr(v, va);
534 while (psbits) {
535 ps = __ffs(psbits);
536 psbits &= ~(1UL << ps);
537 vrr.ps = ps;
538 cch = vsa_thash(hcb->pta, va, vrr.val, &tag);
539 if (cch->etag == tag && cch->ps == ps)
540 return cch;
541 }
542
543 return NULL;
544}
545
546
547/*
548 * Initialize internal control data before service.
549 */
550void thash_init(struct thash_cb *hcb, u64 sz)
551{
552 int i;
553 struct thash_data *head;
554
555 hcb->pta.val = (unsigned long)hcb->hash;
556 hcb->pta.vf = 1;
557 hcb->pta.ve = 1;
558 hcb->pta.size = sz;
559 head = hcb->hash;
560 for (i = 0; i < hcb->num; i++) {
561 head->page_flags = 0;
562 head->itir = 0;
563 head->etag = INVALID_TI_TAG;
564 head->next = 0;
565 head++;
566 }
567}
568
569u64 kvm_lookup_mpa(u64 gpfn)
570{
571 u64 *base = (u64 *) KVM_P2M_BASE;
572 return *(base + gpfn);
573}
574
575u64 kvm_gpa_to_mpa(u64 gpa)
576{
577 u64 pte = kvm_lookup_mpa(gpa >> PAGE_SHIFT);
578 return (pte >> PAGE_SHIFT << PAGE_SHIFT) | (gpa & ~PAGE_MASK);
579}
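
/*
 * Sketch of the translation above in plain user-space C. The toy p2m
 * table, the 16KB TOY_PAGE_SHIFT, and the sample gpa are assumptions
 * for illustration: the p2m entry supplies the machine frame bits and
 * the page offset is carried over from the gpa unchanged.
 */
#include <stdio.h>

#define TOY_PAGE_SHIFT	14
#define TOY_PAGE_MASK	(~((1UL << TOY_PAGE_SHIFT) - 1))

static const unsigned long toy_p2m[4] = {	/* gpfn -> machine address */
	0x100UL << TOY_PAGE_SHIFT, 0x2a0UL << TOY_PAGE_SHIFT,
	0x031UL << TOY_PAGE_SHIFT, 0x777UL << TOY_PAGE_SHIFT,
};

static unsigned long toy_gpa_to_mpa(unsigned long gpa)
{
	unsigned long pte = toy_p2m[gpa >> TOY_PAGE_SHIFT];

	return (pte & TOY_PAGE_MASK) | (gpa & ~TOY_PAGE_MASK);
}

int main(void)
{
	/* gpfn 1 maps to machine frame 0x2a0; offset 0x123 is preserved */
	printf("%#lx -> %#lx\n", 0x4123UL, toy_gpa_to_mpa(0x4123UL));
	return 0;
}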
580
581
582/*
583 * Fetch guest bundle code.
584 * INPUT:
585 * gip: guest ip
586 * pbundle: used to return fetched bundle.
587 */
588int fetch_code(struct kvm_vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle)
589{
590 u64 gpip = 0; /* guest physical IP*/
591 u64 *vpa;
592 struct thash_data *tlb;
593 u64 maddr;
594
595 if (!(VCPU(vcpu, vpsr) & IA64_PSR_IT)) {
596 /* I-side physical mode */
597 gpip = gip;
598 } else {
599 tlb = vtlb_lookup(vcpu, gip, I_TLB);
600 if (tlb)
601 gpip = (tlb->ppn >> (tlb->ps - 12) << tlb->ps) |
602 (gip & (PSIZE(tlb->ps) - 1));
603 }
604 if (gpip) {
605 maddr = kvm_gpa_to_mpa(gpip);
606 } else {
607 tlb = vhpt_lookup(gip);
608 if (tlb == NULL) {
609 ia64_ptcl(gip, ARCH_PAGE_SHIFT << 2);
610 return IA64_FAULT;
611 }
612 maddr = (tlb->ppn >> (tlb->ps - 12) << tlb->ps)
613 | (gip & (PSIZE(tlb->ps) - 1));
614 }
615 vpa = (u64 *)__kvm_va(maddr);
616
617 pbundle->i64[0] = *vpa++;
618 pbundle->i64[1] = *vpa;
619
620 return IA64_NO_FAULT;
621}
622
623
624void kvm_init_vhpt(struct kvm_vcpu *v)
625{
626 v->arch.vhpt.num = VHPT_NUM_ENTRIES;
627 thash_init(&v->arch.vhpt, VHPT_SHIFT);
628 ia64_set_pta(v->arch.vhpt.pta.val);
629	/* Enable VHPT here? */
630}
631
632void kvm_init_vtlb(struct kvm_vcpu *v)
633{
634 v->arch.vtlb.num = VTLB_NUM_ENTRIES;
635 thash_init(&v->arch.vtlb, VTLB_SHIFT);
636}
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 20f45a8b87e3..4e40c122bf26 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -803,3 +803,4 @@ config PPC_CLOCK
803config PPC_LIB_RHEAP
804	bool
805
806source "arch/powerpc/kvm/Kconfig"
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index a86d8d853214..807a2dce6263 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -151,6 +151,9 @@ config BOOTX_TEXT
151
152config PPC_EARLY_DEBUG
153	bool "Early debugging (dangerous)"
154 # PPC_EARLY_DEBUG on 440 leaves AS=1 mappings above the TLB high water
155 # mark, which doesn't work with current 440 KVM.
156 depends on !KVM
157	help
158	  Say Y to enable some early debugging facilities that may be available
159	  for your processor/board combination. Those facilities are hacks
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index e2ec4a91ccef..9dcdc036cdf7 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -145,6 +145,7 @@ core-y += arch/powerpc/kernel/ \
145				  arch/powerpc/platforms/
146core-$(CONFIG_MATH_EMULATION)	+= arch/powerpc/math-emu/
147core-$(CONFIG_XMON)		+= arch/powerpc/xmon/
148core-$(CONFIG_KVM) += arch/powerpc/kvm/
149
150drivers-$(CONFIG_OPROFILE)	+= arch/powerpc/oprofile/
151
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index adf1d09d726f..62134845af08 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -23,6 +23,9 @@
23#include <linux/mm.h>
24#include <linux/suspend.h>
25#include <linux/hrtimer.h>
26#ifdef CONFIG_KVM
27#include <linux/kvm_host.h>
28#endif
29#ifdef CONFIG_PPC64
30#include <linux/time.h>
31#include <linux/hardirq.h>
@@ -324,5 +327,30 @@ int main(void)
327
328	DEFINE(PGD_TABLE_SIZE, PGD_TABLE_SIZE);
329
330#ifdef CONFIG_KVM
331 DEFINE(TLBE_BYTES, sizeof(struct tlbe));
332
333 DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
334 DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
335 DEFINE(VCPU_HOST_TLB, offsetof(struct kvm_vcpu, arch.host_tlb));
336 DEFINE(VCPU_SHADOW_TLB, offsetof(struct kvm_vcpu, arch.shadow_tlb));
337 DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
338 DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
339 DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
340 DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
341 DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
342 DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
343 DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.msr));
344 DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4));
345 DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5));
346 DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6));
347 DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7));
348 DEFINE(VCPU_PID, offsetof(struct kvm_vcpu, arch.pid));
349
350 DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
351 DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
352 DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
353#endif
354
355	return 0;
356}
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
new file mode 100644
index 000000000000..f5d7a5eab96e
--- /dev/null
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -0,0 +1,224 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2007
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */
19
20#include <linux/types.h>
21#include <linux/string.h>
22#include <linux/kvm_host.h>
23#include <linux/highmem.h>
24#include <asm/mmu-44x.h>
25#include <asm/kvm_ppc.h>
26
27#include "44x_tlb.h"
28
29#define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW)
30#define PPC44x_TLB_SUPER_PERM_MASK (PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW)
31
32static unsigned int kvmppc_tlb_44x_pos;
33
34static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
35{
36 /* Mask off reserved bits. */
37 attrib &= PPC44x_TLB_PERM_MASK|PPC44x_TLB_ATTR_MASK;
38
39 if (!usermode) {
40 /* Guest is in supervisor mode, so we need to translate guest
41 * supervisor permissions into user permissions. */
42 attrib &= ~PPC44x_TLB_USER_PERM_MASK;
43 attrib |= (attrib & PPC44x_TLB_SUPER_PERM_MASK) << 3;
44 }
45
46 /* Make sure host can always access this memory. */
47 attrib |= PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW;
48
49 return attrib;
50}
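
/*
 * Why the "<< 3" above works: in the 440 TLB word2 layout (per
 * asm/mmu-44x.h) the supervisor permission bits SR/SW/SX sit exactly
 * three bits below their user counterparts UR/UW/UX. A stand-alone
 * check, with those bit values copied in here as assumptions:
 */
#include <assert.h>

#define TLB_UX 0x20
#define TLB_UW 0x10
#define TLB_UR 0x08
#define TLB_SX 0x04
#define TLB_SW 0x02
#define TLB_SR 0x01

int main(void)
{
	unsigned int super = TLB_SX | TLB_SR;	/* supervisor exec+read */

	/* shifting grants the same rights to user mode */
	assert((super << 3) == (TLB_UX | TLB_UR));
	return 0;
}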
51
52/* Search the guest TLB for a matching entry. */
53int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
54 unsigned int as)
55{
56 int i;
57
58 /* XXX Replace loop with fancy data structures. */
59 for (i = 0; i < PPC44x_TLB_SIZE; i++) {
60 struct tlbe *tlbe = &vcpu->arch.guest_tlb[i];
61 unsigned int tid;
62
63 if (eaddr < get_tlb_eaddr(tlbe))
64 continue;
65
66 if (eaddr > get_tlb_end(tlbe))
67 continue;
68
69 tid = get_tlb_tid(tlbe);
70 if (tid && (tid != pid))
71 continue;
72
73 if (!get_tlb_v(tlbe))
74 continue;
75
76 if (get_tlb_ts(tlbe) != as)
77 continue;
78
79 return i;
80 }
81
82 return -1;
83}
84
85struct tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu, gva_t eaddr)
86{
87 unsigned int as = !!(vcpu->arch.msr & MSR_IS);
88 unsigned int index;
89
90 index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
91 if (index == -1)
92 return NULL;
93 return &vcpu->arch.guest_tlb[index];
94}
95
96struct tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu, gva_t eaddr)
97{
98 unsigned int as = !!(vcpu->arch.msr & MSR_DS);
99 unsigned int index;
100
101 index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
102 if (index == -1)
103 return NULL;
104 return &vcpu->arch.guest_tlb[index];
105}
106
107static int kvmppc_44x_tlbe_is_writable(struct tlbe *tlbe)
108{
109 return tlbe->word2 & (PPC44x_TLB_SW|PPC44x_TLB_UW);
110}
111
112/* Must be called with mmap_sem locked for writing. */
113static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
114 unsigned int index)
115{
116 struct tlbe *stlbe = &vcpu->arch.shadow_tlb[index];
117 struct page *page = vcpu->arch.shadow_pages[index];
118
119 kunmap(vcpu->arch.shadow_pages[index]);
120
121 if (get_tlb_v(stlbe)) {
122 if (kvmppc_44x_tlbe_is_writable(stlbe))
123 kvm_release_page_dirty(page);
124 else
125 kvm_release_page_clean(page);
126 }
127}
128
129/* Caller must ensure that the specified guest TLB entry is safe to insert into
130 * the shadow TLB. */
131void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
132 u32 flags)
133{
134 struct page *new_page;
135 struct tlbe *stlbe;
136 hpa_t hpaddr;
137 unsigned int victim;
138
139 /* Future optimization: don't overwrite the TLB entry containing the
140 * current PC (or stack?). */
141 victim = kvmppc_tlb_44x_pos++;
142 if (kvmppc_tlb_44x_pos > tlb_44x_hwater)
143 kvmppc_tlb_44x_pos = 0;
144 stlbe = &vcpu->arch.shadow_tlb[victim];
145
146 /* Get reference to new page. */
147 down_write(&current->mm->mmap_sem);
148 new_page = gfn_to_page(vcpu->kvm, gfn);
149 if (is_error_page(new_page)) {
150 printk(KERN_ERR "Couldn't get guest page!\n");
151 kvm_release_page_clean(new_page);
152 return;
153 }
154 hpaddr = page_to_phys(new_page);
155
156 /* Drop reference to old page. */
157 kvmppc_44x_shadow_release(vcpu, victim);
158 up_write(&current->mm->mmap_sem);
159
160 vcpu->arch.shadow_pages[victim] = new_page;
161
162 /* XXX Make sure (va, size) doesn't overlap any other
163 * entries. 440x6 user manual says the result would be
164 * "undefined." */
165
166 /* XXX what about AS? */
167
168 stlbe->tid = asid & 0xff;
169
170 /* Force TS=1 for all guest mappings. */
171 /* For now we hardcode 4KB mappings, but it will be important to
172 * use host large pages in the future. */
173 stlbe->word0 = (gvaddr & PAGE_MASK) | PPC44x_TLB_VALID | PPC44x_TLB_TS
174 | PPC44x_TLB_4K;
175
176 stlbe->word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
177 stlbe->word2 = kvmppc_44x_tlb_shadow_attrib(flags,
178 vcpu->arch.msr & MSR_PR);
179}
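
/*
 * The victim choice above is a global round-robin cursor over hardware
 * TLB slots up to the high-water mark; a minimal model of just that
 * policy (names are illustrative, not from the kernel):
 */
static unsigned int next_victim(unsigned int *pos, unsigned int hwater)
{
	unsigned int victim = (*pos)++;	/* use the current slot, advance */

	if (*pos > hwater)
		*pos = 0;		/* wrap past the high-water mark */
	return victim;
}

int main(void)
{
	unsigned int pos = 0, i;

	for (i = 0; i < 5; i++)
		next_victim(&pos, 2);	/* cycles 0, 1, 2, 0, 1 */
	return 0;
}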
180
181void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, u64 eaddr, u64 asid)
182{
183 unsigned int pid = asid & 0xff;
184 int i;
185
186 /* XXX Replace loop with fancy data structures. */
187 down_write(&current->mm->mmap_sem);
188 for (i = 0; i <= tlb_44x_hwater; i++) {
189 struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i];
190 unsigned int tid;
191
192 if (!get_tlb_v(stlbe))
193 continue;
194
195 if (eaddr < get_tlb_eaddr(stlbe))
196 continue;
197
198 if (eaddr > get_tlb_end(stlbe))
199 continue;
200
201 tid = get_tlb_tid(stlbe);
202 if (tid && (tid != pid))
203 continue;
204
205 kvmppc_44x_shadow_release(vcpu, i);
206 stlbe->word0 = 0;
207 }
208 up_write(&current->mm->mmap_sem);
209}
210
211/* Invalidate all mappings, so that when they fault back in they will get the
212 * proper permission bits. */
213void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
214{
215 int i;
216
217 /* XXX Replace loop with fancy data structures. */
218 down_write(&current->mm->mmap_sem);
219 for (i = 0; i <= tlb_44x_hwater; i++) {
220 kvmppc_44x_shadow_release(vcpu, i);
221 vcpu->arch.shadow_tlb[i].word0 = 0;
222 }
223 up_write(&current->mm->mmap_sem);
224}
diff --git a/arch/powerpc/kvm/44x_tlb.h b/arch/powerpc/kvm/44x_tlb.h
new file mode 100644
index 000000000000..2ccd46b6f6b7
--- /dev/null
+++ b/arch/powerpc/kvm/44x_tlb.h
@@ -0,0 +1,91 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2007
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */
19
20#ifndef __KVM_POWERPC_TLB_H__
21#define __KVM_POWERPC_TLB_H__
22
23#include <linux/kvm_host.h>
24#include <asm/mmu-44x.h>
25
26extern int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr,
27 unsigned int pid, unsigned int as);
28extern struct tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu, gva_t eaddr);
29extern struct tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu, gva_t eaddr);
30
31/* TLB helper functions */
32static inline unsigned int get_tlb_size(const struct tlbe *tlbe)
33{
34 return (tlbe->word0 >> 4) & 0xf;
35}
36
37static inline gva_t get_tlb_eaddr(const struct tlbe *tlbe)
38{
39 return tlbe->word0 & 0xfffffc00;
40}
41
42static inline gva_t get_tlb_bytes(const struct tlbe *tlbe)
43{
44 unsigned int pgsize = get_tlb_size(tlbe);
45 return 1 << 10 << (pgsize << 1);
46}
47
48static inline gva_t get_tlb_end(const struct tlbe *tlbe)
49{
50 return get_tlb_eaddr(tlbe) + get_tlb_bytes(tlbe) - 1;
51}
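
/*
 * Sanity check for the SIZE decoding above (stand-alone C, not kernel
 * code): each step of the 440's 4-bit SIZE field quadruples the page,
 * so 1 << 10 << (2 * size) walks 1KB, 4KB, 16KB ... 256MB.
 */
#include <stdio.h>

int main(void)
{
	unsigned int size;

	for (size = 0; size <= 9; size++)
		printf("SIZE=%u -> %lu KB\n", size,
		       (1UL << 10 << (size << 1)) >> 10);
	return 0;
}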
52
53static inline u64 get_tlb_raddr(const struct tlbe *tlbe)
54{
55 u64 word1 = tlbe->word1;
56 return ((word1 & 0xf) << 32) | (word1 & 0xfffffc00);
57}
58
59static inline unsigned int get_tlb_tid(const struct tlbe *tlbe)
60{
61 return tlbe->tid & 0xff;
62}
63
64static inline unsigned int get_tlb_ts(const struct tlbe *tlbe)
65{
66 return (tlbe->word0 >> 8) & 0x1;
67}
68
69static inline unsigned int get_tlb_v(const struct tlbe *tlbe)
70{
71 return (tlbe->word0 >> 9) & 0x1;
72}
73
74static inline unsigned int get_mmucr_stid(const struct kvm_vcpu *vcpu)
75{
76 return vcpu->arch.mmucr & 0xff;
77}
78
79static inline unsigned int get_mmucr_sts(const struct kvm_vcpu *vcpu)
80{
81 return (vcpu->arch.mmucr >> 16) & 0x1;
82}
83
84static inline gpa_t tlb_xlate(struct tlbe *tlbe, gva_t eaddr)
85{
86 unsigned int pgmask = get_tlb_bytes(tlbe) - 1;
87
88 return get_tlb_raddr(tlbe) | (eaddr & pgmask);
89}
90
91#endif /* __KVM_POWERPC_TLB_H__ */
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
new file mode 100644
index 000000000000..6b076010213b
--- /dev/null
+++ b/arch/powerpc/kvm/Kconfig
@@ -0,0 +1,42 @@
1#
2# KVM configuration
3#
4
5menuconfig VIRTUALIZATION
6 bool "Virtualization"
7 ---help---
8 Say Y here to get to see options for using your Linux host to run
9 other operating systems inside virtual machines (guests).
10 This option alone does not add any kernel code.
11
12 If you say N, all options in this submenu will be skipped and
13 disabled.
14
15if VIRTUALIZATION
16
17config KVM
18 bool "Kernel-based Virtual Machine (KVM) support"
19 depends on 44x && EXPERIMENTAL
20 select PREEMPT_NOTIFIERS
21 select ANON_INODES
22 # We can only run on Book E hosts so far
23 select KVM_BOOKE_HOST
24 ---help---
25 Support hosting virtualized guest machines. You will also
26 need to select one or more of the processor modules below.
27
28 This module provides access to the hardware capabilities through
29 a character device node named /dev/kvm.
30
31 If unsure, say N.
32
33config KVM_BOOKE_HOST
34 bool "KVM host support for Book E PowerPC processors"
35 depends on KVM && 44x
36 ---help---
37 Provides host support for KVM on Book E PowerPC processors. Currently
38 this works on 440 processors only.
39
40source drivers/virtio/Kconfig
41
42endif # VIRTUALIZATION
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
new file mode 100644
index 000000000000..d0d358d367ec
--- /dev/null
+++ b/arch/powerpc/kvm/Makefile
@@ -0,0 +1,15 @@
1#
2# Makefile for Kernel-based Virtual Machine module
3#
4
5EXTRA_CFLAGS += -Ivirt/kvm -Iarch/powerpc/kvm
6
7common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o)
8
9kvm-objs := $(common-objs) powerpc.o emulate.o booke_guest.o
10obj-$(CONFIG_KVM) += kvm.o
11
12AFLAGS_booke_interrupts.o := -I$(obj)
13
14kvm-booke-host-objs := booke_host.o booke_interrupts.o 44x_tlb.o
15obj-$(CONFIG_KVM_BOOKE_HOST) += kvm-booke-host.o
diff --git a/arch/powerpc/kvm/booke_guest.c b/arch/powerpc/kvm/booke_guest.c
new file mode 100644
index 000000000000..6d9884a6884a
--- /dev/null
+++ b/arch/powerpc/kvm/booke_guest.c
@@ -0,0 +1,615 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2007
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
19 */
20
21#include <linux/errno.h>
22#include <linux/err.h>
23#include <linux/kvm_host.h>
24#include <linux/module.h>
25#include <linux/vmalloc.h>
26#include <linux/fs.h>
27#include <asm/cputable.h>
28#include <asm/uaccess.h>
29#include <asm/kvm_ppc.h>
30
31#include "44x_tlb.h"
32
33#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
34#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
35
36struct kvm_stats_debugfs_item debugfs_entries[] = {
37 { "exits", VCPU_STAT(sum_exits) },
38 { "mmio", VCPU_STAT(mmio_exits) },
39 { "dcr", VCPU_STAT(dcr_exits) },
40 { "sig", VCPU_STAT(signal_exits) },
41 { "light", VCPU_STAT(light_exits) },
42 { "itlb_r", VCPU_STAT(itlb_real_miss_exits) },
43 { "itlb_v", VCPU_STAT(itlb_virt_miss_exits) },
44 { "dtlb_r", VCPU_STAT(dtlb_real_miss_exits) },
45 { "dtlb_v", VCPU_STAT(dtlb_virt_miss_exits) },
46 { "sysc", VCPU_STAT(syscall_exits) },
47 { "isi", VCPU_STAT(isi_exits) },
48 { "dsi", VCPU_STAT(dsi_exits) },
49 { "inst_emu", VCPU_STAT(emulated_inst_exits) },
50 { "dec", VCPU_STAT(dec_exits) },
51 { "ext_intr", VCPU_STAT(ext_intr_exits) },
52 { NULL }
53};
54
55static const u32 interrupt_msr_mask[16] = {
56 [BOOKE_INTERRUPT_CRITICAL] = MSR_ME,
57 [BOOKE_INTERRUPT_MACHINE_CHECK] = 0,
58 [BOOKE_INTERRUPT_DATA_STORAGE] = MSR_CE|MSR_ME|MSR_DE,
59 [BOOKE_INTERRUPT_INST_STORAGE] = MSR_CE|MSR_ME|MSR_DE,
60 [BOOKE_INTERRUPT_EXTERNAL] = MSR_CE|MSR_ME|MSR_DE,
61 [BOOKE_INTERRUPT_ALIGNMENT] = MSR_CE|MSR_ME|MSR_DE,
62 [BOOKE_INTERRUPT_PROGRAM] = MSR_CE|MSR_ME|MSR_DE,
63 [BOOKE_INTERRUPT_FP_UNAVAIL] = MSR_CE|MSR_ME|MSR_DE,
64 [BOOKE_INTERRUPT_SYSCALL] = MSR_CE|MSR_ME|MSR_DE,
65 [BOOKE_INTERRUPT_AP_UNAVAIL] = MSR_CE|MSR_ME|MSR_DE,
66 [BOOKE_INTERRUPT_DECREMENTER] = MSR_CE|MSR_ME|MSR_DE,
67 [BOOKE_INTERRUPT_FIT] = MSR_CE|MSR_ME|MSR_DE,
68 [BOOKE_INTERRUPT_WATCHDOG] = MSR_ME,
69 [BOOKE_INTERRUPT_DTLB_MISS] = MSR_CE|MSR_ME|MSR_DE,
70 [BOOKE_INTERRUPT_ITLB_MISS] = MSR_CE|MSR_ME|MSR_DE,
71 [BOOKE_INTERRUPT_DEBUG] = MSR_ME,
72};
73
74const unsigned char exception_priority[] = {
75 [BOOKE_INTERRUPT_DATA_STORAGE] = 0,
76 [BOOKE_INTERRUPT_INST_STORAGE] = 1,
77 [BOOKE_INTERRUPT_ALIGNMENT] = 2,
78 [BOOKE_INTERRUPT_PROGRAM] = 3,
79 [BOOKE_INTERRUPT_FP_UNAVAIL] = 4,
80 [BOOKE_INTERRUPT_SYSCALL] = 5,
81 [BOOKE_INTERRUPT_AP_UNAVAIL] = 6,
82 [BOOKE_INTERRUPT_DTLB_MISS] = 7,
83 [BOOKE_INTERRUPT_ITLB_MISS] = 8,
84 [BOOKE_INTERRUPT_MACHINE_CHECK] = 9,
85 [BOOKE_INTERRUPT_DEBUG] = 10,
86 [BOOKE_INTERRUPT_CRITICAL] = 11,
87 [BOOKE_INTERRUPT_WATCHDOG] = 12,
88 [BOOKE_INTERRUPT_EXTERNAL] = 13,
89 [BOOKE_INTERRUPT_FIT] = 14,
90 [BOOKE_INTERRUPT_DECREMENTER] = 15,
91};
92
93const unsigned char priority_exception[] = {
94 BOOKE_INTERRUPT_DATA_STORAGE,
95 BOOKE_INTERRUPT_INST_STORAGE,
96 BOOKE_INTERRUPT_ALIGNMENT,
97 BOOKE_INTERRUPT_PROGRAM,
98 BOOKE_INTERRUPT_FP_UNAVAIL,
99 BOOKE_INTERRUPT_SYSCALL,
100 BOOKE_INTERRUPT_AP_UNAVAIL,
101 BOOKE_INTERRUPT_DTLB_MISS,
102 BOOKE_INTERRUPT_ITLB_MISS,
103 BOOKE_INTERRUPT_MACHINE_CHECK,
104 BOOKE_INTERRUPT_DEBUG,
105 BOOKE_INTERRUPT_CRITICAL,
106 BOOKE_INTERRUPT_WATCHDOG,
107 BOOKE_INTERRUPT_EXTERNAL,
108 BOOKE_INTERRUPT_FIT,
109 BOOKE_INTERRUPT_DECREMENTER,
110};
111
112
113void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
114{
115 struct tlbe *tlbe;
116 int i;
117
118 printk("vcpu %d TLB dump:\n", vcpu->vcpu_id);
119 printk("| %2s | %3s | %8s | %8s | %8s |\n",
120 "nr", "tid", "word0", "word1", "word2");
121
122 for (i = 0; i < PPC44x_TLB_SIZE; i++) {
123 tlbe = &vcpu->arch.guest_tlb[i];
124 if (tlbe->word0 & PPC44x_TLB_VALID)
125 printk(" G%2d | %02X | %08X | %08X | %08X |\n",
126 i, tlbe->tid, tlbe->word0, tlbe->word1,
127 tlbe->word2);
128 }
129
130 for (i = 0; i < PPC44x_TLB_SIZE; i++) {
131 tlbe = &vcpu->arch.shadow_tlb[i];
132 if (tlbe->word0 & PPC44x_TLB_VALID)
133 printk(" S%2d | %02X | %08X | %08X | %08X |\n",
134 i, tlbe->tid, tlbe->word0, tlbe->word1,
135 tlbe->word2);
136 }
137}
138
139/* TODO: use vcpu_printf() */
140void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
141{
142 int i;
143
144 printk("pc: %08x msr: %08x\n", vcpu->arch.pc, vcpu->arch.msr);
145 printk("lr: %08x ctr: %08x\n", vcpu->arch.lr, vcpu->arch.ctr);
146 printk("srr0: %08x srr1: %08x\n", vcpu->arch.srr0, vcpu->arch.srr1);
147
148 printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);
149
150 for (i = 0; i < 32; i += 4) {
151 printk("gpr%02d: %08x %08x %08x %08x\n", i,
152 vcpu->arch.gpr[i],
153 vcpu->arch.gpr[i+1],
154 vcpu->arch.gpr[i+2],
155 vcpu->arch.gpr[i+3]);
156 }
157}
158
159/* Check if we are ready to deliver the interrupt */
160static int kvmppc_can_deliver_interrupt(struct kvm_vcpu *vcpu, int interrupt)
161{
162 int r;
163
164 switch (interrupt) {
165 case BOOKE_INTERRUPT_CRITICAL:
166 r = vcpu->arch.msr & MSR_CE;
167 break;
168 case BOOKE_INTERRUPT_MACHINE_CHECK:
169 r = vcpu->arch.msr & MSR_ME;
170 break;
171 case BOOKE_INTERRUPT_EXTERNAL:
172 r = vcpu->arch.msr & MSR_EE;
173 break;
174 case BOOKE_INTERRUPT_DECREMENTER:
175 r = vcpu->arch.msr & MSR_EE;
176 break;
177 case BOOKE_INTERRUPT_FIT:
178 r = vcpu->arch.msr & MSR_EE;
179 break;
180 case BOOKE_INTERRUPT_WATCHDOG:
181 r = vcpu->arch.msr & MSR_CE;
182 break;
183 case BOOKE_INTERRUPT_DEBUG:
184 r = vcpu->arch.msr & MSR_DE;
185 break;
186 default:
187 r = 1;
188 }
189
190 return r;
191}
192
193static void kvmppc_deliver_interrupt(struct kvm_vcpu *vcpu, int interrupt)
194{
195 switch (interrupt) {
196 case BOOKE_INTERRUPT_DECREMENTER:
197 vcpu->arch.tsr |= TSR_DIS;
198 break;
199 }
200
201 vcpu->arch.srr0 = vcpu->arch.pc;
202 vcpu->arch.srr1 = vcpu->arch.msr;
203 vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[interrupt];
204 kvmppc_set_msr(vcpu, vcpu->arch.msr & interrupt_msr_mask[interrupt]);
205}
206
207/* Check pending exceptions and deliver one, if possible. */
208void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu)
209{
210 unsigned long *pending = &vcpu->arch.pending_exceptions;
211 unsigned int exception;
212 unsigned int priority;
213
214 priority = find_first_bit(pending, BITS_PER_BYTE * sizeof(*pending));
215 while (priority <= BOOKE_MAX_INTERRUPT) {
216 exception = priority_exception[priority];
217 if (kvmppc_can_deliver_interrupt(vcpu, exception)) {
218 kvmppc_clear_exception(vcpu, exception);
219 kvmppc_deliver_interrupt(vcpu, exception);
220 break;
221 }
222
223 priority = find_next_bit(pending,
224 BITS_PER_BYTE * sizeof(*pending),
225 priority + 1);
226 }
227}
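
/*
 * User-space model of the delivery scan above (illustration only: the
 * gate function stands in for kvmppc_can_deliver_interrupt and
 * __builtin_ctzl for find_first_bit): deliver the lowest-numbered,
 * i.e. highest-priority, pending exception whose MSR gate is open.
 */
#include <stdio.h>

static int gate_open(unsigned int priority)
{
	return priority != 1;	/* pretend priority 1 is masked */
}

int main(void)
{
	unsigned long pending = (1UL << 1) | (1UL << 4) | (1UL << 7);
	unsigned long scan = pending;

	while (scan) {
		unsigned int prio = __builtin_ctzl(scan);

		if (gate_open(prio)) {
			pending &= ~(1UL << prio);	/* clear + deliver */
			printf("delivered priority %u\n", prio);
			break;
		}
		scan &= scan - 1;	/* move on to the next pending bit */
	}
	return 0;
}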
228
229static int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
230{
231 enum emulation_result er;
232 int r;
233
234 er = kvmppc_emulate_instruction(run, vcpu);
235 switch (er) {
236 case EMULATE_DONE:
237 /* Future optimization: only reload non-volatiles if they were
238 * actually modified. */
239 r = RESUME_GUEST_NV;
240 break;
241 case EMULATE_DO_MMIO:
242 run->exit_reason = KVM_EXIT_MMIO;
243 /* We must reload nonvolatiles because "update" load/store
244 * instructions modify register state. */
245 /* Future optimization: only reload non-volatiles if they were
246 * actually modified. */
247 r = RESUME_HOST_NV;
248 break;
249 case EMULATE_FAIL:
250 /* XXX Deliver Program interrupt to guest. */
251 printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
252 vcpu->arch.last_inst);
253 r = RESUME_HOST;
254 break;
255 default:
256 BUG();
257 }
258
259 return r;
260}
261
262/**
263 * kvmppc_handle_exit
264 *
265 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
266 */
267int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
268 unsigned int exit_nr)
269{
270 enum emulation_result er;
271 int r = RESUME_HOST;
272
273 local_irq_enable();
274
275 run->exit_reason = KVM_EXIT_UNKNOWN;
276 run->ready_for_interrupt_injection = 1;
277
278 switch (exit_nr) {
279 case BOOKE_INTERRUPT_MACHINE_CHECK:
280 printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
281 kvmppc_dump_vcpu(vcpu);
282 r = RESUME_HOST;
283 break;
284
285 case BOOKE_INTERRUPT_EXTERNAL:
286 case BOOKE_INTERRUPT_DECREMENTER:
287 /* Since we switched IVPR back to the host's value, the host
288 * handled this interrupt the moment we enabled interrupts.
289 * Now we just offer it a chance to reschedule the guest. */
290
291 /* XXX At this point the TLB still holds our shadow TLB, so if
292 * we do reschedule the host will fault over it. Perhaps we
293 * should politely restore the host's entries to minimize
294 * misses before ceding control. */
295 if (need_resched())
296 cond_resched();
297 if (exit_nr == BOOKE_INTERRUPT_DECREMENTER)
298 vcpu->stat.dec_exits++;
299 else
300 vcpu->stat.ext_intr_exits++;
301 r = RESUME_GUEST;
302 break;
303
304 case BOOKE_INTERRUPT_PROGRAM:
305 if (vcpu->arch.msr & MSR_PR) {
306 /* Program traps generated by user-level software must be handled
307 * by the guest kernel. */
308 vcpu->arch.esr = vcpu->arch.fault_esr;
309 kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_PROGRAM);
310 r = RESUME_GUEST;
311 break;
312 }
313
314 er = kvmppc_emulate_instruction(run, vcpu);
315 switch (er) {
316 case EMULATE_DONE:
317 /* Future optimization: only reload non-volatiles if
318 * they were actually modified by emulation. */
319 vcpu->stat.emulated_inst_exits++;
320 r = RESUME_GUEST_NV;
321 break;
322 case EMULATE_DO_DCR:
323 run->exit_reason = KVM_EXIT_DCR;
324 r = RESUME_HOST;
325 break;
326 case EMULATE_FAIL:
327 /* XXX Deliver Program interrupt to guest. */
328 printk(KERN_CRIT "%s: emulation at %x failed (%08x)\n",
329 __func__, vcpu->arch.pc, vcpu->arch.last_inst);
330 /* For debugging, encode the failing instruction and
331 * report it to userspace. */
332 run->hw.hardware_exit_reason = ~0ULL << 32;
333 run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
334 r = RESUME_HOST;
335 break;
336 default:
337 BUG();
338 }
339 break;
340
341 case BOOKE_INTERRUPT_DATA_STORAGE:
342 vcpu->arch.dear = vcpu->arch.fault_dear;
343 vcpu->arch.esr = vcpu->arch.fault_esr;
344 kvmppc_queue_exception(vcpu, exit_nr);
345 vcpu->stat.dsi_exits++;
346 r = RESUME_GUEST;
347 break;
348
349 case BOOKE_INTERRUPT_INST_STORAGE:
350 vcpu->arch.esr = vcpu->arch.fault_esr;
351 kvmppc_queue_exception(vcpu, exit_nr);
352 vcpu->stat.isi_exits++;
353 r = RESUME_GUEST;
354 break;
355
356 case BOOKE_INTERRUPT_SYSCALL:
357 kvmppc_queue_exception(vcpu, exit_nr);
358 vcpu->stat.syscall_exits++;
359 r = RESUME_GUEST;
360 break;
361
362 case BOOKE_INTERRUPT_DTLB_MISS: {
363 struct tlbe *gtlbe;
364 unsigned long eaddr = vcpu->arch.fault_dear;
365 gfn_t gfn;
366
367 /* Check the guest TLB. */
368 gtlbe = kvmppc_44x_dtlb_search(vcpu, eaddr);
369 if (!gtlbe) {
370 /* The guest didn't have a mapping for it. */
371 kvmppc_queue_exception(vcpu, exit_nr);
372 vcpu->arch.dear = vcpu->arch.fault_dear;
373 vcpu->arch.esr = vcpu->arch.fault_esr;
374 vcpu->stat.dtlb_real_miss_exits++;
375 r = RESUME_GUEST;
376 break;
377 }
378
379 vcpu->arch.paddr_accessed = tlb_xlate(gtlbe, eaddr);
380 gfn = vcpu->arch.paddr_accessed >> PAGE_SHIFT;
381
382 if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
383 /* The guest TLB had a mapping, but the shadow TLB
384 * didn't, and it is RAM. This could be because:
385 * a) the entry is mapping the host kernel, or
386 * b) the guest used a large mapping which we're faking
387 * Either way, we need to satisfy the fault without
388 * invoking the guest. */
389 kvmppc_mmu_map(vcpu, eaddr, gfn, gtlbe->tid,
390 gtlbe->word2);
391 vcpu->stat.dtlb_virt_miss_exits++;
392 r = RESUME_GUEST;
393 } else {
394 /* Guest has mapped and accessed a page which is not
395 * actually RAM. */
396 r = kvmppc_emulate_mmio(run, vcpu);
397 }
398
399 break;
400 }
401
402 case BOOKE_INTERRUPT_ITLB_MISS: {
403 struct tlbe *gtlbe;
404 unsigned long eaddr = vcpu->arch.pc;
405 gfn_t gfn;
406
407 r = RESUME_GUEST;
408
409 /* Check the guest TLB. */
410 gtlbe = kvmppc_44x_itlb_search(vcpu, eaddr);
411 if (!gtlbe) {
412 /* The guest didn't have a mapping for it. */
413 kvmppc_queue_exception(vcpu, exit_nr);
414 vcpu->stat.itlb_real_miss_exits++;
415 break;
416 }
417
418 vcpu->stat.itlb_virt_miss_exits++;
419
420 gfn = tlb_xlate(gtlbe, eaddr) >> PAGE_SHIFT;
421
422 if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
423 /* The guest TLB had a mapping, but the shadow TLB
424 * didn't. This could be because:
425 * a) the entry is mapping the host kernel, or
426 * b) the guest used a large mapping which we're faking
427 * Either way, we need to satisfy the fault without
428 * invoking the guest. */
429 kvmppc_mmu_map(vcpu, eaddr, gfn, gtlbe->tid,
430 gtlbe->word2);
431 } else {
432 /* Guest mapped and leaped at non-RAM! */
433 kvmppc_queue_exception(vcpu,
434 BOOKE_INTERRUPT_MACHINE_CHECK);
435 }
436
437 break;
438 }
439
440 default:
441 printk(KERN_EMERG "exit_nr %d\n", exit_nr);
442 BUG();
443 }
444
445 local_irq_disable();
446
447 kvmppc_check_and_deliver_interrupts(vcpu);
448
449 /* Do some exit accounting. */
450 vcpu->stat.sum_exits++;
451 if (!(r & RESUME_HOST)) {
452 /* To avoid clobbering exit_reason, only check for signals if
453 * we aren't already exiting to userspace for some other
454 * reason. */
455 if (signal_pending(current)) {
456 run->exit_reason = KVM_EXIT_INTR;
457 r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
458
459 vcpu->stat.signal_exits++;
460 } else {
461 vcpu->stat.light_exits++;
462 }
463 } else {
464 switch (run->exit_reason) {
465 case KVM_EXIT_MMIO:
466 vcpu->stat.mmio_exits++;
467 break;
468 case KVM_EXIT_DCR:
469 vcpu->stat.dcr_exits++;
470 break;
471 case KVM_EXIT_INTR:
472 vcpu->stat.signal_exits++;
473 break;
474 }
475 }
476
477 return r;
478}
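
/*
 * Sketch of the return encoding documented above kvmppc_handle_exit()
 * (the RESUME_FLAG_* values mirror the convention stated there; the
 * decode helper is an assumption about how a caller unpacks it and,
 * like the kernel code, relies on arithmetic shifts of negatives):
 */
#include <errno.h>

#define RESUME_FLAG_NV		(1 << 0)	/* reload nonvolatile regs */
#define RESUME_FLAG_HOST	(1 << 1)	/* exit to host/userspace */

static int encode_exit(int errcode, int flags)
{
	return (errcode << 2) | flags;
}

static int exit_errcode(int r)
{
	return r >> 2;
}

int main(void)
{
	int r = encode_exit(-EINTR, RESUME_FLAG_HOST | RESUME_FLAG_NV);

	return exit_errcode(r) == -EINTR ? 0 : 1;	/* exits 0 */
}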
479
480/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
481int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
482{
483 struct tlbe *tlbe = &vcpu->arch.guest_tlb[0];
484
485 tlbe->tid = 0;
486 tlbe->word0 = PPC44x_TLB_16M | PPC44x_TLB_VALID;
487 tlbe->word1 = 0;
488 tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR;
489
490 tlbe++;
491 tlbe->tid = 0;
492 tlbe->word0 = 0xef600000 | PPC44x_TLB_4K | PPC44x_TLB_VALID;
493 tlbe->word1 = 0xef600000;
494 tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR
495 | PPC44x_TLB_I | PPC44x_TLB_G;
496
497 vcpu->arch.pc = 0;
498 vcpu->arch.msr = 0;
499 vcpu->arch.gpr[1] = (16<<20) - 8; /* -8 for the callee-save LR slot */
500
501 /* Eye-catching number so we know if the guest takes an interrupt
502 * before it's programmed its own IVPR. */
503 vcpu->arch.ivpr = 0x55550000;
504
505 /* Since the guest can directly access the timebase, it must know the
506 * real timebase frequency. Accordingly, it must see the state of
507 * CCR1[TCS]. */
508 vcpu->arch.ccr1 = mfspr(SPRN_CCR1);
509
510 return 0;
511}
512
513int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
514{
515 int i;
516
517 regs->pc = vcpu->arch.pc;
518 regs->cr = vcpu->arch.cr;
519 regs->ctr = vcpu->arch.ctr;
520 regs->lr = vcpu->arch.lr;
521 regs->xer = vcpu->arch.xer;
522 regs->msr = vcpu->arch.msr;
523 regs->srr0 = vcpu->arch.srr0;
524 regs->srr1 = vcpu->arch.srr1;
525 regs->pid = vcpu->arch.pid;
526 regs->sprg0 = vcpu->arch.sprg0;
527 regs->sprg1 = vcpu->arch.sprg1;
528 regs->sprg2 = vcpu->arch.sprg2;
529 regs->sprg3 = vcpu->arch.sprg3;
530	regs->sprg4 = vcpu->arch.sprg4;
531	regs->sprg5 = vcpu->arch.sprg5;
532	regs->sprg6 = vcpu->arch.sprg6;
533	regs->sprg7 = vcpu->arch.sprg7;
534 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
535 regs->gpr[i] = vcpu->arch.gpr[i];
536
537 return 0;
538}
539
540int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
541{
542 int i;
543
544 vcpu->arch.pc = regs->pc;
545 vcpu->arch.cr = regs->cr;
546 vcpu->arch.ctr = regs->ctr;
547 vcpu->arch.lr = regs->lr;
548 vcpu->arch.xer = regs->xer;
549 vcpu->arch.msr = regs->msr;
550 vcpu->arch.srr0 = regs->srr0;
551 vcpu->arch.srr1 = regs->srr1;
552 vcpu->arch.sprg0 = regs->sprg0;
553 vcpu->arch.sprg1 = regs->sprg1;
554 vcpu->arch.sprg2 = regs->sprg2;
555 vcpu->arch.sprg3 = regs->sprg3;
556	vcpu->arch.sprg4 = regs->sprg4;
557	vcpu->arch.sprg5 = regs->sprg5;
558	vcpu->arch.sprg6 = regs->sprg6;
559	vcpu->arch.sprg7 = regs->sprg7;
560 for (i = 0; i < ARRAY_SIZE(vcpu->arch.gpr); i++)
561 vcpu->arch.gpr[i] = regs->gpr[i];
562
563 return 0;
564}
565
566int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
567 struct kvm_sregs *sregs)
568{
569 return -ENOTSUPP;
570}
571
572int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
573 struct kvm_sregs *sregs)
574{
575 return -ENOTSUPP;
576}
577
578int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
579{
580 return -ENOTSUPP;
581}
582
583int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
584{
585 return -ENOTSUPP;
586}
587
588/* 'linear_address' is actually an encoding of AS|PID|EADDR. */
589int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
590 struct kvm_translation *tr)
591{
592 struct tlbe *gtlbe;
593 int index;
594 gva_t eaddr;
595 u8 pid;
596 u8 as;
597
598 eaddr = tr->linear_address;
599 pid = (tr->linear_address >> 32) & 0xff;
600 as = (tr->linear_address >> 40) & 0x1;
601
602 index = kvmppc_44x_tlb_index(vcpu, eaddr, pid, as);
603 if (index == -1) {
604 tr->valid = 0;
605 return 0;
606 }
607
608 gtlbe = &vcpu->arch.guest_tlb[index];
609
610 tr->physical_address = tlb_xlate(gtlbe, eaddr);
611 /* XXX what does "writeable" and "usermode" even mean? */
612 tr->valid = 1;
613
614 return 0;
615}
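
Given the AS|PID|EADDR note above, a hypothetical userspace helper building the linear_address argument for KVM_TRANSLATE could look like this; the field layout simply mirrors the shifts in kvm_arch_vcpu_ioctl_translate(), everything else here is assumed:

	#include <stdint.h>

	/* EADDR in bits 0-31, PID in bits 32-39, AS in bit 40. */
	static uint64_t encode_linear_address(uint32_t eaddr, uint8_t pid,
					      int as)
	{
		return (uint64_t)eaddr
		     | ((uint64_t)pid << 32)
		     | ((uint64_t)(as & 1) << 40);
	}
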
diff --git a/arch/powerpc/kvm/booke_host.c b/arch/powerpc/kvm/booke_host.c
new file mode 100644
index 000000000000..b480341bc31e
--- /dev/null
+++ b/arch/powerpc/kvm/booke_host.c
@@ -0,0 +1,83 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2008
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */
19
20#include <linux/errno.h>
21#include <linux/kvm_host.h>
22#include <linux/module.h>
23#include <asm/cacheflush.h>
24#include <asm/kvm_ppc.h>
25
26unsigned long kvmppc_booke_handlers;
27
28static int kvmppc_booke_init(void)
29{
30 unsigned long ivor[16];
31 unsigned long max_ivor = 0;
32 int i;
33
34	/* We install our own exception handlers by hijacking IVPR. Only the
35	 * top 16 bits of IVPR exist, so the block must be 64KB aligned. */
36 kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
37 VCPU_SIZE_ORDER);
38 if (!kvmppc_booke_handlers)
39 return -ENOMEM;
40
41 /* XXX make sure our handlers are smaller than Linux's */
42
43 /* Copy our interrupt handlers to match host IVORs. That way we don't
44 * have to swap the IVORs on every guest/host transition. */
45 ivor[0] = mfspr(SPRN_IVOR0);
46 ivor[1] = mfspr(SPRN_IVOR1);
47 ivor[2] = mfspr(SPRN_IVOR2);
48 ivor[3] = mfspr(SPRN_IVOR3);
49 ivor[4] = mfspr(SPRN_IVOR4);
50 ivor[5] = mfspr(SPRN_IVOR5);
51 ivor[6] = mfspr(SPRN_IVOR6);
52 ivor[7] = mfspr(SPRN_IVOR7);
53 ivor[8] = mfspr(SPRN_IVOR8);
54 ivor[9] = mfspr(SPRN_IVOR9);
55 ivor[10] = mfspr(SPRN_IVOR10);
56 ivor[11] = mfspr(SPRN_IVOR11);
57 ivor[12] = mfspr(SPRN_IVOR12);
58 ivor[13] = mfspr(SPRN_IVOR13);
59 ivor[14] = mfspr(SPRN_IVOR14);
60 ivor[15] = mfspr(SPRN_IVOR15);
61
62 for (i = 0; i < 16; i++) {
63 if (ivor[i] > max_ivor)
64 max_ivor = ivor[i];
65
66 memcpy((void *)kvmppc_booke_handlers + ivor[i],
67 kvmppc_handlers_start + i * kvmppc_handler_len,
68 kvmppc_handler_len);
69 }
70 flush_icache_range(kvmppc_booke_handlers,
71 kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);
72
73 return kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
74}
75
76static void __exit kvmppc_booke_exit(void)
77{
78 free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
79 kvm_exit();
80}
81
82module_init(kvmppc_booke_init)
83module_exit(kvmppc_booke_exit)
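
The handler copy loop in kvmppc_booke_init() works because of how Book E vectors interrupts: the CPU branches to (IVPR & 0xffff0000) + IVORn, so placing the KVM stubs at the host's own IVOR offsets lets a single IVPR write switch between the host and KVM handler sets. A sketch of that address computation, assuming the usual Book E semantics (the low four bits of an IVOR are reserved):

	#include <stdint.h>

	static uint32_t booke_vector(uint32_t ivpr, uint32_t ivor)
	{
		/* Only IVPR's top 16 bits are implemented, which is also
		 * why the handler allocation above must be 64KB aligned. */
		return (ivpr & 0xffff0000u) + (ivor & 0x0000fff0u);
	}
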
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
new file mode 100644
index 000000000000..3b653b5309b8
--- /dev/null
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -0,0 +1,436 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2007
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */
19
20#include <asm/ppc_asm.h>
21#include <asm/kvm_asm.h>
22#include <asm/reg.h>
23#include <asm/mmu-44x.h>
24#include <asm/page.h>
25#include <asm/asm-offsets.h>
26
27#define KVMPPC_MSR_MASK (MSR_CE|MSR_EE|MSR_PR|MSR_DE|MSR_ME|MSR_IS|MSR_DS)
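/* Note (inferred from the mtsrr1 sequence before the rfi at the end of
 * this file): these bits are OR'ed into the guest's MSR image on every
 * guest entry, so the guest always runs with PR=1 (problem state),
 * IS=DS=1, and host critical/external/debug interrupts enabled,
 * regardless of the MSR value the guest believes it has set. */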
28
29#define VCPU_GPR(n) (VCPU_GPRS + (n * 4))
30
31/* The host stack layout: */
32#define HOST_R1 0 /* Implied by stwu. */
33#define HOST_CALLEE_LR 4
34#define HOST_RUN 8
35/* r2 is special: it holds 'current', and is made nonvolatile in the
36 * kernel with the -ffixed-r2 gcc option. */
37#define HOST_R2 12
38#define HOST_NV_GPRS 16
39#define HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * 4))
40#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + 4)
41#define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */
42#define HOST_STACK_LR (HOST_STACK_SIZE + 4) /* In caller stack frame. */
43
44#define NEED_INST_MASK ((1<<BOOKE_INTERRUPT_PROGRAM) | \
45 (1<<BOOKE_INTERRUPT_DTLB_MISS))
46
47#define NEED_DEAR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
48 (1<<BOOKE_INTERRUPT_DTLB_MISS))
49
50#define NEED_ESR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
51 (1<<BOOKE_INTERRUPT_INST_STORAGE) | \
52 (1<<BOOKE_INTERRUPT_PROGRAM) | \
53 (1<<BOOKE_INTERRUPT_DTLB_MISS))
54
55.macro KVM_HANDLER ivor_nr
56_GLOBAL(kvmppc_handler_\ivor_nr)
57 /* Get pointer to vcpu and record exit number. */
58 mtspr SPRN_SPRG0, r4
59 mfspr r4, SPRN_SPRG1
60 stw r5, VCPU_GPR(r5)(r4)
61 stw r6, VCPU_GPR(r6)(r4)
62 mfctr r5
63 lis r6, kvmppc_resume_host@h
64 stw r5, VCPU_CTR(r4)
65 li r5, \ivor_nr
66 ori r6, r6, kvmppc_resume_host@l
67 mtctr r6
68 bctr
69.endm
70
71_GLOBAL(kvmppc_handlers_start)
72KVM_HANDLER BOOKE_INTERRUPT_CRITICAL
73KVM_HANDLER BOOKE_INTERRUPT_MACHINE_CHECK
74KVM_HANDLER BOOKE_INTERRUPT_DATA_STORAGE
75KVM_HANDLER BOOKE_INTERRUPT_INST_STORAGE
76KVM_HANDLER BOOKE_INTERRUPT_EXTERNAL
77KVM_HANDLER BOOKE_INTERRUPT_ALIGNMENT
78KVM_HANDLER BOOKE_INTERRUPT_PROGRAM
79KVM_HANDLER BOOKE_INTERRUPT_FP_UNAVAIL
80KVM_HANDLER BOOKE_INTERRUPT_SYSCALL
81KVM_HANDLER BOOKE_INTERRUPT_AP_UNAVAIL
82KVM_HANDLER BOOKE_INTERRUPT_DECREMENTER
83KVM_HANDLER BOOKE_INTERRUPT_FIT
84KVM_HANDLER BOOKE_INTERRUPT_WATCHDOG
85KVM_HANDLER BOOKE_INTERRUPT_DTLB_MISS
86KVM_HANDLER BOOKE_INTERRUPT_ITLB_MISS
87KVM_HANDLER BOOKE_INTERRUPT_DEBUG
88
89_GLOBAL(kvmppc_handler_len)
90 .long kvmppc_handler_1 - kvmppc_handler_0
91
92
93/* Registers:
94 * SPRG0: guest r4
95 * r4: vcpu pointer
96 * r5: KVM exit number
97 */
98_GLOBAL(kvmppc_resume_host)
99 stw r3, VCPU_GPR(r3)(r4)
100 mfcr r3
101 stw r3, VCPU_CR(r4)
102 stw r7, VCPU_GPR(r7)(r4)
103 stw r8, VCPU_GPR(r8)(r4)
104 stw r9, VCPU_GPR(r9)(r4)
105
106 li r6, 1
107 slw r6, r6, r5
108
109 /* Save the faulting instruction and all GPRs for emulation. */
110 andi. r7, r6, NEED_INST_MASK
111 beq ..skip_inst_copy
112 mfspr r9, SPRN_SRR0
113 mfmsr r8
114 ori r7, r8, MSR_DS
115 mtmsr r7
116 isync
117 lwz r9, 0(r9)
118 mtmsr r8
119 isync
120 stw r9, VCPU_LAST_INST(r4)
121
122 stw r15, VCPU_GPR(r15)(r4)
123 stw r16, VCPU_GPR(r16)(r4)
124 stw r17, VCPU_GPR(r17)(r4)
125 stw r18, VCPU_GPR(r18)(r4)
126 stw r19, VCPU_GPR(r19)(r4)
127 stw r20, VCPU_GPR(r20)(r4)
128 stw r21, VCPU_GPR(r21)(r4)
129 stw r22, VCPU_GPR(r22)(r4)
130 stw r23, VCPU_GPR(r23)(r4)
131 stw r24, VCPU_GPR(r24)(r4)
132 stw r25, VCPU_GPR(r25)(r4)
133 stw r26, VCPU_GPR(r26)(r4)
134 stw r27, VCPU_GPR(r27)(r4)
135 stw r28, VCPU_GPR(r28)(r4)
136 stw r29, VCPU_GPR(r29)(r4)
137 stw r30, VCPU_GPR(r30)(r4)
138 stw r31, VCPU_GPR(r31)(r4)
139..skip_inst_copy:
140
141 /* Also grab DEAR and ESR before the host can clobber them. */
142
143 andi. r7, r6, NEED_DEAR_MASK
144 beq ..skip_dear
145 mfspr r9, SPRN_DEAR
146 stw r9, VCPU_FAULT_DEAR(r4)
147..skip_dear:
148
149 andi. r7, r6, NEED_ESR_MASK
150 beq ..skip_esr
151 mfspr r9, SPRN_ESR
152 stw r9, VCPU_FAULT_ESR(r4)
153..skip_esr:
154
155 /* Save remaining volatile guest register state to vcpu. */
156 stw r0, VCPU_GPR(r0)(r4)
157 stw r1, VCPU_GPR(r1)(r4)
158 stw r2, VCPU_GPR(r2)(r4)
159 stw r10, VCPU_GPR(r10)(r4)
160 stw r11, VCPU_GPR(r11)(r4)
161 stw r12, VCPU_GPR(r12)(r4)
162 stw r13, VCPU_GPR(r13)(r4)
163 stw r14, VCPU_GPR(r14)(r4) /* We need a NV GPR below. */
164 mflr r3
165 stw r3, VCPU_LR(r4)
166 mfxer r3
167 stw r3, VCPU_XER(r4)
168 mfspr r3, SPRN_SPRG0
169 stw r3, VCPU_GPR(r4)(r4)
170 mfspr r3, SPRN_SRR0
171 stw r3, VCPU_PC(r4)
172
173 /* Restore host stack pointer and PID before IVPR, since the host
174 * exception handlers use them. */
175 lwz r1, VCPU_HOST_STACK(r4)
176 lwz r3, VCPU_HOST_PID(r4)
177 mtspr SPRN_PID, r3
178
179 /* Restore host IVPR before re-enabling interrupts. We cheat and know
180 * that Linux IVPR is always 0xc0000000. */
181 lis r3, 0xc000
182 mtspr SPRN_IVPR, r3
183
184 /* Switch to kernel stack and jump to handler. */
185 LOAD_REG_ADDR(r3, kvmppc_handle_exit)
186 mtctr r3
187 lwz r3, HOST_RUN(r1)
188 lwz r2, HOST_R2(r1)
189 mr r14, r4 /* Save vcpu pointer. */
190
191 bctrl /* kvmppc_handle_exit() */
192
193 /* Restore vcpu pointer and the nonvolatiles we used. */
194 mr r4, r14
195 lwz r14, VCPU_GPR(r14)(r4)
196
197 /* Sometimes instruction emulation must restore complete GPR state. */
198 andi. r5, r3, RESUME_FLAG_NV
199 beq ..skip_nv_load
200 lwz r15, VCPU_GPR(r15)(r4)
201 lwz r16, VCPU_GPR(r16)(r4)
202 lwz r17, VCPU_GPR(r17)(r4)
203 lwz r18, VCPU_GPR(r18)(r4)
204 lwz r19, VCPU_GPR(r19)(r4)
205 lwz r20, VCPU_GPR(r20)(r4)
206 lwz r21, VCPU_GPR(r21)(r4)
207 lwz r22, VCPU_GPR(r22)(r4)
208 lwz r23, VCPU_GPR(r23)(r4)
209 lwz r24, VCPU_GPR(r24)(r4)
210 lwz r25, VCPU_GPR(r25)(r4)
211 lwz r26, VCPU_GPR(r26)(r4)
212 lwz r27, VCPU_GPR(r27)(r4)
213 lwz r28, VCPU_GPR(r28)(r4)
214 lwz r29, VCPU_GPR(r29)(r4)
215 lwz r30, VCPU_GPR(r30)(r4)
216 lwz r31, VCPU_GPR(r31)(r4)
217..skip_nv_load:
218
219 /* Should we return to the guest? */
220 andi. r5, r3, RESUME_FLAG_HOST
221 beq lightweight_exit
222
223 srawi r3, r3, 2 /* Shift -ERR back down. */
224
225heavyweight_exit:
226 /* Not returning to guest. */
227
228 /* We already saved guest volatile register state; now save the
229 * non-volatiles. */
230 stw r15, VCPU_GPR(r15)(r4)
231 stw r16, VCPU_GPR(r16)(r4)
232 stw r17, VCPU_GPR(r17)(r4)
233 stw r18, VCPU_GPR(r18)(r4)
234 stw r19, VCPU_GPR(r19)(r4)
235 stw r20, VCPU_GPR(r20)(r4)
236 stw r21, VCPU_GPR(r21)(r4)
237 stw r22, VCPU_GPR(r22)(r4)
238 stw r23, VCPU_GPR(r23)(r4)
239 stw r24, VCPU_GPR(r24)(r4)
240 stw r25, VCPU_GPR(r25)(r4)
241 stw r26, VCPU_GPR(r26)(r4)
242 stw r27, VCPU_GPR(r27)(r4)
243 stw r28, VCPU_GPR(r28)(r4)
244 stw r29, VCPU_GPR(r29)(r4)
245 stw r30, VCPU_GPR(r30)(r4)
246 stw r31, VCPU_GPR(r31)(r4)
247
248 /* Load host non-volatile register state from host stack. */
249 lwz r14, HOST_NV_GPR(r14)(r1)
250 lwz r15, HOST_NV_GPR(r15)(r1)
251 lwz r16, HOST_NV_GPR(r16)(r1)
252 lwz r17, HOST_NV_GPR(r17)(r1)
253 lwz r18, HOST_NV_GPR(r18)(r1)
254 lwz r19, HOST_NV_GPR(r19)(r1)
255 lwz r20, HOST_NV_GPR(r20)(r1)
256 lwz r21, HOST_NV_GPR(r21)(r1)
257 lwz r22, HOST_NV_GPR(r22)(r1)
258 lwz r23, HOST_NV_GPR(r23)(r1)
259 lwz r24, HOST_NV_GPR(r24)(r1)
260 lwz r25, HOST_NV_GPR(r25)(r1)
261 lwz r26, HOST_NV_GPR(r26)(r1)
262 lwz r27, HOST_NV_GPR(r27)(r1)
263 lwz r28, HOST_NV_GPR(r28)(r1)
264 lwz r29, HOST_NV_GPR(r29)(r1)
265 lwz r30, HOST_NV_GPR(r30)(r1)
266 lwz r31, HOST_NV_GPR(r31)(r1)
267
268 /* Return to kvm_vcpu_run(). */
269 lwz r4, HOST_STACK_LR(r1)
270 addi r1, r1, HOST_STACK_SIZE
271 mtlr r4
272 /* r3 still contains the return code from kvmppc_handle_exit(). */
273 blr
274
275
276/* Registers:
277 * r3: kvm_run pointer
278 * r4: vcpu pointer
279 */
280_GLOBAL(__kvmppc_vcpu_run)
281 stwu r1, -HOST_STACK_SIZE(r1)
282 stw r1, VCPU_HOST_STACK(r4) /* Save stack pointer to vcpu. */
283
284 /* Save host state to stack. */
285 stw r3, HOST_RUN(r1)
286 mflr r3
287 stw r3, HOST_STACK_LR(r1)
288
289 /* Save host non-volatile register state to stack. */
290 stw r14, HOST_NV_GPR(r14)(r1)
291 stw r15, HOST_NV_GPR(r15)(r1)
292 stw r16, HOST_NV_GPR(r16)(r1)
293 stw r17, HOST_NV_GPR(r17)(r1)
294 stw r18, HOST_NV_GPR(r18)(r1)
295 stw r19, HOST_NV_GPR(r19)(r1)
296 stw r20, HOST_NV_GPR(r20)(r1)
297 stw r21, HOST_NV_GPR(r21)(r1)
298 stw r22, HOST_NV_GPR(r22)(r1)
299 stw r23, HOST_NV_GPR(r23)(r1)
300 stw r24, HOST_NV_GPR(r24)(r1)
301 stw r25, HOST_NV_GPR(r25)(r1)
302 stw r26, HOST_NV_GPR(r26)(r1)
303 stw r27, HOST_NV_GPR(r27)(r1)
304 stw r28, HOST_NV_GPR(r28)(r1)
305 stw r29, HOST_NV_GPR(r29)(r1)
306 stw r30, HOST_NV_GPR(r30)(r1)
307 stw r31, HOST_NV_GPR(r31)(r1)
308
309 /* Load guest non-volatiles. */
310 lwz r14, VCPU_GPR(r14)(r4)
311 lwz r15, VCPU_GPR(r15)(r4)
312 lwz r16, VCPU_GPR(r16)(r4)
313 lwz r17, VCPU_GPR(r17)(r4)
314 lwz r18, VCPU_GPR(r18)(r4)
315 lwz r19, VCPU_GPR(r19)(r4)
316 lwz r20, VCPU_GPR(r20)(r4)
317 lwz r21, VCPU_GPR(r21)(r4)
318 lwz r22, VCPU_GPR(r22)(r4)
319 lwz r23, VCPU_GPR(r23)(r4)
320 lwz r24, VCPU_GPR(r24)(r4)
321 lwz r25, VCPU_GPR(r25)(r4)
322 lwz r26, VCPU_GPR(r26)(r4)
323 lwz r27, VCPU_GPR(r27)(r4)
324 lwz r28, VCPU_GPR(r28)(r4)
325 lwz r29, VCPU_GPR(r29)(r4)
326 lwz r30, VCPU_GPR(r30)(r4)
327 lwz r31, VCPU_GPR(r31)(r4)
328
329lightweight_exit:
330 stw r2, HOST_R2(r1)
331
332 mfspr r3, SPRN_PID
333 stw r3, VCPU_HOST_PID(r4)
334 lwz r3, VCPU_PID(r4)
335 mtspr SPRN_PID, r3
336
337 /* Prevent all TLB updates. */
338 mfmsr r5
339 lis r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@h
340 ori r6, r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
341 andc r6, r5, r6
342 mtmsr r6
343
344 /* Save the host's non-pinned TLB mappings, and load the guest mappings
345 * over them. Leave the host's "pinned" kernel mappings in place. */
346 /* XXX optimization: use generation count to avoid swapping unmodified
347 * entries. */
348 mfspr r10, SPRN_MMUCR /* Save host MMUCR. */
349 lis r8, tlb_44x_hwater@ha
350 lwz r8, tlb_44x_hwater@l(r8)
351 addi r3, r4, VCPU_HOST_TLB - 4
352 addi r9, r4, VCPU_SHADOW_TLB - 4
353 li r6, 0
3541:
355 /* Save host entry. */
356 tlbre r7, r6, PPC44x_TLB_PAGEID
357 mfspr r5, SPRN_MMUCR
358 stwu r5, 4(r3)
359 stwu r7, 4(r3)
360 tlbre r7, r6, PPC44x_TLB_XLAT
361 stwu r7, 4(r3)
362 tlbre r7, r6, PPC44x_TLB_ATTRIB
363 stwu r7, 4(r3)
364 /* Load guest entry. */
365 lwzu r7, 4(r9)
366 mtspr SPRN_MMUCR, r7
367 lwzu r7, 4(r9)
368 tlbwe r7, r6, PPC44x_TLB_PAGEID
369 lwzu r7, 4(r9)
370 tlbwe r7, r6, PPC44x_TLB_XLAT
371 lwzu r7, 4(r9)
372 tlbwe r7, r6, PPC44x_TLB_ATTRIB
373 /* Increment index. */
374 addi r6, r6, 1
375 cmpw r6, r8
376 blt 1b
377 mtspr SPRN_MMUCR, r10 /* Restore host MMUCR. */
378
379 iccci 0, 0 /* XXX hack */
380
381 /* Load some guest volatiles. */
382 lwz r0, VCPU_GPR(r0)(r4)
383 lwz r2, VCPU_GPR(r2)(r4)
384 lwz r9, VCPU_GPR(r9)(r4)
385 lwz r10, VCPU_GPR(r10)(r4)
386 lwz r11, VCPU_GPR(r11)(r4)
387 lwz r12, VCPU_GPR(r12)(r4)
388 lwz r13, VCPU_GPR(r13)(r4)
389 lwz r3, VCPU_LR(r4)
390 mtlr r3
391 lwz r3, VCPU_XER(r4)
392 mtxer r3
393
394 /* Switch the IVPR. XXX If we take a TLB miss after this we're screwed,
395 * so how do we make sure vcpu won't fault? */
396 lis r8, kvmppc_booke_handlers@ha
397 lwz r8, kvmppc_booke_handlers@l(r8)
398 mtspr SPRN_IVPR, r8
399
400 /* Save vcpu pointer for the exception handlers. */
401 mtspr SPRN_SPRG1, r4
402
403 /* Can't switch the stack pointer until after IVPR is switched,
404 * because host interrupt handlers would get confused. */
405 lwz r1, VCPU_GPR(r1)(r4)
406
407 /* XXX handle USPRG0 */
408 /* Host interrupt handlers may have clobbered these guest-readable
409 * SPRGs, so we need to reload them here with the guest's values. */
410 lwz r3, VCPU_SPRG4(r4)
411 mtspr SPRN_SPRG4, r3
412 lwz r3, VCPU_SPRG5(r4)
413 mtspr SPRN_SPRG5, r3
414 lwz r3, VCPU_SPRG6(r4)
415 mtspr SPRN_SPRG6, r3
416 lwz r3, VCPU_SPRG7(r4)
417 mtspr SPRN_SPRG7, r3
418
419 /* Finish loading guest volatiles and jump to guest. */
420 lwz r3, VCPU_CTR(r4)
421 mtctr r3
422 lwz r3, VCPU_CR(r4)
423 mtcr r3
424 lwz r5, VCPU_GPR(r5)(r4)
425 lwz r6, VCPU_GPR(r6)(r4)
426 lwz r7, VCPU_GPR(r7)(r4)
427 lwz r8, VCPU_GPR(r8)(r4)
428 lwz r3, VCPU_PC(r4)
429 mtsrr0 r3
430 lwz r3, VCPU_MSR(r4)
431 oris r3, r3, KVMPPC_MSR_MASK@h
432 ori r3, r3, KVMPPC_MSR_MASK@l
433 mtsrr1 r3
434 lwz r3, VCPU_GPR(r3)(r4)
435 lwz r4, VCPU_GPR(r4)(r4)
436 rfi
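
The HOST_* constants at the top of this file describe the frame that __kvmppc_vcpu_run creates with its initial stwu. The same layout as a C picture, for reference only (a sketch; the asm uses the literal byte offsets):

	#include <stdint.h>

	struct kvmppc_host_frame {	/* offsets match the HOST_* defines */
		uint32_t r1;		/*  0: back chain written by stwu */
		uint32_t callee_lr;	/*  4: LR save slot for our caller */
		uint32_t run;		/*  8: the struct kvm_run pointer */
		uint32_t r2;		/* 12: 'current' (-ffixed-r2) */
		uint32_t nv_gpr[18];	/* 16: r14..r31, see HOST_NV_GPR() */
	};	/* 88 bytes, padded to 96 by the HOST_STACK_SIZE rounding */
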
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
new file mode 100644
index 000000000000..a03fe0c80698
--- /dev/null
+++ b/arch/powerpc/kvm/emulate.c
@@ -0,0 +1,760 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2007
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */
19
20#include <linux/jiffies.h>
21#include <linux/timer.h>
22#include <linux/types.h>
23#include <linux/string.h>
24#include <linux/kvm_host.h>
25
26#include <asm/dcr.h>
27#include <asm/dcr-regs.h>
28#include <asm/time.h>
29#include <asm/byteorder.h>
30#include <asm/kvm_ppc.h>
31
32#include "44x_tlb.h"
33
34/* Instruction decoding */
35static inline unsigned int get_op(u32 inst)
36{
37 return inst >> 26;
38}
39
40static inline unsigned int get_xop(u32 inst)
41{
42 return (inst >> 1) & 0x3ff;
43}
44
45static inline unsigned int get_sprn(u32 inst)
46{
47 return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
48}
49
50static inline unsigned int get_dcrn(u32 inst)
51{
52 return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
53}
54
55static inline unsigned int get_rt(u32 inst)
56{
57 return (inst >> 21) & 0x1f;
58}
59
60static inline unsigned int get_rs(u32 inst)
61{
62 return (inst >> 21) & 0x1f;
63}
64
65static inline unsigned int get_ra(u32 inst)
66{
67 return (inst >> 16) & 0x1f;
68}
69
70static inline unsigned int get_rb(u32 inst)
71{
72 return (inst >> 11) & 0x1f;
73}
74
75static inline unsigned int get_rc(u32 inst)
76{
77 return inst & 0x1;
78}
79
80static inline unsigned int get_ws(u32 inst)
81{
82 return (inst >> 11) & 0x1f;
83}
84
85static inline unsigned int get_d(u32 inst)
86{
87 return inst & 0xffff;
88}
89
90static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
91 const struct tlbe *tlbe)
92{
93 gpa_t gpa;
94
95 if (!get_tlb_v(tlbe))
96 return 0;
97
98 /* Does it match current guest AS? */
99 /* XXX what about IS != DS? */
100 if (get_tlb_ts(tlbe) != !!(vcpu->arch.msr & MSR_IS))
101 return 0;
102
103 gpa = get_tlb_raddr(tlbe);
104 if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
105 /* Mapping is not for RAM. */
106 return 0;
107
108 return 1;
109}
110
111static int kvmppc_emul_tlbwe(struct kvm_vcpu *vcpu, u32 inst)
112{
113 u64 eaddr;
114 u64 raddr;
115 u64 asid;
116 u32 flags;
117 struct tlbe *tlbe;
118 unsigned int ra;
119 unsigned int rs;
120 unsigned int ws;
121 unsigned int index;
122
123 ra = get_ra(inst);
124 rs = get_rs(inst);
125 ws = get_ws(inst);
126
127 index = vcpu->arch.gpr[ra];
128	if (index >= PPC44x_TLB_SIZE) {
129 printk("%s: index %d\n", __func__, index);
130 kvmppc_dump_vcpu(vcpu);
131 return EMULATE_FAIL;
132 }
133
134 tlbe = &vcpu->arch.guest_tlb[index];
135
136 /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
137 if (tlbe->word0 & PPC44x_TLB_VALID) {
138 eaddr = get_tlb_eaddr(tlbe);
139 asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid;
140 kvmppc_mmu_invalidate(vcpu, eaddr, asid);
141 }
142
143 switch (ws) {
144 case PPC44x_TLB_PAGEID:
145 tlbe->tid = vcpu->arch.mmucr & 0xff;
146 tlbe->word0 = vcpu->arch.gpr[rs];
147 break;
148
149 case PPC44x_TLB_XLAT:
150 tlbe->word1 = vcpu->arch.gpr[rs];
151 break;
152
153 case PPC44x_TLB_ATTRIB:
154 tlbe->word2 = vcpu->arch.gpr[rs];
155 break;
156
157 default:
158 return EMULATE_FAIL;
159 }
160
161 if (tlbe_is_host_safe(vcpu, tlbe)) {
162 eaddr = get_tlb_eaddr(tlbe);
163 raddr = get_tlb_raddr(tlbe);
164 asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid;
165 flags = tlbe->word2 & 0xffff;
166
167 /* Create a 4KB mapping on the host. If the guest wanted a
168 * large page, only the first 4KB is mapped here and the rest
169 * are mapped on the fly. */
170 kvmppc_mmu_map(vcpu, eaddr, raddr >> PAGE_SHIFT, asid, flags);
171 }
172
173 return EMULATE_DONE;
174}
175
176static void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
177{
178 if (vcpu->arch.tcr & TCR_DIE) {
179 /* The decrementer ticks at the same rate as the timebase, so
180 * that's how we convert the guest DEC value to the number of
181 * host ticks. */
182 unsigned long nr_jiffies;
183
184 nr_jiffies = vcpu->arch.dec / tb_ticks_per_jiffy;
185 mod_timer(&vcpu->arch.dec_timer,
186 get_jiffies_64() + nr_jiffies);
187 } else {
188 del_timer(&vcpu->arch.dec_timer);
189 }
190}
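/* Worked example for the conversion above, with assumed numbers: at a
 * 400 MHz timebase and HZ=250, tb_ticks_per_jiffy is 1600000, so a
 * guest DEC of 4000000 arms the host timer 4000000/1600000 = 2 jiffies
 * out. Guest decrementer granularity is therefore a jiffy, not a
 * timebase tick. */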
191
192static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
193{
194 vcpu->arch.pc = vcpu->arch.srr0;
195 kvmppc_set_msr(vcpu, vcpu->arch.srr1);
196}
197
198/* XXX to do:
199 * lhax
200 * lhaux
201 * lswx
202 * lswi
203 * stswx
204 * stswi
205 * lha
206 * lhau
207 * lmw
208 * stmw
209 *
210 * XXX is_bigendian should depend on MMU mapping or MSR[LE]
211 */
212int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
213{
214 u32 inst = vcpu->arch.last_inst;
215 u32 ea;
216 int ra;
217 int rb;
218 int rc;
219 int rs;
220 int rt;
221 int sprn;
222 int dcrn;
223 enum emulation_result emulated = EMULATE_DONE;
224 int advance = 1;
225
226 switch (get_op(inst)) {
227 case 3: /* trap */
228 printk("trap!\n");
229 kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_PROGRAM);
230 advance = 0;
231 break;
232
233 case 19:
234 switch (get_xop(inst)) {
235 case 50: /* rfi */
236 kvmppc_emul_rfi(vcpu);
237 advance = 0;
238 break;
239
240 default:
241 emulated = EMULATE_FAIL;
242 break;
243 }
244 break;
245
246 case 31:
247 switch (get_xop(inst)) {
248
249 case 83: /* mfmsr */
250 rt = get_rt(inst);
251 vcpu->arch.gpr[rt] = vcpu->arch.msr;
252 break;
253
254 case 87: /* lbzx */
255 rt = get_rt(inst);
256 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
257 break;
258
259 case 131: /* wrtee */
260 rs = get_rs(inst);
261 vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
262 | (vcpu->arch.gpr[rs] & MSR_EE);
263 break;
264
265 case 146: /* mtmsr */
266 rs = get_rs(inst);
267 kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]);
268 break;
269
270 case 163: /* wrteei */
271 vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
272 | (inst & MSR_EE);
273 break;
274
275 case 215: /* stbx */
276 rs = get_rs(inst);
277 emulated = kvmppc_handle_store(run, vcpu,
278 vcpu->arch.gpr[rs],
279 1, 1);
280 break;
281
282 case 247: /* stbux */
283 rs = get_rs(inst);
284 ra = get_ra(inst);
285 rb = get_rb(inst);
286
287 ea = vcpu->arch.gpr[rb];
288 if (ra)
289 ea += vcpu->arch.gpr[ra];
290
291 emulated = kvmppc_handle_store(run, vcpu,
292 vcpu->arch.gpr[rs],
293 1, 1);
294	vcpu->arch.gpr[ra] = ea;
295 break;
296
297 case 279: /* lhzx */
298 rt = get_rt(inst);
299 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
300 break;
301
302 case 311: /* lhzux */
303 rt = get_rt(inst);
304 ra = get_ra(inst);
305 rb = get_rb(inst);
306
307 ea = vcpu->arch.gpr[rb];
308 if (ra)
309 ea += vcpu->arch.gpr[ra];
310
311 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
312 vcpu->arch.gpr[ra] = ea;
313 break;
314
315 case 323: /* mfdcr */
316 dcrn = get_dcrn(inst);
317 rt = get_rt(inst);
318
319 /* The guest may access CPR0 registers to determine the timebase
320 * frequency, and it must know the real host frequency because it
321 * can directly access the timebase registers.
322 *
323 * It would be possible to emulate those accesses in userspace,
324 * but userspace can really only figure out the end frequency.
325 * We could decompose that into the factors that compute it, but
326 * that's tricky math, and it's easier to just report the real
327 * CPR0 values.
328 */
329 switch (dcrn) {
330 case DCRN_CPR0_CONFIG_ADDR:
331 vcpu->arch.gpr[rt] = vcpu->arch.cpr0_cfgaddr;
332 break;
333 case DCRN_CPR0_CONFIG_DATA:
334 local_irq_disable();
335 mtdcr(DCRN_CPR0_CONFIG_ADDR,
336 vcpu->arch.cpr0_cfgaddr);
337 vcpu->arch.gpr[rt] = mfdcr(DCRN_CPR0_CONFIG_DATA);
338 local_irq_enable();
339 break;
340 default:
341 run->dcr.dcrn = dcrn;
342 run->dcr.data = 0;
343 run->dcr.is_write = 0;
344 vcpu->arch.io_gpr = rt;
345 vcpu->arch.dcr_needed = 1;
346 emulated = EMULATE_DO_DCR;
347 }
348
349 break;
350
351 case 339: /* mfspr */
352 sprn = get_sprn(inst);
353 rt = get_rt(inst);
354
355 switch (sprn) {
356 case SPRN_SRR0:
357 vcpu->arch.gpr[rt] = vcpu->arch.srr0; break;
358 case SPRN_SRR1:
359 vcpu->arch.gpr[rt] = vcpu->arch.srr1; break;
360 case SPRN_MMUCR:
361 vcpu->arch.gpr[rt] = vcpu->arch.mmucr; break;
362 case SPRN_PID:
363 vcpu->arch.gpr[rt] = vcpu->arch.pid; break;
364 case SPRN_IVPR:
365 vcpu->arch.gpr[rt] = vcpu->arch.ivpr; break;
366 case SPRN_CCR0:
367 vcpu->arch.gpr[rt] = vcpu->arch.ccr0; break;
368 case SPRN_CCR1:
369 vcpu->arch.gpr[rt] = vcpu->arch.ccr1; break;
370 case SPRN_PVR:
371 vcpu->arch.gpr[rt] = vcpu->arch.pvr; break;
372 case SPRN_DEAR:
373 vcpu->arch.gpr[rt] = vcpu->arch.dear; break;
374 case SPRN_ESR:
375 vcpu->arch.gpr[rt] = vcpu->arch.esr; break;
376 case SPRN_DBCR0:
377 vcpu->arch.gpr[rt] = vcpu->arch.dbcr0; break;
378 case SPRN_DBCR1:
379 vcpu->arch.gpr[rt] = vcpu->arch.dbcr1; break;
380
381 /* Note: mftb and TBRL/TBWL are user-accessible, so
382 * the guest can always access the real TB anyways.
383 * In fact, we probably will never see these traps. */
384 case SPRN_TBWL:
385 vcpu->arch.gpr[rt] = mftbl(); break;
386 case SPRN_TBWU:
387 vcpu->arch.gpr[rt] = mftbu(); break;
388
389 case SPRN_SPRG0:
390 vcpu->arch.gpr[rt] = vcpu->arch.sprg0; break;
391 case SPRN_SPRG1:
392 vcpu->arch.gpr[rt] = vcpu->arch.sprg1; break;
393 case SPRN_SPRG2:
394 vcpu->arch.gpr[rt] = vcpu->arch.sprg2; break;
395 case SPRN_SPRG3:
396 vcpu->arch.gpr[rt] = vcpu->arch.sprg3; break;
397 /* Note: SPRG4-7 are user-readable, so we don't get
398 * a trap. */
399
400 case SPRN_IVOR0:
401 vcpu->arch.gpr[rt] = vcpu->arch.ivor[0]; break;
402 case SPRN_IVOR1:
403 vcpu->arch.gpr[rt] = vcpu->arch.ivor[1]; break;
404 case SPRN_IVOR2:
405 vcpu->arch.gpr[rt] = vcpu->arch.ivor[2]; break;
406 case SPRN_IVOR3:
407 vcpu->arch.gpr[rt] = vcpu->arch.ivor[3]; break;
408 case SPRN_IVOR4:
409 vcpu->arch.gpr[rt] = vcpu->arch.ivor[4]; break;
410 case SPRN_IVOR5:
411 vcpu->arch.gpr[rt] = vcpu->arch.ivor[5]; break;
412 case SPRN_IVOR6:
413 vcpu->arch.gpr[rt] = vcpu->arch.ivor[6]; break;
414 case SPRN_IVOR7:
415 vcpu->arch.gpr[rt] = vcpu->arch.ivor[7]; break;
416 case SPRN_IVOR8:
417 vcpu->arch.gpr[rt] = vcpu->arch.ivor[8]; break;
418 case SPRN_IVOR9:
419 vcpu->arch.gpr[rt] = vcpu->arch.ivor[9]; break;
420 case SPRN_IVOR10:
421 vcpu->arch.gpr[rt] = vcpu->arch.ivor[10]; break;
422 case SPRN_IVOR11:
423 vcpu->arch.gpr[rt] = vcpu->arch.ivor[11]; break;
424 case SPRN_IVOR12:
425 vcpu->arch.gpr[rt] = vcpu->arch.ivor[12]; break;
426 case SPRN_IVOR13:
427 vcpu->arch.gpr[rt] = vcpu->arch.ivor[13]; break;
428 case SPRN_IVOR14:
429 vcpu->arch.gpr[rt] = vcpu->arch.ivor[14]; break;
430 case SPRN_IVOR15:
431 vcpu->arch.gpr[rt] = vcpu->arch.ivor[15]; break;
432
433 default:
434 printk("mfspr: unknown spr %x\n", sprn);
435 vcpu->arch.gpr[rt] = 0;
436 break;
437 }
438 break;
439
440 case 407: /* sthx */
441 rs = get_rs(inst);
442 ra = get_ra(inst);
443 rb = get_rb(inst);
444
445 emulated = kvmppc_handle_store(run, vcpu,
446 vcpu->arch.gpr[rs],
447 2, 1);
448 break;
449
450 case 439: /* sthux */
451 rs = get_rs(inst);
452 ra = get_ra(inst);
453 rb = get_rb(inst);
454
455 ea = vcpu->arch.gpr[rb];
456 if (ra)
457 ea += vcpu->arch.gpr[ra];
458
459 emulated = kvmppc_handle_store(run, vcpu,
460 vcpu->arch.gpr[rs],
461 2, 1);
462 vcpu->arch.gpr[ra] = ea;
463 break;
464
465 case 451: /* mtdcr */
466 dcrn = get_dcrn(inst);
467 rs = get_rs(inst);
468
469 /* emulate some access in kernel */
470 switch (dcrn) {
471 case DCRN_CPR0_CONFIG_ADDR:
472 vcpu->arch.cpr0_cfgaddr = vcpu->arch.gpr[rs];
473 break;
474 default:
475 run->dcr.dcrn = dcrn;
476 run->dcr.data = vcpu->arch.gpr[rs];
477 run->dcr.is_write = 1;
478 vcpu->arch.dcr_needed = 1;
479 emulated = EMULATE_DO_DCR;
480 }
481
482 break;
483
484 case 467: /* mtspr */
485 sprn = get_sprn(inst);
486 rs = get_rs(inst);
487 switch (sprn) {
488 case SPRN_SRR0:
489 vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break;
490 case SPRN_SRR1:
491 vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break;
492 case SPRN_MMUCR:
493 vcpu->arch.mmucr = vcpu->arch.gpr[rs]; break;
494 case SPRN_PID:
495 vcpu->arch.pid = vcpu->arch.gpr[rs]; break;
496 case SPRN_CCR0:
497 vcpu->arch.ccr0 = vcpu->arch.gpr[rs]; break;
498 case SPRN_CCR1:
499 vcpu->arch.ccr1 = vcpu->arch.gpr[rs]; break;
500 case SPRN_DEAR:
501 vcpu->arch.dear = vcpu->arch.gpr[rs]; break;
502 case SPRN_ESR:
503 vcpu->arch.esr = vcpu->arch.gpr[rs]; break;
504 case SPRN_DBCR0:
505 vcpu->arch.dbcr0 = vcpu->arch.gpr[rs]; break;
506 case SPRN_DBCR1:
507 vcpu->arch.dbcr1 = vcpu->arch.gpr[rs]; break;
508
509 /* XXX We need to context-switch the timebase for
510 * watchdog and FIT. */
511 case SPRN_TBWL: break;
512 case SPRN_TBWU: break;
513
514 case SPRN_DEC:
515 vcpu->arch.dec = vcpu->arch.gpr[rs];
516 kvmppc_emulate_dec(vcpu);
517 break;
518
519 case SPRN_TSR:
520 vcpu->arch.tsr &= ~vcpu->arch.gpr[rs]; break;
521
522 case SPRN_TCR:
523 vcpu->arch.tcr = vcpu->arch.gpr[rs];
524 kvmppc_emulate_dec(vcpu);
525 break;
526
527 case SPRN_SPRG0:
528 vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break;
529 case SPRN_SPRG1:
530 vcpu->arch.sprg1 = vcpu->arch.gpr[rs]; break;
531 case SPRN_SPRG2:
532 vcpu->arch.sprg2 = vcpu->arch.gpr[rs]; break;
533 case SPRN_SPRG3:
534 vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break;
535
536 /* Note: SPRG4-7 are user-readable. These values are
537 * loaded into the real SPRGs when resuming the
538 * guest. */
539 case SPRN_SPRG4:
540 vcpu->arch.sprg4 = vcpu->arch.gpr[rs]; break;
541 case SPRN_SPRG5:
542 vcpu->arch.sprg5 = vcpu->arch.gpr[rs]; break;
543 case SPRN_SPRG6:
544 vcpu->arch.sprg6 = vcpu->arch.gpr[rs]; break;
545 case SPRN_SPRG7:
546 vcpu->arch.sprg7 = vcpu->arch.gpr[rs]; break;
547
548 case SPRN_IVPR:
549 vcpu->arch.ivpr = vcpu->arch.gpr[rs]; break;
550 case SPRN_IVOR0:
551 vcpu->arch.ivor[0] = vcpu->arch.gpr[rs]; break;
552 case SPRN_IVOR1:
553 vcpu->arch.ivor[1] = vcpu->arch.gpr[rs]; break;
554 case SPRN_IVOR2:
555 vcpu->arch.ivor[2] = vcpu->arch.gpr[rs]; break;
556 case SPRN_IVOR3:
557 vcpu->arch.ivor[3] = vcpu->arch.gpr[rs]; break;
558 case SPRN_IVOR4:
559 vcpu->arch.ivor[4] = vcpu->arch.gpr[rs]; break;
560 case SPRN_IVOR5:
561 vcpu->arch.ivor[5] = vcpu->arch.gpr[rs]; break;
562 case SPRN_IVOR6:
563 vcpu->arch.ivor[6] = vcpu->arch.gpr[rs]; break;
564 case SPRN_IVOR7:
565 vcpu->arch.ivor[7] = vcpu->arch.gpr[rs]; break;
566 case SPRN_IVOR8:
567 vcpu->arch.ivor[8] = vcpu->arch.gpr[rs]; break;
568 case SPRN_IVOR9:
569 vcpu->arch.ivor[9] = vcpu->arch.gpr[rs]; break;
570 case SPRN_IVOR10:
571 vcpu->arch.ivor[10] = vcpu->arch.gpr[rs]; break;
572 case SPRN_IVOR11:
573 vcpu->arch.ivor[11] = vcpu->arch.gpr[rs]; break;
574 case SPRN_IVOR12:
575 vcpu->arch.ivor[12] = vcpu->arch.gpr[rs]; break;
576 case SPRN_IVOR13:
577 vcpu->arch.ivor[13] = vcpu->arch.gpr[rs]; break;
578 case SPRN_IVOR14:
579 vcpu->arch.ivor[14] = vcpu->arch.gpr[rs]; break;
580 case SPRN_IVOR15:
581 vcpu->arch.ivor[15] = vcpu->arch.gpr[rs]; break;
582
583 default:
584 printk("mtspr: unknown spr %x\n", sprn);
585 emulated = EMULATE_FAIL;
586 break;
587 }
588 break;
589
590 case 470: /* dcbi */
591 /* Do nothing. The guest is performing dcbi because
592 * hardware DMA is not snooped by the dcache, but
593 * emulated DMA either goes through the dcache as
594 * normal writes, or the host kernel has handled dcache
595 * coherence. */
596 break;
597
598 case 534: /* lwbrx */
599 rt = get_rt(inst);
600 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
601 break;
602
603 case 566: /* tlbsync */
604 break;
605
606 case 662: /* stwbrx */
607 rs = get_rs(inst);
608 ra = get_ra(inst);
609 rb = get_rb(inst);
610
611 emulated = kvmppc_handle_store(run, vcpu,
612 vcpu->arch.gpr[rs],
613 4, 0);
614 break;
615
616 case 978: /* tlbwe */
617 emulated = kvmppc_emul_tlbwe(vcpu, inst);
618 break;
619
620 case 914: { /* tlbsx */
621 int index;
622 unsigned int as = get_mmucr_sts(vcpu);
623 unsigned int pid = get_mmucr_stid(vcpu);
624
625 rt = get_rt(inst);
626 ra = get_ra(inst);
627 rb = get_rb(inst);
628 rc = get_rc(inst);
629
630 ea = vcpu->arch.gpr[rb];
631 if (ra)
632 ea += vcpu->arch.gpr[ra];
633
634 index = kvmppc_44x_tlb_index(vcpu, ea, pid, as);
635 if (rc) {
636 if (index < 0)
637 vcpu->arch.cr &= ~0x20000000;
638 else
639 vcpu->arch.cr |= 0x20000000;
640 }
641 vcpu->arch.gpr[rt] = index;
642
643 }
644 break;
645
646 case 790: /* lhbrx */
647 rt = get_rt(inst);
648 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
649 break;
650
651 case 918: /* sthbrx */
652 rs = get_rs(inst);
653 ra = get_ra(inst);
654 rb = get_rb(inst);
655
656 emulated = kvmppc_handle_store(run, vcpu,
657 vcpu->arch.gpr[rs],
658 2, 0);
659 break;
660
661 case 966: /* iccci */
662 break;
663
664 default:
665 printk("unknown: op %d xop %d\n", get_op(inst),
666 get_xop(inst));
667 emulated = EMULATE_FAIL;
668 break;
669 }
670 break;
671
672 case 32: /* lwz */
673 rt = get_rt(inst);
674 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
675 break;
676
677 case 33: /* lwzu */
678 ra = get_ra(inst);
679 rt = get_rt(inst);
680 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
681 vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
682 break;
683
684 case 34: /* lbz */
685 rt = get_rt(inst);
686 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
687 break;
688
689 case 35: /* lbzu */
690 ra = get_ra(inst);
691 rt = get_rt(inst);
692 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
693 vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
694 break;
695
696 case 36: /* stw */
697 rs = get_rs(inst);
698 emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
699 4, 1);
700 break;
701
702 case 37: /* stwu */
703 ra = get_ra(inst);
704 rs = get_rs(inst);
705 emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
706 4, 1);
707 vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
708 break;
709
710 case 38: /* stb */
711 rs = get_rs(inst);
712 emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
713 1, 1);
714 break;
715
716 case 39: /* stbu */
717 ra = get_ra(inst);
718 rs = get_rs(inst);
719 emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
720 1, 1);
721 vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
722 break;
723
724 case 40: /* lhz */
725 rt = get_rt(inst);
726 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
727 break;
728
729 case 41: /* lhzu */
730 ra = get_ra(inst);
731 rt = get_rt(inst);
732 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
733 vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
734 break;
735
736 case 44: /* sth */
737 rs = get_rs(inst);
738 emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
739 2, 1);
740 break;
741
742 case 45: /* sthu */
743 ra = get_ra(inst);
744 rs = get_rs(inst);
745 emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
746 2, 1);
747 vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
748 break;
749
750 default:
751 printk("unknown op %d\n", get_op(inst));
752 emulated = EMULATE_FAIL;
753 break;
754 }
755
756 if (advance)
757 vcpu->arch.pc += 4; /* Advance past emulated instruction. */
758
759 return emulated;
760}
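
The split SPR field handled by get_sprn() above can be sanity-checked against a known encoding: 0x7c7042a6 is "mfspr r3, SPRG0" (SPR 272). A standalone sketch duplicating the decode helpers:

	#include <assert.h>
	#include <stdint.h>

	static unsigned int get_op(uint32_t inst)  { return inst >> 26; }
	static unsigned int get_xop(uint32_t inst) { return (inst >> 1) & 0x3ff; }
	static unsigned int get_rt(uint32_t inst)  { return (inst >> 21) & 0x1f; }
	static unsigned int get_sprn(uint32_t inst)
	{
		/* The SPR number's two 5-bit halves are swapped in the
		 * instruction word; this swaps them back. */
		return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
	}

	int main(void)
	{
		uint32_t inst = 0x7c7042a6;	/* mfspr r3, SPRG0 */

		assert(get_op(inst) == 31 && get_xop(inst) == 339);
		assert(get_rt(inst) == 3 && get_sprn(inst) == 272);
		return 0;
	}
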
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
new file mode 100644
index 000000000000..bad40bd2d3ac
--- /dev/null
+++ b/arch/powerpc/kvm/powerpc.c
@@ -0,0 +1,436 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2007
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
19 */
20
21#include <linux/errno.h>
22#include <linux/err.h>
23#include <linux/kvm_host.h>
24#include <linux/module.h>
25#include <linux/vmalloc.h>
26#include <linux/fs.h>
27#include <asm/cputable.h>
28#include <asm/uaccess.h>
29#include <asm/kvm_ppc.h>
30
31
32gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
33{
34 return gfn;
35}
36
37int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
38{
39 /* XXX implement me */
40 return 0;
41}
42
43int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
44{
45 return 1;
46}
47
48
49int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
50{
51 enum emulation_result er;
52 int r;
53
54 er = kvmppc_emulate_instruction(run, vcpu);
55 switch (er) {
56 case EMULATE_DONE:
57 /* Future optimization: only reload non-volatiles if they were
58 * actually modified. */
59 r = RESUME_GUEST_NV;
60 break;
61 case EMULATE_DO_MMIO:
62 run->exit_reason = KVM_EXIT_MMIO;
63 /* We must reload nonvolatiles because "update" load/store
64 * instructions modify register state. */
65 /* Future optimization: only reload non-volatiles if they were
66 * actually modified. */
67 r = RESUME_HOST_NV;
68 break;
69 case EMULATE_FAIL:
70 /* XXX Deliver Program interrupt to guest. */
71 printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
72 vcpu->arch.last_inst);
73 r = RESUME_HOST;
74 break;
75 default:
76 BUG();
77 }
78
79 return r;
80}
81
82void kvm_arch_hardware_enable(void *garbage)
83{
84}
85
86void kvm_arch_hardware_disable(void *garbage)
87{
88}
89
90int kvm_arch_hardware_setup(void)
91{
92 return 0;
93}
94
95void kvm_arch_hardware_unsetup(void)
96{
97}
98
99void kvm_arch_check_processor_compat(void *rtn)
100{
101 int r;
102
103 if (strcmp(cur_cpu_spec->platform, "ppc440") == 0)
104 r = 0;
105 else
106 r = -ENOTSUPP;
107
108 *(int *)rtn = r;
109}
110
111struct kvm *kvm_arch_create_vm(void)
112{
113 struct kvm *kvm;
114
115 kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
116 if (!kvm)
117 return ERR_PTR(-ENOMEM);
118
119 return kvm;
120}
121
122static void kvmppc_free_vcpus(struct kvm *kvm)
123{
124 unsigned int i;
125
126 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
127 if (kvm->vcpus[i]) {
128 kvm_arch_vcpu_free(kvm->vcpus[i]);
129 kvm->vcpus[i] = NULL;
130 }
131 }
132}
133
134void kvm_arch_destroy_vm(struct kvm *kvm)
135{
136 kvmppc_free_vcpus(kvm);
137 kvm_free_physmem(kvm);
138 kfree(kvm);
139}
140
141int kvm_dev_ioctl_check_extension(long ext)
142{
143 int r;
144
145 switch (ext) {
146 case KVM_CAP_USER_MEMORY:
147 r = 1;
148 break;
149 default:
150 r = 0;
151 break;
152 }
153 return r;
154
155}
156
157long kvm_arch_dev_ioctl(struct file *filp,
158 unsigned int ioctl, unsigned long arg)
159{
160 return -EINVAL;
161}
162
163int kvm_arch_set_memory_region(struct kvm *kvm,
164 struct kvm_userspace_memory_region *mem,
165 struct kvm_memory_slot old,
166 int user_alloc)
167{
168 return 0;
169}
170
171struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
172{
173 struct kvm_vcpu *vcpu;
174 int err;
175
176 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
177 if (!vcpu) {
178 err = -ENOMEM;
179 goto out;
180 }
181
182 err = kvm_vcpu_init(vcpu, kvm, id);
183 if (err)
184 goto free_vcpu;
185
186 return vcpu;
187
188free_vcpu:
189 kmem_cache_free(kvm_vcpu_cache, vcpu);
190out:
191 return ERR_PTR(err);
192}
193
194void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
195{
196 kvm_vcpu_uninit(vcpu);
197 kmem_cache_free(kvm_vcpu_cache, vcpu);
198}
199
200void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
201{
202 kvm_arch_vcpu_free(vcpu);
203}
204
205int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
206{
207 unsigned int priority = exception_priority[BOOKE_INTERRUPT_DECREMENTER];
208
209 return test_bit(priority, &vcpu->arch.pending_exceptions);
210}
211
212static void kvmppc_decrementer_func(unsigned long data)
213{
214 struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
215
216 kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_DECREMENTER);
217}
218
219int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
220{
221 setup_timer(&vcpu->arch.dec_timer, kvmppc_decrementer_func,
222 (unsigned long)vcpu);
223
224 return 0;
225}
226
227void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
228{
229}
230
231void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
232{
233}
234
235void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
236{
237}
238
239void decache_vcpus_on_cpu(int cpu)
240{
241}
242
243int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
244 struct kvm_debug_guest *dbg)
245{
246 return -ENOTSUPP;
247}
248
249static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
250 struct kvm_run *run)
251{
252 u32 *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];
253 *gpr = run->dcr.data;
254}
255
256static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
257 struct kvm_run *run)
258{
259 u32 *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];
260
261 if (run->mmio.len > sizeof(*gpr)) {
262 printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
263 return;
264 }
265
266 if (vcpu->arch.mmio_is_bigendian) {
267 switch (run->mmio.len) {
268 case 4: *gpr = *(u32 *)run->mmio.data; break;
269 case 2: *gpr = *(u16 *)run->mmio.data; break;
270 case 1: *gpr = *(u8 *)run->mmio.data; break;
271 }
272 } else {
273 /* Convert BE data from userland back to LE. */
274 switch (run->mmio.len) {
275 case 4: *gpr = ld_le32((u32 *)run->mmio.data); break;
276 case 2: *gpr = ld_le16((u16 *)run->mmio.data); break;
277 case 1: *gpr = *(u8 *)run->mmio.data; break;
278 }
279 }
280}
281
282int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
283 unsigned int rt, unsigned int bytes, int is_bigendian)
284{
285 if (bytes > sizeof(run->mmio.data)) {
286 printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
287		       bytes);
288 }
289
290 run->mmio.phys_addr = vcpu->arch.paddr_accessed;
291 run->mmio.len = bytes;
292 run->mmio.is_write = 0;
293
294 vcpu->arch.io_gpr = rt;
295 vcpu->arch.mmio_is_bigendian = is_bigendian;
296 vcpu->mmio_needed = 1;
297 vcpu->mmio_is_write = 0;
298
299 return EMULATE_DO_MMIO;
300}
301
302int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
303 u32 val, unsigned int bytes, int is_bigendian)
304{
305 void *data = run->mmio.data;
306
307 if (bytes > sizeof(run->mmio.data)) {
308 printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
309		       bytes);
310 }
311
312 run->mmio.phys_addr = vcpu->arch.paddr_accessed;
313 run->mmio.len = bytes;
314 run->mmio.is_write = 1;
315 vcpu->mmio_needed = 1;
316 vcpu->mmio_is_write = 1;
317
318 /* Store the value at the lowest bytes in 'data'. */
319 if (is_bigendian) {
320 switch (bytes) {
321 case 4: *(u32 *)data = val; break;
322 case 2: *(u16 *)data = val; break;
323 case 1: *(u8 *)data = val; break;
324 }
325 } else {
326 /* Store LE value into 'data'. */
327 switch (bytes) {
328 case 4: st_le32(data, val); break;
329 case 2: st_le16(data, val); break;
330 case 1: *(u8 *)data = val; break;
331 }
332 }
333
334 return EMULATE_DO_MMIO;
335}
336
337int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
338{
339 int r;
340 sigset_t sigsaved;
341
342 if (vcpu->sigset_active)
343 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
344
345 if (vcpu->mmio_needed) {
346 if (!vcpu->mmio_is_write)
347 kvmppc_complete_mmio_load(vcpu, run);
348 vcpu->mmio_needed = 0;
349 } else if (vcpu->arch.dcr_needed) {
350 if (!vcpu->arch.dcr_is_write)
351 kvmppc_complete_dcr_load(vcpu, run);
352 vcpu->arch.dcr_needed = 0;
353 }
354
355 kvmppc_check_and_deliver_interrupts(vcpu);
356
357 local_irq_disable();
358 kvm_guest_enter();
359 r = __kvmppc_vcpu_run(run, vcpu);
360 kvm_guest_exit();
361 local_irq_enable();
362
363 if (vcpu->sigset_active)
364 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
365
366 return r;
367}
368
369int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
370{
371 kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_EXTERNAL);
372 return 0;
373}
374
375int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
376 struct kvm_mp_state *mp_state)
377{
378 return -EINVAL;
379}
380
381int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
382 struct kvm_mp_state *mp_state)
383{
384 return -EINVAL;
385}
386
387long kvm_arch_vcpu_ioctl(struct file *filp,
388 unsigned int ioctl, unsigned long arg)
389{
390 struct kvm_vcpu *vcpu = filp->private_data;
391 void __user *argp = (void __user *)arg;
392 long r;
393
394 switch (ioctl) {
395 case KVM_INTERRUPT: {
396 struct kvm_interrupt irq;
397 r = -EFAULT;
398 if (copy_from_user(&irq, argp, sizeof(irq)))
399 goto out;
400 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
401 break;
402 }
403 default:
404 r = -EINVAL;
405 }
406
407out:
408 return r;
409}
410
411int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
412{
413 return -ENOTSUPP;
414}
415
416long kvm_arch_vm_ioctl(struct file *filp,
417 unsigned int ioctl, unsigned long arg)
418{
419 long r;
420
421 switch (ioctl) {
422 default:
423 r = -EINVAL;
424 }
425
426 return r;
427}
428
429int kvm_arch_init(void *opaque)
430{
431 return 0;
432}
433
434void kvm_arch_exit(void)
435{
436}
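
A hypothetical userspace counterpart to the KVM_INTERRUPT case above (a sketch; on this port the irq payload is ignored and BOOKE_INTERRUPT_EXTERNAL is simply queued):

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	static int raise_external_irq(int vcpu_fd)
	{
		struct kvm_interrupt irq = { .irq = 0 };	/* unused here */

		return ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
	}
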
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index f6a68e178fc5..8f5f02160ffc 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -62,6 +62,10 @@ config GENERIC_LOCKBREAK
 	default y
 	depends on SMP && PREEMPT
 
+config PGSTE
+	bool
+	default y if KVM
+
 mainmenu "Linux Kernel Configuration"
 
 config S390
@@ -69,6 +73,7 @@ config S390
 	select HAVE_OPROFILE
 	select HAVE_KPROBES
 	select HAVE_KRETPROBES
+	select HAVE_KVM if 64BIT
 
 source "init/Kconfig"
 
@@ -515,6 +520,13 @@ config ZFCPDUMP
 	  Select this option if you want to build an zfcpdump enabled kernel.
 	  Refer to <file:Documentation/s390/zfcpdump.txt> for more details on this.
 
+config S390_GUEST
+	bool "s390 guest support (EXPERIMENTAL)"
+	depends on 64BIT && EXPERIMENTAL
+	select VIRTIO
+	select VIRTIO_RING
+	help
+	  Select this option if you want to run the kernel under s390 linux
 endmenu
 
 source "net/Kconfig"
@@ -536,3 +548,5 @@ source "security/Kconfig"
 source "crypto/Kconfig"
 
 source "lib/Kconfig"
+
+source "arch/s390/kvm/Kconfig"
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index f708be367b03..792a4e7743ce 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -87,7 +87,7 @@ LDFLAGS_vmlinux := -e start
 head-y		:= arch/s390/kernel/head.o arch/s390/kernel/init_task.o
 
 core-y		+= arch/s390/mm/ arch/s390/kernel/ arch/s390/crypto/ \
-		   arch/s390/appldata/ arch/s390/hypfs/
+		   arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/
 libs-y		+= arch/s390/lib/
 drivers-y	+= drivers/s390/
 drivers-$(CONFIG_MATHEMU) += arch/s390/math-emu/
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 540a67f979b6..68ec4083bf73 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -144,6 +144,10 @@ static noinline __init void detect_machine_type(void)
 	/* Running on a P/390 ? */
 	if (cpuinfo->cpu_id.machine == 0x7490)
 		machine_flags |= 4;
+
+	/* Running under KVM ? */
+	if (cpuinfo->cpu_id.version == 0xfe)
+		machine_flags |= 64;
 }
 
 #ifdef CONFIG_64BIT
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 7141147e6b63..a9d18aafa5f4 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -316,7 +316,11 @@ static int __init early_parse_ipldelay(char *p)
 early_param("ipldelay", early_parse_ipldelay);
 
 #ifdef CONFIG_S390_SWITCH_AMODE
+#ifdef CONFIG_PGSTE
+unsigned int switch_amode = 1;
+#else
 unsigned int switch_amode = 0;
+#endif
 EXPORT_SYMBOL_GPL(switch_amode);
 
 static void set_amode_and_uaccess(unsigned long user_amode,
@@ -797,9 +801,13 @@ setup_arch(char **cmdline_p)
 	       "This machine has an IEEE fpu\n" :
 	       "This machine has no IEEE fpu\n");
 #else /* CONFIG_64BIT */
-	printk((MACHINE_IS_VM) ?
-	       "We are running under VM (64 bit mode)\n" :
-	       "We are running native (64 bit mode)\n");
+	if (MACHINE_IS_VM)
+		printk("We are running under VM (64 bit mode)\n");
+	else if (MACHINE_IS_KVM) {
+		printk("We are running under KVM (64 bit mode)\n");
+		add_preferred_console("ttyS", 1, NULL);
+	} else
+		printk("We are running native (64 bit mode)\n");
 #endif /* CONFIG_64BIT */
 
 	/* Save unparsed command line copy for /proc/cmdline */
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index c5f05b3fb2c3..ca90ee3f930e 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -110,6 +110,7 @@ void account_system_vtime(struct task_struct *tsk)
 	S390_lowcore.steal_clock -= cputime << 12;
 	account_system_time(tsk, 0, cputime);
 }
+EXPORT_SYMBOL_GPL(account_system_vtime);
 
 static inline void set_vtimer(__u64 expires)
 {
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
new file mode 100644
index 000000000000..1761b74d639b
--- /dev/null
+++ b/arch/s390/kvm/Kconfig
@@ -0,0 +1,46 @@
1#
2# KVM configuration
3#
4config HAVE_KVM
5 bool
6
7menuconfig VIRTUALIZATION
8 bool "Virtualization"
9 default y
10 ---help---
11 Say Y here to get to see options for using your Linux host to run other
12 operating systems inside virtual machines (guests).
13 This option alone does not add any kernel code.
14
15 If you say N, all options in this submenu will be skipped and disabled.
16
17if VIRTUALIZATION
18
19config KVM
20 tristate "Kernel-based Virtual Machine (KVM) support"
21 depends on HAVE_KVM && EXPERIMENTAL
22 select PREEMPT_NOTIFIERS
23 select ANON_INODES
24 select S390_SWITCH_AMODE
25 select PREEMPT
26 ---help---
27 Support hosting paravirtualized guest machines using the SIE
28 virtualization capability on the mainframe. This should work
29 on any 64bit machine.
30
31 This module provides access to the hardware capabilities through
32 a character device node named /dev/kvm.
33
34 To compile this as a module, choose M here: the module
35 will be called kvm.
36
37 If unsure, say N.
38
39config KVM_TRACE
40 bool
41
42# OK, it's a little counter-intuitive to do this, but it puts it neatly under
43# the virtualization menu.
44source drivers/virtio/Kconfig
45
46endif # VIRTUALIZATION
diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile
new file mode 100644
index 000000000000..e5221ec0b8e3
--- /dev/null
+++ b/arch/s390/kvm/Makefile
@@ -0,0 +1,14 @@
1# Makefile for kernel virtual machines on s390
2#
3# Copyright IBM Corp. 2008
4#
5# This program is free software; you can redistribute it and/or modify
6# it under the terms of the GNU General Public License (version 2 only)
7# as published by the Free Software Foundation.
8
9common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o)
10
11EXTRA_CFLAGS += -Ivirt/kvm -Iarch/s390/kvm
12
13kvm-objs := $(common-objs) kvm-s390.o sie64a.o intercept.o interrupt.o priv.o sigp.o diag.o
14obj-$(CONFIG_KVM) += kvm.o
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
new file mode 100644
index 000000000000..f639a152869f
--- /dev/null
+++ b/arch/s390/kvm/diag.c
@@ -0,0 +1,67 @@
1/*
2 * diag.c - handling diagnose instructions
3 *
4 * Copyright IBM Corp. 2008
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 */
13
14#include <linux/kvm.h>
15#include <linux/kvm_host.h>
16#include "kvm-s390.h"
17
18static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
19{
20 VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
21 vcpu->stat.diagnose_44++;
22 vcpu_put(vcpu);
23 schedule();
24 vcpu_load(vcpu);
25 return 0;
26}
27
28static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
29{
30 unsigned int reg = vcpu->arch.sie_block->ipa & 0xf;
31 unsigned long subcode = vcpu->arch.guest_gprs[reg] & 0xffff;
32
33 VCPU_EVENT(vcpu, 5, "diag ipl functions, subcode %lx", subcode);
34 switch (subcode) {
35 case 3:
36 vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR;
37 break;
38 case 4:
39 vcpu->run->s390_reset_flags = 0;
40 break;
41 default:
42 return -ENOTSUPP;
43 }
44
45 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
46 vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM;
47 vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
48 vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
49 vcpu->run->exit_reason = KVM_EXIT_S390_RESET;
50 VCPU_EVENT(vcpu, 3, "requesting userspace resets %lx",
51 vcpu->run->s390_reset_flags);
52 return -EREMOTE;
53}
54
55int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
56{
57 int code = (vcpu->arch.sie_block->ipb & 0xfff0000) >> 16;
58
59 switch (code) {
60 case 0x44:
61 return __diag_time_slice_end(vcpu);
62 case 0x308:
63 return __diag_ipl_functions(vcpu);
64 default:
65 return -ENOTSUPP;
66 }
67}
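
As a quick illustration of the dispatch in kvm_s390_handle_diag() above, the diagnose code is carried in the ipb bits selected by the 0xfff0000 mask. A stand-alone sketch of that extraction, with made-up ipb values rather than anything from a real trace:

#include <stdio.h>

int main(void)
{
	/* Illustrative ipb samples; the mask and shift mirror
	 * kvm_s390_handle_diag() above. */
	unsigned int samples[] = { 0x00440000u, 0x03080000u };
	unsigned int i;

	for (i = 0; i < 2; i++) {
		int code = (samples[i] & 0xfff0000) >> 16;
		printf("ipb=%#010x -> diagnose %#x\n", samples[i], code);
	}
	return 0;	/* prints diagnose 0x44 and diagnose 0x308 */
}
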
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
new file mode 100644
index 000000000000..4e0633c413f3
--- /dev/null
+++ b/arch/s390/kvm/gaccess.h
@@ -0,0 +1,274 @@
1/*
2 * gaccess.h - access guest memory
3 *
4 * Copyright IBM Corp. 2008
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 */
12
13#ifndef __KVM_S390_GACCESS_H
14#define __KVM_S390_GACCESS_H
15
16#include <linux/compiler.h>
17#include <linux/kvm_host.h>
18#include <asm/uaccess.h>
19
20static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
21 u64 guestaddr)
22{
23 u64 prefix = vcpu->arch.sie_block->prefix;
24 u64 origin = vcpu->kvm->arch.guest_origin;
25 u64 memsize = vcpu->kvm->arch.guest_memsize;
26
27 if (guestaddr < 2 * PAGE_SIZE)
28 guestaddr += prefix;
29 else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE))
30 guestaddr -= prefix;
31
32 if (guestaddr > memsize)
33 return (void __user __force *) ERR_PTR(-EFAULT);
34
35 guestaddr += origin;
36
37 return (void __user *) guestaddr;
38}
39
40static inline int get_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
41 u64 *result)
42{
43 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
44
45 BUG_ON(guestaddr & 7);
46
47 if (IS_ERR((void __force *) uptr))
48 return PTR_ERR((void __force *) uptr);
49
50 return get_user(*result, (u64 __user *) uptr);
51}
52
53static inline int get_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
54 u32 *result)
55{
56 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
57
58 BUG_ON(guestaddr & 3);
59
60 if (IS_ERR((void __force *) uptr))
61 return PTR_ERR((void __force *) uptr);
62
63 return get_user(*result, (u32 __user *) uptr);
64}
65
66static inline int get_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
67 u16 *result)
68{
69 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
70
71 BUG_ON(guestaddr & 1);
72
73 if (IS_ERR(uptr))
74 return PTR_ERR(uptr);
75
76 return get_user(*result, (u16 __user *) uptr);
77}
78
79static inline int get_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
80 u8 *result)
81{
82 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
83
84 if (IS_ERR((void __force *) uptr))
85 return PTR_ERR((void __force *) uptr);
86
87 return get_user(*result, (u8 __user *) uptr);
88}
89
90static inline int put_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
91 u64 value)
92{
93 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
94
95 BUG_ON(guestaddr & 7);
96
97 if (IS_ERR((void __force *) uptr))
98 return PTR_ERR((void __force *) uptr);
99
100 return put_user(value, (u64 __user *) uptr);
101}
102
103static inline int put_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
104 u32 value)
105{
106 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
107
108 BUG_ON(guestaddr & 3);
109
110 if (IS_ERR((void __force *) uptr))
111 return PTR_ERR((void __force *) uptr);
112
113 return put_user(value, (u32 __user *) uptr);
114}
115
116static inline int put_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
117 u16 value)
118{
119 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
120
121 BUG_ON(guestaddr & 1);
122
123 if (IS_ERR((void __force *) uptr))
124 return PTR_ERR((void __force *) uptr);
125
126 return put_user(value, (u16 __user *) uptr);
127}
128
129static inline int put_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
130 u8 value)
131{
132 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
133
134 if (IS_ERR((void __force *) uptr))
135 return PTR_ERR((void __force *) uptr);
136
137 return put_user(value, (u8 __user *) uptr);
138}
139
140
141static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu, u64 guestdest,
142 const void *from, unsigned long n)
143{
144 int rc;
145 unsigned long i;
146 const u8 *data = from;
147
148 for (i = 0; i < n; i++) {
149 rc = put_guest_u8(vcpu, guestdest++, *(data++));
150 if (rc < 0)
151 return rc;
152 }
153 return 0;
154}
155
156static inline int copy_to_guest(struct kvm_vcpu *vcpu, u64 guestdest,
157 const void *from, unsigned long n)
158{
159 u64 prefix = vcpu->arch.sie_block->prefix;
160 u64 origin = vcpu->kvm->arch.guest_origin;
161 u64 memsize = vcpu->kvm->arch.guest_memsize;
162
163 if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
164 goto slowpath;
165
166 if ((guestdest < prefix) && (guestdest + n > prefix))
167 goto slowpath;
168
169 if ((guestdest < prefix + 2 * PAGE_SIZE)
170 && (guestdest + n > prefix + 2 * PAGE_SIZE))
171 goto slowpath;
172
173 if (guestdest < 2 * PAGE_SIZE)
174 guestdest += prefix;
175 else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
176 guestdest -= prefix;
177
178 if (guestdest + n > memsize)
179 return -EFAULT;
180
181 if (guestdest + n < guestdest)
182 return -EFAULT;
183
184 guestdest += origin;
185
186 return copy_to_user((void __user *) guestdest, from, n);
187slowpath:
188 return __copy_to_guest_slow(vcpu, guestdest, from, n);
189}
190
191static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
192 u64 guestsrc, unsigned long n)
193{
194 int rc;
195 unsigned long i;
196 u8 *data = to;
197
198 for (i = 0; i < n; i++) {
199 rc = get_guest_u8(vcpu, guestsrc++, data++);
200 if (rc < 0)
201 return rc;
202 }
203 return 0;
204}
205
206static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
207 u64 guestsrc, unsigned long n)
208{
209 u64 prefix = vcpu->arch.sie_block->prefix;
210 u64 origin = vcpu->kvm->arch.guest_origin;
211 u64 memsize = vcpu->kvm->arch.guest_memsize;
212
213 if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
214 goto slowpath;
215
216 if ((guestsrc < prefix) && (guestsrc + n > prefix))
217 goto slowpath;
218
219 if ((guestsrc < prefix + 2 * PAGE_SIZE)
220 && (guestsrc + n > prefix + 2 * PAGE_SIZE))
221 goto slowpath;
222
223 if (guestsrc < 2 * PAGE_SIZE)
224 guestsrc += prefix;
225 else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
226 guestsrc -= prefix;
227
228 if (guestsrc + n > memsize)
229 return -EFAULT;
230
231 if (guestsrc + n < guestsrc)
232 return -EFAULT;
233
234 guestsrc += origin;
235
236 return copy_from_user(to, (void __user *) guestsrc, n);
237slowpath:
238 return __copy_from_guest_slow(vcpu, to, guestsrc, n);
239}
240
241static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu, u64 guestdest,
242 const void *from, unsigned long n)
243{
244 u64 origin = vcpu->kvm->arch.guest_origin;
245 u64 memsize = vcpu->kvm->arch.guest_memsize;
246
247 if (guestdest + n > memsize)
248 return -EFAULT;
249
250 if (guestdest + n < guestdest)
251 return -EFAULT;
252
253 guestdest += origin;
254
255 return copy_to_user((void __user *) guestdest, from, n);
256}
257
258static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
259 u64 guestsrc, unsigned long n)
260{
261 u64 origin = vcpu->kvm->arch.guest_origin;
262 u64 memsize = vcpu->kvm->arch.guest_memsize;
263
264 if (guestsrc + n > memsize)
265 return -EFAULT;
266
267 if (guestsrc + n < guestsrc)
268 return -EFAULT;
269
270 guestsrc += origin;
271
272 return copy_from_user(to, (void __user *) guestsrc, n);
273}
274#endif
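
The address arithmetic in __guestaddr_to_user() implements s390 low-core prefixing: the first two pages of guest storage and the two pages at the prefix address are swapped before the origin offset is applied. A user-space sketch of the same mapping, where prefix, origin and memsize are illustrative stand-ins for the sie_block and kvm->arch fields:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ul

/* Mirror of the prefixing logic in __guestaddr_to_user(). */
static uint64_t guest_to_host(uint64_t guestaddr, uint64_t prefix,
			      uint64_t origin, uint64_t memsize)
{
	if (guestaddr < 2 * PAGE_SIZE)
		guestaddr += prefix;	/* pages 0,1 -> prefix area */
	else if (guestaddr >= prefix && guestaddr < prefix + 2 * PAGE_SIZE)
		guestaddr -= prefix;	/* prefix area -> pages 0,1 */

	if (guestaddr > memsize)	/* same bound check as the original */
		return 0;		/* the kernel returns ERR_PTR(-EFAULT) */

	return guestaddr + origin;
}

int main(void)
{
	uint64_t prefix = 0x20000, origin = 0x100000000ull, memsize = 0x8000000;

	/* guest page 0 lands in the prefix area; the prefix area lands at 0 */
	printf("%#llx\n", (unsigned long long)
	       guest_to_host(0x0, prefix, origin, memsize));
	printf("%#llx\n", (unsigned long long)
	       guest_to_host(0x20000, prefix, origin, memsize));
	return 0;
}
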
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
new file mode 100644
index 000000000000..349581a26103
--- /dev/null
+++ b/arch/s390/kvm/intercept.c
@@ -0,0 +1,216 @@
1/*
2 * intercept.c - in-kernel handling for sie intercepts
3 *
4 * Copyright IBM Corp. 2008
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 */
13
14#include <linux/kvm_host.h>
15#include <linux/errno.h>
16#include <linux/pagemap.h>
17
18#include <asm/kvm_host.h>
19
20#include "kvm-s390.h"
21#include "gaccess.h"
22
23static int handle_lctg(struct kvm_vcpu *vcpu)
24{
25 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
26 int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
27 int base2 = vcpu->arch.sie_block->ipb >> 28;
28 int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
29 ((vcpu->arch.sie_block->ipb & 0xff00) << 4);
30 u64 useraddr;
31 int reg, rc;
32
33 vcpu->stat.instruction_lctg++;
34 if ((vcpu->arch.sie_block->ipb & 0xff) != 0x2f)
35 return -ENOTSUPP;
36
37 useraddr = disp2;
38 if (base2)
39 useraddr += vcpu->arch.guest_gprs[base2];
40
41 reg = reg1;
42
43 VCPU_EVENT(vcpu, 5, "lctg r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
44 disp2);
45
46 do {
47 rc = get_guest_u64(vcpu, useraddr,
48 &vcpu->arch.sie_block->gcr[reg]);
49 if (rc == -EFAULT) {
50 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
51 break;
52 }
53 useraddr += 8;
54 if (reg == reg3)
55 break;
56 reg = (reg + 1) % 16;
57 } while (1);
58 return 0;
59}
60
61static int handle_lctl(struct kvm_vcpu *vcpu)
62{
63 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
64 int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
65 int base2 = vcpu->arch.sie_block->ipb >> 28;
66 int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
67 u64 useraddr;
68 u32 val = 0;
69 int reg, rc;
70
71 vcpu->stat.instruction_lctl++;
72
73 useraddr = disp2;
74 if (base2)
75 useraddr += vcpu->arch.guest_gprs[base2];
76
77 VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
78 disp2);
79
80 reg = reg1;
81 do {
82 rc = get_guest_u32(vcpu, useraddr, &val);
83 if (rc == -EFAULT) {
84 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
85 break;
86 }
87 vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
88 vcpu->arch.sie_block->gcr[reg] |= val;
89 useraddr += 4;
90 if (reg == reg3)
91 break;
92 reg = (reg + 1) % 16;
93 } while (1);
94 return 0;
95}
96
97static intercept_handler_t instruction_handlers[256] = {
98 [0x83] = kvm_s390_handle_diag,
99 [0xae] = kvm_s390_handle_sigp,
100 [0xb2] = kvm_s390_handle_priv,
101 [0xb7] = handle_lctl,
102 [0xeb] = handle_lctg,
103};
104
105static int handle_noop(struct kvm_vcpu *vcpu)
106{
107 switch (vcpu->arch.sie_block->icptcode) {
108 case 0x10:
109 vcpu->stat.exit_external_request++;
110 break;
111 case 0x14:
112 vcpu->stat.exit_external_interrupt++;
113 break;
114 default:
115 break; /* nothing */
116 }
117 return 0;
118}
119
120static int handle_stop(struct kvm_vcpu *vcpu)
121{
122 int rc;
123
124 vcpu->stat.exit_stop_request++;
125 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
126 spin_lock_bh(&vcpu->arch.local_int.lock);
127 if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) {
128 vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP;
129 rc = __kvm_s390_vcpu_store_status(vcpu,
130 KVM_S390_STORE_STATUS_NOADDR);
131 if (rc >= 0)
132 rc = -ENOTSUPP;
133 }
134
135 if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) {
136 vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP;
137 VCPU_EVENT(vcpu, 3, "%s", "cpu stopped");
138 rc = -ENOTSUPP;
139 } else
140 rc = 0;
141 spin_unlock_bh(&vcpu->arch.local_int.lock);
142 return rc;
143}
144
145static int handle_validity(struct kvm_vcpu *vcpu)
146{
147 int viwhy = vcpu->arch.sie_block->ipb >> 16;
148 vcpu->stat.exit_validity++;
149 if (viwhy == 0x37) {
150 fault_in_pages_writeable((char __user *)
151 vcpu->kvm->arch.guest_origin +
152 vcpu->arch.sie_block->prefix,
153 PAGE_SIZE);
154 return 0;
155 }
156 VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d",
157 viwhy);
158 return -ENOTSUPP;
159}
160
161static int handle_instruction(struct kvm_vcpu *vcpu)
162{
163 intercept_handler_t handler;
164
165 vcpu->stat.exit_instruction++;
166 handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8];
167 if (handler)
168 return handler(vcpu);
169 return -ENOTSUPP;
170}
171
172static int handle_prog(struct kvm_vcpu *vcpu)
173{
174 vcpu->stat.exit_program_interruption++;
175 return kvm_s390_inject_program_int(vcpu, vcpu->arch.sie_block->iprcc);
176}
177
178static int handle_instruction_and_prog(struct kvm_vcpu *vcpu)
179{
180 int rc, rc2;
181
182 vcpu->stat.exit_instr_and_program++;
183 rc = handle_instruction(vcpu);
184 rc2 = handle_prog(vcpu);
185
186 if (rc == -ENOTSUPP)
187 vcpu->arch.sie_block->icptcode = 0x04;
188 if (rc)
189 return rc;
190 return rc2;
191}
192
193static const intercept_handler_t intercept_funcs[0x48 >> 2] = {
194 [0x00 >> 2] = handle_noop,
195 [0x04 >> 2] = handle_instruction,
196 [0x08 >> 2] = handle_prog,
197 [0x0C >> 2] = handle_instruction_and_prog,
198 [0x10 >> 2] = handle_noop,
199 [0x14 >> 2] = handle_noop,
200 [0x1C >> 2] = kvm_s390_handle_wait,
201 [0x20 >> 2] = handle_validity,
202 [0x28 >> 2] = handle_stop,
203};
204
205int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
206{
207 intercept_handler_t func;
208 u8 code = vcpu->arch.sie_block->icptcode;
209
210 if (code & 3 || code > 0x48)
211 return -ENOTSUPP;
212 func = intercept_funcs[code >> 2];
213 if (func)
214 return func(vcpu);
215 return -ENOTSUPP;
216}
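
The two dispatch tables above are indexed by simple arithmetic: SIE intercept codes are multiples of four, so icptcode >> 2 selects the top-level handler, and for instruction intercepts the major opcode byte ipa >> 8 selects the per-instruction handler. A small sketch of the index computation, with sample values only:

#include <stdio.h>

int main(void)
{
	/* Illustrative values: intercept code 0x04 (instruction) with a
	 * 0xb7xx ipa would dispatch via handle_instruction() to
	 * instruction_handlers[0xb7], i.e. handle_lctl() above. */
	unsigned char icptcode = 0x04;
	unsigned short ipa = 0xb72f;

	printf("intercept_funcs[%d], instruction_handlers[0x%02x]\n",
	       icptcode >> 2, ipa >> 8);
	return 0;
}
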
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
new file mode 100644
index 000000000000..fcd1ed8015c1
--- /dev/null
+++ b/arch/s390/kvm/interrupt.c
@@ -0,0 +1,592 @@
1/*
2 * interrupt.c - handling kvm guest interrupts
3 *
4 * Copyright IBM Corp. 2008
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 */
12
13#include <asm/lowcore.h>
14#include <asm/uaccess.h>
15#include <linux/kvm_host.h>
16#include "kvm-s390.h"
17#include "gaccess.h"
18
19static int psw_extint_disabled(struct kvm_vcpu *vcpu)
20{
21 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
22}
23
24static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
25{
26 if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
27 (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
28 (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
29 return 0;
30 return 1;
31}
32
33static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
34 struct interrupt_info *inti)
35{
36 switch (inti->type) {
37 case KVM_S390_INT_EMERGENCY:
38 if (psw_extint_disabled(vcpu))
39 return 0;
40 if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
41 return 1;
42 return 0;
43 case KVM_S390_INT_SERVICE:
44 if (psw_extint_disabled(vcpu))
45 return 0;
46 if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
47 return 1;
48 return 0;
49 case KVM_S390_INT_VIRTIO:
50 if (psw_extint_disabled(vcpu))
51 return 0;
52 if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
53 return 1;
54 return 0;
55 case KVM_S390_PROGRAM_INT:
56 case KVM_S390_SIGP_STOP:
57 case KVM_S390_SIGP_SET_PREFIX:
58 case KVM_S390_RESTART:
59 return 1;
60 default:
61 BUG();
62 }
63 return 0;
64}
65
66static void __set_cpu_idle(struct kvm_vcpu *vcpu)
67{
68 BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
69 atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
70 set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
71}
72
73static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
74{
75 BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
76 atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
77 clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
78}
79
80static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
81{
82 atomic_clear_mask(CPUSTAT_ECALL_PEND |
83 CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
84 &vcpu->arch.sie_block->cpuflags);
85 vcpu->arch.sie_block->lctl = 0x0000;
86}
87
88static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
89{
90 atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
91}
92
93static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
94 struct interrupt_info *inti)
95{
96 switch (inti->type) {
97 case KVM_S390_INT_EMERGENCY:
98 case KVM_S390_INT_SERVICE:
99 case KVM_S390_INT_VIRTIO:
100 if (psw_extint_disabled(vcpu))
101 __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
102 else
103 vcpu->arch.sie_block->lctl |= LCTL_CR0;
104 break;
105 case KVM_S390_SIGP_STOP:
106 __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
107 break;
108 default:
109 BUG();
110 }
111}
112
113static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
114 struct interrupt_info *inti)
115{
116 const unsigned short table[] = { 2, 4, 4, 6 };
117 int rc, exception = 0;
118
119 switch (inti->type) {
120 case KVM_S390_INT_EMERGENCY:
121 VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
122 vcpu->stat.deliver_emergency_signal++;
123 rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1201);
124 if (rc == -EFAULT)
125 exception = 1;
126
127 rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
128 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
129 if (rc == -EFAULT)
130 exception = 1;
131
132 rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
133 __LC_EXT_NEW_PSW, sizeof(psw_t));
134 if (rc == -EFAULT)
135 exception = 1;
136 break;
137
138 case KVM_S390_INT_SERVICE:
139 VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
140 inti->ext.ext_params);
141 vcpu->stat.deliver_service_signal++;
142 rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2401);
143 if (rc == -EFAULT)
144 exception = 1;
145
146 rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
147 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
148 if (rc == -EFAULT)
149 exception = 1;
150
151 rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
152 __LC_EXT_NEW_PSW, sizeof(psw_t));
153 if (rc == -EFAULT)
154 exception = 1;
155
156 rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
157 if (rc == -EFAULT)
158 exception = 1;
159 break;
160
161 case KVM_S390_INT_VIRTIO:
162 VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%lx",
163 inti->ext.ext_params, inti->ext.ext_params2);
164 vcpu->stat.deliver_virtio_interrupt++;
165 rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603);
166 if (rc == -EFAULT)
167 exception = 1;
168
169 rc = put_guest_u16(vcpu, __LC_CPU_ADDRESS, 0x0d00);
170 if (rc == -EFAULT)
171 exception = 1;
172
173 rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
174 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
175 if (rc == -EFAULT)
176 exception = 1;
177
178 rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
179 __LC_EXT_NEW_PSW, sizeof(psw_t));
180 if (rc == -EFAULT)
181 exception = 1;
182
183 rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
184 if (rc == -EFAULT)
185 exception = 1;
186
187 rc = put_guest_u64(vcpu, __LC_PFAULT_INTPARM,
188 inti->ext.ext_params2);
189 if (rc == -EFAULT)
190 exception = 1;
191 break;
192
193 case KVM_S390_SIGP_STOP:
194 VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
195 vcpu->stat.deliver_stop_signal++;
196 __set_intercept_indicator(vcpu, inti);
197 break;
198
199 case KVM_S390_SIGP_SET_PREFIX:
200 VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
201 inti->prefix.address);
202 vcpu->stat.deliver_prefix_signal++;
203 vcpu->arch.sie_block->prefix = inti->prefix.address;
204 vcpu->arch.sie_block->ihcpu = 0xffff;
205 break;
206
207 case KVM_S390_RESTART:
208 VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
209 vcpu->stat.deliver_restart_signal++;
210 rc = copy_to_guest(vcpu, offsetof(struct _lowcore,
211 restart_old_psw), &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
212 if (rc == -EFAULT)
213 exception = 1;
214
215 rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
216 offsetof(struct _lowcore, restart_psw), sizeof(psw_t));
217 if (rc == -EFAULT)
218 exception = 1;
219 break;
220
221 case KVM_S390_PROGRAM_INT:
222 VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
223 inti->pgm.code,
224 table[vcpu->arch.sie_block->ipa >> 14]);
225 vcpu->stat.deliver_program_int++;
226 rc = put_guest_u16(vcpu, __LC_PGM_INT_CODE, inti->pgm.code);
227 if (rc == -EFAULT)
228 exception = 1;
229
230 rc = put_guest_u16(vcpu, __LC_PGM_ILC,
231 table[vcpu->arch.sie_block->ipa >> 14]);
232 if (rc == -EFAULT)
233 exception = 1;
234
235 rc = copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
236 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
237 if (rc == -EFAULT)
238 exception = 1;
239
240 rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
241 __LC_PGM_NEW_PSW, sizeof(psw_t));
242 if (rc == -EFAULT)
243 exception = 1;
244 break;
245
246 default:
247 BUG();
248 }
249
250 if (exception) {
251 VCPU_EVENT(vcpu, 1, "%s", "program exception while delivering"
252 " interrupt");
253 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
254 if (inti->type == KVM_S390_PROGRAM_INT) {
255 printk(KERN_WARNING "kvm: recursive program check\n");
256 BUG();
257 }
258 }
259}
260
261static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
262{
263 int rc, exception = 0;
264
265 if (psw_extint_disabled(vcpu))
266 return 0;
267 if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
268 return 0;
269 rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1004);
270 if (rc == -EFAULT)
271 exception = 1;
272 rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
273 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
274 if (rc == -EFAULT)
275 exception = 1;
276 rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
277 __LC_EXT_NEW_PSW, sizeof(psw_t));
278 if (rc == -EFAULT)
279 exception = 1;
280
281 if (exception) {
282 VCPU_EVENT(vcpu, 1, "%s", "program exception while delivering" \
283 " ckc interrupt");
284 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
285 return 0;
286 }
287
288 return 1;
289}
290
291int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
292{
293 struct local_interrupt *li = &vcpu->arch.local_int;
294 struct float_interrupt *fi = vcpu->arch.local_int.float_int;
295 struct interrupt_info *inti;
296 int rc = 0;
297
298 if (atomic_read(&li->active)) {
299 spin_lock_bh(&li->lock);
300 list_for_each_entry(inti, &li->list, list)
301 if (__interrupt_is_deliverable(vcpu, inti)) {
302 rc = 1;
303 break;
304 }
305 spin_unlock_bh(&li->lock);
306 }
307
308 if ((!rc) && atomic_read(&fi->active)) {
309 spin_lock_bh(&fi->lock);
310 list_for_each_entry(inti, &fi->list, list)
311 if (__interrupt_is_deliverable(vcpu, inti)) {
312 rc = 1;
313 break;
314 }
315 spin_unlock_bh(&fi->lock);
316 }
317
318 if ((!rc) && (vcpu->arch.sie_block->ckc <
319 get_clock() + vcpu->arch.sie_block->epoch)) {
320 if ((!psw_extint_disabled(vcpu)) &&
321 (vcpu->arch.sie_block->gcr[0] & 0x800ul))
322 rc = 1;
323 }
324
325 return rc;
326}
327
328int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
329{
330 return 0;
331}
332
333int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
334{
335 u64 now, sltime;
336 DECLARE_WAITQUEUE(wait, current);
337
338 vcpu->stat.exit_wait_state++;
339 if (kvm_cpu_has_interrupt(vcpu))
340 return 0;
341
342 if (psw_interrupts_disabled(vcpu)) {
343 VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
344 __unset_cpu_idle(vcpu);
345 return -ENOTSUPP; /* disabled wait */
346 }
347
348 if (psw_extint_disabled(vcpu) ||
349 (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))) {
350 VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
351 goto no_timer;
352 }
353
354 now = get_clock() + vcpu->arch.sie_block->epoch;
355 if (vcpu->arch.sie_block->ckc < now) {
356 __unset_cpu_idle(vcpu);
357 return 0;
358 }
359
360 sltime = (vcpu->arch.sie_block->ckc - now) / (0xf4240000ul / HZ) + 1;
361
362 vcpu->arch.ckc_timer.expires = jiffies + sltime;
363
364 add_timer(&vcpu->arch.ckc_timer);
365 VCPU_EVENT(vcpu, 5, "enabled wait timer:%lx jiffies", sltime);
366no_timer:
367 spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
368 spin_lock_bh(&vcpu->arch.local_int.lock);
369 __set_cpu_idle(vcpu);
370 vcpu->arch.local_int.timer_due = 0;
371 add_wait_queue(&vcpu->arch.local_int.wq, &wait);
372 while (list_empty(&vcpu->arch.local_int.list) &&
373 list_empty(&vcpu->arch.local_int.float_int->list) &&
374 (!vcpu->arch.local_int.timer_due) &&
375 !signal_pending(current)) {
376 set_current_state(TASK_INTERRUPTIBLE);
377 spin_unlock_bh(&vcpu->arch.local_int.lock);
378 spin_unlock_bh(&vcpu->arch.local_int.float_int->lock);
379 vcpu_put(vcpu);
380 schedule();
381 vcpu_load(vcpu);
382 spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
383 spin_lock_bh(&vcpu->arch.local_int.lock);
384 }
385 __unset_cpu_idle(vcpu);
386 __set_current_state(TASK_RUNNING);
387 remove_wait_queue(&vcpu->arch.local_int.wq, &wait);
388 spin_unlock_bh(&vcpu->arch.local_int.lock);
389 spin_unlock_bh(&vcpu->arch.local_int.float_int->lock);
390 del_timer(&vcpu->arch.ckc_timer);
391 return 0;
392}
393
394void kvm_s390_idle_wakeup(unsigned long data)
395{
396 struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
397
398 spin_lock_bh(&vcpu->arch.local_int.lock);
399 vcpu->arch.local_int.timer_due = 1;
400 if (waitqueue_active(&vcpu->arch.local_int.wq))
401 wake_up_interruptible(&vcpu->arch.local_int.wq);
402 spin_unlock_bh(&vcpu->arch.local_int.lock);
403}
404
405
406void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
407{
408 struct local_interrupt *li = &vcpu->arch.local_int;
409 struct float_interrupt *fi = vcpu->arch.local_int.float_int;
410 struct interrupt_info *n, *inti = NULL;
411 int deliver;
412
413 __reset_intercept_indicators(vcpu);
414 if (atomic_read(&li->active)) {
415 do {
416 deliver = 0;
417 spin_lock_bh(&li->lock);
418 list_for_each_entry_safe(inti, n, &li->list, list) {
419 if (__interrupt_is_deliverable(vcpu, inti)) {
420 list_del(&inti->list);
421 deliver = 1;
422 break;
423 }
424 __set_intercept_indicator(vcpu, inti);
425 }
426 if (list_empty(&li->list))
427 atomic_set(&li->active, 0);
428 spin_unlock_bh(&li->lock);
429 if (deliver) {
430 __do_deliver_interrupt(vcpu, inti);
431 kfree(inti);
432 }
433 } while (deliver);
434 }
435
436 if ((vcpu->arch.sie_block->ckc <
437 get_clock() + vcpu->arch.sie_block->epoch))
438 __try_deliver_ckc_interrupt(vcpu);
439
440 if (atomic_read(&fi->active)) {
441 do {
442 deliver = 0;
443 spin_lock_bh(&fi->lock);
444 list_for_each_entry_safe(inti, n, &fi->list, list) {
445 if (__interrupt_is_deliverable(vcpu, inti)) {
446 list_del(&inti->list);
447 deliver = 1;
448 break;
449 }
450 __set_intercept_indicator(vcpu, inti);
451 }
452 if (list_empty(&fi->list))
453 atomic_set(&fi->active, 0);
454 spin_unlock_bh(&fi->lock);
455 if (deliver) {
456 __do_deliver_interrupt(vcpu, inti);
457 kfree(inti);
458 }
459 } while (deliver);
460 }
461}
462
463int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
464{
465 struct local_interrupt *li = &vcpu->arch.local_int;
466 struct interrupt_info *inti;
467
468 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
469 if (!inti)
470 return -ENOMEM;
471
472 inti->type = KVM_S390_PROGRAM_INT;
473 inti->pgm.code = code;
474
475 VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
476 spin_lock_bh(&li->lock);
477 list_add(&inti->list, &li->list);
478 atomic_set(&li->active, 1);
479 BUG_ON(waitqueue_active(&li->wq));
480 spin_unlock_bh(&li->lock);
481 return 0;
482}
483
484int kvm_s390_inject_vm(struct kvm *kvm,
485 struct kvm_s390_interrupt *s390int)
486{
487 struct local_interrupt *li;
488 struct float_interrupt *fi;
489 struct interrupt_info *inti;
490 int sigcpu;
491
492 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
493 if (!inti)
494 return -ENOMEM;
495
496 switch (s390int->type) {
497 case KVM_S390_INT_VIRTIO:
498 VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%lx",
499 s390int->parm, s390int->parm64);
500 inti->type = s390int->type;
501 inti->ext.ext_params = s390int->parm;
502 inti->ext.ext_params2 = s390int->parm64;
503 break;
504 case KVM_S390_INT_SERVICE:
505 VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
506 inti->type = s390int->type;
507 inti->ext.ext_params = s390int->parm;
508 break;
509 case KVM_S390_PROGRAM_INT:
510 case KVM_S390_SIGP_STOP:
511 case KVM_S390_INT_EMERGENCY:
512 default:
513 kfree(inti);
514 return -EINVAL;
515 }
516
517 mutex_lock(&kvm->lock);
518 fi = &kvm->arch.float_int;
519 spin_lock_bh(&fi->lock);
520 list_add_tail(&inti->list, &fi->list);
521 atomic_set(&fi->active, 1);
522 sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
523 if (sigcpu == KVM_MAX_VCPUS) {
524 do {
525 sigcpu = fi->next_rr_cpu++;
526 if (sigcpu == KVM_MAX_VCPUS)
527 sigcpu = fi->next_rr_cpu = 0;
528 } while (fi->local_int[sigcpu] == NULL);
529 }
530 li = fi->local_int[sigcpu];
531 spin_lock_bh(&li->lock);
532 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
533 if (waitqueue_active(&li->wq))
534 wake_up_interruptible(&li->wq);
535 spin_unlock_bh(&li->lock);
536 spin_unlock_bh(&fi->lock);
537 mutex_unlock(&kvm->lock);
538 return 0;
539}
540
541int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
542 struct kvm_s390_interrupt *s390int)
543{
544 struct local_interrupt *li;
545 struct interrupt_info *inti;
546
547 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
548 if (!inti)
549 return -ENOMEM;
550
551 switch (s390int->type) {
552 case KVM_S390_PROGRAM_INT:
553 if (s390int->parm & 0xffff0000) {
554 kfree(inti);
555 return -EINVAL;
556 }
557 inti->type = s390int->type;
558 inti->pgm.code = s390int->parm;
559 VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
560 s390int->parm);
561 break;
562 case KVM_S390_SIGP_STOP:
563 case KVM_S390_RESTART:
564 case KVM_S390_SIGP_SET_PREFIX:
565 case KVM_S390_INT_EMERGENCY:
566 VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
567 inti->type = s390int->type;
568 break;
569 case KVM_S390_INT_VIRTIO:
570 case KVM_S390_INT_SERVICE:
571 default:
572 kfree(inti);
573 return -EINVAL;
574 }
575
576 mutex_lock(&vcpu->kvm->lock);
577 li = &vcpu->arch.local_int;
578 spin_lock_bh(&li->lock);
579 if (inti->type == KVM_S390_PROGRAM_INT)
580 list_add(&inti->list, &li->list);
581 else
582 list_add_tail(&inti->list, &li->list);
583 atomic_set(&li->active, 1);
584 if (inti->type == KVM_S390_SIGP_STOP)
585 li->action_bits |= ACTION_STOP_ON_STOP;
586 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
587 if (waitqueue_active(&li->wq))
588 wake_up_interruptible(&vcpu->arch.local_int.wq);
589 spin_unlock_bh(&li->lock);
590 mutex_unlock(&vcpu->kvm->lock);
591 return 0;
592}
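
In kvm_s390_handle_wait() above, the sleep time is derived from the clock comparator: the TOD clock advances 0xf4240000 units (1,000,000 << 12) per second, so dividing the ckc delta by (0xf4240000ul / HZ) converts TOD units to jiffies, plus one to round up. A stand-alone sketch of that conversion, with HZ and the delta as assumed values:

#include <stdio.h>

#define HZ 100ul	/* assumed host tick rate */

int main(void)
{
	/* 0xf4240000 TOD units == 1 second, so half a second is: */
	unsigned long delta = 0xf4240000ul / 2;
	unsigned long sltime = delta / (0xf4240000ul / HZ) + 1;

	printf("%lu jiffies\n", sltime);	/* 51, i.e. ~HZ/2 rounded up */
	return 0;
}
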
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
new file mode 100644
index 000000000000..98d1e73e01f1
--- /dev/null
+++ b/arch/s390/kvm/kvm-s390.c
@@ -0,0 +1,685 @@
1/*
2 * kvm-s390.c -- hosting zSeries kernel virtual machines
3 *
4 * Copyright IBM Corp. 2008
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
13 */
14
15#include <linux/compiler.h>
16#include <linux/err.h>
17#include <linux/fs.h>
18#include <linux/init.h>
19#include <linux/kvm.h>
20#include <linux/kvm_host.h>
21#include <linux/module.h>
22#include <linux/slab.h>
23#include <linux/timer.h>
24#include <asm/lowcore.h>
25#include <asm/pgtable.h>
26
27#include "kvm-s390.h"
28#include "gaccess.h"
29
30#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
31
32struct kvm_stats_debugfs_item debugfs_entries[] = {
33 { "userspace_handled", VCPU_STAT(exit_userspace) },
34 { "exit_validity", VCPU_STAT(exit_validity) },
35 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
36 { "exit_external_request", VCPU_STAT(exit_external_request) },
37 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
38 { "exit_instruction", VCPU_STAT(exit_instruction) },
39 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
40 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
41 { "instruction_lctg", VCPU_STAT(instruction_lctg) },
42 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
43 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
44 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
45 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
46 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
47 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
48 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
49 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
50 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
51 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
52 { "instruction_spx", VCPU_STAT(instruction_spx) },
53 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
54 { "instruction_stap", VCPU_STAT(instruction_stap) },
55 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
56 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
57 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
58 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
59 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
60 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
61 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
62 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
63 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
64 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
65 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
66 { "diagnose_44", VCPU_STAT(diagnose_44) },
67 { NULL }
68};
69
70
71/* Section: not file related */
72void kvm_arch_hardware_enable(void *garbage)
73{
74 /* every s390 is virtualization enabled ;-) */
75}
76
77void kvm_arch_hardware_disable(void *garbage)
78{
79}
80
81void decache_vcpus_on_cpu(int cpu)
82{
83}
84
85int kvm_arch_hardware_setup(void)
86{
87 return 0;
88}
89
90void kvm_arch_hardware_unsetup(void)
91{
92}
93
94void kvm_arch_check_processor_compat(void *rtn)
95{
96}
97
98int kvm_arch_init(void *opaque)
99{
100 return 0;
101}
102
103void kvm_arch_exit(void)
104{
105}
106
107/* Section: device related */
108long kvm_arch_dev_ioctl(struct file *filp,
109 unsigned int ioctl, unsigned long arg)
110{
111 if (ioctl == KVM_S390_ENABLE_SIE)
112 return s390_enable_sie();
113 return -EINVAL;
114}
115
116int kvm_dev_ioctl_check_extension(long ext)
117{
118 return 0;
119}
120
121/* Section: vm related */
122/*
123 * Get (and clear) the dirty memory log for a memory slot.
124 */
125int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
126 struct kvm_dirty_log *log)
127{
128 return 0;
129}
130
131long kvm_arch_vm_ioctl(struct file *filp,
132 unsigned int ioctl, unsigned long arg)
133{
134 struct kvm *kvm = filp->private_data;
135 void __user *argp = (void __user *)arg;
136 int r;
137
138 switch (ioctl) {
139 case KVM_S390_INTERRUPT: {
140 struct kvm_s390_interrupt s390int;
141
142 r = -EFAULT;
143 if (copy_from_user(&s390int, argp, sizeof(s390int)))
144 break;
145 r = kvm_s390_inject_vm(kvm, &s390int);
146 break;
147 }
148 default:
149 r = -EINVAL;
150 }
151
152 return r;
153}
154
155struct kvm *kvm_arch_create_vm(void)
156{
157 struct kvm *kvm;
158 int rc;
159 char debug_name[16];
160
161 rc = s390_enable_sie();
162 if (rc)
163 goto out_nokvm;
164
165 rc = -ENOMEM;
166 kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
167 if (!kvm)
168 goto out_nokvm;
169
170 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
171 if (!kvm->arch.sca)
172 goto out_nosca;
173
174 sprintf(debug_name, "kvm-%u", current->pid);
175
176 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
177 if (!kvm->arch.dbf)
178 goto out_nodbf;
179
180 spin_lock_init(&kvm->arch.float_int.lock);
181 INIT_LIST_HEAD(&kvm->arch.float_int.list);
182
183 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
184 VM_EVENT(kvm, 3, "%s", "vm created");
185
186 try_module_get(THIS_MODULE);
187
188 return kvm;
189out_nodbf:
190 free_page((unsigned long)(kvm->arch.sca));
191out_nosca:
192 kfree(kvm);
193out_nokvm:
194 return ERR_PTR(rc);
195}
196
197void kvm_arch_destroy_vm(struct kvm *kvm)
198{
199 debug_unregister(kvm->arch.dbf);
200 free_page((unsigned long)(kvm->arch.sca));
201 kfree(kvm);
202 module_put(THIS_MODULE);
203}
204
205/* Section: vcpu related */
206int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
207{
208 return 0;
209}
210
211void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
212{
213 /* kvm common code refers to this, but doesn't call it */
214 BUG();
215}
216
217void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
218{
219 save_fp_regs(&vcpu->arch.host_fpregs);
220 save_access_regs(vcpu->arch.host_acrs);
221 vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
222 restore_fp_regs(&vcpu->arch.guest_fpregs);
223 restore_access_regs(vcpu->arch.guest_acrs);
224
225 if (signal_pending(current))
226 atomic_set_mask(CPUSTAT_STOP_INT,
227 &vcpu->arch.sie_block->cpuflags);
228}
229
230void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
231{
232 save_fp_regs(&vcpu->arch.guest_fpregs);
233 save_access_regs(vcpu->arch.guest_acrs);
234 restore_fp_regs(&vcpu->arch.host_fpregs);
235 restore_access_regs(vcpu->arch.host_acrs);
236}
237
238static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
239{
240 /* this equals initial cpu reset in pop, but we don't switch to ESA */
241 vcpu->arch.sie_block->gpsw.mask = 0UL;
242 vcpu->arch.sie_block->gpsw.addr = 0UL;
243 vcpu->arch.sie_block->prefix = 0UL;
244 vcpu->arch.sie_block->ihcpu = 0xffff;
245 vcpu->arch.sie_block->cputm = 0UL;
246 vcpu->arch.sie_block->ckc = 0UL;
247 vcpu->arch.sie_block->todpr = 0;
248 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
249 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
250 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
251 vcpu->arch.guest_fpregs.fpc = 0;
252 asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
253 vcpu->arch.sie_block->gbea = 1;
254}
255
256int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
257{
258 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
259 vcpu->arch.sie_block->gmslm = 0xffffffffffUL;
260 vcpu->arch.sie_block->gmsor = 0x000000000000;
261 vcpu->arch.sie_block->ecb = 2;
262 vcpu->arch.sie_block->eca = 0xC1002001U;
263 setup_timer(&vcpu->arch.ckc_timer, kvm_s390_idle_wakeup,
264 (unsigned long) vcpu);
265 get_cpu_id(&vcpu->arch.cpu_id);
266 vcpu->arch.cpu_id.version = 0xfe;
267 return 0;
268}
269
270struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
271 unsigned int id)
272{
273 struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
274 int rc = -ENOMEM;
275
276 if (!vcpu)
277 goto out_nomem;
278
279 vcpu->arch.sie_block = (struct sie_block *) get_zeroed_page(GFP_KERNEL);
280
281 if (!vcpu->arch.sie_block)
282 goto out_free_cpu;
283
284 vcpu->arch.sie_block->icpua = id;
285 BUG_ON(!kvm->arch.sca);
286 BUG_ON(kvm->arch.sca->cpu[id].sda);
287 kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
288 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
289 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
290
291 spin_lock_init(&vcpu->arch.local_int.lock);
292 INIT_LIST_HEAD(&vcpu->arch.local_int.list);
293 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
294 spin_lock_bh(&kvm->arch.float_int.lock);
295 kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
296 init_waitqueue_head(&vcpu->arch.local_int.wq);
297 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
298 spin_unlock_bh(&kvm->arch.float_int.lock);
299
300 rc = kvm_vcpu_init(vcpu, kvm, id);
301 if (rc)
302 goto out_free_cpu;
303 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
304 vcpu->arch.sie_block);
305
306 try_module_get(THIS_MODULE);
307
308 return vcpu;
309out_free_cpu:
310 kfree(vcpu);
311out_nomem:
312 return ERR_PTR(rc);
313}
314
315void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
316{
317 VCPU_EVENT(vcpu, 3, "%s", "destroy cpu");
318 free_page((unsigned long)(vcpu->arch.sie_block));
319 kfree(vcpu);
320 module_put(THIS_MODULE);
321}
322
323int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
324{
325 /* kvm common code refers to this, but never calls it */
326 BUG();
327 return 0;
328}
329
330static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
331{
332 vcpu_load(vcpu);
333 kvm_s390_vcpu_initial_reset(vcpu);
334 vcpu_put(vcpu);
335 return 0;
336}
337
338int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
339{
340 vcpu_load(vcpu);
341 memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
342 vcpu_put(vcpu);
343 return 0;
344}
345
346int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
347{
348 vcpu_load(vcpu);
349 memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
350 vcpu_put(vcpu);
351 return 0;
352}
353
354int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
355 struct kvm_sregs *sregs)
356{
357 vcpu_load(vcpu);
358 memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
359 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
360 vcpu_put(vcpu);
361 return 0;
362}
363
364int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
365 struct kvm_sregs *sregs)
366{
367 vcpu_load(vcpu);
368 memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
369 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
370 vcpu_put(vcpu);
371 return 0;
372}
373
374int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
375{
376 vcpu_load(vcpu);
377 memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
378 vcpu->arch.guest_fpregs.fpc = fpu->fpc;
379 vcpu_put(vcpu);
380 return 0;
381}
382
383int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
384{
385 vcpu_load(vcpu);
386 memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
387 fpu->fpc = vcpu->arch.guest_fpregs.fpc;
388 vcpu_put(vcpu);
389 return 0;
390}
391
392static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
393{
394 int rc = 0;
395
396 vcpu_load(vcpu);
397 if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
398 rc = -EBUSY;
399 else
400 vcpu->arch.sie_block->gpsw = psw;
401 vcpu_put(vcpu);
402 return rc;
403}
404
405int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
406 struct kvm_translation *tr)
407{
408 return -EINVAL; /* not implemented yet */
409}
410
411int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
412 struct kvm_debug_guest *dbg)
413{
414 return -EINVAL; /* not implemented yet */
415}
416
417int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
418 struct kvm_mp_state *mp_state)
419{
420 return -EINVAL; /* not implemented yet */
421}
422
423int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
424 struct kvm_mp_state *mp_state)
425{
426 return -EINVAL; /* not implemented yet */
427}
428
429static void __vcpu_run(struct kvm_vcpu *vcpu)
430{
431 memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);
432
433 if (need_resched())
434 schedule();
435
436 vcpu->arch.sie_block->icptcode = 0;
437 local_irq_disable();
438 kvm_guest_enter();
439 local_irq_enable();
440 VCPU_EVENT(vcpu, 6, "entering sie flags %x",
441 atomic_read(&vcpu->arch.sie_block->cpuflags));
442 sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs);
443 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
444 vcpu->arch.sie_block->icptcode);
445 local_irq_disable();
446 kvm_guest_exit();
447 local_irq_enable();
448
449 memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
450}
451
452int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
453{
454 int rc;
455 sigset_t sigsaved;
456
457 vcpu_load(vcpu);
458
459 if (vcpu->sigset_active)
460 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
461
462 atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
463
464 BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
465
466 switch (kvm_run->exit_reason) {
467 case KVM_EXIT_S390_SIEIC:
468 vcpu->arch.sie_block->gpsw.mask = kvm_run->s390_sieic.mask;
469 vcpu->arch.sie_block->gpsw.addr = kvm_run->s390_sieic.addr;
470 break;
471 case KVM_EXIT_UNKNOWN:
472 case KVM_EXIT_S390_RESET:
473 break;
474 default:
475 BUG();
476 }
477
478 might_sleep();
479
480 do {
481 kvm_s390_deliver_pending_interrupts(vcpu);
482 __vcpu_run(vcpu);
483 rc = kvm_handle_sie_intercept(vcpu);
484 } while (!signal_pending(current) && !rc);
485
486 if (signal_pending(current) && !rc)
487 rc = -EINTR;
488
489 if (rc == -ENOTSUPP) {
490 /* intercept cannot be handled in-kernel, prepare kvm-run */
491 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
492 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
493 kvm_run->s390_sieic.mask = vcpu->arch.sie_block->gpsw.mask;
494 kvm_run->s390_sieic.addr = vcpu->arch.sie_block->gpsw.addr;
495 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
496 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
497 rc = 0;
498 }
499
500 if (rc == -EREMOTE) {
501 /* intercept was handled, but userspace support is needed
502 * kvm_run has been prepared by the handler */
503 rc = 0;
504 }
505
506 if (vcpu->sigset_active)
507 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
508
509 vcpu_put(vcpu);
510
511 vcpu->stat.exit_userspace++;
512 return rc;
513}
514
515static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
516 unsigned long n, int prefix)
517{
518 if (prefix)
519 return copy_to_guest(vcpu, guestdest, from, n);
520 else
521 return copy_to_guest_absolute(vcpu, guestdest, from, n);
522}
523
524/*
525 * store status at address
526 * we have two special cases:
527 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
528 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
529 */
530int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
531{
532 const unsigned char archmode = 1;
533 int prefix;
534
535 if (addr == KVM_S390_STORE_STATUS_NOADDR) {
536 if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
537 return -EFAULT;
538 addr = SAVE_AREA_BASE;
539 prefix = 0;
540 } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
541 if (copy_to_guest(vcpu, 163ul, &archmode, 1))
542 return -EFAULT;
543 addr = SAVE_AREA_BASE;
544 prefix = 1;
545 } else
546 prefix = 0;
547
548 if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, fp_regs),
549 vcpu->arch.guest_fpregs.fprs, 128, prefix))
550 return -EFAULT;
551
552 if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, gp_regs),
553 vcpu->arch.guest_gprs, 128, prefix))
554 return -EFAULT;
555
556 if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, psw),
557 &vcpu->arch.sie_block->gpsw, 16, prefix))
558 return -EFAULT;
559
560 if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, pref_reg),
561 &vcpu->arch.sie_block->prefix, 4, prefix))
562 return -EFAULT;
563
564 if (__guestcopy(vcpu,
565 addr + offsetof(struct save_area_s390x, fp_ctrl_reg),
566 &vcpu->arch.guest_fpregs.fpc, 4, prefix))
567 return -EFAULT;
568
569 if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, tod_reg),
570 &vcpu->arch.sie_block->todpr, 4, prefix))
571 return -EFAULT;
572
573 if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, timer),
574 &vcpu->arch.sie_block->cputm, 8, prefix))
575 return -EFAULT;
576
577 if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, clk_cmp),
578 &vcpu->arch.sie_block->ckc, 8, prefix))
579 return -EFAULT;
580
581 if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, acc_regs),
582 &vcpu->arch.guest_acrs, 64, prefix))
583 return -EFAULT;
584
585 if (__guestcopy(vcpu,
586 addr + offsetof(struct save_area_s390x, ctrl_regs),
587 &vcpu->arch.sie_block->gcr, 128, prefix))
588 return -EFAULT;
589 return 0;
590}
591
592static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
593{
594 int rc;
595
596 vcpu_load(vcpu);
597 rc = __kvm_s390_vcpu_store_status(vcpu, addr);
598 vcpu_put(vcpu);
599 return rc;
600}
601
602long kvm_arch_vcpu_ioctl(struct file *filp,
603 unsigned int ioctl, unsigned long arg)
604{
605 struct kvm_vcpu *vcpu = filp->private_data;
606 void __user *argp = (void __user *)arg;
607
608 switch (ioctl) {
609 case KVM_S390_INTERRUPT: {
610 struct kvm_s390_interrupt s390int;
611
612 if (copy_from_user(&s390int, argp, sizeof(s390int)))
613 return -EFAULT;
614 return kvm_s390_inject_vcpu(vcpu, &s390int);
615 }
616 case KVM_S390_STORE_STATUS:
617 return kvm_s390_vcpu_store_status(vcpu, arg);
618 case KVM_S390_SET_INITIAL_PSW: {
619 psw_t psw;
620
621 if (copy_from_user(&psw, argp, sizeof(psw)))
622 return -EFAULT;
623 return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
624 }
625 case KVM_S390_INITIAL_RESET:
626 return kvm_arch_vcpu_ioctl_initial_reset(vcpu);
627 default:
628 ;
629 }
630 return -EINVAL;
631}
632
633/* Section: memory related */
634int kvm_arch_set_memory_region(struct kvm *kvm,
635 struct kvm_userspace_memory_region *mem,
636 struct kvm_memory_slot old,
637 int user_alloc)
638{
639 /* A few sanity checks. We can have exactly one memory slot which has
640 to start at guest virtual zero and which has to be located at a
641 page boundary in userland and which has to end at a page boundary.
642 The memory in userland is ok to be fragmented into various different
643 vmas. It is okay to mmap() and munmap() stuff in this slot after
644 doing this call at any time */
645
646 if (mem->slot)
647 return -EINVAL;
648
649 if (mem->guest_phys_addr)
650 return -EINVAL;
651
652 if (mem->userspace_addr & (PAGE_SIZE - 1))
653 return -EINVAL;
654
655 if (mem->memory_size & (PAGE_SIZE - 1))
656 return -EINVAL;
657
658 kvm->arch.guest_origin = mem->userspace_addr;
659 kvm->arch.guest_memsize = mem->memory_size;
660
661 /* FIXME: we do want to interrupt running CPUs and update their memory
662 configuration now to avoid race conditions. But hey, changing the
663 memory layout while virtual CPUs are running is usually bad
664 programming practice. */
665
666 return 0;
667}
668
669gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
670{
671 return gfn;
672}
673
674static int __init kvm_s390_init(void)
675{
676 return kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
677}
678
679static void __exit kvm_s390_exit(void)
680{
681 kvm_exit();
682}
683
684module_init(kvm_s390_init);
685module_exit(kvm_s390_exit);
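
For orientation, the ioctls handled above are driven from user space through the /dev/kvm character device mentioned in the Kconfig help. A minimal, heavily abridged sketch of the call sequence, assuming the generic and s390 ioctl numbers exported in <linux/kvm.h>; error handling and the memory-slot setup are omitted, so this is not a complete launcher:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
	int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);

	/* A single memory slot starting at guest address 0 would be
	 * registered here with KVM_SET_USER_MEMORY_REGION, matching the
	 * sanity checks in kvm_arch_set_memory_region() above. */

	ioctl(vcpu, KVM_S390_INITIAL_RESET, 0);
	ioctl(vcpu, KVM_RUN, 0);	/* returns on unhandled SIE intercepts */
	return 0;
}
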
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
new file mode 100644
index 000000000000..3893cf12eacf
--- /dev/null
+++ b/arch/s390/kvm/kvm-s390.h
@@ -0,0 +1,64 @@
1/*
2 * kvm-s390.h - definition for kvm on s390
3 *
4 * Copyright IBM Corp. 2008
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 */
13
14#ifndef ARCH_S390_KVM_S390_H
15#define ARCH_S390_KVM_S390_H
16
17#include <linux/kvm.h>
18#include <linux/kvm_host.h>
19
20typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);
21
22int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu);
23
24#define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
25do { \
26 debug_sprintf_event(d_kvm->arch.dbf, d_loglevel, d_string "\n", \
27 d_args); \
28} while (0)
29
30#define VCPU_EVENT(d_vcpu, d_loglevel, d_string, d_args...)\
31do { \
32 debug_sprintf_event(d_vcpu->kvm->arch.dbf, d_loglevel, \
33 "%02d[%016lx-%016lx]: " d_string "\n", d_vcpu->vcpu_id, \
34 d_vcpu->arch.sie_block->gpsw.mask, d_vcpu->arch.sie_block->gpsw.addr,\
35 d_args); \
36} while (0)
37
38static inline int __cpu_is_stopped(struct kvm_vcpu *vcpu)
39{
40 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOP_INT;
41}
42
43int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
44void kvm_s390_idle_wakeup(unsigned long data);
45void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
46int kvm_s390_inject_vm(struct kvm *kvm,
47 struct kvm_s390_interrupt *s390int);
48int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
49 struct kvm_s390_interrupt *s390int);
50int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
51
52/* implemented in priv.c */
53int kvm_s390_handle_priv(struct kvm_vcpu *vcpu);
54
55/* implemented in sigp.c */
56int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
57
58/* implemented in kvm-s390.c */
59int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu,
60 unsigned long addr);
61/* implemented in diag.c */
62int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);
63
64#endif
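
The intercept_handler_t typedef above is the extension point the dispatch tables in intercept.c are built from. A hypothetical handler (handle_example is illustration only, not part of this patch) would follow the same shape as the existing ones: return 0 when the intercept was handled in-kernel, or -ENOTSUPP to bounce it to user space:

/* Sketch of a handler matching intercept_handler_t. */
static int handle_example(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 5, "%s", "example intercept");
	return 0;
}
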
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
new file mode 100644
index 000000000000..1465946325c5
--- /dev/null
+++ b/arch/s390/kvm/priv.c
@@ -0,0 +1,323 @@
1/*
2 * priv.c - handling privileged instructions
3 *
4 * Copyright IBM Corp. 2008
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 */
13
14#include <linux/kvm.h>
15#include <linux/errno.h>
16#include <asm/current.h>
17#include <asm/debug.h>
18#include <asm/ebcdic.h>
19#include <asm/sysinfo.h>
20#include "gaccess.h"
21#include "kvm-s390.h"
22
23static int handle_set_prefix(struct kvm_vcpu *vcpu)
24{
25 int base2 = vcpu->arch.sie_block->ipb >> 28;
26 int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
27 u64 operand2;
28 u32 address = 0;
29 u8 tmp;
30
31 vcpu->stat.instruction_spx++;
32
33 operand2 = disp2;
34 if (base2)
35 operand2 += vcpu->arch.guest_gprs[base2];
36
37 /* must be word boundary */
38 if (operand2 & 3) {
39 kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
40 goto out;
41 }
42
43 /* get the value */
44 if (get_guest_u32(vcpu, operand2, &address)) {
45 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
46 goto out;
47 }
48
49 address = address & 0x7fffe000u;
50
51 /* make sure that the new value is valid memory */
52 if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
53 (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1))) {
54 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
55 goto out;
56 }
57
58 vcpu->arch.sie_block->prefix = address;
59 vcpu->arch.sie_block->ihcpu = 0xffff;
60
61 VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
62out:
63 return 0;
64}
65
66static int handle_store_prefix(struct kvm_vcpu *vcpu)
67{
68 int base2 = vcpu->arch.sie_block->ipb >> 28;
69 int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
70 u64 operand2;
71 u32 address;
72
73 vcpu->stat.instruction_stpx++;
74 operand2 = disp2;
75 if (base2)
76 operand2 += vcpu->arch.guest_gprs[base2];
77
78 /* must be word boundary */
79 if (operand2 & 3) {
80 kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
81 goto out;
82 }
83
84 address = vcpu->arch.sie_block->prefix;
85 address = address & 0x7fffe000u;
86
87 /* get the value */
88 if (put_guest_u32(vcpu, operand2, address)) {
89 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
90 goto out;
91 }
92
93 VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
94out:
95 return 0;
96}
97
98static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
99{
100 int base2 = vcpu->arch.sie_block->ipb >> 28;
101 int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
102 u64 useraddr;
103 int rc;
104
105 vcpu->stat.instruction_stap++;
106 useraddr = disp2;
107 if (base2)
108 useraddr += vcpu->arch.guest_gprs[base2];
109
110 if (useraddr & 1) {
111 kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
112 goto out;
113 }
114
115 rc = put_guest_u16(vcpu, useraddr, vcpu->vcpu_id);
116 if (rc == -EFAULT) {
117 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
118 goto out;
119 }
120
121 VCPU_EVENT(vcpu, 5, "storing cpu address to %lx", useraddr);
122out:
123 return 0;
124}
125
126static int handle_skey(struct kvm_vcpu *vcpu)
127{
128 vcpu->stat.instruction_storage_key++;
129 vcpu->arch.sie_block->gpsw.addr -= 4;
130 VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
131 return 0;
132}
133
134static int handle_stsch(struct kvm_vcpu *vcpu)
135{
136 vcpu->stat.instruction_stsch++;
137 VCPU_EVENT(vcpu, 4, "%s", "store subchannel - CC3");
138 /* condition code 3 */
139 vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
140 vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
141 return 0;
142}
143
144static int handle_chsc(struct kvm_vcpu *vcpu)
145{
146 vcpu->stat.instruction_chsc++;
147 VCPU_EVENT(vcpu, 4, "%s", "channel subsystem call - CC3");
148 /* condition code 3 */
149 vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
150 vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
151 return 0;
152}
153
154static unsigned int kvm_stfl(void)
155{
156 asm volatile(
157 " .insn s,0xb2b10000,0(0)\n" /* stfl */
158 "0:\n"
159 EX_TABLE(0b, 0b));
160 return S390_lowcore.stfl_fac_list;
161}
162
163static int handle_stfl(struct kvm_vcpu *vcpu)
164{
165 unsigned int facility_list = kvm_stfl();
166 int rc;
167
168 vcpu->stat.instruction_stfl++;
169 facility_list &= ~(1UL<<24); /* no stfle */
170
171 rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
172 &facility_list, sizeof(facility_list));
173 if (rc == -EFAULT)
174 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
175 else
176 VCPU_EVENT(vcpu, 5, "store facility list value %x",
177 facility_list);
178 return 0;
179}
180
181static int handle_stidp(struct kvm_vcpu *vcpu)
182{
183 int base2 = vcpu->arch.sie_block->ipb >> 28;
184 int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
185 u64 operand2;
186 int rc;
187
188 vcpu->stat.instruction_stidp++;
189 operand2 = disp2;
190 if (base2)
191 operand2 += vcpu->arch.guest_gprs[base2];
192
193 if (operand2 & 7) {
194 kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
195 goto out;
196 }
197
198 rc = put_guest_u64(vcpu, operand2, vcpu->arch.stidp_data);
199 if (rc == -EFAULT) {
200 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
201 goto out;
202 }
203
204 VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
205out:
206 return 0;
207}
208
209static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
210{
211 struct float_interrupt *fi = &vcpu->kvm->arch.float_int;
212 int cpus = 0;
213 int n;
214
215 spin_lock_bh(&fi->lock);
216 for (n = 0; n < KVM_MAX_VCPUS; n++)
217 if (fi->local_int[n])
218 cpus++;
219 spin_unlock_bh(&fi->lock);
220
221 /* deal with other level 3 hypervisors */
222 if (stsi(mem, 3, 2, 2) == -ENOSYS)
223 mem->count = 0;
224 if (mem->count < 8)
225 mem->count++;
226 for (n = mem->count - 1; n > 0 ; n--)
227 memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));
228
229 mem->vm[0].cpus_total = cpus;
230 mem->vm[0].cpus_configured = cpus;
231 mem->vm[0].cpus_standby = 0;
232 mem->vm[0].cpus_reserved = 0;
233 mem->vm[0].caf = 1000;
234 memcpy(mem->vm[0].name, "KVMguest", 8);
235 ASCEBC(mem->vm[0].name, 8);
236 memcpy(mem->vm[0].cpi, "KVM/Linux ", 16);
237 ASCEBC(mem->vm[0].cpi, 16);
238}
239
240static int handle_stsi(struct kvm_vcpu *vcpu)
241{
242 int fc = (vcpu->arch.guest_gprs[0] & 0xf0000000) >> 28;
243 int sel1 = vcpu->arch.guest_gprs[0] & 0xff;
244 int sel2 = vcpu->arch.guest_gprs[1] & 0xffff;
245 int base2 = vcpu->arch.sie_block->ipb >> 28;
246 int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
247 u64 operand2;
248 unsigned long mem;
249
250 vcpu->stat.instruction_stsi++;
251 VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);
252
253 operand2 = disp2;
254 if (base2)
255 operand2 += vcpu->arch.guest_gprs[base2];
256
257 if (operand2 & 0xfff && fc > 0)
258 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
259
260 switch (fc) {
261 case 0:
262 vcpu->arch.guest_gprs[0] = 3 << 28;
263 vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
264 return 0;
265 case 1: /* same handling for 1 and 2 */
266 case 2:
267 mem = get_zeroed_page(GFP_KERNEL);
268 if (!mem)
269 goto out_fail;
270 if (stsi((void *) mem, fc, sel1, sel2) == -ENOSYS)
271 goto out_mem;
272 break;
273 case 3:
274 if (sel1 != 2 || sel2 != 2)
275 goto out_fail;
276 mem = get_zeroed_page(GFP_KERNEL);
277 if (!mem)
278 goto out_fail;
279 handle_stsi_3_2_2(vcpu, (void *) mem);
280 break;
281 default:
282 goto out_fail;
283 }
284
285 if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
286 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
287 goto out_mem;
288 }
289 free_page(mem);
290 vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
291 vcpu->arch.guest_gprs[0] = 0;
292 return 0;
293out_mem:
294 free_page(mem);
295out_fail:
296 /* condition code 3 */
297 vcpu->arch.sie_block->gpsw.mask |= 3ul << 44;
298 return 0;
299}
300
301static intercept_handler_t priv_handlers[256] = {
302 [0x02] = handle_stidp,
303 [0x10] = handle_set_prefix,
304 [0x11] = handle_store_prefix,
305 [0x12] = handle_store_cpu_address,
306 [0x29] = handle_skey,
307 [0x2a] = handle_skey,
308 [0x2b] = handle_skey,
309 [0x34] = handle_stsch,
310 [0x5f] = handle_chsc,
311 [0x7d] = handle_stsi,
312 [0xb1] = handle_stfl,
313};
314
315int kvm_s390_handle_priv(struct kvm_vcpu *vcpu)
316{
317 intercept_handler_t handler;
318
319 handler = priv_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
320 if (handler)
321 return handler(vcpu);
322 return -ENOTSUPP;
323}
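
Every handler above decodes the second operand of an S-format instruction the same way: the base register number comes from the top nibble of the SIE block's ipb field, the 12-bit displacement from the next three nibbles, and base register 0 means "no base register" rather than guest gpr 0. A minimal sketch of that common decode (illustration only, not part of the patch; the helper name is hypothetical):

/*
 * Sketch: the operand decode shared by the priv.c handlers.
 */
static u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu)
{
	int base2 = vcpu->arch.sie_block->ipb >> 28;                /* B2 field */
	int disp2 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16; /* D2 field */
	u64 addr = disp2;

	if (base2)	/* base 0 means "no base", not guest gpr 0 */
		addr += vcpu->arch.guest_gprs[base2];
	return addr;
}
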
diff --git a/arch/s390/kvm/sie64a.S b/arch/s390/kvm/sie64a.S
new file mode 100644
index 000000000000..934fd6a885f6
--- /dev/null
+++ b/arch/s390/kvm/sie64a.S
@@ -0,0 +1,47 @@
1/*
2 * sie64a.S - low level sie call
3 *
4 * Copyright IBM Corp. 2008
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
11 */
12
13#include <linux/errno.h>
14#include <asm/asm-offsets.h>
15
16SP_R5 = 5 * 8 # offset into stackframe
17SP_R6 = 6 * 8
18
19/*
20 * sie64a calling convention:
21 * %r2 pointer to sie control block
22 * %r3 guest register save area
23 */
24 .globl sie64a
25sie64a:
26 lgr %r5,%r3
27 stmg %r5,%r14,SP_R5(%r15) # save registers on entry
28 lgr %r14,%r2 # pointer to sie control block
29 lmg %r0,%r13,0(%r3) # load guest gprs 0-13
30sie_inst:
31 sie 0(%r14)
32 lg %r14,SP_R5(%r15)
33 stmg %r0,%r13,0(%r14) # save guest gprs 0-13
34 lghi %r2,0
35 lmg %r6,%r14,SP_R6(%r15)
36 br %r14
37
38sie_err:
39 lg %r14,SP_R5(%r15)
40 stmg %r0,%r13,0(%r14) # save guest gprs 0-13
41 lghi %r2,-EFAULT
42 lmg %r6,%r14,SP_R6(%r15)
43 br %r14
44
45 .section __ex_table,"a"
46 .quad sie_inst,sie_err
47 .previous
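
The __ex_table entry pairs the address of the sie instruction with sie_err, so a fault raised while entering SIE lands in the fixup path and the function returns -EFAULT instead of oopsing; the normal path returns 0. A hedged sketch of how a caller might use this, assuming the sie_block and guest_gprs fields seen elsewhere in this series:

/* Sketch only: the sie64a calling convention as seen from C. */
extern int sie64a(struct kvm_s390_sie_block *sie_block, unsigned long *gprs);

static int run_guest_once(struct kvm_vcpu *vcpu)
{
	int rc;

	rc = sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs);
	return rc;	/* 0 = interception to handle, -EFAULT = SIE faulted */
}
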
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
new file mode 100644
index 000000000000..0a236acfb5f6
--- /dev/null
+++ b/arch/s390/kvm/sigp.c
@@ -0,0 +1,288 @@
1/*
2 * sigp.c - handling interprocessor communication
3 *
4 * Copyright IBM Corp. 2008
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 */
13
14#include <linux/kvm.h>
15#include <linux/kvm_host.h>
16#include "gaccess.h"
17#include "kvm-s390.h"
18
19/* sigp order codes */
20#define SIGP_SENSE 0x01
21#define SIGP_EXTERNAL_CALL 0x02
22#define SIGP_EMERGENCY 0x03
23#define SIGP_START 0x04
24#define SIGP_STOP 0x05
25#define SIGP_RESTART 0x06
26#define SIGP_STOP_STORE_STATUS 0x09
27#define SIGP_INITIAL_CPU_RESET 0x0b
28#define SIGP_CPU_RESET 0x0c
29#define SIGP_SET_PREFIX 0x0d
30#define SIGP_STORE_STATUS_ADDR 0x0e
31#define SIGP_SET_ARCH 0x12
32
33/* cpu status bits */
34#define SIGP_STAT_EQUIPMENT_CHECK 0x80000000UL
35#define SIGP_STAT_INCORRECT_STATE 0x00000200UL
36#define SIGP_STAT_INVALID_PARAMETER 0x00000100UL
37#define SIGP_STAT_EXT_CALL_PENDING 0x00000080UL
38#define SIGP_STAT_STOPPED 0x00000040UL
39#define SIGP_STAT_OPERATOR_INTERV 0x00000020UL
40#define SIGP_STAT_CHECK_STOP 0x00000010UL
41#define SIGP_STAT_INOPERATIVE 0x00000004UL
42#define SIGP_STAT_INVALID_ORDER 0x00000002UL
43#define SIGP_STAT_RECEIVER_CHECK 0x00000001UL
44
45
46static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr, u64 *reg)
47{
48 struct float_interrupt *fi = &vcpu->kvm->arch.float_int;
49 int rc;
50
51 if (cpu_addr >= KVM_MAX_VCPUS)
52 return 3; /* not operational */
53
54 spin_lock_bh(&fi->lock);
55 if (fi->local_int[cpu_addr] == NULL)
56 rc = 3; /* not operational */
57 else if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
58 & CPUSTAT_RUNNING) {
59 *reg &= 0xffffffff00000000UL;
60 rc = 1; /* status stored */
61 } else {
62 *reg &= 0xffffffff00000000UL;
63 *reg |= SIGP_STAT_STOPPED;
64 rc = 1; /* status stored */
65 }
66 spin_unlock_bh(&fi->lock);
67
68 VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
69 return rc;
70}
71
72static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
73{
74 struct float_interrupt *fi = &vcpu->kvm->arch.float_int;
75 struct local_interrupt *li;
76 struct interrupt_info *inti;
77 int rc;
78
79 if (cpu_addr >= KVM_MAX_VCPUS)
80 return 3; /* not operational */
81
82 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
83 if (!inti)
84 return -ENOMEM;
85
86 inti->type = KVM_S390_INT_EMERGENCY;
87
88 spin_lock_bh(&fi->lock);
89 li = fi->local_int[cpu_addr];
90 if (li == NULL) {
91 rc = 3; /* not operational */
92 kfree(inti);
93 goto unlock;
94 }
95 spin_lock_bh(&li->lock);
96 list_add_tail(&inti->list, &li->list);
97 atomic_set(&li->active, 1);
98 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
99 if (waitqueue_active(&li->wq))
100 wake_up_interruptible(&li->wq);
101 spin_unlock_bh(&li->lock);
102 rc = 0; /* order accepted */
103unlock:
104 spin_unlock_bh(&fi->lock);
105 VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
106 return rc;
107}
108
109static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int store)
110{
111 struct float_interrupt *fi = &vcpu->kvm->arch.float_int;
112 struct local_interrupt *li;
113 struct interrupt_info *inti;
114 int rc;
115
116 if (cpu_addr >= KVM_MAX_VCPUS)
117 return 3; /* not operational */
118
119 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
120 if (!inti)
121 return -ENOMEM;
122
123 inti->type = KVM_S390_SIGP_STOP;
124
125 spin_lock_bh(&fi->lock);
126 li = fi->local_int[cpu_addr];
127 if (li == NULL) {
128 rc = 3; /* not operational */
129 kfree(inti);
130 goto unlock;
131 }
132 spin_lock_bh(&li->lock);
133 list_add_tail(&inti->list, &li->list);
134 atomic_set(&li->active, 1);
135 atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
136 if (store)
137 li->action_bits |= ACTION_STORE_ON_STOP;
138 li->action_bits |= ACTION_STOP_ON_STOP;
139 if (waitqueue_active(&li->wq))
140 wake_up_interruptible(&li->wq);
141 spin_unlock_bh(&li->lock);
142 rc = 0; /* order accepted */
143unlock:
144 spin_unlock_bh(&fi->lock);
145 VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
146 return rc;
147}
148
149static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
150{
151 int rc;
152
153 switch (parameter & 0xff) {
154 case 0:
155 printk(KERN_WARNING "kvm: request to switch to ESA/390 mode"
156 " not supported\n");
157 rc = 3; /* not operational */
158 break;
159 case 1:
160 case 2:
161 rc = 0; /* order accepted */
162 break;
163 default:
164 rc = -ENOTSUPP;
165 }
166 return rc;
167}
168
169static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
170 u64 *reg)
171{
172 struct float_interrupt *fi = &vcpu->kvm->arch.float_int;
173 struct local_interrupt *li;
174 struct interrupt_info *inti;
175 int rc;
176 u8 tmp;
177
178 /* make sure that the new value is valid memory */
179 address = address & 0x7fffe000u;
180 if ((copy_from_guest(vcpu, &tmp,
181 (u64) (address + vcpu->kvm->arch.guest_origin) , 1)) ||
182 (copy_from_guest(vcpu, &tmp, (u64) (address +
183 vcpu->kvm->arch.guest_origin + PAGE_SIZE), 1))) {
184 *reg |= SIGP_STAT_INVALID_PARAMETER;
185 return 1; /* invalid parameter */
186 }
187
188 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
189 if (!inti)
190 return 2; /* busy */
191
192 spin_lock_bh(&fi->lock);
193 li = fi->local_int[cpu_addr];
194
195 if ((cpu_addr >= KVM_MAX_VCPUS) || (li == NULL)) {
196 rc = 1; /* incorrect state */
197 *reg |= SIGP_STAT_INCORRECT_STATE;
198 kfree(inti);
199 goto out_fi;
200 }
201
202 spin_lock_bh(&li->lock);
203 /* cpu must be in stopped state */
204 if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
205 rc = 1; /* incorrect state */
206 *reg |= SIGP_STAT_INCORRECT_STATE;
207 kfree(inti);
208 goto out_li;
209 }
210
211 inti->type = KVM_S390_SIGP_SET_PREFIX;
212 inti->prefix.address = address;
213
214 list_add_tail(&inti->list, &li->list);
215 atomic_set(&li->active, 1);
216 if (waitqueue_active(&li->wq))
217 wake_up_interruptible(&li->wq);
218 rc = 0; /* order accepted */
219
220 VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
221out_li:
222 spin_unlock_bh(&li->lock);
223out_fi:
224 spin_unlock_bh(&fi->lock);
225 return rc;
226}
227
228int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
229{
230 int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
231 int r3 = vcpu->arch.sie_block->ipa & 0x000f;
232 int base2 = vcpu->arch.sie_block->ipb >> 28;
233 int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
234 u32 parameter;
235 u16 cpu_addr = vcpu->arch.guest_gprs[r3];
236 u8 order_code;
237 int rc;
238
239 order_code = disp2;
240 if (base2)
241 order_code += vcpu->arch.guest_gprs[base2];
242
243 if (r1 % 2)
244 parameter = vcpu->arch.guest_gprs[r1];
245 else
246 parameter = vcpu->arch.guest_gprs[r1 + 1];
247
248 switch (order_code) {
249 case SIGP_SENSE:
250 vcpu->stat.instruction_sigp_sense++;
251 rc = __sigp_sense(vcpu, cpu_addr,
252 &vcpu->arch.guest_gprs[r1]);
253 break;
254 case SIGP_EMERGENCY:
255 vcpu->stat.instruction_sigp_emergency++;
256 rc = __sigp_emergency(vcpu, cpu_addr);
257 break;
258 case SIGP_STOP:
259 vcpu->stat.instruction_sigp_stop++;
260 rc = __sigp_stop(vcpu, cpu_addr, 0);
261 break;
262 case SIGP_STOP_STORE_STATUS:
263 vcpu->stat.instruction_sigp_stop++;
264 rc = __sigp_stop(vcpu, cpu_addr, 1);
265 break;
266 case SIGP_SET_ARCH:
267 vcpu->stat.instruction_sigp_arch++;
268 rc = __sigp_set_arch(vcpu, parameter);
269 break;
270 case SIGP_SET_PREFIX:
271 vcpu->stat.instruction_sigp_prefix++;
272 rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
273 &vcpu->arch.guest_gprs[r1]);
274 break;
275 case SIGP_RESTART:
276 vcpu->stat.instruction_sigp_restart++;
277 /* user space must know about restart */
278 default:
279 return -ENOTSUPP;
280 }
281
282 if (rc < 0)
283 return rc;
284
285 vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
286 vcpu->arch.sie_block->gpsw.mask |= (rc & 3ul) << 44;
287 return 0;
288}
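
The last two statements fold the handler's result into the guest PSW: on s390 the condition code lives in bits 44-45 of the PSW mask, so the code clears those two bits and ors in the low two bits of rc. As a worked illustration (sketch, not from the patch):

/* Store condition code cc (0-3) into the guest PSW mask. */
static void set_guest_cc(struct kvm_vcpu *vcpu, int cc)
{
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->arch.sie_block->gpsw.mask |= (cc & 3ul) << 44;
}
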
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index fd072013f88c..5c1aea97cd12 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -30,11 +30,27 @@
30#define TABLES_PER_PAGE 4 30#define TABLES_PER_PAGE 4
31#define FRAG_MASK 15UL 31#define FRAG_MASK 15UL
32#define SECOND_HALVES 10UL 32#define SECOND_HALVES 10UL
33
34void clear_table_pgstes(unsigned long *table)
35{
36 clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
37 memset(table + 256, 0, PAGE_SIZE/4);
38 clear_table(table + 512, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
39 memset(table + 768, 0, PAGE_SIZE/4);
40}
41
33#else 42#else
34#define ALLOC_ORDER 2 43#define ALLOC_ORDER 2
35#define TABLES_PER_PAGE 2 44#define TABLES_PER_PAGE 2
36#define FRAG_MASK 3UL 45#define FRAG_MASK 3UL
37#define SECOND_HALVES 2UL 46#define SECOND_HALVES 2UL
47
48void clear_table_pgstes(unsigned long *table)
49{
50 clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
51 memset(table + 256, 0, PAGE_SIZE/2);
52}
53
38#endif 54#endif
39 55
40unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec) 56unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
@@ -153,7 +169,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
153 unsigned long *table; 169 unsigned long *table;
154 unsigned long bits; 170 unsigned long bits;
155 171
156 bits = mm->context.noexec ? 3UL : 1UL; 172 bits = (mm->context.noexec || mm->context.pgstes) ? 3UL : 1UL;
157 spin_lock(&mm->page_table_lock); 173 spin_lock(&mm->page_table_lock);
158 page = NULL; 174 page = NULL;
159 if (!list_empty(&mm->context.pgtable_list)) { 175 if (!list_empty(&mm->context.pgtable_list)) {
@@ -170,7 +186,10 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
170 pgtable_page_ctor(page); 186 pgtable_page_ctor(page);
171 page->flags &= ~FRAG_MASK; 187 page->flags &= ~FRAG_MASK;
172 table = (unsigned long *) page_to_phys(page); 188 table = (unsigned long *) page_to_phys(page);
173 clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE); 189 if (mm->context.pgstes)
190 clear_table_pgstes(table);
191 else
192 clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
174 spin_lock(&mm->page_table_lock); 193 spin_lock(&mm->page_table_lock);
175 list_add(&page->lru, &mm->context.pgtable_list); 194 list_add(&page->lru, &mm->context.pgtable_list);
176 } 195 }
@@ -191,7 +210,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
191 struct page *page; 210 struct page *page;
192 unsigned long bits; 211 unsigned long bits;
193 212
194 bits = mm->context.noexec ? 3UL : 1UL; 213 bits = (mm->context.noexec || mm->context.pgstes) ? 3UL : 1UL;
195 bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long); 214 bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
196 page = pfn_to_page(__pa(table) >> PAGE_SHIFT); 215 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
197 spin_lock(&mm->page_table_lock); 216 spin_lock(&mm->page_table_lock);
@@ -228,3 +247,43 @@ void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
228 mm->context.noexec = 0; 247 mm->context.noexec = 0;
229 update_mm(mm, tsk); 248 update_mm(mm, tsk);
230} 249}
250
251/*
252 * switch on pgstes for the current userspace process (for kvm)
253 */
254int s390_enable_sie(void)
255{
256 struct task_struct *tsk = current;
257 struct mm_struct *mm;
258 int rc;
259
260 task_lock(tsk);
261
262 rc = 0;
263 if (tsk->mm->context.pgstes)
264 goto unlock;
265
266 rc = -EINVAL;
267 if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
268 tsk->mm != tsk->active_mm || tsk->mm->ioctx_list)
269 goto unlock;
270
271 tsk->mm->context.pgstes = 1; /* dirty little tricks .. */
272 mm = dup_mm(tsk);
273 tsk->mm->context.pgstes = 0;
274
275 rc = -ENOMEM;
276 if (!mm)
277 goto unlock;
278 mmput(tsk->mm);
279 tsk->mm = tsk->active_mm = mm;
280 preempt_disable();
281 update_mm(mm, tsk);
282 cpu_set(smp_processor_id(), mm->cpu_vm_mask);
283 preempt_enable();
284 rc = 0;
285unlock:
286 task_unlock(tsk);
287 return rc;
288}
289EXPORT_SYMBOL_GPL(s390_enable_sie);
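
With pgstes enabled, page-table pages change layout: clear_table_pgstes() initializes each page as PTEs followed by an equally sized block of page-table status extensions, which is why page_table_alloc() and page_table_free() switch to the "whole page" fragment mask. On the 64-bit layout that places the pgste of a pte exactly 256 entries (2 KB) behind it; a sketch with a hypothetical helper name:

/*
 * Sketch only, assuming the 64-bit clear_table_pgstes() layout:
 * 256 ptes at offset 0, 256 pgstes at offset PAGE_SIZE/2.
 */
static unsigned long *pgste_of(unsigned long *pte)
{
	return pte + 256;	/* (PAGE_SIZE / 2) / sizeof(unsigned long) */
}
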
diff --git a/arch/um/Kconfig.x86_64 b/arch/um/Kconfig.x86_64
index 3fbe69e359ed..5696e7b374b3 100644
--- a/arch/um/Kconfig.x86_64
+++ b/arch/um/Kconfig.x86_64
@@ -1,3 +1,10 @@
1
2menu "Host processor type and features"
3
4source "arch/x86/Kconfig.cpu"
5
6endmenu
7
1config UML_X86 8config UML_X86
2 bool 9 bool
3 default y 10 default y
diff --git a/arch/um/os-Linux/helper.c b/arch/um/os-Linux/helper.c
index f4bd349d4412..f25c29a12d00 100644
--- a/arch/um/os-Linux/helper.c
+++ b/arch/um/os-Linux/helper.c
@@ -14,6 +14,7 @@
14#include "os.h" 14#include "os.h"
15#include "um_malloc.h" 15#include "um_malloc.h"
16#include "user.h" 16#include "user.h"
17#include <linux/limits.h>
17 18
18struct helper_data { 19struct helper_data {
19 void (*pre_exec)(void*); 20 void (*pre_exec)(void*);
diff --git a/arch/um/sys-i386/Makefile b/arch/um/sys-i386/Makefile
index 964dc1a04c37..598b5c1903af 100644
--- a/arch/um/sys-i386/Makefile
+++ b/arch/um/sys-i386/Makefile
@@ -6,7 +6,7 @@ obj-y = bug.o bugs.o checksum.o delay.o fault.o ksyms.o ldt.o ptrace.o \
6 ptrace_user.o setjmp.o signal.o stub.o stub_segv.o syscalls.o sysrq.o \ 6 ptrace_user.o setjmp.o signal.o stub.o stub_segv.o syscalls.o sysrq.o \
7 sys_call_table.o tls.o 7 sys_call_table.o tls.o
8 8
9subarch-obj-y = lib/bitops_32.o lib/semaphore_32.o lib/string_32.o 9subarch-obj-y = lib/semaphore_32.o lib/string_32.o
10subarch-obj-$(CONFIG_HIGHMEM) += mm/highmem_32.o 10subarch-obj-$(CONFIG_HIGHMEM) += mm/highmem_32.o
11subarch-obj-$(CONFIG_MODULES) += kernel/module_32.o 11subarch-obj-$(CONFIG_MODULES) += kernel/module_32.o
12 12
diff --git a/arch/um/sys-x86_64/Makefile b/arch/um/sys-x86_64/Makefile
index 3c22de532088..c8b4cce9cfe1 100644
--- a/arch/um/sys-x86_64/Makefile
+++ b/arch/um/sys-x86_64/Makefile
@@ -10,7 +10,7 @@ obj-y = bug.o bugs.o delay.o fault.o ldt.o mem.o ptrace.o ptrace_user.o \
10 10
11obj-$(CONFIG_MODULES) += um_module.o 11obj-$(CONFIG_MODULES) += um_module.o
12 12
13subarch-obj-y = lib/bitops_64.o lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o 13subarch-obj-y = lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o
14subarch-obj-$(CONFIG_MODULES) += kernel/module_64.o 14subarch-obj-$(CONFIG_MODULES) += kernel/module_64.o
15 15
16ldt-y = ../sys-i386/ldt.o 16ldt-y = ../sys-i386/ldt.o
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 4d350b5cbc71..e5790fe9e330 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -142,6 +142,9 @@ config AUDIT_ARCH
142config ARCH_SUPPORTS_AOUT 142config ARCH_SUPPORTS_AOUT
143 def_bool y 143 def_bool y
144 144
145config ARCH_SUPPORTS_OPTIMIZED_INLINING
146 def_bool y
147
145# Use the generic interrupt handling code in kernel/irq/: 148# Use the generic interrupt handling code in kernel/irq/:
146config GENERIC_HARDIRQS 149config GENERIC_HARDIRQS
147 bool 150 bool
@@ -370,6 +373,25 @@ config VMI
370 at the moment), by linking the kernel to a GPL-ed ROM module 373 at the moment), by linking the kernel to a GPL-ed ROM module
371 provided by the hypervisor. 374 provided by the hypervisor.
372 375
376config KVM_CLOCK
377 bool "KVM paravirtualized clock"
378 select PARAVIRT
379 depends on !(X86_VISWS || X86_VOYAGER)
380 help
381 Turning on this option allows you to use a paravirtualized clock
382 when running over the KVM hypervisor. Instead of relying on a PIT
383 (or other timer) emulated by the underlying device model, the host
384 provides the guest with timing infrastructure such as time of day
385 and system time.
386
387config KVM_GUEST
388 bool "KVM Guest support"
389 select PARAVIRT
390 depends on !(X86_VISWS || X86_VOYAGER)
391 help
392 This option enables various optimizations for running under the KVM
393 hypervisor.
394
373source "arch/x86/lguest/Kconfig" 395source "arch/x86/lguest/Kconfig"
374 396
375config PARAVIRT 397config PARAVIRT
@@ -1049,9 +1071,9 @@ config MTRR
1049 See <file:Documentation/mtrr.txt> for more information. 1071 See <file:Documentation/mtrr.txt> for more information.
1050 1072
1051config X86_PAT 1073config X86_PAT
1052 def_bool y 1074 bool
1053 prompt "x86 PAT support" 1075 prompt "x86 PAT support"
1054 depends on MTRR && NONPROMISC_DEVMEM 1076 depends on MTRR
1055 help 1077 help
1056 Use PAT attributes to setup page level cache control. 1078 Use PAT attributes to setup page level cache control.
1057 1079
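
Both new options only do something when the kernel actually runs under KVM; the guest detects this through the hypervisor CPUID leaf. A hedged sketch of the detection (the kernel-side check is kvm_para_available(); the helper name below is illustrative):

/*
 * Sketch only, assuming the KVM CPUID signature interface: leaf
 * 0x40000000 returns the hypervisor signature in ebx/ecx/edx,
 * "KVMKVMKVM\0\0\0" for KVM.
 */
#include <linux/string.h>
#include <asm/processor.h>

static inline int running_on_kvm(void)
{
	unsigned int eax, ebx, ecx, edx;
	char sig[13];

	cpuid(0x40000000, &eax, &ebx, &ecx, &edx);
	memcpy(sig + 0, &ebx, 4);
	memcpy(sig + 4, &ecx, 4);
	memcpy(sig + 8, &edx, 4);
	sig[12] = 0;

	return strcmp(sig, "KVMKVMKVM") == 0;
}
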
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 57072f2716f9..7ef18b01f0bc 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -21,8 +21,8 @@ config M386
21 21
22 Here are the settings recommended for greatest speed: 22 Here are the settings recommended for greatest speed:
23 - "386" for the AMD/Cyrix/Intel 386DX/DXL/SL/SLC/SX, Cyrix/TI 23 - "386" for the AMD/Cyrix/Intel 386DX/DXL/SL/SLC/SX, Cyrix/TI
24 486DLC/DLC2, UMC 486SX-S and NexGen Nx586. Only "386" kernels 24 486DLC/DLC2, and UMC 486SX-S. Only "386" kernels will run on a 386
25 will run on a 386 class machine. 25 class machine.
26 - "486" for the AMD/Cyrix/IBM/Intel 486DX/DX2/DX4 or 26 - "486" for the AMD/Cyrix/IBM/Intel 486DX/DX2/DX4 or
27 SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or U5S. 27 SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or U5S.
28 - "586" for generic Pentium CPUs lacking the TSC 28 - "586" for generic Pentium CPUs lacking the TSC
@@ -278,6 +278,11 @@ config GENERIC_CPU
278 278
279endchoice 279endchoice
280 280
281config X86_CPU
282 def_bool y
283 select GENERIC_FIND_FIRST_BIT
284 select GENERIC_FIND_NEXT_BIT
285
281config X86_GENERIC 286config X86_GENERIC
282 bool "Generic x86 support" 287 bool "Generic x86 support"
283 depends on X86_32 288 depends on X86_32
@@ -398,7 +403,7 @@ config X86_TSC
398# generates cmov. 403# generates cmov.
399config X86_CMOV 404config X86_CMOV
400 def_bool y 405 def_bool y
401 depends on (MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7) 406 depends on (MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || X86_64)
402 407
403config X86_MINIMUM_CPU_FAMILY 408config X86_MINIMUM_CPU_FAMILY
404 int 409 int
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 239fd9fba0a5..5b1979a45a1e 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -257,3 +257,16 @@ config CPA_DEBUG
257 Do change_page_attr() self-tests every 30 seconds. 257 Do change_page_attr() self-tests every 30 seconds.
258 258
259endmenu 259endmenu
260
261config OPTIMIZE_INLINING
262 bool "Allow gcc to uninline functions marked 'inline'"
263 default y
264 help
265 This option determines if the kernel forces gcc to inline the functions
266 developers have marked 'inline'. Doing so takes away freedom from gcc to
267 do what it thinks is best, which is desirable for the gcc 3.x series of
268 compilers. The gcc 4.x series have a rewritten inlining algorithm, and
269 enabling this option will generate a smaller kernel there. Hopefully
270 this algorithm is good enough that allowing gcc 4.x to make the decision
271 can become the default in the future; until then, this option is here to
272 test gcc for this.
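
The option itself only sets a config symbol; the mechanism lives in the compiler headers, where 'inline' stops being forced to always_inline once both ARCH_SUPPORTS_OPTIMIZED_INLINING and OPTIMIZE_INLINING are set. Roughly (a sketch of the header logic, not quoted from this patch):

#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
    !defined(CONFIG_OPTIMIZE_INLINING)
/* force gcc to inline anything marked 'inline' */
#define inline		inline		__attribute__((always_inline))
#define __inline__	__inline__	__attribute__((always_inline))
#define __inline	__inline	__attribute__((always_inline))
#endif
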
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index 6d2df8d61c54..af86e431acfa 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -120,7 +120,7 @@ _start:
120 # Part 2 of the header, from the old setup.S 120 # Part 2 of the header, from the old setup.S
121 121
122 .ascii "HdrS" # header signature 122 .ascii "HdrS" # header signature
123 .word 0x0208 # header version number (>= 0x0105) 123 .word 0x0209 # header version number (>= 0x0105)
124 # or else old loadlin-1.5 will fail) 124 # or else old loadlin-1.5 will fail)
125 .globl realmode_swtch 125 .globl realmode_swtch
126realmode_swtch: .word 0, 0 # default_switch, SETUPSEG 126realmode_swtch: .word 0, 0 # default_switch, SETUPSEG
@@ -227,6 +227,10 @@ hardware_subarch_data: .quad 0
227payload_offset: .long input_data 227payload_offset: .long input_data
228payload_length: .long input_data_end-input_data 228payload_length: .long input_data_end-input_data
229 229
230setup_data: .quad 0 # 64-bit physical pointer to
231 # single linked list of
232 # struct setup_data
233
230# End of setup header ##################################################### 234# End of setup header #####################################################
231 235
232 .section ".inittext", "ax" 236 .section ".inittext", "ax"
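
Bumping the header version to 0x0209 advertises the new setup_data field: a 64-bit physical pointer to a singly linked list of extra boot data blobs. Each list node is a small header followed by its payload, along these lines (matching the struct used by the consumers later in this merge):

#include <linux/types.h>

struct setup_data {
	__u64 next;	/* physical address of the next node, 0 ends the list */
	__u32 type;
	__u32 len;	/* length of data[] in bytes */
	__u8  data[0];
};
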
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index 3df340b54e57..ad7ddaaff588 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -1421,6 +1421,7 @@ CONFIG_DEBUG_BUGVERBOSE=y
1421# CONFIG_DEBUG_VM is not set 1421# CONFIG_DEBUG_VM is not set
1422# CONFIG_DEBUG_LIST is not set 1422# CONFIG_DEBUG_LIST is not set
1423# CONFIG_FRAME_POINTER is not set 1423# CONFIG_FRAME_POINTER is not set
1424CONFIG_OPTIMIZE_INLINING=y
1424# CONFIG_RCU_TORTURE_TEST is not set 1425# CONFIG_RCU_TORTURE_TEST is not set
1425# CONFIG_LKDTM is not set 1426# CONFIG_LKDTM is not set
1426# CONFIG_FAULT_INJECTION is not set 1427# CONFIG_FAULT_INJECTION is not set
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index eef98cb00c62..2d6f5b2809d2 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -1346,6 +1346,7 @@ CONFIG_DEBUG_BUGVERBOSE=y
1346# CONFIG_DEBUG_VM is not set 1346# CONFIG_DEBUG_VM is not set
1347# CONFIG_DEBUG_LIST is not set 1347# CONFIG_DEBUG_LIST is not set
1348# CONFIG_FRAME_POINTER is not set 1348# CONFIG_FRAME_POINTER is not set
1349CONFIG_OPTIMIZE_INLINING=y
1349# CONFIG_RCU_TORTURE_TEST is not set 1350# CONFIG_RCU_TORTURE_TEST is not set
1350# CONFIG_LKDTM is not set 1351# CONFIG_LKDTM is not set
1351# CONFIG_FAULT_INJECTION is not set 1352# CONFIG_FAULT_INJECTION is not set
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index 05e155d3fb6c..bbed3a26ce55 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -499,11 +499,6 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
499 regs->cs = __USER32_CS; 499 regs->cs = __USER32_CS;
500 regs->ss = __USER32_DS; 500 regs->ss = __USER32_DS;
501 501
502 set_fs(USER_DS);
503 regs->flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_DF);
504 if (test_thread_flag(TIF_SINGLESTEP))
505 ptrace_notify(SIGTRAP);
506
507#if DEBUG_SIG 502#if DEBUG_SIG
508 printk(KERN_DEBUG "SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n", 503 printk(KERN_DEBUG "SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n",
509 current->comm, current->pid, frame, regs->ip, frame->pretcode); 504 current->comm, current->pid, frame, regs->ip, frame->pretcode);
@@ -599,11 +594,6 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
599 regs->cs = __USER32_CS; 594 regs->cs = __USER32_CS;
600 regs->ss = __USER32_DS; 595 regs->ss = __USER32_DS;
601 596
602 set_fs(USER_DS);
603 regs->flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_DF);
604 if (test_thread_flag(TIF_SINGLESTEP))
605 ptrace_notify(SIGTRAP);
606
607#if DEBUG_SIG 597#if DEBUG_SIG
608 printk(KERN_DEBUG "SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n", 598 printk(KERN_DEBUG "SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n",
609 current->comm, current->pid, frame, regs->ip, frame->pretcode); 599 current->comm, current->pid, frame, regs->ip, frame->pretcode);
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index ae7158bce4d6..b5e329da166c 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -430,7 +430,7 @@ ia32_sys_call_table:
430 .quad sys_setuid16 430 .quad sys_setuid16
431 .quad sys_getuid16 431 .quad sys_getuid16
432 .quad compat_sys_stime /* stime */ /* 25 */ 432 .quad compat_sys_stime /* stime */ /* 25 */
433 .quad sys32_ptrace /* ptrace */ 433 .quad compat_sys_ptrace /* ptrace */
434 .quad sys_alarm 434 .quad sys_alarm
435 .quad sys_fstat /* (old)fstat */ 435 .quad sys_fstat /* (old)fstat */
436 .quad sys_pause 436 .quad sys_pause
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 90e092d0af0c..fa19c3819540 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -80,6 +80,8 @@ obj-$(CONFIG_DEBUG_RODATA_TEST) += test_rodata.o
80obj-$(CONFIG_DEBUG_NX_TEST) += test_nx.o 80obj-$(CONFIG_DEBUG_NX_TEST) += test_nx.o
81 81
82obj-$(CONFIG_VMI) += vmi_32.o vmiclock_32.o 82obj-$(CONFIG_VMI) += vmi_32.o vmiclock_32.o
83obj-$(CONFIG_KVM_GUEST) += kvm.o
84obj-$(CONFIG_KVM_CLOCK) += kvmclock.o
83obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt_patch_$(BITS).o 85obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt_patch_$(BITS).o
84 86
85ifdef CONFIG_INPUT_PCSPKR 87ifdef CONFIG_INPUT_PCSPKR
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 057ccf1d5ad4..977ed5cdeaa3 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -697,10 +697,6 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
697#define HPET_RESOURCE_NAME_SIZE 9 697#define HPET_RESOURCE_NAME_SIZE 9
698 hpet_res = alloc_bootmem(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE); 698 hpet_res = alloc_bootmem(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE);
699 699
700 if (!hpet_res)
701 return 0;
702
703 memset(hpet_res, 0, sizeof(*hpet_res));
704 hpet_res->name = (void *)&hpet_res[1]; 700 hpet_res->name = (void *)&hpet_res[1];
705 hpet_res->flags = IORESOURCE_MEM; 701 hpet_res->flags = IORESOURCE_MEM;
706 snprintf((char *)hpet_res->name, HPET_RESOURCE_NAME_SIZE, "HPET %u", 702 snprintf((char *)hpet_res->name, HPET_RESOURCE_NAME_SIZE, "HPET %u",
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c
index 8317401170b8..4b99b1bdeb6c 100644
--- a/arch/x86/kernel/apic_32.c
+++ b/arch/x86/kernel/apic_32.c
@@ -451,7 +451,8 @@ void __init setup_boot_APIC_clock(void)
451 } 451 }
452 452
453 /* Calculate the scaled math multiplication factor */ 453 /* Calculate the scaled math multiplication factor */
454 lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS, 32); 454 lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS,
455 lapic_clockevent.shift);
455 lapic_clockevent.max_delta_ns = 456 lapic_clockevent.max_delta_ns =
456 clockevent_delta2ns(0x7FFFFF, &lapic_clockevent); 457 clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
457 lapic_clockevent.min_delta_ns = 458 lapic_clockevent.min_delta_ns =
diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
index bf83157337e4..5910020c3f24 100644
--- a/arch/x86/kernel/apic_64.c
+++ b/arch/x86/kernel/apic_64.c
@@ -360,7 +360,8 @@ static void __init calibrate_APIC_clock(void)
360 result / 1000 / 1000, result / 1000 % 1000); 360 result / 1000 / 1000, result / 1000 % 1000);
361 361
362 /* Calculate the scaled math multiplication factor */ 362 /* Calculate the scaled math multiplication factor */
363 lapic_clockevent.mult = div_sc(result, NSEC_PER_SEC, 32); 363 lapic_clockevent.mult = div_sc(result, NSEC_PER_SEC,
364 lapic_clockevent.shift);
364 lapic_clockevent.max_delta_ns = 365 lapic_clockevent.max_delta_ns =
365 clockevent_delta2ns(0x7FFFFF, &lapic_clockevent); 366 clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
366 lapic_clockevent.min_delta_ns = 367 lapic_clockevent.min_delta_ns =
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index f0030a0999c7..e4ea362e8480 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -904,6 +904,7 @@ recalc:
904 original_pm_idle(); 904 original_pm_idle();
905 else 905 else
906 default_idle(); 906 default_idle();
907 local_irq_disable();
907 jiffies_since_last_check = jiffies - last_jiffies; 908 jiffies_since_last_check = jiffies - last_jiffies;
908 if (jiffies_since_last_check > idle_period) 909 if (jiffies_since_last_check > idle_period)
909 goto recalc; 910 goto recalc;
@@ -911,6 +912,8 @@ recalc:
911 912
912 if (apm_idle_done) 913 if (apm_idle_done)
913 apm_do_busy(); 914 apm_do_busy();
915
916 local_irq_enable();
914} 917}
915 918
916/** 919/**
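
The two added lines restore the idle-loop interrupt contract: default_idle() and the APM BIOS idle call return with interrupts enabled, so the recalc loop must disable them again before re-reading jiffies, and the function enables them once on the way out. A hedged sketch of the shape of that contract (assuming the pm_idle conventions of this kernel):

/* Sketch: bookkeeping with IRQs off, halt and exit with IRQs on. */
static void apm_idle_step(void)
{
	default_idle();		/* halts; returns with IRQs enabled */
	local_irq_disable();	/* bookkeeping below wants IRQs off */
	/* ... re-check jiffies_since_last_check here ... */
	local_irq_enable();	/* callers expect IRQs on again */
}
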
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index ee7c45235e54..a0c6f8190887 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -11,7 +11,6 @@ obj-$(CONFIG_X86_32) += cyrix.o
11obj-$(CONFIG_X86_32) += centaur.o 11obj-$(CONFIG_X86_32) += centaur.o
12obj-$(CONFIG_X86_32) += transmeta.o 12obj-$(CONFIG_X86_32) += transmeta.o
13obj-$(CONFIG_X86_32) += intel.o 13obj-$(CONFIG_X86_32) += intel.o
14obj-$(CONFIG_X86_32) += nexgen.o
15obj-$(CONFIG_X86_32) += umc.o 14obj-$(CONFIG_X86_32) += umc.o
16 15
17obj-$(CONFIG_X86_MCE) += mcheck/ 16obj-$(CONFIG_X86_MCE) += mcheck/
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 0173065dc3b7..245866828294 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -343,10 +343,4 @@ static struct cpu_dev amd_cpu_dev __cpuinitdata = {
343 .c_size_cache = amd_size_cache, 343 .c_size_cache = amd_size_cache,
344}; 344};
345 345
346int __init amd_init_cpu(void)
347{
348 cpu_devs[X86_VENDOR_AMD] = &amd_cpu_dev;
349 return 0;
350}
351
352cpu_vendor_dev_register(X86_VENDOR_AMD, &amd_cpu_dev); 346cpu_vendor_dev_register(X86_VENDOR_AMD, &amd_cpu_dev);
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index 9a699ed03598..e07e8c068ae0 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -49,7 +49,7 @@ static int banks;
49static unsigned long bank[NR_BANKS] = { [0 ... NR_BANKS-1] = ~0UL }; 49static unsigned long bank[NR_BANKS] = { [0 ... NR_BANKS-1] = ~0UL };
50static unsigned long notify_user; 50static unsigned long notify_user;
51static int rip_msr; 51static int rip_msr;
52static int mce_bootlog = 1; 52static int mce_bootlog = -1;
53static atomic_t mce_events; 53static atomic_t mce_events;
54 54
55static char trigger[128]; 55static char trigger[128];
@@ -471,13 +471,15 @@ static void mce_init(void *dummy)
471static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c) 471static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c)
472{ 472{
473 /* This should be disabled by the BIOS, but isn't always */ 473 /* This should be disabled by the BIOS, but isn't always */
474 if (c->x86_vendor == X86_VENDOR_AMD && c->x86 == 15) { 474 if (c->x86_vendor == X86_VENDOR_AMD) {
475 /* disable GART TBL walk error reporting, which trips off 475 if(c->x86 == 15)
476 incorrectly with the IOMMU & 3ware & Cerberus. */ 476 /* disable GART TBL walk error reporting, which trips off
477 clear_bit(10, &bank[4]); 477 incorrectly with the IOMMU & 3ware & Cerberus. */
478 /* Lots of broken BIOS around that don't clear them 478 clear_bit(10, &bank[4]);
479 by default and leave crap in there. Don't log. */ 479 if(c->x86 <= 17 && mce_bootlog < 0)
480 mce_bootlog = 0; 480 /* Lots of broken BIOS around that don't clear them
481 by default and leave crap in there. Don't log. */
482 mce_bootlog = 0;
481 } 483 }
482 484
483} 485}
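
Switching the initializer from 1 to -1 makes mce_bootlog a tri-state, so the AMD quirk can tell "kernel default" apart from an explicit mce=bootlog/nobootlog choice on the command line and only suppresses boot logging when the user expressed no preference. The pattern, as a sketch simplified from the surrounding file:

/*
 * Tri-state:
 *   -1  no explicit choice, quirks may override
 *    0  boot logging off
 *    1  boot logging on
 */
static int mce_bootlog = -1;

static void amd_bootlog_quirk(int family)
{
	if (family <= 17 && mce_bootlog < 0)
		mce_bootlog = 0;	/* leave user choices alone */
}
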
diff --git a/arch/x86/kernel/cpu/nexgen.c b/arch/x86/kernel/cpu/nexgen.c
deleted file mode 100644
index 5d5e1c134123..000000000000
--- a/arch/x86/kernel/cpu/nexgen.c
+++ /dev/null
@@ -1,59 +0,0 @@
1#include <linux/kernel.h>
2#include <linux/init.h>
3#include <linux/string.h>
4#include <asm/processor.h>
5
6#include "cpu.h"
7
8/*
9 * Detect a NexGen CPU running without BIOS hypercode new enough
10 * to have CPUID. (Thanks to Herbert Oppmann)
11 */
12
13static int __cpuinit deep_magic_nexgen_probe(void)
14{
15 int ret;
16
17 __asm__ __volatile__ (
18 " movw $0x5555, %%ax\n"
19 " xorw %%dx,%%dx\n"
20 " movw $2, %%cx\n"
21 " divw %%cx\n"
22 " movl $0, %%eax\n"
23 " jnz 1f\n"
24 " movl $1, %%eax\n"
25 "1:\n"
26 : "=a" (ret) : : "cx", "dx");
27 return ret;
28}
29
30static void __cpuinit init_nexgen(struct cpuinfo_x86 *c)
31{
32 c->x86_cache_size = 256; /* A few had 1 MB... */
33}
34
35static void __cpuinit nexgen_identify(struct cpuinfo_x86 *c)
36{
37 /* Detect NexGen with old hypercode */
38 if (deep_magic_nexgen_probe())
39 strcpy(c->x86_vendor_id, "NexGenDriven");
40}
41
42static struct cpu_dev nexgen_cpu_dev __cpuinitdata = {
43 .c_vendor = "Nexgen",
44 .c_ident = { "NexGenDriven" },
45 .c_models = {
46 { .vendor = X86_VENDOR_NEXGEN,
47 .family = 5,
48 .model_names = { [1] = "Nx586" }
49 },
50 },
51 .c_init = init_nexgen,
52 .c_identify = nexgen_identify,
53};
54
55int __init nexgen_init_cpu(void)
56{
57 cpu_devs[X86_VENDOR_NEXGEN] = &nexgen_cpu_dev;
58 return 0;
59}
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index b943e10ad814..f9ae93adffe5 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -614,16 +614,6 @@ static struct wd_ops intel_arch_wd_ops __read_mostly = {
614 .evntsel = MSR_ARCH_PERFMON_EVENTSEL1, 614 .evntsel = MSR_ARCH_PERFMON_EVENTSEL1,
615}; 615};
616 616
617static struct wd_ops coreduo_wd_ops = {
618 .reserve = single_msr_reserve,
619 .unreserve = single_msr_unreserve,
620 .setup = setup_intel_arch_watchdog,
621 .rearm = p6_rearm,
622 .stop = single_msr_stop_watchdog,
623 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
624 .evntsel = MSR_ARCH_PERFMON_EVENTSEL0,
625};
626
627static void probe_nmi_watchdog(void) 617static void probe_nmi_watchdog(void)
628{ 618{
629 switch (boot_cpu_data.x86_vendor) { 619 switch (boot_cpu_data.x86_vendor) {
@@ -637,8 +627,8 @@ static void probe_nmi_watchdog(void)
637 /* Work around Core Duo (Yonah) errata AE49 where perfctr1 627 /* Work around Core Duo (Yonah) errata AE49 where perfctr1
638 doesn't have a working enable bit. */ 628 doesn't have a working enable bit. */
639 if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 14) { 629 if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 14) {
640 wd_ops = &coreduo_wd_ops; 630 intel_arch_wd_ops.perfctr = MSR_ARCH_PERFMON_PERFCTR0;
641 break; 631 intel_arch_wd_ops.evntsel = MSR_ARCH_PERFMON_EVENTSEL0;
642 } 632 }
643 if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { 633 if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
644 wd_ops = &intel_arch_wd_ops; 634 wd_ops = &intel_arch_wd_ops;
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 2251d0ae9570..268553817909 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -25,6 +25,7 @@
25#include <asm/hpet.h> 25#include <asm/hpet.h>
26#include <linux/kdebug.h> 26#include <linux/kdebug.h>
27#include <asm/smp.h> 27#include <asm/smp.h>
28#include <asm/reboot.h>
28 29
29#include <mach_ipi.h> 30#include <mach_ipi.h>
30 31
@@ -117,7 +118,7 @@ static void nmi_shootdown_cpus(void)
117} 118}
118#endif 119#endif
119 120
120void machine_crash_shutdown(struct pt_regs *regs) 121void native_machine_crash_shutdown(struct pt_regs *regs)
121{ 122{
122 /* This function is only called after the system 123 /* This function is only called after the system
123 * has panicked or is otherwise in a critical state. 124 * has panicked or is otherwise in a critical state.
diff --git a/arch/x86/kernel/e820_64.c b/arch/x86/kernel/e820_64.c
index cbd42e51cb08..645ee5e32a27 100644
--- a/arch/x86/kernel/e820_64.c
+++ b/arch/x86/kernel/e820_64.c
@@ -84,14 +84,41 @@ void __init reserve_early(unsigned long start, unsigned long end, char *name)
84 strncpy(r->name, name, sizeof(r->name) - 1); 84 strncpy(r->name, name, sizeof(r->name) - 1);
85} 85}
86 86
87void __init early_res_to_bootmem(void) 87void __init free_early(unsigned long start, unsigned long end)
88{
89 struct early_res *r;
90 int i, j;
91
92 for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
93 r = &early_res[i];
94 if (start == r->start && end == r->end)
95 break;
96 }
97 if (i >= MAX_EARLY_RES || !early_res[i].end)
98 panic("free_early on not reserved area: %lx-%lx!", start, end);
99
100 for (j = i + 1; j < MAX_EARLY_RES && early_res[j].end; j++)
101 ;
102
103 memcpy(&early_res[i], &early_res[i + 1],
104 (j - 1 - i) * sizeof(struct early_res));
105
106 early_res[j - 1].end = 0;
107}
108
109void __init early_res_to_bootmem(unsigned long start, unsigned long end)
88{ 110{
89 int i; 111 int i;
112 unsigned long final_start, final_end;
90 for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) { 113 for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
91 struct early_res *r = &early_res[i]; 114 struct early_res *r = &early_res[i];
92 printk(KERN_INFO "early res: %d [%lx-%lx] %s\n", i, 115 final_start = max(start, r->start);
93 r->start, r->end - 1, r->name); 116 final_end = min(end, r->end);
94 reserve_bootmem_generic(r->start, r->end - r->start); 117 if (final_start >= final_end)
118 continue;
119 printk(KERN_INFO " early res: %d [%lx-%lx] %s\n", i,
120 final_start, final_end - 1, r->name);
121 reserve_bootmem_generic(final_start, final_end - final_start);
95 } 122 }
96} 123}
97 124
diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c
index 9546ef408b92..021624c83583 100644
--- a/arch/x86/kernel/genapic_64.c
+++ b/arch/x86/kernel/genapic_64.c
@@ -51,7 +51,7 @@ void __init setup_apic_routing(void)
51 else 51 else
52#endif 52#endif
53 53
54 if (cpus_weight(cpu_possible_map) <= 8) 54 if (num_possible_cpus() <= 8)
55 genapic = &apic_flat; 55 genapic = &apic_flat;
56 else 56 else
57 genapic = &apic_physflat; 57 genapic = &apic_physflat;
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 993c76773256..e25c57b8aa84 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -11,6 +11,7 @@
11#include <linux/string.h> 11#include <linux/string.h>
12#include <linux/percpu.h> 12#include <linux/percpu.h>
13#include <linux/start_kernel.h> 13#include <linux/start_kernel.h>
14#include <linux/io.h>
14 15
15#include <asm/processor.h> 16#include <asm/processor.h>
16#include <asm/proto.h> 17#include <asm/proto.h>
@@ -22,6 +23,7 @@
22#include <asm/sections.h> 23#include <asm/sections.h>
23#include <asm/kdebug.h> 24#include <asm/kdebug.h>
24#include <asm/e820.h> 25#include <asm/e820.h>
26#include <asm/bios_ebda.h>
25 27
26static void __init zap_identity_mappings(void) 28static void __init zap_identity_mappings(void)
27{ 29{
@@ -49,7 +51,6 @@ static void __init copy_bootdata(char *real_mode_data)
49 } 51 }
50} 52}
51 53
52#define BIOS_EBDA_SEGMENT 0x40E
53#define BIOS_LOWMEM_KILOBYTES 0x413 54#define BIOS_LOWMEM_KILOBYTES 0x413
54 55
55/* 56/*
@@ -80,8 +81,7 @@ static void __init reserve_ebda_region(void)
80 lowmem <<= 10; 81 lowmem <<= 10;
81 82
82 /* start of EBDA area */ 83 /* start of EBDA area */
83 ebda_addr = *(unsigned short *)__va(BIOS_EBDA_SEGMENT); 84 ebda_addr = get_bios_ebda();
84 ebda_addr <<= 4;
85 85
86 /* Fixup: bios puts an EBDA in the top 64K segment */ 86 /* Fixup: bios puts an EBDA in the top 64K segment */
87 /* of conventional memory, but does not adjust lowmem. */ 87 /* of conventional memory, but does not adjust lowmem. */
@@ -101,6 +101,24 @@ static void __init reserve_ebda_region(void)
101 reserve_early(lowmem, 0x100000, "BIOS reserved"); 101 reserve_early(lowmem, 0x100000, "BIOS reserved");
102} 102}
103 103
104static void __init reserve_setup_data(void)
105{
106 struct setup_data *data;
107 unsigned long pa_data;
108 char buf[32];
109
110 if (boot_params.hdr.version < 0x0209)
111 return;
112 pa_data = boot_params.hdr.setup_data;
113 while (pa_data) {
114 data = early_ioremap(pa_data, sizeof(*data));
115 sprintf(buf, "setup data %x", data->type);
116 reserve_early(pa_data, pa_data+sizeof(*data)+data->len, buf);
117 pa_data = data->next;
118 early_iounmap(data, sizeof(*data));
119 }
120}
121
104void __init x86_64_start_kernel(char * real_mode_data) 122void __init x86_64_start_kernel(char * real_mode_data)
105{ 123{
106 int i; 124 int i;
@@ -157,6 +175,7 @@ void __init x86_64_start_kernel(char * real_mode_data)
157#endif 175#endif
158 176
159 reserve_ebda_region(); 177 reserve_ebda_region();
178 reserve_setup_data();
160 179
161 /* 180 /*
162 * At this point everything still needed from the boot loader 181 * At this point everything still needed from the boot loader
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 36652ea1a265..9007f9ea64ee 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -218,7 +218,7 @@ static void hpet_legacy_clockevent_register(void)
218 hpet_freq = 1000000000000000ULL; 218 hpet_freq = 1000000000000000ULL;
219 do_div(hpet_freq, hpet_period); 219 do_div(hpet_freq, hpet_period);
220 hpet_clockevent.mult = div_sc((unsigned long) hpet_freq, 220 hpet_clockevent.mult = div_sc((unsigned long) hpet_freq,
221 NSEC_PER_SEC, 32); 221 NSEC_PER_SEC, hpet_clockevent.shift);
222 /* Calculate the min / max delta */ 222 /* Calculate the min / max delta */
223 hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF, 223 hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF,
224 &hpet_clockevent); 224 &hpet_clockevent);
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
index 8540abe86ade..c1b5e3ece1f2 100644
--- a/arch/x86/kernel/i8253.c
+++ b/arch/x86/kernel/i8253.c
@@ -115,7 +115,8 @@ void __init setup_pit_timer(void)
115 * IO_APIC has been initialized. 115 * IO_APIC has been initialized.
116 */ 116 */
117 pit_clockevent.cpumask = cpumask_of_cpu(smp_processor_id()); 117 pit_clockevent.cpumask = cpumask_of_cpu(smp_processor_id());
118 pit_clockevent.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC, 32); 118 pit_clockevent.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC,
119 pit_clockevent.shift);
119 pit_clockevent.max_delta_ns = 120 pit_clockevent.max_delta_ns =
120 clockevent_delta2ns(0x7FFF, &pit_clockevent); 121 clockevent_delta2ns(0x7FFF, &pit_clockevent);
121 pit_clockevent.min_delta_ns = 122 pit_clockevent.min_delta_ns =
@@ -224,7 +225,8 @@ static int __init init_pit_clocksource(void)
224 pit_clockevent.mode != CLOCK_EVT_MODE_PERIODIC) 225 pit_clockevent.mode != CLOCK_EVT_MODE_PERIODIC)
225 return 0; 226 return 0;
226 227
227 clocksource_pit.mult = clocksource_hz2mult(CLOCK_TICK_RATE, 20); 228 clocksource_pit.mult = clocksource_hz2mult(CLOCK_TICK_RATE,
229 clocksource_pit.shift);
228 return clocksource_register(&clocksource_pit); 230 return clocksource_register(&clocksource_pit);
229} 231}
230arch_initcall(init_pit_clocksource); 232arch_initcall(init_pit_clocksource);
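
These clockevent/clocksource hunks all fix the same bug: the mult factor is only meaningful together with the shift it was computed for, so computing it with a hard-coded 32 (or 20) while the device declares a different .shift skews every conversion of the form out = (in * mult) >> shift. The calculation itself, modeled on the div_sc() helper in linux/clockchips.h:

/* mult = (ticks << shift) / nsec, paired with the device's own shift */
#include <asm/div64.h>

static inline unsigned long calc_mult(unsigned long ticks,
				      unsigned long nsec, int shift)
{
	u64 tmp = ((u64) ticks) << shift;

	do_div(tmp, nsec);
	return (unsigned long) tmp;
}
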
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index 2e2f42074e18..696b8e4e66bb 100644
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -2068,7 +2068,7 @@ static void __init setup_nmi(void)
2068 * cycles as some i82489DX-based boards have glue logic that keeps the 2068 * cycles as some i82489DX-based boards have glue logic that keeps the
2069 * 8259A interrupt line asserted until INTA. --macro 2069 * 8259A interrupt line asserted until INTA. --macro
2070 */ 2070 */
2071static inline void unlock_ExtINT_logic(void) 2071static inline void __init unlock_ExtINT_logic(void)
2072{ 2072{
2073 int apic, pin, i; 2073 int apic, pin, i;
2074 struct IO_APIC_route_entry entry0, entry1; 2074 struct IO_APIC_route_entry entry0, entry1;
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index 9ba11d07920f..ef1a8dfcc529 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -1599,7 +1599,7 @@ static void __init setup_nmi(void)
1599 * cycles as some i82489DX-based boards have glue logic that keeps the 1599 * cycles as some i82489DX-based boards have glue logic that keeps the
1600 * 8259A interrupt line asserted until INTA. --macro 1600 * 8259A interrupt line asserted until INTA. --macro
1601 */ 1601 */
1602static inline void unlock_ExtINT_logic(void) 1602static inline void __init unlock_ExtINT_logic(void)
1603{ 1603{
1604 int apic, pin, i; 1604 int apic, pin, i;
1605 struct IO_APIC_route_entry entry0, entry1; 1605 struct IO_APIC_route_entry entry0, entry1;
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 6ea67b76a214..00bda7bcda63 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -134,7 +134,7 @@ unsigned int do_IRQ(struct pt_regs *regs)
134 : "=a" (arg1), "=d" (arg2), "=b" (bx) 134 : "=a" (arg1), "=d" (arg2), "=b" (bx)
135 : "0" (irq), "1" (desc), "2" (isp), 135 : "0" (irq), "1" (desc), "2" (isp),
136 "D" (desc->handle_irq) 136 "D" (desc->handle_irq)
137 : "memory", "cc" 137 : "memory", "cc", "ecx"
138 ); 138 );
139 } else 139 } else
140#endif 140#endif
diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
index 73354302fda7..c03205991718 100644
--- a/arch/x86/kernel/kdebugfs.c
+++ b/arch/x86/kernel/kdebugfs.c
@@ -6,23 +6,171 @@
6 * 6 *
7 * This file is released under the GPLv2. 7 * This file is released under the GPLv2.
8 */ 8 */
9
10#include <linux/debugfs.h> 9#include <linux/debugfs.h>
10#include <linux/uaccess.h>
11#include <linux/stat.h> 11#include <linux/stat.h>
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/io.h>
14#include <linux/mm.h>
13 15
14#include <asm/setup.h> 16#include <asm/setup.h>
15 17
16#ifdef CONFIG_DEBUG_BOOT_PARAMS 18#ifdef CONFIG_DEBUG_BOOT_PARAMS
19struct setup_data_node {
20 u64 paddr;
21 u32 type;
22 u32 len;
23};
24
25static ssize_t
26setup_data_read(struct file *file, char __user *user_buf, size_t count,
27 loff_t *ppos)
28{
29 struct setup_data_node *node = file->private_data;
30 unsigned long remain;
31 loff_t pos = *ppos;
32 struct page *pg;
33 void *p;
34 u64 pa;
35
36 if (pos < 0)
37 return -EINVAL;
38 if (pos >= node->len)
39 return 0;
40
41 if (count > node->len - pos)
42 count = node->len - pos;
43 pa = node->paddr + sizeof(struct setup_data) + pos;
44 pg = pfn_to_page((pa + count - 1) >> PAGE_SHIFT);
45 if (PageHighMem(pg)) {
46 p = ioremap_cache(pa, count);
47 if (!p)
48 return -ENXIO;
49 } else {
50 p = __va(pa);
51 }
52
53 remain = copy_to_user(user_buf, p, count);
54
55 if (PageHighMem(pg))
56 iounmap(p);
57
58 if (remain)
59 return -EFAULT;
60
61 *ppos = pos + count;
62
63 return count;
64}
65
66static int setup_data_open(struct inode *inode, struct file *file)
67{
68 file->private_data = inode->i_private;
69 return 0;
70}
71
72static const struct file_operations fops_setup_data = {
73 .read = setup_data_read,
74 .open = setup_data_open,
75};
76
77static int __init
78create_setup_data_node(struct dentry *parent, int no,
79 struct setup_data_node *node)
80{
81 struct dentry *d, *type, *data;
82 char buf[16];
83 int error;
84
85 sprintf(buf, "%d", no);
86 d = debugfs_create_dir(buf, parent);
87 if (!d) {
88 error = -ENOMEM;
89 goto err_return;
90 }
91 type = debugfs_create_x32("type", S_IRUGO, d, &node->type);
92 if (!type) {
93 error = -ENOMEM;
94 goto err_dir;
95 }
96 data = debugfs_create_file("data", S_IRUGO, d, node, &fops_setup_data);
97 if (!data) {
98 error = -ENOMEM;
99 goto err_type;
100 }
101 return 0;
102
103err_type:
104 debugfs_remove(type);
105err_dir:
106 debugfs_remove(d);
107err_return:
108 return error;
109}
110
111static int __init create_setup_data_nodes(struct dentry *parent)
112{
113 struct setup_data_node *node;
114 struct setup_data *data;
115 int error, no = 0;
116 struct dentry *d;
117 struct page *pg;
118 u64 pa_data;
119
120 d = debugfs_create_dir("setup_data", parent);
121 if (!d) {
122 error = -ENOMEM;
123 goto err_return;
124 }
125
126 pa_data = boot_params.hdr.setup_data;
127
128 while (pa_data) {
129 node = kmalloc(sizeof(*node), GFP_KERNEL);
130 if (!node) {
131 error = -ENOMEM;
132 goto err_dir;
133 }
134 pg = pfn_to_page((pa_data+sizeof(*data)-1) >> PAGE_SHIFT);
135 if (PageHighMem(pg)) {
136 data = ioremap_cache(pa_data, sizeof(*data));
137 if (!data) {
138 error = -ENXIO;
139 goto err_dir;
140 }
141 } else {
142 data = __va(pa_data);
143 }
144
145 node->paddr = pa_data;
146 node->type = data->type;
147 node->len = data->len;
148 error = create_setup_data_node(d, no, node);
149 pa_data = data->next;
150
151 if (PageHighMem(pg))
152 iounmap(data);
153 if (error)
154 goto err_dir;
155 no++;
156 }
157 return 0;
158
159err_dir:
160 debugfs_remove(d);
161err_return:
162 return error;
163}
164
17static struct debugfs_blob_wrapper boot_params_blob = { 165static struct debugfs_blob_wrapper boot_params_blob = {
18 .data = &boot_params, 166 .data = &boot_params,
19 .size = sizeof(boot_params), 167 .size = sizeof(boot_params),
20}; 168};
21 169
22static int __init boot_params_kdebugfs_init(void) 170static int __init boot_params_kdebugfs_init(void)
23{ 171{
24 int error;
25 struct dentry *dbp, *version, *data; 172 struct dentry *dbp, *version, *data;
173 int error;
26 174
27 dbp = debugfs_create_dir("boot_params", NULL); 175 dbp = debugfs_create_dir("boot_params", NULL);
28 if (!dbp) { 176 if (!dbp) {
@@ -41,7 +189,13 @@ static int __init boot_params_kdebugfs_init(void)
41 error = -ENOMEM; 189 error = -ENOMEM;
42 goto err_version; 190 goto err_version;
43 } 191 }
192 error = create_setup_data_nodes(dbp);
193 if (error)
194 goto err_data;
44 return 0; 195 return 0;
196
197err_data:
198 debugfs_remove(data);
45err_version: 199err_version:
46 debugfs_remove(version); 200 debugfs_remove(version);
47err_dir: 201err_dir:
@@ -61,5 +215,4 @@ static int __init arch_kdebugfs_init(void)
61 215
62 return error; 216 return error;
63} 217}
64
65arch_initcall(arch_kdebugfs_init); 218arch_initcall(arch_kdebugfs_init);
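
With these nodes in place, each boot-time setup_data blob shows up under debugfs as boot_params/setup_data/<n>/type and .../data. A small userspace sketch for reading one back (assuming debugfs is mounted at /sys/kernel/debug and node 0 exists):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/boot_params/setup_data/0/data", "rb");
	char buf[256];
	size_t n;

	if (!f) {
		perror("setup_data");
		return 1;
	}
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		fwrite(buf, 1, n, stdout);
	fclose(f);
	return 0;
}
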
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
new file mode 100644
index 000000000000..8b7a3cf37d2b
--- /dev/null
+++ b/arch/x86/kernel/kvm.c
@@ -0,0 +1,248 @@
1/*
2 * KVM paravirt_ops implementation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 *
18 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
19 * Copyright IBM Corporation, 2007
20 * Authors: Anthony Liguori <aliguori@us.ibm.com>
21 */
22
23#include <linux/module.h>
24#include <linux/kernel.h>
25#include <linux/kvm_para.h>
26#include <linux/cpu.h>
27#include <linux/mm.h>
28#include <linux/highmem.h>
29#include <linux/hardirq.h>
30
31#define MMU_QUEUE_SIZE 1024
32
33struct kvm_para_state {
34 u8 mmu_queue[MMU_QUEUE_SIZE];
35 int mmu_queue_len;
36 enum paravirt_lazy_mode mode;
37};
38
39static DEFINE_PER_CPU(struct kvm_para_state, para_state);
40
41static struct kvm_para_state *kvm_para_state(void)
42{
43 return &per_cpu(para_state, raw_smp_processor_id());
44}
45
46/*
47 * No need for any "IO delay" on KVM
48 */
49static void kvm_io_delay(void)
50{
51}
52
53static void kvm_mmu_op(void *buffer, unsigned len)
54{
55 int r;
56 unsigned long a1, a2;
57
58 do {
59 a1 = __pa(buffer);
60 a2 = 0; /* on i386 __pa() always returns <4G */
61 r = kvm_hypercall3(KVM_HC_MMU_OP, len, a1, a2);
62 buffer += r;
63 len -= r;
64 } while (len);
65}
66
67static void mmu_queue_flush(struct kvm_para_state *state)
68{
69 if (state->mmu_queue_len) {
70 kvm_mmu_op(state->mmu_queue, state->mmu_queue_len);
71 state->mmu_queue_len = 0;
72 }
73}
74
75static void kvm_deferred_mmu_op(void *buffer, int len)
76{
77 struct kvm_para_state *state = kvm_para_state();
78
79 if (state->mode != PARAVIRT_LAZY_MMU) {
80 kvm_mmu_op(buffer, len);
81 return;
82 }
83 if (state->mmu_queue_len + len > sizeof state->mmu_queue)
84 mmu_queue_flush(state);
85 memcpy(state->mmu_queue + state->mmu_queue_len, buffer, len);
86 state->mmu_queue_len += len;
87}
88
89static void kvm_mmu_write(void *dest, u64 val)
90{
91 __u64 pte_phys;
92 struct kvm_mmu_op_write_pte wpte;
93
94#ifdef CONFIG_HIGHPTE
95 struct page *page;
96 unsigned long dst = (unsigned long) dest;
97
98 page = kmap_atomic_to_page(dest);
99 pte_phys = page_to_pfn(page);
100 pte_phys <<= PAGE_SHIFT;
101 pte_phys += (dst & ~(PAGE_MASK));
102#else
103 pte_phys = (unsigned long)__pa(dest);
104#endif
105 wpte.header.op = KVM_MMU_OP_WRITE_PTE;
106 wpte.pte_val = val;
107 wpte.pte_phys = pte_phys;
108
109 kvm_deferred_mmu_op(&wpte, sizeof wpte);
110}
111
112/*
113 * We only need to hook operations that are MMU writes. We hook these so that
114 * we can use lazy MMU mode to batch these operations. We could probably
115 * improve the performance of the host code if we used some of the information
116 * here to simplify processing of batched writes.
117 */
118static void kvm_set_pte(pte_t *ptep, pte_t pte)
119{
120 kvm_mmu_write(ptep, pte_val(pte));
121}
122
123static void kvm_set_pte_at(struct mm_struct *mm, unsigned long addr,
124 pte_t *ptep, pte_t pte)
125{
126 kvm_mmu_write(ptep, pte_val(pte));
127}
128
129static void kvm_set_pmd(pmd_t *pmdp, pmd_t pmd)
130{
131 kvm_mmu_write(pmdp, pmd_val(pmd));
132}
133
134#if PAGETABLE_LEVELS >= 3
135#ifdef CONFIG_X86_PAE
136static void kvm_set_pte_atomic(pte_t *ptep, pte_t pte)
137{
138 kvm_mmu_write(ptep, pte_val(pte));
139}
140
141static void kvm_set_pte_present(struct mm_struct *mm, unsigned long addr,
142 pte_t *ptep, pte_t pte)
143{
144 kvm_mmu_write(ptep, pte_val(pte));
145}
146
147static void kvm_pte_clear(struct mm_struct *mm,
148 unsigned long addr, pte_t *ptep)
149{
150 kvm_mmu_write(ptep, 0);
151}
152
153static void kvm_pmd_clear(pmd_t *pmdp)
154{
155 kvm_mmu_write(pmdp, 0);
156}
157#endif
158
159static void kvm_set_pud(pud_t *pudp, pud_t pud)
160{
161 kvm_mmu_write(pudp, pud_val(pud));
162}
163
164#if PAGETABLE_LEVELS == 4
165static void kvm_set_pgd(pgd_t *pgdp, pgd_t pgd)
166{
167 kvm_mmu_write(pgdp, pgd_val(pgd));
168}
169#endif
170#endif /* PAGETABLE_LEVELS >= 3 */
171
172static void kvm_flush_tlb(void)
173{
174 struct kvm_mmu_op_flush_tlb ftlb = {
175 .header.op = KVM_MMU_OP_FLUSH_TLB,
176 };
177
178 kvm_deferred_mmu_op(&ftlb, sizeof ftlb);
179}
180
181static void kvm_release_pt(u32 pfn)
182{
183 struct kvm_mmu_op_release_pt rpt = {
184 .header.op = KVM_MMU_OP_RELEASE_PT,
185 .pt_phys = (u64)pfn << PAGE_SHIFT,
186 };
187
188 kvm_mmu_op(&rpt, sizeof rpt);
189}
190
191static void kvm_enter_lazy_mmu(void)
192{
193 struct kvm_para_state *state = kvm_para_state();
194
195 paravirt_enter_lazy_mmu();
196 state->mode = paravirt_get_lazy_mode();
197}
198
199static void kvm_leave_lazy_mmu(void)
200{
201 struct kvm_para_state *state = kvm_para_state();
202
203 mmu_queue_flush(state);
204 paravirt_leave_lazy(paravirt_get_lazy_mode());
205 state->mode = paravirt_get_lazy_mode();
206}
207
208static void paravirt_ops_setup(void)
209{
210 pv_info.name = "KVM";
211 pv_info.paravirt_enabled = 1;
212
213 if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
214 pv_cpu_ops.io_delay = kvm_io_delay;
215
216 if (kvm_para_has_feature(KVM_FEATURE_MMU_OP)) {
217 pv_mmu_ops.set_pte = kvm_set_pte;
218 pv_mmu_ops.set_pte_at = kvm_set_pte_at;
219 pv_mmu_ops.set_pmd = kvm_set_pmd;
220#if PAGETABLE_LEVELS >= 3
221#ifdef CONFIG_X86_PAE
222 pv_mmu_ops.set_pte_atomic = kvm_set_pte_atomic;
223 pv_mmu_ops.set_pte_present = kvm_set_pte_present;
224 pv_mmu_ops.pte_clear = kvm_pte_clear;
225 pv_mmu_ops.pmd_clear = kvm_pmd_clear;
226#endif
227 pv_mmu_ops.set_pud = kvm_set_pud;
228#if PAGETABLE_LEVELS == 4
229 pv_mmu_ops.set_pgd = kvm_set_pgd;
230#endif
231#endif
232 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
233 pv_mmu_ops.release_pte = kvm_release_pt;
234 pv_mmu_ops.release_pmd = kvm_release_pt;
235 pv_mmu_ops.release_pud = kvm_release_pt;
236
237 pv_mmu_ops.lazy_mode.enter = kvm_enter_lazy_mmu;
238 pv_mmu_ops.lazy_mode.leave = kvm_leave_lazy_mmu;
239 }
240}
241
242void __init kvm_guest_init(void)
243{
244 if (!kvm_para_available())
245 return;
246
247 paravirt_ops_setup();
248}
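
kvm_deferred_mmu_op() above implements the batching that the comment describes: while PARAVIRT_LAZY_MMU is in effect, encoded operations accumulate in the per-cpu mmu_queue and go out in one KVM_HC_MMU_OP hypercall when the queue fills or lazy mode is left. A rough userspace sketch of the same queue discipline (hypothetical names; issue() stands in for the hypercall):

#include <string.h>
#include <stdio.h>

static char queue[1024];
static int queue_len;
static int lazy;                        /* mirrors state->mode above */

static void issue(const void *buf, int len)
{
        printf("flush %d bytes\n", len); /* stand-in for kvm_mmu_op() */
}

static void queue_flush(void)
{
        if (queue_len) {
                issue(queue, queue_len);
                queue_len = 0;
        }
}

static void deferred_op(const void *op, int len)
{
        if (!lazy) {                    /* not batching: issue immediately */
                issue(op, len);
                return;
        }
        if (queue_len + len > (int)sizeof(queue))
                queue_flush();          /* no room left: flush first */
        memcpy(queue + queue_len, op, len);
        queue_len += len;
}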
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
new file mode 100644
index 000000000000..ddee04043aeb
--- /dev/null
+++ b/arch/x86/kernel/kvmclock.c
@@ -0,0 +1,187 @@
1/* KVM paravirtual clock driver. A clocksource implementation
2 Copyright (C) 2008 Glauber de Oliveira Costa, Red Hat Inc.
3
4 This program is free software; you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation; either version 2 of the License, or
7 (at your option) any later version.
8
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
13
14 You should have received a copy of the GNU General Public License
15 along with this program; if not, write to the Free Software
16 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17*/
18
19#include <linux/clocksource.h>
20#include <linux/kvm_para.h>
21#include <asm/arch_hooks.h>
22#include <asm/msr.h>
23#include <asm/apic.h>
24#include <linux/percpu.h>
25#include <asm/reboot.h>
26
27#define KVM_SCALE 22
28
29static int kvmclock = 1;
30
31static int parse_no_kvmclock(char *arg)
32{
33 kvmclock = 0;
34 return 0;
35}
36early_param("no-kvmclock", parse_no_kvmclock);
37
38/* The hypervisor will put information about time periodically here */
39static DEFINE_PER_CPU_SHARED_ALIGNED(struct kvm_vcpu_time_info, hv_clock);
40#define get_clock(cpu, field) per_cpu(hv_clock, cpu).field
41
42static inline u64 kvm_get_delta(u64 last_tsc)
43{
44 int cpu = smp_processor_id();
45 u64 delta = native_read_tsc() - last_tsc;
46 return (delta * get_clock(cpu, tsc_to_system_mul)) >> KVM_SCALE;
47}
48
49static struct kvm_wall_clock wall_clock;
50static cycle_t kvm_clock_read(void);
51/*
52 * The wallclock is the time of day when we booted. Some time may have
53 * elapsed since the hypervisor wrote that data, so we try to account for
54 * it with system time
55 */
56unsigned long kvm_get_wallclock(void)
57{
58 u32 wc_sec, wc_nsec;
59 u64 delta;
60 struct timespec ts;
61 int version, nsec;
62 int low, high;
63
64 low = (int)__pa(&wall_clock);
65 high = ((u64)__pa(&wall_clock) >> 32);
66
67 delta = kvm_clock_read();
68
69 native_write_msr(MSR_KVM_WALL_CLOCK, low, high);
70 do {
71 version = wall_clock.wc_version;
72 rmb();
73 wc_sec = wall_clock.wc_sec;
74 wc_nsec = wall_clock.wc_nsec;
75 rmb();
76 } while ((wall_clock.wc_version != version) || (version & 1));
77
78 delta = kvm_clock_read() - delta;
79 delta += wc_nsec;
80 nsec = do_div(delta, NSEC_PER_SEC);
81 set_normalized_timespec(&ts, wc_sec + delta, nsec);
82 /*
83 * Of all mechanisms of time adjustment I've tested, this one
84 * was the champion!
85 */
86 return ts.tv_sec + 1;
87}
88
89int kvm_set_wallclock(unsigned long now)
90{
91 return 0;
92}
93
94/*
95 * This is our read_clock function. The host puts a tsc timestamp in the
96 * shared area each time it updates the time. Without that tsc adjustment,
97 * we could see a situation in which a vcpu starts to run earlier (smaller
98 * system_time) but probes time later than another vcpu, i.e. backwards time
99 */
100static cycle_t kvm_clock_read(void)
101{
102 u64 last_tsc, now;
103 int cpu;
104
105 preempt_disable();
106 cpu = smp_processor_id();
107
108 last_tsc = get_clock(cpu, tsc_timestamp);
109 now = get_clock(cpu, system_time);
110
111 now += kvm_get_delta(last_tsc);
112 preempt_enable();
113
114 return now;
115}
116static struct clocksource kvm_clock = {
117 .name = "kvm-clock",
118 .read = kvm_clock_read,
119 .rating = 400,
120 .mask = CLOCKSOURCE_MASK(64),
121 .mult = 1 << KVM_SCALE,
122 .shift = KVM_SCALE,
123 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
124};
125
126static int kvm_register_clock(void)
127{
128 int cpu = smp_processor_id();
129 int low, high;
130 low = (int)__pa(&per_cpu(hv_clock, cpu)) | 1;
131 high = ((u64)__pa(&per_cpu(hv_clock, cpu)) >> 32);
132
133 return native_write_msr_safe(MSR_KVM_SYSTEM_TIME, low, high);
134}
135
136static void kvm_setup_secondary_clock(void)
137{
138 /*
139 * Now that the first cpu already had this clocksource initialized,
140 * we shouldn't fail.
141 */
142 WARN_ON(kvm_register_clock());
143 /* ok, done with our trickery, call native */
144 setup_secondary_APIC_clock();
145}
146
147/*
148 * After the clock is registered, the host will keep writing to the
149 * registered memory location. If the guest happens to shut down, this memory
150 * won't stay valid. In cases like kexec, in which you install a new kernel,
151 * this means the host keeps writing to what is now a random memory location.
152 * So before any kind of shutdown from our side, we unregister the clock by
153 * writing anything that does not have the 'enable' bit set in the msr
154 */
155#ifdef CONFIG_KEXEC
156static void kvm_crash_shutdown(struct pt_regs *regs)
157{
158 native_write_msr_safe(MSR_KVM_SYSTEM_TIME, 0, 0);
159 native_machine_crash_shutdown(regs);
160}
161#endif
162
163static void kvm_shutdown(void)
164{
165 native_write_msr_safe(MSR_KVM_SYSTEM_TIME, 0, 0);
166 native_machine_shutdown();
167}
168
169void __init kvmclock_init(void)
170{
171 if (!kvm_para_available())
172 return;
173
174 if (kvmclock && kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) {
175 if (kvm_register_clock())
176 return;
177 pv_time_ops.get_wallclock = kvm_get_wallclock;
178 pv_time_ops.set_wallclock = kvm_set_wallclock;
179 pv_time_ops.sched_clock = kvm_clock_read;
180 pv_apic_ops.setup_secondary_clock = kvm_setup_secondary_clock;
181 machine_ops.shutdown = kvm_shutdown;
182#ifdef CONFIG_KEXEC
183 machine_ops.crash_shutdown = kvm_crash_shutdown;
184#endif
185 clocksource_register(&kvm_clock);
186 }
187}
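
The retry loop in kvm_get_wallclock() above is a lock-free snapshot read: the writer makes wc_version odd while it updates the fields and even again afterwards, so a reader holds a consistent copy only if the version it saw is even and unchanged across both reads. A self-contained sketch of the same discipline (hypothetical layout; __sync_synchronize() stands in for rmb()):

struct shared_time {
        volatile unsigned int version;
        unsigned int sec, nsec;
};

static void read_snapshot(const struct shared_time *s,
                          unsigned int *sec, unsigned int *nsec)
{
        unsigned int v;

        do {
                v = s->version;
                __sync_synchronize();   /* rmb(): version before payload */
                *sec = s->sec;
                *nsec = s->nsec;
                __sync_synchronize();   /* rmb(): payload before re-check */
        } while (s->version != v || (v & 1));   /* odd or changed: retry */
}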
diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c
index b402c0f3f192..cfc2648d25ff 100644
--- a/arch/x86/kernel/mfgpt_32.c
+++ b/arch/x86/kernel/mfgpt_32.c
@@ -364,7 +364,8 @@ int __init mfgpt_timer_setup(void)
364 geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_SETUP, val);
365
366 /* Set up the clock event */
367 mfgpt_clockevent.mult = div_sc(MFGPT_HZ, NSEC_PER_SEC, 32);
367 mfgpt_clockevent.mult = div_sc(MFGPT_HZ, NSEC_PER_SEC,
368 mfgpt_clockevent.shift);
368 mfgpt_clockevent.min_delta_ns = clockevent_delta2ns(0xF,
369 &mfgpt_clockevent);
370 mfgpt_clockevent.max_delta_ns = clockevent_delta2ns(0xFFFE,
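
The mfgpt fix above matters because the clockevents core programs the device with ticks = (delta_ns * mult) >> shift, and div_sc() derives mult as (freq << shift) / NSEC_PER_SEC; computing mult against a hard-coded shift of 32 while .shift is smaller scales every programmed delta by a power of two. A standalone sketch of the arithmetic (it mirrors the kernel helper, not the kernel code itself):

#include <stdint.h>

static uint32_t div_sc(uint32_t freq, uint32_t nsec_per_sec, int shift)
{
        uint64_t tmp = (uint64_t)freq << shift;

        return (uint32_t)(tmp / nsec_per_sec);  /* truncates, like do_div() */
}

/* how the core uses the pair: ticks = (delta_ns * mult) >> shift */
static uint64_t ns_to_ticks(uint64_t ns, uint32_t mult, int shift)
{
        return (ns * mult) >> shift;
}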
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 70744e344fa1..3e2c54dc8b29 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -686,13 +686,11 @@ void __init get_smp_config(void)
686static int __init smp_scan_config(unsigned long base, unsigned long length,
687 unsigned reserve)
688{
689 extern void __bad_mpf_size(void);
690 unsigned int *bp = phys_to_virt(base);
691 struct intel_mp_floating *mpf;
692
693 Dprintk("Scan SMP from %p for %ld bytes.\n", bp, length);
694 if (sizeof(*mpf) != 16)
695 __bad_mpf_size();
693 BUILD_BUG_ON(sizeof(*mpf) != 16);
696
697 while (length > 0) {
698 mpf = (struct intel_mp_floating *)bp;
@@ -801,7 +799,6 @@ void __init find_smp_config(void)
801#ifdef CONFIG_X86_IO_APIC
802
803#define MP_ISA_BUS 0
804#define MP_MAX_IOAPIC_PIN 127
805
806extern struct mp_ioapic_routing mp_ioapic_routing[MAX_IO_APICS];
807
@@ -820,7 +817,7 @@ static int mp_find_ioapic(int gsi)
820 return -1;
821}
822
823static u8 uniq_ioapic_id(u8 id)
820static u8 __init uniq_ioapic_id(u8 id)
824{
825#ifdef CONFIG_X86_32
826 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
@@ -909,14 +906,7 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
909 intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */
910 intsrc.mpc_dstirq = pin; /* INTIN# */
911
912 Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n",
913 intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
914 (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
915 intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);
916
917 mp_irqs[mp_irq_entries] = intsrc;
918 if (++mp_irq_entries == MAX_IRQ_SOURCES)
919 panic("Max # of irq sources exceeded!\n");
909 MP_intsrc_info(&intsrc);
920}
921
922int es7000_plat;
@@ -985,23 +975,14 @@ void __init mp_config_acpi_legacy_irqs(void)
985 intsrc.mpc_srcbusirq = i; /* Identity mapped */
986 intsrc.mpc_dstirq = i;
987
988 Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
989 "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
990 (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
991 intsrc.mpc_srcbusirq, intsrc.mpc_dstapic,
992 intsrc.mpc_dstirq);
993
994 mp_irqs[mp_irq_entries] = intsrc;
995 if (++mp_irq_entries == MAX_IRQ_SOURCES)
996 panic("Max # of irq sources exceeded!\n");
978 MP_intsrc_info(&intsrc);
997 }
998}
999
1000int mp_register_gsi(u32 gsi, int triggering, int polarity)
1001{
1002 int ioapic = -1;
984 int ioapic;
1003 int ioapic_pin = 0;
985 int ioapic_pin;
1004 int idx, bit = 0;
1005#ifdef CONFIG_X86_32
1006#define MAX_GSI_NUM 4096
1007#define IRQ_COMPRESSION_START 64
@@ -1041,15 +1022,13 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
1041 * with redundant pin->gsi mappings (but unique PCI devices);
1042 * we only program the IOAPIC on the first.
1043 */
1044 bit = ioapic_pin % 32;
1045 idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
1046 if (idx > 3) {
1025 if (ioapic_pin > MP_MAX_IOAPIC_PIN) {
1047 printk(KERN_ERR "Invalid reference to IOAPIC pin "
1048 "%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
1049 ioapic_pin);
1050 return gsi;
1051 }
1052 if ((1 << bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
1031 if (test_bit(ioapic_pin, mp_ioapic_routing[ioapic].pin_programmed)) {
1053 Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
1054 mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
1055#ifdef CONFIG_X86_32
@@ -1059,7 +1038,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
1059#endif
1060 }
1061
1062 mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1 << bit);
1041 set_bit(ioapic_pin, mp_ioapic_routing[ioapic].pin_programmed);
1063#ifdef CONFIG_X86_32
1064 /*
1065 * For GSI >= 64, use IRQ compression
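
The mp_register_gsi() cleanup above swaps open-coded word/bit indexing of pin_programmed[] for the generic bitmap helpers; both address the same bit, but test_bit()/set_bit() cope with any bitmap length and set_bit() is atomic in the kernel. A small userspace sketch of the equivalence (hypothetical, plain unsigned longs):

#include <assert.h>

#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

int main(void)
{
        unsigned long pin_programmed[4] = { 0 };
        int pin = 70;

        /* old style: explicit word index and bit mask */
        pin_programmed[pin / BITS_PER_LONG] |= 1UL << (pin % BITS_PER_LONG);

        /* what a non-atomic test_bit() boils down to */
        assert((pin_programmed[pin / BITS_PER_LONG] >> (pin % BITS_PER_LONG)) & 1);
        return 0;
}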
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 2edee22e9c30..e28ec497e142 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -43,6 +43,7 @@
43#include <asm/system.h>
44#include <asm/dma.h>
45#include <asm/rio.h>
46#include <asm/bios_ebda.h>
46
47#ifdef CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT
48int use_calgary __read_mostly = 1;
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 3004d716539d..67e9b4a1e89d 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -4,6 +4,8 @@
4#include <linux/smp.h>
5#include <linux/slab.h>
6#include <linux/sched.h>
7#include <linux/module.h>
8#include <linux/pm.h>
7
8struct kmem_cache *task_xstate_cachep;
9
@@ -42,3 +44,118 @@ void arch_task_cache_init(void)
42 __alignof__(union thread_xstate),
43 SLAB_PANIC, NULL);
44}
47
48static void do_nothing(void *unused)
49{
50}
51
52/*
53 * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
54 * pm_idle and update to new pm_idle value. Required while changing pm_idle
55 * handler on SMP systems.
56 *
57 * Caller must have changed pm_idle to the new value before the call. Old
58 * pm_idle value will not be used by any CPU after the return of this function.
59 */
60void cpu_idle_wait(void)
61{
62 smp_mb();
63 /* kick all the CPUs so that they exit out of pm_idle */
64 smp_call_function(do_nothing, NULL, 0, 1);
65}
66EXPORT_SYMBOL_GPL(cpu_idle_wait);
67
68/*
69 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
70 * which can obviate the IPI otherwise needed to trigger checking of need_resched.
71 * We execute MONITOR against need_resched and enter optimized wait state
72 * through MWAIT. Whenever someone changes need_resched, we would be woken
73 * up from MWAIT (without an IPI).
74 *
75 * New with Core Duo processors, MWAIT can take some hints based on CPU
76 * capability.
77 */
78void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
79{
80 if (!need_resched()) {
81 __monitor((void *)&current_thread_info()->flags, 0, 0);
82 smp_mb();
83 if (!need_resched())
84 __mwait(ax, cx);
85 }
86}
87
88/* Default MONITOR/MWAIT with no hints, used for default C1 state */
89static void mwait_idle(void)
90{
91 if (!need_resched()) {
92 __monitor((void *)&current_thread_info()->flags, 0, 0);
93 smp_mb();
94 if (!need_resched())
95 __sti_mwait(0, 0);
96 else
97 local_irq_enable();
98 } else
99 local_irq_enable();
100}
101
102
103static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
104{
105 if (force_mwait)
106 return 1;
107 /* Any C1 states supported? */
108 return c->cpuid_level >= 5 && ((cpuid_edx(5) >> 4) & 0xf) > 0;
109}
110
111/*
112 * On SMP it's slightly faster (but much more power-consuming!)
113 * to poll the ->work.need_resched flag instead of waiting for the
114 * cross-CPU IPI to arrive. Use this option with caution.
115 */
116static void poll_idle(void)
117{
118 local_irq_enable();
119 cpu_relax();
120}
121
122void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
123{
124 static int selected;
125
126 if (selected)
127 return;
128#ifdef CONFIG_X86_SMP
129 if (pm_idle == poll_idle && smp_num_siblings > 1) {
130 printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
131 " performance may degrade.\n");
132 }
133#endif
134 if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
135 /*
136 * Skip if setup has overridden idle.
137 * One CPU supports mwait => all CPUs support mwait.
138 */
139 if (!pm_idle) {
140 printk(KERN_INFO "using mwait in idle threads.\n");
141 pm_idle = mwait_idle;
142 }
143 }
144 selected = 1;
145}
146
147static int __init idle_setup(char *str)
148{
149 if (!strcmp(str, "poll")) {
150 printk("using polling idle threads.\n");
151 pm_idle = poll_idle;
152 } else if (!strcmp(str, "mwait"))
153 force_mwait = 1;
154 else
155 return -1;
156
157 boot_option_idle_override = 1;
158 return 0;
159}
160early_param("idle", idle_setup);
161
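
mwait_usable() in the new process.c above keys off CPUID leaf 5 (MONITOR/MWAIT): EDX[7:4] reports the number of C1 sub-states MWAIT supports, so a non-zero value means MWAIT can back the default C1 idle state. A userspace sketch of the same probe (it assumes the CPU's highest CPUID leaf is at least 5, which the kernel checks via cpuid_level):

#include <stdio.h>

static unsigned int cpuid_edx(unsigned int leaf)
{
        unsigned int eax, ebx, ecx, edx;

        __asm__ volatile("cpuid"
                         : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
                         : "a"(leaf), "c"(0));
        return edx;
}

int main(void)
{
        printf("C1 sub-states via MWAIT: %u\n", (cpuid_edx(5) >> 4) & 0xf);
        return 0;
}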
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 77de848bd1fb..f8476dfbb60d 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -111,12 +111,10 @@ void default_idle(void)
111 */
112 smp_mb();
113
114 local_irq_disable();
115 if (!need_resched()) {
116 safe_halt(); /* enables interrupts racelessly */
117 local_irq_disable();
118 }
119 local_irq_enable();
114 if (!need_resched())
115 safe_halt(); /* enables interrupts racelessly */
116 else
117 local_irq_enable();
120 current_thread_info()->status |= TS_POLLING;
121 } else {
122 local_irq_enable();
@@ -128,17 +126,6 @@ void default_idle(void)
128EXPORT_SYMBOL(default_idle);
129#endif
130
131/*
132 * On SMP it's slightly faster (but much more power-consuming!)
133 * to poll the ->work.need_resched flag instead of waiting for the
134 * cross-CPU IPI to arrive. Use this option with caution.
135 */
136static void poll_idle(void)
137{
138 local_irq_enable();
139 cpu_relax();
140}
141
142#ifdef CONFIG_HOTPLUG_CPU
143#include <asm/nmi.h>
144/* We don't actually take CPU down, just spin without interrupts. */
@@ -196,6 +183,7 @@ void cpu_idle(void)
196 if (cpu_is_offline(cpu))
197 play_dead();
198
186 local_irq_disable();
199 __get_cpu_var(irq_stat).idle_timestamp = jiffies;
200 idle();
201 }
@@ -206,104 +194,6 @@ void cpu_idle(void)
206 }
207}
208
209static void do_nothing(void *unused)
210{
211}
212
213/*
214 * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
215 * pm_idle and update to new pm_idle value. Required while changing pm_idle
216 * handler on SMP systems.
217 *
218 * Caller must have changed pm_idle to the new value before the call. Old
219 * pm_idle value will not be used by any CPU after the return of this function.
220 */
221void cpu_idle_wait(void)
222{
223 smp_mb();
224 /* kick all the CPUs so that they exit out of pm_idle */
225 smp_call_function(do_nothing, NULL, 0, 1);
226}
227EXPORT_SYMBOL_GPL(cpu_idle_wait);
228
229/*
230 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
231 * which can obviate IPI to trigger checking of need_resched.
232 * We execute MONITOR against need_resched and enter optimized wait state
233 * through MWAIT. Whenever someone changes need_resched, we would be woken
234 * up from MWAIT (without an IPI).
235 *
236 * New with Core Duo processors, MWAIT can take some hints based on CPU
237 * capability.
238 */
239void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
240{
241 if (!need_resched()) {
242 __monitor((void *)&current_thread_info()->flags, 0, 0);
243 smp_mb();
244 if (!need_resched())
245 __sti_mwait(ax, cx);
246 else
247 local_irq_enable();
248 } else
249 local_irq_enable();
250}
251
252/* Default MONITOR/MWAIT with no hints, used for default C1 state */
253static void mwait_idle(void)
254{
255 local_irq_enable();
256 mwait_idle_with_hints(0, 0);
257}
258
259static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
260{
261 if (force_mwait)
262 return 1;
263 /* Any C1 states supported? */
264 return c->cpuid_level >= 5 && ((cpuid_edx(5) >> 4) & 0xf) > 0;
265}
266
267void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
268{
269 static int selected;
270
271 if (selected)
272 return;
273#ifdef CONFIG_X86_SMP
274 if (pm_idle == poll_idle && smp_num_siblings > 1) {
275 printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
276 " performance may degrade.\n");
277 }
278#endif
279 if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
280 /*
281 * Skip, if setup has overridden idle.
282 * One CPU supports mwait => All CPUs supports mwait
283 */
284 if (!pm_idle) {
285 printk(KERN_INFO "using mwait in idle threads.\n");
286 pm_idle = mwait_idle;
287 }
288 }
289 selected = 1;
290}
291
292static int __init idle_setup(char *str)
293{
294 if (!strcmp(str, "poll")) {
295 printk("using polling idle threads.\n");
296 pm_idle = poll_idle;
297 } else if (!strcmp(str, "mwait"))
298 force_mwait = 1;
299 else
300 return -1;
301
302 boot_option_idle_override = 1;
303 return 0;
304}
305early_param("idle", idle_setup);
306
307void __show_registers(struct pt_regs *regs, int all)
308{
309 unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 131c2ee7ac56..e2319f39988b 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -106,26 +106,13 @@ void default_idle(void)
106 * test NEED_RESCHED: 106 * test NEED_RESCHED:
107 */ 107 */
108 smp_mb(); 108 smp_mb();
109 local_irq_disable(); 109 if (!need_resched())
110 if (!need_resched()) {
111 safe_halt(); /* enables interrupts racelessly */ 110 safe_halt(); /* enables interrupts racelessly */
112 local_irq_disable(); 111 else
113 } 112 local_irq_enable();
114 local_irq_enable();
115 current_thread_info()->status |= TS_POLLING; 113 current_thread_info()->status |= TS_POLLING;
116} 114}
117 115
118/*
119 * On SMP it's slightly faster (but much more power-consuming!)
120 * to poll the ->need_resched flag instead of waiting for the
121 * cross-CPU IPI to arrive. Use this option with caution.
122 */
123static void poll_idle(void)
124{
125 local_irq_enable();
126 cpu_relax();
127}
128
129#ifdef CONFIG_HOTPLUG_CPU
130DECLARE_PER_CPU(int, cpu_state);
131
@@ -192,110 +179,6 @@ void cpu_idle(void)
192 }
193}
194
195static void do_nothing(void *unused)
196{
197}
198
199/*
200 * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
201 * pm_idle and update to new pm_idle value. Required while changing pm_idle
202 * handler on SMP systems.
203 *
204 * Caller must have changed pm_idle to the new value before the call. Old
205 * pm_idle value will not be used by any CPU after the return of this function.
206 */
207void cpu_idle_wait(void)
208{
209 smp_mb();
210 /* kick all the CPUs so that they exit out of pm_idle */
211 smp_call_function(do_nothing, NULL, 0, 1);
212}
213EXPORT_SYMBOL_GPL(cpu_idle_wait);
214
215/*
216 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
217 * which can obviate IPI to trigger checking of need_resched.
218 * We execute MONITOR against need_resched and enter optimized wait state
219 * through MWAIT. Whenever someone changes need_resched, we would be woken
220 * up from MWAIT (without an IPI).
221 *
222 * New with Core Duo processors, MWAIT can take some hints based on CPU
223 * capability.
224 */
225void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
226{
227 if (!need_resched()) {
228 __monitor((void *)&current_thread_info()->flags, 0, 0);
229 smp_mb();
230 if (!need_resched())
231 __mwait(ax, cx);
232 }
233}
234
235/* Default MONITOR/MWAIT with no hints, used for default C1 state */
236static void mwait_idle(void)
237{
238 if (!need_resched()) {
239 __monitor((void *)&current_thread_info()->flags, 0, 0);
240 smp_mb();
241 if (!need_resched())
242 __sti_mwait(0, 0);
243 else
244 local_irq_enable();
245 } else {
246 local_irq_enable();
247 }
248}
249
250
251static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
252{
253 if (force_mwait)
254 return 1;
255 /* Any C1 states supported? */
256 return c->cpuid_level >= 5 && ((cpuid_edx(5) >> 4) & 0xf) > 0;
257}
258
259void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
260{
261 static int selected;
262
263 if (selected)
264 return;
265#ifdef CONFIG_X86_SMP
266 if (pm_idle == poll_idle && smp_num_siblings > 1) {
267 printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
268 " performance may degrade.\n");
269 }
270#endif
271 if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
272 /*
273 * Skip, if setup has overridden idle.
274 * One CPU supports mwait => All CPUs supports mwait
275 */
276 if (!pm_idle) {
277 printk(KERN_INFO "using mwait in idle threads.\n");
278 pm_idle = mwait_idle;
279 }
280 }
281 selected = 1;
282}
283
284static int __init idle_setup(char *str)
285{
286 if (!strcmp(str, "poll")) {
287 printk("using polling idle threads.\n");
288 pm_idle = poll_idle;
289 } else if (!strcmp(str, "mwait"))
290 force_mwait = 1;
291 else
292 return -1;
293
294 boot_option_idle_override = 1;
295 return 0;
296}
297early_param("idle", idle_setup);
298
299/* Prints also some state that isn't saved in the pt_regs */
300void __show_regs(struct pt_regs * regs)
301{
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 559c1b027417..fb03ef380f0e 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -1207,97 +1207,16 @@ static int genregs32_set(struct task_struct *target,
1207 return ret;
1208}
1209
1210static long ptrace32_siginfo(unsigned request, u32 pid, u32 addr, u32 data)
1210long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
1211 compat_ulong_t caddr, compat_ulong_t cdata)
1211{
1212 siginfo_t __user *si = compat_alloc_user_space(sizeof(siginfo_t));
1213 compat_siginfo_t __user *si32 = compat_ptr(data);
1213 unsigned long addr = caddr;
1214 unsigned long data = cdata;
1214 siginfo_t ssi;
1215 int ret;
1216
1217 if (request == PTRACE_SETSIGINFO) {
1218 memset(&ssi, 0, sizeof(siginfo_t));
1219 ret = copy_siginfo_from_user32(&ssi, si32);
1220 if (ret)
1221 return ret;
1222 if (copy_to_user(si, &ssi, sizeof(siginfo_t)))
1223 return -EFAULT;
1224 }
1225 ret = sys_ptrace(request, pid, addr, (unsigned long)si);
1226 if (ret)
1227 return ret;
1228 if (request == PTRACE_GETSIGINFO) {
1229 if (copy_from_user(&ssi, si, sizeof(siginfo_t)))
1230 return -EFAULT;
1231 ret = copy_siginfo_to_user32(si32, &ssi);
1232 }
1233 return ret;
1234}
1235
1236asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
1237{
1238 struct task_struct *child;
1239 struct pt_regs *childregs;
1240 void __user *datap = compat_ptr(data);
1241 int ret;
1242 __u32 val;
1243
1244 switch (request) {
1245 case PTRACE_TRACEME:
1246 case PTRACE_ATTACH:
1247 case PTRACE_KILL:
1248 case PTRACE_CONT:
1249 case PTRACE_SINGLESTEP:
1250 case PTRACE_SINGLEBLOCK:
1251 case PTRACE_DETACH:
1252 case PTRACE_SYSCALL:
1253 case PTRACE_OLDSETOPTIONS:
1254 case PTRACE_SETOPTIONS:
1255 case PTRACE_SET_THREAD_AREA:
1256 case PTRACE_GET_THREAD_AREA:
1257#ifdef X86_BTS
1258 case PTRACE_BTS_CONFIG:
1259 case PTRACE_BTS_STATUS:
1260 case PTRACE_BTS_SIZE:
1261 case PTRACE_BTS_GET:
1262 case PTRACE_BTS_CLEAR:
1263 case PTRACE_BTS_DRAIN:
1264#endif
1265 return sys_ptrace(request, pid, addr, data);
1266
1267 default:
1268 return -EINVAL;
1269
1270 case PTRACE_PEEKTEXT:
1271 case PTRACE_PEEKDATA:
1272 case PTRACE_POKEDATA:
1273 case PTRACE_POKETEXT:
1274 case PTRACE_POKEUSR:
1275 case PTRACE_PEEKUSR:
1276 case PTRACE_GETREGS:
1277 case PTRACE_SETREGS:
1278 case PTRACE_SETFPREGS:
1279 case PTRACE_GETFPREGS:
1280 case PTRACE_SETFPXREGS:
1281 case PTRACE_GETFPXREGS:
1282 case PTRACE_GETEVENTMSG:
1283 break;
1284
1285 case PTRACE_SETSIGINFO:
1286 case PTRACE_GETSIGINFO:
1287 return ptrace32_siginfo(request, pid, addr, data);
1288 }
1289
1290 child = ptrace_get_task_struct(pid);
1291 if (IS_ERR(child))
1292 return PTR_ERR(child);
1293
1294 ret = ptrace_check_attach(child, request == PTRACE_KILL);
1295 if (ret < 0)
1296 goto out;
1297
1298 childregs = task_pt_regs(child);
1299
1300 switch (request) {
1301 case PTRACE_PEEKUSR:
1302 ret = getreg32(child, addr, &val);
1303 if (ret == 0)
@@ -1343,12 +1262,14 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
1343 sizeof(struct user32_fxsr_struct),
1344 datap);
1345
1265 case PTRACE_GET_THREAD_AREA:
1266 case PTRACE_SET_THREAD_AREA:
1267 return arch_ptrace(child, request, addr, data);
1268
1346 default:
1347 return compat_ptrace_request(child, request, addr, data);
1348 }
1349
1350 out:
1351 put_task_struct(child);
1352 return ret;
1353}
1354
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 1791a751a772..a4a838306b2c 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -399,7 +399,7 @@ static void native_machine_emergency_restart(void)
399 }
400}
401
402static void native_machine_shutdown(void)
402void native_machine_shutdown(void)
403{
404 /* Stop the cpus and apics */
405#ifdef CONFIG_SMP
@@ -470,7 +470,10 @@ struct machine_ops machine_ops = {
470 .shutdown = native_machine_shutdown,
471 .emergency_restart = native_machine_emergency_restart,
472 .restart = native_machine_restart,
473 .halt = native_machine_halt
473 .halt = native_machine_halt,
474#ifdef CONFIG_KEXEC
475 .crash_shutdown = native_machine_crash_shutdown,
476#endif
474};
475
476void machine_power_off(void)
@@ -498,3 +501,9 @@ void machine_halt(void)
498 machine_ops.halt();
499}
500
504#ifdef CONFIG_KEXEC
505void machine_crash_shutdown(struct pt_regs *regs)
506{
507 machine_ops.crash_shutdown(regs);
508}
509#endif
diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c
index 455d3c80960b..2283422af794 100644
--- a/arch/x86/kernel/setup_32.c
+++ b/arch/x86/kernel/setup_32.c
@@ -47,6 +47,7 @@
47#include <linux/pfn.h>
48#include <linux/pci.h>
49#include <linux/init_ohci1394_dma.h>
50#include <linux/kvm_para.h>
50
51#include <video/edid.h>
52
@@ -389,7 +390,6 @@ unsigned long __init find_max_low_pfn(void)
389 return max_low_pfn;
390}
391
392#define BIOS_EBDA_SEGMENT 0x40E
393#define BIOS_LOWMEM_KILOBYTES 0x413
394
395/*
@@ -420,8 +420,7 @@ static void __init reserve_ebda_region(void)
420 lowmem <<= 10;
421
422 /* start of EBDA area */
423 ebda_addr = *(unsigned short *)__va(BIOS_EBDA_SEGMENT);
424 ebda_addr <<= 4;
423 ebda_addr = get_bios_ebda();
425
426 /* Fixup: bios puts an EBDA in the top 64K segment */
427 /* of conventional memory, but does not adjust lowmem. */
@@ -822,6 +821,10 @@ void __init setup_arch(char **cmdline_p)
822
823 max_low_pfn = setup_memory();
824
824#ifdef CONFIG_KVM_CLOCK
825 kvmclock_init();
826#endif
827
825#ifdef CONFIG_VMI
826 /*
827 * Must be after max_low_pfn is determined, and before kernel
@@ -829,6 +832,7 @@ void __init setup_arch(char **cmdline_p)
829 */
830 vmi_init();
831#endif
835 kvm_guest_init();
832
833 /*
834 * NOTE: before this point _nobody_ is allowed to allocate
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index c2ec3dcb6b99..a94fb959a87a 100644
--- a/arch/x86/kernel/setup_64.c
+++ b/arch/x86/kernel/setup_64.c
@@ -42,6 +42,7 @@
42#include <linux/ctype.h>
43#include <linux/uaccess.h>
44#include <linux/init_ohci1394_dma.h>
45#include <linux/kvm_para.h>
45
46#include <asm/mtrr.h>
47#include <asm/uaccess.h>
@@ -116,7 +117,7 @@ extern int root_mountflags;
116
117char __initdata command_line[COMMAND_LINE_SIZE];
118
119struct resource standard_io_resources[] = {
120static struct resource standard_io_resources[] = {
120 { .name = "dma1", .start = 0x00, .end = 0x1f,
121 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
122 { .name = "pic1", .start = 0x20, .end = 0x21,
@@ -190,6 +191,7 @@ contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
190 bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
191 e820_register_active_regions(0, start_pfn, end_pfn);
192 free_bootmem_with_active_regions(0, end_pfn);
194 early_res_to_bootmem(0, end_pfn<<PAGE_SHIFT);
193 reserve_bootmem(bootmap, bootmap_size, BOOTMEM_DEFAULT);
194}
195#endif
@@ -264,6 +266,28 @@ void __attribute__((weak)) __init memory_setup(void)
264 machine_specific_memory_setup();
265}
266
269static void __init parse_setup_data(void)
270{
271 struct setup_data *data;
272 unsigned long pa_data;
273
274 if (boot_params.hdr.version < 0x0209)
275 return;
276 pa_data = boot_params.hdr.setup_data;
277 while (pa_data) {
278 data = early_ioremap(pa_data, PAGE_SIZE);
279 switch (data->type) {
280 default:
281 break;
282 }
283#ifndef CONFIG_DEBUG_BOOT_PARAMS
284 free_early(pa_data, pa_data+sizeof(*data)+data->len);
285#endif
286 pa_data = data->next;
287 early_iounmap(data, PAGE_SIZE);
288 }
289}
290
267/*
268 * setup_arch - architecture-specific boot-time initializations
269 *
@@ -316,6 +340,8 @@ void __init setup_arch(char **cmdline_p)
316 strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
317 *cmdline_p = command_line;
318
343 parse_setup_data();
344
319 parse_early_param();
320
321#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
@@ -359,6 +385,10 @@ void __init setup_arch(char **cmdline_p)
359
360 io_delay_init();
361
388#ifdef CONFIG_KVM_CLOCK
389 kvmclock_init();
390#endif
391
362#ifdef CONFIG_SMP
363 /* setup to use the early static init tables during kernel startup */
364 x86_cpu_to_apicid_early_ptr = (void *)x86_cpu_to_apicid_init;
@@ -397,8 +427,6 @@ void __init setup_arch(char **cmdline_p)
397 contig_initmem_init(0, end_pfn);
398#endif
399
400 early_res_to_bootmem();
401
402 dma32_reserve_bootmem();
403
404#ifdef CONFIG_ACPI_SLEEP
@@ -465,6 +493,8 @@ void __init setup_arch(char **cmdline_p)
465 init_apic_mappings();
466 ioapic_init_mappings();
467
496 kvm_guest_init();
497
468 /*
469 * We trust e820 completely. No explicit ROM probing in memory.
470 */
diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c
index f1b117930837..8e05e7f7bd40 100644
--- a/arch/x86/kernel/signal_32.c
+++ b/arch/x86/kernel/signal_32.c
@@ -413,16 +413,6 @@ setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
413 regs->ss = __USER_DS;
414 regs->cs = __USER_CS;
415
416 /*
417 * Clear TF when entering the signal handler, but
418 * notify any tracer that was single-stepping it.
419 * The tracer may want to single-step inside the
420 * handler too.
421 */
422 regs->flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_DF);
423 if (test_thread_flag(TIF_SINGLESTEP))
424 ptrace_notify(SIGTRAP);
425
426 return 0;
427
428give_sigsegv:
@@ -501,16 +491,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
501 regs->ss = __USER_DS;
502 regs->cs = __USER_CS;
503
504 /*
505 * Clear TF when entering the signal handler, but
506 * notify any tracer that was single-stepping it.
507 * The tracer may want to single-step inside the
508 * handler too.
509 */
510 regs->flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_DF);
511 if (test_thread_flag(TIF_SINGLESTEP))
512 ptrace_notify(SIGTRAP);
513
514 return 0;
515
516give_sigsegv:
@@ -566,6 +546,21 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
566 if (ret)
567 return ret;
568
549 /*
550 * Clear the direction flag as per the ABI for function entry.
551 */
552 regs->flags &= ~X86_EFLAGS_DF;
553
554 /*
555 * Clear TF when entering the signal handler, but
556 * notify any tracer that was single-stepping it.
557 * The tracer may want to single-step inside the
558 * handler too.
559 */
560 regs->flags &= ~X86_EFLAGS_TF;
561 if (test_thread_flag(TIF_SINGLESTEP))
562 ptrace_notify(SIGTRAP);
563
569 spin_lock_irq(&current->sighand->siglock);
570 sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
571 if (!(ka->sa.sa_flags & SA_NODEFER))
diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c
index 827179c5b32a..ccb2a4560c2d 100644
--- a/arch/x86/kernel/signal_64.c
+++ b/arch/x86/kernel/signal_64.c
@@ -285,14 +285,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
285 even if the handler happens to be interrupting 32-bit code. */
286 regs->cs = __USER_CS;
287
288 /* This, by contrast, has nothing to do with segment registers -
289 see include/asm-x86_64/uaccess.h for details. */
290 set_fs(USER_DS);
291
292 regs->flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_DF);
293 if (test_thread_flag(TIF_SINGLESTEP))
294 ptrace_notify(SIGTRAP);
295
296 return 0;
297
298give_sigsegv:
@@ -380,6 +372,28 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
380 ret = setup_rt_frame(sig, ka, info, oldset, regs);
381
382 if (ret == 0) {
375 /*
376 * This has nothing to do with segment registers,
377 * despite the name. This magic affects uaccess.h
378 * macros' behavior. Reset it to the normal setting.
379 */
380 set_fs(USER_DS);
381
382 /*
383 * Clear the direction flag as per the ABI for function entry.
384 */
385 regs->flags &= ~X86_EFLAGS_DF;
386
387 /*
388 * Clear TF when entering the signal handler, but
389 * notify any tracer that was single-stepping it.
390 * The tracer may want to single-step inside the
391 * handler too.
392 */
393 regs->flags &= ~X86_EFLAGS_TF;
394 if (test_thread_flag(TIF_SINGLESTEP))
395 ptrace_notify(SIGTRAP);
396
383 spin_lock_irq(&current->sighand->siglock);
384 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
385 if (!(ka->sa.sa_flags & SA_NODEFER))
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index eef79e84145f..04c662ba18f1 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1058,7 +1058,7 @@ int __cpuinit native_cpu_up(unsigned int cpu)
1058 check_tsc_sync_source(cpu);
1059 local_irq_restore(flags);
1060
1061 while (!cpu_isset(cpu, cpu_online_map)) {
1061 while (!cpu_online(cpu)) {
1062 cpu_relax();
1063 touch_nmi_watchdog();
1064 }
@@ -1168,7 +1168,7 @@ static void __init smp_cpu_index_default(void)
1168 int i;
1169 struct cpuinfo_x86 *c;
1170
1171 for_each_cpu_mask(i, cpu_possible_map) {
1171 for_each_possible_cpu(i) {
1172 c = &cpu_data(i);
1173 /* mark all to hotplug */
1174 c->cpu_index = NR_CPUS;
diff --git a/arch/x86/kernel/summit_32.c b/arch/x86/kernel/summit_32.c
index 6878a9c2df5d..ae751094eba9 100644
--- a/arch/x86/kernel/summit_32.c
+++ b/arch/x86/kernel/summit_32.c
@@ -29,6 +29,7 @@
29#include <linux/mm.h>
30#include <linux/init.h>
31#include <asm/io.h>
32#include <asm/bios_ebda.h>
32#include <asm/mach-summit/mach_mpparse.h>
33
34static struct rio_table_hdr *rio_table_hdr __initdata;
@@ -140,8 +141,8 @@ void __init setup_summit(void)
140 int i, next_wpeg, next_bus = 0;
141
142 /* The pointer to the EBDA is stored in the word @ phys 0x40E(40:0E) */
143 ptr = *(unsigned short *)phys_to_virt(0x40Eul);
144 ptr = (unsigned long)phys_to_virt(ptr << 4);
144 ptr = get_bios_ebda();
145 ptr = (unsigned long)phys_to_virt(ptr);
145
146 rio_table_hdr = NULL;
147 offset = 0x180;
diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c
index df224a8774cb..a1f07d793202 100644
--- a/arch/x86/kernel/tlb_64.c
+++ b/arch/x86/kernel/tlb_64.c
@@ -195,9 +195,9 @@ static int __cpuinit init_smp_flush(void)
195{
196 int i;
197
198 for_each_cpu_mask(i, cpu_possible_map) {
198 for_each_possible_cpu(i)
199 spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
200 }
200
201 return 0;
202}
203core_initcall(init_smp_flush);
diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
index 64580679861e..d8ccc3c6552f 100644
--- a/arch/x86/kernel/trampoline_32.S
+++ b/arch/x86/kernel/trampoline_32.S
@@ -33,7 +33,7 @@
33
34/* We can free up trampoline after bootup if cpu hotplug is not supported. */
35#ifndef CONFIG_HOTPLUG_CPU
36.section ".init.data","aw",@progbits
36.section ".cpuinit.data","aw",@progbits
37#else
38.section .rodata,"a",@progbits
39#endif
diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c
index 471e694d6713..bde6f63e15d5 100644
--- a/arch/x86/kernel/traps_32.c
+++ b/arch/x86/kernel/traps_32.c
@@ -602,7 +602,7 @@ DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
602DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
603DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
604DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0, 0)
605DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0, 1)
605DO_ERROR_INFO(32, SIGILL, "iret exception", iret_error, ILL_BADSTK, 0, 1)
606
607void __kprobes do_general_protection(struct pt_regs *regs, long error_code)
608{
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 41962e793c0f..8d45fabc5f3b 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -19,7 +19,7 @@ if VIRTUALIZATION
19
20config KVM
21 tristate "Kernel-based Virtual Machine (KVM) support"
22 depends on HAVE_KVM && EXPERIMENTAL
22 depends on HAVE_KVM
23 select PREEMPT_NOTIFIERS
24 select ANON_INODES
25 ---help---
@@ -50,6 +50,17 @@ config KVM_AMD
50 Provides support for KVM on AMD processors equipped with the AMD-V
51 (SVM) extensions.
52
53config KVM_TRACE
54 bool "KVM trace support"
55 depends on KVM && MARKERS && SYSFS
56 select RELAY
57 select DEBUG_FS
58 default n
59 ---help---
60 This option allows reading a trace of kvm-related events through
61 relayfs. Note the ABI is not considered stable and will be
62 modified in future updates.
63
53# OK, it's a little counter-intuitive to do this, but it puts it neatly under
54# the virtualization menu.
55source drivers/lguest/Kconfig
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index ffdd0b310784..c97d35c218db 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -3,10 +3,14 @@
3#
4
5common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o)
6ifeq ($(CONFIG_KVM_TRACE),y)
7common-objs += $(addprefix ../../../virt/kvm/, kvm_trace.o)
8endif
6
7EXTRA_CFLAGS += -Ivirt/kvm -Iarch/x86/kvm
8
9kvm-objs := $(common-objs) x86.o mmu.o x86_emulate.o i8259.o irq.o lapic.o
12kvm-objs := $(common-objs) x86.o mmu.o x86_emulate.o i8259.o irq.o lapic.o \
13 i8254.o
10obj-$(CONFIG_KVM) += kvm.o
11kvm-intel-objs = vmx.o
12obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
new file mode 100644
index 000000000000..361e31611276
--- /dev/null
+++ b/arch/x86/kvm/i8254.c
@@ -0,0 +1,611 @@
1/*
2 * 8253/8254 interval timer emulation
3 *
4 * Copyright (c) 2003-2004 Fabrice Bellard
5 * Copyright (c) 2006 Intel Corporation
6 * Copyright (c) 2007 Keir Fraser, XenSource Inc
7 * Copyright (c) 2008 Intel Corporation
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a copy
10 * of this software and associated documentation files (the "Software"), to deal
11 * in the Software without restriction, including without limitation the rights
12 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
13 * copies of the Software, and to permit persons to whom the Software is
14 * furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice shall be included in
17 * all copies or substantial portions of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
24 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
25 * THE SOFTWARE.
26 *
27 * Authors:
28 * Sheng Yang <sheng.yang@intel.com>
29 * Based on QEMU and Xen.
30 */
31
32#include <linux/kvm_host.h>
33
34#include "irq.h"
35#include "i8254.h"
36
37#ifndef CONFIG_X86_64
38#define mod_64(x, y) ((x) - (y) * div64_64(x, y))
39#else
40#define mod_64(x, y) ((x) % (y))
41#endif
42
43#define RW_STATE_LSB 1
44#define RW_STATE_MSB 2
45#define RW_STATE_WORD0 3
46#define RW_STATE_WORD1 4
47
48/* Compute with 96 bit intermediate result: (a*b)/c */
49static u64 muldiv64(u64 a, u32 b, u32 c)
50{
51 union {
52 u64 ll;
53 struct {
54 u32 low, high;
55 } l;
56 } u, res;
57 u64 rl, rh;
58
59 u.ll = a;
60 rl = (u64)u.l.low * (u64)b;
61 rh = (u64)u.l.high * (u64)b;
62 rh += (rl >> 32);
63 res.l.high = div64_64(rh, c);
64 res.l.low = div64_64(((mod_64(rh, c) << 32) + (rl & 0xffffffff)), c);
65 return res.ll;
66}
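/*
 * muldiv64() keeps the full a * b product in the 96-bit rl/rh pair before
 * dividing, so a 64-bit nanosecond count times the 32-bit PIT frequency
 * cannot overflow. On compilers with __int128 the same result can be
 * cross-checked with a one-liner (a sketch for illustration):
 *
 *	static u64 muldiv64_ref(u64 a, u32 b, u32 c)
 *	{
 *		return (u64)(((unsigned __int128)a * b) / c);
 *	}
 *
 * e.g. three seconds of nanoseconds at the 1193182 Hz PIT clock:
 * muldiv64(3000000000ULL, 1193182, 1000000000) == 3579546 ticks.
 */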
67
68static void pit_set_gate(struct kvm *kvm, int channel, u32 val)
69{
70 struct kvm_kpit_channel_state *c =
71 &kvm->arch.vpit->pit_state.channels[channel];
72
73 WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
74
75 switch (c->mode) {
76 default:
77 case 0:
78 case 4:
79 /* XXX: just disable/enable counting */
80 break;
81 case 1:
82 case 2:
83 case 3:
84 case 5:
85 /* Restart counting on rising edge. */
86 if (c->gate < val)
87 c->count_load_time = ktime_get();
88 break;
89 }
90
91 c->gate = val;
92}
93
94int pit_get_gate(struct kvm *kvm, int channel)
95{
96 WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
97
98 return kvm->arch.vpit->pit_state.channels[channel].gate;
99}
100
101static int pit_get_count(struct kvm *kvm, int channel)
102{
103 struct kvm_kpit_channel_state *c =
104 &kvm->arch.vpit->pit_state.channels[channel];
105 s64 d, t;
106 int counter;
107
108 WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
109
110 t = ktime_to_ns(ktime_sub(ktime_get(), c->count_load_time));
111 d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);
112
113 switch (c->mode) {
114 case 0:
115 case 1:
116 case 4:
117 case 5:
118 counter = (c->count - d) & 0xffff;
119 break;
120 case 3:
121 /* XXX: may be incorrect for odd counts */
122 counter = c->count - (mod_64((2 * d), c->count));
123 break;
124 default:
125 counter = c->count - mod_64(d, c->count);
126 break;
127 }
128 return counter;
129}
130
131static int pit_get_out(struct kvm *kvm, int channel)
132{
133 struct kvm_kpit_channel_state *c =
134 &kvm->arch.vpit->pit_state.channels[channel];
135 s64 d, t;
136 int out;
137
138 WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
139
140 t = ktime_to_ns(ktime_sub(ktime_get(), c->count_load_time));
141 d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);
142
143 switch (c->mode) {
144 default:
145 case 0:
146 out = (d >= c->count);
147 break;
148 case 1:
149 out = (d < c->count);
150 break;
151 case 2:
152 out = ((mod_64(d, c->count) == 0) && (d != 0));
153 break;
154 case 3:
155 out = (mod_64(d, c->count) < ((c->count + 1) >> 1));
156 break;
157 case 4:
158 case 5:
159 out = (d == c->count);
160 break;
161 }
162
163 return out;
164}
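/*
 * e.g. in mode 3 (square wave) the output is high for roughly the first
 * half of each count period and low for the second, which is why the
 * comparison above is against (count + 1) / 2.
 */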
165
166static void pit_latch_count(struct kvm *kvm, int channel)
167{
168 struct kvm_kpit_channel_state *c =
169 &kvm->arch.vpit->pit_state.channels[channel];
170
171 WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
172
173 if (!c->count_latched) {
174 c->latched_count = pit_get_count(kvm, channel);
175 c->count_latched = c->rw_mode;
176 }
177}
178
179static void pit_latch_status(struct kvm *kvm, int channel)
180{
181 struct kvm_kpit_channel_state *c =
182 &kvm->arch.vpit->pit_state.channels[channel];
183
184 WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
185
186 if (!c->status_latched) {
187 /* TODO: Return NULL COUNT (bit 6). */
188 c->status = ((pit_get_out(kvm, channel) << 7) |
189 (c->rw_mode << 4) |
190 (c->mode << 1) |
191 c->bcd);
192 c->status_latched = 1;
193 }
194}
195
196int __pit_timer_fn(struct kvm_kpit_state *ps)
197{
198 struct kvm_vcpu *vcpu0 = ps->pit->kvm->vcpus[0];
199 struct kvm_kpit_timer *pt = &ps->pit_timer;
200
201 atomic_inc(&pt->pending);
202 smp_mb__after_atomic_inc();
203 /* FIXME: handle case where the guest is in guest mode */
204 if (vcpu0 && waitqueue_active(&vcpu0->wq)) {
205 vcpu0->arch.mp_state = KVM_MP_STATE_RUNNABLE;
206 wake_up_interruptible(&vcpu0->wq);
207 }
208
209 pt->timer.expires = ktime_add_ns(pt->timer.expires, pt->period);
210 pt->scheduled = ktime_to_ns(pt->timer.expires);
211
212 return (pt->period == 0 ? 0 : 1);
213}
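/*
 * 'pending' counts ticks that have fired but have not yet been delivered
 * as IRQ0; kvm_inject_pit_timer_irqs()/kvm_pit_timer_intr_post() below
 * inject and retire them one interrupt at a time. Only vcpu0 is woken
 * because the PIT's IRQ0 is delivered to the boot CPU.
 */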
214
215int pit_has_pending_timer(struct kvm_vcpu *vcpu)
216{
217 struct kvm_pit *pit = vcpu->kvm->arch.vpit;
218
219 if (pit && vcpu->vcpu_id == 0)
220 return atomic_read(&pit->pit_state.pit_timer.pending);
221
222 return 0;
223}
224
225static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
226{
227 struct kvm_kpit_state *ps;
228 int restart_timer = 0;
229
230 ps = container_of(data, struct kvm_kpit_state, pit_timer.timer);
231
232 restart_timer = __pit_timer_fn(ps);
233
234 if (restart_timer)
235 return HRTIMER_RESTART;
236 else
237 return HRTIMER_NORESTART;
238}
239
240static void destroy_pit_timer(struct kvm_kpit_timer *pt)
241{
242	pr_debug("pit: delete timer\n");
243 hrtimer_cancel(&pt->timer);
244}
245
246static void create_pit_timer(struct kvm_kpit_timer *pt, u32 val, int is_period)
247{
248 s64 interval;
249
250 interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ);
251
252 pr_debug("pit: create pit timer, interval is %llu nsec\n", interval);
253
254	/* TODO: the new value only takes effect after the timer is retriggered */
255 hrtimer_cancel(&pt->timer);
256 pt->period = (is_period == 0) ? 0 : interval;
257 pt->timer.function = pit_timer_fn;
258 atomic_set(&pt->pending, 0);
259
260 hrtimer_start(&pt->timer, ktime_add_ns(ktime_get(), interval),
261 HRTIMER_MODE_ABS);
262}
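/*
 * For the default reload value 0x10000 (see pit_load_count() below),
 * interval = 65536 * NSEC_PER_SEC / 1193181 ~= 54.9 ms, i.e. the classic
 * 18.2 Hz PC timer tick.
 */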
263
264static void pit_load_count(struct kvm *kvm, int channel, u32 val)
265{
266 struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;
267
268 WARN_ON(!mutex_is_locked(&ps->lock));
269
270 pr_debug("pit: load_count val is %d, channel is %d\n", val, channel);
271
272	/*
273	 * Although the spec says the state of the 8254 is undefined after
274	 * power-up, some tricky OSes (e.g. Windows XP) depend on an IRQ0
275	 * interrupt while booting.
276	 * So treat a count of zero as the hardware maximum, 0x10000.
277	 */
278 if (val == 0)
279 val = 0x10000;
280
281 ps->channels[channel].count_load_time = ktime_get();
282 ps->channels[channel].count = val;
283
284 if (channel != 0)
285 return;
286
287	/* Two types of timer:
288	 * mode 1 is one-shot, mode 2 is periodic; otherwise delete the timer. */
289 switch (ps->channels[0].mode) {
290 case 1:
291 create_pit_timer(&ps->pit_timer, val, 0);
292 break;
293 case 2:
294 create_pit_timer(&ps->pit_timer, val, 1);
295 break;
296 default:
297 destroy_pit_timer(&ps->pit_timer);
298 }
299}
300
301void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val)
302{
303 mutex_lock(&kvm->arch.vpit->pit_state.lock);
304 pit_load_count(kvm, channel, val);
305 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
306}
307
308static void pit_ioport_write(struct kvm_io_device *this,
309 gpa_t addr, int len, const void *data)
310{
311 struct kvm_pit *pit = (struct kvm_pit *)this->private;
312 struct kvm_kpit_state *pit_state = &pit->pit_state;
313 struct kvm *kvm = pit->kvm;
314 int channel, access;
315 struct kvm_kpit_channel_state *s;
316 u32 val = *(u32 *) data;
317
318 val &= 0xff;
319 addr &= KVM_PIT_CHANNEL_MASK;
320
321 mutex_lock(&pit_state->lock);
322
323 if (val != 0)
324 pr_debug("pit: write addr is 0x%x, len is %d, val is 0x%x\n",
325 (unsigned int)addr, len, val);
326
327 if (addr == 3) {
328 channel = val >> 6;
329 if (channel == 3) {
330 /* Read-Back Command. */
331 for (channel = 0; channel < 3; channel++) {
332 s = &pit_state->channels[channel];
333 if (val & (2 << channel)) {
334 if (!(val & 0x20))
335 pit_latch_count(kvm, channel);
336 if (!(val & 0x10))
337 pit_latch_status(kvm, channel);
338 }
339 }
340 } else {
341 /* Select Counter <channel>. */
342 s = &pit_state->channels[channel];
343 access = (val >> 4) & KVM_PIT_CHANNEL_MASK;
344 if (access == 0) {
345 pit_latch_count(kvm, channel);
346 } else {
347 s->rw_mode = access;
348 s->read_state = access;
349 s->write_state = access;
350 s->mode = (val >> 1) & 7;
351 if (s->mode > 5)
352 s->mode -= 4;
353 s->bcd = val & 1;
354 }
355 }
356 } else {
357 /* Write Count. */
358 s = &pit_state->channels[addr];
359 switch (s->write_state) {
360 default:
361 case RW_STATE_LSB:
362 pit_load_count(kvm, addr, val);
363 break;
364 case RW_STATE_MSB:
365 pit_load_count(kvm, addr, val << 8);
366 break;
367 case RW_STATE_WORD0:
368 s->write_latch = val;
369 s->write_state = RW_STATE_WORD1;
370 break;
371 case RW_STATE_WORD1:
372 pit_load_count(kvm, addr, s->write_latch | (val << 8));
373 s->write_state = RW_STATE_WORD0;
374 break;
375 }
376 }
377
378 mutex_unlock(&pit_state->lock);
379}
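/*
 * Worked example: the traditional BIOS sequence writes control word 0x34
 * to port 0x43 (channel 0, access 3 = LSB then MSB, mode 2 = rate
 * generator, binary), then two data bytes to port 0x40: the first lands
 * in write_latch (RW_STATE_WORD0), the second calls pit_load_count()
 * with the combined 16-bit value (RW_STATE_WORD1).
 */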
380
381static void pit_ioport_read(struct kvm_io_device *this,
382 gpa_t addr, int len, void *data)
383{
384 struct kvm_pit *pit = (struct kvm_pit *)this->private;
385 struct kvm_kpit_state *pit_state = &pit->pit_state;
386 struct kvm *kvm = pit->kvm;
387 int ret, count;
388 struct kvm_kpit_channel_state *s;
389
390 addr &= KVM_PIT_CHANNEL_MASK;
391 s = &pit_state->channels[addr];
392
393 mutex_lock(&pit_state->lock);
394
395 if (s->status_latched) {
396 s->status_latched = 0;
397 ret = s->status;
398 } else if (s->count_latched) {
399 switch (s->count_latched) {
400 default:
401 case RW_STATE_LSB:
402 ret = s->latched_count & 0xff;
403 s->count_latched = 0;
404 break;
405 case RW_STATE_MSB:
406 ret = s->latched_count >> 8;
407 s->count_latched = 0;
408 break;
409 case RW_STATE_WORD0:
410 ret = s->latched_count & 0xff;
411 s->count_latched = RW_STATE_MSB;
412 break;
413 }
414 } else {
415 switch (s->read_state) {
416 default:
417 case RW_STATE_LSB:
418 count = pit_get_count(kvm, addr);
419 ret = count & 0xff;
420 break;
421 case RW_STATE_MSB:
422 count = pit_get_count(kvm, addr);
423 ret = (count >> 8) & 0xff;
424 break;
425 case RW_STATE_WORD0:
426 count = pit_get_count(kvm, addr);
427 ret = count & 0xff;
428 s->read_state = RW_STATE_WORD1;
429 break;
430 case RW_STATE_WORD1:
431 count = pit_get_count(kvm, addr);
432 ret = (count >> 8) & 0xff;
433 s->read_state = RW_STATE_WORD0;
434 break;
435 }
436 }
437
438 if (len > sizeof(ret))
439 len = sizeof(ret);
440 memcpy(data, (char *)&ret, len);
441
442 mutex_unlock(&pit_state->lock);
443}
444
445static int pit_in_range(struct kvm_io_device *this, gpa_t addr)
446{
447 return ((addr >= KVM_PIT_BASE_ADDRESS) &&
448 (addr < KVM_PIT_BASE_ADDRESS + KVM_PIT_MEM_LENGTH));
449}
450
451static void speaker_ioport_write(struct kvm_io_device *this,
452 gpa_t addr, int len, const void *data)
453{
454 struct kvm_pit *pit = (struct kvm_pit *)this->private;
455 struct kvm_kpit_state *pit_state = &pit->pit_state;
456 struct kvm *kvm = pit->kvm;
457 u32 val = *(u32 *) data;
458
459 mutex_lock(&pit_state->lock);
460 pit_state->speaker_data_on = (val >> 1) & 1;
461 pit_set_gate(kvm, 2, val & 1);
462 mutex_unlock(&pit_state->lock);
463}
464
465static void speaker_ioport_read(struct kvm_io_device *this,
466 gpa_t addr, int len, void *data)
467{
468 struct kvm_pit *pit = (struct kvm_pit *)this->private;
469 struct kvm_kpit_state *pit_state = &pit->pit_state;
470 struct kvm *kvm = pit->kvm;
471 unsigned int refresh_clock;
472 int ret;
473
474	/* The refresh clock toggles about every 15us; approximate that period as 2^14 ns. */
475 refresh_clock = ((unsigned int)ktime_to_ns(ktime_get()) >> 14) & 1;
476
477 mutex_lock(&pit_state->lock);
478 ret = ((pit_state->speaker_data_on << 1) | pit_get_gate(kvm, 2) |
479 (pit_get_out(kvm, 2) << 5) | (refresh_clock << 4));
480 if (len > sizeof(ret))
481 len = sizeof(ret);
482 memcpy(data, (char *)&ret, len);
483 mutex_unlock(&pit_state->lock);
484}
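/*
 * Bit layout assembled above for port 0x61: bit 0 = timer 2 gate,
 * bit 1 = speaker data enable, bit 4 = refresh-clock toggle,
 * bit 5 = timer 2 output.
 */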
485
486static int speaker_in_range(struct kvm_io_device *this, gpa_t addr)
487{
488 return (addr == KVM_SPEAKER_BASE_ADDRESS);
489}
490
491void kvm_pit_reset(struct kvm_pit *pit)
492{
493 int i;
494 struct kvm_kpit_channel_state *c;
495
496 mutex_lock(&pit->pit_state.lock);
497 for (i = 0; i < 3; i++) {
498 c = &pit->pit_state.channels[i];
499 c->mode = 0xff;
500 c->gate = (i != 2);
501 pit_load_count(pit->kvm, i, 0);
502 }
503 mutex_unlock(&pit->pit_state.lock);
504
505 atomic_set(&pit->pit_state.pit_timer.pending, 0);
506 pit->pit_state.inject_pending = 1;
507}
508
509struct kvm_pit *kvm_create_pit(struct kvm *kvm)
510{
511 struct kvm_pit *pit;
512 struct kvm_kpit_state *pit_state;
513
514 pit = kzalloc(sizeof(struct kvm_pit), GFP_KERNEL);
515 if (!pit)
516 return NULL;
517
518 mutex_init(&pit->pit_state.lock);
519 mutex_lock(&pit->pit_state.lock);
520
521 /* Initialize PIO device */
522 pit->dev.read = pit_ioport_read;
523 pit->dev.write = pit_ioport_write;
524 pit->dev.in_range = pit_in_range;
525 pit->dev.private = pit;
526 kvm_io_bus_register_dev(&kvm->pio_bus, &pit->dev);
527
528 pit->speaker_dev.read = speaker_ioport_read;
529 pit->speaker_dev.write = speaker_ioport_write;
530 pit->speaker_dev.in_range = speaker_in_range;
531 pit->speaker_dev.private = pit;
532 kvm_io_bus_register_dev(&kvm->pio_bus, &pit->speaker_dev);
533
534 kvm->arch.vpit = pit;
535 pit->kvm = kvm;
536
537 pit_state = &pit->pit_state;
538 pit_state->pit = pit;
539 hrtimer_init(&pit_state->pit_timer.timer,
540 CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
541 mutex_unlock(&pit->pit_state.lock);
542
543 kvm_pit_reset(pit);
544
545 return pit;
546}
547
548void kvm_free_pit(struct kvm *kvm)
549{
550 struct hrtimer *timer;
551
552 if (kvm->arch.vpit) {
553 mutex_lock(&kvm->arch.vpit->pit_state.lock);
554 timer = &kvm->arch.vpit->pit_state.pit_timer.timer;
555 hrtimer_cancel(timer);
556 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
557 kfree(kvm->arch.vpit);
558 }
559}
560
561void __inject_pit_timer_intr(struct kvm *kvm)
562{
563 mutex_lock(&kvm->lock);
564 kvm_ioapic_set_irq(kvm->arch.vioapic, 0, 1);
565 kvm_ioapic_set_irq(kvm->arch.vioapic, 0, 0);
566 kvm_pic_set_irq(pic_irqchip(kvm), 0, 1);
567 kvm_pic_set_irq(pic_irqchip(kvm), 0, 0);
568 mutex_unlock(&kvm->lock);
569}
570
571void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu)
572{
573 struct kvm_pit *pit = vcpu->kvm->arch.vpit;
574 struct kvm *kvm = vcpu->kvm;
575 struct kvm_kpit_state *ps;
576
577 if (vcpu && pit) {
578 ps = &pit->pit_state;
579
580		/* Try to inject pending interrupts when:
581		 * 1. an interrupt is pending, and
582		 * 2. the last one was accepted, or we have waited too long. */
583 if (atomic_read(&ps->pit_timer.pending) &&
584 (ps->inject_pending ||
585 (jiffies - ps->last_injected_time
586 >= KVM_MAX_PIT_INTR_INTERVAL))) {
587 ps->inject_pending = 0;
588 __inject_pit_timer_intr(kvm);
589 ps->last_injected_time = jiffies;
590 }
591 }
592}
593
594void kvm_pit_timer_intr_post(struct kvm_vcpu *vcpu, int vec)
595{
596 struct kvm_arch *arch = &vcpu->kvm->arch;
597 struct kvm_kpit_state *ps;
598
599 if (vcpu && arch->vpit) {
600 ps = &arch->vpit->pit_state;
601 if (atomic_read(&ps->pit_timer.pending) &&
602 (((arch->vpic->pics[0].imr & 1) == 0 &&
603 arch->vpic->pics[0].irq_base == vec) ||
604 (arch->vioapic->redirtbl[0].fields.vector == vec &&
605 arch->vioapic->redirtbl[0].fields.mask != 1))) {
606 ps->inject_pending = 1;
607 atomic_dec(&ps->pit_timer.pending);
608 ps->channels[0].count_load_time = ktime_get();
609 }
610 }
611}
diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h
new file mode 100644
index 000000000000..db25c2a6c8c4
--- /dev/null
+++ b/arch/x86/kvm/i8254.h
@@ -0,0 +1,63 @@
1#ifndef __I8254_H
2#define __I8254_H
3
4#include "iodev.h"
5
6struct kvm_kpit_timer {
7 struct hrtimer timer;
8 int irq;
9 s64 period; /* unit: ns */
10 s64 scheduled;
11 ktime_t last_update;
12 atomic_t pending;
13};
14
15struct kvm_kpit_channel_state {
16 u32 count; /* can be 65536 */
17 u16 latched_count;
18 u8 count_latched;
19 u8 status_latched;
20 u8 status;
21 u8 read_state;
22 u8 write_state;
23 u8 write_latch;
24 u8 rw_mode;
25 u8 mode;
26 u8 bcd; /* not supported */
27 u8 gate; /* timer start */
28 ktime_t count_load_time;
29};
30
31struct kvm_kpit_state {
32 struct kvm_kpit_channel_state channels[3];
33 struct kvm_kpit_timer pit_timer;
34 u32 speaker_data_on;
35 struct mutex lock;
36 struct kvm_pit *pit;
37	bool inject_pending; /* whether pending interrupts may be injected */
38 unsigned long last_injected_time;
39};
40
41struct kvm_pit {
42	unsigned long base_address;
43 struct kvm_io_device dev;
44 struct kvm_io_device speaker_dev;
45 struct kvm *kvm;
46 struct kvm_kpit_state pit_state;
47};
48
49#define KVM_PIT_BASE_ADDRESS 0x40
50#define KVM_SPEAKER_BASE_ADDRESS 0x61
51#define KVM_PIT_MEM_LENGTH 4
52#define KVM_PIT_FREQ 1193181
53#define KVM_MAX_PIT_INTR_INTERVAL (HZ / 100)
54#define KVM_PIT_CHANNEL_MASK 0x3
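/*
 * KVM_PIT_FREQ is the canonical PIT input clock, 14.31818 MHz / 12
 * (about 1.193182 MHz, truncated to 1193181 here).
 */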
55
56void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu);
57void kvm_pit_timer_intr_post(struct kvm_vcpu *vcpu, int vec);
58void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val);
59struct kvm_pit *kvm_create_pit(struct kvm *kvm);
60void kvm_free_pit(struct kvm *kvm);
61void kvm_pit_reset(struct kvm_pit *pit);
62
63#endif
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index e5714759e97f..ce1f583459b1 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -23,6 +23,22 @@
23#include <linux/kvm_host.h> 23#include <linux/kvm_host.h>
24 24
25#include "irq.h" 25#include "irq.h"
26#include "i8254.h"
27
28/*
29 * check if there are pending timer events
30 * to be processed.
31 */
32int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
33{
34 int ret;
35
36 ret = pit_has_pending_timer(vcpu);
37 ret |= apic_has_pending_timer(vcpu);
38
39 return ret;
40}
41EXPORT_SYMBOL(kvm_cpu_has_pending_timer);
26 42
27/* 43/*
28 * check if there is pending interrupt without 44 * check if there is pending interrupt without
@@ -66,6 +82,7 @@ EXPORT_SYMBOL_GPL(kvm_cpu_get_interrupt);
66void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu) 82void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu)
67{ 83{
68 kvm_inject_apic_timer_irqs(vcpu); 84 kvm_inject_apic_timer_irqs(vcpu);
85 kvm_inject_pit_timer_irqs(vcpu);
69 /* TODO: PIT, RTC etc. */ 86 /* TODO: PIT, RTC etc. */
70} 87}
71EXPORT_SYMBOL_GPL(kvm_inject_pending_timer_irqs); 88EXPORT_SYMBOL_GPL(kvm_inject_pending_timer_irqs);
@@ -73,6 +90,7 @@ EXPORT_SYMBOL_GPL(kvm_inject_pending_timer_irqs);
73void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec) 90void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec)
74{ 91{
75 kvm_apic_timer_intr_post(vcpu, vec); 92 kvm_apic_timer_intr_post(vcpu, vec);
93 kvm_pit_timer_intr_post(vcpu, vec);
76 /* TODO: PIT, RTC etc. */ 94 /* TODO: PIT, RTC etc. */
77} 95}
78EXPORT_SYMBOL_GPL(kvm_timer_intr_post); 96EXPORT_SYMBOL_GPL(kvm_timer_intr_post);
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
index fa5ed5d59b5d..1802134b836f 100644
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -85,4 +85,7 @@ void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu);
85void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu); 85void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu);
86void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu); 86void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu);
87 87
88int pit_has_pending_timer(struct kvm_vcpu *vcpu);
89int apic_has_pending_timer(struct kvm_vcpu *vcpu);
90
88#endif 91#endif
diff --git a/arch/x86/kvm/kvm_svm.h b/arch/x86/kvm/kvm_svm.h
index ecdfe97e4635..65ef0fc2c036 100644
--- a/arch/x86/kvm/kvm_svm.h
+++ b/arch/x86/kvm/kvm_svm.h
@@ -39,6 +39,8 @@ struct vcpu_svm {
39 unsigned long host_db_regs[NUM_DB_REGS]; 39 unsigned long host_db_regs[NUM_DB_REGS];
40 unsigned long host_dr6; 40 unsigned long host_dr6;
41 unsigned long host_dr7; 41 unsigned long host_dr7;
42
43 u32 *msrpm;
42}; 44};
43 45
44#endif 46#endif
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 68a6b1511934..57ac4e4c556a 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -338,10 +338,10 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
338 } else 338 } else
339 apic_clear_vector(vector, apic->regs + APIC_TMR); 339 apic_clear_vector(vector, apic->regs + APIC_TMR);
340 340
341 if (vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE) 341 if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
342 kvm_vcpu_kick(vcpu); 342 kvm_vcpu_kick(vcpu);
343 else if (vcpu->arch.mp_state == VCPU_MP_STATE_HALTED) { 343 else if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) {
344 vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE; 344 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
345 if (waitqueue_active(&vcpu->wq)) 345 if (waitqueue_active(&vcpu->wq))
346 wake_up_interruptible(&vcpu->wq); 346 wake_up_interruptible(&vcpu->wq);
347 } 347 }
@@ -362,11 +362,11 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
362 362
363 case APIC_DM_INIT: 363 case APIC_DM_INIT:
364 if (level) { 364 if (level) {
365 if (vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE) 365 if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
366 printk(KERN_DEBUG 366 printk(KERN_DEBUG
367 "INIT on a runnable vcpu %d\n", 367 "INIT on a runnable vcpu %d\n",
368 vcpu->vcpu_id); 368 vcpu->vcpu_id);
369 vcpu->arch.mp_state = VCPU_MP_STATE_INIT_RECEIVED; 369 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
370 kvm_vcpu_kick(vcpu); 370 kvm_vcpu_kick(vcpu);
371 } else { 371 } else {
372 printk(KERN_DEBUG 372 printk(KERN_DEBUG
@@ -379,9 +379,9 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
379 case APIC_DM_STARTUP: 379 case APIC_DM_STARTUP:
380 printk(KERN_DEBUG "SIPI to vcpu %d vector 0x%02x\n", 380 printk(KERN_DEBUG "SIPI to vcpu %d vector 0x%02x\n",
381 vcpu->vcpu_id, vector); 381 vcpu->vcpu_id, vector);
382 if (vcpu->arch.mp_state == VCPU_MP_STATE_INIT_RECEIVED) { 382 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
383 vcpu->arch.sipi_vector = vector; 383 vcpu->arch.sipi_vector = vector;
384 vcpu->arch.mp_state = VCPU_MP_STATE_SIPI_RECEIVED; 384 vcpu->arch.mp_state = KVM_MP_STATE_SIPI_RECEIVED;
385 if (waitqueue_active(&vcpu->wq)) 385 if (waitqueue_active(&vcpu->wq))
386 wake_up_interruptible(&vcpu->wq); 386 wake_up_interruptible(&vcpu->wq);
387 } 387 }
@@ -658,7 +658,7 @@ static void start_apic_timer(struct kvm_lapic *apic)
658 apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016" 658 apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
659 PRIx64 ", " 659 PRIx64 ", "
660 "timer initial count 0x%x, period %lldns, " 660 "timer initial count 0x%x, period %lldns, "
661 "expire @ 0x%016" PRIx64 ".\n", __FUNCTION__, 661 "expire @ 0x%016" PRIx64 ".\n", __func__,
662 APIC_BUS_CYCLE_NS, ktime_to_ns(now), 662 APIC_BUS_CYCLE_NS, ktime_to_ns(now),
663 apic_get_reg(apic, APIC_TMICT), 663 apic_get_reg(apic, APIC_TMICT),
664 apic->timer.period, 664 apic->timer.period,
@@ -691,7 +691,7 @@ static void apic_mmio_write(struct kvm_io_device *this,
691 /* too common printing */ 691 /* too common printing */
692 if (offset != APIC_EOI) 692 if (offset != APIC_EOI)
693 apic_debug("%s: offset 0x%x with length 0x%x, and value is " 693 apic_debug("%s: offset 0x%x with length 0x%x, and value is "
694 "0x%x\n", __FUNCTION__, offset, len, val); 694 "0x%x\n", __func__, offset, len, val);
695 695
696 offset &= 0xff0; 696 offset &= 0xff0;
697 697
@@ -822,6 +822,7 @@ void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
822 apic_set_tpr(apic, ((cr8 & 0x0f) << 4) 822 apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
823 | (apic_get_reg(apic, APIC_TASKPRI) & 4)); 823 | (apic_get_reg(apic, APIC_TASKPRI) & 4));
824} 824}
825EXPORT_SYMBOL_GPL(kvm_lapic_set_tpr);
825 826
826u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu) 827u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
827{ 828{
@@ -869,7 +870,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
869 struct kvm_lapic *apic; 870 struct kvm_lapic *apic;
870 int i; 871 int i;
871 872
872 apic_debug("%s\n", __FUNCTION__); 873 apic_debug("%s\n", __func__);
873 874
874 ASSERT(vcpu); 875 ASSERT(vcpu);
875 apic = vcpu->arch.apic; 876 apic = vcpu->arch.apic;
@@ -907,7 +908,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
907 apic_update_ppr(apic); 908 apic_update_ppr(apic);
908 909
909 apic_debug(KERN_INFO "%s: vcpu=%p, id=%d, base_msr=" 910 apic_debug(KERN_INFO "%s: vcpu=%p, id=%d, base_msr="
910 "0x%016" PRIx64 ", base_address=0x%0lx.\n", __FUNCTION__, 911 "0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__,
911 vcpu, kvm_apic_id(apic), 912 vcpu, kvm_apic_id(apic),
912 vcpu->arch.apic_base, apic->base_address); 913 vcpu->arch.apic_base, apic->base_address);
913} 914}
@@ -940,7 +941,7 @@ static int __apic_timer_fn(struct kvm_lapic *apic)
940 941
941 atomic_inc(&apic->timer.pending); 942 atomic_inc(&apic->timer.pending);
942 if (waitqueue_active(q)) { 943 if (waitqueue_active(q)) {
943 apic->vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE; 944 apic->vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
944 wake_up_interruptible(q); 945 wake_up_interruptible(q);
945 } 946 }
946 if (apic_lvtt_period(apic)) { 947 if (apic_lvtt_period(apic)) {
@@ -952,6 +953,16 @@ static int __apic_timer_fn(struct kvm_lapic *apic)
952 return result; 953 return result;
953} 954}
954 955
956int apic_has_pending_timer(struct kvm_vcpu *vcpu)
957{
958 struct kvm_lapic *lapic = vcpu->arch.apic;
959
960 if (lapic)
961 return atomic_read(&lapic->timer.pending);
962
963 return 0;
964}
965
955static int __inject_apic_timer_irq(struct kvm_lapic *apic) 966static int __inject_apic_timer_irq(struct kvm_lapic *apic)
956{ 967{
957 int vector; 968 int vector;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index e55af12e11b7..2ad6f5481671 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -27,11 +27,22 @@
27#include <linux/highmem.h> 27#include <linux/highmem.h>
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/swap.h> 29#include <linux/swap.h>
30#include <linux/hugetlb.h>
31#include <linux/compiler.h>
30 32
31#include <asm/page.h> 33#include <asm/page.h>
32#include <asm/cmpxchg.h> 34#include <asm/cmpxchg.h>
33#include <asm/io.h> 35#include <asm/io.h>
34 36
37/*
38 * When set to true this enables Two-Dimensional Paging, where the
39 * hardware walks two page tables:
40 * 1. guest-virtual to guest-physical
41 * 2. and, while doing 1., guest-physical to host-physical
42 * If the hardware supports this we don't need to do shadow paging.
43 */
44bool tdp_enabled = false;
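/*
 * (With TDP the CPU resolves gva->gpa through the guest's own page
 * tables and gpa->hpa through a second, host-managed table, e.g. AMD
 * NPT or Intel EPT, so KVM need not maintain shadow page tables.)
 */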
45
35#undef MMU_DEBUG 46#undef MMU_DEBUG
36 47
37#undef AUDIT 48#undef AUDIT
@@ -101,8 +112,6 @@ static int dbg = 1;
101#define PT_FIRST_AVAIL_BITS_SHIFT 9 112#define PT_FIRST_AVAIL_BITS_SHIFT 9
102#define PT64_SECOND_AVAIL_BITS_SHIFT 52 113#define PT64_SECOND_AVAIL_BITS_SHIFT 52
103 114
104#define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
105
106#define VALID_PAGE(x) ((x) != INVALID_PAGE) 115#define VALID_PAGE(x) ((x) != INVALID_PAGE)
107 116
108#define PT64_LEVEL_BITS 9 117#define PT64_LEVEL_BITS 9
@@ -159,6 +168,13 @@ static int dbg = 1;
159#define ACC_USER_MASK PT_USER_MASK 168#define ACC_USER_MASK PT_USER_MASK
160#define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK) 169#define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
161 170
171struct kvm_pv_mmu_op_buffer {
172 void *ptr;
173 unsigned len;
174 unsigned processed;
175 char buf[512] __aligned(sizeof(long));
176};
177
162struct kvm_rmap_desc { 178struct kvm_rmap_desc {
163 u64 *shadow_ptes[RMAP_EXT]; 179 u64 *shadow_ptes[RMAP_EXT];
164 struct kvm_rmap_desc *more; 180 struct kvm_rmap_desc *more;
@@ -200,11 +216,15 @@ static int is_present_pte(unsigned long pte)
200 216
201static int is_shadow_present_pte(u64 pte) 217static int is_shadow_present_pte(u64 pte)
202{ 218{
203 pte &= ~PT_SHADOW_IO_MARK;
204 return pte != shadow_trap_nonpresent_pte 219 return pte != shadow_trap_nonpresent_pte
205 && pte != shadow_notrap_nonpresent_pte; 220 && pte != shadow_notrap_nonpresent_pte;
206} 221}
207 222
223static int is_large_pte(u64 pte)
224{
225 return pte & PT_PAGE_SIZE_MASK;
226}
227
208static int is_writeble_pte(unsigned long pte) 228static int is_writeble_pte(unsigned long pte)
209{ 229{
210 return pte & PT_WRITABLE_MASK; 230 return pte & PT_WRITABLE_MASK;
@@ -215,14 +235,14 @@ static int is_dirty_pte(unsigned long pte)
215 return pte & PT_DIRTY_MASK; 235 return pte & PT_DIRTY_MASK;
216} 236}
217 237
218static int is_io_pte(unsigned long pte) 238static int is_rmap_pte(u64 pte)
219{ 239{
220 return pte & PT_SHADOW_IO_MARK; 240 return is_shadow_present_pte(pte);
221} 241}
222 242
223static int is_rmap_pte(u64 pte) 243static pfn_t spte_to_pfn(u64 pte)
224{ 244{
225 return is_shadow_present_pte(pte); 245 return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
226} 246}
227 247
228static gfn_t pse36_gfn_delta(u32 gpte) 248static gfn_t pse36_gfn_delta(u32 gpte)
@@ -349,16 +369,100 @@ static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
349} 369}
350 370
351/* 371/*
372 * Return the pointer to the largepage write count for a given
373 * gfn, handling slots that are not large page aligned.
374 */
375static int *slot_largepage_idx(gfn_t gfn, struct kvm_memory_slot *slot)
376{
377 unsigned long idx;
378
379 idx = (gfn / KVM_PAGES_PER_HPAGE) -
380 (slot->base_gfn / KVM_PAGES_PER_HPAGE);
381 return &slot->lpage_info[idx].write_count;
382}
383
384static void account_shadowed(struct kvm *kvm, gfn_t gfn)
385{
386 int *write_count;
387
388 write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
389 *write_count += 1;
390 WARN_ON(*write_count > KVM_PAGES_PER_HPAGE);
391}
392
393static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
394{
395 int *write_count;
396
397 write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
398 *write_count -= 1;
399 WARN_ON(*write_count < 0);
400}
401
402static int has_wrprotected_page(struct kvm *kvm, gfn_t gfn)
403{
404 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
405 int *largepage_idx;
406
407 if (slot) {
408 largepage_idx = slot_largepage_idx(gfn, slot);
409 return *largepage_idx;
410 }
411
412 return 1;
413}
414
415static int host_largepage_backed(struct kvm *kvm, gfn_t gfn)
416{
417 struct vm_area_struct *vma;
418 unsigned long addr;
419
420 addr = gfn_to_hva(kvm, gfn);
421 if (kvm_is_error_hva(addr))
422 return 0;
423
424 vma = find_vma(current->mm, addr);
425 if (vma && is_vm_hugetlb_page(vma))
426 return 1;
427
428 return 0;
429}
430
431static int is_largepage_backed(struct kvm_vcpu *vcpu, gfn_t large_gfn)
432{
433 struct kvm_memory_slot *slot;
434
435 if (has_wrprotected_page(vcpu->kvm, large_gfn))
436 return 0;
437
438 if (!host_largepage_backed(vcpu->kvm, large_gfn))
439 return 0;
440
441 slot = gfn_to_memslot(vcpu->kvm, large_gfn);
442 if (slot && slot->dirty_bitmap)
443 return 0;
444
445 return 1;
446}
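/*
 * Summary of the checks above: a gfn may be mapped with a huge-page
 * spte only if no page in its range is shadowed/write-protected, the
 * backing host VMA really is a hugetlb mapping, and the memslot is not
 * being dirty-logged.
 */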
447
448/*
352 * Take gfn and return the reverse mapping to it. 449 * Take gfn and return the reverse mapping to it.
353 * Note: gfn must be unaliased before this function gets called 450 * Note: gfn must be unaliased before this function gets called
354 */ 451 */
355 452
356static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn) 453static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
357{ 454{
358 struct kvm_memory_slot *slot; 455 struct kvm_memory_slot *slot;
456 unsigned long idx;
359 457
360 slot = gfn_to_memslot(kvm, gfn); 458 slot = gfn_to_memslot(kvm, gfn);
361 return &slot->rmap[gfn - slot->base_gfn]; 459 if (!lpage)
460 return &slot->rmap[gfn - slot->base_gfn];
461
462 idx = (gfn / KVM_PAGES_PER_HPAGE) -
463 (slot->base_gfn / KVM_PAGES_PER_HPAGE);
464
465 return &slot->lpage_info[idx].rmap_pde;
362} 466}
363 467
364/* 468/*
@@ -370,7 +474,7 @@ static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn)
370 * If rmapp bit zero is one, (then rmap & ~1) points to a struct kvm_rmap_desc 474 * If rmapp bit zero is one, (then rmap & ~1) points to a struct kvm_rmap_desc
371 * containing more mappings. 475 * containing more mappings.
372 */ 476 */
373static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) 477static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
374{ 478{
375 struct kvm_mmu_page *sp; 479 struct kvm_mmu_page *sp;
376 struct kvm_rmap_desc *desc; 480 struct kvm_rmap_desc *desc;
@@ -382,7 +486,7 @@ static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
382 gfn = unalias_gfn(vcpu->kvm, gfn); 486 gfn = unalias_gfn(vcpu->kvm, gfn);
383 sp = page_header(__pa(spte)); 487 sp = page_header(__pa(spte));
384 sp->gfns[spte - sp->spt] = gfn; 488 sp->gfns[spte - sp->spt] = gfn;
385 rmapp = gfn_to_rmap(vcpu->kvm, gfn); 489 rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);
386 if (!*rmapp) { 490 if (!*rmapp) {
387 rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte); 491 rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
388 *rmapp = (unsigned long)spte; 492 *rmapp = (unsigned long)spte;
@@ -435,20 +539,21 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
435 struct kvm_rmap_desc *desc; 539 struct kvm_rmap_desc *desc;
436 struct kvm_rmap_desc *prev_desc; 540 struct kvm_rmap_desc *prev_desc;
437 struct kvm_mmu_page *sp; 541 struct kvm_mmu_page *sp;
438 struct page *page; 542 pfn_t pfn;
439 unsigned long *rmapp; 543 unsigned long *rmapp;
440 int i; 544 int i;
441 545
442 if (!is_rmap_pte(*spte)) 546 if (!is_rmap_pte(*spte))
443 return; 547 return;
444 sp = page_header(__pa(spte)); 548 sp = page_header(__pa(spte));
445 page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT); 549 pfn = spte_to_pfn(*spte);
446 mark_page_accessed(page); 550 if (*spte & PT_ACCESSED_MASK)
551 kvm_set_pfn_accessed(pfn);
447 if (is_writeble_pte(*spte)) 552 if (is_writeble_pte(*spte))
448 kvm_release_page_dirty(page); 553 kvm_release_pfn_dirty(pfn);
449 else 554 else
450 kvm_release_page_clean(page); 555 kvm_release_pfn_clean(pfn);
451 rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt]); 556 rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], is_large_pte(*spte));
452 if (!*rmapp) { 557 if (!*rmapp) {
453 printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte); 558 printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
454 BUG(); 559 BUG();
@@ -514,7 +619,7 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn)
514 int write_protected = 0; 619 int write_protected = 0;
515 620
516 gfn = unalias_gfn(kvm, gfn); 621 gfn = unalias_gfn(kvm, gfn);
517 rmapp = gfn_to_rmap(kvm, gfn); 622 rmapp = gfn_to_rmap(kvm, gfn, 0);
518 623
519 spte = rmap_next(kvm, rmapp, NULL); 624 spte = rmap_next(kvm, rmapp, NULL);
520 while (spte) { 625 while (spte) {
@@ -527,8 +632,35 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn)
527 } 632 }
528 spte = rmap_next(kvm, rmapp, spte); 633 spte = rmap_next(kvm, rmapp, spte);
529 } 634 }
635 if (write_protected) {
636 pfn_t pfn;
637
638 spte = rmap_next(kvm, rmapp, NULL);
639 pfn = spte_to_pfn(*spte);
640 kvm_set_pfn_dirty(pfn);
641 }
642
643 /* check for huge page mappings */
644 rmapp = gfn_to_rmap(kvm, gfn, 1);
645 spte = rmap_next(kvm, rmapp, NULL);
646 while (spte) {
647 BUG_ON(!spte);
648 BUG_ON(!(*spte & PT_PRESENT_MASK));
649 BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
650 pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
651 if (is_writeble_pte(*spte)) {
652 rmap_remove(kvm, spte);
653 --kvm->stat.lpages;
654 set_shadow_pte(spte, shadow_trap_nonpresent_pte);
655 write_protected = 1;
656 }
657 spte = rmap_next(kvm, rmapp, spte);
658 }
659
530 if (write_protected) 660 if (write_protected)
531 kvm_flush_remote_tlbs(kvm); 661 kvm_flush_remote_tlbs(kvm);
662
663 account_shadowed(kvm, gfn);
532} 664}
533 665
534#ifdef MMU_DEBUG 666#ifdef MMU_DEBUG
@@ -538,8 +670,8 @@ static int is_empty_shadow_page(u64 *spt)
538 u64 *end; 670 u64 *end;
539 671
540 for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++) 672 for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
541 if ((*pos & ~PT_SHADOW_IO_MARK) != shadow_trap_nonpresent_pte) { 673 if (*pos != shadow_trap_nonpresent_pte) {
542 printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__, 674 printk(KERN_ERR "%s: %p %llx\n", __func__,
543 pos, *pos); 675 pos, *pos);
544 return 0; 676 return 0;
545 } 677 }
@@ -559,7 +691,7 @@ static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
559 691
560static unsigned kvm_page_table_hashfn(gfn_t gfn) 692static unsigned kvm_page_table_hashfn(gfn_t gfn)
561{ 693{
562 return gfn; 694 return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
563} 695}
564 696
565static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, 697static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
@@ -662,13 +794,14 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
662 struct kvm_mmu_page *sp; 794 struct kvm_mmu_page *sp;
663 struct hlist_node *node; 795 struct hlist_node *node;
664 796
665 pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn); 797 pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
666 index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES; 798 index = kvm_page_table_hashfn(gfn);
667 bucket = &kvm->arch.mmu_page_hash[index]; 799 bucket = &kvm->arch.mmu_page_hash[index];
668 hlist_for_each_entry(sp, node, bucket, hash_link) 800 hlist_for_each_entry(sp, node, bucket, hash_link)
669 if (sp->gfn == gfn && !sp->role.metaphysical) { 801 if (sp->gfn == gfn && !sp->role.metaphysical
802 && !sp->role.invalid) {
670 pgprintk("%s: found role %x\n", 803 pgprintk("%s: found role %x\n",
671 __FUNCTION__, sp->role.word); 804 __func__, sp->role.word);
672 return sp; 805 return sp;
673 } 806 }
674 return NULL; 807 return NULL;
@@ -699,27 +832,27 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
699 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1; 832 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
700 role.quadrant = quadrant; 833 role.quadrant = quadrant;
701 } 834 }
702 pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__, 835 pgprintk("%s: looking gfn %lx role %x\n", __func__,
703 gfn, role.word); 836 gfn, role.word);
704 index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES; 837 index = kvm_page_table_hashfn(gfn);
705 bucket = &vcpu->kvm->arch.mmu_page_hash[index]; 838 bucket = &vcpu->kvm->arch.mmu_page_hash[index];
706 hlist_for_each_entry(sp, node, bucket, hash_link) 839 hlist_for_each_entry(sp, node, bucket, hash_link)
707 if (sp->gfn == gfn && sp->role.word == role.word) { 840 if (sp->gfn == gfn && sp->role.word == role.word) {
708 mmu_page_add_parent_pte(vcpu, sp, parent_pte); 841 mmu_page_add_parent_pte(vcpu, sp, parent_pte);
709 pgprintk("%s: found\n", __FUNCTION__); 842 pgprintk("%s: found\n", __func__);
710 return sp; 843 return sp;
711 } 844 }
712 ++vcpu->kvm->stat.mmu_cache_miss; 845 ++vcpu->kvm->stat.mmu_cache_miss;
713 sp = kvm_mmu_alloc_page(vcpu, parent_pte); 846 sp = kvm_mmu_alloc_page(vcpu, parent_pte);
714 if (!sp) 847 if (!sp)
715 return sp; 848 return sp;
716 pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word); 849 pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
717 sp->gfn = gfn; 850 sp->gfn = gfn;
718 sp->role = role; 851 sp->role = role;
719 hlist_add_head(&sp->hash_link, bucket); 852 hlist_add_head(&sp->hash_link, bucket);
720 vcpu->arch.mmu.prefetch_page(vcpu, sp);
721 if (!metaphysical) 853 if (!metaphysical)
722 rmap_write_protect(vcpu->kvm, gfn); 854 rmap_write_protect(vcpu->kvm, gfn);
855 vcpu->arch.mmu.prefetch_page(vcpu, sp);
723 return sp; 856 return sp;
724} 857}
725 858
@@ -745,11 +878,17 @@ static void kvm_mmu_page_unlink_children(struct kvm *kvm,
745 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) { 878 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
746 ent = pt[i]; 879 ent = pt[i];
747 880
881 if (is_shadow_present_pte(ent)) {
882 if (!is_large_pte(ent)) {
883 ent &= PT64_BASE_ADDR_MASK;
884 mmu_page_remove_parent_pte(page_header(ent),
885 &pt[i]);
886 } else {
887 --kvm->stat.lpages;
888 rmap_remove(kvm, &pt[i]);
889 }
890 }
748 pt[i] = shadow_trap_nonpresent_pte; 891 pt[i] = shadow_trap_nonpresent_pte;
749 if (!is_shadow_present_pte(ent))
750 continue;
751 ent &= PT64_BASE_ADDR_MASK;
752 mmu_page_remove_parent_pte(page_header(ent), &pt[i]);
753 } 892 }
754 kvm_flush_remote_tlbs(kvm); 893 kvm_flush_remote_tlbs(kvm);
755} 894}
@@ -789,10 +928,15 @@ static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
789 } 928 }
790 kvm_mmu_page_unlink_children(kvm, sp); 929 kvm_mmu_page_unlink_children(kvm, sp);
791 if (!sp->root_count) { 930 if (!sp->root_count) {
931 if (!sp->role.metaphysical)
932 unaccount_shadowed(kvm, sp->gfn);
792 hlist_del(&sp->hash_link); 933 hlist_del(&sp->hash_link);
793 kvm_mmu_free_page(kvm, sp); 934 kvm_mmu_free_page(kvm, sp);
794 } else 935 } else {
795 list_move(&sp->link, &kvm->arch.active_mmu_pages); 936 list_move(&sp->link, &kvm->arch.active_mmu_pages);
937 sp->role.invalid = 1;
938 kvm_reload_remote_mmus(kvm);
939 }
796 kvm_mmu_reset_last_pte_updated(kvm); 940 kvm_mmu_reset_last_pte_updated(kvm);
797} 941}
798 942
@@ -838,13 +982,13 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
838 struct hlist_node *node, *n; 982 struct hlist_node *node, *n;
839 int r; 983 int r;
840 984
841 pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn); 985 pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
842 r = 0; 986 r = 0;
843 index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES; 987 index = kvm_page_table_hashfn(gfn);
844 bucket = &kvm->arch.mmu_page_hash[index]; 988 bucket = &kvm->arch.mmu_page_hash[index];
845 hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) 989 hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
846 if (sp->gfn == gfn && !sp->role.metaphysical) { 990 if (sp->gfn == gfn && !sp->role.metaphysical) {
847 pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn, 991 pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
848 sp->role.word); 992 sp->role.word);
849 kvm_mmu_zap_page(kvm, sp); 993 kvm_mmu_zap_page(kvm, sp);
850 r = 1; 994 r = 1;
@@ -857,7 +1001,7 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
857 struct kvm_mmu_page *sp; 1001 struct kvm_mmu_page *sp;
858 1002
859 while ((sp = kvm_mmu_lookup_page(kvm, gfn)) != NULL) { 1003 while ((sp = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
860 pgprintk("%s: zap %lx %x\n", __FUNCTION__, gfn, sp->role.word); 1004 pgprintk("%s: zap %lx %x\n", __func__, gfn, sp->role.word);
861 kvm_mmu_zap_page(kvm, sp); 1005 kvm_mmu_zap_page(kvm, sp);
862 } 1006 }
863} 1007}
@@ -889,26 +1033,39 @@ struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
889static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte, 1033static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
890 unsigned pt_access, unsigned pte_access, 1034 unsigned pt_access, unsigned pte_access,
891 int user_fault, int write_fault, int dirty, 1035 int user_fault, int write_fault, int dirty,
892 int *ptwrite, gfn_t gfn, struct page *page) 1036 int *ptwrite, int largepage, gfn_t gfn,
1037 pfn_t pfn, bool speculative)
893{ 1038{
894 u64 spte; 1039 u64 spte;
895 int was_rmapped = 0; 1040 int was_rmapped = 0;
896 int was_writeble = is_writeble_pte(*shadow_pte); 1041 int was_writeble = is_writeble_pte(*shadow_pte);
897 hfn_t host_pfn = (*shadow_pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
898 1042
899 pgprintk("%s: spte %llx access %x write_fault %d" 1043 pgprintk("%s: spte %llx access %x write_fault %d"
900 " user_fault %d gfn %lx\n", 1044 " user_fault %d gfn %lx\n",
901 __FUNCTION__, *shadow_pte, pt_access, 1045 __func__, *shadow_pte, pt_access,
902 write_fault, user_fault, gfn); 1046 write_fault, user_fault, gfn);
903 1047
904 if (is_rmap_pte(*shadow_pte)) { 1048 if (is_rmap_pte(*shadow_pte)) {
905 if (host_pfn != page_to_pfn(page)) { 1049 /*
1050 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
1051 * the parent of the now unreachable PTE.
1052 */
1053 if (largepage && !is_large_pte(*shadow_pte)) {
1054 struct kvm_mmu_page *child;
1055 u64 pte = *shadow_pte;
1056
1057 child = page_header(pte & PT64_BASE_ADDR_MASK);
1058 mmu_page_remove_parent_pte(child, shadow_pte);
1059 } else if (pfn != spte_to_pfn(*shadow_pte)) {
906 pgprintk("hfn old %lx new %lx\n", 1060 pgprintk("hfn old %lx new %lx\n",
907 host_pfn, page_to_pfn(page)); 1061 spte_to_pfn(*shadow_pte), pfn);
908 rmap_remove(vcpu->kvm, shadow_pte); 1062 rmap_remove(vcpu->kvm, shadow_pte);
1063 } else {
1064 if (largepage)
1065 was_rmapped = is_large_pte(*shadow_pte);
1066 else
1067 was_rmapped = 1;
909 } 1068 }
910 else
911 was_rmapped = 1;
912 } 1069 }
913 1070
914 /* 1071 /*
@@ -917,6 +1074,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
917 * demand paging). 1074 * demand paging).
918 */ 1075 */
919 spte = PT_PRESENT_MASK | PT_DIRTY_MASK; 1076 spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
1077 if (!speculative)
1078 pte_access |= PT_ACCESSED_MASK;
920 if (!dirty) 1079 if (!dirty)
921 pte_access &= ~ACC_WRITE_MASK; 1080 pte_access &= ~ACC_WRITE_MASK;
922 if (!(pte_access & ACC_EXEC_MASK)) 1081 if (!(pte_access & ACC_EXEC_MASK))
@@ -925,15 +1084,10 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
925 spte |= PT_PRESENT_MASK; 1084 spte |= PT_PRESENT_MASK;
926 if (pte_access & ACC_USER_MASK) 1085 if (pte_access & ACC_USER_MASK)
927 spte |= PT_USER_MASK; 1086 spte |= PT_USER_MASK;
1087 if (largepage)
1088 spte |= PT_PAGE_SIZE_MASK;
928 1089
929 if (is_error_page(page)) { 1090 spte |= (u64)pfn << PAGE_SHIFT;
930 set_shadow_pte(shadow_pte,
931 shadow_trap_nonpresent_pte | PT_SHADOW_IO_MARK);
932 kvm_release_page_clean(page);
933 return;
934 }
935
936 spte |= page_to_phys(page);
937 1091
938 if ((pte_access & ACC_WRITE_MASK) 1092 if ((pte_access & ACC_WRITE_MASK)
939 || (write_fault && !is_write_protection(vcpu) && !user_fault)) { 1093 || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
@@ -946,9 +1100,10 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
946 } 1100 }
947 1101
948 shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn); 1102 shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
949 if (shadow) { 1103 if (shadow ||
1104 (largepage && has_wrprotected_page(vcpu->kvm, gfn))) {
950 pgprintk("%s: found shadow page for %lx, marking ro\n", 1105 pgprintk("%s: found shadow page for %lx, marking ro\n",
951 __FUNCTION__, gfn); 1106 __func__, gfn);
952 pte_access &= ~ACC_WRITE_MASK; 1107 pte_access &= ~ACC_WRITE_MASK;
953 if (is_writeble_pte(spte)) { 1108 if (is_writeble_pte(spte)) {
954 spte &= ~PT_WRITABLE_MASK; 1109 spte &= ~PT_WRITABLE_MASK;
@@ -964,18 +1119,25 @@ unshadowed:
964 if (pte_access & ACC_WRITE_MASK) 1119 if (pte_access & ACC_WRITE_MASK)
965 mark_page_dirty(vcpu->kvm, gfn); 1120 mark_page_dirty(vcpu->kvm, gfn);
966 1121
967 pgprintk("%s: setting spte %llx\n", __FUNCTION__, spte); 1122 pgprintk("%s: setting spte %llx\n", __func__, spte);
1123 pgprintk("instantiating %s PTE (%s) at %d (%llx) addr %llx\n",
1124 (spte&PT_PAGE_SIZE_MASK)? "2MB" : "4kB",
1125 (spte&PT_WRITABLE_MASK)?"RW":"R", gfn, spte, shadow_pte);
968 set_shadow_pte(shadow_pte, spte); 1126 set_shadow_pte(shadow_pte, spte);
1127 if (!was_rmapped && (spte & PT_PAGE_SIZE_MASK)
1128 && (spte & PT_PRESENT_MASK))
1129 ++vcpu->kvm->stat.lpages;
1130
969 page_header_update_slot(vcpu->kvm, shadow_pte, gfn); 1131 page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
970 if (!was_rmapped) { 1132 if (!was_rmapped) {
971 rmap_add(vcpu, shadow_pte, gfn); 1133 rmap_add(vcpu, shadow_pte, gfn, largepage);
972 if (!is_rmap_pte(*shadow_pte)) 1134 if (!is_rmap_pte(*shadow_pte))
973 kvm_release_page_clean(page); 1135 kvm_release_pfn_clean(pfn);
974 } else { 1136 } else {
975 if (was_writeble) 1137 if (was_writeble)
976 kvm_release_page_dirty(page); 1138 kvm_release_pfn_dirty(pfn);
977 else 1139 else
978 kvm_release_page_clean(page); 1140 kvm_release_pfn_clean(pfn);
979 } 1141 }
980 if (!ptwrite || !*ptwrite) 1142 if (!ptwrite || !*ptwrite)
981 vcpu->arch.last_pte_updated = shadow_pte; 1143 vcpu->arch.last_pte_updated = shadow_pte;
@@ -985,10 +1147,10 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
985{ 1147{
986} 1148}
987 1149
988static int __nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, 1150static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
989 gfn_t gfn, struct page *page) 1151 int largepage, gfn_t gfn, pfn_t pfn,
1152 int level)
990{ 1153{
991 int level = PT32E_ROOT_LEVEL;
992 hpa_t table_addr = vcpu->arch.mmu.root_hpa; 1154 hpa_t table_addr = vcpu->arch.mmu.root_hpa;
993 int pt_write = 0; 1155 int pt_write = 0;
994 1156
@@ -1001,8 +1163,14 @@ static int __nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write,
1001 1163
1002 if (level == 1) { 1164 if (level == 1) {
1003 mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL, 1165 mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
1004 0, write, 1, &pt_write, gfn, page); 1166 0, write, 1, &pt_write, 0, gfn, pfn, false);
1005 return pt_write || is_io_pte(table[index]); 1167 return pt_write;
1168 }
1169
1170 if (largepage && level == 2) {
1171 mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
1172 0, write, 1, &pt_write, 1, gfn, pfn, false);
1173 return pt_write;
1006 } 1174 }
1007 1175
1008 if (table[index] == shadow_trap_nonpresent_pte) { 1176 if (table[index] == shadow_trap_nonpresent_pte) {
@@ -1016,7 +1184,7 @@ static int __nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write,
1016 1, ACC_ALL, &table[index]); 1184 1, ACC_ALL, &table[index]);
1017 if (!new_table) { 1185 if (!new_table) {
1018 pgprintk("nonpaging_map: ENOMEM\n"); 1186 pgprintk("nonpaging_map: ENOMEM\n");
1019 kvm_release_page_clean(page); 1187 kvm_release_pfn_clean(pfn);
1020 return -ENOMEM; 1188 return -ENOMEM;
1021 } 1189 }
1022 1190
@@ -1030,21 +1198,30 @@ static int __nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write,
1030static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn) 1198static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
1031{ 1199{
1032 int r; 1200 int r;
1033 1201 int largepage = 0;
1034 struct page *page; 1202 pfn_t pfn;
1035
1036 down_read(&vcpu->kvm->slots_lock);
1037 1203
1038 down_read(&current->mm->mmap_sem); 1204 down_read(&current->mm->mmap_sem);
1039 page = gfn_to_page(vcpu->kvm, gfn); 1205 if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
1206 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
1207 largepage = 1;
1208 }
1209
1210 pfn = gfn_to_pfn(vcpu->kvm, gfn);
1040 up_read(&current->mm->mmap_sem); 1211 up_read(&current->mm->mmap_sem);
1041 1212
1213 /* mmio */
1214 if (is_error_pfn(pfn)) {
1215 kvm_release_pfn_clean(pfn);
1216 return 1;
1217 }
1218
1042 spin_lock(&vcpu->kvm->mmu_lock); 1219 spin_lock(&vcpu->kvm->mmu_lock);
1043 kvm_mmu_free_some_pages(vcpu); 1220 kvm_mmu_free_some_pages(vcpu);
1044 r = __nonpaging_map(vcpu, v, write, gfn, page); 1221 r = __direct_map(vcpu, v, write, largepage, gfn, pfn,
1222 PT32E_ROOT_LEVEL);
1045 spin_unlock(&vcpu->kvm->mmu_lock); 1223 spin_unlock(&vcpu->kvm->mmu_lock);
1046 1224
1047 up_read(&vcpu->kvm->slots_lock);
1048 1225
1049 return r; 1226 return r;
1050} 1227}
@@ -1073,6 +1250,8 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
1073 1250
1074 sp = page_header(root); 1251 sp = page_header(root);
1075 --sp->root_count; 1252 --sp->root_count;
1253 if (!sp->root_count && sp->role.invalid)
1254 kvm_mmu_zap_page(vcpu->kvm, sp);
1076 vcpu->arch.mmu.root_hpa = INVALID_PAGE; 1255 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
1077 spin_unlock(&vcpu->kvm->mmu_lock); 1256 spin_unlock(&vcpu->kvm->mmu_lock);
1078 return; 1257 return;
@@ -1085,6 +1264,8 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
1085 root &= PT64_BASE_ADDR_MASK; 1264 root &= PT64_BASE_ADDR_MASK;
1086 sp = page_header(root); 1265 sp = page_header(root);
1087 --sp->root_count; 1266 --sp->root_count;
1267 if (!sp->root_count && sp->role.invalid)
1268 kvm_mmu_zap_page(vcpu->kvm, sp);
1088 } 1269 }
1089 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE; 1270 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
1090 } 1271 }
@@ -1097,6 +1278,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
1097 int i; 1278 int i;
1098 gfn_t root_gfn; 1279 gfn_t root_gfn;
1099 struct kvm_mmu_page *sp; 1280 struct kvm_mmu_page *sp;
1281 int metaphysical = 0;
1100 1282
1101 root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT; 1283 root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
1102 1284
@@ -1105,14 +1287,20 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
1105 hpa_t root = vcpu->arch.mmu.root_hpa; 1287 hpa_t root = vcpu->arch.mmu.root_hpa;
1106 1288
1107 ASSERT(!VALID_PAGE(root)); 1289 ASSERT(!VALID_PAGE(root));
1290 if (tdp_enabled)
1291 metaphysical = 1;
1108 sp = kvm_mmu_get_page(vcpu, root_gfn, 0, 1292 sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
1109 PT64_ROOT_LEVEL, 0, ACC_ALL, NULL); 1293 PT64_ROOT_LEVEL, metaphysical,
1294 ACC_ALL, NULL);
1110 root = __pa(sp->spt); 1295 root = __pa(sp->spt);
1111 ++sp->root_count; 1296 ++sp->root_count;
1112 vcpu->arch.mmu.root_hpa = root; 1297 vcpu->arch.mmu.root_hpa = root;
1113 return; 1298 return;
1114 } 1299 }
1115#endif 1300#endif
1301 metaphysical = !is_paging(vcpu);
1302 if (tdp_enabled)
1303 metaphysical = 1;
1116 for (i = 0; i < 4; ++i) { 1304 for (i = 0; i < 4; ++i) {
1117 hpa_t root = vcpu->arch.mmu.pae_root[i]; 1305 hpa_t root = vcpu->arch.mmu.pae_root[i];
1118 1306
@@ -1126,7 +1314,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
1126 } else if (vcpu->arch.mmu.root_level == 0) 1314 } else if (vcpu->arch.mmu.root_level == 0)
1127 root_gfn = 0; 1315 root_gfn = 0;
1128 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, 1316 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
1129 PT32_ROOT_LEVEL, !is_paging(vcpu), 1317 PT32_ROOT_LEVEL, metaphysical,
1130 ACC_ALL, NULL); 1318 ACC_ALL, NULL);
1131 root = __pa(sp->spt); 1319 root = __pa(sp->spt);
1132 ++sp->root_count; 1320 ++sp->root_count;
@@ -1146,7 +1334,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
1146 gfn_t gfn; 1334 gfn_t gfn;
1147 int r; 1335 int r;
1148 1336
1149 pgprintk("%s: gva %lx error %x\n", __FUNCTION__, gva, error_code); 1337 pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
1150 r = mmu_topup_memory_caches(vcpu); 1338 r = mmu_topup_memory_caches(vcpu);
1151 if (r) 1339 if (r)
1152 return r; 1340 return r;
@@ -1160,6 +1348,41 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
1160 error_code & PFERR_WRITE_MASK, gfn); 1348 error_code & PFERR_WRITE_MASK, gfn);
1161} 1349}
1162 1350
1351static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
1352 u32 error_code)
1353{
1354 pfn_t pfn;
1355 int r;
1356 int largepage = 0;
1357 gfn_t gfn = gpa >> PAGE_SHIFT;
1358
1359 ASSERT(vcpu);
1360 ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
1361
1362 r = mmu_topup_memory_caches(vcpu);
1363 if (r)
1364 return r;
1365
1366 down_read(&current->mm->mmap_sem);
1367 if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
1368 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
1369 largepage = 1;
1370 }
1371 pfn = gfn_to_pfn(vcpu->kvm, gfn);
1372 up_read(&current->mm->mmap_sem);
1373 if (is_error_pfn(pfn)) {
1374 kvm_release_pfn_clean(pfn);
1375 return 1;
1376 }
1377 spin_lock(&vcpu->kvm->mmu_lock);
1378 kvm_mmu_free_some_pages(vcpu);
1379 r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
1380 largepage, gfn, pfn, TDP_ROOT_LEVEL);
1381 spin_unlock(&vcpu->kvm->mmu_lock);
1382
1383 return r;
1384}
1385
1163static void nonpaging_free(struct kvm_vcpu *vcpu) 1386static void nonpaging_free(struct kvm_vcpu *vcpu)
1164{ 1387{
1165 mmu_free_roots(vcpu); 1388 mmu_free_roots(vcpu);
@@ -1188,7 +1411,7 @@ void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
1188 1411
1189static void paging_new_cr3(struct kvm_vcpu *vcpu) 1412static void paging_new_cr3(struct kvm_vcpu *vcpu)
1190{ 1413{
1191 pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->arch.cr3); 1414 pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
1192 mmu_free_roots(vcpu); 1415 mmu_free_roots(vcpu);
1193} 1416}
1194 1417
@@ -1253,7 +1476,35 @@ static int paging32E_init_context(struct kvm_vcpu *vcpu)
1253 return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL); 1476 return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
1254} 1477}
1255 1478
1256static int init_kvm_mmu(struct kvm_vcpu *vcpu) 1479static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
1480{
1481 struct kvm_mmu *context = &vcpu->arch.mmu;
1482
1483 context->new_cr3 = nonpaging_new_cr3;
1484 context->page_fault = tdp_page_fault;
1485 context->free = nonpaging_free;
1486 context->prefetch_page = nonpaging_prefetch_page;
1487 context->shadow_root_level = TDP_ROOT_LEVEL;
1488 context->root_hpa = INVALID_PAGE;
1489
1490 if (!is_paging(vcpu)) {
1491 context->gva_to_gpa = nonpaging_gva_to_gpa;
1492 context->root_level = 0;
1493 } else if (is_long_mode(vcpu)) {
1494 context->gva_to_gpa = paging64_gva_to_gpa;
1495 context->root_level = PT64_ROOT_LEVEL;
1496 } else if (is_pae(vcpu)) {
1497 context->gva_to_gpa = paging64_gva_to_gpa;
1498 context->root_level = PT32E_ROOT_LEVEL;
1499 } else {
1500 context->gva_to_gpa = paging32_gva_to_gpa;
1501 context->root_level = PT32_ROOT_LEVEL;
1502 }
1503
1504 return 0;
1505}
1506
1507static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
1257{ 1508{
1258 ASSERT(vcpu); 1509 ASSERT(vcpu);
1259 ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); 1510 ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
@@ -1268,6 +1519,16 @@ static int init_kvm_mmu(struct kvm_vcpu *vcpu)
1268 return paging32_init_context(vcpu); 1519 return paging32_init_context(vcpu);
1269} 1520}
1270 1521
1522static int init_kvm_mmu(struct kvm_vcpu *vcpu)
1523{
1524 vcpu->arch.update_pte.pfn = bad_pfn;
1525
1526 if (tdp_enabled)
1527 return init_kvm_tdp_mmu(vcpu);
1528 else
1529 return init_kvm_softmmu(vcpu);
1530}
1531
1271static void destroy_kvm_mmu(struct kvm_vcpu *vcpu) 1532static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
1272{ 1533{
1273 ASSERT(vcpu); 1534 ASSERT(vcpu);
@@ -1316,7 +1577,8 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
1316 1577
1317 pte = *spte; 1578 pte = *spte;
1318 if (is_shadow_present_pte(pte)) { 1579 if (is_shadow_present_pte(pte)) {
1319 if (sp->role.level == PT_PAGE_TABLE_LEVEL) 1580 if (sp->role.level == PT_PAGE_TABLE_LEVEL ||
1581 is_large_pte(pte))
1320 rmap_remove(vcpu->kvm, spte); 1582 rmap_remove(vcpu->kvm, spte);
1321 else { 1583 else {
1322 child = page_header(pte & PT64_BASE_ADDR_MASK); 1584 child = page_header(pte & PT64_BASE_ADDR_MASK);
@@ -1324,24 +1586,26 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
1324 } 1586 }
1325 } 1587 }
1326 set_shadow_pte(spte, shadow_trap_nonpresent_pte); 1588 set_shadow_pte(spte, shadow_trap_nonpresent_pte);
1589 if (is_large_pte(pte))
1590 --vcpu->kvm->stat.lpages;
1327} 1591}
1328 1592
1329static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu, 1593static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
1330 struct kvm_mmu_page *sp, 1594 struct kvm_mmu_page *sp,
1331 u64 *spte, 1595 u64 *spte,
1332 const void *new, int bytes, 1596 const void *new)
1333 int offset_in_pte)
1334{ 1597{
1335 if (sp->role.level != PT_PAGE_TABLE_LEVEL) { 1598 if ((sp->role.level != PT_PAGE_TABLE_LEVEL)
1599 && !vcpu->arch.update_pte.largepage) {
1336 ++vcpu->kvm->stat.mmu_pde_zapped; 1600 ++vcpu->kvm->stat.mmu_pde_zapped;
1337 return; 1601 return;
1338 } 1602 }
1339 1603
1340 ++vcpu->kvm->stat.mmu_pte_updated; 1604 ++vcpu->kvm->stat.mmu_pte_updated;
1341 if (sp->role.glevels == PT32_ROOT_LEVEL) 1605 if (sp->role.glevels == PT32_ROOT_LEVEL)
1342 paging32_update_pte(vcpu, sp, spte, new, bytes, offset_in_pte); 1606 paging32_update_pte(vcpu, sp, spte, new);
1343 else 1607 else
1344 paging64_update_pte(vcpu, sp, spte, new, bytes, offset_in_pte); 1608 paging64_update_pte(vcpu, sp, spte, new);
1345} 1609}
1346 1610
1347static bool need_remote_flush(u64 old, u64 new) 1611static bool need_remote_flush(u64 old, u64 new)
@@ -1378,7 +1642,9 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
1378 gfn_t gfn; 1642 gfn_t gfn;
1379 int r; 1643 int r;
1380 u64 gpte = 0; 1644 u64 gpte = 0;
1381 struct page *page; 1645 pfn_t pfn;
1646
1647 vcpu->arch.update_pte.largepage = 0;
1382 1648
1383 if (bytes != 4 && bytes != 8) 1649 if (bytes != 4 && bytes != 8)
1384 return; 1650 return;
@@ -1408,11 +1674,19 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
1408 gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT; 1674 gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
1409 1675
1410 down_read(&current->mm->mmap_sem); 1676 down_read(&current->mm->mmap_sem);
1411 page = gfn_to_page(vcpu->kvm, gfn); 1677 if (is_large_pte(gpte) && is_largepage_backed(vcpu, gfn)) {
1678 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
1679 vcpu->arch.update_pte.largepage = 1;
1680 }
1681 pfn = gfn_to_pfn(vcpu->kvm, gfn);
1412 up_read(&current->mm->mmap_sem); 1682 up_read(&current->mm->mmap_sem);
1413 1683
1684 if (is_error_pfn(pfn)) {
1685 kvm_release_pfn_clean(pfn);
1686 return;
1687 }
1414 vcpu->arch.update_pte.gfn = gfn; 1688 vcpu->arch.update_pte.gfn = gfn;
1415 vcpu->arch.update_pte.page = page; 1689 vcpu->arch.update_pte.pfn = pfn;
1416} 1690}
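The large-page branch above rounds the written gfn down to a huge-page boundary before pinning, since one large mapping covers KVM_PAGES_PER_HPAGE small frames. A self-contained model of the masking, assuming the x86 value of 512 4 KiB pages per 2 MiB page:

#include <assert.h>
#include <stdint.h>

#define PAGES_PER_HPAGE 512ULL          /* assumption: 2 MiB / 4 KiB */

static uint64_t hpage_align(uint64_t gfn)
{
        return gfn & ~(PAGES_PER_HPAGE - 1);    /* clear the low 9 bits */
}

int main(void)
{
        assert(hpage_align(0x12345) == 0x12200);
        assert(hpage_align(0x12200) == 0x12200);        /* already aligned */
        return 0;
}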
1417 1691
1418void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, 1692void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
@@ -1423,7 +1697,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
1423 struct hlist_node *node, *n; 1697 struct hlist_node *node, *n;
1424 struct hlist_head *bucket; 1698 struct hlist_head *bucket;
1425 unsigned index; 1699 unsigned index;
1426 u64 entry; 1700 u64 entry, gentry;
1427 u64 *spte; 1701 u64 *spte;
1428 unsigned offset = offset_in_page(gpa); 1702 unsigned offset = offset_in_page(gpa);
1429 unsigned pte_size; 1703 unsigned pte_size;
@@ -1433,8 +1707,9 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
1433 int level; 1707 int level;
1434 int flooded = 0; 1708 int flooded = 0;
1435 int npte; 1709 int npte;
1710 int r;
1436 1711
1437 pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes); 1712 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
1438 mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes); 1713 mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
1439 spin_lock(&vcpu->kvm->mmu_lock); 1714 spin_lock(&vcpu->kvm->mmu_lock);
1440 kvm_mmu_free_some_pages(vcpu); 1715 kvm_mmu_free_some_pages(vcpu);
@@ -1450,7 +1725,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
1450 vcpu->arch.last_pt_write_count = 1; 1725 vcpu->arch.last_pt_write_count = 1;
1451 vcpu->arch.last_pte_updated = NULL; 1726 vcpu->arch.last_pte_updated = NULL;
1452 } 1727 }
1453 index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES; 1728 index = kvm_page_table_hashfn(gfn);
1454 bucket = &vcpu->kvm->arch.mmu_page_hash[index]; 1729 bucket = &vcpu->kvm->arch.mmu_page_hash[index];
1455 hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) { 1730 hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
1456 if (sp->gfn != gfn || sp->role.metaphysical) 1731 if (sp->gfn != gfn || sp->role.metaphysical)
@@ -1496,20 +1771,29 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
1496 continue; 1771 continue;
1497 } 1772 }
1498 spte = &sp->spt[page_offset / sizeof(*spte)]; 1773 spte = &sp->spt[page_offset / sizeof(*spte)];
1774 if ((gpa & (pte_size - 1)) || (bytes < pte_size)) {
1775 gentry = 0;
1776 r = kvm_read_guest_atomic(vcpu->kvm,
1777 gpa & ~(u64)(pte_size - 1),
1778 &gentry, pte_size);
1779 new = (const void *)&gentry;
1780 if (r < 0)
1781 new = NULL;
1782 }
1499 while (npte--) { 1783 while (npte--) {
1500 entry = *spte; 1784 entry = *spte;
1501 mmu_pte_write_zap_pte(vcpu, sp, spte); 1785 mmu_pte_write_zap_pte(vcpu, sp, spte);
1502 mmu_pte_write_new_pte(vcpu, sp, spte, new, bytes, 1786 if (new)
1503 page_offset & (pte_size - 1)); 1787 mmu_pte_write_new_pte(vcpu, sp, spte, new);
1504 mmu_pte_write_flush_tlb(vcpu, entry, *spte); 1788 mmu_pte_write_flush_tlb(vcpu, entry, *spte);
1505 ++spte; 1789 ++spte;
1506 } 1790 }
1507 } 1791 }
1508 kvm_mmu_audit(vcpu, "post pte write"); 1792 kvm_mmu_audit(vcpu, "post pte write");
1509 spin_unlock(&vcpu->kvm->mmu_lock); 1793 spin_unlock(&vcpu->kvm->mmu_lock);
1510 if (vcpu->arch.update_pte.page) { 1794 if (!is_error_pfn(vcpu->arch.update_pte.pfn)) {
1511 kvm_release_page_clean(vcpu->arch.update_pte.page); 1795 kvm_release_pfn_clean(vcpu->arch.update_pte.pfn);
1512 vcpu->arch.update_pte.page = NULL; 1796 vcpu->arch.update_pte.pfn = bad_pfn;
1513 } 1797 }
1514} 1798}
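One subtlety in the hunk above: a guest may update an 8-byte pte with two 4-byte writes or an unaligned store, so the intercepted bytes alone cannot always describe the new pte. In that case the code re-reads the whole aligned entry with kvm_read_guest_atomic and, if even that fails, passes new == NULL so the update step is skipped. The alignment test reduces to this sketch:

/* Does a write of 'bytes' at 'gpa' supply one complete, naturally
 * aligned pte of size 'pte_size' (4 or 8)?  If not, re-read it. */
static int write_covers_pte(unsigned long long gpa, int bytes, int pte_size)
{
        return (gpa & (pte_size - 1)) == 0 && bytes >= pte_size;
}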
1515 1799
@@ -1518,9 +1802,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
1518 gpa_t gpa; 1802 gpa_t gpa;
1519 int r; 1803 int r;
1520 1804
1521 down_read(&vcpu->kvm->slots_lock);
1522 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva); 1805 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
1523 up_read(&vcpu->kvm->slots_lock);
1524 1806
1525 spin_lock(&vcpu->kvm->mmu_lock); 1807 spin_lock(&vcpu->kvm->mmu_lock);
1526 r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT); 1808 r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
@@ -1577,6 +1859,12 @@ out:
1577} 1859}
1578EXPORT_SYMBOL_GPL(kvm_mmu_page_fault); 1860EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
1579 1861
1862void kvm_enable_tdp(void)
1863{
1864 tdp_enabled = true;
1865}
1866EXPORT_SYMBOL_GPL(kvm_enable_tdp);
1867
1580static void free_mmu_pages(struct kvm_vcpu *vcpu) 1868static void free_mmu_pages(struct kvm_vcpu *vcpu)
1581{ 1869{
1582 struct kvm_mmu_page *sp; 1870 struct kvm_mmu_page *sp;
@@ -1677,7 +1965,53 @@ void kvm_mmu_zap_all(struct kvm *kvm)
1677 kvm_flush_remote_tlbs(kvm); 1965 kvm_flush_remote_tlbs(kvm);
1678} 1966}
1679 1967
1680void kvm_mmu_module_exit(void) 1968void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
1969{
1970 struct kvm_mmu_page *page;
1971
1972 page = container_of(kvm->arch.active_mmu_pages.prev,
1973 struct kvm_mmu_page, link);
1974 kvm_mmu_zap_page(kvm, page);
1975}
1976
1977static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
1978{
1979 struct kvm *kvm;
1980 struct kvm *kvm_freed = NULL;
1981 int cache_count = 0;
1982
1983 spin_lock(&kvm_lock);
1984
1985 list_for_each_entry(kvm, &vm_list, vm_list) {
1986 int npages;
1987
1988 spin_lock(&kvm->mmu_lock);
1989 npages = kvm->arch.n_alloc_mmu_pages -
1990 kvm->arch.n_free_mmu_pages;
1991 cache_count += npages;
1992 if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
1993 kvm_mmu_remove_one_alloc_mmu_page(kvm);
1994 cache_count--;
1995 kvm_freed = kvm;
1996 }
1997 nr_to_scan--;
1998
1999 spin_unlock(&kvm->mmu_lock);
2000 }
2001 if (kvm_freed)
2002 list_move_tail(&kvm_freed->vm_list, &vm_list);
2003
2004 spin_unlock(&kvm_lock);
2005
2006 return cache_count;
2007}
2008
2009static struct shrinker mmu_shrinker = {
2010 .shrink = mmu_shrink,
2011 .seeks = DEFAULT_SEEKS * 10,
2012};
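For context on mmu_shrink above: shrinker callbacks of this era both report the cache size and, when asked, free something. This one zaps a single shadow page from the first VM that has any, then rotates that VM to the list tail so successive shrinks spread the cost across guests. A rough user-space model of the rotation policy (hypothetical types, not the kernel's):

struct vm { int shadow_pages; };

/* Free one page from the first VM that has any, then move that VM to
 * the end of the array so the next shrink picks on someone else. */
static int shrink_one(struct vm *vms, int n)
{
        for (int i = 0; i < n; i++) {
                if (vms[i].shadow_pages > 0) {
                        struct vm victim = vms[i];

                        victim.shadow_pages--;
                        for (int j = i; j < n - 1; j++)
                                vms[j] = vms[j + 1];
                        vms[n - 1] = victim;    /* rotate to tail */
                        return 1;
                }
        }
        return 0;                               /* nothing to free */
}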
2013
2014void mmu_destroy_caches(void)
1681{ 2015{
1682 if (pte_chain_cache) 2016 if (pte_chain_cache)
1683 kmem_cache_destroy(pte_chain_cache); 2017 kmem_cache_destroy(pte_chain_cache);
@@ -1687,6 +2021,12 @@ void kvm_mmu_module_exit(void)
1687 kmem_cache_destroy(mmu_page_header_cache); 2021 kmem_cache_destroy(mmu_page_header_cache);
1688} 2022}
1689 2023
2024void kvm_mmu_module_exit(void)
2025{
2026 mmu_destroy_caches();
2027 unregister_shrinker(&mmu_shrinker);
2028}
2029
1690int kvm_mmu_module_init(void) 2030int kvm_mmu_module_init(void)
1691{ 2031{
1692 pte_chain_cache = kmem_cache_create("kvm_pte_chain", 2032 pte_chain_cache = kmem_cache_create("kvm_pte_chain",
@@ -1706,10 +2046,12 @@ int kvm_mmu_module_init(void)
1706 if (!mmu_page_header_cache) 2046 if (!mmu_page_header_cache)
1707 goto nomem; 2047 goto nomem;
1708 2048
2049 register_shrinker(&mmu_shrinker);
2050
1709 return 0; 2051 return 0;
1710 2052
1711nomem: 2053nomem:
1712 kvm_mmu_module_exit(); 2054 mmu_destroy_caches();
1713 return -ENOMEM; 2055 return -ENOMEM;
1714} 2056}
1715 2057
@@ -1732,6 +2074,127 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
1732 return nr_mmu_pages; 2074 return nr_mmu_pages;
1733} 2075}
1734 2076
2077static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
2078 unsigned len)
2079{
2080 if (len > buffer->len)
2081 return NULL;
2082 return buffer->ptr;
2083}
2084
2085static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
2086 unsigned len)
2087{
2088 void *ret;
2089
2090 ret = pv_mmu_peek_buffer(buffer, len);
2091 if (!ret)
2092 return ret;
2093 buffer->ptr += len;
2094 buffer->len -= len;
2095 buffer->processed += len;
2096 return ret;
2097}
2098
2099static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
2100 gpa_t addr, gpa_t value)
2101{
2102 int bytes = 8;
2103 int r;
2104
2105 if (!is_long_mode(vcpu) && !is_pae(vcpu))
2106 bytes = 4;
2107
2108 r = mmu_topup_memory_caches(vcpu);
2109 if (r)
2110 return r;
2111
2112 if (!emulator_write_phys(vcpu, addr, &value, bytes))
2113 return -EFAULT;
2114
2115 return 1;
2116}
2117
2118static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
2119{
2120 kvm_x86_ops->tlb_flush(vcpu);
2121 return 1;
2122}
2123
2124static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
2125{
2126 spin_lock(&vcpu->kvm->mmu_lock);
2127 mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
2128 spin_unlock(&vcpu->kvm->mmu_lock);
2129 return 1;
2130}
2131
2132static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
2133 struct kvm_pv_mmu_op_buffer *buffer)
2134{
2135 struct kvm_mmu_op_header *header;
2136
2137 header = pv_mmu_peek_buffer(buffer, sizeof *header);
2138 if (!header)
2139 return 0;
2140 switch (header->op) {
2141 case KVM_MMU_OP_WRITE_PTE: {
2142 struct kvm_mmu_op_write_pte *wpte;
2143
2144 wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
2145 if (!wpte)
2146 return 0;
2147 return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
2148 wpte->pte_val);
2149 }
2150 case KVM_MMU_OP_FLUSH_TLB: {
2151 struct kvm_mmu_op_flush_tlb *ftlb;
2152
2153 ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
2154 if (!ftlb)
2155 return 0;
2156 return kvm_pv_mmu_flush_tlb(vcpu);
2157 }
2158 case KVM_MMU_OP_RELEASE_PT: {
2159 struct kvm_mmu_op_release_pt *rpt;
2160
2161 rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
2162 if (!rpt)
2163 return 0;
2164 return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
2165 }
2166 default: return 0;
2167 }
2168}
2169
2170int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
2171 gpa_t addr, unsigned long *ret)
2172{
2173 int r;
2174 struct kvm_pv_mmu_op_buffer buffer;
2175
2176 buffer.ptr = buffer.buf;
2177 buffer.len = min_t(unsigned long, bytes, sizeof buffer.buf);
2178 buffer.processed = 0;
2179
2180 r = kvm_read_guest(vcpu->kvm, addr, buffer.buf, buffer.len);
2181 if (r)
2182 goto out;
2183
2184 while (buffer.len) {
2185 r = kvm_pv_mmu_op_one(vcpu, &buffer);
2186 if (r < 0)
2187 goto out;
2188 if (r == 0)
2189 break;
2190 }
2191
2192 r = 1;
2193out:
2194 *ret = buffer.processed;
2195 return r;
2196}
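The buffer helpers above implement a simple cursor protocol for the paravirtual MMU hypercall: peek validates that a whole header or operand is present, read consumes it and advances the processed count that is reported back to the guest. A compact model of the cursor (hypothetical struct; the real one is struct kvm_pv_mmu_op_buffer):

#include <stddef.h>

struct op_buffer {
        const unsigned char *ptr;
        size_t len, processed;
};

static const void *buf_peek(struct op_buffer *b, size_t n)
{
        return n > b->len ? NULL : b->ptr;
}

static const void *buf_read(struct op_buffer *b, size_t n)
{
        const void *p = buf_peek(b, n);

        if (p) {
                b->ptr += n;
                b->len -= n;
                b->processed += n;
        }
        return p;
}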
2197
1735#ifdef AUDIT 2198#ifdef AUDIT
1736 2199
1737static const char *audit_msg; 2200static const char *audit_msg;
@@ -1768,8 +2231,7 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
1768 audit_mappings_page(vcpu, ent, va, level - 1); 2231 audit_mappings_page(vcpu, ent, va, level - 1);
1769 } else { 2232 } else {
1770 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va); 2233 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
1771 struct page *page = gpa_to_page(vcpu, gpa); 2234 hpa_t hpa = (hpa_t)gpa_to_pfn(vcpu, gpa) << PAGE_SHIFT;
1772 hpa_t hpa = page_to_phys(page);
1773 2235
1774 if (is_shadow_present_pte(ent) 2236 if (is_shadow_present_pte(ent)
1775 && (ent & PT64_BASE_ADDR_MASK) != hpa) 2237 && (ent & PT64_BASE_ADDR_MASK) != hpa)
@@ -1782,7 +2244,7 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
1782 && !is_error_hpa(hpa)) 2244 && !is_error_hpa(hpa))
1783 printk(KERN_ERR "audit: (%s) notrap shadow," 2245 printk(KERN_ERR "audit: (%s) notrap shadow,"
1784 " valid guest gva %lx\n", audit_msg, va); 2246 " valid guest gva %lx\n", audit_msg, va);
 1785 kvm_release_page_clean(page); 2247 kvm_release_pfn_clean(hpa >> PAGE_SHIFT);
1786 2248
1787 } 2249 }
1788 } 2250 }
@@ -1867,7 +2329,7 @@ static void audit_rmap(struct kvm_vcpu *vcpu)
1867 2329
1868 if (n_rmap != n_actual) 2330 if (n_rmap != n_actual)
1869 printk(KERN_ERR "%s: (%s) rmap %d actual %d\n", 2331 printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
1870 __FUNCTION__, audit_msg, n_rmap, n_actual); 2332 __func__, audit_msg, n_rmap, n_actual);
1871} 2333}
1872 2334
1873static void audit_write_protection(struct kvm_vcpu *vcpu) 2335static void audit_write_protection(struct kvm_vcpu *vcpu)
@@ -1887,7 +2349,7 @@ static void audit_write_protection(struct kvm_vcpu *vcpu)
1887 if (*rmapp) 2349 if (*rmapp)
1888 printk(KERN_ERR "%s: (%s) shadow page has writable" 2350 printk(KERN_ERR "%s: (%s) shadow page has writable"
1889 " mappings: gfn %lx role %x\n", 2351 " mappings: gfn %lx role %x\n",
1890 __FUNCTION__, audit_msg, sp->gfn, 2352 __func__, audit_msg, sp->gfn,
1891 sp->role.word); 2353 sp->role.word);
1892 } 2354 }
1893} 2355}
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 1fce19ec7a23..e64e9f56a65e 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -3,6 +3,12 @@
3 3
4#include <linux/kvm_host.h> 4#include <linux/kvm_host.h>
5 5
6#ifdef CONFIG_X86_64
7#define TDP_ROOT_LEVEL PT64_ROOT_LEVEL
8#else
9#define TDP_ROOT_LEVEL PT32E_ROOT_LEVEL
10#endif
11
6static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu) 12static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
7{ 13{
8 if (unlikely(vcpu->kvm->arch.n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES)) 14 if (unlikely(vcpu->kvm->arch.n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index ecc0856268c4..156fe10288ae 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -130,7 +130,7 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
130 unsigned index, pt_access, pte_access; 130 unsigned index, pt_access, pte_access;
131 gpa_t pte_gpa; 131 gpa_t pte_gpa;
132 132
133 pgprintk("%s: addr %lx\n", __FUNCTION__, addr); 133 pgprintk("%s: addr %lx\n", __func__, addr);
134walk: 134walk:
135 walker->level = vcpu->arch.mmu.root_level; 135 walker->level = vcpu->arch.mmu.root_level;
136 pte = vcpu->arch.cr3; 136 pte = vcpu->arch.cr3;
@@ -155,7 +155,7 @@ walk:
155 pte_gpa += index * sizeof(pt_element_t); 155 pte_gpa += index * sizeof(pt_element_t);
156 walker->table_gfn[walker->level - 1] = table_gfn; 156 walker->table_gfn[walker->level - 1] = table_gfn;
157 walker->pte_gpa[walker->level - 1] = pte_gpa; 157 walker->pte_gpa[walker->level - 1] = pte_gpa;
158 pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__, 158 pgprintk("%s: table_gfn[%d] %lx\n", __func__,
159 walker->level - 1, table_gfn); 159 walker->level - 1, table_gfn);
160 160
161 kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte)); 161 kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));
@@ -222,7 +222,7 @@ walk:
222 walker->pt_access = pt_access; 222 walker->pt_access = pt_access;
223 walker->pte_access = pte_access; 223 walker->pte_access = pte_access;
224 pgprintk("%s: pte %llx pte_access %x pt_access %x\n", 224 pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
225 __FUNCTION__, (u64)pte, pt_access, pte_access); 225 __func__, (u64)pte, pt_access, pte_access);
226 return 1; 226 return 1;
227 227
228not_present: 228not_present:
@@ -243,31 +243,30 @@ err:
243} 243}
244 244
245static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page, 245static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
246 u64 *spte, const void *pte, int bytes, 246 u64 *spte, const void *pte)
247 int offset_in_pte)
248{ 247{
249 pt_element_t gpte; 248 pt_element_t gpte;
250 unsigned pte_access; 249 unsigned pte_access;
251 struct page *npage; 250 pfn_t pfn;
251 int largepage = vcpu->arch.update_pte.largepage;
252 252
253 gpte = *(const pt_element_t *)pte; 253 gpte = *(const pt_element_t *)pte;
254 if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) { 254 if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
255 if (!offset_in_pte && !is_present_pte(gpte)) 255 if (!is_present_pte(gpte))
256 set_shadow_pte(spte, shadow_notrap_nonpresent_pte); 256 set_shadow_pte(spte, shadow_notrap_nonpresent_pte);
257 return; 257 return;
258 } 258 }
259 if (bytes < sizeof(pt_element_t)) 259 pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
260 return;
261 pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
262 pte_access = page->role.access & FNAME(gpte_access)(vcpu, gpte); 260 pte_access = page->role.access & FNAME(gpte_access)(vcpu, gpte);
263 if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn) 261 if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn)
264 return; 262 return;
265 npage = vcpu->arch.update_pte.page; 263 pfn = vcpu->arch.update_pte.pfn;
266 if (!npage) 264 if (is_error_pfn(pfn))
267 return; 265 return;
268 get_page(npage); 266 kvm_get_pfn(pfn);
269 mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0, 267 mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
270 gpte & PT_DIRTY_MASK, NULL, gpte_to_gfn(gpte), npage); 268 gpte & PT_DIRTY_MASK, NULL, largepage, gpte_to_gfn(gpte),
269 pfn, true);
271} 270}
272 271
273/* 272/*
@@ -275,8 +274,8 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
275 */ 274 */
276static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, 275static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
277 struct guest_walker *walker, 276 struct guest_walker *walker,
278 int user_fault, int write_fault, int *ptwrite, 277 int user_fault, int write_fault, int largepage,
279 struct page *page) 278 int *ptwrite, pfn_t pfn)
280{ 279{
281 hpa_t shadow_addr; 280 hpa_t shadow_addr;
282 int level; 281 int level;
@@ -304,11 +303,19 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
304 shadow_ent = ((u64 *)__va(shadow_addr)) + index; 303 shadow_ent = ((u64 *)__va(shadow_addr)) + index;
305 if (level == PT_PAGE_TABLE_LEVEL) 304 if (level == PT_PAGE_TABLE_LEVEL)
306 break; 305 break;
307 if (is_shadow_present_pte(*shadow_ent)) { 306
307 if (largepage && level == PT_DIRECTORY_LEVEL)
308 break;
309
310 if (is_shadow_present_pte(*shadow_ent)
311 && !is_large_pte(*shadow_ent)) {
308 shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK; 312 shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
309 continue; 313 continue;
310 } 314 }
311 315
316 if (is_large_pte(*shadow_ent))
317 rmap_remove(vcpu->kvm, shadow_ent);
318
312 if (level - 1 == PT_PAGE_TABLE_LEVEL 319 if (level - 1 == PT_PAGE_TABLE_LEVEL
313 && walker->level == PT_DIRECTORY_LEVEL) { 320 && walker->level == PT_DIRECTORY_LEVEL) {
314 metaphysical = 1; 321 metaphysical = 1;
@@ -329,7 +336,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
329 walker->pte_gpa[level - 2], 336 walker->pte_gpa[level - 2],
330 &curr_pte, sizeof(curr_pte)); 337 &curr_pte, sizeof(curr_pte));
331 if (r || curr_pte != walker->ptes[level - 2]) { 338 if (r || curr_pte != walker->ptes[level - 2]) {
332 kvm_release_page_clean(page); 339 kvm_release_pfn_clean(pfn);
333 return NULL; 340 return NULL;
334 } 341 }
335 } 342 }
@@ -342,7 +349,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
342 mmu_set_spte(vcpu, shadow_ent, access, walker->pte_access & access, 349 mmu_set_spte(vcpu, shadow_ent, access, walker->pte_access & access,
343 user_fault, write_fault, 350 user_fault, write_fault,
344 walker->ptes[walker->level-1] & PT_DIRTY_MASK, 351 walker->ptes[walker->level-1] & PT_DIRTY_MASK,
345 ptwrite, walker->gfn, page); 352 ptwrite, largepage, walker->gfn, pfn, false);
346 353
347 return shadow_ent; 354 return shadow_ent;
348} 355}
@@ -371,16 +378,16 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
371 u64 *shadow_pte; 378 u64 *shadow_pte;
372 int write_pt = 0; 379 int write_pt = 0;
373 int r; 380 int r;
374 struct page *page; 381 pfn_t pfn;
382 int largepage = 0;
375 383
376 pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code); 384 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
377 kvm_mmu_audit(vcpu, "pre page fault"); 385 kvm_mmu_audit(vcpu, "pre page fault");
378 386
379 r = mmu_topup_memory_caches(vcpu); 387 r = mmu_topup_memory_caches(vcpu);
380 if (r) 388 if (r)
381 return r; 389 return r;
382 390
383 down_read(&vcpu->kvm->slots_lock);
384 /* 391 /*
385 * Look up the shadow pte for the faulting address. 392 * Look up the shadow pte for the faulting address.
386 */ 393 */
@@ -391,40 +398,45 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
391 * The page is not mapped by the guest. Let the guest handle it. 398 * The page is not mapped by the guest. Let the guest handle it.
392 */ 399 */
393 if (!r) { 400 if (!r) {
394 pgprintk("%s: guest page fault\n", __FUNCTION__); 401 pgprintk("%s: guest page fault\n", __func__);
395 inject_page_fault(vcpu, addr, walker.error_code); 402 inject_page_fault(vcpu, addr, walker.error_code);
396 vcpu->arch.last_pt_write_count = 0; /* reset fork detector */ 403 vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
397 up_read(&vcpu->kvm->slots_lock);
398 return 0; 404 return 0;
399 } 405 }
400 406
401 down_read(&current->mm->mmap_sem); 407 down_read(&current->mm->mmap_sem);
402 page = gfn_to_page(vcpu->kvm, walker.gfn); 408 if (walker.level == PT_DIRECTORY_LEVEL) {
409 gfn_t large_gfn;
410 large_gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE-1);
411 if (is_largepage_backed(vcpu, large_gfn)) {
412 walker.gfn = large_gfn;
413 largepage = 1;
414 }
415 }
416 pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);
403 up_read(&current->mm->mmap_sem); 417 up_read(&current->mm->mmap_sem);
404 418
419 /* mmio */
420 if (is_error_pfn(pfn)) {
421 pgprintk("gfn %x is mmio\n", walker.gfn);
422 kvm_release_pfn_clean(pfn);
423 return 1;
424 }
425
405 spin_lock(&vcpu->kvm->mmu_lock); 426 spin_lock(&vcpu->kvm->mmu_lock);
406 kvm_mmu_free_some_pages(vcpu); 427 kvm_mmu_free_some_pages(vcpu);
407 shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault, 428 shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
408 &write_pt, page); 429 largepage, &write_pt, pfn);
409 pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__, 430
431 pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
410 shadow_pte, *shadow_pte, write_pt); 432 shadow_pte, *shadow_pte, write_pt);
411 433
412 if (!write_pt) 434 if (!write_pt)
413 vcpu->arch.last_pt_write_count = 0; /* reset fork detector */ 435 vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
414 436
415 /*
416 * mmio: emulate if accessible, otherwise its a guest fault.
417 */
418 if (shadow_pte && is_io_pte(*shadow_pte)) {
419 spin_unlock(&vcpu->kvm->mmu_lock);
420 up_read(&vcpu->kvm->slots_lock);
421 return 1;
422 }
423
424 ++vcpu->stat.pf_fixed; 437 ++vcpu->stat.pf_fixed;
425 kvm_mmu_audit(vcpu, "post page fault (fixed)"); 438 kvm_mmu_audit(vcpu, "post page fault (fixed)");
426 spin_unlock(&vcpu->kvm->mmu_lock); 439 spin_unlock(&vcpu->kvm->mmu_lock);
427 up_read(&vcpu->kvm->slots_lock);
428 440
429 return write_pt; 441 return write_pt;
430} 442}
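A behavioural shift in the fault path above: mmio is now detected up front, outside the mmu lock, via the sentinel pfn that gfn_to_pfn returns for gfns with no memslot backing, instead of by installing an io pte and testing it afterwards. Schematically (names invented for the sketch):

/* gfn_to_pfn() yields a reserved sentinel for unbacked gfns; callers
 * release it and return 1 so the access is handled by emulation. */
static int fault_is_mmio(unsigned long pfn, unsigned long error_pfn)
{
        return pfn == error_pfn;
}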
diff --git a/arch/x86/kvm/segment_descriptor.h b/arch/x86/kvm/segment_descriptor.h
deleted file mode 100644
index 56fc4c873389..000000000000
--- a/arch/x86/kvm/segment_descriptor.h
+++ /dev/null
@@ -1,29 +0,0 @@
1#ifndef __SEGMENT_DESCRIPTOR_H
2#define __SEGMENT_DESCRIPTOR_H
3
4struct segment_descriptor {
5 u16 limit_low;
6 u16 base_low;
7 u8 base_mid;
8 u8 type : 4;
9 u8 system : 1;
10 u8 dpl : 2;
11 u8 present : 1;
12 u8 limit_high : 4;
13 u8 avl : 1;
14 u8 long_mode : 1;
15 u8 default_op : 1;
16 u8 granularity : 1;
17 u8 base_high;
18} __attribute__((packed));
19
20#ifdef CONFIG_X86_64
21/* LDT or TSS descriptor in the GDT. 16 bytes. */
22struct segment_descriptor_64 {
23 struct segment_descriptor s;
24 u32 base_higher;
25 u32 pad_zero;
26};
27
28#endif
29#endif
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 1a582f1090e8..89e0be2c10d0 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -47,6 +47,18 @@ MODULE_LICENSE("GPL");
47#define SVM_FEATURE_LBRV (1 << 1) 47#define SVM_FEATURE_LBRV (1 << 1)
48#define SVM_FEATURE_SVML (1 << 2) 48
49 49
50#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
51
52/* enable NPT for AMD64 and X86 with PAE */
53#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
54static bool npt_enabled = true;
55#else
56static bool npt_enabled = false;
57#endif
58static int npt = 1;
59
60module_param(npt, int, S_IRUGO);
61
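The knobs above feed a three-stage decision, resolved in svm_hardware_setup below: the compile-time default (on for 64-bit and PAE builds), the SVM_FEATURE_NPT CPUID bit, and the npt= module parameter, which can only switch the feature off, never force it on. As a small sketch:

/* Sketch of the decision ladder in svm_hardware_setup. */
static int resolve_npt(int compiled_default, int cpu_has_npt, int npt_param)
{
        int enabled = compiled_default;

        if (!cpu_has_npt)
                enabled = 0;            /* hardware cannot do it */
        if (enabled && !npt_param)
                enabled = 0;            /* npt=0 forces it off */
        return enabled;
}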
50static void kvm_reput_irq(struct vcpu_svm *svm); 62static void kvm_reput_irq(struct vcpu_svm *svm);
51 63
52static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu) 64static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
@@ -54,8 +66,7 @@ static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
54 return container_of(vcpu, struct vcpu_svm, vcpu); 66 return container_of(vcpu, struct vcpu_svm, vcpu);
55} 67}
56 68
57unsigned long iopm_base; 69static unsigned long iopm_base;
58unsigned long msrpm_base;
59 70
60struct kvm_ldttss_desc { 71struct kvm_ldttss_desc {
61 u16 limit0; 72 u16 limit0;
@@ -182,7 +193,7 @@ static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
182 193
183static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer) 194static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
184{ 195{
185 if (!(efer & EFER_LMA)) 196 if (!npt_enabled && !(efer & EFER_LMA))
186 efer &= ~EFER_LME; 197 efer &= ~EFER_LME;
187 198
188 to_svm(vcpu)->vmcb->save.efer = efer | MSR_EFER_SVME_MASK; 199 to_svm(vcpu)->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
@@ -219,12 +230,12 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
219 struct vcpu_svm *svm = to_svm(vcpu); 230 struct vcpu_svm *svm = to_svm(vcpu);
220 231
221 if (!svm->next_rip) { 232 if (!svm->next_rip) {
222 printk(KERN_DEBUG "%s: NOP\n", __FUNCTION__); 233 printk(KERN_DEBUG "%s: NOP\n", __func__);
223 return; 234 return;
224 } 235 }
225 if (svm->next_rip - svm->vmcb->save.rip > MAX_INST_SIZE) 236 if (svm->next_rip - svm->vmcb->save.rip > MAX_INST_SIZE)
226 printk(KERN_ERR "%s: ip 0x%llx next 0x%llx\n", 237 printk(KERN_ERR "%s: ip 0x%llx next 0x%llx\n",
227 __FUNCTION__, 238 __func__,
228 svm->vmcb->save.rip, 239 svm->vmcb->save.rip,
229 svm->next_rip); 240 svm->next_rip);
230 241
@@ -279,11 +290,7 @@ static void svm_hardware_enable(void *garbage)
279 290
280 struct svm_cpu_data *svm_data; 291 struct svm_cpu_data *svm_data;
281 uint64_t efer; 292 uint64_t efer;
282#ifdef CONFIG_X86_64
283 struct desc_ptr gdt_descr;
284#else
285 struct desc_ptr gdt_descr; 293 struct desc_ptr gdt_descr;
286#endif
287 struct desc_struct *gdt; 294 struct desc_struct *gdt;
288 int me = raw_smp_processor_id(); 295 int me = raw_smp_processor_id();
289 296
@@ -302,7 +309,6 @@ static void svm_hardware_enable(void *garbage)
302 svm_data->asid_generation = 1; 309 svm_data->asid_generation = 1;
303 svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1; 310 svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
304 svm_data->next_asid = svm_data->max_asid + 1; 311 svm_data->next_asid = svm_data->max_asid + 1;
305 svm_features = cpuid_edx(SVM_CPUID_FUNC);
306 312
307 asm volatile ("sgdt %0" : "=m"(gdt_descr)); 313 asm volatile ("sgdt %0" : "=m"(gdt_descr));
308 gdt = (struct desc_struct *)gdt_descr.address; 314 gdt = (struct desc_struct *)gdt_descr.address;
@@ -361,12 +367,51 @@ static void set_msr_interception(u32 *msrpm, unsigned msr,
361 BUG(); 367 BUG();
362} 368}
363 369
370static void svm_vcpu_init_msrpm(u32 *msrpm)
371{
372 memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
373
374#ifdef CONFIG_X86_64
375 set_msr_interception(msrpm, MSR_GS_BASE, 1, 1);
376 set_msr_interception(msrpm, MSR_FS_BASE, 1, 1);
377 set_msr_interception(msrpm, MSR_KERNEL_GS_BASE, 1, 1);
378 set_msr_interception(msrpm, MSR_LSTAR, 1, 1);
379 set_msr_interception(msrpm, MSR_CSTAR, 1, 1);
380 set_msr_interception(msrpm, MSR_SYSCALL_MASK, 1, 1);
381#endif
382 set_msr_interception(msrpm, MSR_K6_STAR, 1, 1);
383 set_msr_interception(msrpm, MSR_IA32_SYSENTER_CS, 1, 1);
384 set_msr_interception(msrpm, MSR_IA32_SYSENTER_ESP, 1, 1);
385 set_msr_interception(msrpm, MSR_IA32_SYSENTER_EIP, 1, 1);
386}
387
388static void svm_enable_lbrv(struct vcpu_svm *svm)
389{
390 u32 *msrpm = svm->msrpm;
391
392 svm->vmcb->control.lbr_ctl = 1;
393 set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
394 set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
395 set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
396 set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
397}
398
399static void svm_disable_lbrv(struct vcpu_svm *svm)
400{
401 u32 *msrpm = svm->msrpm;
402
403 svm->vmcb->control.lbr_ctl = 0;
404 set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
405 set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
406 set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
407 set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
408}
409
364static __init int svm_hardware_setup(void) 410static __init int svm_hardware_setup(void)
365{ 411{
366 int cpu; 412 int cpu;
367 struct page *iopm_pages; 413 struct page *iopm_pages;
368 struct page *msrpm_pages; 414 void *iopm_va;
369 void *iopm_va, *msrpm_va;
370 int r; 415 int r;
371 416
372 iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER); 417 iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);
@@ -379,41 +424,33 @@ static __init int svm_hardware_setup(void)
379 clear_bit(0x80, iopm_va); /* allow direct access to PC debug port */ 424 clear_bit(0x80, iopm_va); /* allow direct access to PC debug port */
380 iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT; 425 iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
381 426
427 if (boot_cpu_has(X86_FEATURE_NX))
428 kvm_enable_efer_bits(EFER_NX);
382 429
383 msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER); 430 for_each_online_cpu(cpu) {
431 r = svm_cpu_init(cpu);
432 if (r)
433 goto err;
434 }
384 435
385 r = -ENOMEM; 436 svm_features = cpuid_edx(SVM_CPUID_FUNC);
386 if (!msrpm_pages)
387 goto err_1;
388 437
389 msrpm_va = page_address(msrpm_pages); 438 if (!svm_has(SVM_FEATURE_NPT))
390 memset(msrpm_va, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER)); 439 npt_enabled = false;
391 msrpm_base = page_to_pfn(msrpm_pages) << PAGE_SHIFT;
392 440
393#ifdef CONFIG_X86_64 441 if (npt_enabled && !npt) {
394 set_msr_interception(msrpm_va, MSR_GS_BASE, 1, 1); 442 printk(KERN_INFO "kvm: Nested Paging disabled\n");
395 set_msr_interception(msrpm_va, MSR_FS_BASE, 1, 1); 443 npt_enabled = false;
396 set_msr_interception(msrpm_va, MSR_KERNEL_GS_BASE, 1, 1); 444 }
397 set_msr_interception(msrpm_va, MSR_LSTAR, 1, 1);
398 set_msr_interception(msrpm_va, MSR_CSTAR, 1, 1);
399 set_msr_interception(msrpm_va, MSR_SYSCALL_MASK, 1, 1);
400#endif
401 set_msr_interception(msrpm_va, MSR_K6_STAR, 1, 1);
402 set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_CS, 1, 1);
403 set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_ESP, 1, 1);
404 set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_EIP, 1, 1);
405 445
406 for_each_online_cpu(cpu) { 446 if (npt_enabled) {
407 r = svm_cpu_init(cpu); 447 printk(KERN_INFO "kvm: Nested Paging enabled\n");
408 if (r) 448 kvm_enable_tdp();
409 goto err_2;
410 } 449 }
450
411 return 0; 451 return 0;
412 452
413err_2: 453err:
414 __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
415 msrpm_base = 0;
416err_1:
417 __free_pages(iopm_pages, IOPM_ALLOC_ORDER); 454 __free_pages(iopm_pages, IOPM_ALLOC_ORDER);
418 iopm_base = 0; 455 iopm_base = 0;
419 return r; 456 return r;
@@ -421,9 +458,8 @@ err_1:
421 458
422static __exit void svm_hardware_unsetup(void) 459static __exit void svm_hardware_unsetup(void)
423{ 460{
424 __free_pages(pfn_to_page(msrpm_base >> PAGE_SHIFT), MSRPM_ALLOC_ORDER);
425 __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER); 461 __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
426 iopm_base = msrpm_base = 0; 462 iopm_base = 0;
427} 463}
428 464
429static void init_seg(struct vmcb_seg *seg) 465static void init_seg(struct vmcb_seg *seg)
@@ -443,15 +479,14 @@ static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
443 seg->base = 0; 479 seg->base = 0;
444} 480}
445 481
446static void init_vmcb(struct vmcb *vmcb) 482static void init_vmcb(struct vcpu_svm *svm)
447{ 483{
448 struct vmcb_control_area *control = &vmcb->control; 484 struct vmcb_control_area *control = &svm->vmcb->control;
449 struct vmcb_save_area *save = &vmcb->save; 485 struct vmcb_save_area *save = &svm->vmcb->save;
450 486
451 control->intercept_cr_read = INTERCEPT_CR0_MASK | 487 control->intercept_cr_read = INTERCEPT_CR0_MASK |
452 INTERCEPT_CR3_MASK | 488 INTERCEPT_CR3_MASK |
453 INTERCEPT_CR4_MASK | 489 INTERCEPT_CR4_MASK;
454 INTERCEPT_CR8_MASK;
455 490
456 control->intercept_cr_write = INTERCEPT_CR0_MASK | 491 control->intercept_cr_write = INTERCEPT_CR0_MASK |
457 INTERCEPT_CR3_MASK | 492 INTERCEPT_CR3_MASK |
@@ -471,23 +506,13 @@ static void init_vmcb(struct vmcb *vmcb)
471 INTERCEPT_DR7_MASK; 506 INTERCEPT_DR7_MASK;
472 507
473 control->intercept_exceptions = (1 << PF_VECTOR) | 508 control->intercept_exceptions = (1 << PF_VECTOR) |
474 (1 << UD_VECTOR); 509 (1 << UD_VECTOR) |
510 (1 << MC_VECTOR);
475 511
476 512
477 control->intercept = (1ULL << INTERCEPT_INTR) | 513 control->intercept = (1ULL << INTERCEPT_INTR) |
478 (1ULL << INTERCEPT_NMI) | 514 (1ULL << INTERCEPT_NMI) |
479 (1ULL << INTERCEPT_SMI) | 515 (1ULL << INTERCEPT_SMI) |
480 /*
481 * selective cr0 intercept bug?
482 * 0: 0f 22 d8 mov %eax,%cr3
483 * 3: 0f 20 c0 mov %cr0,%eax
484 * 6: 0d 00 00 00 80 or $0x80000000,%eax
485 * b: 0f 22 c0 mov %eax,%cr0
486 * set cr3 ->interception
487 * get cr0 ->interception
488 * set cr0 -> no interception
489 */
490 /* (1ULL << INTERCEPT_SELECTIVE_CR0) | */
491 (1ULL << INTERCEPT_CPUID) | 516 (1ULL << INTERCEPT_CPUID) |
492 (1ULL << INTERCEPT_INVD) | 517 (1ULL << INTERCEPT_INVD) |
493 (1ULL << INTERCEPT_HLT) | 518 (1ULL << INTERCEPT_HLT) |
@@ -508,7 +533,7 @@ static void init_vmcb(struct vmcb *vmcb)
508 (1ULL << INTERCEPT_MWAIT); 533 (1ULL << INTERCEPT_MWAIT);
509 534
510 control->iopm_base_pa = iopm_base; 535 control->iopm_base_pa = iopm_base;
511 control->msrpm_base_pa = msrpm_base; 536 control->msrpm_base_pa = __pa(svm->msrpm);
512 control->tsc_offset = 0; 537 control->tsc_offset = 0;
513 control->int_ctl = V_INTR_MASKING_MASK; 538 control->int_ctl = V_INTR_MASKING_MASK;
514 539
@@ -550,13 +575,30 @@ static void init_vmcb(struct vmcb *vmcb)
550 save->cr0 = 0x00000010 | X86_CR0_PG | X86_CR0_WP; 575 save->cr0 = 0x00000010 | X86_CR0_PG | X86_CR0_WP;
551 save->cr4 = X86_CR4_PAE; 576 save->cr4 = X86_CR4_PAE;
552 /* rdx = ?? */ 577 /* rdx = ?? */
578
579 if (npt_enabled) {
580 /* Setup VMCB for Nested Paging */
581 control->nested_ctl = 1;
582 control->intercept &= ~(1ULL << INTERCEPT_TASK_SWITCH);
583 control->intercept_exceptions &= ~(1 << PF_VECTOR);
584 control->intercept_cr_read &= ~(INTERCEPT_CR0_MASK|
585 INTERCEPT_CR3_MASK);
586 control->intercept_cr_write &= ~(INTERCEPT_CR0_MASK|
587 INTERCEPT_CR3_MASK);
588 save->g_pat = 0x0007040600070406ULL;
 589 /* enable caching because the QEMU BIOS doesn't enable it */
590 save->cr0 = X86_CR0_ET;
591 save->cr3 = 0;
592 save->cr4 = 0;
593 }
594 force_new_asid(&svm->vcpu);
553} 595}
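The NPT block above is the heart of the VMCB changes: with nested paging the guest owns CR3 and services its own page faults, so the #PF exception intercept, the CR0/CR3 read/write intercepts and the task-switch intercept are masked out, leaving host-side faults to arrive as SVM_EXIT_NPF (wired to pf_interception further down). Modelled as a sketch with the corresponding bit masks:

#define ICPT_PF         (1u << 14)      /* #PF is vector 14 */
#define ICPT_CR0        (1u << 0)
#define ICPT_CR3        (1u << 3)

/* Drop the paging-related intercepts once nested paging is on. */
static void relax_for_npt(unsigned int *excp, unsigned int *cr_rd,
                          unsigned int *cr_wr)
{
        *excp &= ~ICPT_PF;
        *cr_rd &= ~(ICPT_CR0 | ICPT_CR3);
        *cr_wr &= ~(ICPT_CR0 | ICPT_CR3);
}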
554 596
555static int svm_vcpu_reset(struct kvm_vcpu *vcpu) 597static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
556{ 598{
557 struct vcpu_svm *svm = to_svm(vcpu); 599 struct vcpu_svm *svm = to_svm(vcpu);
558 600
559 init_vmcb(svm->vmcb); 601 init_vmcb(svm);
560 602
561 if (vcpu->vcpu_id != 0) { 603 if (vcpu->vcpu_id != 0) {
562 svm->vmcb->save.rip = 0; 604 svm->vmcb->save.rip = 0;
@@ -571,6 +613,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
571{ 613{
572 struct vcpu_svm *svm; 614 struct vcpu_svm *svm;
573 struct page *page; 615 struct page *page;
616 struct page *msrpm_pages;
574 int err; 617 int err;
575 618
576 svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); 619 svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
@@ -589,12 +632,19 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
589 goto uninit; 632 goto uninit;
590 } 633 }
591 634
635 err = -ENOMEM;
636 msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
637 if (!msrpm_pages)
638 goto uninit;
639 svm->msrpm = page_address(msrpm_pages);
640 svm_vcpu_init_msrpm(svm->msrpm);
641
592 svm->vmcb = page_address(page); 642 svm->vmcb = page_address(page);
593 clear_page(svm->vmcb); 643 clear_page(svm->vmcb);
594 svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT; 644 svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
595 svm->asid_generation = 0; 645 svm->asid_generation = 0;
596 memset(svm->db_regs, 0, sizeof(svm->db_regs)); 646 memset(svm->db_regs, 0, sizeof(svm->db_regs));
597 init_vmcb(svm->vmcb); 647 init_vmcb(svm);
598 648
599 fx_init(&svm->vcpu); 649 fx_init(&svm->vcpu);
600 svm->vcpu.fpu_active = 1; 650 svm->vcpu.fpu_active = 1;
@@ -617,6 +667,7 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
617 struct vcpu_svm *svm = to_svm(vcpu); 667 struct vcpu_svm *svm = to_svm(vcpu);
618 668
619 __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT)); 669 __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
670 __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
620 kvm_vcpu_uninit(vcpu); 671 kvm_vcpu_uninit(vcpu);
621 kmem_cache_free(kvm_vcpu_cache, svm); 672 kmem_cache_free(kvm_vcpu_cache, svm);
622} 673}
@@ -731,6 +782,13 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
731 var->unusable = !var->present; 782 var->unusable = !var->present;
732} 783}
733 784
785static int svm_get_cpl(struct kvm_vcpu *vcpu)
786{
787 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
788
789 return save->cpl;
790}
791
734static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt) 792static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
735{ 793{
736 struct vcpu_svm *svm = to_svm(vcpu); 794 struct vcpu_svm *svm = to_svm(vcpu);
@@ -784,6 +842,9 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
784 } 842 }
785 } 843 }
786#endif 844#endif
845 if (npt_enabled)
846 goto set;
847
787 if ((vcpu->arch.cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) { 848 if ((vcpu->arch.cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
788 svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR); 849 svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
789 vcpu->fpu_active = 1; 850 vcpu->fpu_active = 1;
@@ -791,18 +852,29 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
791 852
792 vcpu->arch.cr0 = cr0; 853 vcpu->arch.cr0 = cr0;
793 cr0 |= X86_CR0_PG | X86_CR0_WP; 854 cr0 |= X86_CR0_PG | X86_CR0_WP;
794 cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
795 if (!vcpu->fpu_active) { 855 if (!vcpu->fpu_active) {
796 svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR); 856 svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
797 cr0 |= X86_CR0_TS; 857 cr0 |= X86_CR0_TS;
798 } 858 }
859set:
860 /*
861 * re-enable caching here because the QEMU bios
862 * does not do it - this results in some delay at
863 * reboot
864 */
865 cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
799 svm->vmcb->save.cr0 = cr0; 866 svm->vmcb->save.cr0 = cr0;
800} 867}
801 868
802static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) 869static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
803{ 870{
804 vcpu->arch.cr4 = cr4; 871 unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
805 to_svm(vcpu)->vmcb->save.cr4 = cr4 | X86_CR4_PAE; 872
873 vcpu->arch.cr4 = cr4;
874 if (!npt_enabled)
875 cr4 |= X86_CR4_PAE;
876 cr4 |= host_cr4_mce;
877 to_svm(vcpu)->vmcb->save.cr4 = cr4;
806} 878}
807 879
808static void svm_set_segment(struct kvm_vcpu *vcpu, 880static void svm_set_segment(struct kvm_vcpu *vcpu,
@@ -833,13 +905,6 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
833 905
834} 906}
835 907
836/* FIXME:
837
838 svm(vcpu)->vmcb->control.int_ctl &= ~V_TPR_MASK;
839 svm(vcpu)->vmcb->control.int_ctl |= (sregs->cr8 & V_TPR_MASK);
840
841*/
842
843static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg) 908static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
844{ 909{
845 return -EOPNOTSUPP; 910 return -EOPNOTSUPP;
@@ -920,7 +985,7 @@ static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
920 } 985 }
921 default: 986 default:
922 printk(KERN_DEBUG "%s: unexpected dr %u\n", 987 printk(KERN_DEBUG "%s: unexpected dr %u\n",
923 __FUNCTION__, dr); 988 __func__, dr);
924 *exception = UD_VECTOR; 989 *exception = UD_VECTOR;
925 return; 990 return;
926 } 991 }
@@ -962,6 +1027,19 @@ static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
962 return 1; 1027 return 1;
963} 1028}
964 1029
1030static int mc_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1031{
1032 /*
1033 * On an #MC intercept the MCE handler is not called automatically in
1034 * the host. So do it by hand here.
1035 */
1036 asm volatile (
1037 "int $0x12\n");
1038 /* not sure if we ever come back to this point */
1039
1040 return 1;
1041}
1042
965static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) 1043static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
966{ 1044{
967 /* 1045 /*
@@ -969,7 +1047,7 @@ static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
969 * so reinitialize it. 1047 * so reinitialize it.
970 */ 1048 */
971 clear_page(svm->vmcb); 1049 clear_page(svm->vmcb);
972 init_vmcb(svm->vmcb); 1050 init_vmcb(svm);
973 1051
974 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN; 1052 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
975 return 0; 1053 return 0;
@@ -1033,9 +1111,18 @@ static int invalid_op_interception(struct vcpu_svm *svm,
1033static int task_switch_interception(struct vcpu_svm *svm, 1111static int task_switch_interception(struct vcpu_svm *svm,
1034 struct kvm_run *kvm_run) 1112 struct kvm_run *kvm_run)
1035{ 1113{
1036 pr_unimpl(&svm->vcpu, "%s: task switch is unsupported\n", __FUNCTION__); 1114 u16 tss_selector;
1037 kvm_run->exit_reason = KVM_EXIT_UNKNOWN; 1115
1038 return 0; 1116 tss_selector = (u16)svm->vmcb->control.exit_info_1;
1117 if (svm->vmcb->control.exit_info_2 &
1118 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
1119 return kvm_task_switch(&svm->vcpu, tss_selector,
1120 TASK_SWITCH_IRET);
1121 if (svm->vmcb->control.exit_info_2 &
1122 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
1123 return kvm_task_switch(&svm->vcpu, tss_selector,
1124 TASK_SWITCH_JMP);
1125 return kvm_task_switch(&svm->vcpu, tss_selector, TASK_SWITCH_CALL);
1039} 1126}
1040 1127
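The decoding above keys off single bits of EXITINFO2, matching the SVM_EXITINFOSHIFT_TS_REASON_* constants added to svm.h later in this patch: bit 36 marks an IRET-initiated switch, bit 38 a far jump, and anything else is treated as a call/interrupt/exception switch. Reduced to a sketch:

#include <stdint.h>

enum ts_reason { TS_CALL, TS_IRET, TS_JMP };

static enum ts_reason decode_ts_reason(uint64_t exit_info_2)
{
        if (exit_info_2 & (1ULL << 36))
                return TS_IRET;
        if (exit_info_2 & (1ULL << 38))
                return TS_JMP;
        return TS_CALL;
}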
1041static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) 1128static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
@@ -1049,7 +1136,7 @@ static int emulate_on_interception(struct vcpu_svm *svm,
1049 struct kvm_run *kvm_run) 1136 struct kvm_run *kvm_run)
1050{ 1137{
1051 if (emulate_instruction(&svm->vcpu, NULL, 0, 0, 0) != EMULATE_DONE) 1138 if (emulate_instruction(&svm->vcpu, NULL, 0, 0, 0) != EMULATE_DONE)
1052 pr_unimpl(&svm->vcpu, "%s: failed\n", __FUNCTION__); 1139 pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
1053 return 1; 1140 return 1;
1054} 1141}
1055 1142
@@ -1179,8 +1266,19 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
1179 svm->vmcb->save.sysenter_esp = data; 1266 svm->vmcb->save.sysenter_esp = data;
1180 break; 1267 break;
1181 case MSR_IA32_DEBUGCTLMSR: 1268 case MSR_IA32_DEBUGCTLMSR:
1182 pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n", 1269 if (!svm_has(SVM_FEATURE_LBRV)) {
1183 __FUNCTION__, data); 1270 pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
1271 __func__, data);
1272 break;
1273 }
1274 if (data & DEBUGCTL_RESERVED_BITS)
1275 return 1;
1276
1277 svm->vmcb->save.dbgctl = data;
1278 if (data & (1ULL<<0))
1279 svm_enable_lbrv(svm);
1280 else
1281 svm_disable_lbrv(svm);
1184 break; 1282 break;
1185 case MSR_K7_EVNTSEL0: 1283 case MSR_K7_EVNTSEL0:
1186 case MSR_K7_EVNTSEL1: 1284 case MSR_K7_EVNTSEL1:
@@ -1265,6 +1363,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
1265 [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception, 1363 [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception,
1266 [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception, 1364 [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
1267 [SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception, 1365 [SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception,
1366 [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
1268 [SVM_EXIT_INTR] = nop_on_interception, 1367 [SVM_EXIT_INTR] = nop_on_interception,
1269 [SVM_EXIT_NMI] = nop_on_interception, 1368 [SVM_EXIT_NMI] = nop_on_interception,
1270 [SVM_EXIT_SMI] = nop_on_interception, 1369 [SVM_EXIT_SMI] = nop_on_interception,
@@ -1290,14 +1389,34 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
1290 [SVM_EXIT_WBINVD] = emulate_on_interception, 1389 [SVM_EXIT_WBINVD] = emulate_on_interception,
1291 [SVM_EXIT_MONITOR] = invalid_op_interception, 1390 [SVM_EXIT_MONITOR] = invalid_op_interception,
1292 [SVM_EXIT_MWAIT] = invalid_op_interception, 1391 [SVM_EXIT_MWAIT] = invalid_op_interception,
1392 [SVM_EXIT_NPF] = pf_interception,
1293}; 1393};
1294 1394
1295
1296static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) 1395static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
1297{ 1396{
1298 struct vcpu_svm *svm = to_svm(vcpu); 1397 struct vcpu_svm *svm = to_svm(vcpu);
1299 u32 exit_code = svm->vmcb->control.exit_code; 1398 u32 exit_code = svm->vmcb->control.exit_code;
1300 1399
1400 if (npt_enabled) {
1401 int mmu_reload = 0;
1402 if ((vcpu->arch.cr0 ^ svm->vmcb->save.cr0) & X86_CR0_PG) {
1403 svm_set_cr0(vcpu, svm->vmcb->save.cr0);
1404 mmu_reload = 1;
1405 }
1406 vcpu->arch.cr0 = svm->vmcb->save.cr0;
1407 vcpu->arch.cr3 = svm->vmcb->save.cr3;
1408 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
1409 if (!load_pdptrs(vcpu, vcpu->arch.cr3)) {
1410 kvm_inject_gp(vcpu, 0);
1411 return 1;
1412 }
1413 }
1414 if (mmu_reload) {
1415 kvm_mmu_reset_context(vcpu);
1416 kvm_mmu_load(vcpu);
1417 }
1418 }
1419
1301 kvm_reput_irq(svm); 1420 kvm_reput_irq(svm);
1302 1421
1303 if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) { 1422 if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
@@ -1308,10 +1427,11 @@ static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
1308 } 1427 }
1309 1428
1310 if (is_external_interrupt(svm->vmcb->control.exit_int_info) && 1429 if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
1311 exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR) 1430 exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
1431 exit_code != SVM_EXIT_NPF)
 1312 printk(KERN_ERR "%s: unexpected exit_int_info 0x%x " 1432
1313 "exit_code 0x%x\n", 1433 "exit_code 0x%x\n",
1314 __FUNCTION__, svm->vmcb->control.exit_int_info, 1434 __func__, svm->vmcb->control.exit_int_info,
1315 exit_code); 1435 exit_code);
1316 1436
1317 if (exit_code >= ARRAY_SIZE(svm_exit_handlers) 1437 if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
@@ -1364,6 +1484,27 @@ static void svm_set_irq(struct kvm_vcpu *vcpu, int irq)
1364 svm_inject_irq(svm, irq); 1484 svm_inject_irq(svm, irq);
1365} 1485}
1366 1486
1487static void update_cr8_intercept(struct kvm_vcpu *vcpu)
1488{
1489 struct vcpu_svm *svm = to_svm(vcpu);
1490 struct vmcb *vmcb = svm->vmcb;
1491 int max_irr, tpr;
1492
1493 if (!irqchip_in_kernel(vcpu->kvm) || vcpu->arch.apic->vapic_addr)
1494 return;
1495
1496 vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
1497
1498 max_irr = kvm_lapic_find_highest_irr(vcpu);
1499 if (max_irr == -1)
1500 return;
1501
1502 tpr = kvm_lapic_get_cr8(vcpu) << 4;
1503
1504 if (tpr >= (max_irr & 0xf0))
1505 vmcb->control.intercept_cr_write |= INTERCEPT_CR8_MASK;
1506}
1507
1367static void svm_intr_assist(struct kvm_vcpu *vcpu) 1508static void svm_intr_assist(struct kvm_vcpu *vcpu)
1368{ 1509{
1369 struct vcpu_svm *svm = to_svm(vcpu); 1510 struct vcpu_svm *svm = to_svm(vcpu);
@@ -1376,14 +1517,14 @@ static void svm_intr_assist(struct kvm_vcpu *vcpu)
1376 SVM_EVTINJ_VEC_MASK; 1517 SVM_EVTINJ_VEC_MASK;
1377 vmcb->control.exit_int_info = 0; 1518 vmcb->control.exit_int_info = 0;
1378 svm_inject_irq(svm, intr_vector); 1519 svm_inject_irq(svm, intr_vector);
1379 return; 1520 goto out;
1380 } 1521 }
1381 1522
1382 if (vmcb->control.int_ctl & V_IRQ_MASK) 1523 if (vmcb->control.int_ctl & V_IRQ_MASK)
1383 return; 1524 goto out;
1384 1525
1385 if (!kvm_cpu_has_interrupt(vcpu)) 1526 if (!kvm_cpu_has_interrupt(vcpu))
1386 return; 1527 goto out;
1387 1528
1388 if (!(vmcb->save.rflags & X86_EFLAGS_IF) || 1529 if (!(vmcb->save.rflags & X86_EFLAGS_IF) ||
1389 (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) || 1530 (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
@@ -1391,12 +1532,14 @@ static void svm_intr_assist(struct kvm_vcpu *vcpu)
1391 /* unable to deliver irq, set pending irq */ 1532 /* unable to deliver irq, set pending irq */
1392 vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR); 1533 vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR);
1393 svm_inject_irq(svm, 0x0); 1534 svm_inject_irq(svm, 0x0);
1394 return; 1535 goto out;
1395 } 1536 }
1396 /* Okay, we can deliver the interrupt: grab it and update PIC state. */ 1537 /* Okay, we can deliver the interrupt: grab it and update PIC state. */
1397 intr_vector = kvm_cpu_get_interrupt(vcpu); 1538 intr_vector = kvm_cpu_get_interrupt(vcpu);
1398 svm_inject_irq(svm, intr_vector); 1539 svm_inject_irq(svm, intr_vector);
1399 kvm_timer_intr_post(vcpu, intr_vector); 1540 kvm_timer_intr_post(vcpu, intr_vector);
1541out:
1542 update_cr8_intercept(vcpu);
1400} 1543}
1401 1544
1402static void kvm_reput_irq(struct vcpu_svm *svm) 1545static void kvm_reput_irq(struct vcpu_svm *svm)
@@ -1482,6 +1625,29 @@ static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
1482{ 1625{
1483} 1626}
1484 1627
1628static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
1629{
1630 struct vcpu_svm *svm = to_svm(vcpu);
1631
1632 if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) {
1633 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
1634 kvm_lapic_set_tpr(vcpu, cr8);
1635 }
1636}
1637
1638static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
1639{
1640 struct vcpu_svm *svm = to_svm(vcpu);
1641 u64 cr8;
1642
1643 if (!irqchip_in_kernel(vcpu->kvm))
1644 return;
1645
1646 cr8 = kvm_get_cr8(vcpu);
1647 svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
1648 svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
1649}
1650
1485static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1651static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1486{ 1652{
1487 struct vcpu_svm *svm = to_svm(vcpu); 1653 struct vcpu_svm *svm = to_svm(vcpu);
@@ -1491,6 +1657,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1491 1657
1492 pre_svm_run(svm); 1658 pre_svm_run(svm);
1493 1659
1660 sync_lapic_to_cr8(vcpu);
1661
1494 save_host_msrs(vcpu); 1662 save_host_msrs(vcpu);
1495 fs_selector = read_fs(); 1663 fs_selector = read_fs();
1496 gs_selector = read_gs(); 1664 gs_selector = read_gs();
@@ -1499,6 +1667,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1499 svm->host_dr6 = read_dr6(); 1667 svm->host_dr6 = read_dr6();
1500 svm->host_dr7 = read_dr7(); 1668 svm->host_dr7 = read_dr7();
1501 svm->vmcb->save.cr2 = vcpu->arch.cr2; 1669 svm->vmcb->save.cr2 = vcpu->arch.cr2;
1670 /* required for live migration with NPT */
1671 if (npt_enabled)
1672 svm->vmcb->save.cr3 = vcpu->arch.cr3;
1502 1673
1503 if (svm->vmcb->save.dr7 & 0xff) { 1674 if (svm->vmcb->save.dr7 & 0xff) {
1504 write_dr7(0); 1675 write_dr7(0);
@@ -1635,6 +1806,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1635 1806
1636 stgi(); 1807 stgi();
1637 1808
1809 sync_cr8_to_lapic(vcpu);
1810
1638 svm->next_rip = 0; 1811 svm->next_rip = 0;
1639} 1812}
1640 1813
@@ -1642,6 +1815,12 @@ static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
1642{ 1815{
1643 struct vcpu_svm *svm = to_svm(vcpu); 1816 struct vcpu_svm *svm = to_svm(vcpu);
1644 1817
1818 if (npt_enabled) {
1819 svm->vmcb->control.nested_cr3 = root;
1820 force_new_asid(vcpu);
1821 return;
1822 }
1823
1645 svm->vmcb->save.cr3 = root; 1824 svm->vmcb->save.cr3 = root;
1646 force_new_asid(vcpu); 1825 force_new_asid(vcpu);
1647 1826
@@ -1709,6 +1888,7 @@ static struct kvm_x86_ops svm_x86_ops = {
1709 .get_segment_base = svm_get_segment_base, 1888 .get_segment_base = svm_get_segment_base,
1710 .get_segment = svm_get_segment, 1889 .get_segment = svm_get_segment,
1711 .set_segment = svm_set_segment, 1890 .set_segment = svm_set_segment,
1891 .get_cpl = svm_get_cpl,
1712 .get_cs_db_l_bits = kvm_get_cs_db_l_bits, 1892 .get_cs_db_l_bits = kvm_get_cs_db_l_bits,
1713 .decache_cr4_guest_bits = svm_decache_cr4_guest_bits, 1893 .decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
1714 .set_cr0 = svm_set_cr0, 1894 .set_cr0 = svm_set_cr0,
diff --git a/arch/x86/kvm/svm.h b/arch/x86/kvm/svm.h
index 5fd50491b555..1b8afa78e869 100644
--- a/arch/x86/kvm/svm.h
+++ b/arch/x86/kvm/svm.h
@@ -238,6 +238,9 @@ struct __attribute__ ((__packed__)) vmcb {
238#define SVM_EXITINTINFO_VALID SVM_EVTINJ_VALID 238#define SVM_EXITINTINFO_VALID SVM_EVTINJ_VALID
239#define SVM_EXITINTINFO_VALID_ERR SVM_EVTINJ_VALID_ERR 239#define SVM_EXITINTINFO_VALID_ERR SVM_EVTINJ_VALID_ERR
240 240
241#define SVM_EXITINFOSHIFT_TS_REASON_IRET 36
242#define SVM_EXITINFOSHIFT_TS_REASON_JMP 38
243
241#define SVM_EXIT_READ_CR0 0x000 244#define SVM_EXIT_READ_CR0 0x000
242#define SVM_EXIT_READ_CR3 0x003 245#define SVM_EXIT_READ_CR3 0x003
243#define SVM_EXIT_READ_CR4 0x004 246#define SVM_EXIT_READ_CR4 0x004
diff --git a/arch/x86/kvm/tss.h b/arch/x86/kvm/tss.h
new file mode 100644
index 000000000000..622aa10f692f
--- /dev/null
+++ b/arch/x86/kvm/tss.h
@@ -0,0 +1,59 @@
1#ifndef __TSS_SEGMENT_H
2#define __TSS_SEGMENT_H
3
4struct tss_segment_32 {
5 u32 prev_task_link;
6 u32 esp0;
7 u32 ss0;
8 u32 esp1;
9 u32 ss1;
10 u32 esp2;
11 u32 ss2;
12 u32 cr3;
13 u32 eip;
14 u32 eflags;
15 u32 eax;
16 u32 ecx;
17 u32 edx;
18 u32 ebx;
19 u32 esp;
20 u32 ebp;
21 u32 esi;
22 u32 edi;
23 u32 es;
24 u32 cs;
25 u32 ss;
26 u32 ds;
27 u32 fs;
28 u32 gs;
29 u32 ldt_selector;
30 u16 t;
31 u16 io_map;
32};
33
34struct tss_segment_16 {
35 u16 prev_task_link;
36 u16 sp0;
37 u16 ss0;
38 u16 sp1;
39 u16 ss1;
40 u16 sp2;
41 u16 ss2;
42 u16 ip;
43 u16 flag;
44 u16 ax;
45 u16 cx;
46 u16 dx;
47 u16 bx;
48 u16 sp;
49 u16 bp;
50 u16 si;
51 u16 di;
52 u16 es;
53 u16 cs;
54 u16 ss;
55 u16 ds;
56 u16 ldt;
57};
58
59#endif
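A size check on the new header: with natural packing, tss_segment_32 is 25 u32 fields plus two u16s (100 + 4 = 104 bytes) and tss_segment_16 is 22 u16 fields (44 bytes), matching the architectural TSS segment sizes. A C11-style build-time assertion for code including this header might read:

_Static_assert(sizeof(struct tss_segment_32) == 104, "32-bit TSS size");
_Static_assert(sizeof(struct tss_segment_16) == 44, "16-bit TSS size");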
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 8e1462880d1f..8e5d6645b90d 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -17,7 +17,6 @@
 
 #include "irq.h"
 #include "vmx.h"
-#include "segment_descriptor.h"
 #include "mmu.h"
 
 #include <linux/kvm_host.h>
@@ -37,6 +36,12 @@ MODULE_LICENSE("GPL");
 static int bypass_guest_pf = 1;
 module_param(bypass_guest_pf, bool, 0);
 
+static int enable_vpid = 1;
+module_param(enable_vpid, bool, 0);
+
+static int flexpriority_enabled = 1;
+module_param(flexpriority_enabled, bool, 0);
+
 struct vmcs {
 	u32 revision_id;
 	u32 abort;
@@ -71,6 +76,7 @@ struct vcpu_vmx {
 			unsigned rip;
 		} irq;
 	} rmode;
+	int vpid;
 };
 
 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
@@ -85,6 +91,10 @@ static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
 
 static struct page *vmx_io_bitmap_a;
 static struct page *vmx_io_bitmap_b;
+static struct page *vmx_msr_bitmap;
+
+static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
+static DEFINE_SPINLOCK(vmx_vpid_lock);
 
 static struct vmcs_config {
 	int size;
@@ -176,6 +186,11 @@ static inline int is_external_interrupt(u32 intr_info)
 		== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
 }
 
+static inline int cpu_has_vmx_msr_bitmap(void)
+{
+	return (vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS);
+}
+
 static inline int cpu_has_vmx_tpr_shadow(void)
 {
 	return (vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW);
@@ -194,8 +209,9 @@ static inline int cpu_has_secondary_exec_ctrls(void)
 
 static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
 {
-	return (vmcs_config.cpu_based_2nd_exec_ctrl &
-		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
+	return flexpriority_enabled
+		&& (vmcs_config.cpu_based_2nd_exec_ctrl &
+		    SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
 }
 
 static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm)
@@ -204,6 +220,12 @@ static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm)
 	    (irqchip_in_kernel(kvm)));
 }
 
+static inline int cpu_has_vmx_vpid(void)
+{
+	return (vmcs_config.cpu_based_2nd_exec_ctrl &
+		SECONDARY_EXEC_ENABLE_VPID);
+}
+
 static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
 {
 	int i;
@@ -214,6 +236,20 @@ static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
 	return -1;
 }
 
+static inline void __invvpid(int ext, u16 vpid, gva_t gva)
+{
+	struct {
+		u64 vpid : 16;
+		u64 rsvd : 48;
+		u64 gva;
+	} operand = { vpid, 0, gva };
+
+	asm volatile (ASM_VMX_INVVPID
+		  /* CF==1 or ZF==1 --> rc = -1 */
+		  "; ja 1f ; ud2 ; 1:"
+		  : : "a"(&operand), "c"(ext) : "cc", "memory");
+}
+
 static struct kvm_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
 {
 	int i;
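
For context on __invvpid() above: INVVPID takes its main operand as a 128-bit descriptor in memory — the VPID in bits 15:0, 48 reserved bits, then a 64-bit linear address that only the address-specific invalidation type consults — while a register operand selects the invalidation scope. A standalone sketch of that descriptor with a compile-time size check (illustration only, not part of the patch):

	/* Mirrors the anonymous struct used by __invvpid() above. */
	struct invvpid_operand {
		u64 vpid : 16;	/* bits 15:0: the tag to invalidate */
		u64 rsvd : 48;	/* must be zero */
		u64 gva;	/* consulted only by address-specific invalidation */
	};

	static inline void invvpid_operand_check(void)
	{
		BUILD_BUG_ON(sizeof(struct invvpid_operand) != 16);
	}

The two scopes this patch uses are defined in vmx.h further down: VMX_VPID_EXTENT_SINGLE_CONTEXT drops every cached translation tagged with one VPID, while VMX_VPID_EXTENT_ALL_CONTEXT drops them for all VPIDs.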
@@ -257,6 +293,14 @@ static void vcpu_clear(struct vcpu_vmx *vmx)
 	vmx->launched = 0;
 }
 
+static inline void vpid_sync_vcpu_all(struct vcpu_vmx *vmx)
+{
+	if (vmx->vpid == 0)
+		return;
+
+	__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0);
+}
+
 static unsigned long vmcs_readl(unsigned long field)
 {
 	unsigned long value;
@@ -353,7 +397,7 @@ static void reload_tss(void)
 	 * VT restores TR but not its size.  Useless.
 	 */
 	struct descriptor_table gdt;
-	struct segment_descriptor *descs;
+	struct desc_struct *descs;
 
 	get_gdt(&gdt);
 	descs = (void *)gdt.base;
@@ -485,11 +529,12 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u64 phys_addr = __pa(vmx->vmcs);
-	u64 tsc_this, delta;
+	u64 tsc_this, delta, new_offset;
 
 	if (vcpu->cpu != cpu) {
 		vcpu_clear(vmx);
 		kvm_migrate_apic_timer(vcpu);
+		vpid_sync_vcpu_all(vmx);
 	}
 
 	if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
@@ -524,8 +569,11 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	 * Make sure the time stamp counter is monotonous.
 	 */
 	rdtscll(tsc_this);
-	delta = vcpu->arch.host_tsc - tsc_this;
-	vmcs_write64(TSC_OFFSET, vmcs_read64(TSC_OFFSET) + delta);
+	if (tsc_this < vcpu->arch.host_tsc) {
+		delta = vcpu->arch.host_tsc - tsc_this;
+		new_offset = vmcs_read64(TSC_OFFSET) + delta;
+		vmcs_write64(TSC_OFFSET, new_offset);
+	}
 	}
 }
 
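
The rewritten block above compensates only when the destination CPU's TSC is behind the source CPU's, so the guest-visible counter (host TSC plus TSC_OFFSET) is never pushed backwards on migration. A toy model of the arithmetic, illustrative only:

	/* guest_tsc = host_tsc + offset; only adjust when the new CPU lags. */
	static u64 migrate_tsc_offset(u64 offset, u64 old_host_tsc, u64 new_host_tsc)
	{
		if (new_host_tsc < old_host_tsc)
			offset += old_host_tsc - new_host_tsc; /* e.g. 1000 - 400 = 600 */
		return offset;
	}

If the new CPU's TSC is ahead, no adjustment is made and the guest simply observes a forward jump, which is harmless for monotonicity.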
@@ -596,7 +644,7 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
 {
 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
 		     nr | INTR_TYPE_EXCEPTION
-		     | (has_error_code ? INTR_INFO_DELIEVER_CODE_MASK : 0)
+		     | (has_error_code ? INTR_INFO_DELIVER_CODE_MASK : 0)
 		     | INTR_INFO_VALID_MASK);
 	if (has_error_code)
 		vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
@@ -959,6 +1007,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 	      CPU_BASED_MOV_DR_EXITING |
 	      CPU_BASED_USE_TSC_OFFSETING;
 	opt = CPU_BASED_TPR_SHADOW |
+	      CPU_BASED_USE_MSR_BITMAPS |
 	      CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
 	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
 				&_cpu_based_exec_control) < 0)
@@ -971,7 +1020,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 	if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
 		min = 0;
 		opt = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
-			SECONDARY_EXEC_WBINVD_EXITING;
+			SECONDARY_EXEC_WBINVD_EXITING |
+			SECONDARY_EXEC_ENABLE_VPID;
 		if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS2,
 					&_cpu_based_2nd_exec_control) < 0)
 			return -EIO;
@@ -1080,6 +1130,10 @@ static __init int hardware_setup(void)
 {
 	if (setup_vmcs_config(&vmcs_config) < 0)
 		return -EIO;
+
+	if (boot_cpu_has(X86_FEATURE_NX))
+		kvm_enable_efer_bits(EFER_NX);
+
 	return alloc_kvm_area();
 }
 
@@ -1214,7 +1268,7 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
 	guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
 	if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
 		printk(KERN_DEBUG "%s: tss fixup for long mode. \n",
-		       __FUNCTION__);
+		       __func__);
 		vmcs_write32(GUEST_TR_AR_BYTES,
 			     (guest_tr_ar & ~AR_TYPE_MASK)
 			     | AR_TYPE_BUSY_64_TSS);
@@ -1239,6 +1293,11 @@ static void exit_lmode(struct kvm_vcpu *vcpu)
 
 #endif
 
+static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
+{
+	vpid_sync_vcpu_all(to_vmx(vcpu));
+}
+
 static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.cr4 &= KVM_GUEST_CR4_MASK;
@@ -1275,6 +1334,7 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 
 static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
+	vmx_flush_tlb(vcpu);
 	vmcs_writel(GUEST_CR3, cr3);
 	if (vcpu->arch.cr0 & X86_CR0_PE)
 		vmx_fpu_deactivate(vcpu);
@@ -1288,14 +1348,14 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	vcpu->arch.cr4 = cr4;
 }
 
-#ifdef CONFIG_X86_64
-
 static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
 
 	vcpu->arch.shadow_efer = efer;
+	if (!msr)
+		return;
 	if (efer & EFER_LMA) {
 		vmcs_write32(VM_ENTRY_CONTROLS,
 			     vmcs_read32(VM_ENTRY_CONTROLS) |
@@ -1312,8 +1372,6 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 	setup_msrs(vmx);
 }
 
-#endif
-
 static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
 {
 	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
@@ -1344,6 +1402,20 @@ static void vmx_get_segment(struct kvm_vcpu *vcpu,
 	var->unusable = (ar >> 16) & 1;
 }
 
+static int vmx_get_cpl(struct kvm_vcpu *vcpu)
+{
+	struct kvm_segment kvm_seg;
+
+	if (!(vcpu->arch.cr0 & X86_CR0_PE)) /* if real mode */
+		return 0;
+
+	if (vmx_get_rflags(vcpu) & X86_EFLAGS_VM) /* if virtual 8086 */
+		return 3;
+
+	vmx_get_segment(vcpu, &kvm_seg, VCPU_SREG_CS);
+	return kvm_seg.selector & 3;
+}
+
 static u32 vmx_segment_access_rights(struct kvm_segment *var)
 {
 	u32 ar;
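
vmx_get_cpl() above applies the usual x86 precedence: real mode has no protection rings (CPL 0), virtual-8086 code always executes at CPL 3, and otherwise the low two bits of the CS selector carry the current privilege level. The same derivation as a free-standing sketch (hypothetical helper, not in the patch):

	static int example_cpl(int protected_mode, int v8086, u16 cs_selector)
	{
		if (!protected_mode)
			return 0;	/* real mode: no privilege levels */
		if (v8086)
			return 3;	/* virtual-8086 always runs at CPL 3 */
		return cs_selector & 3;	/* e.g. 0x33 & 3 == 3, 0x10 & 3 == 0 */
	}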
@@ -1433,7 +1505,6 @@ static int init_rmode_tss(struct kvm *kvm)
 	int ret = 0;
 	int r;
 
-	down_read(&kvm->slots_lock);
 	r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
 	if (r < 0)
 		goto out;
@@ -1456,7 +1527,6 @@ static int init_rmode_tss(struct kvm *kvm)
 
 	ret = 1;
 out:
-	up_read(&kvm->slots_lock);
 	return ret;
 }
 
@@ -1494,6 +1564,46 @@ out:
 	return r;
 }
 
+static void allocate_vpid(struct vcpu_vmx *vmx)
+{
+	int vpid;
+
+	vmx->vpid = 0;
+	if (!enable_vpid || !cpu_has_vmx_vpid())
+		return;
+	spin_lock(&vmx_vpid_lock);
+	vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
+	if (vpid < VMX_NR_VPIDS) {
+		vmx->vpid = vpid;
+		__set_bit(vpid, vmx_vpid_bitmap);
+	}
+	spin_unlock(&vmx_vpid_lock);
+}
+
+void vmx_disable_intercept_for_msr(struct page *msr_bitmap, u32 msr)
+{
+	void *va;
+
+	if (!cpu_has_vmx_msr_bitmap())
+		return;
+
+	/*
+	 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
+	 * have the write-low and read-high bitmap offsets the wrong way round.
+	 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
+	 */
+	va = kmap(msr_bitmap);
+	if (msr <= 0x1fff) {
+		__clear_bit(msr, va + 0x000); /* read-low */
+		__clear_bit(msr, va + 0x800); /* write-low */
+	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
+		msr &= 0x1fff;
+		__clear_bit(msr, va + 0x400); /* read-high */
+		__clear_bit(msr, va + 0xc00); /* write-high */
+	}
+	kunmap(msr_bitmap);
+}
+
 /*
  * Sets up the vmcs for emulated real mode.
 */
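
The layout vmx_disable_intercept_for_msr() handles splits the 4 KiB bitmap page into four 2 KiB quarters — read-low (0x000), read-high (0x400), write-low (0x800), write-high (0xc00) — with one bit per MSR in each quarter; a clear bit means no VM exit for that access. A sketch of the byte-offset arithmetic, assuming the layout described in the comment above:

	/* Returns the byte within the bitmap page holding this MSR's bit,
	 * or -1 when the MSR is outside the two ranges the bitmap covers. */
	static int example_msr_bitmap_byte(u32 msr, int write)
	{
		u32 base;

		if (msr <= 0x1fff)
			base = write ? 0x800 : 0x000;	/* "low" range */
		else if (msr >= 0xc0000000 && msr <= 0xc0001fff)
			base = write ? 0xc00 : 0x400;	/* "high" range */
		else
			return -1;
		return base + ((msr & 0x1fff) / 8);
	}

For example, the read bit for MSR_FS_BASE (0xc0000100) lands at byte 0x400 + 0x20 of the page.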
@@ -1511,6 +1621,9 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	vmcs_write64(IO_BITMAP_A, page_to_phys(vmx_io_bitmap_a));
 	vmcs_write64(IO_BITMAP_B, page_to_phys(vmx_io_bitmap_b));
 
+	if (cpu_has_vmx_msr_bitmap())
+		vmcs_write64(MSR_BITMAP, page_to_phys(vmx_msr_bitmap));
+
 	vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
 
 	/* Control */
@@ -1532,6 +1645,8 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 		if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
 			exec_control &=
 				~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+		if (vmx->vpid == 0)
+			exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
 		vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
 	}
 
@@ -1613,6 +1728,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 	u64 msr;
 	int ret;
 
+	down_read(&vcpu->kvm->slots_lock);
 	if (!init_rmode_tss(vmx->vcpu.kvm)) {
 		ret = -ENOMEM;
 		goto out;
@@ -1621,7 +1737,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 	vmx->vcpu.arch.rmode.active = 0;
 
 	vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
-	set_cr8(&vmx->vcpu, 0);
+	kvm_set_cr8(&vmx->vcpu, 0);
 	msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
 	if (vmx->vcpu.vcpu_id == 0)
 		msr |= MSR_IA32_APICBASE_BSP;
@@ -1704,18 +1820,22 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 		vmcs_write64(APIC_ACCESS_ADDR,
 			     page_to_phys(vmx->vcpu.kvm->arch.apic_access_page));
 
+	if (vmx->vpid != 0)
+		vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
+
 	vmx->vcpu.arch.cr0 = 0x60000010;
 	vmx_set_cr0(&vmx->vcpu, vmx->vcpu.arch.cr0); /* enter rmode */
 	vmx_set_cr4(&vmx->vcpu, 0);
-#ifdef CONFIG_X86_64
 	vmx_set_efer(&vmx->vcpu, 0);
-#endif
 	vmx_fpu_activate(&vmx->vcpu);
 	update_exception_bitmap(&vmx->vcpu);
 
-	return 0;
+	vpid_sync_vcpu_all(vmx);
+
+	ret = 0;
 
 out:
+	up_read(&vcpu->kvm->slots_lock);
 	return ret;
 }
 
@@ -1723,6 +1843,8 @@ static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
+	KVMTRACE_1D(INJ_VIRQ, vcpu, (u32)irq, handler);
+
 	if (vcpu->arch.rmode.active) {
 		vmx->rmode.irq.pending = true;
 		vmx->rmode.irq.vector = irq;
@@ -1844,7 +1966,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	if ((vect_info & VECTORING_INFO_VALID_MASK) &&
 	    !is_page_fault(intr_info))
 		printk(KERN_ERR "%s: unexpected, vectoring info 0x%x "
-		       "intr info 0x%x\n", __FUNCTION__, vect_info, intr_info);
+		       "intr info 0x%x\n", __func__, vect_info, intr_info);
 
 	if (!irqchip_in_kernel(vcpu->kvm) && is_external_interrupt(vect_info)) {
 		int irq = vect_info & VECTORING_INFO_VECTOR_MASK;
@@ -1869,10 +1991,12 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	error_code = 0;
 	rip = vmcs_readl(GUEST_RIP);
-	if (intr_info & INTR_INFO_DELIEVER_CODE_MASK)
+	if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
 		error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
 	if (is_page_fault(intr_info)) {
 		cr2 = vmcs_readl(EXIT_QUALIFICATION);
+		KVMTRACE_3D(PAGE_FAULT, vcpu, error_code, (u32)cr2,
+			    (u32)((u64)cr2 >> 32), handler);
 		return kvm_mmu_page_fault(vcpu, cr2, error_code);
 	}
 
@@ -1901,6 +2025,7 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu,
 				     struct kvm_run *kvm_run)
 {
 	++vcpu->stat.irq_exits;
+	KVMTRACE_1D(INTR, vcpu, vmcs_read32(VM_EXIT_INTR_INFO), handler);
 	return 1;
 }
 
@@ -1958,25 +2083,27 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	reg = (exit_qualification >> 8) & 15;
 	switch ((exit_qualification >> 4) & 3) {
 	case 0: /* mov to cr */
+		KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, (u32)vcpu->arch.regs[reg],
+			    (u32)((u64)vcpu->arch.regs[reg] >> 32), handler);
 		switch (cr) {
 		case 0:
 			vcpu_load_rsp_rip(vcpu);
-			set_cr0(vcpu, vcpu->arch.regs[reg]);
+			kvm_set_cr0(vcpu, vcpu->arch.regs[reg]);
 			skip_emulated_instruction(vcpu);
 			return 1;
 		case 3:
 			vcpu_load_rsp_rip(vcpu);
-			set_cr3(vcpu, vcpu->arch.regs[reg]);
+			kvm_set_cr3(vcpu, vcpu->arch.regs[reg]);
 			skip_emulated_instruction(vcpu);
 			return 1;
 		case 4:
 			vcpu_load_rsp_rip(vcpu);
-			set_cr4(vcpu, vcpu->arch.regs[reg]);
+			kvm_set_cr4(vcpu, vcpu->arch.regs[reg]);
 			skip_emulated_instruction(vcpu);
 			return 1;
 		case 8:
 			vcpu_load_rsp_rip(vcpu);
-			set_cr8(vcpu, vcpu->arch.regs[reg]);
+			kvm_set_cr8(vcpu, vcpu->arch.regs[reg]);
 			skip_emulated_instruction(vcpu);
 			if (irqchip_in_kernel(vcpu->kvm))
 				return 1;
@@ -1990,6 +2117,7 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		vcpu->arch.cr0 &= ~X86_CR0_TS;
 		vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
 		vmx_fpu_activate(vcpu);
+		KVMTRACE_0D(CLTS, vcpu, handler);
 		skip_emulated_instruction(vcpu);
 		return 1;
 	case 1: /*mov from cr*/
@@ -1998,18 +2126,24 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 			vcpu_load_rsp_rip(vcpu);
 			vcpu->arch.regs[reg] = vcpu->arch.cr3;
 			vcpu_put_rsp_rip(vcpu);
+			KVMTRACE_3D(CR_READ, vcpu, (u32)cr,
+				    (u32)vcpu->arch.regs[reg],
+				    (u32)((u64)vcpu->arch.regs[reg] >> 32),
+				    handler);
 			skip_emulated_instruction(vcpu);
 			return 1;
 		case 8:
 			vcpu_load_rsp_rip(vcpu);
-			vcpu->arch.regs[reg] = get_cr8(vcpu);
+			vcpu->arch.regs[reg] = kvm_get_cr8(vcpu);
 			vcpu_put_rsp_rip(vcpu);
+			KVMTRACE_2D(CR_READ, vcpu, (u32)cr,
+				    (u32)vcpu->arch.regs[reg], handler);
 			skip_emulated_instruction(vcpu);
 			return 1;
 		}
 		break;
 	case 3: /* lmsw */
-		lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f);
+		kvm_lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f);
 
 		skip_emulated_instruction(vcpu);
 		return 1;
@@ -2049,6 +2183,7 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 			val = 0;
 		}
 		vcpu->arch.regs[reg] = val;
+		KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
 	} else {
 		/* mov to dr */
 	}
@@ -2073,6 +2208,9 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		return 1;
 	}
 
+	KVMTRACE_3D(MSR_READ, vcpu, ecx, (u32)data, (u32)(data >> 32),
+		    handler);
+
 	/* FIXME: handling of bits 32:63 of rax, rdx */
 	vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u;
 	vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
@@ -2086,6 +2224,9 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
 		| ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
 
+	KVMTRACE_3D(MSR_WRITE, vcpu, ecx, (u32)data, (u32)(data >> 32),
+		    handler);
+
 	if (vmx_set_msr(vcpu, ecx, data) != 0) {
 		kvm_inject_gp(vcpu, 0);
 		return 1;
@@ -2110,6 +2251,9 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu,
 	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
 	cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
 	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
+
+	KVMTRACE_0D(PEND_INTR, vcpu, handler);
+
 	/*
 	 * If the user space waits to inject interrupts, exit as soon as
 	 * possible
@@ -2152,6 +2296,8 @@ static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
 	offset = exit_qualification & 0xffful;
 
+	KVMTRACE_1D(APIC_ACCESS, vcpu, (u32)offset, handler);
+
 	er = emulate_instruction(vcpu, kvm_run, 0, 0, 0);
 
 	if (er != EMULATE_DONE) {
@@ -2163,6 +2309,20 @@ static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	return 1;
 }
 
+static int handle_task_switch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+	unsigned long exit_qualification;
+	u16 tss_selector;
+	int reason;
+
+	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+
+	reason = (u32)exit_qualification >> 30;
+	tss_selector = exit_qualification;
+
+	return kvm_task_switch(vcpu, tss_selector, reason);
+}
+
 /*
  * The exit handlers return 1 if the exit was handled fully and guest execution
  * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
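
handle_task_switch() above leans on the VMX exit-qualification packing for task-switch exits: the incoming TSS selector occupies bits 15:0 and the switch reason bits 31:30. A sketch of the unpacking; the reason encoding (0 CALL, 1 IRET, 2 JMP, 3 task gate in the IDT) is taken from the VMX documentation and stated here as an assumption:

	static void example_decode_task_switch(unsigned long exit_qualification,
					       u16 *tss_selector, int *reason)
	{
		*tss_selector = exit_qualification & 0xffff;	/* bits 15:0 */
		*reason = ((u32)exit_qualification) >> 30;	/* bits 31:30 */
	}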
@@ -2185,6 +2345,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
 	[EXIT_REASON_TPR_BELOW_THRESHOLD]     = handle_tpr_below_threshold,
 	[EXIT_REASON_APIC_ACCESS]             = handle_apic_access,
 	[EXIT_REASON_WBINVD]                  = handle_wbinvd,
+	[EXIT_REASON_TASK_SWITCH]             = handle_task_switch,
 };
 
 static const int kvm_vmx_max_exit_handlers =
@@ -2200,6 +2361,9 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u32 vectoring_info = vmx->idt_vectoring_info;
 
+	KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)vmcs_readl(GUEST_RIP),
+		    (u32)((u64)vmcs_readl(GUEST_RIP) >> 32), entryexit);
+
 	if (unlikely(vmx->fail)) {
 		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
 		kvm_run->fail_entry.hardware_entry_failure_reason
@@ -2210,7 +2374,7 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
 	    exit_reason != EXIT_REASON_EXCEPTION_NMI)
 		printk(KERN_WARNING "%s: unexpected, valid vectoring info and "
-		       "exit reason is 0x%x\n", __FUNCTION__, exit_reason);
+		       "exit reason is 0x%x\n", __func__, exit_reason);
 	if (exit_reason < kvm_vmx_max_exit_handlers
 	    && kvm_vmx_exit_handlers[exit_reason])
 		return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run);
@@ -2221,10 +2385,6 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
-{
-}
-
 static void update_tpr_threshold(struct kvm_vcpu *vcpu)
 {
 	int max_irr, tpr;
@@ -2285,11 +2445,13 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
 		return;
 	}
 
+	KVMTRACE_1D(REDELIVER_EVT, vcpu, idtv_info_field, handler);
+
 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);
 	vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
 		     vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
 
-	if (unlikely(idtv_info_field & INTR_INFO_DELIEVER_CODE_MASK))
+	if (unlikely(idtv_info_field & INTR_INFO_DELIVER_CODE_MASK))
 		vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
 			     vmcs_read32(IDT_VECTORING_ERROR_CODE));
 	if (unlikely(has_ext_irq))
@@ -2470,8 +2632,10 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
 
 	/* We need to handle NMIs before interrupts are enabled */
-	if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) /* nmi */
+	if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) { /* nmi */
+		KVMTRACE_0D(NMI, vcpu, handler);
 		asm("int $2");
+	}
 }
 
 static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
@@ -2489,6 +2653,10 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
+	spin_lock(&vmx_vpid_lock);
+	if (vmx->vpid != 0)
+		__clear_bit(vmx->vpid, vmx_vpid_bitmap);
+	spin_unlock(&vmx_vpid_lock);
 	vmx_free_vmcs(vcpu);
 	kfree(vmx->host_msrs);
 	kfree(vmx->guest_msrs);
@@ -2505,6 +2673,8 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 	if (!vmx)
 		return ERR_PTR(-ENOMEM);
 
+	allocate_vpid(vmx);
+
 	err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
 	if (err)
 		goto free_vcpu;
@@ -2591,14 +2761,13 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.get_segment_base = vmx_get_segment_base,
 	.get_segment = vmx_get_segment,
 	.set_segment = vmx_set_segment,
+	.get_cpl = vmx_get_cpl,
 	.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
 	.decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
 	.set_cr0 = vmx_set_cr0,
 	.set_cr3 = vmx_set_cr3,
 	.set_cr4 = vmx_set_cr4,
-#ifdef CONFIG_X86_64
 	.set_efer = vmx_set_efer,
-#endif
 	.get_idt = vmx_get_idt,
 	.set_idt = vmx_set_idt,
 	.get_gdt = vmx_get_gdt,
@@ -2626,7 +2795,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 
 static int __init vmx_init(void)
 {
-	void *iova;
+	void *va;
 	int r;
 
 	vmx_io_bitmap_a = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
@@ -2639,28 +2808,48 @@ static int __init vmx_init(void)
 		goto out;
 	}
 
+	vmx_msr_bitmap = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
+	if (!vmx_msr_bitmap) {
+		r = -ENOMEM;
+		goto out1;
+	}
+
 	/*
 	 * Allow direct access to the PC debug port (it is often used for I/O
 	 * delays, but the vmexits simply slow things down).
 	 */
-	iova = kmap(vmx_io_bitmap_a);
-	memset(iova, 0xff, PAGE_SIZE);
-	clear_bit(0x80, iova);
+	va = kmap(vmx_io_bitmap_a);
+	memset(va, 0xff, PAGE_SIZE);
+	clear_bit(0x80, va);
 	kunmap(vmx_io_bitmap_a);
 
-	iova = kmap(vmx_io_bitmap_b);
-	memset(iova, 0xff, PAGE_SIZE);
+	va = kmap(vmx_io_bitmap_b);
+	memset(va, 0xff, PAGE_SIZE);
 	kunmap(vmx_io_bitmap_b);
 
+	va = kmap(vmx_msr_bitmap);
+	memset(va, 0xff, PAGE_SIZE);
+	kunmap(vmx_msr_bitmap);
+
+	set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
+
 	r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
 	if (r)
-		goto out1;
+		goto out2;
+
+	vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_FS_BASE);
+	vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_GS_BASE);
+	vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_CS);
+	vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_ESP);
+	vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_EIP);
 
 	if (bypass_guest_pf)
 		kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull);
 
 	return 0;
 
+out2:
+	__free_page(vmx_msr_bitmap);
 out1:
 	__free_page(vmx_io_bitmap_b);
 out:
@@ -2670,6 +2859,7 @@ out:
 
 static void __exit vmx_exit(void)
 {
+	__free_page(vmx_msr_bitmap);
 	__free_page(vmx_io_bitmap_b);
 	__free_page(vmx_io_bitmap_a);
 
diff --git a/arch/x86/kvm/vmx.h b/arch/x86/kvm/vmx.h
index d52ae8d7303d..5dff4606b988 100644
--- a/arch/x86/kvm/vmx.h
+++ b/arch/x86/kvm/vmx.h
@@ -49,6 +49,7 @@
  * Definitions of Secondary Processor-Based VM-Execution Controls.
  */
 #define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001
+#define SECONDARY_EXEC_ENABLE_VPID              0x00000020
 #define SECONDARY_EXEC_WBINVD_EXITING           0x00000040
 
 
@@ -65,6 +66,7 @@
 
 /* VMCS Encodings */
 enum vmcs_field {
+	VIRTUAL_PROCESSOR_ID            = 0x00000000,
 	GUEST_ES_SELECTOR               = 0x00000800,
 	GUEST_CS_SELECTOR               = 0x00000802,
 	GUEST_SS_SELECTOR               = 0x00000804,
@@ -231,12 +233,12 @@ enum vmcs_field {
  */
 #define INTR_INFO_VECTOR_MASK           0xff            /* 7:0 */
 #define INTR_INFO_INTR_TYPE_MASK        0x700           /* 10:8 */
-#define INTR_INFO_DELIEVER_CODE_MASK    0x800           /* 11 */
+#define INTR_INFO_DELIVER_CODE_MASK     0x800           /* 11 */
 #define INTR_INFO_VALID_MASK            0x80000000      /* 31 */
 
 #define VECTORING_INFO_VECTOR_MASK      INTR_INFO_VECTOR_MASK
 #define VECTORING_INFO_TYPE_MASK        INTR_INFO_INTR_TYPE_MASK
-#define VECTORING_INFO_DELIEVER_CODE_MASK    INTR_INFO_DELIEVER_CODE_MASK
+#define VECTORING_INFO_DELIVER_CODE_MASK     INTR_INFO_DELIVER_CODE_MASK
 #define VECTORING_INFO_VALID_MASK       INTR_INFO_VALID_MASK
 
 #define INTR_TYPE_EXT_INTR              (0 << 8) /* external interrupt */
@@ -321,4 +323,8 @@ enum vmcs_field {
 
 #define APIC_ACCESS_PAGE_PRIVATE_MEMSLOT	9
 
+#define VMX_NR_VPIDS				(1 << 16)
+#define VMX_VPID_EXTENT_SINGLE_CONTEXT		1
+#define VMX_VPID_EXTENT_ALL_CONTEXT		2
+
 #endif
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6b01552bd1f1..0ce556372a4d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -15,10 +15,12 @@
  */
 
 #include <linux/kvm_host.h>
-#include "segment_descriptor.h"
 #include "irq.h"
 #include "mmu.h"
+#include "i8254.h"
+#include "tss.h"
 
+#include <linux/clocksource.h>
 #include <linux/kvm.h>
 #include <linux/fs.h>
 #include <linux/vmalloc.h>
@@ -28,6 +30,7 @@
 
 #include <asm/uaccess.h>
 #include <asm/msr.h>
+#include <asm/desc.h>
 
 #define MAX_IO_MSRS 256
 #define CR0_RESERVED_BITS						\
@@ -41,7 +44,15 @@
 			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
 
 #define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
-#define EFER_RESERVED_BITS 0xfffffffffffff2fe
+/* EFER defaults:
+ * - enable syscall per default because its emulated by KVM
+ * - enable LME and LMA per default on 64 bit KVM
+ */
+#ifdef CONFIG_X86_64
+static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL;
+#else
+static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
+#endif
 
 #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
@@ -63,6 +74,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "irq_window", VCPU_STAT(irq_window_exits) },
 	{ "halt_exits", VCPU_STAT(halt_exits) },
 	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
+	{ "hypercalls", VCPU_STAT(hypercalls) },
 	{ "request_irq", VCPU_STAT(request_irq_exits) },
 	{ "irq_exits", VCPU_STAT(irq_exits) },
 	{ "host_state_reload", VCPU_STAT(host_state_reload) },
@@ -78,6 +90,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "mmu_recycled", VM_STAT(mmu_recycled) },
 	{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
 	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
+	{ "largepages", VM_STAT(lpages) },
 	{ NULL }
 };
 
@@ -85,7 +98,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 unsigned long segment_base(u16 selector)
 {
 	struct descriptor_table gdt;
-	struct segment_descriptor *d;
+	struct desc_struct *d;
 	unsigned long table_base;
 	unsigned long v;
 
@@ -101,13 +114,12 @@ unsigned long segment_base(u16 selector)
 		asm("sldt %0" : "=g"(ldt_selector));
 		table_base = segment_base(ldt_selector);
 	}
-	d = (struct segment_descriptor *)(table_base + (selector & ~7));
-	v = d->base_low | ((unsigned long)d->base_mid << 16) |
-		((unsigned long)d->base_high << 24);
+	d = (struct desc_struct *)(table_base + (selector & ~7));
+	v = d->base0 | ((unsigned long)d->base1 << 16) |
+		((unsigned long)d->base2 << 24);
 #ifdef CONFIG_X86_64
-	if (d->system == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
-		v |= ((unsigned long) \
-			((struct segment_descriptor_64 *)d)->base_higher) << 32;
+	if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
+		v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
 #endif
 	return v;
 }
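
segment_base() above reassembles a base address that the descriptor format scatters across three fields: base0 carries bits 15:0, base1 bits 23:16 and base2 bits 31:24, with 64-bit system descriptors adding a fourth dword (base3) for bits 63:32. A worked example with made-up field values:

	/* base0 = 0x8000, base1 = 0x12, base2 = 0xc0 -> 0xc0128000 */
	static unsigned long example_descriptor_base(u16 base0, u8 base1, u8 base2)
	{
		return base0 | ((unsigned long)base1 << 16) |
		       ((unsigned long)base2 << 24);
	}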
@@ -145,11 +157,16 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
 			   u32 error_code)
 {
 	++vcpu->stat.pf_guest;
-	if (vcpu->arch.exception.pending && vcpu->arch.exception.nr == PF_VECTOR) {
-		printk(KERN_DEBUG "kvm: inject_page_fault:"
-		       " double fault 0x%lx\n", addr);
-		vcpu->arch.exception.nr = DF_VECTOR;
-		vcpu->arch.exception.error_code = 0;
+	if (vcpu->arch.exception.pending) {
+		if (vcpu->arch.exception.nr == PF_VECTOR) {
+			printk(KERN_DEBUG "kvm: inject_page_fault:"
+			       " double fault 0x%lx\n", addr);
+			vcpu->arch.exception.nr = DF_VECTOR;
+			vcpu->arch.exception.error_code = 0;
+		} else if (vcpu->arch.exception.nr == DF_VECTOR) {
+			/* triple fault -> shutdown */
+			set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
+		}
 		return;
 	}
 	vcpu->arch.cr2 = addr;
@@ -184,7 +201,6 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
 	int ret;
 	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
 
-	down_read(&vcpu->kvm->slots_lock);
 	ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
 				  offset * sizeof(u64), sizeof(pdpte));
 	if (ret < 0) {
@@ -201,10 +217,10 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
 
 	memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
 out:
-	up_read(&vcpu->kvm->slots_lock);
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(load_pdptrs);
 
 static bool pdptrs_changed(struct kvm_vcpu *vcpu)
 {
@@ -215,18 +231,16 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu)
 	if (is_long_mode(vcpu) || !is_pae(vcpu))
 		return false;
 
-	down_read(&vcpu->kvm->slots_lock);
 	r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
 	if (r < 0)
 		goto out;
 	changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
 out:
-	up_read(&vcpu->kvm->slots_lock);
 
 	return changed;
 }
 
-void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
 	if (cr0 & CR0_RESERVED_BITS) {
 		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
@@ -284,15 +298,18 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	kvm_mmu_reset_context(vcpu);
 	return;
 }
-EXPORT_SYMBOL_GPL(set_cr0);
+EXPORT_SYMBOL_GPL(kvm_set_cr0);
 
-void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
+void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 {
-	set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
+	kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
+	KVMTRACE_1D(LMSW, vcpu,
+		    (u32)((vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f)),
+		    handler);
 }
-EXPORT_SYMBOL_GPL(lmsw);
+EXPORT_SYMBOL_GPL(kvm_lmsw);
 
-void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	if (cr4 & CR4_RESERVED_BITS) {
 		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
@@ -323,9 +340,9 @@ void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	vcpu->arch.cr4 = cr4;
 	kvm_mmu_reset_context(vcpu);
 }
-EXPORT_SYMBOL_GPL(set_cr4);
+EXPORT_SYMBOL_GPL(kvm_set_cr4);
 
-void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
+void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
 	if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
 		kvm_mmu_flush_tlb(vcpu);
@@ -359,7 +376,6 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 		 */
 	}
 
-	down_read(&vcpu->kvm->slots_lock);
 	/*
 	 * Does the new cr3 value map to physical memory? (Note, we
 	 * catch an invalid cr3 even in real-mode, because it would
@@ -375,11 +391,10 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 		vcpu->arch.cr3 = cr3;
 		vcpu->arch.mmu.new_cr3(vcpu);
 	}
-	up_read(&vcpu->kvm->slots_lock);
 }
-EXPORT_SYMBOL_GPL(set_cr3);
+EXPORT_SYMBOL_GPL(kvm_set_cr3);
 
-void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
+void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
 {
 	if (cr8 & CR8_RESERVED_BITS) {
 		printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
@@ -391,16 +406,16 @@ void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
 	else
 		vcpu->arch.cr8 = cr8;
 }
-EXPORT_SYMBOL_GPL(set_cr8);
+EXPORT_SYMBOL_GPL(kvm_set_cr8);
 
-unsigned long get_cr8(struct kvm_vcpu *vcpu)
+unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
 {
 	if (irqchip_in_kernel(vcpu->kvm))
 		return kvm_lapic_get_cr8(vcpu);
 	else
 		return vcpu->arch.cr8;
 }
-EXPORT_SYMBOL_GPL(get_cr8);
+EXPORT_SYMBOL_GPL(kvm_get_cr8);
 
 /*
  * List of msr numbers which we expose to userspace through KVM_GET_MSRS
@@ -415,7 +430,8 @@ static u32 msrs_to_save[] = {
 #ifdef CONFIG_X86_64
 	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
 #endif
-	MSR_IA32_TIME_STAMP_COUNTER,
+	MSR_IA32_TIME_STAMP_COUNTER, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
+	MSR_IA32_PERF_STATUS,
 };
 
 static unsigned num_msrs_to_save;
@@ -424,11 +440,9 @@ static u32 emulated_msrs[] = {
 	MSR_IA32_MISC_ENABLE,
 };
 
-#ifdef CONFIG_X86_64
-
 static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
-	if (efer & EFER_RESERVED_BITS) {
+	if (efer & efer_reserved_bits) {
 		printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
 		       efer);
 		kvm_inject_gp(vcpu, 0);
@@ -450,7 +464,12 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 	vcpu->arch.shadow_efer = efer;
 }
 
-#endif
+void kvm_enable_efer_bits(u64 mask)
+{
+	efer_reserved_bits &= ~mask;
+}
+EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
+
 
 /*
  * Writes msr value into into the appropriate "register".
@@ -470,26 +489,86 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
 	return kvm_set_msr(vcpu, index, *data);
 }
 
+static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
+{
+	static int version;
+	struct kvm_wall_clock wc;
+	struct timespec wc_ts;
+
+	if (!wall_clock)
+		return;
+
+	version++;
+
+	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
+
+	wc_ts = current_kernel_time();
+	wc.wc_sec = wc_ts.tv_sec;
+	wc.wc_nsec = wc_ts.tv_nsec;
+	wc.wc_version = version;
+
+	kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
+
+	version++;
+	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
+}
+
+static void kvm_write_guest_time(struct kvm_vcpu *v)
+{
+	struct timespec ts;
+	unsigned long flags;
+	struct kvm_vcpu_arch *vcpu = &v->arch;
+	void *shared_kaddr;
+
+	if ((!vcpu->time_page))
+		return;
+
+	/* Keep irq disabled to prevent changes to the clock */
+	local_irq_save(flags);
+	kvm_get_msr(v, MSR_IA32_TIME_STAMP_COUNTER,
+		    &vcpu->hv_clock.tsc_timestamp);
+	ktime_get_ts(&ts);
+	local_irq_restore(flags);
+
+	/* With all the info we got, fill in the values */
+
+	vcpu->hv_clock.system_time = ts.tv_nsec +
+				     (NSEC_PER_SEC * (u64)ts.tv_sec);
+	/*
+	 * The interface expects us to write an even number signaling that the
+	 * update is finished. Since the guest won't see the intermediate
+	 * state, we just write "2" at the end
+	 */
+	vcpu->hv_clock.version = 2;
+
+	shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);
+
+	memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
+	       sizeof(vcpu->hv_clock));
+
+	kunmap_atomic(shared_kaddr, KM_USER0);
+
+	mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
+}
+
 
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
 	switch (msr) {
-#ifdef CONFIG_X86_64
 	case MSR_EFER:
 		set_efer(vcpu, data);
 		break;
-#endif
 	case MSR_IA32_MC0_STATUS:
 		pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
-			  __FUNCTION__, data);
+			  __func__, data);
 		break;
 	case MSR_IA32_MCG_STATUS:
 		pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
-			  __FUNCTION__, data);
+			  __func__, data);
 		break;
 	case MSR_IA32_MCG_CTL:
 		pr_unimpl(vcpu, "%s: MSR_IA32_MCG_CTL 0x%llx, nop\n",
-			  __FUNCTION__, data);
+			  __func__, data);
 		break;
 	case MSR_IA32_UCODE_REV:
 	case MSR_IA32_UCODE_WRITE:
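
kvm_write_wall_clock() above brackets the payload with two version increments, so the count is odd exactly while an update is in flight; a guest reader retries until it samples the same even version on both sides of the copy, seqlock style. A hedged sketch of such a reader — the two accessors are hypothetical placeholders for reads of the shared guest page:

	static void example_read_wall_clock(struct kvm_wall_clock *dst)
	{
		u32 v1, v2;

		do {
			v1 = read_wall_clock_version();	/* hypothetical accessor */
			rmb();
			read_wall_clock_payload(dst);	/* hypothetical accessor */
			rmb();
			v2 = read_wall_clock_version();
		} while (v1 != v2 || (v1 & 1));	/* torn or in-progress: retry */
	}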
@@ -501,6 +580,42 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 	case MSR_IA32_MISC_ENABLE:
 		vcpu->arch.ia32_misc_enable_msr = data;
 		break;
+	case MSR_KVM_WALL_CLOCK:
+		vcpu->kvm->arch.wall_clock = data;
+		kvm_write_wall_clock(vcpu->kvm, data);
+		break;
+	case MSR_KVM_SYSTEM_TIME: {
+		if (vcpu->arch.time_page) {
+			kvm_release_page_dirty(vcpu->arch.time_page);
+			vcpu->arch.time_page = NULL;
+		}
+
+		vcpu->arch.time = data;
+
+		/* we verify if the enable bit is set... */
+		if (!(data & 1))
+			break;
+
+		/* ...but clean it before doing the actual write */
+		vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
+
+		vcpu->arch.hv_clock.tsc_to_system_mul =
+			clocksource_khz2mult(tsc_khz, 22);
+		vcpu->arch.hv_clock.tsc_shift = 22;
+
+		down_read(&current->mm->mmap_sem);
+		vcpu->arch.time_page =
+			gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
+		up_read(&current->mm->mmap_sem);
+
+		if (is_error_page(vcpu->arch.time_page)) {
+			kvm_release_page_clean(vcpu->arch.time_page);
+			vcpu->arch.time_page = NULL;
+		}
+
+		kvm_write_guest_time(vcpu);
+		break;
+	}
 	default:
 		pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", msr, data);
 		return 1;
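
The MSR_KVM_SYSTEM_TIME handler above multiplexes three things into one 64-bit value: bit 0 enables the clock, the page-aligned bits name the guest frame holding the time structure, and the remaining low bits are the offset within that page. A worked decoding, illustrative only and assuming 4 KiB pages:

	/* data = 0x12345601 -> enabled = 1, gfn = 0x12345, offset = 0x600 */
	static void example_decode_system_time(u64 data, int *enabled,
					       u64 *gfn, u64 *offset)
	{
		*enabled = data & 1;
		*gfn = data >> PAGE_SHIFT;
		*offset = data & ~(PAGE_MASK | 1);
	}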
@@ -540,7 +655,6 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
540 case MSR_IA32_MC0_MISC+12: 655 case MSR_IA32_MC0_MISC+12:
541 case MSR_IA32_MC0_MISC+16: 656 case MSR_IA32_MC0_MISC+16:
542 case MSR_IA32_UCODE_REV: 657 case MSR_IA32_UCODE_REV:
543 case MSR_IA32_PERF_STATUS:
544 case MSR_IA32_EBL_CR_POWERON: 658 case MSR_IA32_EBL_CR_POWERON:
545 /* MTRR registers */ 659 /* MTRR registers */
546 case 0xfe: 660 case 0xfe:
@@ -556,11 +670,21 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
556 case MSR_IA32_MISC_ENABLE: 670 case MSR_IA32_MISC_ENABLE:
557 data = vcpu->arch.ia32_misc_enable_msr; 671 data = vcpu->arch.ia32_misc_enable_msr;
558 break; 672 break;
559#ifdef CONFIG_X86_64 673 case MSR_IA32_PERF_STATUS:
674 /* TSC increment by tick */
675 data = 1000ULL;
676 /* CPU multiplier */
677 data |= (((uint64_t)4ULL) << 40);
678 break;
560 case MSR_EFER: 679 case MSR_EFER:
561 data = vcpu->arch.shadow_efer; 680 data = vcpu->arch.shadow_efer;
562 break; 681 break;
563#endif 682 case MSR_KVM_WALL_CLOCK:
683 data = vcpu->kvm->arch.wall_clock;
684 break;
685 case MSR_KVM_SYSTEM_TIME:
686 data = vcpu->arch.time;
687 break;
564 default: 688 default:
565 pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr); 689 pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
566 return 1; 690 return 1;
@@ -584,9 +708,11 @@ static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
584 708
585 vcpu_load(vcpu); 709 vcpu_load(vcpu);
586 710
711 down_read(&vcpu->kvm->slots_lock);
587 for (i = 0; i < msrs->nmsrs; ++i) 712 for (i = 0; i < msrs->nmsrs; ++i)
588 if (do_msr(vcpu, entries[i].index, &entries[i].data)) 713 if (do_msr(vcpu, entries[i].index, &entries[i].data))
589 break; 714 break;
715 up_read(&vcpu->kvm->slots_lock);
590 716
591 vcpu_put(vcpu); 717 vcpu_put(vcpu);
592 718
@@ -688,11 +814,24 @@ int kvm_dev_ioctl_check_extension(long ext)
688 case KVM_CAP_USER_MEMORY: 814 case KVM_CAP_USER_MEMORY:
689 case KVM_CAP_SET_TSS_ADDR: 815 case KVM_CAP_SET_TSS_ADDR:
690 case KVM_CAP_EXT_CPUID: 816 case KVM_CAP_EXT_CPUID:
817 case KVM_CAP_CLOCKSOURCE:
818 case KVM_CAP_PIT:
819 case KVM_CAP_NOP_IO_DELAY:
820 case KVM_CAP_MP_STATE:
691 r = 1; 821 r = 1;
692 break; 822 break;
693 case KVM_CAP_VAPIC: 823 case KVM_CAP_VAPIC:
694 r = !kvm_x86_ops->cpu_has_accelerated_tpr(); 824 r = !kvm_x86_ops->cpu_has_accelerated_tpr();
695 break; 825 break;
826 case KVM_CAP_NR_VCPUS:
827 r = KVM_MAX_VCPUS;
828 break;
829 case KVM_CAP_NR_MEMSLOTS:
830 r = KVM_MEMORY_SLOTS;
831 break;
832 case KVM_CAP_PV_MMU:
833 r = !tdp_enabled;
834 break;
696 default: 835 default:
697 r = 0; 836 r = 0;
698 break; 837 break;
@@ -763,6 +902,7 @@ out:
763void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 902void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
764{ 903{
765 kvm_x86_ops->vcpu_load(vcpu, cpu); 904 kvm_x86_ops->vcpu_load(vcpu, cpu);
905 kvm_write_guest_time(vcpu);
766} 906}
767 907
768void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) 908void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -958,32 +1098,32 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
958 } 1098 }
959 /* function 4 and 0xb have additional index. */ 1099 /* function 4 and 0xb have additional index. */
960 case 4: { 1100 case 4: {
961 int index, cache_type; 1101 int i, cache_type;
962 1102
963 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; 1103 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
964 /* read more entries until cache_type is zero */ 1104 /* read more entries until cache_type is zero */
965 for (index = 1; *nent < maxnent; ++index) { 1105 for (i = 1; *nent < maxnent; ++i) {
966 cache_type = entry[index - 1].eax & 0x1f; 1106 cache_type = entry[i - 1].eax & 0x1f;
967 if (!cache_type) 1107 if (!cache_type)
968 break; 1108 break;
969 do_cpuid_1_ent(&entry[index], function, index); 1109 do_cpuid_1_ent(&entry[i], function, i);
970 entry[index].flags |= 1110 entry[i].flags |=
971 KVM_CPUID_FLAG_SIGNIFCANT_INDEX; 1111 KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
972 ++*nent; 1112 ++*nent;
973 } 1113 }
974 break; 1114 break;
975 } 1115 }
976 case 0xb: { 1116 case 0xb: {
977 int index, level_type; 1117 int i, level_type;
978 1118
979 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; 1119 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
980 /* read more entries until level_type is zero */ 1120 /* read more entries until level_type is zero */
981 for (index = 1; *nent < maxnent; ++index) { 1121 for (i = 1; *nent < maxnent; ++i) {
982 level_type = entry[index - 1].ecx & 0xff; 1122 level_type = entry[i - 1].ecx & 0xff;
983 if (!level_type) 1123 if (!level_type)
984 break; 1124 break;
985 do_cpuid_1_ent(&entry[index], function, index); 1125 do_cpuid_1_ent(&entry[i], function, i);
986 entry[index].flags |= 1126 entry[i].flags |=
987 KVM_CPUID_FLAG_SIGNIFCANT_INDEX; 1127 KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
988 ++*nent; 1128 ++*nent;
989 } 1129 }
@@ -1365,6 +1505,23 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
1365 return r; 1505 return r;
1366} 1506}
1367 1507
1508static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
1509{
1510 int r = 0;
1511
1512 memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
1513 return r;
1514}
1515
1516static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
1517{
1518 int r = 0;
1519
1520 memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
1521 kvm_pit_load_count(kvm, 0, ps->channels[0].count);
1522 return r;
1523}
1524
1368/* 1525/*
1369 * Get (and clear) the dirty memory log for a memory slot. 1526 * Get (and clear) the dirty memory log for a memory slot.
1370 */ 1527 */
@@ -1457,6 +1614,12 @@ long kvm_arch_vm_ioctl(struct file *filp,
1457 } else 1614 } else
1458 goto out; 1615 goto out;
1459 break; 1616 break;
1617 case KVM_CREATE_PIT:
1618 r = -ENOMEM;
1619 kvm->arch.vpit = kvm_create_pit(kvm);
1620 if (kvm->arch.vpit)
1621 r = 0;
1622 break;
1460 case KVM_IRQ_LINE: { 1623 case KVM_IRQ_LINE: {
1461 struct kvm_irq_level irq_event; 1624 struct kvm_irq_level irq_event;
1462 1625
@@ -1512,6 +1675,37 @@ long kvm_arch_vm_ioctl(struct file *filp,
1512 r = 0; 1675 r = 0;
1513 break; 1676 break;
1514 } 1677 }
1678 case KVM_GET_PIT: {
1679 struct kvm_pit_state ps;
1680 r = -EFAULT;
1681 if (copy_from_user(&ps, argp, sizeof ps))
1682 goto out;
1683 r = -ENXIO;
1684 if (!kvm->arch.vpit)
1685 goto out;
1686 r = kvm_vm_ioctl_get_pit(kvm, &ps);
1687 if (r)
1688 goto out;
1689 r = -EFAULT;
1690 if (copy_to_user(argp, &ps, sizeof ps))
1691 goto out;
1692 r = 0;
1693 break;
1694 }
1695 case KVM_SET_PIT: {
1696 struct kvm_pit_state ps;
1697 r = -EFAULT;
1698 if (copy_from_user(&ps, argp, sizeof ps))
1699 goto out;
1700 r = -ENXIO;
1701 if (!kvm->arch.vpit)
1702 goto out;
1703 r = kvm_vm_ioctl_set_pit(kvm, &ps);
1704 if (r)
1705 goto out;
1706 r = 0;
1707 break;
1708 }
1515 default: 1709 default:
1516 ; 1710 ;
1517 } 1711 }
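Together with the KVM_CREATE_PIT handler above, these two ioctls give userspace a save/restore path for the in-kernel PIT, which is what live migration needs. A usage sketch, with vm_fd standing in for a VM file descriptor:

	struct kvm_pit_state ps;

	if (ioctl(vm_fd, KVM_CREATE_PIT, 0) < 0)	/* instantiate the in-kernel PIT */
		perror("KVM_CREATE_PIT");

	if (ioctl(vm_fd, KVM_GET_PIT, &ps) == 0) {	/* snapshot all three channels */
		/* ... serialize ps on the source, transfer, then on the target: ... */
		ioctl(vm_fd, KVM_SET_PIT, &ps);		/* restore; reloads channel 0's count */
	}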
@@ -1570,7 +1764,6 @@ int emulator_read_std(unsigned long addr,
1570 void *data = val; 1764 void *data = val;
1571 int r = X86EMUL_CONTINUE; 1765 int r = X86EMUL_CONTINUE;
1572 1766
1573 down_read(&vcpu->kvm->slots_lock);
1574 while (bytes) { 1767 while (bytes) {
1575 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr); 1768 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
1576 unsigned offset = addr & (PAGE_SIZE-1); 1769 unsigned offset = addr & (PAGE_SIZE-1);
@@ -1592,7 +1785,6 @@ int emulator_read_std(unsigned long addr,
1592 addr += tocopy; 1785 addr += tocopy;
1593 } 1786 }
1594out: 1787out:
1595 up_read(&vcpu->kvm->slots_lock);
1596 return r; 1788 return r;
1597} 1789}
1598EXPORT_SYMBOL_GPL(emulator_read_std); 1790EXPORT_SYMBOL_GPL(emulator_read_std);
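From here on the series hoists the slots_lock read side out of the individual emulator callbacks: each helper now assumes the caller already holds the lock across the whole emulation, as __vcpu_run and kvm_arch_vcpu_ioctl_run do further down. The caller-side shape, as a sketch (the last three emulate_instruction() arguments stand in for the real cr2/error-code/type values):

	down_read(&vcpu->kvm->slots_lock);
	r = emulate_instruction(vcpu, kvm_run, cr2, error_code, emulation_type);
	up_read(&vcpu->kvm->slots_lock);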
@@ -1611,9 +1803,7 @@ static int emulator_read_emulated(unsigned long addr,
1611 return X86EMUL_CONTINUE; 1803 return X86EMUL_CONTINUE;
1612 } 1804 }
1613 1805
1614 down_read(&vcpu->kvm->slots_lock);
1615 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr); 1806 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
1616 up_read(&vcpu->kvm->slots_lock);
1617 1807
1618 /* For APIC access vmexit */ 1808 /* For APIC access vmexit */
1619 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) 1809 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
@@ -1646,19 +1836,15 @@ mmio:
1646 return X86EMUL_UNHANDLEABLE; 1836 return X86EMUL_UNHANDLEABLE;
1647} 1837}
1648 1838
1649static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, 1839int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
1650 const void *val, int bytes) 1840 const void *val, int bytes)
1651{ 1841{
1652 int ret; 1842 int ret;
1653 1843
1654 down_read(&vcpu->kvm->slots_lock);
1655 ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes); 1844 ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
1656 if (ret < 0) { 1845 if (ret < 0)
1657 up_read(&vcpu->kvm->slots_lock);
1658 return 0; 1846 return 0;
1659 }
1660 kvm_mmu_pte_write(vcpu, gpa, val, bytes); 1847 kvm_mmu_pte_write(vcpu, gpa, val, bytes);
1661 up_read(&vcpu->kvm->slots_lock);
1662 return 1; 1848 return 1;
1663} 1849}
1664 1850
@@ -1670,9 +1856,7 @@ static int emulator_write_emulated_onepage(unsigned long addr,
1670 struct kvm_io_device *mmio_dev; 1856 struct kvm_io_device *mmio_dev;
1671 gpa_t gpa; 1857 gpa_t gpa;
1672 1858
1673 down_read(&vcpu->kvm->slots_lock);
1674 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr); 1859 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
1675 up_read(&vcpu->kvm->slots_lock);
1676 1860
1677 if (gpa == UNMAPPED_GVA) { 1861 if (gpa == UNMAPPED_GVA) {
1678 kvm_inject_page_fault(vcpu, addr, 2); 1862 kvm_inject_page_fault(vcpu, addr, 2);
@@ -1749,7 +1933,6 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
1749 char *kaddr; 1933 char *kaddr;
1750 u64 val; 1934 u64 val;
1751 1935
1752 down_read(&vcpu->kvm->slots_lock);
1753 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr); 1936 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
1754 1937
1755 if (gpa == UNMAPPED_GVA || 1938 if (gpa == UNMAPPED_GVA ||
@@ -1769,9 +1952,8 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
1769 set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val); 1952 set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
1770 kunmap_atomic(kaddr, KM_USER0); 1953 kunmap_atomic(kaddr, KM_USER0);
1771 kvm_release_page_dirty(page); 1954 kvm_release_page_dirty(page);
1772 emul_write:
1773 up_read(&vcpu->kvm->slots_lock);
1774 } 1955 }
1956emul_write:
1775#endif 1957#endif
1776 1958
1777 return emulator_write_emulated(addr, new, bytes, vcpu); 1959 return emulator_write_emulated(addr, new, bytes, vcpu);
@@ -1802,7 +1984,7 @@ int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
1802 *dest = kvm_x86_ops->get_dr(vcpu, dr); 1984 *dest = kvm_x86_ops->get_dr(vcpu, dr);
1803 return X86EMUL_CONTINUE; 1985 return X86EMUL_CONTINUE;
1804 default: 1986 default:
1805 pr_unimpl(vcpu, "%s: unexpected dr %u\n", __FUNCTION__, dr); 1987 pr_unimpl(vcpu, "%s: unexpected dr %u\n", __func__, dr);
1806 return X86EMUL_UNHANDLEABLE; 1988 return X86EMUL_UNHANDLEABLE;
1807 } 1989 }
1808} 1990}
@@ -1840,7 +2022,7 @@ void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
1840} 2022}
1841EXPORT_SYMBOL_GPL(kvm_report_emulation_failure); 2023EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
1842 2024
1843struct x86_emulate_ops emulate_ops = { 2025static struct x86_emulate_ops emulate_ops = {
1844 .read_std = emulator_read_std, 2026 .read_std = emulator_read_std,
1845 .read_emulated = emulator_read_emulated, 2027 .read_emulated = emulator_read_emulated,
1846 .write_emulated = emulator_write_emulated, 2028 .write_emulated = emulator_write_emulated,
@@ -2091,6 +2273,13 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2091 vcpu->arch.pio.guest_page_offset = 0; 2273 vcpu->arch.pio.guest_page_offset = 0;
2092 vcpu->arch.pio.rep = 0; 2274 vcpu->arch.pio.rep = 0;
2093 2275
2276 if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
2277 KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
2278 handler);
2279 else
2280 KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
2281 handler);
2282
2094 kvm_x86_ops->cache_regs(vcpu); 2283 kvm_x86_ops->cache_regs(vcpu);
2095 memcpy(vcpu->arch.pio_data, &vcpu->arch.regs[VCPU_REGS_RAX], 4); 2284 memcpy(vcpu->arch.pio_data, &vcpu->arch.regs[VCPU_REGS_RAX], 4);
2096 kvm_x86_ops->decache_regs(vcpu); 2285 kvm_x86_ops->decache_regs(vcpu);
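The KVMTRACE_nD macros emit fixed-size kvmtrace records (an event id, the vcpu, up to n payload words, plus a site tag such as "handler"). When the tracer is not configured in they are expected to compile away; roughly like this, as a sketch rather than the kernel's actual definition:

/* assumption: no-op fallback when KVM tracing is compiled out */
#ifndef CONFIG_KVM_TRACE
#define KVMTRACE_2D(evt, vcpu, d1, d2, name)	do { } while (0)
#endif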
@@ -2129,6 +2318,13 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2129 vcpu->arch.pio.guest_page_offset = offset_in_page(address); 2318 vcpu->arch.pio.guest_page_offset = offset_in_page(address);
2130 vcpu->arch.pio.rep = rep; 2319 vcpu->arch.pio.rep = rep;
2131 2320
2321 if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
2322 KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
2323 handler);
2324 else
2325 KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
2326 handler);
2327
2132 if (!count) { 2328 if (!count) {
2133 kvm_x86_ops->skip_emulated_instruction(vcpu); 2329 kvm_x86_ops->skip_emulated_instruction(vcpu);
2134 return 1; 2330 return 1;
@@ -2163,10 +2359,8 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2163 kvm_x86_ops->skip_emulated_instruction(vcpu); 2359 kvm_x86_ops->skip_emulated_instruction(vcpu);
2164 2360
2165 for (i = 0; i < nr_pages; ++i) { 2361 for (i = 0; i < nr_pages; ++i) {
2166 down_read(&vcpu->kvm->slots_lock);
2167 page = gva_to_page(vcpu, address + i * PAGE_SIZE); 2362 page = gva_to_page(vcpu, address + i * PAGE_SIZE);
2168 vcpu->arch.pio.guest_pages[i] = page; 2363 vcpu->arch.pio.guest_pages[i] = page;
2169 up_read(&vcpu->kvm->slots_lock);
2170 if (!page) { 2364 if (!page) {
2171 kvm_inject_gp(vcpu, 0); 2365 kvm_inject_gp(vcpu, 0);
2172 free_pio_guest_pages(vcpu); 2366 free_pio_guest_pages(vcpu);
@@ -2238,10 +2432,13 @@ void kvm_arch_exit(void)
2238int kvm_emulate_halt(struct kvm_vcpu *vcpu) 2432int kvm_emulate_halt(struct kvm_vcpu *vcpu)
2239{ 2433{
2240 ++vcpu->stat.halt_exits; 2434 ++vcpu->stat.halt_exits;
2435 KVMTRACE_0D(HLT, vcpu, handler);
2241 if (irqchip_in_kernel(vcpu->kvm)) { 2436 if (irqchip_in_kernel(vcpu->kvm)) {
2242 vcpu->arch.mp_state = VCPU_MP_STATE_HALTED; 2437 vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
2438 up_read(&vcpu->kvm->slots_lock);
2243 kvm_vcpu_block(vcpu); 2439 kvm_vcpu_block(vcpu);
2244 if (vcpu->arch.mp_state != VCPU_MP_STATE_RUNNABLE) 2440 down_read(&vcpu->kvm->slots_lock);
2441 if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
2245 return -EINTR; 2442 return -EINTR;
2246 return 1; 2443 return 1;
2247 } else { 2444 } else {
@@ -2251,9 +2448,19 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
2251} 2448}
2252EXPORT_SYMBOL_GPL(kvm_emulate_halt); 2449EXPORT_SYMBOL_GPL(kvm_emulate_halt);
2253 2450
2451static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
2452 unsigned long a1)
2453{
2454 if (is_long_mode(vcpu))
2455 return a0;
2456 else
2457 return a0 | ((gpa_t)a1 << 32);
2458}
2459
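Outside long mode the guest can only pass 32-bit register values, so a 64-bit guest physical address arrives split across two argument registers and hc_gpa() reassembles it. A worked example:

	/* 32-bit guest: a0 = 0x89abcdef (low half), a1 = 0x01234567 (high half) */
	gpa_t gpa = 0x89abcdef | ((gpa_t)0x01234567 << 32);	/* 0x0123456789abcdef */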
2254int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) 2460int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
2255{ 2461{
2256 unsigned long nr, a0, a1, a2, a3, ret; 2462 unsigned long nr, a0, a1, a2, a3, ret;
2463 int r = 1;
2257 2464
2258 kvm_x86_ops->cache_regs(vcpu); 2465 kvm_x86_ops->cache_regs(vcpu);
2259 2466
@@ -2263,6 +2470,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
2263 a2 = vcpu->arch.regs[VCPU_REGS_RDX]; 2470 a2 = vcpu->arch.regs[VCPU_REGS_RDX];
2264 a3 = vcpu->arch.regs[VCPU_REGS_RSI]; 2471 a3 = vcpu->arch.regs[VCPU_REGS_RSI];
2265 2472
2473 KVMTRACE_1D(VMMCALL, vcpu, (u32)nr, handler);
2474
2266 if (!is_long_mode(vcpu)) { 2475 if (!is_long_mode(vcpu)) {
2267 nr &= 0xFFFFFFFF; 2476 nr &= 0xFFFFFFFF;
2268 a0 &= 0xFFFFFFFF; 2477 a0 &= 0xFFFFFFFF;
@@ -2275,13 +2484,17 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
2275 case KVM_HC_VAPIC_POLL_IRQ: 2484 case KVM_HC_VAPIC_POLL_IRQ:
2276 ret = 0; 2485 ret = 0;
2277 break; 2486 break;
2487 case KVM_HC_MMU_OP:
2488 r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
2489 break;
2278 default: 2490 default:
2279 ret = -KVM_ENOSYS; 2491 ret = -KVM_ENOSYS;
2280 break; 2492 break;
2281 } 2493 }
2282 vcpu->arch.regs[VCPU_REGS_RAX] = ret; 2494 vcpu->arch.regs[VCPU_REGS_RAX] = ret;
2283 kvm_x86_ops->decache_regs(vcpu); 2495 kvm_x86_ops->decache_regs(vcpu);
2284 return 0; 2496 ++vcpu->stat.hypercalls;
2497 return r;
2285} 2498}
2286EXPORT_SYMBOL_GPL(kvm_emulate_hypercall); 2499EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
2287 2500
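On the guest side a hypercall such as KVM_HC_MMU_OP puts the number in RAX and the arguments in RBX/RCX/RDX/RSI, matching the register decode above. A guest-side sketch (vmcall is the Intel mnemonic; AMD uses vmmcall):

static inline long kvm_hypercall2(unsigned int nr,
				  unsigned long p1, unsigned long p2)
{
	long ret;

	asm volatile("vmcall"
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2)
		     : "memory");
	return ret;
}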
@@ -2329,7 +2542,7 @@ void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
2329void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw, 2542void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
2330 unsigned long *rflags) 2543 unsigned long *rflags)
2331{ 2544{
2332 lmsw(vcpu, msw); 2545 kvm_lmsw(vcpu, msw);
2333 *rflags = kvm_x86_ops->get_rflags(vcpu); 2546 *rflags = kvm_x86_ops->get_rflags(vcpu);
2334} 2547}
2335 2548
@@ -2346,9 +2559,9 @@ unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
2346 case 4: 2559 case 4:
2347 return vcpu->arch.cr4; 2560 return vcpu->arch.cr4;
2348 case 8: 2561 case 8:
2349 return get_cr8(vcpu); 2562 return kvm_get_cr8(vcpu);
2350 default: 2563 default:
2351 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr); 2564 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
2352 return 0; 2565 return 0;
2353 } 2566 }
2354} 2567}
@@ -2358,23 +2571,23 @@ void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
2358{ 2571{
2359 switch (cr) { 2572 switch (cr) {
2360 case 0: 2573 case 0:
2361 set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val)); 2574 kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
2362 *rflags = kvm_x86_ops->get_rflags(vcpu); 2575 *rflags = kvm_x86_ops->get_rflags(vcpu);
2363 break; 2576 break;
2364 case 2: 2577 case 2:
2365 vcpu->arch.cr2 = val; 2578 vcpu->arch.cr2 = val;
2366 break; 2579 break;
2367 case 3: 2580 case 3:
2368 set_cr3(vcpu, val); 2581 kvm_set_cr3(vcpu, val);
2369 break; 2582 break;
2370 case 4: 2583 case 4:
2371 set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val)); 2584 kvm_set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val));
2372 break; 2585 break;
2373 case 8: 2586 case 8:
2374 set_cr8(vcpu, val & 0xfUL); 2587 kvm_set_cr8(vcpu, val & 0xfUL);
2375 break; 2588 break;
2376 default: 2589 default:
2377 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr); 2590 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
2378 } 2591 }
2379} 2592}
2380 2593
@@ -2447,6 +2660,11 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
2447 } 2660 }
2448 kvm_x86_ops->decache_regs(vcpu); 2661 kvm_x86_ops->decache_regs(vcpu);
2449 kvm_x86_ops->skip_emulated_instruction(vcpu); 2662 kvm_x86_ops->skip_emulated_instruction(vcpu);
2663 KVMTRACE_5D(CPUID, vcpu, function,
2664 (u32)vcpu->arch.regs[VCPU_REGS_RAX],
2665 (u32)vcpu->arch.regs[VCPU_REGS_RBX],
2666 (u32)vcpu->arch.regs[VCPU_REGS_RCX],
2667 (u32)vcpu->arch.regs[VCPU_REGS_RDX], handler);
2450} 2668}
2451EXPORT_SYMBOL_GPL(kvm_emulate_cpuid); 2669EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
2452 2670
@@ -2469,7 +2687,7 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu,
2469 struct kvm_run *kvm_run) 2687 struct kvm_run *kvm_run)
2470{ 2688{
2471 kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0; 2689 kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
2472 kvm_run->cr8 = get_cr8(vcpu); 2690 kvm_run->cr8 = kvm_get_cr8(vcpu);
2473 kvm_run->apic_base = kvm_get_apic_base(vcpu); 2691 kvm_run->apic_base = kvm_get_apic_base(vcpu);
2474 if (irqchip_in_kernel(vcpu->kvm)) 2692 if (irqchip_in_kernel(vcpu->kvm))
2475 kvm_run->ready_for_interrupt_injection = 1; 2693 kvm_run->ready_for_interrupt_injection = 1;
@@ -2509,16 +2727,17 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2509{ 2727{
2510 int r; 2728 int r;
2511 2729
2512 if (unlikely(vcpu->arch.mp_state == VCPU_MP_STATE_SIPI_RECEIVED)) { 2730 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
2513 pr_debug("vcpu %d received sipi with vector # %x\n", 2731 pr_debug("vcpu %d received sipi with vector # %x\n",
2514 vcpu->vcpu_id, vcpu->arch.sipi_vector); 2732 vcpu->vcpu_id, vcpu->arch.sipi_vector);
2515 kvm_lapic_reset(vcpu); 2733 kvm_lapic_reset(vcpu);
2516 r = kvm_x86_ops->vcpu_reset(vcpu); 2734 r = kvm_x86_ops->vcpu_reset(vcpu);
2517 if (r) 2735 if (r)
2518 return r; 2736 return r;
2519 vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE; 2737 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
2520 } 2738 }
2521 2739
2740 down_read(&vcpu->kvm->slots_lock);
2522 vapic_enter(vcpu); 2741 vapic_enter(vcpu);
2523 2742
2524preempted: 2743preempted:
@@ -2526,6 +2745,10 @@ preempted:
2526 kvm_x86_ops->guest_debug_pre(vcpu); 2745 kvm_x86_ops->guest_debug_pre(vcpu);
2527 2746
2528again: 2747again:
2748 if (vcpu->requests)
2749 if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
2750 kvm_mmu_unload(vcpu);
2751
2529 r = kvm_mmu_reload(vcpu); 2752 r = kvm_mmu_reload(vcpu);
2530 if (unlikely(r)) 2753 if (unlikely(r))
2531 goto out; 2754 goto out;
@@ -2539,6 +2762,11 @@ again:
2539 r = 0; 2762 r = 0;
2540 goto out; 2763 goto out;
2541 } 2764 }
2765 if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) {
2766 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
2767 r = 0;
2768 goto out;
2769 }
2542 } 2770 }
2543 2771
2544 kvm_inject_pending_timer_irqs(vcpu); 2772 kvm_inject_pending_timer_irqs(vcpu);
@@ -2557,6 +2785,14 @@ again:
2557 goto out; 2785 goto out;
2558 } 2786 }
2559 2787
2788 if (vcpu->requests)
2789 if (test_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) {
2790 local_irq_enable();
2791 preempt_enable();
2792 r = 1;
2793 goto out;
2794 }
2795
2560 if (signal_pending(current)) { 2796 if (signal_pending(current)) {
2561 local_irq_enable(); 2797 local_irq_enable();
2562 preempt_enable(); 2798 preempt_enable();
@@ -2566,6 +2802,13 @@ again:
2566 goto out; 2802 goto out;
2567 } 2803 }
2568 2804
2805 vcpu->guest_mode = 1;
2806 /*
2807 * Make sure that guest_mode assignment won't happen after
2808 * testing the pending IRQ vector bitmap.
2809 */
2810 smp_wmb();
2811
2569 if (vcpu->arch.exception.pending) 2812 if (vcpu->arch.exception.pending)
2570 __queue_exception(vcpu); 2813 __queue_exception(vcpu);
2571 else if (irqchip_in_kernel(vcpu->kvm)) 2814 else if (irqchip_in_kernel(vcpu->kvm))
@@ -2575,13 +2818,15 @@ again:
2575 2818
2576 kvm_lapic_sync_to_vapic(vcpu); 2819 kvm_lapic_sync_to_vapic(vcpu);
2577 2820
2578 vcpu->guest_mode = 1; 2821 up_read(&vcpu->kvm->slots_lock);
2822
2579 kvm_guest_enter(); 2823 kvm_guest_enter();
2580 2824
2581 if (vcpu->requests) 2825 if (vcpu->requests)
2582 if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests)) 2826 if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
2583 kvm_x86_ops->tlb_flush(vcpu); 2827 kvm_x86_ops->tlb_flush(vcpu);
2584 2828
2829 KVMTRACE_0D(VMENTRY, vcpu, entryexit);
2585 kvm_x86_ops->run(vcpu, kvm_run); 2830 kvm_x86_ops->run(vcpu, kvm_run);
2586 2831
2587 vcpu->guest_mode = 0; 2832 vcpu->guest_mode = 0;
@@ -2601,6 +2846,8 @@ again:
2601 2846
2602 preempt_enable(); 2847 preempt_enable();
2603 2848
2849 down_read(&vcpu->kvm->slots_lock);
2850
2604 /* 2851 /*
2605 * Profile KVM exit RIPs: 2852 * Profile KVM exit RIPs:
2606 */ 2853 */
@@ -2628,14 +2875,18 @@ again:
2628 } 2875 }
2629 2876
2630out: 2877out:
2878 up_read(&vcpu->kvm->slots_lock);
2631 if (r > 0) { 2879 if (r > 0) {
2632 kvm_resched(vcpu); 2880 kvm_resched(vcpu);
2881 down_read(&vcpu->kvm->slots_lock);
2633 goto preempted; 2882 goto preempted;
2634 } 2883 }
2635 2884
2636 post_kvm_run_save(vcpu, kvm_run); 2885 post_kvm_run_save(vcpu, kvm_run);
2637 2886
2887 down_read(&vcpu->kvm->slots_lock);
2638 vapic_exit(vcpu); 2888 vapic_exit(vcpu);
2889 up_read(&vcpu->kvm->slots_lock);
2639 2890
2640 return r; 2891 return r;
2641} 2892}
@@ -2647,7 +2898,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2647 2898
2648 vcpu_load(vcpu); 2899 vcpu_load(vcpu);
2649 2900
2650 if (unlikely(vcpu->arch.mp_state == VCPU_MP_STATE_UNINITIALIZED)) { 2901 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
2651 kvm_vcpu_block(vcpu); 2902 kvm_vcpu_block(vcpu);
2652 vcpu_put(vcpu); 2903 vcpu_put(vcpu);
2653 return -EAGAIN; 2904 return -EAGAIN;
@@ -2658,7 +2909,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2658 2909
2659 /* re-sync apic's tpr */ 2910 /* re-sync apic's tpr */
2660 if (!irqchip_in_kernel(vcpu->kvm)) 2911 if (!irqchip_in_kernel(vcpu->kvm))
2661 set_cr8(vcpu, kvm_run->cr8); 2912 kvm_set_cr8(vcpu, kvm_run->cr8);
2662 2913
2663 if (vcpu->arch.pio.cur_count) { 2914 if (vcpu->arch.pio.cur_count) {
2664 r = complete_pio(vcpu); 2915 r = complete_pio(vcpu);
@@ -2670,9 +2921,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2670 memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8); 2921 memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
2671 vcpu->mmio_read_completed = 1; 2922 vcpu->mmio_read_completed = 1;
2672 vcpu->mmio_needed = 0; 2923 vcpu->mmio_needed = 0;
2924
2925 down_read(&vcpu->kvm->slots_lock);
2673 r = emulate_instruction(vcpu, kvm_run, 2926 r = emulate_instruction(vcpu, kvm_run,
2674 vcpu->arch.mmio_fault_cr2, 0, 2927 vcpu->arch.mmio_fault_cr2, 0,
2675 EMULTYPE_NO_DECODE); 2928 EMULTYPE_NO_DECODE);
2929 up_read(&vcpu->kvm->slots_lock);
2676 if (r == EMULATE_DO_MMIO) { 2930 if (r == EMULATE_DO_MMIO) {
2677 /* 2931 /*
2678 * Read-modify-write. Back to userspace. 2932 * Read-modify-write. Back to userspace.
@@ -2773,7 +3027,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2773static void get_segment(struct kvm_vcpu *vcpu, 3027static void get_segment(struct kvm_vcpu *vcpu,
2774 struct kvm_segment *var, int seg) 3028 struct kvm_segment *var, int seg)
2775{ 3029{
2776 return kvm_x86_ops->get_segment(vcpu, var, seg); 3030 kvm_x86_ops->get_segment(vcpu, var, seg);
2777} 3031}
2778 3032
2779void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) 3033void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
@@ -2816,7 +3070,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2816 sregs->cr2 = vcpu->arch.cr2; 3070 sregs->cr2 = vcpu->arch.cr2;
2817 sregs->cr3 = vcpu->arch.cr3; 3071 sregs->cr3 = vcpu->arch.cr3;
2818 sregs->cr4 = vcpu->arch.cr4; 3072 sregs->cr4 = vcpu->arch.cr4;
2819 sregs->cr8 = get_cr8(vcpu); 3073 sregs->cr8 = kvm_get_cr8(vcpu);
2820 sregs->efer = vcpu->arch.shadow_efer; 3074 sregs->efer = vcpu->arch.shadow_efer;
2821 sregs->apic_base = kvm_get_apic_base(vcpu); 3075 sregs->apic_base = kvm_get_apic_base(vcpu);
2822 3076
@@ -2836,12 +3090,438 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2836 return 0; 3090 return 0;
2837} 3091}
2838 3092
3093int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
3094 struct kvm_mp_state *mp_state)
3095{
3096 vcpu_load(vcpu);
3097 mp_state->mp_state = vcpu->arch.mp_state;
3098 vcpu_put(vcpu);
3099 return 0;
3100}
3101
3102int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
3103 struct kvm_mp_state *mp_state)
3104{
3105 vcpu_load(vcpu);
3106 vcpu->arch.mp_state = mp_state->mp_state;
3107 vcpu_put(vcpu);
3108 return 0;
3109}
3110
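These two handlers back the new KVM_GET_MP_STATE/KVM_SET_MP_STATE vcpu ioctls (advertised by KVM_CAP_MP_STATE above), letting userspace carry a vcpu's multiprocessing state across save/restore. A usage sketch, with vcpu_fd standing in for a vcpu file descriptor:

	struct kvm_mp_state mp;

	if (ioctl(vcpu_fd, KVM_GET_MP_STATE, &mp) == 0) {
		/* ... migrate mp.mp_state along with the register state ... */
		ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp);
	}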
2839static void set_segment(struct kvm_vcpu *vcpu, 3111static void set_segment(struct kvm_vcpu *vcpu,
2840 struct kvm_segment *var, int seg) 3112 struct kvm_segment *var, int seg)
2841{ 3113{
2842 return kvm_x86_ops->set_segment(vcpu, var, seg); 3114 kvm_x86_ops->set_segment(vcpu, var, seg);
3115}
3116
3117static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
3118 struct kvm_segment *kvm_desct)
3119{
3120 kvm_desct->base = seg_desc->base0;
3121 kvm_desct->base |= seg_desc->base1 << 16;
3122 kvm_desct->base |= seg_desc->base2 << 24;
3123 kvm_desct->limit = seg_desc->limit0;
3124 kvm_desct->limit |= seg_desc->limit << 16;
3125 kvm_desct->selector = selector;
3126 kvm_desct->type = seg_desc->type;
3127 kvm_desct->present = seg_desc->p;
3128 kvm_desct->dpl = seg_desc->dpl;
3129 kvm_desct->db = seg_desc->d;
3130 kvm_desct->s = seg_desc->s;
3131 kvm_desct->l = seg_desc->l;
3132 kvm_desct->g = seg_desc->g;
3133 kvm_desct->avl = seg_desc->avl;
3134 if (!selector)
3135 kvm_desct->unusable = 1;
3136 else
3137 kvm_desct->unusable = 0;
3138 kvm_desct->padding = 0;
3139}
3140
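An x86 segment descriptor scatters the base address across three fields for historical reasons; the helper above stitches base0 (bits 0-15), base1 (bits 16-23) and base2 (bits 24-31) back together. A worked example:

	/* base0 = 0x1234, base1 = 0x56, base2 = 0x78 */
	u32 base = 0x1234 | (0x56 << 16) | (0x78 << 24);	/* 0x78561234 */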
3141static void get_segment_descritptor_dtable(struct kvm_vcpu *vcpu,
3142 u16 selector,
3143 struct descriptor_table *dtable)
3144{
3145 if (selector & 1 << 2) {
3146 struct kvm_segment kvm_seg;
3147
3148 get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);
3149
3150 if (kvm_seg.unusable)
3151 dtable->limit = 0;
3152 else
3153 dtable->limit = kvm_seg.limit;
3154 dtable->base = kvm_seg.base;
3155 }
3156 else
3157 kvm_x86_ops->get_gdt(vcpu, dtable);
3158}
3159
3160/* allowed just for 8-byte segments */
3161static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
3162 struct desc_struct *seg_desc)
3163{
3164 struct descriptor_table dtable;
3165 u16 index = selector >> 3;
3166
3167 get_segment_descritptor_dtable(vcpu, selector, &dtable);
3168
3169 if (dtable.limit < index * 8 + 7) {
3170 kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
3171 return 1;
3172 }
3173 return kvm_read_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8);
3174}
3175
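A selector packs three fields: bits 0-1 are the RPL, bit 2 is the table indicator (hence the selector & 1 << 2 test above), and bits 3-15 index into that table, each descriptor being 8 bytes (hence selector >> 3 and the index * 8 + 7 limit check). For example:

	u16 sel = 0x2b;
	unsigned int rpl = sel & 3;		/* 3 */
	unsigned int ti  = (sel >> 2) & 1;	/* 1: look in the LDT */
	unsigned int idx = sel >> 3;		/* descriptor 5, at byte offset 40 */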
3176/* allowed just for 8-byte segments */
3177static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
3178 struct desc_struct *seg_desc)
3179{
3180 struct descriptor_table dtable;
3181 u16 index = selector >> 3;
3182
3183 get_segment_descritptor_dtable(vcpu, selector, &dtable);
3184
3185 if (dtable.limit < index * 8 + 7)
3186 return 1;
3187 return kvm_write_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8);
3188}
3189
3190static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
3191 struct desc_struct *seg_desc)
3192{
3193 u32 base_addr;
3194
3195 base_addr = seg_desc->base0;
3196 base_addr |= (seg_desc->base1 << 16);
3197 base_addr |= (seg_desc->base2 << 24);
3198
3199 return base_addr;
3200}
3201
3202static int load_tss_segment32(struct kvm_vcpu *vcpu,
3203 struct desc_struct *seg_desc,
3204 struct tss_segment_32 *tss)
3205{
3206 u32 base_addr;
3207
3208 base_addr = get_tss_base_addr(vcpu, seg_desc);
3209
3210 return kvm_read_guest(vcpu->kvm, base_addr, tss,
3211 sizeof(struct tss_segment_32));
3212}
3213
3214static int save_tss_segment32(struct kvm_vcpu *vcpu,
3215 struct desc_struct *seg_desc,
3216 struct tss_segment_32 *tss)
3217{
3218 u32 base_addr;
3219
3220 base_addr = get_tss_base_addr(vcpu, seg_desc);
3221
3222 return kvm_write_guest(vcpu->kvm, base_addr, tss,
3223 sizeof(struct tss_segment_32));
3224}
3225
3226static int load_tss_segment16(struct kvm_vcpu *vcpu,
3227 struct desc_struct *seg_desc,
3228 struct tss_segment_16 *tss)
3229{
3230 u32 base_addr;
3231
3232 base_addr = get_tss_base_addr(vcpu, seg_desc);
3233
3234 return kvm_read_guest(vcpu->kvm, base_addr, tss,
3235 sizeof(struct tss_segment_16));
3236}
3237
3238static int save_tss_segment16(struct kvm_vcpu *vcpu,
3239 struct desc_struct *seg_desc,
3240 struct tss_segment_16 *tss)
3241{
3242 u32 base_addr;
3243
3244 base_addr = get_tss_base_addr(vcpu, seg_desc);
3245
3246 return kvm_write_guest(vcpu->kvm, base_addr, tss,
3247 sizeof(struct tss_segment_16));
3248}
3249
3250static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
3251{
3252 struct kvm_segment kvm_seg;
3253
3254 get_segment(vcpu, &kvm_seg, seg);
3255 return kvm_seg.selector;
3256}
3257
3258static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
3259 u16 selector,
3260 struct kvm_segment *kvm_seg)
3261{
3262 struct desc_struct seg_desc;
3263
3264 if (load_guest_segment_descriptor(vcpu, selector, &seg_desc))
3265 return 1;
3266 seg_desct_to_kvm_desct(&seg_desc, selector, kvm_seg);
3267 return 0;
3268}
3269
3270static int load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
3271 int type_bits, int seg)
3272{
3273 struct kvm_segment kvm_seg;
3274
3275 if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
3276 return 1;
3277 kvm_seg.type |= type_bits;
3278
3279 if (seg != VCPU_SREG_SS && seg != VCPU_SREG_CS &&
3280 seg != VCPU_SREG_LDTR)
3281 if (!kvm_seg.s)
3282 kvm_seg.unusable = 1;
3283
3284 set_segment(vcpu, &kvm_seg, seg);
3285 return 0;
3286}
3287
3288static void save_state_to_tss32(struct kvm_vcpu *vcpu,
3289 struct tss_segment_32 *tss)
3290{
3291 tss->cr3 = vcpu->arch.cr3;
3292 tss->eip = vcpu->arch.rip;
3293 tss->eflags = kvm_x86_ops->get_rflags(vcpu);
3294 tss->eax = vcpu->arch.regs[VCPU_REGS_RAX];
3295 tss->ecx = vcpu->arch.regs[VCPU_REGS_RCX];
3296 tss->edx = vcpu->arch.regs[VCPU_REGS_RDX];
3297 tss->ebx = vcpu->arch.regs[VCPU_REGS_RBX];
3298 tss->esp = vcpu->arch.regs[VCPU_REGS_RSP];
3299 tss->ebp = vcpu->arch.regs[VCPU_REGS_RBP];
3300 tss->esi = vcpu->arch.regs[VCPU_REGS_RSI];
3301 tss->edi = vcpu->arch.regs[VCPU_REGS_RDI];
3302
3303 tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
3304 tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
3305 tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
3306 tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
3307 tss->fs = get_segment_selector(vcpu, VCPU_SREG_FS);
3308 tss->gs = get_segment_selector(vcpu, VCPU_SREG_GS);
3309 tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
3310 tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR);
3311}
3312
3313static int load_state_from_tss32(struct kvm_vcpu *vcpu,
3314 struct tss_segment_32 *tss)
3315{
3316 kvm_set_cr3(vcpu, tss->cr3);
3317
3318 vcpu->arch.rip = tss->eip;
3319 kvm_x86_ops->set_rflags(vcpu, tss->eflags | 2);
3320
3321 vcpu->arch.regs[VCPU_REGS_RAX] = tss->eax;
3322 vcpu->arch.regs[VCPU_REGS_RCX] = tss->ecx;
3323 vcpu->arch.regs[VCPU_REGS_RDX] = tss->edx;
3324 vcpu->arch.regs[VCPU_REGS_RBX] = tss->ebx;
3325 vcpu->arch.regs[VCPU_REGS_RSP] = tss->esp;
3326 vcpu->arch.regs[VCPU_REGS_RBP] = tss->ebp;
3327 vcpu->arch.regs[VCPU_REGS_RSI] = tss->esi;
3328 vcpu->arch.regs[VCPU_REGS_RDI] = tss->edi;
3329
3330 if (load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
3331 return 1;
3332
3333 if (load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
3334 return 1;
3335
3336 if (load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
3337 return 1;
3338
3339 if (load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
3340 return 1;
3341
3342 if (load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
3343 return 1;
3344
3345 if (load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS))
3346 return 1;
3347
3348 if (load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS))
3349 return 1;
3350 return 0;
3351}
3352
3353static void save_state_to_tss16(struct kvm_vcpu *vcpu,
3354 struct tss_segment_16 *tss)
3355{
3356 tss->ip = vcpu->arch.rip;
3357 tss->flag = kvm_x86_ops->get_rflags(vcpu);
3358 tss->ax = vcpu->arch.regs[VCPU_REGS_RAX];
3359 tss->cx = vcpu->arch.regs[VCPU_REGS_RCX];
3360 tss->dx = vcpu->arch.regs[VCPU_REGS_RDX];
3361 tss->bx = vcpu->arch.regs[VCPU_REGS_RBX];
3362 tss->sp = vcpu->arch.regs[VCPU_REGS_RSP];
3363 tss->bp = vcpu->arch.regs[VCPU_REGS_RBP];
3364 tss->si = vcpu->arch.regs[VCPU_REGS_RSI];
3365 tss->di = vcpu->arch.regs[VCPU_REGS_RDI];
3366
3367 tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
3368 tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
3369 tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
3370 tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
3371 tss->ldt = get_segment_selector(vcpu, VCPU_SREG_LDTR);
3372 tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR);
3373}
3374
3375static int load_state_from_tss16(struct kvm_vcpu *vcpu,
3376 struct tss_segment_16 *tss)
3377{
3378 vcpu->arch.rip = tss->ip;
3379 kvm_x86_ops->set_rflags(vcpu, tss->flag | 2);
3380 vcpu->arch.regs[VCPU_REGS_RAX] = tss->ax;
3381 vcpu->arch.regs[VCPU_REGS_RCX] = tss->cx;
3382 vcpu->arch.regs[VCPU_REGS_RDX] = tss->dx;
3383 vcpu->arch.regs[VCPU_REGS_RBX] = tss->bx;
3384 vcpu->arch.regs[VCPU_REGS_RSP] = tss->sp;
3385 vcpu->arch.regs[VCPU_REGS_RBP] = tss->bp;
3386 vcpu->arch.regs[VCPU_REGS_RSI] = tss->si;
3387 vcpu->arch.regs[VCPU_REGS_RDI] = tss->di;
3388
3389 if (load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
3390 return 1;
3391
3392 if (load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
3393 return 1;
3394
3395 if (load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
3396 return 1;
3397
3398 if (load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
3399 return 1;
3400
3401 if (load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
3402 return 1;
3403 return 0;
3404}
3405
3406int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
3407 struct desc_struct *cseg_desc,
3408 struct desc_struct *nseg_desc)
3409{
3410 struct tss_segment_16 tss_segment_16;
3411 int ret = 0;
3412
3413 if (load_tss_segment16(vcpu, cseg_desc, &tss_segment_16))
3414 goto out;
3415
3416 save_state_to_tss16(vcpu, &tss_segment_16);
3417 save_tss_segment16(vcpu, cseg_desc, &tss_segment_16);
3418
3419 if (load_tss_segment16(vcpu, nseg_desc, &tss_segment_16))
3420 goto out;
3421 if (load_state_from_tss16(vcpu, &tss_segment_16))
3422 goto out;
3423
3424 ret = 1;
3425out:
3426 return ret;
3427}
3428
3429int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
3430 struct desc_struct *cseg_desc,
3431 struct desc_struct *nseg_desc)
3432{
3433 struct tss_segment_32 tss_segment_32;
3434 int ret = 0;
3435
3436 if (load_tss_segment32(vcpu, cseg_desc, &tss_segment_32))
3437 goto out;
3438
3439 save_state_to_tss32(vcpu, &tss_segment_32);
3440 save_tss_segment32(vcpu, cseg_desc, &tss_segment_32);
3441
3442 if (load_tss_segment32(vcpu, nseg_desc, &tss_segment_32))
3443 goto out;
3444 if (load_state_from_tss32(vcpu, &tss_segment_32))
3445 goto out;
3446
3447 ret = 1;
3448out:
3449 return ret;
2843} 3450}
2844 3451
3452int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
3453{
3454 struct kvm_segment tr_seg;
3455 struct desc_struct cseg_desc;
3456 struct desc_struct nseg_desc;
3457 int ret = 0;
3458
3459 get_segment(vcpu, &tr_seg, VCPU_SREG_TR);
3460
3461 if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
3462 goto out;
3463
3464 if (load_guest_segment_descriptor(vcpu, tr_seg.selector, &cseg_desc))
3465 goto out;
3466
3467
3468 if (reason != TASK_SWITCH_IRET) {
3469 int cpl;
3470
3471 cpl = kvm_x86_ops->get_cpl(vcpu);
3472 if ((tss_selector & 3) > nseg_desc.dpl || cpl > nseg_desc.dpl) {
3473 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
3474 return 1;
3475 }
3476 }
3477
3478 if (!nseg_desc.p || (nseg_desc.limit0 | nseg_desc.limit << 16) < 0x67) {
3479 kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
3480 return 1;
3481 }
3482
3483 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
3484 cseg_desc.type &= ~(1 << 8); /* clear the B flag */
3485 save_guest_segment_descriptor(vcpu, tr_seg.selector,
3486 &cseg_desc);
3487 }
3488
3489 if (reason == TASK_SWITCH_IRET) {
3490 u32 eflags = kvm_x86_ops->get_rflags(vcpu);
3491 kvm_x86_ops->set_rflags(vcpu, eflags & ~X86_EFLAGS_NT);
3492 }
3493
3494 kvm_x86_ops->skip_emulated_instruction(vcpu);
3495 kvm_x86_ops->cache_regs(vcpu);
3496
3497 if (nseg_desc.type & 8)
3498 ret = kvm_task_switch_32(vcpu, tss_selector, &cseg_desc,
3499 &nseg_desc);
3500 else
3501 ret = kvm_task_switch_16(vcpu, tss_selector, &cseg_desc,
3502 &nseg_desc);
3503
3504 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
3505 u32 eflags = kvm_x86_ops->get_rflags(vcpu);
3506 kvm_x86_ops->set_rflags(vcpu, eflags | X86_EFLAGS_NT);
3507 }
3508
3509 if (reason != TASK_SWITCH_IRET) {
3510 nseg_desc.type |= (1 << 8);
3511 save_guest_segment_descriptor(vcpu, tss_selector,
3512 &nseg_desc);
3513 }
3514
3515 kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 | X86_CR0_TS);
3516 seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
3517 tr_seg.type = 11;
3518 set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
3519out:
3520 kvm_x86_ops->decache_regs(vcpu);
3521 return ret;
3522}
3523EXPORT_SYMBOL_GPL(kvm_task_switch);
3524
2845int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, 3525int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2846 struct kvm_sregs *sregs) 3526 struct kvm_sregs *sregs)
2847{ 3527{
@@ -2862,12 +3542,10 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2862 mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3; 3542 mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
2863 vcpu->arch.cr3 = sregs->cr3; 3543 vcpu->arch.cr3 = sregs->cr3;
2864 3544
2865 set_cr8(vcpu, sregs->cr8); 3545 kvm_set_cr8(vcpu, sregs->cr8);
2866 3546
2867 mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer; 3547 mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
2868#ifdef CONFIG_X86_64
2869 kvm_x86_ops->set_efer(vcpu, sregs->efer); 3548 kvm_x86_ops->set_efer(vcpu, sregs->efer);
2870#endif
2871 kvm_set_apic_base(vcpu, sregs->apic_base); 3549 kvm_set_apic_base(vcpu, sregs->apic_base);
2872 3550
2873 kvm_x86_ops->decache_cr4_guest_bits(vcpu); 3551 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
@@ -3141,9 +3819,9 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
3141 3819
3142 vcpu->arch.mmu.root_hpa = INVALID_PAGE; 3820 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
3143 if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0) 3821 if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
3144 vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE; 3822 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
3145 else 3823 else
3146 vcpu->arch.mp_state = VCPU_MP_STATE_UNINITIALIZED; 3824 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
3147 3825
3148 page = alloc_page(GFP_KERNEL | __GFP_ZERO); 3826 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
3149 if (!page) { 3827 if (!page) {
@@ -3175,7 +3853,9 @@ fail:
3175void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) 3853void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
3176{ 3854{
3177 kvm_free_lapic(vcpu); 3855 kvm_free_lapic(vcpu);
3856 down_read(&vcpu->kvm->slots_lock);
3178 kvm_mmu_destroy(vcpu); 3857 kvm_mmu_destroy(vcpu);
3858 up_read(&vcpu->kvm->slots_lock);
3179 free_page((unsigned long)vcpu->arch.pio_data); 3859 free_page((unsigned long)vcpu->arch.pio_data);
3180} 3860}
3181 3861
@@ -3219,10 +3899,13 @@ static void kvm_free_vcpus(struct kvm *kvm)
3219 3899
3220void kvm_arch_destroy_vm(struct kvm *kvm) 3900void kvm_arch_destroy_vm(struct kvm *kvm)
3221{ 3901{
3902 kvm_free_pit(kvm);
3222 kfree(kvm->arch.vpic); 3903 kfree(kvm->arch.vpic);
3223 kfree(kvm->arch.vioapic); 3904 kfree(kvm->arch.vioapic);
3224 kvm_free_vcpus(kvm); 3905 kvm_free_vcpus(kvm);
3225 kvm_free_physmem(kvm); 3906 kvm_free_physmem(kvm);
3907 if (kvm->arch.apic_access_page)
3908 put_page(kvm->arch.apic_access_page);
3226 kfree(kvm); 3909 kfree(kvm);
3227} 3910}
3228 3911
@@ -3278,8 +3961,8 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
3278 3961
3279int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) 3962int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
3280{ 3963{
3281 return vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE 3964 return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
3282 || vcpu->arch.mp_state == VCPU_MP_STATE_SIPI_RECEIVED; 3965 || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED;
3283} 3966}
3284 3967
3285static void vcpu_kick_intr(void *info) 3968static void vcpu_kick_intr(void *info)
@@ -3293,11 +3976,17 @@ static void vcpu_kick_intr(void *info)
3293void kvm_vcpu_kick(struct kvm_vcpu *vcpu) 3976void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
3294{ 3977{
3295 int ipi_pcpu = vcpu->cpu; 3978 int ipi_pcpu = vcpu->cpu;
3979 int cpu = get_cpu();
3296 3980
3297 if (waitqueue_active(&vcpu->wq)) { 3981 if (waitqueue_active(&vcpu->wq)) {
3298 wake_up_interruptible(&vcpu->wq); 3982 wake_up_interruptible(&vcpu->wq);
3299 ++vcpu->stat.halt_wakeup; 3983 ++vcpu->stat.halt_wakeup;
3300 } 3984 }
3301 if (vcpu->guest_mode) 3985 /*
3986 * We may be called synchronously with irqs disabled in guest mode,
3987 * so there is no need to call smp_call_function_single() in that case.
3988 */
3989 if (vcpu->guest_mode && vcpu->cpu != cpu)
3302 smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0, 0); 3990 smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0, 0);
3991 put_cpu();
3303} 3992}
diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c
index 79586003397a..2ca08386f993 100644
--- a/arch/x86/kvm/x86_emulate.c
+++ b/arch/x86/kvm/x86_emulate.c
@@ -65,6 +65,14 @@
65#define MemAbs (1<<9) /* Memory operand is absolute displacement */ 65#define MemAbs (1<<9) /* Memory operand is absolute displacement */
66#define String (1<<10) /* String instruction (rep capable) */ 66#define String (1<<10) /* String instruction (rep capable) */
67#define Stack (1<<11) /* Stack instruction (push/pop) */ 67#define Stack (1<<11) /* Stack instruction (push/pop) */
68#define Group (1<<14) /* Bits 3:5 of modrm byte extend opcode */
69#define GroupDual (1<<15) /* Alternate decoding of mod == 3 */
70#define GroupMask 0xff /* Group number stored in bits 0:7 */
71
72enum {
73 Group1_80, Group1_81, Group1_82, Group1_83,
74 Group1A, Group3_Byte, Group3, Group4, Group5, Group7,
75};
68 76
69static u16 opcode_table[256] = { 77static u16 opcode_table[256] = {
70 /* 0x00 - 0x07 */ 78 /* 0x00 - 0x07 */
@@ -123,14 +131,14 @@ static u16 opcode_table[256] = {
123 ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 131 ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
124 ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 132 ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
125 /* 0x80 - 0x87 */ 133 /* 0x80 - 0x87 */
126 ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM, 134 Group | Group1_80, Group | Group1_81,
127 ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM, 135 Group | Group1_82, Group | Group1_83,
128 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, 136 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
129 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, 137 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
130 /* 0x88 - 0x8F */ 138 /* 0x88 - 0x8F */
131 ByteOp | DstMem | SrcReg | ModRM | Mov, DstMem | SrcReg | ModRM | Mov, 139 ByteOp | DstMem | SrcReg | ModRM | Mov, DstMem | SrcReg | ModRM | Mov,
132 ByteOp | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov, 140 ByteOp | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
133 0, ModRM | DstReg, 0, DstMem | SrcNone | ModRM | Mov | Stack, 141 0, ModRM | DstReg, 0, Group | Group1A,
134 /* 0x90 - 0x9F */ 142 /* 0x90 - 0x9F */
135 0, 0, 0, 0, 0, 0, 0, 0, 143 0, 0, 0, 0, 0, 0, 0, 0,
136 0, 0, 0, 0, ImplicitOps | Stack, ImplicitOps | Stack, 0, 0, 144 0, 0, 0, 0, ImplicitOps | Stack, ImplicitOps | Stack, 0, 0,
@@ -164,16 +172,15 @@ static u16 opcode_table[256] = {
164 0, 0, 0, 0, 172 0, 0, 0, 0,
165 /* 0xF0 - 0xF7 */ 173 /* 0xF0 - 0xF7 */
166 0, 0, 0, 0, 174 0, 0, 0, 0,
167 ImplicitOps, ImplicitOps, 175 ImplicitOps, ImplicitOps, Group | Group3_Byte, Group | Group3,
168 ByteOp | DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
169 /* 0xF8 - 0xFF */ 176 /* 0xF8 - 0xFF */
170 ImplicitOps, 0, ImplicitOps, ImplicitOps, 177 ImplicitOps, 0, ImplicitOps, ImplicitOps,
171 0, 0, ByteOp | DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM 178 0, 0, Group | Group4, Group | Group5,
172}; 179};
173 180
174static u16 twobyte_table[256] = { 181static u16 twobyte_table[256] = {
175 /* 0x00 - 0x0F */ 182 /* 0x00 - 0x0F */
176 0, SrcMem | ModRM | DstReg, 0, 0, 0, 0, ImplicitOps, 0, 183 0, Group | GroupDual | Group7, 0, 0, 0, 0, ImplicitOps, 0,
177 ImplicitOps, ImplicitOps, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 184 ImplicitOps, ImplicitOps, 0, 0, 0, ImplicitOps | ModRM, 0, 0,
178 /* 0x10 - 0x1F */ 185 /* 0x10 - 0x1F */
179 0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0, 186 0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0,
@@ -229,6 +236,56 @@ static u16 twobyte_table[256] = {
229 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 236 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
230}; 237};
231 238
239static u16 group_table[] = {
240 [Group1_80*8] =
241 ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
242 ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
243 ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
244 ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
245 [Group1_81*8] =
246 DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM,
247 DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM,
248 DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM,
249 DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM,
250 [Group1_82*8] =
251 ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
252 ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
253 ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
254 ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
255 [Group1_83*8] =
256 DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM,
257 DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM,
258 DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM,
259 DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM,
260 [Group1A*8] =
261 DstMem | SrcNone | ModRM | Mov | Stack, 0, 0, 0, 0, 0, 0, 0,
262 [Group3_Byte*8] =
263 ByteOp | SrcImm | DstMem | ModRM, 0,
264 ByteOp | DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM,
265 0, 0, 0, 0,
266 [Group3*8] =
267 DstMem | SrcImm | ModRM | SrcImm, 0,
268 DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM,
269 0, 0, 0, 0,
270 [Group4*8] =
271 ByteOp | DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM,
272 0, 0, 0, 0, 0, 0,
273 [Group5*8] =
274 DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM, 0, 0,
275 SrcMem | ModRM, 0, SrcMem | ModRM | Stack, 0,
276 [Group7*8] =
277 0, 0, ModRM | SrcMem, ModRM | SrcMem,
278 SrcNone | ModRM | DstMem | Mov, 0,
279 SrcMem16 | ModRM | Mov, SrcMem | ModRM | ByteOp,
280};
281
282static u16 group2_table[] = {
283 [Group7*8] =
284 SrcNone | ModRM, 0, 0, 0,
285 SrcNone | ModRM | DstMem | Mov, 0,
286 SrcMem16 | ModRM | Mov, 0,
287};
288
232/* EFLAGS bit definitions. */ 289/* EFLAGS bit definitions. */
233#define EFLG_OF (1<<11) 290#define EFLG_OF (1<<11)
234#define EFLG_DF (1<<10) 291#define EFLG_DF (1<<10)
@@ -317,7 +374,7 @@ static u16 twobyte_table[256] = {
317 374
318#define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \ 375#define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
319 do { \ 376 do { \
320 unsigned long _tmp; \ 377 unsigned long __tmp; \
321 switch ((_dst).bytes) { \ 378 switch ((_dst).bytes) { \
322 case 1: \ 379 case 1: \
323 __asm__ __volatile__ ( \ 380 __asm__ __volatile__ ( \
@@ -325,7 +382,7 @@ static u16 twobyte_table[256] = {
325 _op"b %"_bx"3,%1; " \ 382 _op"b %"_bx"3,%1; " \
326 _POST_EFLAGS("0", "4", "2") \ 383 _POST_EFLAGS("0", "4", "2") \
327 : "=m" (_eflags), "=m" ((_dst).val), \ 384 : "=m" (_eflags), "=m" ((_dst).val), \
328 "=&r" (_tmp) \ 385 "=&r" (__tmp) \
329 : _by ((_src).val), "i" (EFLAGS_MASK)); \ 386 : _by ((_src).val), "i" (EFLAGS_MASK)); \
330 break; \ 387 break; \
331 default: \ 388 default: \
@@ -426,29 +483,40 @@ static u16 twobyte_table[256] = {
426 (_type)_x; \ 483 (_type)_x; \
427}) 484})
428 485
486static inline unsigned long ad_mask(struct decode_cache *c)
487{
488 return (1UL << (c->ad_bytes << 3)) - 1;
489}
490
429/* Access/update address held in a register, based on addressing mode. */ 491/* Access/update address held in a register, based on addressing mode. */
430#define address_mask(reg) \ 492static inline unsigned long
431 ((c->ad_bytes == sizeof(unsigned long)) ? \ 493address_mask(struct decode_cache *c, unsigned long reg)
432 (reg) : ((reg) & ((1UL << (c->ad_bytes << 3)) - 1))) 494{
433#define register_address(base, reg) \ 495 if (c->ad_bytes == sizeof(unsigned long))
434 ((base) + address_mask(reg)) 496 return reg;
435#define register_address_increment(reg, inc) \ 497 else
436 do { \ 498 return reg & ad_mask(c);
437 /* signed type ensures sign extension to long */ \ 499}
438 int _inc = (inc); \
439 if (c->ad_bytes == sizeof(unsigned long)) \
440 (reg) += _inc; \
441 else \
442 (reg) = ((reg) & \
443 ~((1UL << (c->ad_bytes << 3)) - 1)) | \
444 (((reg) + _inc) & \
445 ((1UL << (c->ad_bytes << 3)) - 1)); \
446 } while (0)
447 500
448#define JMP_REL(rel) \ 501static inline unsigned long
449 do { \ 502register_address(struct decode_cache *c, unsigned long base, unsigned long reg)
450 register_address_increment(c->eip, rel); \ 503{
451 } while (0) 504 return base + address_mask(c, reg);
505}
506
507static inline void
508register_address_increment(struct decode_cache *c, unsigned long *reg, int inc)
509{
510 if (c->ad_bytes == sizeof(unsigned long))
511 *reg += inc;
512 else
513 *reg = (*reg & ~ad_mask(c)) | ((*reg + inc) & ad_mask(c));
514}
515
516static inline void jmp_rel(struct decode_cache *c, int rel)
517{
518 register_address_increment(c, &c->eip, rel);
519}
452 520
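Turning the old macros into inline functions also makes the wrap-around semantics easy to check in isolation: with a 16-bit address size only the low 16 bits of the register move, as real-mode SP arithmetic requires. A worked example under that assumption:

	struct decode_cache c = { .ad_bytes = 2 };	/* 16-bit addressing */
	unsigned long sp = 0x12340001;

	register_address_increment(&c, &sp, -2);
	/* ad_mask(&c) == 0xffff, so only the low half wraps: sp == 0x1234ffff */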
453static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt, 521static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
454 struct x86_emulate_ops *ops, 522 struct x86_emulate_ops *ops,
@@ -763,7 +831,7 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
763 struct decode_cache *c = &ctxt->decode; 831 struct decode_cache *c = &ctxt->decode;
764 int rc = 0; 832 int rc = 0;
765 int mode = ctxt->mode; 833 int mode = ctxt->mode;
766 int def_op_bytes, def_ad_bytes; 834 int def_op_bytes, def_ad_bytes, group;
767 835
768 /* Shadow copy of register state. Committed on successful emulation. */ 836 /* Shadow copy of register state. Committed on successful emulation. */
769 837
@@ -864,12 +932,24 @@ done_prefixes:
864 c->b = insn_fetch(u8, 1, c->eip); 932 c->b = insn_fetch(u8, 1, c->eip);
865 c->d = twobyte_table[c->b]; 933 c->d = twobyte_table[c->b];
866 } 934 }
935 }
867 936
868 /* Unrecognised? */ 937 if (c->d & Group) {
869 if (c->d == 0) { 938 group = c->d & GroupMask;
870 DPRINTF("Cannot emulate %02x\n", c->b); 939 c->modrm = insn_fetch(u8, 1, c->eip);
871 return -1; 940 --c->eip;
872 } 941
942 group = (group << 3) + ((c->modrm >> 3) & 7);
943 if ((c->d & GroupDual) && (c->modrm >> 6) == 3)
944 c->d = group2_table[group];
945 else
946 c->d = group_table[group];
947 }
948
949 /* Unrecognised? */
950 if (c->d == 0) {
951 DPRINTF("Cannot emulate %02x\n", c->b);
952 return -1;
873 } 953 }
874 954
875 if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack)) 955 if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
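With the group tables, a Group opcode is decoded by peeking at the ModRM reg field (bits 3-5) and using it as a sub-index, instead of special-casing each opcode. Tracing opcode 0xff /6 (push) through the code above:

	/* opcode 0xff decodes to Group | Group5; say the next byte is 0x35 */
	u8 modrm = 0x35;				/* mod=00, reg=110, rm=101 */
	int group = Group5;

	group = (group << 3) + ((modrm >> 3) & 7);	/* Group5*8 + 6 */
	/* group_table[group] == SrcMem | ModRM | Stack: the push case */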
@@ -924,6 +1004,7 @@ done_prefixes:
924 */ 1004 */
925 if ((c->d & ModRM) && c->modrm_mod == 3) { 1005 if ((c->d & ModRM) && c->modrm_mod == 3) {
926 c->src.type = OP_REG; 1006 c->src.type = OP_REG;
1007 c->src.val = c->modrm_val;
927 break; 1008 break;
928 } 1009 }
929 c->src.type = OP_MEM; 1010 c->src.type = OP_MEM;
@@ -967,6 +1048,7 @@ done_prefixes:
967 case DstMem: 1048 case DstMem:
968 if ((c->d & ModRM) && c->modrm_mod == 3) { 1049 if ((c->d & ModRM) && c->modrm_mod == 3) {
969 c->dst.type = OP_REG; 1050 c->dst.type = OP_REG;
1051 c->dst.val = c->dst.orig_val = c->modrm_val;
970 break; 1052 break;
971 } 1053 }
972 c->dst.type = OP_MEM; 1054 c->dst.type = OP_MEM;
@@ -984,8 +1066,8 @@ static inline void emulate_push(struct x86_emulate_ctxt *ctxt)
984 c->dst.type = OP_MEM; 1066 c->dst.type = OP_MEM;
985 c->dst.bytes = c->op_bytes; 1067 c->dst.bytes = c->op_bytes;
986 c->dst.val = c->src.val; 1068 c->dst.val = c->src.val;
987 register_address_increment(c->regs[VCPU_REGS_RSP], -c->op_bytes); 1069 register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
988 c->dst.ptr = (void *) register_address(ctxt->ss_base, 1070 c->dst.ptr = (void *) register_address(c, ctxt->ss_base,
989 c->regs[VCPU_REGS_RSP]); 1071 c->regs[VCPU_REGS_RSP]);
990} 1072}
991 1073
@@ -995,13 +1077,13 @@ static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
995 struct decode_cache *c = &ctxt->decode; 1077 struct decode_cache *c = &ctxt->decode;
996 int rc; 1078 int rc;
997 1079
998 rc = ops->read_std(register_address(ctxt->ss_base, 1080 rc = ops->read_std(register_address(c, ctxt->ss_base,
999 c->regs[VCPU_REGS_RSP]), 1081 c->regs[VCPU_REGS_RSP]),
1000 &c->dst.val, c->dst.bytes, ctxt->vcpu); 1082 &c->dst.val, c->dst.bytes, ctxt->vcpu);
1001 if (rc != 0) 1083 if (rc != 0)
1002 return rc; 1084 return rc;
1003 1085
1004 register_address_increment(c->regs[VCPU_REGS_RSP], c->dst.bytes); 1086 register_address_increment(c, &c->regs[VCPU_REGS_RSP], c->dst.bytes);
1005 1087
1006 return 0; 1088 return 0;
1007} 1089}
@@ -1043,26 +1125,6 @@ static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
1043 1125
1044 switch (c->modrm_reg) { 1126 switch (c->modrm_reg) {
1045 case 0 ... 1: /* test */ 1127 case 0 ... 1: /* test */
1046 /*
1047 * Special case in Grp3: test has an immediate
1048 * source operand.
1049 */
1050 c->src.type = OP_IMM;
1051 c->src.ptr = (unsigned long *)c->eip;
1052 c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
1053 if (c->src.bytes == 8)
1054 c->src.bytes = 4;
1055 switch (c->src.bytes) {
1056 case 1:
1057 c->src.val = insn_fetch(s8, 1, c->eip);
1058 break;
1059 case 2:
1060 c->src.val = insn_fetch(s16, 2, c->eip);
1061 break;
1062 case 4:
1063 c->src.val = insn_fetch(s32, 4, c->eip);
1064 break;
1065 }
1066 emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags); 1128 emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
1067 break; 1129 break;
1068 case 2: /* not */ 1130 case 2: /* not */
@@ -1076,7 +1138,6 @@ static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
1076 rc = X86EMUL_UNHANDLEABLE; 1138 rc = X86EMUL_UNHANDLEABLE;
1077 break; 1139 break;
1078 } 1140 }
1079done:
1080 return rc; 1141 return rc;
1081} 1142}
1082 1143
@@ -1084,7 +1145,6 @@ static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
1084 struct x86_emulate_ops *ops) 1145 struct x86_emulate_ops *ops)
1085{ 1146{
1086 struct decode_cache *c = &ctxt->decode; 1147 struct decode_cache *c = &ctxt->decode;
1087 int rc;
1088 1148
1089 switch (c->modrm_reg) { 1149 switch (c->modrm_reg) {
1090 case 0: /* inc */ 1150 case 0: /* inc */
@@ -1094,36 +1154,11 @@ static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
1094 emulate_1op("dec", c->dst, ctxt->eflags); 1154 emulate_1op("dec", c->dst, ctxt->eflags);
1095 break; 1155 break;
1096 case 4: /* jmp abs */ 1156 case 4: /* jmp abs */
1097 if (c->b == 0xff) 1157 c->eip = c->src.val;
1098 c->eip = c->dst.val;
1099 else {
1100 DPRINTF("Cannot emulate %02x\n", c->b);
1101 return X86EMUL_UNHANDLEABLE;
1102 }
1103 break; 1158 break;
1104 case 6: /* push */ 1159 case 6: /* push */
1105 1160 emulate_push(ctxt);
1106 /* 64-bit mode: PUSH always pushes a 64-bit operand. */
1107
1108 if (ctxt->mode == X86EMUL_MODE_PROT64) {
1109 c->dst.bytes = 8;
1110 rc = ops->read_std((unsigned long)c->dst.ptr,
1111 &c->dst.val, 8, ctxt->vcpu);
1112 if (rc != 0)
1113 return rc;
1114 }
1115 register_address_increment(c->regs[VCPU_REGS_RSP],
1116 -c->dst.bytes);
1117 rc = ops->write_emulated(register_address(ctxt->ss_base,
1118 c->regs[VCPU_REGS_RSP]), &c->dst.val,
1119 c->dst.bytes, ctxt->vcpu);
1120 if (rc != 0)
1121 return rc;
1122 c->dst.type = OP_NONE;
1123 break; 1161 break;
1124 default:
1125 DPRINTF("Cannot emulate %02x\n", c->b);
1126 return X86EMUL_UNHANDLEABLE;
1127 } 1162 }
1128 return 0; 1163 return 0;
1129} 1164}
@@ -1361,19 +1396,19 @@ special_insn:
1361 c->dst.type = OP_MEM; 1396 c->dst.type = OP_MEM;
1362 c->dst.bytes = c->op_bytes; 1397 c->dst.bytes = c->op_bytes;
1363 c->dst.val = c->src.val; 1398 c->dst.val = c->src.val;
1364 register_address_increment(c->regs[VCPU_REGS_RSP], 1399 register_address_increment(c, &c->regs[VCPU_REGS_RSP],
1365 -c->op_bytes); 1400 -c->op_bytes);
1366 c->dst.ptr = (void *) register_address( 1401 c->dst.ptr = (void *) register_address(
1367 ctxt->ss_base, c->regs[VCPU_REGS_RSP]); 1402 c, ctxt->ss_base, c->regs[VCPU_REGS_RSP]);
1368 break; 1403 break;
1369 case 0x58 ... 0x5f: /* pop reg */ 1404 case 0x58 ... 0x5f: /* pop reg */
1370 pop_instruction: 1405 pop_instruction:
1371 if ((rc = ops->read_std(register_address(ctxt->ss_base, 1406 if ((rc = ops->read_std(register_address(c, ctxt->ss_base,
1372 c->regs[VCPU_REGS_RSP]), c->dst.ptr, 1407 c->regs[VCPU_REGS_RSP]), c->dst.ptr,
1373 c->op_bytes, ctxt->vcpu)) != 0) 1408 c->op_bytes, ctxt->vcpu)) != 0)
1374 goto done; 1409 goto done;
1375 1410
1376 register_address_increment(c->regs[VCPU_REGS_RSP], 1411 register_address_increment(c, &c->regs[VCPU_REGS_RSP],
1377 c->op_bytes); 1412 c->op_bytes);
1378 c->dst.type = OP_NONE; /* Disable writeback. */ 1413 c->dst.type = OP_NONE; /* Disable writeback. */
1379 break; 1414 break;
@@ -1393,9 +1428,9 @@ special_insn:
1393 1, 1428 1,
1394 (c->d & ByteOp) ? 1 : c->op_bytes, 1429 (c->d & ByteOp) ? 1 : c->op_bytes,
1395 c->rep_prefix ? 1430 c->rep_prefix ?
1396 address_mask(c->regs[VCPU_REGS_RCX]) : 1, 1431 address_mask(c, c->regs[VCPU_REGS_RCX]) : 1,
1397 (ctxt->eflags & EFLG_DF), 1432 (ctxt->eflags & EFLG_DF),
1398 register_address(ctxt->es_base, 1433 register_address(c, ctxt->es_base,
1399 c->regs[VCPU_REGS_RDI]), 1434 c->regs[VCPU_REGS_RDI]),
1400 c->rep_prefix, 1435 c->rep_prefix,
1401 c->regs[VCPU_REGS_RDX]) == 0) { 1436 c->regs[VCPU_REGS_RDX]) == 0) {
@@ -1409,9 +1444,9 @@ special_insn:
1409 0, 1444 0,
1410 (c->d & ByteOp) ? 1 : c->op_bytes, 1445 (c->d & ByteOp) ? 1 : c->op_bytes,
1411 c->rep_prefix ? 1446 c->rep_prefix ?
1412 address_mask(c->regs[VCPU_REGS_RCX]) : 1, 1447 address_mask(c, c->regs[VCPU_REGS_RCX]) : 1,
1413 (ctxt->eflags & EFLG_DF), 1448 (ctxt->eflags & EFLG_DF),
1414 register_address(c->override_base ? 1449 register_address(c, c->override_base ?
1415 *c->override_base : 1450 *c->override_base :
1416 ctxt->ds_base, 1451 ctxt->ds_base,
1417 c->regs[VCPU_REGS_RSI]), 1452 c->regs[VCPU_REGS_RSI]),
@@ -1425,7 +1460,7 @@ special_insn:
1425 int rel = insn_fetch(s8, 1, c->eip); 1460 int rel = insn_fetch(s8, 1, c->eip);
1426 1461
1427 if (test_cc(c->b, ctxt->eflags)) 1462 if (test_cc(c->b, ctxt->eflags))
1428 JMP_REL(rel); 1463 jmp_rel(c, rel);
1429 break; 1464 break;
1430 } 1465 }
1431 case 0x80 ... 0x83: /* Grp1 */ 1466 case 0x80 ... 0x83: /* Grp1 */
@@ -1477,7 +1512,7 @@ special_insn:
1477 case 0x88 ... 0x8b: /* mov */ 1512 case 0x88 ... 0x8b: /* mov */
1478 goto mov; 1513 goto mov;
1479 case 0x8d: /* lea r16/r32, m */ 1514 case 0x8d: /* lea r16/r32, m */
1480 c->dst.val = c->modrm_val; 1515 c->dst.val = c->modrm_ea;
1481 break; 1516 break;
1482 case 0x8f: /* pop (sole member of Grp1a) */ 1517 case 0x8f: /* pop (sole member of Grp1a) */
1483 rc = emulate_grp1a(ctxt, ops); 1518 rc = emulate_grp1a(ctxt, ops);
@@ -1501,27 +1536,27 @@ special_insn:
1501 case 0xa4 ... 0xa5: /* movs */ 1536 case 0xa4 ... 0xa5: /* movs */
1502 c->dst.type = OP_MEM; 1537 c->dst.type = OP_MEM;
1503 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; 1538 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
1504 c->dst.ptr = (unsigned long *)register_address( 1539 c->dst.ptr = (unsigned long *)register_address(c,
1505 ctxt->es_base, 1540 ctxt->es_base,
1506 c->regs[VCPU_REGS_RDI]); 1541 c->regs[VCPU_REGS_RDI]);
1507 if ((rc = ops->read_emulated(register_address( 1542 if ((rc = ops->read_emulated(register_address(c,
1508 c->override_base ? *c->override_base : 1543 c->override_base ? *c->override_base :
1509 ctxt->ds_base, 1544 ctxt->ds_base,
1510 c->regs[VCPU_REGS_RSI]), 1545 c->regs[VCPU_REGS_RSI]),
1511 &c->dst.val, 1546 &c->dst.val,
1512 c->dst.bytes, ctxt->vcpu)) != 0) 1547 c->dst.bytes, ctxt->vcpu)) != 0)
1513 goto done; 1548 goto done;
1514 register_address_increment(c->regs[VCPU_REGS_RSI], 1549 register_address_increment(c, &c->regs[VCPU_REGS_RSI],
1515 (ctxt->eflags & EFLG_DF) ? -c->dst.bytes 1550 (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
1516 : c->dst.bytes); 1551 : c->dst.bytes);
1517 register_address_increment(c->regs[VCPU_REGS_RDI], 1552 register_address_increment(c, &c->regs[VCPU_REGS_RDI],
1518 (ctxt->eflags & EFLG_DF) ? -c->dst.bytes 1553 (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
1519 : c->dst.bytes); 1554 : c->dst.bytes);
1520 break; 1555 break;
1521 case 0xa6 ... 0xa7: /* cmps */ 1556 case 0xa6 ... 0xa7: /* cmps */
1522 c->src.type = OP_NONE; /* Disable writeback. */ 1557 c->src.type = OP_NONE; /* Disable writeback. */
1523 c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; 1558 c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
1524 c->src.ptr = (unsigned long *)register_address( 1559 c->src.ptr = (unsigned long *)register_address(c,
1525 c->override_base ? *c->override_base : 1560 c->override_base ? *c->override_base :
1526 ctxt->ds_base, 1561 ctxt->ds_base,
1527 c->regs[VCPU_REGS_RSI]); 1562 c->regs[VCPU_REGS_RSI]);
@@ -1533,7 +1568,7 @@ special_insn:
1533 1568
1534 c->dst.type = OP_NONE; /* Disable writeback. */ 1569 c->dst.type = OP_NONE; /* Disable writeback. */
1535 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; 1570 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
1536 c->dst.ptr = (unsigned long *)register_address( 1571 c->dst.ptr = (unsigned long *)register_address(c,
1537 ctxt->es_base, 1572 ctxt->es_base,
1538 c->regs[VCPU_REGS_RDI]); 1573 c->regs[VCPU_REGS_RDI]);
1539 if ((rc = ops->read_emulated((unsigned long)c->dst.ptr, 1574 if ((rc = ops->read_emulated((unsigned long)c->dst.ptr,
@@ -1546,10 +1581,10 @@ special_insn:
1546 1581
1547 emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags); 1582 emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
1548 1583
1549 register_address_increment(c->regs[VCPU_REGS_RSI], 1584 register_address_increment(c, &c->regs[VCPU_REGS_RSI],
1550 (ctxt->eflags & EFLG_DF) ? -c->src.bytes 1585 (ctxt->eflags & EFLG_DF) ? -c->src.bytes
1551 : c->src.bytes); 1586 : c->src.bytes);
1552 register_address_increment(c->regs[VCPU_REGS_RDI], 1587 register_address_increment(c, &c->regs[VCPU_REGS_RDI],
1553 (ctxt->eflags & EFLG_DF) ? -c->dst.bytes 1588 (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
1554 : c->dst.bytes); 1589 : c->dst.bytes);
1555 1590
@@ -1557,11 +1592,11 @@ special_insn:
1557 case 0xaa ... 0xab: /* stos */ 1592 case 0xaa ... 0xab: /* stos */
1558 c->dst.type = OP_MEM; 1593 c->dst.type = OP_MEM;
1559 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; 1594 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
1560 c->dst.ptr = (unsigned long *)register_address( 1595 c->dst.ptr = (unsigned long *)register_address(c,
1561 ctxt->es_base, 1596 ctxt->es_base,
1562 c->regs[VCPU_REGS_RDI]); 1597 c->regs[VCPU_REGS_RDI]);
1563 c->dst.val = c->regs[VCPU_REGS_RAX]; 1598 c->dst.val = c->regs[VCPU_REGS_RAX];
1564 register_address_increment(c->regs[VCPU_REGS_RDI], 1599 register_address_increment(c, &c->regs[VCPU_REGS_RDI],
1565 (ctxt->eflags & EFLG_DF) ? -c->dst.bytes 1600 (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
1566 : c->dst.bytes); 1601 : c->dst.bytes);
1567 break; 1602 break;
@@ -1569,7 +1604,7 @@ special_insn:
1569 c->dst.type = OP_REG; 1604 c->dst.type = OP_REG;
1570 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; 1605 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
1571 c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX]; 1606 c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
1572 if ((rc = ops->read_emulated(register_address( 1607 if ((rc = ops->read_emulated(register_address(c,
1573 c->override_base ? *c->override_base : 1608 c->override_base ? *c->override_base :
1574 ctxt->ds_base, 1609 ctxt->ds_base,
1575 c->regs[VCPU_REGS_RSI]), 1610 c->regs[VCPU_REGS_RSI]),
@@ -1577,7 +1612,7 @@ special_insn:
1577 c->dst.bytes, 1612 c->dst.bytes,
1578 ctxt->vcpu)) != 0) 1613 ctxt->vcpu)) != 0)
1579 goto done; 1614 goto done;
1580 register_address_increment(c->regs[VCPU_REGS_RSI], 1615 register_address_increment(c, &c->regs[VCPU_REGS_RSI],
1581 (ctxt->eflags & EFLG_DF) ? -c->dst.bytes 1616 (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
1582 : c->dst.bytes); 1617 : c->dst.bytes);
1583 break; 1618 break;
@@ -1616,14 +1651,14 @@ special_insn:
1616 goto cannot_emulate; 1651 goto cannot_emulate;
1617 } 1652 }
1618 c->src.val = (unsigned long) c->eip; 1653 c->src.val = (unsigned long) c->eip;
1619 JMP_REL(rel); 1654 jmp_rel(c, rel);
1620 c->op_bytes = c->ad_bytes; 1655 c->op_bytes = c->ad_bytes;
1621 emulate_push(ctxt); 1656 emulate_push(ctxt);
1622 break; 1657 break;
1623 } 1658 }
1624 case 0xe9: /* jmp rel */ 1659 case 0xe9: /* jmp rel */
1625 case 0xeb: /* jmp rel short */ 1660 case 0xeb: /* jmp rel short */
1626 JMP_REL(c->src.val); 1661 jmp_rel(c, c->src.val);
1627 c->dst.type = OP_NONE; /* Disable writeback. */ 1662 c->dst.type = OP_NONE; /* Disable writeback. */
1628 break; 1663 break;
1629 case 0xf4: /* hlt */ 1664 case 0xf4: /* hlt */
@@ -1690,6 +1725,8 @@ twobyte_insn:
1690 goto done; 1725 goto done;
1691 1726
1692 kvm_emulate_hypercall(ctxt->vcpu); 1727 kvm_emulate_hypercall(ctxt->vcpu);
1728 /* Disable writeback. */
1729 c->dst.type = OP_NONE;
1693 break; 1730 break;
1694 case 2: /* lgdt */ 1731 case 2: /* lgdt */
1695 rc = read_descriptor(ctxt, ops, c->src.ptr, 1732 rc = read_descriptor(ctxt, ops, c->src.ptr,
@@ -1697,6 +1734,8 @@ twobyte_insn:
1697 if (rc) 1734 if (rc)
1698 goto done; 1735 goto done;
1699 realmode_lgdt(ctxt->vcpu, size, address); 1736 realmode_lgdt(ctxt->vcpu, size, address);
1737 /* Disable writeback. */
1738 c->dst.type = OP_NONE;
1700 break; 1739 break;
1701 case 3: /* lidt/vmmcall */ 1740 case 3: /* lidt/vmmcall */
1702 if (c->modrm_mod == 3 && c->modrm_rm == 1) { 1741 if (c->modrm_mod == 3 && c->modrm_rm == 1) {
@@ -1712,27 +1751,25 @@ twobyte_insn:
1712 goto done; 1751 goto done;
1713 realmode_lidt(ctxt->vcpu, size, address); 1752 realmode_lidt(ctxt->vcpu, size, address);
1714 } 1753 }
1754 /* Disable writeback. */
1755 c->dst.type = OP_NONE;
1715 break; 1756 break;
1716 case 4: /* smsw */ 1757 case 4: /* smsw */
1717 if (c->modrm_mod != 3) 1758 c->dst.bytes = 2;
1718 goto cannot_emulate; 1759 c->dst.val = realmode_get_cr(ctxt->vcpu, 0);
1719 *(u16 *)&c->regs[c->modrm_rm]
1720 = realmode_get_cr(ctxt->vcpu, 0);
1721 break; 1760 break;
1722 case 6: /* lmsw */ 1761 case 6: /* lmsw */
1723 if (c->modrm_mod != 3) 1762 realmode_lmsw(ctxt->vcpu, (u16)c->src.val,
1724 goto cannot_emulate; 1763 &ctxt->eflags);
1725 realmode_lmsw(ctxt->vcpu, (u16)c->modrm_val,
1726 &ctxt->eflags);
1727 break; 1764 break;
1728 case 7: /* invlpg*/ 1765 case 7: /* invlpg*/
1729 emulate_invlpg(ctxt->vcpu, memop); 1766 emulate_invlpg(ctxt->vcpu, memop);
1767 /* Disable writeback. */
1768 c->dst.type = OP_NONE;
1730 break; 1769 break;
1731 default: 1770 default:
1732 goto cannot_emulate; 1771 goto cannot_emulate;
1733 } 1772 }
1734 /* Disable writeback. */
1735 c->dst.type = OP_NONE;
1736 break; 1773 break;
1737 case 0x06: 1774 case 0x06:
1738 emulate_clts(ctxt->vcpu); 1775 emulate_clts(ctxt->vcpu);
@@ -1823,7 +1860,7 @@ twobyte_insn:
1823 goto cannot_emulate; 1860 goto cannot_emulate;
1824 } 1861 }
1825 if (test_cc(c->b, ctxt->eflags)) 1862 if (test_cc(c->b, ctxt->eflags))
1826 JMP_REL(rel); 1863 jmp_rel(c, rel);
1827 c->dst.type = OP_NONE; 1864 c->dst.type = OP_NONE;
1828 break; 1865 break;
1829 } 1866 }
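The emulator hunks above thread the decode context c into register_address(), register_address_increment(), address_mask() and the new jmp_rel() helper, so stack pushes, string moves and relative jumps wrap at the guest's effective address size instead of being computed as flat longs. A minimal userspace sketch of that masking idea, with illustrative names and a made-up 16-bit stack scenario rather than the kernel's actual helpers:

#include <stdio.h>
#include <stdint.h>

struct decode_ctx {
	int ad_bytes;                   /* effective address size: 2, 4 or 8 */
	uint64_t regs[16];
};

/* mask a value down to the guest's address width */
static uint64_t address_mask(struct decode_ctx *c, uint64_t x)
{
	if (c->ad_bytes == 8)
		return x;
	return x & ((1ULL << (c->ad_bytes * 8)) - 1);
}

/* add to a register, touching only the low ad_bytes * 8 bits */
static void register_address_increment(struct decode_ctx *c,
				       uint64_t *reg, long inc)
{
	uint64_t mask = (c->ad_bytes == 8) ?
			~0ULL : (1ULL << (c->ad_bytes * 8)) - 1;

	*reg = (*reg & ~mask) | ((*reg + inc) & mask);
}

int main(void)
{
	struct decode_ctx c = { .ad_bytes = 2 };

	c.regs[4] = 0x10000;            /* 16-bit SP of 0x0000, junk above */
	register_address_increment(&c, &c.regs[4], -2);
	printf("sp after push: 0x%llx\n",
	       (unsigned long long)address_mask(&c, c.regs[4]));
	return 0;
}

With a 2-byte address size the decrement wraps to 0xfffe instead of borrowing into the upper bits, which is exactly why the real helpers now need the context argument.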
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 25df1c1989fe..76f60f52a885 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -11,7 +11,7 @@ lib-y += memcpy_$(BITS).o
11ifeq ($(CONFIG_X86_32),y) 11ifeq ($(CONFIG_X86_32),y)
12 lib-y += checksum_32.o 12 lib-y += checksum_32.o
13 lib-y += strstr_32.o 13 lib-y += strstr_32.o
14 lib-y += bitops_32.o semaphore_32.o string_32.o 14 lib-y += semaphore_32.o string_32.o
15 15
16 lib-$(CONFIG_X86_USE_3DNOW) += mmx_32.o 16 lib-$(CONFIG_X86_USE_3DNOW) += mmx_32.o
17else 17else
@@ -21,7 +21,6 @@ else
21 21
22 lib-y += csum-partial_64.o csum-copy_64.o csum-wrappers_64.o 22 lib-y += csum-partial_64.o csum-copy_64.o csum-wrappers_64.o
23 lib-y += thunk_64.o clear_page_64.o copy_page_64.o 23 lib-y += thunk_64.o clear_page_64.o copy_page_64.o
24 lib-y += bitops_64.o
25 lib-y += memmove_64.o memset_64.o 24 lib-y += memmove_64.o memset_64.o
26 lib-y += copy_user_64.o rwlock_64.o copy_user_nocache_64.o 25 lib-y += copy_user_64.o rwlock_64.o copy_user_nocache_64.o
27endif 26endif
diff --git a/arch/x86/lib/bitops_32.c b/arch/x86/lib/bitops_32.c
deleted file mode 100644
index b65440459859..000000000000
--- a/arch/x86/lib/bitops_32.c
+++ /dev/null
@@ -1,70 +0,0 @@
1#include <linux/bitops.h>
2#include <linux/module.h>
3
4/**
5 * find_next_bit - find the next set bit in a memory region
6 * @addr: The address to base the search on
7 * @offset: The bitnumber to start searching at
8 * @size: The maximum size to search
9 */
10int find_next_bit(const unsigned long *addr, int size, int offset)
11{
12 const unsigned long *p = addr + (offset >> 5);
13 int set = 0, bit = offset & 31, res;
14
15 if (bit) {
16 /*
17 * Look for nonzero in the first 32 bits:
18 */
19 __asm__("bsfl %1,%0\n\t"
20 "jne 1f\n\t"
21 "movl $32, %0\n"
22 "1:"
23 : "=r" (set)
24 : "r" (*p >> bit));
25 if (set < (32 - bit))
26 return set + offset;
27 set = 32 - bit;
28 p++;
29 }
30 /*
31 * No set bit yet, search remaining full words for a bit
32 */
33 res = find_first_bit (p, size - 32 * (p - addr));
34 return (offset + set + res);
35}
36EXPORT_SYMBOL(find_next_bit);
37
38/**
39 * find_next_zero_bit - find the next zero bit in a memory region
40 * @addr: The address to base the search on
41 * @offset: The bitnumber to start searching at
42 * @size: The maximum size to search
43 */
44int find_next_zero_bit(const unsigned long *addr, int size, int offset)
45{
46 const unsigned long *p = addr + (offset >> 5);
47 int set = 0, bit = offset & 31, res;
48
49 if (bit) {
50 /*
51 * Look for zero in the first 32 bits.
52 */
53 __asm__("bsfl %1,%0\n\t"
54 "jne 1f\n\t"
55 "movl $32, %0\n"
56 "1:"
57 : "=r" (set)
58 : "r" (~(*p >> bit)));
59 if (set < (32 - bit))
60 return set + offset;
61 set = 32 - bit;
62 p++;
63 }
64 /*
65 * No zero yet, search remaining full bytes for a zero
66 */
67 res = find_first_zero_bit(p, size - 32 * (p - addr));
68 return (offset + set + res);
69}
70EXPORT_SYMBOL(find_next_zero_bit);
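The deleted 32-bit helpers above implement next-bit search as an inline bsfl over the first partial word followed by a full-word scan, presumably dropped here in favor of a generic library version. A straightforward bit-at-a-time userspace sketch of the same contract (return the index of the first set bit at or after offset, or size if none; illustrative only, not the replacement implementation):

#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

static int find_next_bit(const unsigned long *addr, int size, int offset)
{
	int bit;

	for (bit = offset; bit < size; bit++)
		if (addr[bit / BITS_PER_LONG] & (1UL << (bit % BITS_PER_LONG)))
			return bit;
	return size;                          /* no set bit found */
}

int main(void)
{
	unsigned long map[2] = { 0x0, 0x4 };  /* bit 2 of the second word */

	printf("next set bit: %d\n",
	       find_next_bit(map, 2 * BITS_PER_LONG, 0));
	return 0;
}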
diff --git a/arch/x86/lib/bitops_64.c b/arch/x86/lib/bitops_64.c
deleted file mode 100644
index 0e8f491e6ccc..000000000000
--- a/arch/x86/lib/bitops_64.c
+++ /dev/null
@@ -1,175 +0,0 @@
1#include <linux/bitops.h>
2
3#undef find_first_zero_bit
4#undef find_next_zero_bit
5#undef find_first_bit
6#undef find_next_bit
7
8static inline long
9__find_first_zero_bit(const unsigned long * addr, unsigned long size)
10{
11 long d0, d1, d2;
12 long res;
13
14 /*
15 * We must test the size in words, not in bits, because
16 * otherwise incoming sizes in the range -63..-1 will not run
17 * any scasq instructions, and then the flags used by the je
18 * instruction will have whatever random value was in place
19 * before. Nobody should call us like that, but
20 * find_next_zero_bit() does when offset and size are at the
21 * same word and it fails to find a zero itself.
22 */
23 size += 63;
24 size >>= 6;
25 if (!size)
26 return 0;
27 asm volatile(
28 " repe; scasq\n"
29 " je 1f\n"
30 " xorq -8(%%rdi),%%rax\n"
31 " subq $8,%%rdi\n"
32 " bsfq %%rax,%%rdx\n"
33 "1: subq %[addr],%%rdi\n"
34 " shlq $3,%%rdi\n"
35 " addq %%rdi,%%rdx"
36 :"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
37 :"0" (0ULL), "1" (size), "2" (addr), "3" (-1ULL),
38 [addr] "S" (addr) : "memory");
39 /*
40 * Any register would do for [addr] above, but GCC tends to
41 * prefer rbx over rsi, even though rsi is readily available
42 * and doesn't have to be saved.
43 */
44 return res;
45}
46
47/**
48 * find_first_zero_bit - find the first zero bit in a memory region
49 * @addr: The address to start the search at
50 * @size: The maximum size to search
51 *
52 * Returns the bit-number of the first zero bit, not the number of the byte
53 * containing a bit.
54 */
55long find_first_zero_bit(const unsigned long * addr, unsigned long size)
56{
57 return __find_first_zero_bit (addr, size);
58}
59
60/**
61 * find_next_zero_bit - find the next zero bit in a memory region
62 * @addr: The address to base the search on
63 * @offset: The bitnumber to start searching at
64 * @size: The maximum size to search
65 */
66long find_next_zero_bit (const unsigned long * addr, long size, long offset)
67{
68 const unsigned long * p = addr + (offset >> 6);
69 unsigned long set = 0;
70 unsigned long res, bit = offset&63;
71
72 if (bit) {
73 /*
74 * Look for zero in first word
75 */
76 asm("bsfq %1,%0\n\t"
77 "cmoveq %2,%0"
78 : "=r" (set)
79 : "r" (~(*p >> bit)), "r"(64L));
80 if (set < (64 - bit))
81 return set + offset;
82 set = 64 - bit;
83 p++;
84 }
85 /*
86 * No zero yet, search remaining full words for a zero
87 */
88 res = __find_first_zero_bit (p, size - 64 * (p - addr));
89
90 return (offset + set + res);
91}
92
93static inline long
94__find_first_bit(const unsigned long * addr, unsigned long size)
95{
96 long d0, d1;
97 long res;
98
99 /*
100 * We must test the size in words, not in bits, because
101 * otherwise incoming sizes in the range -63..-1 will not run
102 * any scasq instructions, and then the flags used by the jz
103 * instruction will have whatever random value was in place
104 * before. Nobody should call us like that, but
105 * find_next_bit() does when offset and size are at the same
106 * word and it fails to find a one itself.
107 */
108 size += 63;
109 size >>= 6;
110 if (!size)
111 return 0;
112 asm volatile(
113 " repe; scasq\n"
114 " jz 1f\n"
115 " subq $8,%%rdi\n"
116 " bsfq (%%rdi),%%rax\n"
117 "1: subq %[addr],%%rdi\n"
118 " shlq $3,%%rdi\n"
119 " addq %%rdi,%%rax"
120 :"=a" (res), "=&c" (d0), "=&D" (d1)
121 :"0" (0ULL), "1" (size), "2" (addr),
122 [addr] "r" (addr) : "memory");
123 return res;
124}
125
126/**
127 * find_first_bit - find the first set bit in a memory region
128 * @addr: The address to start the search at
129 * @size: The maximum size to search
130 *
131 * Returns the bit-number of the first set bit, not the number of the byte
132 * containing a bit.
133 */
134long find_first_bit(const unsigned long * addr, unsigned long size)
135{
136 return __find_first_bit(addr,size);
137}
138
139/**
140 * find_next_bit - find the next set bit in a memory region
141 * @addr: The address to base the search on
142 * @offset: The bitnumber to start searching at
143 * @size: The maximum size to search
144 */
145long find_next_bit(const unsigned long * addr, long size, long offset)
146{
147 const unsigned long * p = addr + (offset >> 6);
148 unsigned long set = 0, bit = offset & 63, res;
149
150 if (bit) {
151 /*
152 * Look for nonzero in the first 64 bits:
153 */
154 asm("bsfq %1,%0\n\t"
155 "cmoveq %2,%0\n\t"
156 : "=r" (set)
157 : "r" (*p >> bit), "r" (64L));
158 if (set < (64 - bit))
159 return set + offset;
160 set = 64 - bit;
161 p++;
162 }
163 /*
164 * No set bit yet, search remaining full words for a bit
165 */
166 res = __find_first_bit (p, size - 64 * (p - addr));
167 return (offset + set + res);
168}
169
170#include <linux/module.h>
171
172EXPORT_SYMBOL(find_next_bit);
173EXPORT_SYMBOL(find_first_bit);
174EXPORT_SYMBOL(find_first_zero_bit);
175EXPORT_SYMBOL(find_next_zero_bit);
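The long comment inside __find_first_zero_bit() and __find_first_bit() above carries the one subtle point: a bogus size in the range -63..-1 must round to zero words, otherwise repe scasq never executes and the following je tests stale flags. A tiny sketch of just that rounding guard:

#include <stdio.h>

int main(void)
{
	long size = -5;                 /* bogus caller-supplied bit count */
	long words = (size + 63) >> 6;  /* round up to 64-bit words */

	printf("words = %ld -> %s\n", words,
	       words ? "run the scan" : "return 0 before touching flags");
	return 0;
}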
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 6e2c4efce0ef..8acbf0cdf1a5 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -113,7 +113,7 @@ static inline void send_QIC_CPI(__u32 cpuset, __u8 cpi)
113 for_each_online_cpu(cpu) { 113 for_each_online_cpu(cpu) {
114 if (cpuset & (1 << cpu)) { 114 if (cpuset & (1 << cpu)) {
115#ifdef VOYAGER_DEBUG 115#ifdef VOYAGER_DEBUG
116 if (!cpu_isset(cpu, cpu_online_map)) 116 if (!cpu_online(cpu))
117 VDEBUG(("CPU%d sending cpi %d to CPU%d not in " 117 VDEBUG(("CPU%d sending cpi %d to CPU%d not in "
118 "cpu_online_map\n", 118 "cpu_online_map\n",
119 hard_smp_processor_id(), cpi, cpu)); 119 hard_smp_processor_id(), cpi, cpu));
@@ -683,9 +683,9 @@ void __init smp_boot_cpus(void)
683 * Code added from smpboot.c */ 683 * Code added from smpboot.c */
684 { 684 {
685 unsigned long bogosum = 0; 685 unsigned long bogosum = 0;
686 for (i = 0; i < NR_CPUS; i++) 686
687 if (cpu_isset(i, cpu_online_map)) 687 for_each_online_cpu(i)
688 bogosum += cpu_data(i).loops_per_jiffy; 688 bogosum += cpu_data(i).loops_per_jiffy;
689 printk(KERN_INFO "Total of %d processors activated " 689 printk(KERN_INFO "Total of %d processors activated "
690 "(%lu.%02lu BogoMIPS).\n", 690 "(%lu.%02lu BogoMIPS).\n",
691 cpucount + 1, bogosum / (500000 / HZ), 691 cpucount + 1, bogosum / (500000 / HZ),
@@ -1838,7 +1838,7 @@ static int __cpuinit voyager_cpu_up(unsigned int cpu)
1838 return -EIO; 1838 return -EIO;
1839 /* Unleash the CPU! */ 1839 /* Unleash the CPU! */
1840 cpu_set(cpu, smp_commenced_mask); 1840 cpu_set(cpu, smp_commenced_mask);
1841 while (!cpu_isset(cpu, cpu_online_map)) 1841 while (!cpu_online(cpu))
1842 mb(); 1842 mb();
1843 return 0; 1843 return 0;
1844} 1844}
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index baf7c4f643c8..4a4761892951 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -566,9 +566,9 @@ void __init paging_init(void)
566 566
567/* 567/*
568 * Test if the WP bit works in supervisor mode. It isn't supported on 386's 568 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
569 * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This 569 * and also on some strange 486's. All 586+'s are OK. This used to involve
570 * used to involve black magic jumps to work around some nasty CPU bugs, 570 * black magic jumps to work around some nasty CPU bugs, but fortunately the
571 * but fortunately the switch to using exceptions got rid of all that. 571 * switch to using exceptions got rid of all that.
572 */ 572 */
573static void __init test_wp_bit(void) 573static void __init test_wp_bit(void)
574{ 574{
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 0cca62663037..5fbb8652cf59 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -810,7 +810,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
810void __init reserve_bootmem_generic(unsigned long phys, unsigned len) 810void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
811{ 811{
812#ifdef CONFIG_NUMA 812#ifdef CONFIG_NUMA
813 int nid = phys_to_nid(phys); 813 int nid, next_nid;
814#endif 814#endif
815 unsigned long pfn = phys >> PAGE_SHIFT; 815 unsigned long pfn = phys >> PAGE_SHIFT;
816 816
@@ -829,10 +829,16 @@ void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
829 829
830 /* Should check here against the e820 map to avoid double free */ 830 /* Should check here against the e820 map to avoid double free */
831#ifdef CONFIG_NUMA 831#ifdef CONFIG_NUMA
832 reserve_bootmem_node(NODE_DATA(nid), phys, len, BOOTMEM_DEFAULT); 832 nid = phys_to_nid(phys);
833 next_nid = phys_to_nid(phys + len - 1);
834 if (nid == next_nid)
835 reserve_bootmem_node(NODE_DATA(nid), phys, len, BOOTMEM_DEFAULT);
836 else
837 reserve_bootmem(phys, len, BOOTMEM_DEFAULT);
833#else 838#else
834 reserve_bootmem(phys, len, BOOTMEM_DEFAULT); 839 reserve_bootmem(phys, len, BOOTMEM_DEFAULT);
835#endif 840#endif
841
836 if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) { 842 if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
837 dma_reserve += len / PAGE_SIZE; 843 dma_reserve += len / PAGE_SIZE;
838 set_dma_reserve(dma_reserve); 844 set_dma_reserve(dma_reserve);
@@ -926,6 +932,10 @@ const char *arch_vma_name(struct vm_area_struct *vma)
926/* 932/*
927 * Initialise the sparsemem vmemmap using huge-pages at the PMD level. 933 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
928 */ 934 */
935static long __meminitdata addr_start, addr_end;
936static void __meminitdata *p_start, *p_end;
937static int __meminitdata node_start;
938
929int __meminit 939int __meminit
930vmemmap_populate(struct page *start_page, unsigned long size, int node) 940vmemmap_populate(struct page *start_page, unsigned long size, int node)
931{ 941{
@@ -960,12 +970,32 @@ vmemmap_populate(struct page *start_page, unsigned long size, int node)
960 PAGE_KERNEL_LARGE); 970 PAGE_KERNEL_LARGE);
961 set_pmd(pmd, __pmd(pte_val(entry))); 971 set_pmd(pmd, __pmd(pte_val(entry)));
962 972
963 printk(KERN_DEBUG " [%lx-%lx] PMD ->%p on node %d\n", 973 /* check to see if we have contiguous blocks */
964 addr, addr + PMD_SIZE - 1, p, node); 974 if (p_end != p || node_start != node) {
975 if (p_start)
976 printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
977 addr_start, addr_end-1, p_start, p_end-1, node_start);
978 addr_start = addr;
979 node_start = node;
980 p_start = p;
981 }
982 addr_end = addr + PMD_SIZE;
983 p_end = p + PMD_SIZE;
965 } else { 984 } else {
966 vmemmap_verify((pte_t *)pmd, node, addr, next); 985 vmemmap_verify((pte_t *)pmd, node, addr, next);
967 } 986 }
968 } 987 }
969 return 0; 988 return 0;
970} 989}
990
991void __meminit vmemmap_populate_print_last(void)
992{
993 if (p_start) {
994 printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
995 addr_start, addr_end-1, p_start, p_end-1, node_start);
996 p_start = NULL;
997 p_end = NULL;
998 node_start = 0;
999 }
1000}
971#endif 1001#endif
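Instead of one KERN_DEBUG line per PMD, the code above now remembers the current contiguous run and prints it only when a discontinuity appears, with vmemmap_populate_print_last() flushing the final run. The same coalescing idea in miniature:

#include <stdio.h>

static long run_start = -1, run_end;

static void note_block(long addr, long size)
{
	if (run_start >= 0 && addr == run_end) {
		run_end = addr + size;          /* extend the current run */
		return;
	}
	if (run_start >= 0)                     /* discontinuity: emit run */
		printf("[%lx-%lx]\n", run_start, run_end - 1);
	run_start = addr;
	run_end = addr + size;
}

static void flush_last(void)
{
	if (run_start >= 0)
		printf("[%lx-%lx]\n", run_start, run_end - 1);
	run_start = -1;
}

int main(void)
{
	note_block(0x1000, 0x1000);
	note_block(0x2000, 0x1000);   /* contiguous: merged silently */
	note_block(0x8000, 0x1000);   /* gap: first run printed */
	flush_last();                 /* final run printed */
	return 0;
}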
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 9a6892200b27..c5066d519e5d 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -196,6 +196,7 @@ void __init setup_node_bootmem(int nodeid, unsigned long start,
196 unsigned long bootmap_start, nodedata_phys; 196 unsigned long bootmap_start, nodedata_phys;
197 void *bootmap; 197 void *bootmap;
198 const int pgdat_size = round_up(sizeof(pg_data_t), PAGE_SIZE); 198 const int pgdat_size = round_up(sizeof(pg_data_t), PAGE_SIZE);
199 int nid;
199 200
200 start = round_up(start, ZONE_ALIGN); 201 start = round_up(start, ZONE_ALIGN);
201 202
@@ -218,9 +219,19 @@ void __init setup_node_bootmem(int nodeid, unsigned long start,
218 NODE_DATA(nodeid)->node_start_pfn = start_pfn; 219 NODE_DATA(nodeid)->node_start_pfn = start_pfn;
219 NODE_DATA(nodeid)->node_spanned_pages = end_pfn - start_pfn; 220 NODE_DATA(nodeid)->node_spanned_pages = end_pfn - start_pfn;
220 221
221 /* Find a place for the bootmem map */ 222 /*
223 * Find a place for the bootmem map
224 * nodedata_phys may end up on another node via alloc_bootmem,
225 * so make sure bootmap_start is not too low; otherwise
226 * early_node_mem would grab that range with find_e820_area instead
227 * of alloc_bootmem, which could clash with a reserved range
228 */
222 bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn); 229 bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
223 bootmap_start = round_up(nodedata_phys + pgdat_size, PAGE_SIZE); 230 nid = phys_to_nid(nodedata_phys);
231 if (nid == nodeid)
232 bootmap_start = round_up(nodedata_phys + pgdat_size, PAGE_SIZE);
233 else
234 bootmap_start = round_up(start, PAGE_SIZE);
224 /* 235 /*
225 * SMP_CACHE_BYTES could be enough, but init_bootmem_node likes 236 * SMP_CACHE_BYTES could be enough, but init_bootmem_node likes
226 * to use that to align to PAGE_SIZE 237 * to use that to align to PAGE_SIZE
@@ -245,10 +256,29 @@ void __init setup_node_bootmem(int nodeid, unsigned long start,
245 256
246 free_bootmem_with_active_regions(nodeid, end); 257 free_bootmem_with_active_regions(nodeid, end);
247 258
248 reserve_bootmem_node(NODE_DATA(nodeid), nodedata_phys, pgdat_size, 259 /*
249 BOOTMEM_DEFAULT); 260 * convert early reservations to bootmem reservations now,
250 reserve_bootmem_node(NODE_DATA(nodeid), bootmap_start, 261 * otherwise early_node_mem could hand out memory that was
251 bootmap_pages<<PAGE_SHIFT, BOOTMEM_DEFAULT); 262 * early-reserved on a previous node
263 */
264 early_res_to_bootmem(start, end);
265
266 /*
267 * in some cases early_node_mem can use alloc_bootmem
268 * to get a range on another node; don't reserve that again
269 */
270 if (nid != nodeid)
271 printk(KERN_INFO " NODE_DATA(%d) on node %d\n", nodeid, nid);
272 else
273 reserve_bootmem_node(NODE_DATA(nodeid), nodedata_phys,
274 pgdat_size, BOOTMEM_DEFAULT);
275 nid = phys_to_nid(bootmap_start);
276 if (nid != nodeid)
277 printk(KERN_INFO " bootmap(%d) on node %d\n", nodeid, nid);
278 else
279 reserve_bootmem_node(NODE_DATA(nodeid), bootmap_start,
280 bootmap_pages<<PAGE_SHIFT, BOOTMEM_DEFAULT);
281
252#ifdef CONFIG_ACPI_NUMA 282#ifdef CONFIG_ACPI_NUMA
253 srat_reserve_add_area(nodeid); 283 srat_reserve_add_area(nodeid);
254#endif 284#endif
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index ef8b64b89c7d..e7ca7fc48d12 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -16,6 +16,7 @@
16#include <asm/msr.h> 16#include <asm/msr.h>
17#include <asm/tlbflush.h> 17#include <asm/tlbflush.h>
18#include <asm/processor.h> 18#include <asm/processor.h>
19#include <asm/page.h>
19#include <asm/pgtable.h> 20#include <asm/pgtable.h>
20#include <asm/pat.h> 21#include <asm/pat.h>
21#include <asm/e820.h> 22#include <asm/e820.h>
@@ -334,7 +335,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
334 break; 335 break;
335 } 336 }
336 337
337 printk("Overlap at 0x%Lx-0x%Lx\n", 338 pr_debug("Overlap at 0x%Lx-0x%Lx\n",
338 saved_ptr->start, saved_ptr->end); 339 saved_ptr->start, saved_ptr->end);
339 /* No conflict. Go ahead and add this new entry */ 340 /* No conflict. Go ahead and add this new entry */
340 list_add(&new_entry->nd, saved_ptr->nd.prev); 341 list_add(&new_entry->nd, saved_ptr->nd.prev);
@@ -477,6 +478,33 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
477 return vma_prot; 478 return vma_prot;
478} 479}
479 480
481#ifdef CONFIG_NONPROMISC_DEVMEM
482/* This check is done in drivers/char/mem.c when NONPROMISC_DEVMEM is set */
483static inline int range_is_allowed(unsigned long pfn, unsigned long size)
484{
485 return 1;
486}
487#else
488static inline int range_is_allowed(unsigned long pfn, unsigned long size)
489{
490 u64 from = ((u64)pfn) << PAGE_SHIFT;
491 u64 to = from + size;
492 u64 cursor = from;
493
494 while (cursor < to) {
495 if (!devmem_is_allowed(pfn)) {
496 printk(KERN_INFO
497 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
498 current->comm, from, to);
499 return 0;
500 }
501 cursor += PAGE_SIZE;
502 pfn++;
503 }
504 return 1;
505}
506#endif /* CONFIG_NONPROMISC_DEVMEM */
507
480int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, 508int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
481 unsigned long size, pgprot_t *vma_prot) 509 unsigned long size, pgprot_t *vma_prot)
482{ 510{
@@ -485,6 +513,9 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
485 unsigned long ret_flags; 513 unsigned long ret_flags;
486 int retval; 514 int retval;
487 515
516 if (!range_is_allowed(pfn, size))
517 return 0;
518
488 if (file->f_flags & O_SYNC) { 519 if (file->f_flags & O_SYNC) {
489 flags = _PAGE_CACHE_UC; 520 flags = _PAGE_CACHE_UC;
490 } 521 }
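range_is_allowed() above walks the request one page frame at a time and vetoes the whole /dev/mem mapping on the first disallowed page. A self-contained sketch of that walk, where the devmem_is_allowed() stand-in and its low-memory policy are made up for illustration:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* illustrative policy: pretend only the first 256 page frames are OK */
static int devmem_is_allowed(unsigned long pfn)
{
	return pfn < 256;
}

static int range_is_allowed(unsigned long pfn, unsigned long size)
{
	uint64_t from = ((uint64_t)pfn) << PAGE_SHIFT;
	uint64_t to = from + size;
	uint64_t cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn))
			return 0;     /* one bad page rejects the mapping */
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}

int main(void)
{
	printf("%d\n", range_is_allowed(0, 4 * PAGE_SIZE));    /* allowed */
	printf("%d\n", range_is_allowed(250, 10 * PAGE_SIZE)); /* crosses 256 */
	return 0;
}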
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 92dd3dbf3ffb..94e69000f982 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -193,7 +193,7 @@ void __init xen_smp_prepare_cpus(unsigned int max_cpus)
193 193
194 /* Restrict the possible_map according to max_cpus. */ 194 /* Restrict the possible_map according to max_cpus. */
195 while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) { 195 while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
196 for (cpu = NR_CPUS-1; !cpu_isset(cpu, cpu_possible_map); cpu--) 196 for (cpu = NR_CPUS - 1; !cpu_possible(cpu); cpu--)
197 continue; 197 continue;
198 cpu_clear(cpu, cpu_possible_map); 198 cpu_clear(cpu, cpu_possible_map);
199 } 199 }
diff --git a/block/bsg.c b/block/bsg.c
index f51172ed27c2..23ea4fd1a66d 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -699,14 +699,26 @@ static struct bsg_device *bsg_alloc_device(void)
699 return bd; 699 return bd;
700} 700}
701 701
702static void bsg_kref_release_function(struct kref *kref)
703{
704 struct bsg_class_device *bcd =
705 container_of(kref, struct bsg_class_device, ref);
706
707 if (bcd->release)
708 bcd->release(bcd->parent);
709
710 put_device(bcd->parent);
711}
712
702static int bsg_put_device(struct bsg_device *bd) 713static int bsg_put_device(struct bsg_device *bd)
703{ 714{
704 int ret = 0; 715 int ret = 0, do_free;
705 struct device *dev = bd->queue->bsg_dev.dev; 716 struct request_queue *q = bd->queue;
706 717
707 mutex_lock(&bsg_mutex); 718 mutex_lock(&bsg_mutex);
708 719
709 if (!atomic_dec_and_test(&bd->ref_count)) 720 do_free = atomic_dec_and_test(&bd->ref_count);
721 if (!do_free)
710 goto out; 722 goto out;
711 723
712 dprintk("%s: tearing down\n", bd->name); 724 dprintk("%s: tearing down\n", bd->name);
@@ -723,12 +735,13 @@ static int bsg_put_device(struct bsg_device *bd)
723 */ 735 */
724 ret = bsg_complete_all_commands(bd); 736 ret = bsg_complete_all_commands(bd);
725 737
726 blk_put_queue(bd->queue);
727 hlist_del(&bd->dev_list); 738 hlist_del(&bd->dev_list);
728 kfree(bd); 739 kfree(bd);
729out: 740out:
730 mutex_unlock(&bsg_mutex); 741 mutex_unlock(&bsg_mutex);
731 put_device(dev); 742 kref_put(&q->bsg_dev.ref, bsg_kref_release_function);
743 if (do_free)
744 blk_put_queue(q);
732 return ret; 745 return ret;
733} 746}
734 747
@@ -796,7 +809,7 @@ static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
796 mutex_lock(&bsg_mutex); 809 mutex_lock(&bsg_mutex);
797 bcd = idr_find(&bsg_minor_idr, iminor(inode)); 810 bcd = idr_find(&bsg_minor_idr, iminor(inode));
798 if (bcd) 811 if (bcd)
799 get_device(bcd->dev); 812 kref_get(&bcd->ref);
800 mutex_unlock(&bsg_mutex); 813 mutex_unlock(&bsg_mutex);
801 814
802 if (!bcd) 815 if (!bcd)
@@ -808,7 +821,7 @@ static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
808 821
809 bd = bsg_add_device(inode, bcd->queue, file); 822 bd = bsg_add_device(inode, bcd->queue, file);
810 if (IS_ERR(bd)) 823 if (IS_ERR(bd))
811 put_device(bcd->dev); 824 kref_put(&bcd->ref, bsg_kref_release_function);
812 825
813 return bd; 826 return bd;
814} 827}
@@ -947,14 +960,14 @@ void bsg_unregister_queue(struct request_queue *q)
947 idr_remove(&bsg_minor_idr, bcd->minor); 960 idr_remove(&bsg_minor_idr, bcd->minor);
948 sysfs_remove_link(&q->kobj, "bsg"); 961 sysfs_remove_link(&q->kobj, "bsg");
949 device_unregister(bcd->class_dev); 962 device_unregister(bcd->class_dev);
950 put_device(bcd->dev);
951 bcd->class_dev = NULL; 963 bcd->class_dev = NULL;
964 kref_put(&bcd->ref, bsg_kref_release_function);
952 mutex_unlock(&bsg_mutex); 965 mutex_unlock(&bsg_mutex);
953} 966}
954EXPORT_SYMBOL_GPL(bsg_unregister_queue); 967EXPORT_SYMBOL_GPL(bsg_unregister_queue);
955 968
956int bsg_register_queue(struct request_queue *q, struct device *gdev, 969int bsg_register_queue(struct request_queue *q, struct device *parent,
957 const char *name) 970 const char *name, void (*release)(struct device *))
958{ 971{
959 struct bsg_class_device *bcd; 972 struct bsg_class_device *bcd;
960 dev_t dev; 973 dev_t dev;
@@ -965,7 +978,7 @@ int bsg_register_queue(struct request_queue *q, struct device *gdev,
965 if (name) 978 if (name)
966 devname = name; 979 devname = name;
967 else 980 else
968 devname = gdev->bus_id; 981 devname = parent->bus_id;
969 982
970 /* 983 /*
971 * we need a proper transport to send commands, not a stacked device 984 * we need a proper transport to send commands, not a stacked device
@@ -996,9 +1009,11 @@ int bsg_register_queue(struct request_queue *q, struct device *gdev,
996 1009
997 bcd->minor = minor; 1010 bcd->minor = minor;
998 bcd->queue = q; 1011 bcd->queue = q;
999 bcd->dev = get_device(gdev); 1012 bcd->parent = get_device(parent);
1013 bcd->release = release;
1014 kref_init(&bcd->ref);
1000 dev = MKDEV(bsg_major, bcd->minor); 1015 dev = MKDEV(bsg_major, bcd->minor);
1001 class_dev = device_create(bsg_class, gdev, dev, "%s", devname); 1016 class_dev = device_create(bsg_class, parent, dev, "%s", devname);
1002 if (IS_ERR(class_dev)) { 1017 if (IS_ERR(class_dev)) {
1003 ret = PTR_ERR(class_dev); 1018 ret = PTR_ERR(class_dev);
1004 goto put_dev; 1019 goto put_dev;
@@ -1017,7 +1032,7 @@ int bsg_register_queue(struct request_queue *q, struct device *gdev,
1017unregister_class_dev: 1032unregister_class_dev:
1018 device_unregister(class_dev); 1033 device_unregister(class_dev);
1019put_dev: 1034put_dev:
1020 put_device(gdev); 1035 put_device(parent);
1021remove_idr: 1036remove_idr:
1022 idr_remove(&bsg_minor_idr, minor); 1037 idr_remove(&bsg_minor_idr, minor);
1023unlock: 1038unlock:
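The bsg changes above replace raw get_device()/put_device() pairing with a kref whose release function recovers the containing bsg_class_device via container_of() and runs the driver-supplied release callback before dropping the parent. A single-threaded userspace analogue of that pattern (no locking or atomics, names illustrative):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kref { int refcount; };

static void kref_init(struct kref *k) { k->refcount = 1; }
static void kref_get(struct kref *k)  { k->refcount++; }

static void kref_put(struct kref *k, void (*release)(struct kref *))
{
	if (--k->refcount == 0)
		release(k);           /* last put runs the release hook */
}

struct bsg_class_device {
	struct kref ref;
	void (*release)(void);
};

static void bsg_kref_release_function(struct kref *kref)
{
	struct bsg_class_device *bcd =
		container_of(kref, struct bsg_class_device, ref);

	if (bcd->release)
		bcd->release();       /* optional per-device callback */
	printf("released\n");
}

static void my_release(void) { printf("driver callback ran\n"); }

int main(void)
{
	struct bsg_class_device bcd = { .release = my_release };

	kref_init(&bcd.ref);
	kref_get(&bcd.ref);
	kref_put(&bcd.ref, bsg_kref_release_function);  /* still held */
	kref_put(&bcd.ref, bsg_kref_release_function);  /* last put fires */
	return 0;
}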
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 788da9781f80..0d90ff5fd117 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -418,13 +418,12 @@ static void acpi_processor_idle(void)
418 418
419 cx = pr->power.state; 419 cx = pr->power.state;
420 if (!cx || acpi_idle_suspend) { 420 if (!cx || acpi_idle_suspend) {
421 if (pm_idle_save) 421 if (pm_idle_save) {
422 pm_idle_save(); 422 pm_idle_save(); /* enables IRQs */
423 else 423 } else {
424 acpi_safe_halt(); 424 acpi_safe_halt();
425
426 if (irqs_disabled())
427 local_irq_enable(); 425 local_irq_enable();
426 }
428 427
429 return; 428 return;
430 } 429 }
@@ -520,10 +519,12 @@ static void acpi_processor_idle(void)
520 * Use the appropriate idle routine, the one that would 519 * Use the appropriate idle routine, the one that would
521 * be used without acpi C-states. 520 * be used without acpi C-states.
522 */ 521 */
523 if (pm_idle_save) 522 if (pm_idle_save) {
524 pm_idle_save(); 523 pm_idle_save(); /* enables IRQs */
525 else 524 } else {
526 acpi_safe_halt(); 525 acpi_safe_halt();
526 local_irq_enable();
527 }
527 528
528 /* 529 /*
529 * TBD: Can't get time duration while in C1, as resumes 530 * TBD: Can't get time duration while in C1, as resumes
@@ -534,8 +535,6 @@ static void acpi_processor_idle(void)
534 * skew otherwise. 535 * skew otherwise.
535 */ 536 */
536 sleep_ticks = 0xFFFFFFFF; 537 sleep_ticks = 0xFFFFFFFF;
537 if (irqs_disabled())
538 local_irq_enable();
539 538
540 break; 539 break;
541 540
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index d28669992147..96bdb9296b07 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -436,8 +436,9 @@ static int __devinit agp_amdk7_probe(struct pci_dev *pdev,
436 system controller may experience noise due to strong drive strengths 436 system controller may experience noise due to strong drive strengths
437 */ 437 */
438 if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_7006) { 438 if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_7006) {
439 u8 cap_ptr=0;
440 struct pci_dev *gfxcard=NULL; 439 struct pci_dev *gfxcard=NULL;
440
441 cap_ptr = 0;
441 while (!cap_ptr) { 442 while (!cap_ptr) {
442 gfxcard = pci_get_class(PCI_CLASS_DISPLAY_VGA<<8, gfxcard); 443 gfxcard = pci_get_class(PCI_CLASS_DISPLAY_VGA<<8, gfxcard);
443 if (!gfxcard) { 444 if (!gfxcard) {
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index 55d7a82bd071..857b26227d87 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -967,7 +967,7 @@ int agpioc_chipset_flush_wrap(struct agp_file_private *priv)
967 return 0; 967 return 0;
968} 968}
969 969
970static int agp_ioctl(struct inode *inode, struct file *file, 970static long agp_ioctl(struct file *file,
971 unsigned int cmd, unsigned long arg) 971 unsigned int cmd, unsigned long arg)
972{ 972{
973 struct agp_file_private *curr_priv = file->private_data; 973 struct agp_file_private *curr_priv = file->private_data;
@@ -1058,7 +1058,7 @@ static const struct file_operations agp_fops =
1058 .llseek = no_llseek, 1058 .llseek = no_llseek,
1059 .read = agp_read, 1059 .read = agp_read,
1060 .write = agp_write, 1060 .write = agp_write,
1061 .ioctl = agp_ioctl, 1061 .unlocked_ioctl = agp_ioctl,
1062#ifdef CONFIG_COMPAT 1062#ifdef CONFIG_COMPAT
1063 .compat_ioctl = compat_agp_ioctl, 1063 .compat_ioctl = compat_agp_ioctl,
1064#endif 1064#endif
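The agp_fops change above is the usual BKL-avoidance conversion: .unlocked_ioctl takes no inode argument, returns long, and the handler becomes responsible for its own serialization. A compile-clean sketch of the shape, where the pthread mutex merely stands in for whatever driver-private lock applies:

#include <pthread.h>
#include <stdio.h>

struct file;                        /* opaque here, as in the kernel */

static pthread_mutex_t agp_lock = PTHREAD_MUTEX_INITIALIZER;

static long agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	long ret = 0;

	pthread_mutex_lock(&agp_lock);  /* explicit, driver-private lock */
	/* ... dispatch on cmd, copy arg in/out ... */
	pthread_mutex_unlock(&agp_lock);
	return ret;
}

int main(void)
{
	printf("ret = %ld\n", agp_ioctl(NULL, 0, 0));
	return 0;
}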
diff --git a/drivers/char/drm/ati_pcigart.c b/drivers/char/drm/ati_pcigart.c
index 141f4dfa0a11..b710426bab3e 100644
--- a/drivers/char/drm/ati_pcigart.c
+++ b/drivers/char/drm/ati_pcigart.c
@@ -167,13 +167,6 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
167 page_base += ATI_PCIGART_PAGE_SIZE; 167 page_base += ATI_PCIGART_PAGE_SIZE;
168 } 168 }
169 } 169 }
170
171 if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
172 dma_sync_single_for_device(&dev->pdev->dev,
173 bus_address,
174 max_pages * sizeof(u32),
175 PCI_DMA_TODEVICE);
176
177 ret = 1; 170 ret = 1;
178 171
179#if defined(__i386__) || defined(__x86_64__) 172#if defined(__i386__) || defined(__x86_64__)
diff --git a/drivers/char/drm/drm.h b/drivers/char/drm/drm.h
index 3a05c6d5ebe1..6874f31ca8ca 100644
--- a/drivers/char/drm/drm.h
+++ b/drivers/char/drm/drm.h
@@ -471,6 +471,7 @@ struct drm_irq_busid {
471enum drm_vblank_seq_type { 471enum drm_vblank_seq_type {
472 _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */ 472 _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
473 _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */ 473 _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
474 _DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
474 _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */ 475 _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
475 _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */ 476 _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
476 _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking */ 477 _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking */
@@ -503,6 +504,21 @@ union drm_wait_vblank {
503 struct drm_wait_vblank_reply reply; 504 struct drm_wait_vblank_reply reply;
504}; 505};
505 506
507enum drm_modeset_ctl_cmd {
508 _DRM_PRE_MODESET = 1,
509 _DRM_POST_MODESET = 2,
510};
511
512/**
513 * DRM_IOCTL_MODESET_CTL ioctl argument type
514 *
515 * \sa drmModesetCtl().
516 */
517struct drm_modeset_ctl {
518 unsigned long arg;
519 enum drm_modeset_ctl_cmd cmd;
520};
521
506/** 522/**
507 * DRM_IOCTL_AGP_ENABLE ioctl argument type. 523 * DRM_IOCTL_AGP_ENABLE ioctl argument type.
508 * 524 *
@@ -587,6 +603,7 @@ struct drm_set_version {
587#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client) 603#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client)
588#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats) 604#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
589#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version) 605#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
606#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
590 607
591#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique) 608#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
592#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth) 609#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
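Given the struct and ioctl number defined above, userspace would bracket a mode set with _DRM_PRE_MODESET and _DRM_POST_MODESET so the core can compensate for vblank counters that reset across the modeset. A hypothetical caller; the device path, header name, and error handling are illustrative:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "drm.h"               /* for struct drm_modeset_ctl */

int main(void)
{
	struct drm_modeset_ctl ctl = { .arg = 0 /* crtc */ };
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0)
		return 1;
	ctl.cmd = _DRM_PRE_MODESET;
	ioctl(fd, DRM_IOCTL_MODESET_CTL, &ctl);
	/* ... program the new mode ... */
	ctl.cmd = _DRM_POST_MODESET;
	ioctl(fd, DRM_IOCTL_MODESET_CTL, &ctl);
	close(fd);
	return 0;
}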
diff --git a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h
index 6540948d5176..ecee3547a13f 100644
--- a/drivers/char/drm/drmP.h
+++ b/drivers/char/drm/drmP.h
@@ -100,10 +100,8 @@ struct drm_device;
100#define DRIVER_HAVE_DMA 0x20 100#define DRIVER_HAVE_DMA 0x20
101#define DRIVER_HAVE_IRQ 0x40 101#define DRIVER_HAVE_IRQ 0x40
102#define DRIVER_IRQ_SHARED 0x80 102#define DRIVER_IRQ_SHARED 0x80
103#define DRIVER_IRQ_VBL 0x100
104#define DRIVER_DMA_QUEUE 0x200 103#define DRIVER_DMA_QUEUE 0x200
105#define DRIVER_FB_DMA 0x400 104#define DRIVER_FB_DMA 0x400
106#define DRIVER_IRQ_VBL2 0x800
107 105
108/***********************************************************************/ 106/***********************************************************************/
109/** \name Begin the DRM... */ 107/** \name Begin the DRM... */
@@ -379,13 +377,12 @@ struct drm_buf_entry {
379struct drm_file { 377struct drm_file {
380 int authenticated; 378 int authenticated;
381 int master; 379 int master;
382 int minor;
383 pid_t pid; 380 pid_t pid;
384 uid_t uid; 381 uid_t uid;
385 drm_magic_t magic; 382 drm_magic_t magic;
386 unsigned long ioctl_count; 383 unsigned long ioctl_count;
387 struct list_head lhead; 384 struct list_head lhead;
388 struct drm_head *head; 385 struct drm_minor *minor;
389 int remove_auth_on_close; 386 int remove_auth_on_close;
390 unsigned long lock_count; 387 unsigned long lock_count;
391 struct file *filp; 388 struct file *filp;
@@ -580,10 +577,52 @@ struct drm_driver {
580 int (*context_dtor) (struct drm_device *dev, int context); 577 int (*context_dtor) (struct drm_device *dev, int context);
581 int (*kernel_context_switch) (struct drm_device *dev, int old, 578 int (*kernel_context_switch) (struct drm_device *dev, int old,
582 int new); 579 int new);
583 void (*kernel_context_switch_unlock) (struct drm_device *dev); 580 void (*kernel_context_switch_unlock) (struct drm_device * dev);
584 int (*vblank_wait) (struct drm_device *dev, unsigned int *sequence); 581 /**
585 int (*vblank_wait2) (struct drm_device *dev, unsigned int *sequence); 582 * get_vblank_counter - get raw hardware vblank counter
586 int (*dri_library_name) (struct drm_device *dev, char *buf); 583 * @dev: DRM device
584 * @crtc: counter to fetch
585 *
586 * Driver callback for fetching a raw hardware vblank counter
587 * for @crtc. If a device doesn't have a hardware counter, the
588 * driver can simply return the value of drm_vblank_count and
589 * make the enable_vblank() and disable_vblank() hooks into no-ops,
590 * leaving interrupts enabled at all times.
591 *
592 * Wraparound handling and loss of events due to modesetting is dealt
593 * with in the DRM core code.
594 *
595 * RETURNS
596 * Raw vblank counter value.
597 */
598 u32 (*get_vblank_counter) (struct drm_device *dev, int crtc);
599
600 /**
601 * enable_vblank - enable vblank interrupt events
602 * @dev: DRM device
603 * @crtc: which irq to enable
604 *
605 * Enable vblank interrupts for @crtc. If the device doesn't have
606 * a hardware vblank counter, this routine should be a no-op, since
607 * interrupts will have to stay on to keep the count accurate.
608 *
609 * RETURNS
610 * Zero on success, appropriate errno if the given @crtc's vblank
611 * interrupt cannot be enabled.
612 */
613 int (*enable_vblank) (struct drm_device *dev, int crtc);
614
615 /**
616 * disable_vblank - disable vblank interrupt events
617 * @dev: DRM device
618 * @crtc: which irq to disable
619 *
620 * Disable vblank interrupts for @crtc. If the device doesn't have
621 * a hardware vblank counter, this routine should be a no-op, since
622 * interrupts will have to stay on to keep the count accurate.
623 */
624 void (*disable_vblank) (struct drm_device *dev, int crtc);
625 int (*dri_library_name) (struct drm_device *dev, char * buf);
587 626
588 /** 627 /**
589 * Called by \c drm_device_is_agp. Typically used to determine if a 628 * Called by \c drm_device_is_agp. Typically used to determine if a
@@ -602,7 +641,7 @@ struct drm_driver {
602 641
603 irqreturn_t(*irq_handler) (DRM_IRQ_ARGS); 642 irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
604 void (*irq_preinstall) (struct drm_device *dev); 643 void (*irq_preinstall) (struct drm_device *dev);
605 void (*irq_postinstall) (struct drm_device *dev); 644 int (*irq_postinstall) (struct drm_device *dev);
606 void (*irq_uninstall) (struct drm_device *dev); 645 void (*irq_uninstall) (struct drm_device *dev);
607 void (*reclaim_buffers) (struct drm_device *dev, 646 void (*reclaim_buffers) (struct drm_device *dev,
608 struct drm_file * file_priv); 647 struct drm_file * file_priv);
@@ -630,16 +669,19 @@ struct drm_driver {
630 struct pci_driver pci_driver; 669 struct pci_driver pci_driver;
631}; 670};
632 671
672#define DRM_MINOR_UNASSIGNED 0
673#define DRM_MINOR_LEGACY 1
674
633/** 675/**
634 * DRM head structure. This structure represent a video head on a card 676 * DRM minor structure. This structure represents a drm minor number.
635 * that may contain multiple heads. Embed one per head of these in the
636 * private drm_device structure.
637 */ 677 */
638struct drm_head { 678struct drm_minor {
639 int minor; /**< Minor device number */ 679 int index; /**< Minor device number */
680 int type; /**< Control or render */
681 dev_t device; /**< Device number for mknod */
682 struct device kdev; /**< Linux device */
640 struct drm_device *dev; 683 struct drm_device *dev;
641 struct proc_dir_entry *dev_root; /**< proc directory entry */ 684 struct proc_dir_entry *dev_root; /**< proc directory entry */
642 dev_t device; /**< Device number for mknod */
643}; 685};
644 686
645/** 687/**
@@ -647,7 +689,6 @@ struct drm_head {
647 * may contain multiple heads. 689 * may contain multiple heads.
648 */ 690 */
649struct drm_device { 691struct drm_device {
650 struct device dev; /**< Linux device */
651 char *unique; /**< Unique identifier: e.g., busid */ 692 char *unique; /**< Unique identifier: e.g., busid */
652 int unique_len; /**< Length of unique field */ 693 int unique_len; /**< Length of unique field */
653 char *devname; /**< For /proc/interrupts */ 694 char *devname; /**< For /proc/interrupts */
@@ -729,13 +770,21 @@ struct drm_device {
729 /** \name VBLANK IRQ support */ 770 /** \name VBLANK IRQ support */
730 /*@{ */ 771 /*@{ */
731 772
732 wait_queue_head_t vbl_queue; /**< VBLANK wait queue */ 773 wait_queue_head_t *vbl_queue; /**< VBLANK wait queue */
733 atomic_t vbl_received; 774 atomic_t *_vblank_count; /**< number of VBLANK interrupts (driver must alloc the right number of counters) */
734 atomic_t vbl_received2; /**< number of secondary VBLANK interrupts */
735 spinlock_t vbl_lock; 775 spinlock_t vbl_lock;
736 struct list_head vbl_sigs; /**< signal list to send on VBLANK */ 776 struct list_head *vbl_sigs; /**< signal list to send on VBLANK */
737 struct list_head vbl_sigs2; /**< signals to send on secondary VBLANK */ 777 atomic_t vbl_signal_pending; /* number of signals pending on all crtcs*/
738 unsigned int vbl_pending; 778 atomic_t *vblank_refcount; /* number of users of vblank interrupts per crtc */
779 u32 *last_vblank; /* protected by dev->vbl_lock, used */
780 /* for wraparound handling */
781 u32 *vblank_offset; /* vblanks lost during modeset */
782 int *vblank_enabled; /* so we don't call enable more than
783 once per disable */
784 u32 *vblank_premodeset; /* counter value saved before modeset */
785 struct timer_list vblank_disable_timer;
786
787 unsigned long max_vblank_count; /**< size of vblank counter register */
739 spinlock_t tasklet_lock; /**< For drm_locked_tasklet */ 788 spinlock_t tasklet_lock; /**< For drm_locked_tasklet */
740 void (*locked_tasklet_func)(struct drm_device *dev); 789 void (*locked_tasklet_func)(struct drm_device *dev);
741 790
@@ -755,6 +804,7 @@ struct drm_device {
755#ifdef __alpha__ 804#ifdef __alpha__
756 struct pci_controller *hose; 805 struct pci_controller *hose;
757#endif 806#endif
807 int num_crtcs; /**< Number of CRTCs on this device */
758 struct drm_sg_mem *sg; /**< Scatter gather memory */ 808 struct drm_sg_mem *sg; /**< Scatter gather memory */
759 void *dev_private; /**< device private data */ 809 void *dev_private; /**< device private data */
760 struct drm_sigdata sigdata; /**< For block_all_signals */ 810 struct drm_sigdata sigdata; /**< For block_all_signals */
@@ -763,7 +813,7 @@ struct drm_device {
763 struct drm_driver *driver; 813 struct drm_driver *driver;
764 drm_local_map_t *agp_buffer_map; 814 drm_local_map_t *agp_buffer_map;
765 unsigned int agp_buffer_token; 815 unsigned int agp_buffer_token;
766 struct drm_head primary; /**< primary screen head */ 816 struct drm_minor *primary; /**< render type primary screen head */
767 817
768 /** \name Drawable information */ 818 /** \name Drawable information */
769 /*@{ */ 819 /*@{ */
@@ -989,11 +1039,19 @@ extern void drm_driver_irq_preinstall(struct drm_device *dev);
989extern void drm_driver_irq_postinstall(struct drm_device *dev); 1039extern void drm_driver_irq_postinstall(struct drm_device *dev);
990extern void drm_driver_irq_uninstall(struct drm_device *dev); 1040extern void drm_driver_irq_uninstall(struct drm_device *dev);
991 1041
992extern int drm_wait_vblank(struct drm_device *dev, void *data, 1042extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
993 struct drm_file *file_priv); 1043extern int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *filp);
994extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq); 1044extern int drm_vblank_wait(struct drm_device * dev, unsigned int *vbl_seq);
995extern void drm_vbl_send_signals(struct drm_device *dev);
996extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*)); 1045extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*));
1046extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
1047extern void drm_update_vblank_count(struct drm_device *dev, int crtc);
1048extern void drm_handle_vblank(struct drm_device *dev, int crtc);
1049extern int drm_vblank_get(struct drm_device *dev, int crtc);
1050extern void drm_vblank_put(struct drm_device *dev, int crtc);
1051
1052 /* Modesetting support */
1053extern int drm_modeset_ctl(struct drm_device *dev, void *data,
1054 struct drm_file *file_priv);
997 1055
998 /* AGP/GART support (drm_agpsupport.h) */ 1056 /* AGP/GART support (drm_agpsupport.h) */
999extern struct drm_agp_head *drm_agp_init(struct drm_device *dev); 1057extern struct drm_agp_head *drm_agp_init(struct drm_device *dev);
@@ -1030,23 +1088,20 @@ extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle);
1030extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, 1088extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
1031 struct drm_driver *driver); 1089 struct drm_driver *driver);
1032extern int drm_put_dev(struct drm_device *dev); 1090extern int drm_put_dev(struct drm_device *dev);
1033extern int drm_put_head(struct drm_head *head); 1091extern int drm_put_minor(struct drm_minor **minor);
1034extern unsigned int drm_debug; 1092extern unsigned int drm_debug;
1035extern unsigned int drm_cards_limit; 1093
1036extern struct drm_head **drm_heads;
1037extern struct class *drm_class; 1094extern struct class *drm_class;
1038extern struct proc_dir_entry *drm_proc_root; 1095extern struct proc_dir_entry *drm_proc_root;
1039 1096
1097extern struct idr drm_minors_idr;
1098
1040extern drm_local_map_t *drm_getsarea(struct drm_device *dev); 1099extern drm_local_map_t *drm_getsarea(struct drm_device *dev);
1041 1100
1042 /* Proc support (drm_proc.h) */ 1101 /* Proc support (drm_proc.h) */
1043extern int drm_proc_init(struct drm_device *dev, 1102extern int drm_proc_init(struct drm_minor *minor, int minor_id,
1044 int minor, 1103 struct proc_dir_entry *root);
1045 struct proc_dir_entry *root, 1104extern int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root);
1046 struct proc_dir_entry **dev_root);
1047extern int drm_proc_cleanup(int minor,
1048 struct proc_dir_entry *root,
1049 struct proc_dir_entry *dev_root);
1050 1105
1051 /* Scatter Gather Support (drm_scatter.h) */ 1106 /* Scatter Gather Support (drm_scatter.h) */
1052extern void drm_sg_cleanup(struct drm_sg_mem * entry); 1107extern void drm_sg_cleanup(struct drm_sg_mem * entry);
@@ -1071,8 +1126,8 @@ extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
1071struct drm_sysfs_class; 1126struct drm_sysfs_class;
1072extern struct class *drm_sysfs_create(struct module *owner, char *name); 1127extern struct class *drm_sysfs_create(struct module *owner, char *name);
1073extern void drm_sysfs_destroy(void); 1128extern void drm_sysfs_destroy(void);
1074extern int drm_sysfs_device_add(struct drm_device *dev, struct drm_head *head); 1129extern int drm_sysfs_device_add(struct drm_minor *minor);
1075extern void drm_sysfs_device_remove(struct drm_device *dev); 1130extern void drm_sysfs_device_remove(struct drm_minor *minor);
1076 1131
1077/* 1132/*
1078 * Basic memory manager support (drm_mm.c) 1133 * Basic memory manager support (drm_mm.c)
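The get_vblank_counter() kerneldoc above spells out the degenerate case: a device with no hardware counter returns drm_vblank_count() and turns the enable/disable hooks into no-ops, so the interrupt, and therefore the cooked count, never stops. A sketch of that driver wiring; the foo_* names and the stubbed core call are illustrative:

#include <stdio.h>

struct drm_device { unsigned int cooked_count; };

/* stand-in for the DRM core's drm_vblank_count() */
static unsigned int drm_vblank_count(struct drm_device *dev, int crtc)
{
	return dev->cooked_count;
}

static unsigned int foo_get_vblank_counter(struct drm_device *dev, int crtc)
{
	return drm_vblank_count(dev, crtc);  /* no raw register to read */
}

static int foo_enable_vblank(struct drm_device *dev, int crtc)
{
	return 0;            /* no-op: the interrupt always stays on */
}

static void foo_disable_vblank(struct drm_device *dev, int crtc)
{
	/* no-op: masking the IRQ would let the count go stale */
}

int main(void)
{
	struct drm_device dev = { .cooked_count = 42 };

	foo_enable_vblank(&dev, 0);
	printf("vblank count: %u\n", foo_get_vblank_counter(&dev, 0));
	foo_disable_vblank(&dev, 0);
	return 0;
}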
diff --git a/drivers/char/drm/drm_agpsupport.c b/drivers/char/drm/drm_agpsupport.c
index 9468c7889ff1..aefa5ac4c0b1 100644
--- a/drivers/char/drm/drm_agpsupport.c
+++ b/drivers/char/drm/drm_agpsupport.c
@@ -122,7 +122,7 @@ EXPORT_SYMBOL(drm_agp_acquire);
122int drm_agp_acquire_ioctl(struct drm_device *dev, void *data, 122int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
123 struct drm_file *file_priv) 123 struct drm_file *file_priv)
124{ 124{
125 return drm_agp_acquire((struct drm_device *) file_priv->head->dev); 125 return drm_agp_acquire((struct drm_device *) file_priv->minor->dev);
126} 126}
127 127
128/** 128/**
diff --git a/drivers/char/drm/drm_drv.c b/drivers/char/drm/drm_drv.c
index 0e7af53c87de..fc54140551a7 100644
--- a/drivers/char/drm/drm_drv.c
+++ b/drivers/char/drm/drm_drv.c
@@ -313,35 +313,36 @@ static void drm_cleanup(struct drm_device * dev)
313 drm_ht_remove(&dev->map_hash); 313 drm_ht_remove(&dev->map_hash);
314 drm_ctxbitmap_cleanup(dev); 314 drm_ctxbitmap_cleanup(dev);
315 315
316 drm_put_head(&dev->primary); 316 drm_put_minor(&dev->primary);
317 if (drm_put_dev(dev)) 317 if (drm_put_dev(dev))
318 DRM_ERROR("Cannot unload module\n"); 318 DRM_ERROR("Cannot unload module\n");
319} 319}
320 320
321void drm_exit(struct drm_driver *driver) 321int drm_minors_cleanup(int id, void *ptr, void *data)
322{ 322{
323 int i; 323 struct drm_minor *minor = ptr;
324 struct drm_device *dev = NULL; 324 struct drm_device *dev;
325 struct drm_head *head; 325 struct drm_driver *driver = data;
326
327 dev = minor->dev;
328 if (minor->dev->driver != driver)
329 return 0;
330
331 if (minor->type != DRM_MINOR_LEGACY)
332 return 0;
326 333
334 if (dev)
335 pci_dev_put(dev->pdev);
336 drm_cleanup(dev);
337 return 1;
338}
339
340void drm_exit(struct drm_driver *driver)
341{
327 DRM_DEBUG("\n"); 342 DRM_DEBUG("\n");
328 343
329 for (i = 0; i < drm_cards_limit; i++) { 344 idr_for_each(&drm_minors_idr, &drm_minors_cleanup, driver);
330 head = drm_heads[i]; 345
331 if (!head)
332 continue;
333 if (!head->dev)
334 continue;
335 if (head->dev->driver != driver)
336 continue;
337 dev = head->dev;
338 if (dev) {
339 /* release the pci driver */
340 if (dev->pdev)
341 pci_dev_put(dev->pdev);
342 drm_cleanup(dev);
343 }
344 }
345 DRM_INFO("Module unloaded\n"); 346 DRM_INFO("Module unloaded\n");
346} 347}
347 348
@@ -357,13 +358,7 @@ static int __init drm_core_init(void)
357{ 358{
358 int ret = -ENOMEM; 359 int ret = -ENOMEM;
359 360
360 drm_cards_limit = 361 idr_init(&drm_minors_idr);
361 (drm_cards_limit <
362 DRM_MAX_MINOR + 1 ? drm_cards_limit : DRM_MAX_MINOR + 1);
363 drm_heads =
364 drm_calloc(drm_cards_limit, sizeof(*drm_heads), DRM_MEM_STUB);
365 if (!drm_heads)
366 goto err_p1;
367 362
368 if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops)) 363 if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
369 goto err_p1; 364 goto err_p1;
@@ -391,7 +386,8 @@ err_p3:
391 drm_sysfs_destroy(); 386 drm_sysfs_destroy();
392err_p2: 387err_p2:
393 unregister_chrdev(DRM_MAJOR, "drm"); 388 unregister_chrdev(DRM_MAJOR, "drm");
394 drm_free(drm_heads, sizeof(*drm_heads) * drm_cards_limit, DRM_MEM_STUB); 389
390 idr_destroy(&drm_minors_idr);
395err_p1: 391err_p1:
396 return ret; 392 return ret;
397} 393}
@@ -403,7 +399,7 @@ static void __exit drm_core_exit(void)
403 399
404 unregister_chrdev(DRM_MAJOR, "drm"); 400 unregister_chrdev(DRM_MAJOR, "drm");
405 401
406 drm_free(drm_heads, sizeof(*drm_heads) * drm_cards_limit, DRM_MEM_STUB); 402 idr_destroy(&drm_minors_idr);
407} 403}
408 404
409module_init(drm_core_init); 405module_init(drm_core_init);
@@ -452,7 +448,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
452 unsigned int cmd, unsigned long arg) 448 unsigned int cmd, unsigned long arg)
453{ 449{
454 struct drm_file *file_priv = filp->private_data; 450 struct drm_file *file_priv = filp->private_data;
455 struct drm_device *dev = file_priv->head->dev; 451 struct drm_device *dev = file_priv->minor->dev;
456 struct drm_ioctl_desc *ioctl; 452 struct drm_ioctl_desc *ioctl;
457 drm_ioctl_t *func; 453 drm_ioctl_t *func;
458 unsigned int nr = DRM_IOCTL_NR(cmd); 454 unsigned int nr = DRM_IOCTL_NR(cmd);
@@ -465,7 +461,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
465 461
466 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n", 462 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
467 task_pid_nr(current), cmd, nr, 463 task_pid_nr(current), cmd, nr,
468 (long)old_encode_dev(file_priv->head->device), 464 (long)old_encode_dev(file_priv->minor->device),
469 file_priv->authenticated); 465 file_priv->authenticated);
470 466
471 if ((nr >= DRM_CORE_IOCTL_COUNT) && 467 if ((nr >= DRM_CORE_IOCTL_COUNT) &&
diff --git a/drivers/char/drm/drm_fops.c b/drivers/char/drm/drm_fops.c
index f09d4b5002b0..68f0da801ed8 100644
--- a/drivers/char/drm/drm_fops.c
+++ b/drivers/char/drm/drm_fops.c
@@ -129,16 +129,15 @@ static int drm_setup(struct drm_device * dev)
129int drm_open(struct inode *inode, struct file *filp) 129int drm_open(struct inode *inode, struct file *filp)
130{ 130{
131 struct drm_device *dev = NULL; 131 struct drm_device *dev = NULL;
132 int minor = iminor(inode); 132 int minor_id = iminor(inode);
133 struct drm_minor *minor;
133 int retcode = 0; 134 int retcode = 0;
134 135
135 if (!((minor >= 0) && (minor < drm_cards_limit))) 136 minor = idr_find(&drm_minors_idr, minor_id);
137 if (!minor)
136 return -ENODEV; 138 return -ENODEV;
137 139
138 if (!drm_heads[minor]) 140 if (!(dev = minor->dev))
139 return -ENODEV;
140
141 if (!(dev = drm_heads[minor]->dev))
142 return -ENODEV; 141 return -ENODEV;
143 142
144 retcode = drm_open_helper(inode, filp, dev); 143 retcode = drm_open_helper(inode, filp, dev);
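The open path now resolves a character-device minor number to its object with one idr lookup instead of a bounds check against drm_cards_limit plus an array index. Condensed into a standalone sketch (error handling trimmed to the two cases the patch keeps):

    /* Sketch of the new lookup idiom in drm_open()/drm_stub_open(). */
    static int example_open(struct inode *inode, struct file *filp)
    {
            struct drm_minor *minor;

            /* iminor() extracts the minor number from the inode's dev_t. */
            minor = idr_find(&drm_minors_idr, iminor(inode));
            if (!minor || !minor->dev)
                    return -ENODEV;

            /* ... continue with minor->dev exactly as before ... */
            return 0;
    }

idr_find() returns NULL for ids that were never allocated, which subsumes the old out-of-range check, and it only yields the real pointer after drm_get_minor() has idr_replace()d the initial NULL placeholder.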
@@ -168,19 +167,18 @@ EXPORT_SYMBOL(drm_open);
168int drm_stub_open(struct inode *inode, struct file *filp) 167int drm_stub_open(struct inode *inode, struct file *filp)
169{ 168{
170 struct drm_device *dev = NULL; 169 struct drm_device *dev = NULL;
171 int minor = iminor(inode); 170 struct drm_minor *minor;
171 int minor_id = iminor(inode);
172 int err = -ENODEV; 172 int err = -ENODEV;
173 const struct file_operations *old_fops; 173 const struct file_operations *old_fops;
174 174
175 DRM_DEBUG("\n"); 175 DRM_DEBUG("\n");
176 176
177 if (!((minor >= 0) && (minor < drm_cards_limit))) 177 minor = idr_find(&drm_minors_idr, minor_id);
178 return -ENODEV; 178 if (!minor)
179
180 if (!drm_heads[minor])
181 return -ENODEV; 179 return -ENODEV;
182 180
183 if (!(dev = drm_heads[minor]->dev)) 181 if (!(dev = minor->dev))
184 return -ENODEV; 182 return -ENODEV;
185 183
186 old_fops = filp->f_op; 184 old_fops = filp->f_op;
@@ -225,7 +223,7 @@ static int drm_cpu_valid(void)
225static int drm_open_helper(struct inode *inode, struct file *filp, 223static int drm_open_helper(struct inode *inode, struct file *filp,
226 struct drm_device * dev) 224 struct drm_device * dev)
227{ 225{
228 int minor = iminor(inode); 226 int minor_id = iminor(inode);
229 struct drm_file *priv; 227 struct drm_file *priv;
230 int ret; 228 int ret;
231 229
@@ -234,7 +232,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
234 if (!drm_cpu_valid()) 232 if (!drm_cpu_valid())
235 return -EINVAL; 233 return -EINVAL;
236 234
237 DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor); 235 DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);
238 236
239 priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES); 237 priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES);
240 if (!priv) 238 if (!priv)
@@ -245,8 +243,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
245 priv->filp = filp; 243 priv->filp = filp;
246 priv->uid = current->euid; 244 priv->uid = current->euid;
247 priv->pid = task_pid_nr(current); 245 priv->pid = task_pid_nr(current);
248 priv->minor = minor; 246 priv->minor = idr_find(&drm_minors_idr, minor_id);
249 priv->head = drm_heads[minor];
250 priv->ioctl_count = 0; 247 priv->ioctl_count = 0;
251 /* for compatibility root is always authenticated */ 248 /* for compatibility root is always authenticated */
252 priv->authenticated = capable(CAP_SYS_ADMIN); 249 priv->authenticated = capable(CAP_SYS_ADMIN);
@@ -297,11 +294,11 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
297int drm_fasync(int fd, struct file *filp, int on) 294int drm_fasync(int fd, struct file *filp, int on)
298{ 295{
299 struct drm_file *priv = filp->private_data; 296 struct drm_file *priv = filp->private_data;
300 struct drm_device *dev = priv->head->dev; 297 struct drm_device *dev = priv->minor->dev;
301 int retcode; 298 int retcode;
302 299
303 DRM_DEBUG("fd = %d, device = 0x%lx\n", fd, 300 DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
304 (long)old_encode_dev(priv->head->device)); 301 (long)old_encode_dev(priv->minor->device));
305 retcode = fasync_helper(fd, filp, on, &dev->buf_async); 302 retcode = fasync_helper(fd, filp, on, &dev->buf_async);
306 if (retcode < 0) 303 if (retcode < 0)
307 return retcode; 304 return retcode;
@@ -324,7 +321,7 @@ EXPORT_SYMBOL(drm_fasync);
324int drm_release(struct inode *inode, struct file *filp) 321int drm_release(struct inode *inode, struct file *filp)
325{ 322{
326 struct drm_file *file_priv = filp->private_data; 323 struct drm_file *file_priv = filp->private_data;
327 struct drm_device *dev = file_priv->head->dev; 324 struct drm_device *dev = file_priv->minor->dev;
328 int retcode = 0; 325 int retcode = 0;
329 unsigned long irqflags; 326 unsigned long irqflags;
330 327
@@ -341,14 +338,14 @@ int drm_release(struct inode *inode, struct file *filp)
341 338
342 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n", 339 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
343 task_pid_nr(current), 340 task_pid_nr(current),
344 (long)old_encode_dev(file_priv->head->device), 341 (long)old_encode_dev(file_priv->minor->device),
345 dev->open_count); 342 dev->open_count);
346 343
347 if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) { 344 if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) {
348 if (drm_i_have_hw_lock(dev, file_priv)) { 345 if (drm_i_have_hw_lock(dev, file_priv)) {
349 dev->driver->reclaim_buffers_locked(dev, file_priv); 346 dev->driver->reclaim_buffers_locked(dev, file_priv);
350 } else { 347 } else {
351 unsigned long _end=jiffies + 3*DRM_HZ; 348 unsigned long endtime = jiffies + 3 * DRM_HZ;
352 int locked = 0; 349 int locked = 0;
353 350
354 drm_idlelock_take(&dev->lock); 351 drm_idlelock_take(&dev->lock);
@@ -366,7 +363,7 @@ int drm_release(struct inode *inode, struct file *filp)
366 if (locked) 363 if (locked)
367 break; 364 break;
368 schedule(); 365 schedule();
369 } while (!time_after_eq(jiffies, _end)); 366 } while (!time_after_eq(jiffies, endtime));
370 367
371 if (!locked) { 368 if (!locked) {
372 DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n" 369 DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
diff --git a/drivers/char/drm/drm_irq.c b/drivers/char/drm/drm_irq.c
index 089c015c01d1..286f9d61e7d5 100644
--- a/drivers/char/drm/drm_irq.c
+++ b/drivers/char/drm/drm_irq.c
@@ -71,6 +71,117 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
71 return 0; 71 return 0;
72} 72}
73 73
74static void vblank_disable_fn(unsigned long arg)
75{
76 struct drm_device *dev = (struct drm_device *)arg;
77 unsigned long irqflags;
78 int i;
79
80 for (i = 0; i < dev->num_crtcs; i++) {
81 spin_lock_irqsave(&dev->vbl_lock, irqflags);
82 if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
83 dev->vblank_enabled[i]) {
84 dev->driver->disable_vblank(dev, i);
85 dev->vblank_enabled[i] = 0;
86 }
87 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
88 }
89}
90
91static void drm_vblank_cleanup(struct drm_device *dev)
92{
93 /* Bail if the driver didn't call drm_vblank_init() */
94 if (dev->num_crtcs == 0)
95 return;
96
97 del_timer(&dev->vblank_disable_timer);
98
99 vblank_disable_fn((unsigned long)dev);
100
101 drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs,
102 DRM_MEM_DRIVER);
103 drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs,
104 DRM_MEM_DRIVER);
105 drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) *
106 dev->num_crtcs, DRM_MEM_DRIVER);
107 drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) *
108 dev->num_crtcs, DRM_MEM_DRIVER);
109 drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) *
110 dev->num_crtcs, DRM_MEM_DRIVER);
111 drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs,
112 DRM_MEM_DRIVER);
113 drm_free(dev->vblank_premodeset, sizeof(*dev->vblank_premodeset) *
114 dev->num_crtcs, DRM_MEM_DRIVER);
115 drm_free(dev->vblank_offset, sizeof(*dev->vblank_offset) * dev->num_crtcs,
116 DRM_MEM_DRIVER);
117
118 dev->num_crtcs = 0;
119}
120
121int drm_vblank_init(struct drm_device *dev, int num_crtcs)
122{
123 int i, ret = -ENOMEM;
124
125 setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
126 (unsigned long)dev);
127 spin_lock_init(&dev->vbl_lock);
128 atomic_set(&dev->vbl_signal_pending, 0);
129 dev->num_crtcs = num_crtcs;
130
131 dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs,
132 DRM_MEM_DRIVER);
133 if (!dev->vbl_queue)
134 goto err;
135
136 dev->vbl_sigs = drm_alloc(sizeof(struct list_head) * num_crtcs,
137 DRM_MEM_DRIVER);
138 if (!dev->vbl_sigs)
139 goto err;
140
141 dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs,
142 DRM_MEM_DRIVER);
143 if (!dev->_vblank_count)
144 goto err;
145
146 dev->vblank_refcount = drm_alloc(sizeof(atomic_t) * num_crtcs,
147 DRM_MEM_DRIVER);
148 if (!dev->vblank_refcount)
149 goto err;
150
151 dev->vblank_enabled = drm_calloc(num_crtcs, sizeof(int),
152 DRM_MEM_DRIVER);
153 if (!dev->vblank_enabled)
154 goto err;
155
156 dev->last_vblank = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
157 if (!dev->last_vblank)
158 goto err;
159
160 dev->vblank_premodeset = drm_calloc(num_crtcs, sizeof(u32),
161 DRM_MEM_DRIVER);
162 if (!dev->vblank_premodeset)
163 goto err;
164
165 dev->vblank_offset = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
166 if (!dev->vblank_offset)
167 goto err;
168
169 /* Zero per-crtc vblank stuff */
170 for (i = 0; i < num_crtcs; i++) {
171 init_waitqueue_head(&dev->vbl_queue[i]);
172 INIT_LIST_HEAD(&dev->vbl_sigs[i]);
173 atomic_set(&dev->_vblank_count[i], 0);
174 atomic_set(&dev->vblank_refcount[i], 0);
175 }
176
177 return 0;
178
179err:
180 drm_vblank_cleanup(dev);
181 return ret;
182}
183EXPORT_SYMBOL(drm_vblank_init);
184
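drm_vblank_init() moves the per-CRTC bookkeeping allocation out of drm_irq_install() and into the driver's hands, since only the driver knows its CRTC count. A hedged sketch of how a driver load hook might use the new entry point (the function name is invented; only drm_vblank_init() comes from this patch):

    static int example_driver_load(struct drm_device *dev, unsigned long flags)
    {
            int ret;

            /* Two CRTCs: sizes vbl_queue[], vbl_sigs[], the counters, etc. */
            ret = drm_vblank_init(dev, 2);
            if (ret)
                    return ret;     /* drm_vblank_init() cleaned up after itself */

            /* ... remaining driver initialization ... */
            return 0;
    }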
74/** 185/**
75 * Install IRQ handler. 186 * Install IRQ handler.
76 * 187 *
@@ -109,17 +220,6 @@ static int drm_irq_install(struct drm_device * dev)
109 220
110 DRM_DEBUG("irq=%d\n", dev->irq); 221 DRM_DEBUG("irq=%d\n", dev->irq);
111 222
112 if (drm_core_check_feature(dev, DRIVER_IRQ_VBL)) {
113 init_waitqueue_head(&dev->vbl_queue);
114
115 spin_lock_init(&dev->vbl_lock);
116
117 INIT_LIST_HEAD(&dev->vbl_sigs);
118 INIT_LIST_HEAD(&dev->vbl_sigs2);
119
120 dev->vbl_pending = 0;
121 }
122
123 /* Before installing handler */ 223 /* Before installing handler */
124 dev->driver->irq_preinstall(dev); 224 dev->driver->irq_preinstall(dev);
125 225
@@ -137,9 +237,14 @@ static int drm_irq_install(struct drm_device * dev)
137 } 237 }
138 238
139 /* After installing handler */ 239 /* After installing handler */
140 dev->driver->irq_postinstall(dev); 240 ret = dev->driver->irq_postinstall(dev);
241 if (ret < 0) {
242 mutex_lock(&dev->struct_mutex);
243 dev->irq_enabled = 0;
244 mutex_unlock(&dev->struct_mutex);
245 }
141 246
142 return 0; 247 return ret;
143} 248}
144 249
145/** 250/**
@@ -170,6 +275,8 @@ int drm_irq_uninstall(struct drm_device * dev)
170 275
171 free_irq(dev->irq, dev); 276 free_irq(dev->irq, dev);
172 277
278 drm_vblank_cleanup(dev);
279
173 dev->locked_tasklet_func = NULL; 280 dev->locked_tasklet_func = NULL;
174 281
175 return 0; 282 return 0;
@@ -214,6 +321,148 @@ int drm_control(struct drm_device *dev, void *data,
214} 321}
215 322
216/** 323/**
324 * drm_vblank_count - retrieve "cooked" vblank counter value
325 * @dev: DRM device
326 * @crtc: which counter to retrieve
327 *
328 * Fetches the "cooked" vblank count value that represents the number of
329 * vblank events since the system was booted, including lost events due to
330 * modesetting activity.
331 */
332u32 drm_vblank_count(struct drm_device *dev, int crtc)
333{
334 return atomic_read(&dev->_vblank_count[crtc]) +
335 dev->vblank_offset[crtc];
336}
337EXPORT_SYMBOL(drm_vblank_count);
338
339/**
340 * drm_update_vblank_count - update the master vblank counter
341 * @dev: DRM device
342 * @crtc: counter to update
343 *
344 * Call back into the driver to update the appropriate vblank counter
345 * (specified by @crtc). Deal with wraparound, if it occurred, and
346 * update the last read value so we can deal with wraparound on the next
347 * call if necessary.
348 */
349void drm_update_vblank_count(struct drm_device *dev, int crtc)
350{
351 unsigned long irqflags;
352 u32 cur_vblank, diff;
353
354 /*
355 * Interrupts were disabled prior to this call, so deal with counter
356 * wrap if needed.
357 * NOTE! It's possible we lost a full dev->max_vblank_count events
358 * here if the register is small or we had vblank interrupts off for
359 * a long time.
360 */
361 cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
362 spin_lock_irqsave(&dev->vbl_lock, irqflags);
363 if (cur_vblank < dev->last_vblank[crtc]) {
364 diff = dev->max_vblank_count -
365 dev->last_vblank[crtc];
366 diff += cur_vblank;
367 } else {
368 diff = cur_vblank - dev->last_vblank[crtc];
369 }
370 dev->last_vblank[crtc] = cur_vblank;
371 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
372
373 atomic_add(diff, &dev->_vblank_count[crtc]);
374}
375EXPORT_SYMBOL(drm_update_vblank_count);
376
377/**
378 * drm_vblank_get - get a reference count on vblank events
379 * @dev: DRM device
380 * @crtc: which CRTC to own
381 *
382 * Acquire a reference count on vblank events to avoid having them disabled
383 * while in use. Note callers will probably want to update the master counter
384 * using drm_update_vblank_count() above before calling this routine so that
385 * wakeups occur on the right vblank event.
386 *
387 * RETURNS
388 * Zero on success, nonzero on failure.
389 */
390int drm_vblank_get(struct drm_device *dev, int crtc)
391{
392 unsigned long irqflags;
393 int ret = 0;
394
395 spin_lock_irqsave(&dev->vbl_lock, irqflags);
396 /* Going from 0->1 means we have to enable interrupts again */
397 if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 &&
398 !dev->vblank_enabled[crtc]) {
399 ret = dev->driver->enable_vblank(dev, crtc);
400 if (ret)
401 atomic_dec(&dev->vblank_refcount[crtc]);
402 else
403 dev->vblank_enabled[crtc] = 1;
404 }
405 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
406
407 return ret;
408}
409EXPORT_SYMBOL(drm_vblank_get);
410
411/**
412 * drm_vblank_put - give up ownership of vblank events
413 * @dev: DRM device
414 * @crtc: which counter to give up
415 *
416 * Release ownership of a given vblank counter, turning off interrupts
417 * if possible.
418 */
419void drm_vblank_put(struct drm_device *dev, int crtc)
420{
421 /* Last user schedules interrupt disable */
422 if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
423 mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ);
424}
425EXPORT_SYMBOL(drm_vblank_put);
426
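Together these two functions turn vblank interrupts into a refcounted resource: the 0->1 transition in drm_vblank_get() enables the interrupt, and the final drm_vblank_put() arms the disable timer (5*DRM_HZ) rather than disabling immediately, so back-to-back waiters do not thrash the enable/disable path. A sketch of the intended pairing, assuming a caller that just wants the next vblank:

    static int wait_one_vblank(struct drm_device *dev, int crtc)
    {
            u32 target;
            int ret;

            ret = drm_vblank_get(dev, crtc);   /* 0->1 enables the interrupt */
            if (ret)
                    return ret;

            target = drm_vblank_count(dev, crtc) + 1;
            /* ... sleep on dev->vbl_queue[crtc] until the count reaches target ... */

            drm_vblank_put(dev, crtc);         /* last put arms the disable timer */
            return 0;
    }

Five seconds later, vblank_disable_fn() finds a zero refcount and switches the interrupt off.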
427/**
428 * drm_modeset_ctl - handle vblank event counter changes across mode switch
429 * @DRM_IOCTL_ARGS: standard ioctl arguments
430 *
431 * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
432 * ioctls around modesetting so that any lost vblank events are accounted for.
433 */
434int drm_modeset_ctl(struct drm_device *dev, void *data,
435 struct drm_file *file_priv)
436{
437 struct drm_modeset_ctl *modeset = data;
438 int crtc, ret = 0;
439 u32 new;
440
441 crtc = modeset->arg;
442 if (crtc >= dev->num_crtcs) {
443 ret = -EINVAL;
444 goto out;
445 }
446
447 switch (modeset->cmd) {
448 case _DRM_PRE_MODESET:
449 dev->vblank_premodeset[crtc] =
450 dev->driver->get_vblank_counter(dev, crtc);
451 break;
452 case _DRM_POST_MODESET:
453 new = dev->driver->get_vblank_counter(dev, crtc);
454 dev->vblank_offset[crtc] = dev->vblank_premodeset[crtc] - new;
455 break;
456 default:
457 ret = -EINVAL;
458 break;
459 }
460
461out:
462 return ret;
463}
464
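The PRE/POST pair exists because the hardware counter may stall or reset to zero while the pipe is off during a mode switch; folding the difference into vblank_offset[] keeps the cooked value monotonic. A worked example of the arithmetic above, assuming the raw counter restarts at zero:

    u32 premodeset = 1000;           /* saved at _DRM_PRE_MODESET      */
    u32 post = 4;                    /* read back at _DRM_POST_MODESET */
    u32 offset = premodeset - post;  /* 996                            */

    /*
     * drm_vblank_count() now reports _vblank_count + 996, so the value
     * userspace sees resumes near 1000 instead of dropping back to 4.
     */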
465/**
217 * Wait for VBLANK. 466 * Wait for VBLANK.
218 * 467 *
219 * \param inode device inode. 468 * \param inode device inode.
@@ -232,12 +481,13 @@ int drm_control(struct drm_device *dev, void *data,
232 * 481 *
233 * If a signal is not requested, then calls vblank_wait(). 482 * If a signal is not requested, then calls vblank_wait().
234 */ 483 */
235int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv) 484int drm_wait_vblank(struct drm_device *dev, void *data,
485 struct drm_file *file_priv)
236{ 486{
237 union drm_wait_vblank *vblwait = data; 487 union drm_wait_vblank *vblwait = data;
238 struct timeval now; 488 struct timeval now;
239 int ret = 0; 489 int ret = 0;
240 unsigned int flags, seq; 490 unsigned int flags, seq, crtc;
241 491
242 if ((!dev->irq) || (!dev->irq_enabled)) 492 if ((!dev->irq) || (!dev->irq_enabled))
243 return -EINVAL; 493 return -EINVAL;
@@ -251,13 +501,13 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
251 } 501 }
252 502
253 flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK; 503 flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
504 crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
254 505
255 if (!drm_core_check_feature(dev, (flags & _DRM_VBLANK_SECONDARY) ? 506 if (crtc >= dev->num_crtcs)
256 DRIVER_IRQ_VBL2 : DRIVER_IRQ_VBL))
257 return -EINVAL; 507 return -EINVAL;
258 508
259 seq = atomic_read((flags & _DRM_VBLANK_SECONDARY) ? &dev->vbl_received2 509 drm_update_vblank_count(dev, crtc);
260 : &dev->vbl_received); 510 seq = drm_vblank_count(dev, crtc);
261 511
262 switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) { 512 switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
263 case _DRM_VBLANK_RELATIVE: 513 case _DRM_VBLANK_RELATIVE:
@@ -276,8 +526,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
276 526
277 if (flags & _DRM_VBLANK_SIGNAL) { 527 if (flags & _DRM_VBLANK_SIGNAL) {
278 unsigned long irqflags; 528 unsigned long irqflags;
279 struct list_head *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY) 529 struct list_head *vbl_sigs = &dev->vbl_sigs[crtc];
280 ? &dev->vbl_sigs2 : &dev->vbl_sigs;
281 struct drm_vbl_sig *vbl_sig; 530 struct drm_vbl_sig *vbl_sig;
282 531
283 spin_lock_irqsave(&dev->vbl_lock, irqflags); 532 spin_lock_irqsave(&dev->vbl_lock, irqflags);
@@ -298,22 +547,26 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
298 } 547 }
299 } 548 }
300 549
301 if (dev->vbl_pending >= 100) { 550 if (atomic_read(&dev->vbl_signal_pending) >= 100) {
302 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 551 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
303 return -EBUSY; 552 return -EBUSY;
304 } 553 }
305 554
306 dev->vbl_pending++;
307
308 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 555 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
309 556
310 if (! 557 vbl_sig = drm_calloc(1, sizeof(struct drm_vbl_sig),
311 (vbl_sig = 558 DRM_MEM_DRIVER);
312 drm_alloc(sizeof(struct drm_vbl_sig), DRM_MEM_DRIVER))) { 559 if (!vbl_sig)
313 return -ENOMEM; 560 return -ENOMEM;
561
562 ret = drm_vblank_get(dev, crtc);
563 if (ret) {
564 drm_free(vbl_sig, sizeof(struct drm_vbl_sig),
565 DRM_MEM_DRIVER);
566 return ret;
314 } 567 }
315 568
316 memset((void *)vbl_sig, 0, sizeof(*vbl_sig)); 569 atomic_inc(&dev->vbl_signal_pending);
317 570
318 vbl_sig->sequence = vblwait->request.sequence; 571 vbl_sig->sequence = vblwait->request.sequence;
319 vbl_sig->info.si_signo = vblwait->request.signal; 572 vbl_sig->info.si_signo = vblwait->request.signal;
@@ -327,17 +580,20 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
327 580
328 vblwait->reply.sequence = seq; 581 vblwait->reply.sequence = seq;
329 } else { 582 } else {
330 if (flags & _DRM_VBLANK_SECONDARY) { 583 unsigned long cur_vblank;
331 if (dev->driver->vblank_wait2) 584
332 ret = dev->driver->vblank_wait2(dev, &vblwait->request.sequence); 585 ret = drm_vblank_get(dev, crtc);
333 } else if (dev->driver->vblank_wait) 586 if (ret)
334 ret = 587 return ret;
335 dev->driver->vblank_wait(dev, 588 DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
336 &vblwait->request.sequence); 589 (((cur_vblank = drm_vblank_count(dev, crtc))
337 590 - vblwait->request.sequence) <= (1 << 23)));
591 drm_vblank_put(dev, crtc);
338 do_gettimeofday(&now); 592 do_gettimeofday(&now);
593
339 vblwait->reply.tval_sec = now.tv_sec; 594 vblwait->reply.tval_sec = now.tv_sec;
340 vblwait->reply.tval_usec = now.tv_usec; 595 vblwait->reply.tval_usec = now.tv_usec;
596 vblwait->reply.sequence = cur_vblank;
341 } 597 }
342 598
343 done: 599 done:
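The DRM_WAIT_ON condition above compares sequence numbers with unsigned subtraction, so the test survives u32 counter wrap: (a - b) <= (1 << 23) treats a as "at or past" b whenever it leads by less than 2^23 modulo 2^32. A standalone userspace illustration of the same comparison:

    #include <stdbool.h>
    #include <stdint.h>

    /* True once cur has reached req on a circular 32-bit counter. */
    static bool vblank_passed(uint32_t cur, uint32_t req)
    {
            return (cur - req) <= (1u << 23);
    }

    /* vblank_passed(5, 0xfffffffeu) is true: 5 - 0xfffffffe wraps to 7. */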
@@ -348,44 +604,57 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
348 * Send the VBLANK signals. 604 * Send the VBLANK signals.
349 * 605 *
350 * \param dev DRM device. 606 * \param dev DRM device.
607 * \param crtc CRTC where the vblank event occurred
351 * 608 *
352 * Sends a signal for each task in drm_device::vbl_sigs and empties the list. 609 * Sends a signal for each task in drm_device::vbl_sigs and empties the list.
353 * 610 *
354 * If a signal is not requested, then calls vblank_wait(). 611 * If a signal is not requested, then calls vblank_wait().
355 */ 612 */
356void drm_vbl_send_signals(struct drm_device * dev) 613static void drm_vbl_send_signals(struct drm_device * dev, int crtc)
357{ 614{
615 struct drm_vbl_sig *vbl_sig, *tmp;
616 struct list_head *vbl_sigs;
617 unsigned int vbl_seq;
358 unsigned long flags; 618 unsigned long flags;
359 int i;
360 619
361 spin_lock_irqsave(&dev->vbl_lock, flags); 620 spin_lock_irqsave(&dev->vbl_lock, flags);
362 621
363 for (i = 0; i < 2; i++) { 622 vbl_sigs = &dev->vbl_sigs[crtc];
364 struct drm_vbl_sig *vbl_sig, *tmp; 623 vbl_seq = drm_vblank_count(dev, crtc);
365 struct list_head *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs;
366 unsigned int vbl_seq = atomic_read(i ? &dev->vbl_received2 :
367 &dev->vbl_received);
368 624
369 list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) { 625 list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
370 if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) { 626 if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
371 vbl_sig->info.si_code = vbl_seq; 627 vbl_sig->info.si_code = vbl_seq;
372 send_sig_info(vbl_sig->info.si_signo, 628 send_sig_info(vbl_sig->info.si_signo,
373 &vbl_sig->info, vbl_sig->task); 629 &vbl_sig->info, vbl_sig->task);
374 630
375 list_del(&vbl_sig->head); 631 list_del(&vbl_sig->head);
376 632
377 drm_free(vbl_sig, sizeof(*vbl_sig), 633 drm_free(vbl_sig, sizeof(*vbl_sig),
378 DRM_MEM_DRIVER); 634 DRM_MEM_DRIVER);
379 635 atomic_dec(&dev->vbl_signal_pending);
380 dev->vbl_pending--; 636 drm_vblank_put(dev, crtc);
381 } 637 }
382 }
383 } 638 }
384 639
385 spin_unlock_irqrestore(&dev->vbl_lock, flags); 640 spin_unlock_irqrestore(&dev->vbl_lock, flags);
386} 641}
387 642
388EXPORT_SYMBOL(drm_vbl_send_signals); 643/**
644 * drm_handle_vblank - handle a vblank event
645 * @dev: DRM device
646 * @crtc: where this event occurred
647 *
648 * Drivers should call this routine in their vblank interrupt handlers to
649 * update the vblank counter and send any signals that may be pending.
650 */
651void drm_handle_vblank(struct drm_device *dev, int crtc)
652{
653 drm_update_vblank_count(dev, crtc);
654 DRM_WAKEUP(&dev->vbl_queue[crtc]);
655 drm_vbl_send_signals(dev, crtc);
656}
657EXPORT_SYMBOL(drm_handle_vblank);
389 658
390/** 659/**
391 * Tasklet wrapper function. 660 * Tasklet wrapper function.
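drm_handle_vblank() collapses what drivers previously open-coded into one call per CRTC: bump the counter, wake sleepers on vbl_queue[crtc], deliver queued signals. A hedged sketch of an interrupt handler built on it (the status-register helper and bit names are invented for illustration):

    static irqreturn_t example_irq_handler(int irq, void *arg)
    {
            struct drm_device *dev = arg;
            u32 status = read_irq_status(dev);      /* hypothetical helper */

            if (status & PIPE_A_VBLANK)             /* hypothetical bit */
                    drm_handle_vblank(dev, 0);
            if (status & PIPE_B_VBLANK)             /* hypothetical bit */
                    drm_handle_vblank(dev, 1);

            return status ? IRQ_HANDLED : IRQ_NONE;
    }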
diff --git a/drivers/char/drm/drm_proc.c b/drivers/char/drm/drm_proc.c
index d9b560fe9bbe..93b1e0475c93 100644
--- a/drivers/char/drm/drm_proc.c
+++ b/drivers/char/drm/drm_proc.c
@@ -87,34 +87,35 @@ static struct drm_proc_list {
87 * "/proc/dri/%minor%/", and each entry in proc_list as 87 * "/proc/dri/%minor%/", and each entry in proc_list as
88 * "/proc/dri/%minor%/%name%". 88 * "/proc/dri/%minor%/%name%".
89 */ 89 */
90int drm_proc_init(struct drm_device * dev, int minor, 90int drm_proc_init(struct drm_minor *minor, int minor_id,
91 struct proc_dir_entry *root, struct proc_dir_entry **dev_root) 91 struct proc_dir_entry *root)
92{ 92{
93 struct proc_dir_entry *ent; 93 struct proc_dir_entry *ent;
94 int i, j; 94 int i, j;
95 char name[64]; 95 char name[64];
96 96
97 sprintf(name, "%d", minor); 97 sprintf(name, "%d", minor_id);
98 *dev_root = proc_mkdir(name, root); 98 minor->dev_root = proc_mkdir(name, root);
99 if (!*dev_root) { 99 if (!minor->dev_root) {
100 DRM_ERROR("Cannot create /proc/dri/%s\n", name); 100 DRM_ERROR("Cannot create /proc/dri/%s\n", name);
101 return -1; 101 return -1;
102 } 102 }
103 103
104 for (i = 0; i < DRM_PROC_ENTRIES; i++) { 104 for (i = 0; i < DRM_PROC_ENTRIES; i++) {
105 ent = create_proc_entry(drm_proc_list[i].name, 105 ent = create_proc_entry(drm_proc_list[i].name,
106 S_IFREG | S_IRUGO, *dev_root); 106 S_IFREG | S_IRUGO, minor->dev_root);
107 if (!ent) { 107 if (!ent) {
108 DRM_ERROR("Cannot create /proc/dri/%s/%s\n", 108 DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
109 name, drm_proc_list[i].name); 109 name, drm_proc_list[i].name);
110 for (j = 0; j < i; j++) 110 for (j = 0; j < i; j++)
111 remove_proc_entry(drm_proc_list[i].name, 111 remove_proc_entry(drm_proc_list[i].name,
112 *dev_root); 112 minor->dev_root);
113 remove_proc_entry(name, root); 113 remove_proc_entry(name, root);
114 minor->dev_root = NULL;
114 return -1; 115 return -1;
115 } 116 }
116 ent->read_proc = drm_proc_list[i].f; 117 ent->read_proc = drm_proc_list[i].f;
117 ent->data = dev; 118 ent->data = minor;
118 } 119 }
119 120
120 return 0; 121 return 0;
@@ -130,18 +131,17 @@ int drm_proc_init(struct drm_device * dev, int minor,
130 * 131 *
131 * Remove all proc entries created by proc_init(). 132 * Remove all proc entries created by proc_init().
132 */ 133 */
133int drm_proc_cleanup(int minor, struct proc_dir_entry *root, 134int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
134 struct proc_dir_entry *dev_root)
135{ 135{
136 int i; 136 int i;
137 char name[64]; 137 char name[64];
138 138
139 if (!root || !dev_root) 139 if (!root || !minor->dev_root)
140 return 0; 140 return 0;
141 141
142 for (i = 0; i < DRM_PROC_ENTRIES; i++) 142 for (i = 0; i < DRM_PROC_ENTRIES; i++)
143 remove_proc_entry(drm_proc_list[i].name, dev_root); 143 remove_proc_entry(drm_proc_list[i].name, minor->dev_root);
144 sprintf(name, "%d", minor); 144 sprintf(name, "%d", minor->index);
145 remove_proc_entry(name, root); 145 remove_proc_entry(name, root);
146 146
147 return 0; 147 return 0;
@@ -163,7 +163,8 @@ int drm_proc_cleanup(int minor, struct proc_dir_entry *root,
163static int drm_name_info(char *buf, char **start, off_t offset, int request, 163static int drm_name_info(char *buf, char **start, off_t offset, int request,
164 int *eof, void *data) 164 int *eof, void *data)
165{ 165{
166 struct drm_device *dev = (struct drm_device *) data; 166 struct drm_minor *minor = (struct drm_minor *) data;
167 struct drm_device *dev = minor->dev;
167 int len = 0; 168 int len = 0;
168 169
169 if (offset > DRM_PROC_LIMIT) { 170 if (offset > DRM_PROC_LIMIT) {
@@ -205,7 +206,8 @@ static int drm_name_info(char *buf, char **start, off_t offset, int request,
205static int drm__vm_info(char *buf, char **start, off_t offset, int request, 206static int drm__vm_info(char *buf, char **start, off_t offset, int request,
206 int *eof, void *data) 207 int *eof, void *data)
207{ 208{
208 struct drm_device *dev = (struct drm_device *) data; 209 struct drm_minor *minor = (struct drm_minor *) data;
210 struct drm_device *dev = minor->dev;
209 int len = 0; 211 int len = 0;
210 struct drm_map *map; 212 struct drm_map *map;
211 struct drm_map_list *r_list; 213 struct drm_map_list *r_list;
@@ -261,7 +263,8 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
261static int drm_vm_info(char *buf, char **start, off_t offset, int request, 263static int drm_vm_info(char *buf, char **start, off_t offset, int request,
262 int *eof, void *data) 264 int *eof, void *data)
263{ 265{
264 struct drm_device *dev = (struct drm_device *) data; 266 struct drm_minor *minor = (struct drm_minor *) data;
267 struct drm_device *dev = minor->dev;
265 int ret; 268 int ret;
266 269
267 mutex_lock(&dev->struct_mutex); 270 mutex_lock(&dev->struct_mutex);
@@ -284,7 +287,8 @@ static int drm_vm_info(char *buf, char **start, off_t offset, int request,
284static int drm__queues_info(char *buf, char **start, off_t offset, 287static int drm__queues_info(char *buf, char **start, off_t offset,
285 int request, int *eof, void *data) 288 int request, int *eof, void *data)
286{ 289{
287 struct drm_device *dev = (struct drm_device *) data; 290 struct drm_minor *minor = (struct drm_minor *) data;
291 struct drm_device *dev = minor->dev;
288 int len = 0; 292 int len = 0;
289 int i; 293 int i;
290 struct drm_queue *q; 294 struct drm_queue *q;
@@ -334,7 +338,8 @@ static int drm__queues_info(char *buf, char **start, off_t offset,
334static int drm_queues_info(char *buf, char **start, off_t offset, int request, 338static int drm_queues_info(char *buf, char **start, off_t offset, int request,
335 int *eof, void *data) 339 int *eof, void *data)
336{ 340{
337 struct drm_device *dev = (struct drm_device *) data; 341 struct drm_minor *minor = (struct drm_minor *) data;
342 struct drm_device *dev = minor->dev;
338 int ret; 343 int ret;
339 344
340 mutex_lock(&dev->struct_mutex); 345 mutex_lock(&dev->struct_mutex);
@@ -357,7 +362,8 @@ static int drm_queues_info(char *buf, char **start, off_t offset, int request,
357static int drm__bufs_info(char *buf, char **start, off_t offset, int request, 362static int drm__bufs_info(char *buf, char **start, off_t offset, int request,
358 int *eof, void *data) 363 int *eof, void *data)
359{ 364{
360 struct drm_device *dev = (struct drm_device *) data; 365 struct drm_minor *minor = (struct drm_minor *) data;
366 struct drm_device *dev = minor->dev;
361 int len = 0; 367 int len = 0;
362 struct drm_device_dma *dma = dev->dma; 368 struct drm_device_dma *dma = dev->dma;
363 int i; 369 int i;
@@ -406,7 +412,8 @@ static int drm__bufs_info(char *buf, char **start, off_t offset, int request,
406static int drm_bufs_info(char *buf, char **start, off_t offset, int request, 412static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
407 int *eof, void *data) 413 int *eof, void *data)
408{ 414{
409 struct drm_device *dev = (struct drm_device *) data; 415 struct drm_minor *minor = (struct drm_minor *) data;
416 struct drm_device *dev = minor->dev;
410 int ret; 417 int ret;
411 418
412 mutex_lock(&dev->struct_mutex); 419 mutex_lock(&dev->struct_mutex);
@@ -429,7 +436,8 @@ static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
429static int drm__clients_info(char *buf, char **start, off_t offset, 436static int drm__clients_info(char *buf, char **start, off_t offset,
430 int request, int *eof, void *data) 437 int request, int *eof, void *data)
431{ 438{
432 struct drm_device *dev = (struct drm_device *) data; 439 struct drm_minor *minor = (struct drm_minor *) data;
440 struct drm_device *dev = minor->dev;
433 int len = 0; 441 int len = 0;
434 struct drm_file *priv; 442 struct drm_file *priv;
435 443
@@ -445,7 +453,7 @@ static int drm__clients_info(char *buf, char **start, off_t offset,
445 list_for_each_entry(priv, &dev->filelist, lhead) { 453 list_for_each_entry(priv, &dev->filelist, lhead) {
446 DRM_PROC_PRINT("%c %3d %5d %5d %10u %10lu\n", 454 DRM_PROC_PRINT("%c %3d %5d %5d %10u %10lu\n",
447 priv->authenticated ? 'y' : 'n', 455 priv->authenticated ? 'y' : 'n',
448 priv->minor, 456 priv->minor->index,
449 priv->pid, 457 priv->pid,
450 priv->uid, priv->magic, priv->ioctl_count); 458 priv->uid, priv->magic, priv->ioctl_count);
451 } 459 }
@@ -462,7 +470,8 @@ static int drm__clients_info(char *buf, char **start, off_t offset,
462static int drm_clients_info(char *buf, char **start, off_t offset, 470static int drm_clients_info(char *buf, char **start, off_t offset,
463 int request, int *eof, void *data) 471 int request, int *eof, void *data)
464{ 472{
465 struct drm_device *dev = (struct drm_device *) data; 473 struct drm_minor *minor = (struct drm_minor *) data;
474 struct drm_device *dev = minor->dev;
466 int ret; 475 int ret;
467 476
468 mutex_lock(&dev->struct_mutex); 477 mutex_lock(&dev->struct_mutex);
@@ -476,7 +485,8 @@ static int drm_clients_info(char *buf, char **start, off_t offset,
476static int drm__vma_info(char *buf, char **start, off_t offset, int request, 485static int drm__vma_info(char *buf, char **start, off_t offset, int request,
477 int *eof, void *data) 486 int *eof, void *data)
478{ 487{
479 struct drm_device *dev = (struct drm_device *) data; 488 struct drm_minor *minor = (struct drm_minor *) data;
489 struct drm_device *dev = minor->dev;
480 int len = 0; 490 int len = 0;
481 struct drm_vma_entry *pt; 491 struct drm_vma_entry *pt;
482 struct vm_area_struct *vma; 492 struct vm_area_struct *vma;
@@ -535,7 +545,8 @@ static int drm__vma_info(char *buf, char **start, off_t offset, int request,
535static int drm_vma_info(char *buf, char **start, off_t offset, int request, 545static int drm_vma_info(char *buf, char **start, off_t offset, int request,
536 int *eof, void *data) 546 int *eof, void *data)
537{ 547{
538 struct drm_device *dev = (struct drm_device *) data; 548 struct drm_minor *minor = (struct drm_minor *) data;
549 struct drm_device *dev = minor->dev;
539 int ret; 550 int ret;
540 551
541 mutex_lock(&dev->struct_mutex); 552 mutex_lock(&dev->struct_mutex);
diff --git a/drivers/char/drm/drm_stub.c b/drivers/char/drm/drm_stub.c
index d93a217f856a..c2f584f3b46c 100644
--- a/drivers/char/drm/drm_stub.c
+++ b/drivers/char/drm/drm_stub.c
@@ -36,23 +36,49 @@
36#include "drmP.h" 36#include "drmP.h"
37#include "drm_core.h" 37#include "drm_core.h"
38 38
39unsigned int drm_cards_limit = 16; /* Enough for one machine */
40unsigned int drm_debug = 0; /* 1 to enable debug output */ 39unsigned int drm_debug = 0; /* 1 to enable debug output */
41EXPORT_SYMBOL(drm_debug); 40EXPORT_SYMBOL(drm_debug);
42 41
43MODULE_AUTHOR(CORE_AUTHOR); 42MODULE_AUTHOR(CORE_AUTHOR);
44MODULE_DESCRIPTION(CORE_DESC); 43MODULE_DESCRIPTION(CORE_DESC);
45MODULE_LICENSE("GPL and additional rights"); 44MODULE_LICENSE("GPL and additional rights");
46MODULE_PARM_DESC(cards_limit, "Maximum number of graphics cards");
47MODULE_PARM_DESC(debug, "Enable debug output"); 45MODULE_PARM_DESC(debug, "Enable debug output");
48 46
49module_param_named(cards_limit, drm_cards_limit, int, 0444);
50module_param_named(debug, drm_debug, int, 0600); 47module_param_named(debug, drm_debug, int, 0600);
51 48
52struct drm_head **drm_heads; 49struct idr drm_minors_idr;
50
53struct class *drm_class; 51struct class *drm_class;
54struct proc_dir_entry *drm_proc_root; 52struct proc_dir_entry *drm_proc_root;
55 53
54static int drm_minor_get_id(struct drm_device *dev, int type)
55{
56 int new_id;
57 int ret;
58 int base = 0, limit = 63;
59
60again:
61 if (idr_pre_get(&drm_minors_idr, GFP_KERNEL) == 0) {
62 DRM_ERROR("Out of memory expanding minor idr\n");
63 return -ENOMEM;
64 }
65 mutex_lock(&dev->struct_mutex);
66 ret = idr_get_new_above(&drm_minors_idr, NULL,
67 base, &new_id);
68 mutex_unlock(&dev->struct_mutex);
69 if (ret == -EAGAIN) {
70 goto again;
71 } else if (ret) {
72 return ret;
73 }
74
75 if (new_id >= limit) {
76 idr_remove(&drm_minors_idr, new_id);
77 return -EINVAL;
78 }
79 return new_id;
80}
81
56static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev, 82static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
57 const struct pci_device_id *ent, 83 const struct pci_device_id *ent,
58 struct drm_driver *driver) 84 struct drm_driver *driver)
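drm_minor_get_id() above uses the two-step idr allocation this kernel generation requires: idr_pre_get() preloads memory without holding locks, and idr_get_new_above() can still fail with -EAGAIN under contention, so callers loop. The pattern in isolation, hedged (the DRM locking and the 0..63 range check are elided):

    #include <linux/idr.h>

    static int alloc_id(struct idr *idr, void *obj)
    {
            int id, ret;

    again:
            if (idr_pre_get(idr, GFP_KERNEL) == 0)
                    return -ENOMEM;                 /* preload failed */

            ret = idr_get_new_above(idr, obj, 0, &id);
            if (ret == -EAGAIN)
                    goto again;                     /* raced; preload again */
            if (ret)
                    return ret;

            return id;
    }

The patch stores NULL first and only idr_replace()s the real pointer once the minor is fully set up, so a concurrent idr_find() in the open path never sees a half-initialized minor.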
@@ -145,48 +171,60 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
145 * create the proc init entry via proc_init(). This routines assigns 171 * create the proc init entry via proc_init(). This routines assigns
146 * minor numbers to secondary heads of multi-headed cards 172 * minor numbers to secondary heads of multi-headed cards
147 */ 173 */
148static int drm_get_head(struct drm_device * dev, struct drm_head * head) 174static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
149{ 175{
150 struct drm_head **heads = drm_heads; 176 struct drm_minor *new_minor;
151 int ret; 177 int ret;
152 int minor; 178 int minor_id;
153 179
154 DRM_DEBUG("\n"); 180 DRM_DEBUG("\n");
155 181
156 for (minor = 0; minor < drm_cards_limit; minor++, heads++) { 182 minor_id = drm_minor_get_id(dev, type);
157 if (!*heads) { 183 if (minor_id < 0)
158 184 return minor_id;
159 *head = (struct drm_head) { 185
160 .dev = dev,.device = 186 new_minor = kzalloc(sizeof(struct drm_minor), GFP_KERNEL);
161 MKDEV(DRM_MAJOR, minor),.minor = minor,}; 187 if (!new_minor) {
162 188 ret = -ENOMEM;
163 if ((ret = 189 goto err_idr;
164 drm_proc_init(dev, minor, drm_proc_root, 190 }
165 &head->dev_root))) { 191
166 printk(KERN_ERR 192 new_minor->type = type;
167 "DRM: Failed to initialize /proc/dri.\n"); 193 new_minor->device = MKDEV(DRM_MAJOR, minor_id);
168 goto err_g1; 194 new_minor->dev = dev;
169 } 195 new_minor->index = minor_id;
170 196
171 ret = drm_sysfs_device_add(dev, head); 197 idr_replace(&drm_minors_idr, new_minor, minor_id);
172 if (ret) { 198
173 printk(KERN_ERR 199 if (type == DRM_MINOR_LEGACY) {
174 "DRM: Error sysfs_device_add.\n"); 200 ret = drm_proc_init(new_minor, minor_id, drm_proc_root);
175 goto err_g2; 201 if (ret) {
176 } 202 DRM_ERROR("DRM: Failed to initialize /proc/dri.\n");
177 *heads = head; 203 goto err_mem;
178
179 DRM_DEBUG("new minor assigned %d\n", minor);
180 return 0;
181 } 204 }
205 } else
206 new_minor->dev_root = NULL;
207
208 ret = drm_sysfs_device_add(new_minor);
209 if (ret) {
210 printk(KERN_ERR
211 "DRM: Error sysfs_device_add.\n");
212 goto err_g2;
182 } 213 }
183 DRM_ERROR("out of minors\n"); 214 *minor = new_minor;
184 return -ENOMEM; 215
185 err_g2: 216 DRM_DEBUG("new minor assigned %d\n", minor_id);
186 drm_proc_cleanup(minor, drm_proc_root, head->dev_root); 217 return 0;
187 err_g1: 218
188 *head = (struct drm_head) { 219
189 .dev = NULL}; 220err_g2:
221 if (new_minor->type == DRM_MINOR_LEGACY)
222 drm_proc_cleanup(new_minor, drm_proc_root);
223err_mem:
224 kfree(new_minor);
225err_idr:
226 idr_remove(&drm_minors_idr, minor_id);
227 *minor = NULL;
190 return ret; 228 return ret;
191} 229}
192 230
@@ -222,12 +260,12 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
222 printk(KERN_ERR "DRM: Fill_in_dev failed.\n"); 260 printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
223 goto err_g2; 261 goto err_g2;
224 } 262 }
225 if ((ret = drm_get_head(dev, &dev->primary))) 263 if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
226 goto err_g2; 264 goto err_g2;
227 265
228 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", 266 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
229 driver->name, driver->major, driver->minor, driver->patchlevel, 267 driver->name, driver->major, driver->minor, driver->patchlevel,
230 driver->date, dev->primary.minor); 268 driver->date, dev->primary->index);
231 269
232 return 0; 270 return 0;
233 271
@@ -276,18 +314,18 @@ int drm_put_dev(struct drm_device * dev)
276 * last minor released. 314 * last minor released.
277 * 315 *
278 */ 316 */
279int drm_put_head(struct drm_head * head) 317int drm_put_minor(struct drm_minor **minor_p)
280{ 318{
281 int minor = head->minor; 319 struct drm_minor *minor = *minor_p;
282 320 DRM_DEBUG("release secondary minor %d\n", minor->index);
283 DRM_DEBUG("release secondary minor %d\n", minor);
284
285 drm_proc_cleanup(minor, drm_proc_root, head->dev_root);
286 drm_sysfs_device_remove(head->dev);
287 321
288 *head = (struct drm_head) {.dev = NULL}; 322 if (minor->type == DRM_MINOR_LEGACY)
323 drm_proc_cleanup(minor, drm_proc_root);
324 drm_sysfs_device_remove(minor);
289 325
290 drm_heads[minor] = NULL; 326 idr_remove(&drm_minors_idr, minor->index);
291 327
328 kfree(minor);
329 *minor_p = NULL;
292 return 0; 330 return 0;
293} 331}
diff --git a/drivers/char/drm/drm_sysfs.c b/drivers/char/drm/drm_sysfs.c
index 05ed5043254f..7a1d9a782ddb 100644
--- a/drivers/char/drm/drm_sysfs.c
+++ b/drivers/char/drm/drm_sysfs.c
@@ -19,7 +19,7 @@
19#include "drm_core.h" 19#include "drm_core.h"
20#include "drmP.h" 20#include "drmP.h"
21 21
22#define to_drm_device(d) container_of(d, struct drm_device, dev) 22#define to_drm_minor(d) container_of(d, struct drm_minor, kdev)
23 23
24/** 24/**
25 * drm_sysfs_suspend - DRM class suspend hook 25 * drm_sysfs_suspend - DRM class suspend hook
@@ -31,7 +31,8 @@
31 */ 31 */
32static int drm_sysfs_suspend(struct device *dev, pm_message_t state) 32static int drm_sysfs_suspend(struct device *dev, pm_message_t state)
33{ 33{
34 struct drm_device *drm_dev = to_drm_device(dev); 34 struct drm_minor *drm_minor = to_drm_minor(dev);
35 struct drm_device *drm_dev = drm_minor->dev;
35 36
36 printk(KERN_ERR "%s\n", __FUNCTION__); 37 printk(KERN_ERR "%s\n", __FUNCTION__);
37 38
@@ -50,7 +51,8 @@ static int drm_sysfs_suspend(struct device *dev, pm_message_t state)
50 */ 51 */
51static int drm_sysfs_resume(struct device *dev) 52static int drm_sysfs_resume(struct device *dev)
52{ 53{
53 struct drm_device *drm_dev = to_drm_device(dev); 54 struct drm_minor *drm_minor = to_drm_minor(dev);
55 struct drm_device *drm_dev = drm_minor->dev;
54 56
55 if (drm_dev->driver->resume) 57 if (drm_dev->driver->resume)
56 return drm_dev->driver->resume(drm_dev); 58 return drm_dev->driver->resume(drm_dev);
@@ -120,10 +122,11 @@ void drm_sysfs_destroy(void)
120static ssize_t show_dri(struct device *device, struct device_attribute *attr, 122static ssize_t show_dri(struct device *device, struct device_attribute *attr,
121 char *buf) 123 char *buf)
122{ 124{
123 struct drm_device *dev = to_drm_device(device); 125 struct drm_minor *drm_minor = to_drm_minor(device);
124 if (dev->driver->dri_library_name) 126 struct drm_device *drm_dev = drm_minor->dev;
125 return dev->driver->dri_library_name(dev, buf); 127 if (drm_dev->driver->dri_library_name)
126 return snprintf(buf, PAGE_SIZE, "%s\n", dev->driver->pci_driver.name); 128 return drm_dev->driver->dri_library_name(drm_dev, buf);
129 return snprintf(buf, PAGE_SIZE, "%s\n", drm_dev->driver->pci_driver.name);
127} 130}
128 131
129static struct device_attribute device_attrs[] = { 132static struct device_attribute device_attrs[] = {
@@ -152,25 +155,28 @@ static void drm_sysfs_device_release(struct device *dev)
152 * as the parent for the Linux device, and make sure it has a file containing 155 * as the parent for the Linux device, and make sure it has a file containing
153 * the driver we're using (for userspace compatibility). 156 * the driver we're using (for userspace compatibility).
154 */ 157 */
155int drm_sysfs_device_add(struct drm_device *dev, struct drm_head *head) 158int drm_sysfs_device_add(struct drm_minor *minor)
156{ 159{
157 int err; 160 int err;
158 int i, j; 161 int i, j;
162 char *minor_str;
159 163
160 dev->dev.parent = &dev->pdev->dev; 164 minor->kdev.parent = &minor->dev->pdev->dev;
161 dev->dev.class = drm_class; 165 minor->kdev.class = drm_class;
162 dev->dev.release = drm_sysfs_device_release; 166 minor->kdev.release = drm_sysfs_device_release;
163 dev->dev.devt = head->device; 167 minor->kdev.devt = minor->device;
164 snprintf(dev->dev.bus_id, BUS_ID_SIZE, "card%d", head->minor); 168 minor_str = "card%d";
165 169
166 err = device_register(&dev->dev); 170 snprintf(minor->kdev.bus_id, BUS_ID_SIZE, minor_str, minor->index);
171
172 err = device_register(&minor->kdev);
167 if (err) { 173 if (err) {
168 DRM_ERROR("device add failed: %d\n", err); 174 DRM_ERROR("device add failed: %d\n", err);
169 goto err_out; 175 goto err_out;
170 } 176 }
171 177
172 for (i = 0; i < ARRAY_SIZE(device_attrs); i++) { 178 for (i = 0; i < ARRAY_SIZE(device_attrs); i++) {
173 err = device_create_file(&dev->dev, &device_attrs[i]); 179 err = device_create_file(&minor->kdev, &device_attrs[i]);
174 if (err) 180 if (err)
175 goto err_out_files; 181 goto err_out_files;
176 } 182 }
@@ -180,8 +186,8 @@ int drm_sysfs_device_add(struct drm_device *dev, struct drm_head *head)
180err_out_files: 186err_out_files:
181 if (i > 0) 187 if (i > 0)
182 for (j = 0; j < i; j++) 188 for (j = 0; j < i; j++)
183 device_remove_file(&dev->dev, &device_attrs[i]); 189 device_remove_file(&minor->kdev, &device_attrs[i]);
184 device_unregister(&dev->dev); 190 device_unregister(&minor->kdev);
185err_out: 191err_out:
186 192
187 return err; 193 return err;
@@ -194,11 +200,11 @@ err_out:
194 * This call unregisters and cleans up a class device that was created with a 200 * This call unregisters and cleans up a class device that was created with a
195 * call to drm_sysfs_device_add() 201 * call to drm_sysfs_device_add()
196 */ 202 */
197void drm_sysfs_device_remove(struct drm_device *dev) 203void drm_sysfs_device_remove(struct drm_minor *minor)
198{ 204{
199 int i; 205 int i;
200 206
201 for (i = 0; i < ARRAY_SIZE(device_attrs); i++) 207 for (i = 0; i < ARRAY_SIZE(device_attrs); i++)
202 device_remove_file(&dev->dev, &device_attrs[i]); 208 device_remove_file(&minor->kdev, &device_attrs[i]);
203 device_unregister(&dev->dev); 209 device_unregister(&minor->kdev);
204} 210}
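to_drm_minor() recovers the containing drm_minor from the embedded struct device that the driver core hands back; container_of() just subtracts the member's offset inside the containing type. A self-contained userspace restatement of the trick (the types are invented stand-ins):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct wrapper {
            int index;
            struct stub { int dummy; } kdev;  /* stands in for struct device */
    };

    int main(void)
    {
            struct wrapper w = { .index = 3 };
            struct stub *p = &w.kdev;         /* what a callback would receive */
            struct wrapper *back = container_of(p, struct wrapper, kdev);

            printf("%d\n", back->index);      /* prints 3 */
            return 0;
    }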
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
index 945df72a51a9..c234c6f24a8d 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -90,7 +90,7 @@ static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
90static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 90static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
91{ 91{
92 struct drm_file *priv = vma->vm_file->private_data; 92 struct drm_file *priv = vma->vm_file->private_data;
93 struct drm_device *dev = priv->head->dev; 93 struct drm_device *dev = priv->minor->dev;
94 struct drm_map *map = NULL; 94 struct drm_map *map = NULL;
95 struct drm_map_list *r_list; 95 struct drm_map_list *r_list;
96 struct drm_hash_item *hash; 96 struct drm_hash_item *hash;
@@ -207,7 +207,7 @@ static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
207static void drm_vm_shm_close(struct vm_area_struct *vma) 207static void drm_vm_shm_close(struct vm_area_struct *vma)
208{ 208{
209 struct drm_file *priv = vma->vm_file->private_data; 209 struct drm_file *priv = vma->vm_file->private_data;
210 struct drm_device *dev = priv->head->dev; 210 struct drm_device *dev = priv->minor->dev;
211 struct drm_vma_entry *pt, *temp; 211 struct drm_vma_entry *pt, *temp;
212 struct drm_map *map; 212 struct drm_map *map;
213 struct drm_map_list *r_list; 213 struct drm_map_list *r_list;
@@ -286,7 +286,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
286static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 286static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
287{ 287{
288 struct drm_file *priv = vma->vm_file->private_data; 288 struct drm_file *priv = vma->vm_file->private_data;
289 struct drm_device *dev = priv->head->dev; 289 struct drm_device *dev = priv->minor->dev;
290 struct drm_device_dma *dma = dev->dma; 290 struct drm_device_dma *dma = dev->dma;
291 unsigned long offset; 291 unsigned long offset;
292 unsigned long page_nr; 292 unsigned long page_nr;
@@ -321,7 +321,7 @@ static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
321{ 321{
322 struct drm_map *map = (struct drm_map *) vma->vm_private_data; 322 struct drm_map *map = (struct drm_map *) vma->vm_private_data;
323 struct drm_file *priv = vma->vm_file->private_data; 323 struct drm_file *priv = vma->vm_file->private_data;
324 struct drm_device *dev = priv->head->dev; 324 struct drm_device *dev = priv->minor->dev;
325 struct drm_sg_mem *entry = dev->sg; 325 struct drm_sg_mem *entry = dev->sg;
326 unsigned long offset; 326 unsigned long offset;
327 unsigned long map_offset; 327 unsigned long map_offset;
@@ -402,7 +402,7 @@ static struct vm_operations_struct drm_vm_sg_ops = {
402static void drm_vm_open_locked(struct vm_area_struct *vma) 402static void drm_vm_open_locked(struct vm_area_struct *vma)
403{ 403{
404 struct drm_file *priv = vma->vm_file->private_data; 404 struct drm_file *priv = vma->vm_file->private_data;
405 struct drm_device *dev = priv->head->dev; 405 struct drm_device *dev = priv->minor->dev;
406 struct drm_vma_entry *vma_entry; 406 struct drm_vma_entry *vma_entry;
407 407
408 DRM_DEBUG("0x%08lx,0x%08lx\n", 408 DRM_DEBUG("0x%08lx,0x%08lx\n",
@@ -420,7 +420,7 @@ static void drm_vm_open_locked(struct vm_area_struct *vma)
420static void drm_vm_open(struct vm_area_struct *vma) 420static void drm_vm_open(struct vm_area_struct *vma)
421{ 421{
422 struct drm_file *priv = vma->vm_file->private_data; 422 struct drm_file *priv = vma->vm_file->private_data;
423 struct drm_device *dev = priv->head->dev; 423 struct drm_device *dev = priv->minor->dev;
424 424
425 mutex_lock(&dev->struct_mutex); 425 mutex_lock(&dev->struct_mutex);
426 drm_vm_open_locked(vma); 426 drm_vm_open_locked(vma);
@@ -438,7 +438,7 @@ static void drm_vm_open(struct vm_area_struct *vma)
438static void drm_vm_close(struct vm_area_struct *vma) 438static void drm_vm_close(struct vm_area_struct *vma)
439{ 439{
440 struct drm_file *priv = vma->vm_file->private_data; 440 struct drm_file *priv = vma->vm_file->private_data;
441 struct drm_device *dev = priv->head->dev; 441 struct drm_device *dev = priv->minor->dev;
442 struct drm_vma_entry *pt, *temp; 442 struct drm_vma_entry *pt, *temp;
443 443
444 DRM_DEBUG("0x%08lx,0x%08lx\n", 444 DRM_DEBUG("0x%08lx,0x%08lx\n",
@@ -473,7 +473,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
473 struct drm_device_dma *dma; 473 struct drm_device_dma *dma;
474 unsigned long length = vma->vm_end - vma->vm_start; 474 unsigned long length = vma->vm_end - vma->vm_start;
475 475
476 dev = priv->head->dev; 476 dev = priv->minor->dev;
477 dma = dev->dma; 477 dma = dev->dma;
478 DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n", 478 DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
479 vma->vm_start, vma->vm_end, vma->vm_pgoff); 479 vma->vm_start, vma->vm_end, vma->vm_pgoff);
@@ -543,7 +543,7 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs);
543static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) 543static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
544{ 544{
545 struct drm_file *priv = filp->private_data; 545 struct drm_file *priv = filp->private_data;
546 struct drm_device *dev = priv->head->dev; 546 struct drm_device *dev = priv->minor->dev;
547 struct drm_map *map = NULL; 547 struct drm_map *map = NULL;
548 unsigned long offset = 0; 548 unsigned long offset = 0;
549 struct drm_hash_item *hash; 549 struct drm_hash_item *hash;
@@ -640,12 +640,12 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
640 /* Don't let this area swap. Change when 640 /* Don't let this area swap. Change when
641 DRM_KERNEL advisory is supported. */ 641 DRM_KERNEL advisory is supported. */
642 vma->vm_flags |= VM_RESERVED; 642 vma->vm_flags |= VM_RESERVED;
643 vma->vm_page_prot = drm_dma_prot(map->type, vma);
644 break; 643 break;
645 case _DRM_SCATTER_GATHER: 644 case _DRM_SCATTER_GATHER:
646 vma->vm_ops = &drm_vm_sg_ops; 645 vma->vm_ops = &drm_vm_sg_ops;
647 vma->vm_private_data = (void *)map; 646 vma->vm_private_data = (void *)map;
648 vma->vm_flags |= VM_RESERVED; 647 vma->vm_flags |= VM_RESERVED;
648 vma->vm_page_prot = drm_dma_prot(map->type, vma);
649 break; 649 break;
650 default: 650 default:
651 return -EINVAL; /* This should never happen. */ 651 return -EINVAL; /* This should never happen. */
@@ -661,7 +661,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
661int drm_mmap(struct file *filp, struct vm_area_struct *vma) 661int drm_mmap(struct file *filp, struct vm_area_struct *vma)
662{ 662{
663 struct drm_file *priv = filp->private_data; 663 struct drm_file *priv = filp->private_data;
664 struct drm_device *dev = priv->head->dev; 664 struct drm_device *dev = priv->minor->dev;
665 int ret; 665 int ret;
666 666
667 mutex_lock(&dev->struct_mutex); 667 mutex_lock(&dev->struct_mutex);
diff --git a/drivers/char/drm/i810_dma.c b/drivers/char/drm/i810_dma.c
index 8d7ea81c4b66..e5de8ea41544 100644
--- a/drivers/char/drm/i810_dma.c
+++ b/drivers/char/drm/i810_dma.c
@@ -94,7 +94,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
94 drm_i810_buf_priv_t *buf_priv; 94 drm_i810_buf_priv_t *buf_priv;
95 95
96 lock_kernel(); 96 lock_kernel();
97 dev = priv->head->dev; 97 dev = priv->minor->dev;
98 dev_priv = dev->dev_private; 98 dev_priv = dev->dev_private;
99 buf = dev_priv->mmap_buffer; 99 buf = dev_priv->mmap_buffer;
100 buf_priv = buf->dev_private; 100 buf_priv = buf->dev_private;
@@ -122,7 +122,7 @@ static const struct file_operations i810_buffer_fops = {
122 122
123static int i810_map_buffer(struct drm_buf * buf, struct drm_file *file_priv) 123static int i810_map_buffer(struct drm_buf * buf, struct drm_file *file_priv)
124{ 124{
125 struct drm_device *dev = file_priv->head->dev; 125 struct drm_device *dev = file_priv->minor->dev;
126 drm_i810_buf_priv_t *buf_priv = buf->dev_private; 126 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
127 drm_i810_private_t *dev_priv = dev->dev_private; 127 drm_i810_private_t *dev_priv = dev->dev_private;
128 const struct file_operations *old_fops; 128 const struct file_operations *old_fops;
diff --git a/drivers/char/drm/i830_dma.c b/drivers/char/drm/i830_dma.c
index 9df08105f4f3..60c9376be486 100644
--- a/drivers/char/drm/i830_dma.c
+++ b/drivers/char/drm/i830_dma.c
@@ -96,7 +96,7 @@ static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
96 drm_i830_buf_priv_t *buf_priv; 96 drm_i830_buf_priv_t *buf_priv;
97 97
98 lock_kernel(); 98 lock_kernel();
99 dev = priv->head->dev; 99 dev = priv->minor->dev;
100 dev_priv = dev->dev_private; 100 dev_priv = dev->dev_private;
101 buf = dev_priv->mmap_buffer; 101 buf = dev_priv->mmap_buffer;
102 buf_priv = buf->dev_private; 102 buf_priv = buf->dev_private;
@@ -124,7 +124,7 @@ static const struct file_operations i830_buffer_fops = {
124 124
125static int i830_map_buffer(struct drm_buf * buf, struct drm_file *file_priv) 125static int i830_map_buffer(struct drm_buf * buf, struct drm_file *file_priv)
126{ 126{
127 struct drm_device *dev = file_priv->head->dev; 127 struct drm_device *dev = file_priv->minor->dev;
128 drm_i830_buf_priv_t *buf_priv = buf->dev_private; 128 drm_i830_buf_priv_t *buf_priv = buf->dev_private;
129 drm_i830_private_t *dev_priv = dev->dev_private; 129 drm_i830_private_t *dev_priv = dev->dev_private;
130 const struct file_operations *old_fops; 130 const struct file_operations *old_fops;
diff --git a/drivers/char/drm/i915_dma.c b/drivers/char/drm/i915_dma.c
index a043bb12301a..ef7bf143a80c 100644
--- a/drivers/char/drm/i915_dma.c
+++ b/drivers/char/drm/i915_dma.c
@@ -415,10 +415,13 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
415 drm_i915_private_t *dev_priv = dev->dev_private; 415 drm_i915_private_t *dev_priv = dev->dev_private;
416 RING_LOCALS; 416 RING_LOCALS;
417 417
418 dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter; 418 if (++dev_priv->counter > BREADCRUMB_MASK) {
419 dev_priv->counter = 1;
420 DRM_DEBUG("Breadcrumb counter wrapped around\n");
421 }
419 422
420 if (dev_priv->counter > 0x7FFFFFFFUL) 423 if (dev_priv->sarea_priv)
421 dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1; 424 dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
422 425
423 BEGIN_LP_RING(4); 426 BEGIN_LP_RING(4);
424 OUT_RING(CMD_STORE_DWORD_IDX); 427 OUT_RING(CMD_STORE_DWORD_IDX);
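
The counter is now wrapped explicitly instead of being reset once it passes 0x7FFFFFFF. A sketch of the wrap rule, assuming BREADCRUMB_MASK as defined elsewhere in this series; next_breadcrumb is an illustrative helper, not from the patch:

	/* The breadcrumb skips 0 on wrap, so a zero read from the status
	 * page can never be mistaken for a valid breadcrumb. */
	static uint32_t next_breadcrumb(uint32_t counter, uint32_t mask)
	{
		if (++counter > mask)
			counter = 1;
		return counter;
	}
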
@@ -428,6 +431,26 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
428 ADVANCE_LP_RING(); 431 ADVANCE_LP_RING();
429} 432}
430 433
434int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush)
435{
436 drm_i915_private_t *dev_priv = dev->dev_private;
437 uint32_t flush_cmd = CMD_MI_FLUSH;
438 RING_LOCALS;
439
440 flush_cmd |= flush;
441
442 i915_kernel_lost_context(dev);
443
444 BEGIN_LP_RING(4);
445 OUT_RING(flush_cmd);
446 OUT_RING(0);
447 OUT_RING(0);
448 OUT_RING(0);
449 ADVANCE_LP_RING();
450
451 return 0;
452}
453
431static int i915_dispatch_cmdbuffer(struct drm_device * dev, 454static int i915_dispatch_cmdbuffer(struct drm_device * dev,
432 drm_i915_cmdbuffer_t * cmd) 455 drm_i915_cmdbuffer_t * cmd)
433{ 456{
@@ -511,52 +534,74 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
511 return 0; 534 return 0;
512} 535}
513 536
514static int i915_dispatch_flip(struct drm_device * dev) 537static void i915_do_dispatch_flip(struct drm_device * dev, int plane, int sync)
515{ 538{
516 drm_i915_private_t *dev_priv = dev->dev_private; 539 drm_i915_private_t *dev_priv = dev->dev_private;
540 u32 num_pages, current_page, next_page, dspbase;
541 int shift = 2 * plane, x, y;
517 RING_LOCALS; 542 RING_LOCALS;
518 543
519 DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n", 544 /* Calculate display base offset */
520 __FUNCTION__, 545 num_pages = dev_priv->sarea_priv->third_handle ? 3 : 2;
521 dev_priv->current_page, 546 current_page = (dev_priv->sarea_priv->pf_current_page >> shift) & 0x3;
522 dev_priv->sarea_priv->pf_current_page); 547 next_page = (current_page + 1) % num_pages;
523 548
524 i915_kernel_lost_context(dev); 549 switch (next_page) {
525 550 default:
526 BEGIN_LP_RING(2); 551 case 0:
527 OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE); 552 dspbase = dev_priv->sarea_priv->front_offset;
528 OUT_RING(0); 553 break;
529 ADVANCE_LP_RING(); 554 case 1:
555 dspbase = dev_priv->sarea_priv->back_offset;
556 break;
557 case 2:
558 dspbase = dev_priv->sarea_priv->third_offset;
559 break;
560 }
530 561
531 BEGIN_LP_RING(6); 562 if (plane == 0) {
532 OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP); 563 x = dev_priv->sarea_priv->planeA_x;
533 OUT_RING(0); 564 y = dev_priv->sarea_priv->planeA_y;
534 if (dev_priv->current_page == 0) {
535 OUT_RING(dev_priv->back_offset);
536 dev_priv->current_page = 1;
537 } else { 565 } else {
538 OUT_RING(dev_priv->front_offset); 566 x = dev_priv->sarea_priv->planeB_x;
539 dev_priv->current_page = 0; 567 y = dev_priv->sarea_priv->planeB_y;
540 } 568 }
541 OUT_RING(0);
542 ADVANCE_LP_RING();
543 569
544 BEGIN_LP_RING(2); 570 dspbase += (y * dev_priv->sarea_priv->pitch + x) * dev_priv->cpp;
545 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
546 OUT_RING(0);
547 ADVANCE_LP_RING();
548 571
549 dev_priv->sarea_priv->last_enqueue = dev_priv->counter++; 572 DRM_DEBUG("plane=%d current_page=%d dspbase=0x%x\n", plane, current_page,
573 dspbase);
550 574
551 BEGIN_LP_RING(4); 575 BEGIN_LP_RING(4);
552 OUT_RING(CMD_STORE_DWORD_IDX); 576 OUT_RING(sync ? 0 :
553 OUT_RING(20); 577 (MI_WAIT_FOR_EVENT | (plane ? MI_WAIT_FOR_PLANE_B_FLIP :
554 OUT_RING(dev_priv->counter); 578 MI_WAIT_FOR_PLANE_A_FLIP)));
555 OUT_RING(0); 579 OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | (sync ? 0 : ASYNC_FLIP) |
580 (plane ? DISPLAY_PLANE_B : DISPLAY_PLANE_A));
581 OUT_RING(dev_priv->sarea_priv->pitch * dev_priv->cpp);
582 OUT_RING(dspbase);
556 ADVANCE_LP_RING(); 583 ADVANCE_LP_RING();
557 584
558 dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; 585 dev_priv->sarea_priv->pf_current_page &= ~(0x3 << shift);
559 return 0; 586 dev_priv->sarea_priv->pf_current_page |= next_page << shift;
587}
588
589void i915_dispatch_flip(struct drm_device * dev, int planes, int sync)
590{
591 drm_i915_private_t *dev_priv = dev->dev_private;
592 int i;
593
594 DRM_DEBUG("planes=0x%x pfCurrentPage=%d\n",
595 planes, dev_priv->sarea_priv->pf_current_page);
596
597 i915_emit_mi_flush(dev, MI_READ_FLUSH | MI_EXE_FLUSH);
598
599 for (i = 0; i < 2; i++)
600 if (planes & (1 << i))
601 i915_do_dispatch_flip(dev, i, sync);
602
603 i915_emit_breadcrumb(dev);
604
560} 605}
561 606
562static int i915_quiescent(struct drm_device * dev) 607static int i915_quiescent(struct drm_device * dev)
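
The pf_current_page word packs one two-bit page index per plane (plane N occupies bits 2N+1:2N), which is why the flip, tasklet, and cleanup paths all shift by 2 * plane. An illustrative pair of helpers capturing the encoding, not part of the patch:

	static unsigned int pf_get_page(unsigned int pf, int plane)
	{
		return (pf >> (2 * plane)) & 0x3;
	}

	static unsigned int pf_set_page(unsigned int pf, int plane,
					unsigned int page)
	{
		int shift = 2 * plane;

		return (pf & ~(0x3 << shift)) | (page << shift);
	}
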
@@ -579,7 +624,6 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
579 struct drm_file *file_priv) 624 struct drm_file *file_priv)
580{ 625{
581 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 626 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
582 u32 *hw_status = dev_priv->hw_status_page;
583 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) 627 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
584 dev_priv->sarea_priv; 628 dev_priv->sarea_priv;
585 drm_i915_batchbuffer_t *batch = data; 629 drm_i915_batchbuffer_t *batch = data;
@@ -602,7 +646,7 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
602 646
603 ret = i915_dispatch_batchbuffer(dev, batch); 647 ret = i915_dispatch_batchbuffer(dev, batch);
604 648
605 sarea_priv->last_dispatch = (int)hw_status[5]; 649 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
606 return ret; 650 return ret;
607} 651}
608 652
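
last_dispatch is now read through READ_BREADCRUMB() rather than by indexing the status page directly. The macro itself is defined in i915_drv.h outside this diff; its assumed shape, matching the hw_status[5] slot it replaces here:

	#define READ_BREADCRUMB(dev_priv) \
		(((volatile u32 *)(dev_priv)->hw_status_page)[5])
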
@@ -610,7 +654,6 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
610 struct drm_file *file_priv) 654 struct drm_file *file_priv)
611{ 655{
612 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 656 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
613 u32 *hw_status = dev_priv->hw_status_page;
614 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) 657 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
615 dev_priv->sarea_priv; 658 dev_priv->sarea_priv;
616 drm_i915_cmdbuffer_t *cmdbuf = data; 659 drm_i915_cmdbuffer_t *cmdbuf = data;
@@ -635,18 +678,51 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
635 return ret; 678 return ret;
636 } 679 }
637 680
638 sarea_priv->last_dispatch = (int)hw_status[5]; 681 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
682 return 0;
683}
684
685static int i915_do_cleanup_pageflip(struct drm_device * dev)
686{
687 drm_i915_private_t *dev_priv = dev->dev_private;
688 int i, planes, num_pages = dev_priv->sarea_priv->third_handle ? 3 : 2;
689
690 DRM_DEBUG("\n");
691
692 for (i = 0, planes = 0; i < 2; i++)
693 if (dev_priv->sarea_priv->pf_current_page & (0x3 << (2 * i))) {
694 dev_priv->sarea_priv->pf_current_page =
695 (dev_priv->sarea_priv->pf_current_page &
696 ~(0x3 << (2 * i))) | ((num_pages - 1) << (2 * i));
697
698 planes |= 1 << i;
699 }
700
701 if (planes)
702 i915_dispatch_flip(dev, planes, 0);
703
639 return 0; 704 return 0;
640} 705}
641 706
642static int i915_flip_bufs(struct drm_device *dev, void *data, 707static int i915_flip_bufs(struct drm_device *dev, void *data,
643 struct drm_file *file_priv) 708 struct drm_file *file_priv)
644{ 709{
645 DRM_DEBUG("%s\n", __FUNCTION__); 710 drm_i915_flip_t *param = data;
711
712 DRM_DEBUG("\n");
646 713
647 LOCK_TEST_WITH_RETURN(dev, file_priv); 714 LOCK_TEST_WITH_RETURN(dev, file_priv);
648 715
649 return i915_dispatch_flip(dev); 716 /* This is really planes */
717 if (param->pipes & ~0x3) {
718 DRM_ERROR("Invalid planes 0x%x, only <= 0x3 is valid\n",
719 param->pipes);
720 return -EINVAL;
721 }
722
723 i915_dispatch_flip(dev, param->pipes, 0);
724
725 return 0;
650} 726}
651 727
652static int i915_getparam(struct drm_device *dev, void *data, 728static int i915_getparam(struct drm_device *dev, void *data,
@@ -807,6 +883,8 @@ void i915_driver_lastclose(struct drm_device * dev)
807 if (!dev_priv) 883 if (!dev_priv)
808 return; 884 return;
809 885
886 if (drm_getsarea(dev) && dev_priv->sarea_priv)
887 i915_do_cleanup_pageflip(dev);
810 if (dev_priv->agp_heap) 888 if (dev_priv->agp_heap)
811 i915_mem_takedown(&(dev_priv->agp_heap)); 889 i915_mem_takedown(&(dev_priv->agp_heap));
812 890
diff --git a/drivers/char/drm/i915_drm.h b/drivers/char/drm/i915_drm.h
index 05c66cf03a9e..0431c00e2289 100644
--- a/drivers/char/drm/i915_drm.h
+++ b/drivers/char/drm/i915_drm.h
@@ -105,14 +105,29 @@ typedef struct _drm_i915_sarea {
105 unsigned int rotated_tiled; 105 unsigned int rotated_tiled;
106 unsigned int rotated2_tiled; 106 unsigned int rotated2_tiled;
107 107
108 int pipeA_x; 108 int planeA_x;
109 int pipeA_y; 109 int planeA_y;
110 int pipeA_w; 110 int planeA_w;
111 int pipeA_h; 111 int planeA_h;
112 int pipeB_x; 112 int planeB_x;
113 int pipeB_y; 113 int planeB_y;
114 int pipeB_w; 114 int planeB_w;
115 int pipeB_h; 115 int planeB_h;
116
117 /* Triple buffering */
118 drm_handle_t third_handle;
119 int third_offset;
120 int third_size;
121 unsigned int third_tiled;
122
123 /* buffer object handles for the static buffers. May change
124 * over the lifetime of the client, though it doesn't in our current
125 * implementation.
126 */
127 unsigned int front_bo_handle;
128 unsigned int back_bo_handle;
129 unsigned int third_bo_handle;
130 unsigned int depth_bo_handle;
116} drm_i915_sarea_t; 131} drm_i915_sarea_t;
117 132
118/* Flags for perf_boxes 133/* Flags for perf_boxes
@@ -146,7 +161,7 @@ typedef struct _drm_i915_sarea {
146 161
147#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) 162#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
148#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) 163#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
149#define DRM_IOCTL_I915_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP) 164#define DRM_IOCTL_I915_FLIP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FLIP, drm_i915_flip_t)
150#define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t) 165#define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
151#define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t) 166#define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
152#define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t) 167#define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
@@ -161,6 +176,18 @@ typedef struct _drm_i915_sarea {
161#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t) 176#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
162#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t) 177#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
163 178
179/* Asynchronous page flipping:
180 */
181typedef struct drm_i915_flip {
182 /*
183 * This is really talking about planes, and we could rename it
184 * except for the fact that some of the duplicated i915_drm.h files
185 * out there check for HAVE_I915_FLIP and so might pick up this
186 * version.
187 */
188 int pipes;
189} drm_i915_flip_t;
190
164/* Allow drivers to submit batchbuffers directly to hardware, relying 191/* Allow drivers to submit batchbuffers directly to hardware, relying
165 * on the security mechanisms provided by hardware. 192 * on the security mechanisms provided by hardware.
166 */ 193 */
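
With the flip ioctl now taking an argument, a userspace caller passes the plane mask through drm_i915_flip_t. A hedged usage sketch; request_flip and the open DRM fd are assumptions, and i915_drm.h is assumed to be included:

	#include <sys/ioctl.h>

	static int request_flip(int fd, int planes)
	{
		drm_i915_flip_t flip = { .pipes = planes };	/* field really holds planes */

		return ioctl(fd, DRM_IOCTL_I915_FLIP, &flip);
	}
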
diff --git a/drivers/char/drm/i915_drv.c b/drivers/char/drm/i915_drv.c
index b2b451dc4460..bb8f1b2fb383 100644
--- a/drivers/char/drm/i915_drv.c
+++ b/drivers/char/drm/i915_drv.c
@@ -533,8 +533,7 @@ static struct drm_driver driver = {
533 */ 533 */
534 .driver_features = 534 .driver_features =
535 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/ 535 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
536 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL | 536 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
537 DRIVER_IRQ_VBL2,
538 .load = i915_driver_load, 537 .load = i915_driver_load,
539 .unload = i915_driver_unload, 538 .unload = i915_driver_unload,
540 .lastclose = i915_driver_lastclose, 539 .lastclose = i915_driver_lastclose,
@@ -542,8 +541,9 @@ static struct drm_driver driver = {
542 .suspend = i915_suspend, 541 .suspend = i915_suspend,
543 .resume = i915_resume, 542 .resume = i915_resume,
544 .device_is_agp = i915_driver_device_is_agp, 543 .device_is_agp = i915_driver_device_is_agp,
545 .vblank_wait = i915_driver_vblank_wait, 544 .get_vblank_counter = i915_get_vblank_counter,
546 .vblank_wait2 = i915_driver_vblank_wait2, 545 .enable_vblank = i915_enable_vblank,
546 .disable_vblank = i915_disable_vblank,
547 .irq_preinstall = i915_driver_irq_preinstall, 547 .irq_preinstall = i915_driver_irq_preinstall,
548 .irq_postinstall = i915_driver_irq_postinstall, 548 .irq_postinstall = i915_driver_irq_postinstall,
549 .irq_uninstall = i915_driver_irq_uninstall, 549 .irq_uninstall = i915_driver_irq_uninstall,
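
The driver no longer implements blocking vblank waits itself; it exports a counter plus enable/disable hooks and the DRM core does the waiting. A minimal sketch of the new wiring (example_driver is illustrative and omits the other required hooks):

	static struct drm_driver example_driver = {
		.get_vblank_counter = i915_get_vblank_counter,
		.enable_vblank      = i915_enable_vblank,
		.disable_vblank     = i915_disable_vblank,
		/* ... remaining hooks as above ... */
	};
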
diff --git a/drivers/char/drm/i915_drv.h b/drivers/char/drm/i915_drv.h
index 675d88bda066..c614d78b3dfd 100644
--- a/drivers/char/drm/i915_drv.h
+++ b/drivers/char/drm/i915_drv.h
@@ -76,8 +76,9 @@ struct mem_block {
76typedef struct _drm_i915_vbl_swap { 76typedef struct _drm_i915_vbl_swap {
77 struct list_head head; 77 struct list_head head;
78 drm_drawable_t drw_id; 78 drm_drawable_t drw_id;
79 unsigned int pipe; 79 unsigned int plane;
80 unsigned int sequence; 80 unsigned int sequence;
81 int flip;
81} drm_i915_vbl_swap_t; 82} drm_i915_vbl_swap_t;
82 83
83typedef struct drm_i915_private { 84typedef struct drm_i915_private {
@@ -90,7 +91,7 @@ typedef struct drm_i915_private {
90 drm_dma_handle_t *status_page_dmah; 91 drm_dma_handle_t *status_page_dmah;
91 void *hw_status_page; 92 void *hw_status_page;
92 dma_addr_t dma_status_page; 93 dma_addr_t dma_status_page;
93 unsigned long counter; 94 uint32_t counter;
94 unsigned int status_gfx_addr; 95 unsigned int status_gfx_addr;
95 drm_local_map_t hws_map; 96 drm_local_map_t hws_map;
96 97
@@ -103,13 +104,18 @@ typedef struct drm_i915_private {
103 104
104 wait_queue_head_t irq_queue; 105 wait_queue_head_t irq_queue;
105 atomic_t irq_received; 106 atomic_t irq_received;
106 atomic_t irq_emitted; 107 atomic_t irq_emited;
107 108
108 int tex_lru_log_granularity; 109 int tex_lru_log_granularity;
109 int allow_batchbuffer; 110 int allow_batchbuffer;
110 struct mem_block *agp_heap; 111 struct mem_block *agp_heap;
111 unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; 112 unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
112 int vblank_pipe; 113 int vblank_pipe;
114 spinlock_t user_irq_lock;
115 int user_irq_refcount;
116 int fence_irq_on;
117 uint32_t irq_enable_reg;
118 int irq_enabled;
113 119
114 spinlock_t swaps_lock; 120 spinlock_t swaps_lock;
115 drm_i915_vbl_swap_t vbl_swaps; 121 drm_i915_vbl_swap_t vbl_swaps;
@@ -216,7 +222,7 @@ extern void i915_driver_preclose(struct drm_device *dev,
216extern int i915_driver_device_is_agp(struct drm_device * dev); 222extern int i915_driver_device_is_agp(struct drm_device * dev);
217extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 223extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
218 unsigned long arg); 224 unsigned long arg);
219 225extern void i915_dispatch_flip(struct drm_device * dev, int pipes, int sync);
220/* i915_irq.c */ 226/* i915_irq.c */
221extern int i915_irq_emit(struct drm_device *dev, void *data, 227extern int i915_irq_emit(struct drm_device *dev, void *data,
222 struct drm_file *file_priv); 228 struct drm_file *file_priv);
@@ -227,7 +233,7 @@ extern int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequenc
227extern int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence); 233extern int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
228extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); 234extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
229extern void i915_driver_irq_preinstall(struct drm_device * dev); 235extern void i915_driver_irq_preinstall(struct drm_device * dev);
230extern void i915_driver_irq_postinstall(struct drm_device * dev); 236extern int i915_driver_irq_postinstall(struct drm_device * dev);
231extern void i915_driver_irq_uninstall(struct drm_device * dev); 237extern void i915_driver_irq_uninstall(struct drm_device * dev);
232extern int i915_vblank_pipe_set(struct drm_device *dev, void *data, 238extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
233 struct drm_file *file_priv); 239 struct drm_file *file_priv);
@@ -235,6 +241,9 @@ extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
235 struct drm_file *file_priv); 241 struct drm_file *file_priv);
236extern int i915_vblank_swap(struct drm_device *dev, void *data, 242extern int i915_vblank_swap(struct drm_device *dev, void *data,
237 struct drm_file *file_priv); 243 struct drm_file *file_priv);
244extern int i915_enable_vblank(struct drm_device *dev, int crtc);
245extern void i915_disable_vblank(struct drm_device *dev, int crtc);
246extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
238 247
239/* i915_mem.c */ 248/* i915_mem.c */
240extern int i915_mem_alloc(struct drm_device *dev, void *data, 249extern int i915_mem_alloc(struct drm_device *dev, void *data,
@@ -379,21 +388,91 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
379 388
380/* Interrupt bits: 389/* Interrupt bits:
381 */ 390 */
382#define USER_INT_FLAG (1<<1) 391#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
383#define VSYNC_PIPEB_FLAG (1<<5) 392#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
384#define VSYNC_PIPEA_FLAG (1<<7) 393#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
385#define HWB_OOM_FLAG (1<<13) /* binner out of memory */ 394#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14)
395#define I915_HWB_OOM_INTERRUPT (1<<13) /* binner out of memory */
396#define I915_SYNC_STATUS_INTERRUPT (1<<12)
397#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
398#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10)
399#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9)
400#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8)
401#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7)
402#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6)
403#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5)
404#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4)
405#define I915_DEBUG_INTERRUPT (1<<2)
406#define I915_USER_INTERRUPT (1<<1)
407
386 408
387#define I915REG_HWSTAM 0x02098 409#define I915REG_HWSTAM 0x02098
388#define I915REG_INT_IDENTITY_R 0x020a4 410#define I915REG_INT_IDENTITY_R 0x020a4
389#define I915REG_INT_MASK_R 0x020a8 411#define I915REG_INT_MASK_R 0x020a8
390#define I915REG_INT_ENABLE_R 0x020a0 412#define I915REG_INT_ENABLE_R 0x020a0
413#define I915REG_INSTPM 0x020c0
414
415#define PIPEADSL 0x70000
416#define PIPEBDSL 0x71000
391 417
392#define I915REG_PIPEASTAT 0x70024 418#define I915REG_PIPEASTAT 0x70024
393#define I915REG_PIPEBSTAT 0x71024 419#define I915REG_PIPEBSTAT 0x71024
420/*
421 * The two pipe frame counter registers are not synchronized, so
422 * reading a stable value is somewhat tricky. The following code
423 * should work:
424 *
425 * do {
426 * high1 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
 427 * PIPE_FRAME_HIGH_SHIFT);
428 * low1 = ((INREG(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >>
429 * PIPE_FRAME_LOW_SHIFT);
430 * high2 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
431 * PIPE_FRAME_HIGH_SHIFT);
432 * } while (high1 != high2);
433 * frame = (high1 << 8) | low1;
434 */
435#define PIPEAFRAMEHIGH 0x70040
436#define PIPEBFRAMEHIGH 0x71040
437#define PIPE_FRAME_HIGH_MASK 0x0000ffff
438#define PIPE_FRAME_HIGH_SHIFT 0
439#define PIPEAFRAMEPIXEL 0x70044
440#define PIPEBFRAMEPIXEL 0x71044
394 441
395#define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17) 442#define PIPE_FRAME_LOW_MASK 0xff000000
396#define I915_VBLANK_CLEAR (1UL<<1) 443#define PIPE_FRAME_LOW_SHIFT 24
444/*
445 * Pixel within the current frame is counted in the PIPEAFRAMEPIXEL register
446 * and is 24 bits wide.
447 */
448#define PIPE_PIXEL_MASK 0x00ffffff
449#define PIPE_PIXEL_SHIFT 0
450
451#define I915_FIFO_UNDERRUN_STATUS (1UL<<31)
452#define I915_CRC_ERROR_ENABLE (1UL<<29)
453#define I915_CRC_DONE_ENABLE (1UL<<28)
454#define I915_GMBUS_EVENT_ENABLE (1UL<<27)
455#define I915_VSYNC_INTERRUPT_ENABLE (1UL<<25)
456#define I915_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
457#define I915_DPST_EVENT_ENABLE (1UL<<23)
458#define I915_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
459#define I915_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
460#define I915_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
461#define I915_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
462#define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17)
463#define I915_OVERLAY_UPDATED_ENABLE (1UL<<16)
464#define I915_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
465#define I915_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
466#define I915_GMBUS_INTERRUPT_STATUS (1UL<<11)
467#define I915_VSYNC_INTERRUPT_STATUS (1UL<<9)
468#define I915_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
469#define I915_DPST_EVENT_STATUS (1UL<<7)
470#define I915_LEGACY_BLC_EVENT_STATUS (1UL<<6)
471#define I915_ODD_FIELD_INTERRUPT_STATUS (1UL<<5)
472#define I915_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4)
473#define I915_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
474#define I915_VBLANK_INTERRUPT_STATUS (1UL<<1)
475#define I915_OVERLAY_UPDATED_STATUS (1UL<<0)
397 476
398#define SRX_INDEX 0x3c4 477#define SRX_INDEX 0x3c4
399#define SRX_DATA 0x3c5 478#define SRX_DATA 0x3c5
@@ -566,6 +645,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
566#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6) 645#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
567#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21) 646#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
568#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20) 647#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
648#define XY_SRC_COPY_BLT_SRC_TILED (1<<15)
649#define XY_SRC_COPY_BLT_DST_TILED (1<<11)
569 650
570#define MI_BATCH_BUFFER ((0x30<<23)|1) 651#define MI_BATCH_BUFFER ((0x30<<23)|1)
571#define MI_BATCH_BUFFER_START (0x31<<23) 652#define MI_BATCH_BUFFER_START (0x31<<23)
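
A note on the two new tiling flags: the >> 2 applied to src_pitch/dst_pitch in the i915_irq.c tasklet hunks below suggests that, with these bits set, the blitter takes its pitch operand in dword rather than byte units. Hedged sketch of the pattern as used there:

	u32 pitch = sarea_priv->pitch * cpp;	/* pitch in bytes */

	if (sarea_priv->back_tiled) {
		cmd |= XY_SRC_COPY_BLT_SRC_TILED;
		pitch >>= 2;	/* tiled pitches programmed in dwords */
	}
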
diff --git a/drivers/char/drm/i915_irq.c b/drivers/char/drm/i915_irq.c
index 92653b38e64c..023ce66ef3ab 100644
--- a/drivers/char/drm/i915_irq.c
+++ b/drivers/char/drm/i915_irq.c
@@ -38,6 +38,109 @@
38#define MAX_NOPID ((u32)~0) 38#define MAX_NOPID ((u32)~0)
39 39
40/** 40/**
 41 * i915_get_pipe - return the pipe associated with a given plane
42 * @dev: DRM device
43 * @plane: plane to look for
44 *
45 * The Intel Mesa & 2D drivers call the vblank routines with a plane number
46 * rather than a pipe number, since they may not always be equal. This routine
47 * maps the given @plane back to a pipe number.
48 */
49static int
50i915_get_pipe(struct drm_device *dev, int plane)
51{
52 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
53 u32 dspcntr;
54
55 dspcntr = plane ? I915_READ(DSPBCNTR) : I915_READ(DSPACNTR);
56
57 return dspcntr & DISPPLANE_SEL_PIPE_MASK ? 1 : 0;
58}
59
60/**
 61 * i915_get_plane - return the plane associated with a given pipe
62 * @dev: DRM device
63 * @pipe: pipe to look for
64 *
65 * The Intel Mesa & 2D drivers call the vblank routines with a plane number
 66 * rather than a pipe number, since they may not always be equal. This routine
67 * maps the given @pipe back to a plane number.
68 */
69static int
70i915_get_plane(struct drm_device *dev, int pipe)
71{
72 if (i915_get_pipe(dev, 0) == pipe)
73 return 0;
74 return 1;
75}
76
77/**
78 * i915_pipe_enabled - check if a pipe is enabled
79 * @dev: DRM device
80 * @pipe: pipe to check
81 *
82 * Reading certain registers when the pipe is disabled can hang the chip.
83 * Use this routine to make sure the PLL is running and the pipe is active
84 * before reading such registers if unsure.
85 */
86static int
87i915_pipe_enabled(struct drm_device *dev, int pipe)
88{
89 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
90 unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
91
92 if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
93 return 1;
94
95 return 0;
96}
97
98/**
99 * Emit a synchronous flip.
100 *
101 * This function must be called with the drawable spinlock held.
102 */
103static void
104i915_dispatch_vsync_flip(struct drm_device *dev, struct drm_drawable_info *drw,
105 int plane)
106{
107 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
108 drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
109 u16 x1, y1, x2, y2;
110 int pf_planes = 1 << plane;
111
112 /* If the window is visible on the other plane, we have to flip on that
113 * plane as well.
114 */
115 if (plane == 1) {
116 x1 = sarea_priv->planeA_x;
117 y1 = sarea_priv->planeA_y;
118 x2 = x1 + sarea_priv->planeA_w;
119 y2 = y1 + sarea_priv->planeA_h;
120 } else {
121 x1 = sarea_priv->planeB_x;
122 y1 = sarea_priv->planeB_y;
123 x2 = x1 + sarea_priv->planeB_w;
124 y2 = y1 + sarea_priv->planeB_h;
125 }
126
127 if (x2 > 0 && y2 > 0) {
128 int i, num_rects = drw->num_rects;
129 struct drm_clip_rect *rect = drw->rects;
130
131 for (i = 0; i < num_rects; i++)
132 if (!(rect[i].x1 >= x2 || rect[i].y1 >= y2 ||
133 rect[i].x2 <= x1 || rect[i].y2 <= y1)) {
134 pf_planes = 0x3;
135
136 break;
137 }
138 }
139
140 i915_dispatch_flip(dev, pf_planes, 1);
141}
142
143/**
41 * Emit blits for scheduled buffer swaps. 144 * Emit blits for scheduled buffer swaps.
42 * 145 *
43 * This function will be called with the HW lock held. 146 * This function will be called with the HW lock held.
@@ -45,40 +148,59 @@
45static void i915_vblank_tasklet(struct drm_device *dev) 148static void i915_vblank_tasklet(struct drm_device *dev)
46{ 149{
47 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 150 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
48 unsigned long irqflags;
49 struct list_head *list, *tmp, hits, *hit; 151 struct list_head *list, *tmp, hits, *hit;
50 int nhits, nrects, slice[2], upper[2], lower[2], i; 152 int nhits, nrects, slice[2], upper[2], lower[2], i, num_pages;
51 unsigned counter[2] = { atomic_read(&dev->vbl_received), 153 unsigned counter[2];
52 atomic_read(&dev->vbl_received2) };
53 struct drm_drawable_info *drw; 154 struct drm_drawable_info *drw;
54 drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv; 155 drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
55 u32 cpp = dev_priv->cpp; 156 u32 cpp = dev_priv->cpp, offsets[3];
56 u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD | 157 u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
57 XY_SRC_COPY_BLT_WRITE_ALPHA | 158 XY_SRC_COPY_BLT_WRITE_ALPHA |
58 XY_SRC_COPY_BLT_WRITE_RGB) 159 XY_SRC_COPY_BLT_WRITE_RGB)
59 : XY_SRC_COPY_BLT_CMD; 160 : XY_SRC_COPY_BLT_CMD;
60 u32 pitchropcpp = (sarea_priv->pitch * cpp) | (0xcc << 16) | 161 u32 src_pitch = sarea_priv->pitch * cpp;
61 (cpp << 23) | (1 << 24); 162 u32 dst_pitch = sarea_priv->pitch * cpp;
163 /* COPY rop (0xcc), map cpp to magic color depth constants */
164 u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24);
62 RING_LOCALS; 165 RING_LOCALS;
63 166
167 if (sarea_priv->front_tiled) {
168 cmd |= XY_SRC_COPY_BLT_DST_TILED;
169 dst_pitch >>= 2;
170 }
171 if (sarea_priv->back_tiled) {
172 cmd |= XY_SRC_COPY_BLT_SRC_TILED;
173 src_pitch >>= 2;
174 }
175
176 counter[0] = drm_vblank_count(dev, 0);
177 counter[1] = drm_vblank_count(dev, 1);
178
64 DRM_DEBUG("\n"); 179 DRM_DEBUG("\n");
65 180
66 INIT_LIST_HEAD(&hits); 181 INIT_LIST_HEAD(&hits);
67 182
68 nhits = nrects = 0; 183 nhits = nrects = 0;
69 184
70 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags); 185 /* No irqsave/restore necessary. This tasklet may be run in an
186 * interrupt context or normal context, but we don't have to worry
187 * about getting interrupted by something acquiring the lock, because
 188 * we are the only interrupt-context user that acquires the lock.
189 */
190 spin_lock(&dev_priv->swaps_lock);
71 191
72 /* Find buffer swaps scheduled for this vertical blank */ 192 /* Find buffer swaps scheduled for this vertical blank */
73 list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) { 193 list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
74 drm_i915_vbl_swap_t *vbl_swap = 194 drm_i915_vbl_swap_t *vbl_swap =
75 list_entry(list, drm_i915_vbl_swap_t, head); 195 list_entry(list, drm_i915_vbl_swap_t, head);
196 int pipe = i915_get_pipe(dev, vbl_swap->plane);
76 197
77 if ((counter[vbl_swap->pipe] - vbl_swap->sequence) > (1<<23)) 198 if ((counter[pipe] - vbl_swap->sequence) > (1<<23))
78 continue; 199 continue;
79 200
80 list_del(list); 201 list_del(list);
81 dev_priv->swaps_pending--; 202 dev_priv->swaps_pending--;
203 drm_vblank_put(dev, pipe);
82 204
83 spin_unlock(&dev_priv->swaps_lock); 205 spin_unlock(&dev_priv->swaps_lock);
84 spin_lock(&dev->drw_lock); 206 spin_lock(&dev->drw_lock);
@@ -116,33 +238,23 @@ static void i915_vblank_tasklet(struct drm_device *dev)
116 spin_lock(&dev_priv->swaps_lock); 238 spin_lock(&dev_priv->swaps_lock);
117 } 239 }
118 240
119 if (nhits == 0) {
120 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
121 return;
122 }
123
124 spin_unlock(&dev_priv->swaps_lock); 241 spin_unlock(&dev_priv->swaps_lock);
125 242
126 i915_kernel_lost_context(dev); 243 if (nhits == 0)
127 244 return;
128 BEGIN_LP_RING(6);
129
130 OUT_RING(GFX_OP_DRAWRECT_INFO);
131 OUT_RING(0);
132 OUT_RING(0);
133 OUT_RING(sarea_priv->width | sarea_priv->height << 16);
134 OUT_RING(sarea_priv->width | sarea_priv->height << 16);
135 OUT_RING(0);
136
137 ADVANCE_LP_RING();
138 245
139 sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT; 246 i915_kernel_lost_context(dev);
140 247
141 upper[0] = upper[1] = 0; 248 upper[0] = upper[1] = 0;
142 slice[0] = max(sarea_priv->pipeA_h / nhits, 1); 249 slice[0] = max(sarea_priv->planeA_h / nhits, 1);
143 slice[1] = max(sarea_priv->pipeB_h / nhits, 1); 250 slice[1] = max(sarea_priv->planeB_h / nhits, 1);
144 lower[0] = sarea_priv->pipeA_y + slice[0]; 251 lower[0] = sarea_priv->planeA_y + slice[0];
145 lower[1] = sarea_priv->pipeB_y + slice[0]; 252 lower[1] = sarea_priv->planeB_y + slice[0];
253
254 offsets[0] = sarea_priv->front_offset;
255 offsets[1] = sarea_priv->back_offset;
256 offsets[2] = sarea_priv->third_offset;
257 num_pages = sarea_priv->third_handle ? 3 : 2;
146 258
147 spin_lock(&dev->drw_lock); 259 spin_lock(&dev->drw_lock);
148 260
@@ -154,6 +266,8 @@ static void i915_vblank_tasklet(struct drm_device *dev)
154 for (i = 0; i++ < nhits; 266 for (i = 0; i++ < nhits;
155 upper[0] = lower[0], lower[0] += slice[0], 267 upper[0] = lower[0], lower[0] += slice[0],
156 upper[1] = lower[1], lower[1] += slice[1]) { 268 upper[1] = lower[1], lower[1] += slice[1]) {
269 int init_drawrect = 1;
270
157 if (i == nhits) 271 if (i == nhits)
158 lower[0] = lower[1] = sarea_priv->height; 272 lower[0] = lower[1] = sarea_priv->height;
159 273
@@ -161,7 +275,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
161 drm_i915_vbl_swap_t *swap_hit = 275 drm_i915_vbl_swap_t *swap_hit =
162 list_entry(hit, drm_i915_vbl_swap_t, head); 276 list_entry(hit, drm_i915_vbl_swap_t, head);
163 struct drm_clip_rect *rect; 277 struct drm_clip_rect *rect;
164 int num_rects, pipe; 278 int num_rects, plane, front, back;
165 unsigned short top, bottom; 279 unsigned short top, bottom;
166 280
167 drw = drm_get_drawable_info(dev, swap_hit->drw_id); 281 drw = drm_get_drawable_info(dev, swap_hit->drw_id);
@@ -169,10 +283,50 @@ static void i915_vblank_tasklet(struct drm_device *dev)
169 if (!drw) 283 if (!drw)
170 continue; 284 continue;
171 285
286 plane = swap_hit->plane;
287
288 if (swap_hit->flip) {
289 i915_dispatch_vsync_flip(dev, drw, plane);
290 continue;
291 }
292
293 if (init_drawrect) {
294 int width = sarea_priv->width;
295 int height = sarea_priv->height;
296 if (IS_I965G(dev)) {
297 BEGIN_LP_RING(4);
298
299 OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
300 OUT_RING(0);
301 OUT_RING(((width - 1) & 0xffff) | ((height - 1) << 16));
302 OUT_RING(0);
303
304 ADVANCE_LP_RING();
305 } else {
306 BEGIN_LP_RING(6);
307
308 OUT_RING(GFX_OP_DRAWRECT_INFO);
309 OUT_RING(0);
310 OUT_RING(0);
311 OUT_RING(((width - 1) & 0xffff) | ((height - 1) << 16));
312 OUT_RING(0);
313 OUT_RING(0);
314
315 ADVANCE_LP_RING();
316 }
317
318 sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT;
319
320 init_drawrect = 0;
321 }
322
172 rect = drw->rects; 323 rect = drw->rects;
173 pipe = swap_hit->pipe; 324 top = upper[plane];
174 top = upper[pipe]; 325 bottom = lower[plane];
175 bottom = lower[pipe]; 326
327 front = (dev_priv->sarea_priv->pf_current_page >>
328 (2 * plane)) & 0x3;
329 back = (front + 1) % num_pages;
176 330
177 for (num_rects = drw->num_rects; num_rects--; rect++) { 331 for (num_rects = drw->num_rects; num_rects--; rect++) {
178 int y1 = max(rect->y1, top); 332 int y1 = max(rect->y1, top);
@@ -184,20 +338,20 @@ static void i915_vblank_tasklet(struct drm_device *dev)
184 BEGIN_LP_RING(8); 338 BEGIN_LP_RING(8);
185 339
186 OUT_RING(cmd); 340 OUT_RING(cmd);
187 OUT_RING(pitchropcpp); 341 OUT_RING(ropcpp | dst_pitch);
188 OUT_RING((y1 << 16) | rect->x1); 342 OUT_RING((y1 << 16) | rect->x1);
189 OUT_RING((y2 << 16) | rect->x2); 343 OUT_RING((y2 << 16) | rect->x2);
190 OUT_RING(sarea_priv->front_offset); 344 OUT_RING(offsets[front]);
191 OUT_RING((y1 << 16) | rect->x1); 345 OUT_RING((y1 << 16) | rect->x1);
192 OUT_RING(pitchropcpp & 0xffff); 346 OUT_RING(src_pitch);
193 OUT_RING(sarea_priv->back_offset); 347 OUT_RING(offsets[back]);
194 348
195 ADVANCE_LP_RING(); 349 ADVANCE_LP_RING();
196 } 350 }
197 } 351 }
198 } 352 }
199 353
200 spin_unlock_irqrestore(&dev->drw_lock, irqflags); 354 spin_unlock(&dev->drw_lock);
201 355
202 list_for_each_safe(hit, tmp, &hits) { 356 list_for_each_safe(hit, tmp, &hits) {
203 drm_i915_vbl_swap_t *swap_hit = 357 drm_i915_vbl_swap_t *swap_hit =
@@ -209,67 +363,112 @@ static void i915_vblank_tasklet(struct drm_device *dev)
209 } 363 }
210} 364}
211 365
366u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
367{
368 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
369 unsigned long high_frame;
370 unsigned long low_frame;
371 u32 high1, high2, low, count;
372 int pipe;
373
374 pipe = i915_get_pipe(dev, plane);
375 high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
376 low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
377
378 if (!i915_pipe_enabled(dev, pipe)) {
379 printk(KERN_ERR "trying to get vblank count for disabled "
380 "pipe %d\n", pipe);
381 return 0;
382 }
383
384 /*
385 * High & low register fields aren't synchronized, so make sure
386 * we get a low value that's stable across two reads of the high
387 * register.
388 */
389 do {
390 high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
391 PIPE_FRAME_HIGH_SHIFT);
392 low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
393 PIPE_FRAME_LOW_SHIFT);
394 high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
395 PIPE_FRAME_HIGH_SHIFT);
396 } while (high1 != high2);
397
398 count = (high1 << 8) | low;
399
 400 /* The count may be reset by another driver (e.g. the 2D driver);
 401 when it reads zero we cannot tell a wrap from a reset, so make
 402 a rough guess.
403 */
404 if (count == 0 && dev->last_vblank[pipe] < dev->max_vblank_count/2)
405 dev->last_vblank[pipe] = 0;
406
407 return count;
408}
409
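
Why the double read of the high half matters: if the counter carries from 0x00ffff to 0x010000 between the two register reads, combining mismatched halves would yield 0x0100ff or 0x0000ff. Retrying until the high half is stable rules both out. A generic sketch of the pattern (function names are illustrative):

	static u32 read_split_counter(u32 (*rd_high)(void), u32 (*rd_low)(void))
	{
		u32 high1, high2, low;

		do {
			high1 = rd_high();
			low = rd_low();
			high2 = rd_high();
		} while (high1 != high2);

		return (high1 << 8) | low;	/* 16 high + 8 low = 24 bits */
	}
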
212irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) 410irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
213{ 411{
214 struct drm_device *dev = (struct drm_device *) arg; 412 struct drm_device *dev = (struct drm_device *) arg;
215 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 413 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
216 u16 temp; 414 u32 iir;
217 u32 pipea_stats, pipeb_stats; 415 u32 pipea_stats, pipeb_stats;
218 416 int vblank = 0;
219 pipea_stats = I915_READ(I915REG_PIPEASTAT); 417
220 pipeb_stats = I915_READ(I915REG_PIPEBSTAT); 418 iir = I915_READ(I915REG_INT_IDENTITY_R);
221 419 if (iir == 0) {
222 temp = I915_READ16(I915REG_INT_IDENTITY_R); 420 DRM_DEBUG ("iir 0x%08x im 0x%08x ie 0x%08x pipea 0x%08x pipeb 0x%08x\n",
223 421 iir,
224 temp &= (USER_INT_FLAG | VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG); 422 I915_READ(I915REG_INT_MASK_R),
225 423 I915_READ(I915REG_INT_ENABLE_R),
226 DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp); 424 I915_READ(I915REG_PIPEASTAT),
227 425 I915_READ(I915REG_PIPEBSTAT));
228 if (temp == 0)
229 return IRQ_NONE; 426 return IRQ_NONE;
427 }
230 428
231 I915_WRITE16(I915REG_INT_IDENTITY_R, temp); 429 /*
232 (void) I915_READ16(I915REG_INT_IDENTITY_R); 430 * Clear the PIPE(A|B)STAT regs before the IIR otherwise
233 DRM_READMEMORYBARRIER(); 431 * we may get extra interrupts.
234 432 */
235 dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 433 if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) {
434 pipea_stats = I915_READ(I915REG_PIPEASTAT);
435 if (pipea_stats & (I915_START_VBLANK_INTERRUPT_STATUS|
436 I915_VBLANK_INTERRUPT_STATUS))
437 {
438 vblank++;
439 drm_handle_vblank(dev, i915_get_plane(dev, 0));
440 }
441 I915_WRITE(I915REG_PIPEASTAT, pipea_stats);
442 }
443 if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) {
444 pipeb_stats = I915_READ(I915REG_PIPEBSTAT);
445 if (pipeb_stats & (I915_START_VBLANK_INTERRUPT_STATUS|
446 I915_VBLANK_INTERRUPT_STATUS))
447 {
448 vblank++;
449 drm_handle_vblank(dev, i915_get_plane(dev, 1));
450 }
451 I915_WRITE(I915REG_PIPEBSTAT, pipeb_stats);
452 }
236 453
237 if (temp & USER_INT_FLAG) 454 if (dev_priv->sarea_priv)
238 DRM_WAKEUP(&dev_priv->irq_queue); 455 dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
239 456
240 if (temp & (VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG)) { 457 I915_WRITE(I915REG_INT_IDENTITY_R, iir);
241 int vblank_pipe = dev_priv->vblank_pipe; 458 (void) I915_READ(I915REG_INT_IDENTITY_R); /* Flush posted write */
242
243 if ((vblank_pipe &
244 (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B))
245 == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) {
246 if (temp & VSYNC_PIPEA_FLAG)
247 atomic_inc(&dev->vbl_received);
248 if (temp & VSYNC_PIPEB_FLAG)
249 atomic_inc(&dev->vbl_received2);
250 } else if (((temp & VSYNC_PIPEA_FLAG) &&
251 (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) ||
252 ((temp & VSYNC_PIPEB_FLAG) &&
253 (vblank_pipe & DRM_I915_VBLANK_PIPE_B)))
254 atomic_inc(&dev->vbl_received);
255
256 DRM_WAKEUP(&dev->vbl_queue);
257 drm_vbl_send_signals(dev);
258 459
460 if (iir & I915_USER_INTERRUPT) {
461 DRM_WAKEUP(&dev_priv->irq_queue);
462 }
463 if (vblank) {
259 if (dev_priv->swaps_pending > 0) 464 if (dev_priv->swaps_pending > 0)
260 drm_locked_tasklet(dev, i915_vblank_tasklet); 465 drm_locked_tasklet(dev, i915_vblank_tasklet);
261 I915_WRITE(I915REG_PIPEASTAT,
262 pipea_stats|I915_VBLANK_INTERRUPT_ENABLE|
263 I915_VBLANK_CLEAR);
264 I915_WRITE(I915REG_PIPEBSTAT,
265 pipeb_stats|I915_VBLANK_INTERRUPT_ENABLE|
266 I915_VBLANK_CLEAR);
267 } 466 }
268 467
269 return IRQ_HANDLED; 468 return IRQ_HANDLED;
270} 469}
271 470
272static int i915_emit_irq(struct drm_device * dev) 471static int i915_emit_irq(struct drm_device *dev)
273{ 472{
274 drm_i915_private_t *dev_priv = dev->dev_private; 473 drm_i915_private_t *dev_priv = dev->dev_private;
275 RING_LOCALS; 474 RING_LOCALS;
@@ -316,42 +515,12 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
316 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter); 515 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
317 } 516 }
318 517
319 dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 518 if (dev_priv->sarea_priv)
320 return ret; 519 dev_priv->sarea_priv->last_dispatch =
321} 520 READ_BREADCRUMB(dev_priv);
322
323static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequence,
324 atomic_t *counter)
325{
326 drm_i915_private_t *dev_priv = dev->dev_private;
327 unsigned int cur_vblank;
328 int ret = 0;
329
330 if (!dev_priv) {
331 DRM_ERROR("called with no initialization\n");
332 return -EINVAL;
333 }
334
335 DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
336 (((cur_vblank = atomic_read(counter))
337 - *sequence) <= (1<<23)));
338
339 *sequence = cur_vblank;
340
341 return ret; 521 return ret;
342} 522}
343 523
344
345int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
346{
347 return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received);
348}
349
350int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
351{
352 return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received2);
353}
354
355/* Needs the lock as it touches the ring. 524/* Needs the lock as it touches the ring.
356 */ 525 */
357int i915_irq_emit(struct drm_device *dev, void *data, 526int i915_irq_emit(struct drm_device *dev, void *data,
@@ -394,18 +563,96 @@ int i915_irq_wait(struct drm_device *dev, void *data,
394 return i915_wait_irq(dev, irqwait->irq_seq); 563 return i915_wait_irq(dev, irqwait->irq_seq);
395} 564}
396 565
566int i915_enable_vblank(struct drm_device *dev, int plane)
567{
568 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
569 int pipe = i915_get_pipe(dev, plane);
570 u32 pipestat_reg = 0;
571 u32 pipestat;
572
573 switch (pipe) {
574 case 0:
575 pipestat_reg = I915REG_PIPEASTAT;
576 dev_priv->irq_enable_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
577 break;
578 case 1:
579 pipestat_reg = I915REG_PIPEBSTAT;
580 dev_priv->irq_enable_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
581 break;
582 default:
583 DRM_ERROR("tried to enable vblank on non-existent pipe %d\n",
584 pipe);
585 break;
586 }
587
588 if (pipestat_reg)
589 {
590 pipestat = I915_READ (pipestat_reg);
591 /*
592 * Older chips didn't have the start vblank interrupt,
593 * but
594 */
595 if (IS_I965G (dev))
596 pipestat |= I915_START_VBLANK_INTERRUPT_ENABLE;
597 else
598 pipestat |= I915_VBLANK_INTERRUPT_ENABLE;
599 /*
600 * Clear any pending status
601 */
602 pipestat |= (I915_START_VBLANK_INTERRUPT_STATUS |
603 I915_VBLANK_INTERRUPT_STATUS);
604 I915_WRITE(pipestat_reg, pipestat);
605 }
606 I915_WRITE(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
607
608 return 0;
609}
610
611void i915_disable_vblank(struct drm_device *dev, int plane)
612{
613 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
614 int pipe = i915_get_pipe(dev, plane);
615 u32 pipestat_reg = 0;
616 u32 pipestat;
617
618 switch (pipe) {
619 case 0:
620 pipestat_reg = I915REG_PIPEASTAT;
621 dev_priv->irq_enable_reg &= ~I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
622 break;
623 case 1:
624 pipestat_reg = I915REG_PIPEBSTAT;
625 dev_priv->irq_enable_reg &= ~I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
626 break;
627 default:
628 DRM_ERROR("tried to disable vblank on non-existent pipe %d\n",
629 pipe);
630 break;
631 }
632
633 I915_WRITE(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
634 if (pipestat_reg)
635 {
636 pipestat = I915_READ (pipestat_reg);
637 pipestat &= ~(I915_START_VBLANK_INTERRUPT_ENABLE |
638 I915_VBLANK_INTERRUPT_ENABLE);
639 /*
640 * Clear any pending status
641 */
642 pipestat |= (I915_START_VBLANK_INTERRUPT_STATUS |
643 I915_VBLANK_INTERRUPT_STATUS);
644 I915_WRITE(pipestat_reg, pipestat);
645 }
646}
647
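
Both helpers OR the *_STATUS bits into the value they write back because those bits are write-one-to-clear: a single register write updates the enable half and acknowledges anything already latched. A condensed sketch of that idiom (pipestat_update is an illustrative name):

	static void pipestat_update(drm_i915_private_t *dev_priv, u32 reg,
				    u32 enable_bits)
	{
		u32 val = I915_READ(reg) | enable_bits;

		/* write 1 to clear any pending status */
		val |= I915_START_VBLANK_INTERRUPT_STATUS |
		       I915_VBLANK_INTERRUPT_STATUS;
		I915_WRITE(reg, val);
	}
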
397static void i915_enable_interrupt (struct drm_device *dev) 648static void i915_enable_interrupt (struct drm_device *dev)
398{ 649{
399 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 650 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
400 u16 flag;
401 651
402 flag = 0; 652 dev_priv->irq_enable_reg |= I915_USER_INTERRUPT;
403 if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
404 flag |= VSYNC_PIPEA_FLAG;
405 if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
406 flag |= VSYNC_PIPEB_FLAG;
407 653
408 I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG | flag); 654 I915_WRITE(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
655 dev_priv->irq_enabled = 1;
409} 656}
410 657
411/* Set the vblank monitor pipe 658/* Set the vblank monitor pipe
@@ -428,8 +675,6 @@ int i915_vblank_pipe_set(struct drm_device *dev, void *data,
428 675
429 dev_priv->vblank_pipe = pipe->pipe; 676 dev_priv->vblank_pipe = pipe->pipe;
430 677
431 i915_enable_interrupt (dev);
432
433 return 0; 678 return 0;
434} 679}
435 680
@@ -447,9 +692,9 @@ int i915_vblank_pipe_get(struct drm_device *dev, void *data,
447 692
448 flag = I915_READ(I915REG_INT_ENABLE_R); 693 flag = I915_READ(I915REG_INT_ENABLE_R);
449 pipe->pipe = 0; 694 pipe->pipe = 0;
450 if (flag & VSYNC_PIPEA_FLAG) 695 if (flag & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT)
451 pipe->pipe |= DRM_I915_VBLANK_PIPE_A; 696 pipe->pipe |= DRM_I915_VBLANK_PIPE_A;
452 if (flag & VSYNC_PIPEB_FLAG) 697 if (flag & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
453 pipe->pipe |= DRM_I915_VBLANK_PIPE_B; 698 pipe->pipe |= DRM_I915_VBLANK_PIPE_B;
454 699
455 return 0; 700 return 0;
@@ -464,27 +709,30 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
464 drm_i915_private_t *dev_priv = dev->dev_private; 709 drm_i915_private_t *dev_priv = dev->dev_private;
465 drm_i915_vblank_swap_t *swap = data; 710 drm_i915_vblank_swap_t *swap = data;
466 drm_i915_vbl_swap_t *vbl_swap; 711 drm_i915_vbl_swap_t *vbl_swap;
467 unsigned int pipe, seqtype, curseq; 712 unsigned int pipe, seqtype, curseq, plane;
468 unsigned long irqflags; 713 unsigned long irqflags;
469 struct list_head *list; 714 struct list_head *list;
715 int ret;
470 716
471 if (!dev_priv) { 717 if (!dev_priv) {
472 DRM_ERROR("%s called with no initialization\n", __func__); 718 DRM_ERROR("%s called with no initialization\n", __func__);
473 return -EINVAL; 719 return -EINVAL;
474 } 720 }
475 721
476 if (dev_priv->sarea_priv->rotation) { 722 if (!dev_priv->sarea_priv || dev_priv->sarea_priv->rotation) {
477 DRM_DEBUG("Rotation not supported\n"); 723 DRM_DEBUG("Rotation not supported\n");
478 return -EINVAL; 724 return -EINVAL;
479 } 725 }
480 726
481 if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE | 727 if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
482 _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) { 728 _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS |
729 _DRM_VBLANK_FLIP)) {
483 DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype); 730 DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype);
484 return -EINVAL; 731 return -EINVAL;
485 } 732 }
486 733
487 pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0; 734 plane = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
735 pipe = i915_get_pipe(dev, plane);
488 736
489 seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE); 737 seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);
490 738
@@ -495,6 +743,11 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
495 743
496 spin_lock_irqsave(&dev->drw_lock, irqflags); 744 spin_lock_irqsave(&dev->drw_lock, irqflags);
497 745
746 /* It makes no sense to schedule a swap for a drawable that doesn't have
747 * valid information at this point. E.g. this could mean that the X
748 * server is too old to push drawable information to the DRM, in which
749 * case all such swaps would become ineffective.
750 */
498 if (!drm_get_drawable_info(dev, swap->drawable)) { 751 if (!drm_get_drawable_info(dev, swap->drawable)) {
499 spin_unlock_irqrestore(&dev->drw_lock, irqflags); 752 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
500 DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable); 753 DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable);
@@ -503,7 +756,8 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
503 756
504 spin_unlock_irqrestore(&dev->drw_lock, irqflags); 757 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
505 758
506 curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received); 759 drm_update_vblank_count(dev, pipe);
760 curseq = drm_vblank_count(dev, pipe);
507 761
508 if (seqtype == _DRM_VBLANK_RELATIVE) 762 if (seqtype == _DRM_VBLANK_RELATIVE)
509 swap->sequence += curseq; 763 swap->sequence += curseq;
@@ -517,14 +771,43 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
517 } 771 }
518 } 772 }
519 773
774 if (swap->seqtype & _DRM_VBLANK_FLIP) {
775 swap->sequence--;
776
777 if ((curseq - swap->sequence) <= (1<<23)) {
778 struct drm_drawable_info *drw;
779
780 LOCK_TEST_WITH_RETURN(dev, file_priv);
781
782 spin_lock_irqsave(&dev->drw_lock, irqflags);
783
784 drw = drm_get_drawable_info(dev, swap->drawable);
785
786 if (!drw) {
787 spin_unlock_irqrestore(&dev->drw_lock,
788 irqflags);
789 DRM_DEBUG("Invalid drawable ID %d\n",
790 swap->drawable);
791 return -EINVAL;
792 }
793
794 i915_dispatch_vsync_flip(dev, drw, plane);
795
796 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
797
798 return 0;
799 }
800 }
801
520 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags); 802 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
521 803
522 list_for_each(list, &dev_priv->vbl_swaps.head) { 804 list_for_each(list, &dev_priv->vbl_swaps.head) {
523 vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head); 805 vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);
524 806
525 if (vbl_swap->drw_id == swap->drawable && 807 if (vbl_swap->drw_id == swap->drawable &&
526 vbl_swap->pipe == pipe && 808 vbl_swap->plane == plane &&
527 vbl_swap->sequence == swap->sequence) { 809 vbl_swap->sequence == swap->sequence) {
810 vbl_swap->flip = (swap->seqtype & _DRM_VBLANK_FLIP);
528 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); 811 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
529 DRM_DEBUG("Already scheduled\n"); 812 DRM_DEBUG("Already scheduled\n");
530 return 0; 813 return 0;
@@ -547,9 +830,19 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
547 830
548 DRM_DEBUG("\n"); 831 DRM_DEBUG("\n");
549 832
833 ret = drm_vblank_get(dev, pipe);
834 if (ret) {
835 drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
836 return ret;
837 }
838
550 vbl_swap->drw_id = swap->drawable; 839 vbl_swap->drw_id = swap->drawable;
551 vbl_swap->pipe = pipe; 840 vbl_swap->plane = plane;
552 vbl_swap->sequence = swap->sequence; 841 vbl_swap->sequence = swap->sequence;
842 vbl_swap->flip = (swap->seqtype & _DRM_VBLANK_FLIP);
843
844 if (vbl_swap->flip)
845 swap->sequence++;
553 846
554 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags); 847 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
555 848
@@ -567,37 +860,57 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
567{ 860{
568 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 861 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
569 862
570 I915_WRITE16(I915REG_HWSTAM, 0xfffe); 863 I915_WRITE16(I915REG_HWSTAM, 0xeffe);
571 I915_WRITE16(I915REG_INT_MASK_R, 0x0); 864 I915_WRITE16(I915REG_INT_MASK_R, 0x0);
572 I915_WRITE16(I915REG_INT_ENABLE_R, 0x0); 865 I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
573} 866}
574 867
575void i915_driver_irq_postinstall(struct drm_device * dev) 868int i915_driver_irq_postinstall(struct drm_device * dev)
576{ 869{
577 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 870 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
871 int ret, num_pipes = 2;
578 872
579 spin_lock_init(&dev_priv->swaps_lock); 873 spin_lock_init(&dev_priv->swaps_lock);
580 INIT_LIST_HEAD(&dev_priv->vbl_swaps.head); 874 INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
581 dev_priv->swaps_pending = 0; 875 dev_priv->swaps_pending = 0;
582 876
583 if (!dev_priv->vblank_pipe) 877 dev_priv->user_irq_refcount = 0;
584 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A; 878 dev_priv->irq_enable_reg = 0;
879
880 ret = drm_vblank_init(dev, num_pipes);
881 if (ret)
882 return ret;
883
884 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
885
585 i915_enable_interrupt(dev); 886 i915_enable_interrupt(dev);
586 DRM_INIT_WAITQUEUE(&dev_priv->irq_queue); 887 DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
888
889 /*
890 * Initialize the hardware status page IRQ location.
891 */
892
893 I915_WRITE(I915REG_INSTPM, (1 << 5) | (1 << 21));
894 return 0;
587} 895}
588 896
589void i915_driver_irq_uninstall(struct drm_device * dev) 897void i915_driver_irq_uninstall(struct drm_device * dev)
590{ 898{
591 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 899 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
592 u16 temp; 900 u32 temp;
593 901
594 if (!dev_priv) 902 if (!dev_priv)
595 return; 903 return;
596 904
597 I915_WRITE16(I915REG_HWSTAM, 0xffff); 905 dev_priv->irq_enabled = 0;
598 I915_WRITE16(I915REG_INT_MASK_R, 0xffff); 906 I915_WRITE(I915REG_HWSTAM, 0xffffffff);
599 I915_WRITE16(I915REG_INT_ENABLE_R, 0x0); 907 I915_WRITE(I915REG_INT_MASK_R, 0xffffffff);
600 908 I915_WRITE(I915REG_INT_ENABLE_R, 0x0);
601 temp = I915_READ16(I915REG_INT_IDENTITY_R); 909
602 I915_WRITE16(I915REG_INT_IDENTITY_R, temp); 910 temp = I915_READ(I915REG_PIPEASTAT);
911 I915_WRITE(I915REG_PIPEASTAT, temp);
912 temp = I915_READ(I915REG_PIPEBSTAT);
913 I915_WRITE(I915REG_PIPEBSTAT, temp);
914 temp = I915_READ(I915REG_INT_IDENTITY_R);
915 I915_WRITE(I915REG_INT_IDENTITY_R, temp);
603} 916}
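
irq_postinstall can now fail, so it returns int and propagates the drm_vblank_init() error; the 24-bit max_vblank_count matches the frame counter width documented in the i915_drv.h hunk above. A distilled sketch (example_postinstall is illustrative):

	static int example_postinstall(struct drm_device *dev)
	{
		int ret = drm_vblank_init(dev, 2);	/* two pipes */

		if (ret)
			return ret;
		dev->max_vblank_count = 0xffffff;	/* 24-bit hw counter */
		return 0;
	}
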
diff --git a/drivers/char/drm/mga_drv.c b/drivers/char/drm/mga_drv.c
index 5572939fc7d1..6b3790939e76 100644
--- a/drivers/char/drm/mga_drv.c
+++ b/drivers/char/drm/mga_drv.c
@@ -45,15 +45,16 @@ static struct pci_device_id pciidlist[] = {
45static struct drm_driver driver = { 45static struct drm_driver driver = {
46 .driver_features = 46 .driver_features =
47 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | 47 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
48 DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | 48 DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
49 DRIVER_IRQ_VBL,
50 .dev_priv_size = sizeof(drm_mga_buf_priv_t), 49 .dev_priv_size = sizeof(drm_mga_buf_priv_t),
51 .load = mga_driver_load, 50 .load = mga_driver_load,
52 .unload = mga_driver_unload, 51 .unload = mga_driver_unload,
53 .lastclose = mga_driver_lastclose, 52 .lastclose = mga_driver_lastclose,
54 .dma_quiescent = mga_driver_dma_quiescent, 53 .dma_quiescent = mga_driver_dma_quiescent,
55 .device_is_agp = mga_driver_device_is_agp, 54 .device_is_agp = mga_driver_device_is_agp,
56 .vblank_wait = mga_driver_vblank_wait, 55 .get_vblank_counter = mga_get_vblank_counter,
56 .enable_vblank = mga_enable_vblank,
57 .disable_vblank = mga_disable_vblank,
57 .irq_preinstall = mga_driver_irq_preinstall, 58 .irq_preinstall = mga_driver_irq_preinstall,
58 .irq_postinstall = mga_driver_irq_postinstall, 59 .irq_postinstall = mga_driver_irq_postinstall,
59 .irq_uninstall = mga_driver_irq_uninstall, 60 .irq_uninstall = mga_driver_irq_uninstall,
diff --git a/drivers/char/drm/mga_drv.h b/drivers/char/drm/mga_drv.h
index f6ebd24bd587..8f7291f36363 100644
--- a/drivers/char/drm/mga_drv.h
+++ b/drivers/char/drm/mga_drv.h
@@ -120,6 +120,7 @@ typedef struct drm_mga_private {
120 u32 clear_cmd; 120 u32 clear_cmd;
121 u32 maccess; 121 u32 maccess;
122 122
123 atomic_t vbl_received; /**< Number of vblanks received. */
123 wait_queue_head_t fence_queue; 124 wait_queue_head_t fence_queue;
124 atomic_t last_fence_retired; 125 atomic_t last_fence_retired;
125 u32 next_fence_to_post; 126 u32 next_fence_to_post;
@@ -181,11 +182,14 @@ extern int mga_warp_install_microcode(drm_mga_private_t * dev_priv);
 extern int mga_warp_init(drm_mga_private_t * dev_priv);
 
 				/* mga_irq.c */
+extern int mga_enable_vblank(struct drm_device *dev, int crtc);
+extern void mga_disable_vblank(struct drm_device *dev, int crtc);
+extern u32 mga_get_vblank_counter(struct drm_device *dev, int crtc);
 extern int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence);
 extern int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
 extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS);
 extern void mga_driver_irq_preinstall(struct drm_device * dev);
-extern void mga_driver_irq_postinstall(struct drm_device * dev);
+extern int mga_driver_irq_postinstall(struct drm_device * dev);
 extern void mga_driver_irq_uninstall(struct drm_device * dev);
 extern long mga_compat_ioctl(struct file *filp, unsigned int cmd,
 			     unsigned long arg);
diff --git a/drivers/char/drm/mga_irq.c b/drivers/char/drm/mga_irq.c
index 9302cb8f0f83..06852fb4b278 100644
--- a/drivers/char/drm/mga_irq.c
+++ b/drivers/char/drm/mga_irq.c
@@ -35,6 +35,20 @@
35#include "mga_drm.h" 35#include "mga_drm.h"
36#include "mga_drv.h" 36#include "mga_drv.h"
37 37
38u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
39{
40 const drm_mga_private_t *const dev_priv =
41 (drm_mga_private_t *) dev->dev_private;
42
43 if (crtc != 0) {
44 return 0;
45 }
46
47
48 return atomic_read(&dev_priv->vbl_received);
49}
50
51
38irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS) 52irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
39{ 53{
40 struct drm_device *dev = (struct drm_device *) arg; 54 struct drm_device *dev = (struct drm_device *) arg;
@@ -47,9 +61,8 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
 	/* VBLANK interrupt */
 	if (status & MGA_VLINEPEN) {
 		MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
-		atomic_inc(&dev->vbl_received);
-		DRM_WAKEUP(&dev->vbl_queue);
-		drm_vbl_send_signals(dev);
+		atomic_inc(&dev_priv->vbl_received);
+		drm_handle_vblank(dev, 0);
 		handled = 1;
 	}
 
@@ -78,22 +91,34 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
 	return IRQ_NONE;
 }
 
-int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence)
+int mga_enable_vblank(struct drm_device *dev, int crtc)
 {
-	unsigned int cur_vblank;
-	int ret = 0;
+	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
 
-	/* Assume that the user has missed the current sequence number
-	 * by about a day rather than she wants to wait for years
-	 * using vertical blanks...
-	 */
-	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
-		    (((cur_vblank = atomic_read(&dev->vbl_received))
-		      - *sequence) <= (1 << 23)));
+	if (crtc != 0) {
+		DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
+			  crtc);
+		return 0;
+	}
 
-	*sequence = cur_vblank;
+	MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN);
+	return 0;
+}
 
-	return ret;
+
+void mga_disable_vblank(struct drm_device *dev, int crtc)
+{
+	if (crtc != 0) {
+		DRM_ERROR("tried to disable vblank on non-existent crtc %d\n",
+			  crtc);
+	}
+
+	/* Do *NOT* disable the vertical refresh interrupt.  MGA doesn't have
+	 * a nice hardware counter that tracks the number of refreshes when
+	 * the interrupt is disabled, and the kernel doesn't know the refresh
+	 * rate to calculate an estimate.
+	 */
+	/* MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); */
 }
 
 int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
@@ -125,14 +150,22 @@ void mga_driver_irq_preinstall(struct drm_device * dev)
 	MGA_WRITE(MGA_ICLEAR, ~0);
 }
 
-void mga_driver_irq_postinstall(struct drm_device * dev)
+int mga_driver_irq_postinstall(struct drm_device * dev)
 {
 	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
+	int ret;
+
+	ret = drm_vblank_init(dev, 1);
+	if (ret)
+		return ret;
 
 	DRM_INIT_WAITQUEUE(&dev_priv->fence_queue);
 
-	/* Turn on vertical blank interrupt and soft trap interrupt. */
-	MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN);
+	/* Turn on soft trap interrupt.  Vertical blank interrupts are enabled
+	 * in mga_enable_vblank.
+	 */
+	MGA_WRITE(MGA_IEN, MGA_SOFTRAPEN);
+	return 0;
 }
 
 void mga_driver_irq_uninstall(struct drm_device * dev)
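The postinstall hooks change from void to int precisely so that drm_vblank_init() failures can propagate. A minimal postinstall for a single-CRTC driver, condensed from the mga hunk above (sketch; kernel context assumed):

	int foo_driver_irq_postinstall(struct drm_device *dev)
	{
		int ret = drm_vblank_init(dev, 1);	/* allocate per-CRTC state */

		if (ret)
			return ret;

		/* Enable non-vblank interrupt sources here; vblank sources
		 * are turned on later through the enable_vblank hook.
		 */
		return 0;
	}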
diff --git a/drivers/char/drm/r128_drv.c b/drivers/char/drm/r128_drv.c
index 6108e7587e12..2888aa01ebc7 100644
--- a/drivers/char/drm/r128_drv.c
+++ b/drivers/char/drm/r128_drv.c
@@ -43,12 +43,13 @@ static struct pci_device_id pciidlist[] = {
 static struct drm_driver driver = {
 	.driver_features =
 	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
-	    DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
-	    DRIVER_IRQ_VBL,
+	    DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
 	.dev_priv_size = sizeof(drm_r128_buf_priv_t),
 	.preclose = r128_driver_preclose,
 	.lastclose = r128_driver_lastclose,
-	.vblank_wait = r128_driver_vblank_wait,
+	.get_vblank_counter = r128_get_vblank_counter,
+	.enable_vblank = r128_enable_vblank,
+	.disable_vblank = r128_disable_vblank,
 	.irq_preinstall = r128_driver_irq_preinstall,
 	.irq_postinstall = r128_driver_irq_postinstall,
 	.irq_uninstall = r128_driver_irq_uninstall,
diff --git a/drivers/char/drm/r128_drv.h b/drivers/char/drm/r128_drv.h
index 011105e51ac6..80af9e09e75d 100644
--- a/drivers/char/drm/r128_drv.h
+++ b/drivers/char/drm/r128_drv.h
@@ -97,6 +97,8 @@ typedef struct drm_r128_private {
 	u32 crtc_offset;
 	u32 crtc_offset_cntl;
 
+	atomic_t vbl_received;
+
 	u32 color_fmt;
 	unsigned int front_offset;
 	unsigned int front_pitch;
@@ -149,11 +151,12 @@ extern int r128_wait_ring(drm_r128_private_t * dev_priv, int n);
 extern int r128_do_cce_idle(drm_r128_private_t * dev_priv);
 extern int r128_do_cleanup_cce(struct drm_device * dev);
 
-extern int r128_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
-
+extern int r128_enable_vblank(struct drm_device *dev, int crtc);
+extern void r128_disable_vblank(struct drm_device *dev, int crtc);
+extern u32 r128_get_vblank_counter(struct drm_device *dev, int crtc);
 extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS);
 extern void r128_driver_irq_preinstall(struct drm_device * dev);
-extern void r128_driver_irq_postinstall(struct drm_device * dev);
+extern int r128_driver_irq_postinstall(struct drm_device * dev);
 extern void r128_driver_irq_uninstall(struct drm_device * dev);
 extern void r128_driver_lastclose(struct drm_device * dev);
 extern void r128_driver_preclose(struct drm_device * dev,
diff --git a/drivers/char/drm/r128_irq.c b/drivers/char/drm/r128_irq.c
index c76fdca7662d..5b95bd898f95 100644
--- a/drivers/char/drm/r128_irq.c
+++ b/drivers/char/drm/r128_irq.c
@@ -35,6 +35,16 @@
35#include "r128_drm.h" 35#include "r128_drm.h"
36#include "r128_drv.h" 36#include "r128_drv.h"
37 37
38u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
39{
40 const drm_r128_private_t *dev_priv = dev->dev_private;
41
42 if (crtc != 0)
43 return 0;
44
45 return atomic_read(&dev_priv->vbl_received);
46}
47
38irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS) 48irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
39{ 49{
40 struct drm_device *dev = (struct drm_device *) arg; 50 struct drm_device *dev = (struct drm_device *) arg;
@@ -46,30 +56,38 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
 	/* VBLANK interrupt */
 	if (status & R128_CRTC_VBLANK_INT) {
 		R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
-		atomic_inc(&dev->vbl_received);
-		DRM_WAKEUP(&dev->vbl_queue);
-		drm_vbl_send_signals(dev);
+		atomic_inc(&dev_priv->vbl_received);
+		drm_handle_vblank(dev, 0);
 		return IRQ_HANDLED;
 	}
 	return IRQ_NONE;
 }
 
-int r128_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence)
+int r128_enable_vblank(struct drm_device *dev, int crtc)
 {
-	unsigned int cur_vblank;
-	int ret = 0;
+	drm_r128_private_t *dev_priv = dev->dev_private;
 
-	/* Assume that the user has missed the current sequence number
-	 * by about a day rather than she wants to wait for years
-	 * using vertical blanks...
-	 */
-	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
-		    (((cur_vblank = atomic_read(&dev->vbl_received))
-		      - *sequence) <= (1 << 23)));
+	if (crtc != 0) {
+		DRM_ERROR("%s: bad crtc %d\n", __FUNCTION__, crtc);
+		return -EINVAL;
+	}
 
-	*sequence = cur_vblank;
+	R128_WRITE(R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN);
+	return 0;
+}
+
+void r128_disable_vblank(struct drm_device *dev, int crtc)
+{
+	if (crtc != 0)
+		DRM_ERROR("%s: bad crtc %d\n", __FUNCTION__, crtc);
 
-	return ret;
+	/*
+	 * FIXME: implement proper interrupt disable by using the vblank
+	 * counter register (if available)
+	 *
+	 * R128_WRITE(R128_GEN_INT_CNTL,
+	 *            R128_READ(R128_GEN_INT_CNTL) & ~R128_CRTC_VBLANK_INT_EN);
+	 */
 }
 
 void r128_driver_irq_preinstall(struct drm_device * dev)
@@ -82,12 +100,9 @@ void r128_driver_irq_preinstall(struct drm_device * dev)
 	R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
 }
 
-void r128_driver_irq_postinstall(struct drm_device * dev)
+int r128_driver_irq_postinstall(struct drm_device * dev)
 {
-	drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
-
-	/* Turn on VBL interrupt */
-	R128_WRITE(R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN);
+	return drm_vblank_init(dev, 1);
 }
 
 void r128_driver_irq_uninstall(struct drm_device * dev)
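Neither mga nor r128 has a hardware frame counter, so both count interrupts in software: the counter moves from the shared drm_device into the driver-private structure, and the handler forwards each event to the core. The two lines that matter, condensed from the handlers above:

	/* Inside the IRQ handler, after acknowledging the vblank status bit: */
	atomic_inc(&dev_priv->vbl_received);	/* software frame counter    */
	drm_handle_vblank(dev, 0);		/* core wakes CRTC-0 waiters */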
diff --git a/drivers/char/drm/radeon_drv.c b/drivers/char/drm/radeon_drv.c
index 349ac3d3b848..a2610319624d 100644
--- a/drivers/char/drm/radeon_drv.c
+++ b/drivers/char/drm/radeon_drv.c
@@ -59,8 +59,7 @@ static struct pci_device_id pciidlist[] = {
 static struct drm_driver driver = {
 	.driver_features =
 	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
-	    DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED |
-	    DRIVER_IRQ_VBL | DRIVER_IRQ_VBL2,
+	    DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED,
 	.dev_priv_size = sizeof(drm_radeon_buf_priv_t),
 	.load = radeon_driver_load,
 	.firstopen = radeon_driver_firstopen,
@@ -69,8 +68,9 @@ static struct drm_driver driver = {
 	.postclose = radeon_driver_postclose,
 	.lastclose = radeon_driver_lastclose,
 	.unload = radeon_driver_unload,
-	.vblank_wait = radeon_driver_vblank_wait,
-	.vblank_wait2 = radeon_driver_vblank_wait2,
+	.get_vblank_counter = radeon_get_vblank_counter,
+	.enable_vblank = radeon_enable_vblank,
+	.disable_vblank = radeon_disable_vblank,
 	.dri_library_name = dri_library_name,
 	.irq_preinstall = radeon_driver_irq_preinstall,
 	.irq_postinstall = radeon_driver_irq_postinstall,
diff --git a/drivers/char/drm/radeon_drv.h b/drivers/char/drm/radeon_drv.h
index 173ae620223a..b791420bd3d9 100644
--- a/drivers/char/drm/radeon_drv.h
+++ b/drivers/char/drm/radeon_drv.h
@@ -304,6 +304,9 @@ typedef struct drm_radeon_private {
 
 	u32 scratch_ages[5];
 
+	unsigned int crtc_last_cnt;
+	unsigned int crtc2_last_cnt;
+
 	/* starting from here on, data is preserved accross an open */
 	uint32_t flags;		/* see radeon_chip_flags */
 	unsigned long fb_aper_offset;
@@ -374,13 +377,13 @@ extern int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *
 extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv);
 
 extern void radeon_do_release(struct drm_device * dev);
-extern int radeon_driver_vblank_wait(struct drm_device * dev,
-				     unsigned int *sequence);
-extern int radeon_driver_vblank_wait2(struct drm_device * dev,
-				      unsigned int *sequence);
+extern u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc);
+extern int radeon_enable_vblank(struct drm_device *dev, int crtc);
+extern void radeon_disable_vblank(struct drm_device *dev, int crtc);
+extern void radeon_do_release(struct drm_device * dev);
 extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS);
 extern void radeon_driver_irq_preinstall(struct drm_device * dev);
-extern void radeon_driver_irq_postinstall(struct drm_device * dev);
+extern int radeon_driver_irq_postinstall(struct drm_device * dev);
 extern void radeon_driver_irq_uninstall(struct drm_device * dev);
 extern int radeon_vblank_crtc_get(struct drm_device *dev);
 extern int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value);
@@ -558,6 +561,12 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
 		 ? DRM_READ32( dev_priv->ring_rptr, RADEON_SCRATCHOFF(x) ) \
 		 : RADEON_READ( RADEON_SCRATCH_REG0 + 4*(x) ) )
 
+#define RADEON_CRTC_CRNT_FRAME 0x0214
+#define RADEON_CRTC2_CRNT_FRAME 0x0314
+
+#define RADEON_CRTC_STATUS 0x005c
+#define RADEON_CRTC2_STATUS 0x03fc
+
 #define RADEON_GEN_INT_CNTL		0x0040
 #	define RADEON_CRTC_VBLANK_MASK		(1 << 0)
 #	define RADEON_CRTC2_VBLANK_MASK		(1 << 9)
diff --git a/drivers/char/drm/radeon_irq.c b/drivers/char/drm/radeon_irq.c
index 009af3814b6f..507d6b747a13 100644
--- a/drivers/char/drm/radeon_irq.c
+++ b/drivers/char/drm/radeon_irq.c
@@ -35,12 +35,61 @@
35#include "radeon_drm.h" 35#include "radeon_drm.h"
36#include "radeon_drv.h" 36#include "radeon_drv.h"
37 37
38static __inline__ u32 radeon_acknowledge_irqs(drm_radeon_private_t * dev_priv, 38static void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state)
39 u32 mask)
40{ 39{
41 u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS) & mask; 40 drm_radeon_private_t *dev_priv = dev->dev_private;
41
42 if (state)
43 dev_priv->irq_enable_reg |= mask;
44 else
45 dev_priv->irq_enable_reg &= ~mask;
46
47 RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
48}
49
50int radeon_enable_vblank(struct drm_device *dev, int crtc)
51{
52 switch (crtc) {
53 case 0:
54 radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 1);
55 break;
56 case 1:
57 radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 1);
58 break;
59 default:
60 DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
61 crtc);
62 return EINVAL;
63 }
64
65 return 0;
66}
67
68void radeon_disable_vblank(struct drm_device *dev, int crtc)
69{
70 switch (crtc) {
71 case 0:
72 radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 0);
73 break;
74 case 1:
75 radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 0);
76 break;
77 default:
78 DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
79 crtc);
80 break;
81 }
82}
83
84static __inline__ u32 radeon_acknowledge_irqs(drm_radeon_private_t * dev_priv)
85{
86 u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS) &
87 (RADEON_SW_INT_TEST | RADEON_CRTC_VBLANK_STAT |
88 RADEON_CRTC2_VBLANK_STAT);
89
42 if (irqs) 90 if (irqs)
43 RADEON_WRITE(RADEON_GEN_INT_STATUS, irqs); 91 RADEON_WRITE(RADEON_GEN_INT_STATUS, irqs);
92
44 return irqs; 93 return irqs;
45} 94}
46 95
@@ -72,39 +121,21 @@ irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS)
 	/* Only consider the bits we're interested in - others could be used
 	 * outside the DRM
 	 */
-	stat = radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK |
-						  RADEON_CRTC_VBLANK_STAT |
-						  RADEON_CRTC2_VBLANK_STAT));
+	stat = radeon_acknowledge_irqs(dev_priv);
 	if (!stat)
 		return IRQ_NONE;
 
 	stat &= dev_priv->irq_enable_reg;
 
 	/* SW interrupt */
-	if (stat & RADEON_SW_INT_TEST) {
+	if (stat & RADEON_SW_INT_TEST)
 		DRM_WAKEUP(&dev_priv->swi_queue);
-	}
 
 	/* VBLANK interrupt */
-	if (stat & (RADEON_CRTC_VBLANK_STAT|RADEON_CRTC2_VBLANK_STAT)) {
-		int vblank_crtc = dev_priv->vblank_crtc;
-
-		if ((vblank_crtc &
-		     (DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) ==
-		    (DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) {
-			if (stat & RADEON_CRTC_VBLANK_STAT)
-				atomic_inc(&dev->vbl_received);
-			if (stat & RADEON_CRTC2_VBLANK_STAT)
-				atomic_inc(&dev->vbl_received2);
-		} else if (((stat & RADEON_CRTC_VBLANK_STAT) &&
-			    (vblank_crtc & DRM_RADEON_VBLANK_CRTC1)) ||
-			   ((stat & RADEON_CRTC2_VBLANK_STAT) &&
-			    (vblank_crtc & DRM_RADEON_VBLANK_CRTC2)))
-			atomic_inc(&dev->vbl_received);
-
-		DRM_WAKEUP(&dev->vbl_queue);
-		drm_vbl_send_signals(dev);
-	}
+	if (stat & RADEON_CRTC_VBLANK_STAT)
+		drm_handle_vblank(dev, 0);
+	if (stat & RADEON_CRTC2_VBLANK_STAT)
+		drm_handle_vblank(dev, 1);
 
 	return IRQ_HANDLED;
 }
@@ -144,54 +175,27 @@ static int radeon_wait_irq(struct drm_device * dev, int swi_nr)
 	return ret;
 }
 
-static int radeon_driver_vblank_do_wait(struct drm_device * dev,
-					unsigned int *sequence, int crtc)
+u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc)
 {
-	drm_radeon_private_t *dev_priv =
-	    (drm_radeon_private_t *) dev->dev_private;
-	unsigned int cur_vblank;
-	int ret = 0;
-	int ack = 0;
-	atomic_t *counter;
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	u32 crtc_cnt_reg, crtc_status_reg;
+
 	if (!dev_priv) {
 		DRM_ERROR("called with no initialization\n");
 		return -EINVAL;
 	}
 
-	if (crtc == DRM_RADEON_VBLANK_CRTC1) {
-		counter = &dev->vbl_received;
-		ack |= RADEON_CRTC_VBLANK_STAT;
-	} else if (crtc == DRM_RADEON_VBLANK_CRTC2) {
-		counter = &dev->vbl_received2;
-		ack |= RADEON_CRTC2_VBLANK_STAT;
-	} else
+	if (crtc == 0) {
+		crtc_cnt_reg = RADEON_CRTC_CRNT_FRAME;
+		crtc_status_reg = RADEON_CRTC_STATUS;
+	} else if (crtc == 1) {
+		crtc_cnt_reg = RADEON_CRTC2_CRNT_FRAME;
+		crtc_status_reg = RADEON_CRTC2_STATUS;
+	} else {
 		return -EINVAL;
+	}
 
-	radeon_acknowledge_irqs(dev_priv, ack);
-
-	dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
-
-	/* Assume that the user has missed the current sequence number
-	 * by about a day rather than she wants to wait for years
-	 * using vertical blanks...
-	 */
-	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
-		    (((cur_vblank = atomic_read(counter))
-		      - *sequence) <= (1 << 23)));
-
-	*sequence = cur_vblank;
-
-	return ret;
-}
-
-int radeon_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
-{
-	return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC1);
-}
-
-int radeon_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
-{
-	return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC2);
+	return RADEON_READ(crtc_cnt_reg) + (RADEON_READ(crtc_status_reg) & 1);
 }
 
 /* Needs the lock as it touches the ring.
@@ -234,21 +238,6 @@ int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_pr
 	return radeon_wait_irq(dev, irqwait->irq_seq);
 }
 
-static void radeon_enable_interrupt(struct drm_device *dev)
-{
-	drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
-
-	dev_priv->irq_enable_reg = RADEON_SW_INT_ENABLE;
-	if (dev_priv->vblank_crtc & DRM_RADEON_VBLANK_CRTC1)
-		dev_priv->irq_enable_reg |= RADEON_CRTC_VBLANK_MASK;
-
-	if (dev_priv->vblank_crtc & DRM_RADEON_VBLANK_CRTC2)
-		dev_priv->irq_enable_reg |= RADEON_CRTC2_VBLANK_MASK;
-
-	RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
-	dev_priv->irq_enabled = 1;
-}
-
 /* drm_dma.h hooks
 */
 void radeon_driver_irq_preinstall(struct drm_device * dev)
@@ -260,20 +249,27 @@ void radeon_driver_irq_preinstall(struct drm_device * dev)
 	RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
 
 	/* Clear bits if they're already high */
-	radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK |
-					   RADEON_CRTC_VBLANK_STAT |
-					   RADEON_CRTC2_VBLANK_STAT));
+	radeon_acknowledge_irqs(dev_priv);
 }
 
-void radeon_driver_irq_postinstall(struct drm_device * dev)
+int radeon_driver_irq_postinstall(struct drm_device * dev)
 {
 	drm_radeon_private_t *dev_priv =
 	    (drm_radeon_private_t *) dev->dev_private;
+	int ret;
 
 	atomic_set(&dev_priv->swi_emitted, 0);
 	DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
 
-	radeon_enable_interrupt(dev);
+	ret = drm_vblank_init(dev, 2);
+	if (ret)
+		return ret;
+
+	dev->max_vblank_count = 0x001fffff;
+
+	radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
+
+	return 0;
 }
 
 void radeon_driver_irq_uninstall(struct drm_device * dev)
@@ -315,6 +311,5 @@ int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value)
 		return -EINVAL;
 	}
 	dev_priv->vblank_crtc = (unsigned int)value;
-	radeon_enable_interrupt(dev);
 	return 0;
 }
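Radeon is the one driver here that reads the count from hardware instead of counting interrupts: CRNT_FRAME is a frame counter, and bit 0 of the CRTC status register is set while the CRTC is inside vblank. Assuming the frame counter only advances once the vblank period ends, adding the status bit makes the reported count move at vblank start instead:

	/* Sketch with hypothetical values:
	 *   just before vblank N+1:  CRNT_FRAME == N,   status bit == 0  -> N
	 *   while inside vblank N+1: CRNT_FRAME == N,   status bit == 1  -> N + 1
	 *   after vblank N+1 ends:   CRNT_FRAME == N+1, status bit == 0  -> N + 1
	 */
	count = RADEON_READ(crtc_cnt_reg) + (RADEON_READ(crtc_status_reg) & 1);

The matching dev->max_vblank_count = 0x001fffff in the postinstall hunk tells the core that this hardware counter wraps after 21 bits.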
diff --git a/drivers/char/drm/via_drv.c b/drivers/char/drm/via_drv.c
index 80c01cdfa37d..37870a4a3dc7 100644
--- a/drivers/char/drm/via_drv.c
+++ b/drivers/char/drm/via_drv.c
@@ -40,11 +40,13 @@ static struct pci_device_id pciidlist[] = {
 static struct drm_driver driver = {
 	.driver_features =
 	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ |
-	    DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
+	    DRIVER_IRQ_SHARED,
 	.load = via_driver_load,
 	.unload = via_driver_unload,
 	.context_dtor = via_final_context,
-	.vblank_wait = via_driver_vblank_wait,
+	.get_vblank_counter = via_get_vblank_counter,
+	.enable_vblank = via_enable_vblank,
+	.disable_vblank = via_disable_vblank,
 	.irq_preinstall = via_driver_irq_preinstall,
 	.irq_postinstall = via_driver_irq_postinstall,
 	.irq_uninstall = via_driver_irq_uninstall,
diff --git a/drivers/char/drm/via_drv.h b/drivers/char/drm/via_drv.h
index 2daae81874cd..fe67030e39ac 100644
--- a/drivers/char/drm/via_drv.h
+++ b/drivers/char/drm/via_drv.h
@@ -75,6 +75,7 @@ typedef struct drm_via_private {
 	struct timeval last_vblank;
 	int last_vblank_valid;
 	unsigned usec_per_vblank;
+	atomic_t vbl_received;
 	drm_via_state_t hc_state;
 	char pci_buf[VIA_PCI_BUF_SIZE];
 	const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
@@ -130,11 +131,13 @@ extern int via_init_context(struct drm_device * dev, int context);
 extern int via_final_context(struct drm_device * dev, int context);
 
 extern int via_do_cleanup_map(struct drm_device * dev);
-extern int via_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
+extern u32 via_get_vblank_counter(struct drm_device *dev, int crtc);
+extern int via_enable_vblank(struct drm_device *dev, int crtc);
+extern void via_disable_vblank(struct drm_device *dev, int crtc);
 
 extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS);
 extern void via_driver_irq_preinstall(struct drm_device * dev);
-extern void via_driver_irq_postinstall(struct drm_device * dev);
+extern int via_driver_irq_postinstall(struct drm_device * dev);
 extern void via_driver_irq_uninstall(struct drm_device * dev);
 
 extern int via_dma_cleanup(struct drm_device * dev);
diff --git a/drivers/char/drm/via_irq.c b/drivers/char/drm/via_irq.c
index c6bb978a1106..f1ab6fc7c07e 100644
--- a/drivers/char/drm/via_irq.c
+++ b/drivers/char/drm/via_irq.c
@@ -92,8 +92,17 @@ static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1};
 static unsigned time_diff(struct timeval *now, struct timeval *then)
 {
 	return (now->tv_usec >= then->tv_usec) ?
 		now->tv_usec - then->tv_usec :
 		1000000 - (then->tv_usec - now->tv_usec);
+}
+
+u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+	drm_via_private_t *dev_priv = dev->dev_private;
+	if (crtc != 0)
+		return 0;
+
+	return atomic_read(&dev_priv->vbl_received);
 }
 
 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
@@ -108,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
 
 	status = VIA_READ(VIA_REG_INTERRUPT);
 	if (status & VIA_IRQ_VBLANK_PENDING) {
-		atomic_inc(&dev->vbl_received);
-		if (!(atomic_read(&dev->vbl_received) & 0x0F)) {
+		atomic_inc(&dev_priv->vbl_received);
+		if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
 			do_gettimeofday(&cur_vblank);
 			if (dev_priv->last_vblank_valid) {
 				dev_priv->usec_per_vblank =
@@ -119,12 +128,11 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
 			dev_priv->last_vblank = cur_vblank;
 			dev_priv->last_vblank_valid = 1;
 		}
-		if (!(atomic_read(&dev->vbl_received) & 0xFF)) {
+		if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
 			DRM_DEBUG("US per vblank is: %u\n",
 				  dev_priv->usec_per_vblank);
 		}
-		DRM_WAKEUP(&dev->vbl_queue);
-		drm_vbl_send_signals(dev);
+		drm_handle_vblank(dev, 0);
 		handled = 1;
 	}
 
@@ -163,31 +171,34 @@ static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t * dev_priv)
 	}
 }
 
-int via_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence)
+int via_enable_vblank(struct drm_device *dev, int crtc)
 {
-	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
-	unsigned int cur_vblank;
-	int ret = 0;
+	drm_via_private_t *dev_priv = dev->dev_private;
+	u32 status;
 
-	DRM_DEBUG("\n");
-	if (!dev_priv) {
-		DRM_ERROR("called with no initialization\n");
+	if (crtc != 0) {
+		DRM_ERROR("%s: bad crtc %d\n", __FUNCTION__, crtc);
 		return -EINVAL;
 	}
 
-	viadrv_acknowledge_irqs(dev_priv);
+	status = VIA_READ(VIA_REG_INTERRUPT);
+	VIA_WRITE(VIA_REG_INTERRUPT, status & VIA_IRQ_VBLANK_ENABLE);
+
+	VIA_WRITE8(0x83d4, 0x11);
+	VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
 
-	/* Assume that the user has missed the current sequence number
-	 * by about a day rather than she wants to wait for years
-	 * using vertical blanks...
-	 */
-
-	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
-		    (((cur_vblank = atomic_read(&dev->vbl_received)) -
-		      *sequence) <= (1 << 23)));
+	return 0;
+}
 
-	*sequence = cur_vblank;
-	return ret;
+void via_disable_vblank(struct drm_device *dev, int crtc)
+{
+	drm_via_private_t *dev_priv = dev->dev_private;
+
+	VIA_WRITE8(0x83d4, 0x11);
+	VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);
+
+	if (crtc != 0)
+		DRM_ERROR("%s: bad crtc %d\n", __FUNCTION__, crtc);
 }
 
 static int
@@ -292,23 +303,25 @@ void via_driver_irq_preinstall(struct drm_device * dev)
 	}
 }
 
-void via_driver_irq_postinstall(struct drm_device * dev)
+int via_driver_irq_postinstall(struct drm_device * dev)
 {
 	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
 	u32 status;
 
-	DRM_DEBUG("\n");
-	if (dev_priv) {
-		status = VIA_READ(VIA_REG_INTERRUPT);
-		VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
-			  | dev_priv->irq_enable_mask);
+	DRM_DEBUG("via_driver_irq_postinstall\n");
+	if (!dev_priv)
+		return -EINVAL;
 
-		/* Some magic, oh for some data sheets ! */
+	drm_vblank_init(dev, 1);
+	status = VIA_READ(VIA_REG_INTERRUPT);
+	VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
+		  | dev_priv->irq_enable_mask);
 
-		VIA_WRITE8(0x83d4, 0x11);
-		VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
+	/* Some magic, oh for some data sheets ! */
+	VIA_WRITE8(0x83d4, 0x11);
+	VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
 
-	}
+	return 0;
 }
 
 void via_driver_irq_uninstall(struct drm_device * dev)
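Note that, unlike the mga and radeon conversions, the via postinstall above discards the result of drm_vblank_init(). A stricter variant, following the pattern used by the other drivers in this series (sketch):

	int ret = drm_vblank_init(dev, 1);
	if (ret)
		return ret;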
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 87532034d105..3f9e10001e19 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -1031,7 +1031,7 @@ comment "Other IDE chipsets support"
1031comment "Note: most of these also require special kernel boot parameters" 1031comment "Note: most of these also require special kernel boot parameters"
1032 1032
1033config BLK_DEV_4DRIVES 1033config BLK_DEV_4DRIVES
1034 bool "Generic 4 drives/port support" 1034 tristate "Generic 4 drives/port support"
1035 help 1035 help
1036 Certain older chipsets, including the Tekram 690CD, use a single set 1036 Certain older chipsets, including the Tekram 690CD, use a single set
1037 of I/O ports at 0x1f0 to control up to four drives, instead of the 1037 of I/O ports at 0x1f0 to control up to four drives, instead of the
diff --git a/drivers/ide/arm/bast-ide.c b/drivers/ide/arm/bast-ide.c
index ec46c44b061c..713cef20622e 100644
--- a/drivers/ide/arm/bast-ide.c
+++ b/drivers/ide/arm/bast-ide.c
@@ -21,6 +21,8 @@
 #include <asm/arch/bast-map.h>
 #include <asm/arch/bast-irq.h>
 
+#define DRV_NAME "bast-ide"
+
 static int __init bastide_register(unsigned int base, unsigned int aux, int irq)
 {
 	ide_hwif_t *hwif;
@@ -33,27 +35,23 @@ static int __init bastide_register(unsigned int base, unsigned int aux, int irq)
 	base += BAST_IDE_CS;
 	aux += BAST_IDE_CS;
 
-	for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
-		hw.io_ports[i] = (unsigned long)base;
+	for (i = 0; i <= 7; i++) {
+		hw.io_ports_array[i] = (unsigned long)base;
 		base += 0x20;
 	}
 
-	hw.io_ports[IDE_CONTROL_OFFSET] = aux + (6 * 0x20);
+	hw.io_ports.ctl_addr = aux + (6 * 0x20);
 	hw.irq = irq;
 
-	hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
+	hwif = ide_find_port();
 	if (hwif == NULL)
 		goto out;
 
 	i = hwif->index;
 
-	if (hwif->present)
-		ide_unregister(i);
-	else
-		ide_init_port_data(hwif, i);
-
+	ide_init_port_data(hwif, i);
 	ide_init_port_hw(hwif, &hw);
-	hwif->quirkproc = NULL;
+	hwif->port_ops = NULL;
 
 	idx[0] = i;
 
@@ -64,6 +62,8 @@ out:
 
 static int __init bastide_init(void)
 {
+	unsigned long base = BAST_VA_IDEPRI + BAST_IDE_CS;
+
 	/* we can treat the VR1000 and the BAST the same */
 
 	if (!(machine_is_bast() || machine_is_vr1000()))
@@ -71,6 +71,11 @@ static int __init bastide_init(void)
 
 	printk("BAST: IDE driver, (c) 2003-2004 Simtec Electronics\n");
 
+	if (!request_mem_region(base, 0x400000, DRV_NAME)) {
+		printk(KERN_ERR "%s: resources busy\n", DRV_NAME);
+		return -EBUSY;
+	}
+
 	bastide_register(BAST_VA_IDEPRI, BAST_VA_IDEPRIAUX, IRQ_IDE0);
 	bastide_register(BAST_VA_IDESEC, BAST_VA_IDESECAUX, IRQ_IDE1);
 
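The IDE hunks in the rest of this merge all track the same interface rework: hw_regs_t and ide_hwif_t address their task-file ports through a union, so a port can be reached either by index (the old loop style, now io_ports_array[]) or by name (io_ports.data_addr, io_ports.ctl_addr). An illustrative shape, assuming the layout implied by these hunks; the real definition lives in <linux/ide.h> and the field list here is abbreviated:

	/* Sketch only: indexed and named views of the same port block. */
	union {
		struct {
			unsigned long data_addr;	/* io_ports_array[0] */
			/* ... error/nsect/lbal/lbam/lbah/device ...       */
			unsigned long status_addr;	/* io_ports_array[7] */
			unsigned long ctl_addr;		/* io_ports_array[8] */
		} io_ports;
		unsigned long io_ports_array[9];
	};

This is why the loops switch from IDE_DATA_OFFSET..IDE_STATUS_OFFSET to the literal 0..7, and why ide_find_port() no longer takes a base-port argument.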
diff --git a/drivers/ide/arm/icside.c b/drivers/ide/arm/icside.c
index e816b0ffcfe6..124445c20921 100644
--- a/drivers/ide/arm/icside.c
+++ b/drivers/ide/arm/icside.c
@@ -191,6 +191,10 @@ static void icside_maskproc(ide_drive_t *drive, int mask)
 	local_irq_restore(flags);
 }
 
+static const struct ide_port_ops icside_v6_no_dma_port_ops = {
+	.maskproc	= icside_maskproc,
+};
+
 #ifdef CONFIG_BLK_DEV_IDEDMA_ICS
 /*
  * SG-DMA support.
@@ -266,6 +270,11 @@ static void icside_set_dma_mode(ide_drive_t *drive, const u8 xfer_mode)
 			ide_xfer_verbose(xfer_mode), 2000 / drive->drive_data);
 }
 
+static const struct ide_port_ops icside_v6_port_ops = {
+	.set_dma_mode	= icside_set_dma_mode,
+	.maskproc	= icside_maskproc,
+};
+
 static void icside_dma_host_set(ide_drive_t *drive, int on)
 {
 }
@@ -375,32 +384,40 @@ static void icside_dma_lost_irq(ide_drive_t *drive)
375 printk(KERN_ERR "%s: IRQ lost\n", drive->name); 384 printk(KERN_ERR "%s: IRQ lost\n", drive->name);
376} 385}
377 386
378static void icside_dma_init(ide_hwif_t *hwif) 387static int icside_dma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
379{ 388{
380 hwif->dmatable_cpu = NULL; 389 hwif->dmatable_cpu = NULL;
381 hwif->dmatable_dma = 0; 390 hwif->dmatable_dma = 0;
382 hwif->set_dma_mode = icside_set_dma_mode; 391
383 392 return 0;
384 hwif->dma_host_set = icside_dma_host_set;
385 hwif->dma_setup = icside_dma_setup;
386 hwif->dma_exec_cmd = icside_dma_exec_cmd;
387 hwif->dma_start = icside_dma_start;
388 hwif->ide_dma_end = icside_dma_end;
389 hwif->ide_dma_test_irq = icside_dma_test_irq;
390 hwif->dma_timeout = icside_dma_timeout;
391 hwif->dma_lost_irq = icside_dma_lost_irq;
392} 393}
394
395static const struct ide_dma_ops icside_v6_dma_ops = {
396 .dma_host_set = icside_dma_host_set,
397 .dma_setup = icside_dma_setup,
398 .dma_exec_cmd = icside_dma_exec_cmd,
399 .dma_start = icside_dma_start,
400 .dma_end = icside_dma_end,
401 .dma_test_irq = icside_dma_test_irq,
402 .dma_timeout = icside_dma_timeout,
403 .dma_lost_irq = icside_dma_lost_irq,
404};
393#else 405#else
394#define icside_dma_init(hwif) (0) 406#define icside_v6_dma_ops NULL
395#endif 407#endif
396 408
409static int icside_dma_off_init(ide_hwif_t *hwif, const struct ide_port_info *d)
410{
411 return -EOPNOTSUPP;
412}
413
397static ide_hwif_t * 414static ide_hwif_t *
398icside_setup(void __iomem *base, struct cardinfo *info, struct expansion_card *ec) 415icside_setup(void __iomem *base, struct cardinfo *info, struct expansion_card *ec)
399{ 416{
400 unsigned long port = (unsigned long)base + info->dataoffset; 417 unsigned long port = (unsigned long)base + info->dataoffset;
401 ide_hwif_t *hwif; 418 ide_hwif_t *hwif;
402 419
403 hwif = ide_find_port(port); 420 hwif = ide_find_port();
404 if (hwif) { 421 if (hwif) {
405 int i; 422 int i;
406 423
@@ -408,15 +425,14 @@ icside_setup(void __iomem *base, struct cardinfo *info, struct expansion_card *e
 		 * Ensure we're using MMIO
 		 */
 		default_hwif_mmiops(hwif);
-		hwif->mmio = 1;
 
-		for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
-			hwif->io_ports[i] = port;
+		for (i = 0; i <= 7; i++) {
+			hwif->io_ports_array[i] = port;
 			port += 1 << info->stepping;
 		}
-		hwif->io_ports[IDE_CONTROL_OFFSET] = (unsigned long)base + info->ctrloffset;
+		hwif->io_ports.ctl_addr =
+			(unsigned long)base + info->ctrloffset;
 		hwif->irq = ec->irq;
-		hwif->noprobe = 0;
 		hwif->chipset = ide_acorn;
 		hwif->gendev.parent = &ec->dev;
 		hwif->dev = &ec->dev;
462} 478}
463 479
464static const struct ide_port_info icside_v6_port_info __initdata = { 480static const struct ide_port_info icside_v6_port_info __initdata = {
465 .host_flags = IDE_HFLAG_SERIALIZE | 481 .init_dma = icside_dma_off_init,
466 IDE_HFLAG_NO_DMA | /* no SFF-style DMA */ 482 .port_ops = &icside_v6_no_dma_port_ops,
467 IDE_HFLAG_NO_AUTOTUNE, 483 .dma_ops = &icside_v6_dma_ops,
484 .host_flags = IDE_HFLAG_SERIALIZE,
468 .mwdma_mask = ATA_MWDMA2, 485 .mwdma_mask = ATA_MWDMA2,
469 .swdma_mask = ATA_SWDMA2, 486 .swdma_mask = ATA_SWDMA2,
470}; 487};
@@ -526,21 +543,19 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec)
 	state->hwif[0]    = hwif;
 	state->hwif[1]    = mate;
 
-	hwif->maskproc    = icside_maskproc;
 	hwif->hwif_data   = state;
 	hwif->config_data = (unsigned long)ioc_base;
 	hwif->select_data = sel;
 
-	mate->maskproc    = icside_maskproc;
 	mate->hwif_data   = state;
 	mate->config_data = (unsigned long)ioc_base;
 	mate->select_data = sel | 1;
 
 	if (ec->dma != NO_DMA && !request_dma(ec->dma, hwif->name)) {
-		icside_dma_init(hwif);
-		icside_dma_init(mate);
-	} else
-		d.mwdma_mask = d.swdma_mask = 0;
+		d.init_dma = icside_dma_init;
+		d.port_ops = &icside_v6_port_ops;
+		d.dma_ops = NULL;
+	}
 
 	idx[0] = hwif->index;
 	idx[1] = mate->index;
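The icside conversion shows the general direction of this IDE series: per-port callbacks move out of ide_hwif_t assignments and into shared const tables referenced from struct ide_port_info, which ide_device_add() then applies. A sketch of the ide_port_info fields these hunks rely on (the real structure in <linux/ide.h> has many more members):

	struct ide_port_info {
		int (*init_dma)(ide_hwif_t *, const struct ide_port_info *);
		const struct ide_port_ops *port_ops;	/* set_pio_mode, maskproc, ... */
		const struct ide_dma_ops  *dma_ops;	/* dma_setup, dma_start, ...   */
		/* host_flags, pio/swdma/mwdma/udma masks, ... */
	};

Because the tables are const and shared, a probe that needs per-card variation (as icside does for the DMA case) copies the template into a local struct ide_port_info d and patches the copy, which is exactly what the hunk above does.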
diff --git a/drivers/ide/arm/ide_arm.c b/drivers/ide/arm/ide_arm.c
index be9ff7334c52..4263ffd4ab20 100644
--- a/drivers/ide/arm/ide_arm.c
+++ b/drivers/ide/arm/ide_arm.c
@@ -14,6 +14,8 @@
 #include <asm/mach-types.h>
 #include <asm/irq.h>
 
+#define DRV_NAME "ide_arm"
+
 #ifdef CONFIG_ARCH_CLPS7500
 # include <asm/arch/hardware.h>
 #
@@ -28,13 +30,27 @@ static int __init ide_arm_init(void)
 {
 	ide_hwif_t *hwif;
 	hw_regs_t hw;
+	unsigned long base = IDE_ARM_IO, ctl = IDE_ARM_IO + 0x206;
 	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
 
+	if (!request_region(base, 8, DRV_NAME)) {
+		printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
+				DRV_NAME, base, base + 7);
+		return -EBUSY;
+	}
+
+	if (!request_region(ctl, 1, DRV_NAME)) {
+		printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
+				DRV_NAME, ctl);
+		release_region(base, 8);
+		return -EBUSY;
+	}
+
 	memset(&hw, 0, sizeof(hw));
-	ide_std_init_ports(&hw, IDE_ARM_IO, IDE_ARM_IO + 0x206);
+	ide_std_init_ports(&hw, base, ctl);
 	hw.irq = IDE_ARM_IRQ;
 
-	hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
+	hwif = ide_find_port();
 	if (hwif) {
 		ide_init_port_hw(hwif, &hw);
 		idx[0] = hwif->index;
diff --git a/drivers/ide/arm/palm_bk3710.c b/drivers/ide/arm/palm_bk3710.c
index 420fcb78a7cd..aaf32541622d 100644
--- a/drivers/ide/arm/palm_bk3710.c
+++ b/drivers/ide/arm/palm_bk3710.c
@@ -96,11 +96,11 @@ static void palm_bk3710_setudmamode(void __iomem *base, unsigned int dev,
 	u16 val16;
 
 	/* DMA Data Setup */
-	t0 = (palm_bk3710_udmatimings[mode].cycletime + ide_palm_clk - 1)
-			/ ide_palm_clk - 1;
-	tenv = (20 + ide_palm_clk - 1) / ide_palm_clk - 1;
-	trp = (palm_bk3710_udmatimings[mode].rptime + ide_palm_clk - 1)
-			/ ide_palm_clk - 1;
+	t0 = DIV_ROUND_UP(palm_bk3710_udmatimings[mode].cycletime,
+			  ide_palm_clk) - 1;
+	tenv = DIV_ROUND_UP(20, ide_palm_clk) - 1;
+	trp = DIV_ROUND_UP(palm_bk3710_udmatimings[mode].rptime,
+			   ide_palm_clk) - 1;
 
 	/* udmatim Register */
 	val16 = readw(base + BK3710_UDMATIM) & (dev ? 0xFF0F : 0xFFF0);
@@ -141,8 +141,8 @@ static void palm_bk3710_setdmamode(void __iomem *base, unsigned int dev,
 	cycletime = max_t(int, t->cycle, min_cycle);
 
 	/* DMA Data Setup */
-	t0 = (cycletime + ide_palm_clk - 1) / ide_palm_clk;
-	td = (t->active + ide_palm_clk - 1) / ide_palm_clk;
+	t0 = DIV_ROUND_UP(cycletime, ide_palm_clk);
+	td = DIV_ROUND_UP(t->active, ide_palm_clk);
 	tkw = t0 - td - 1;
 	td -= 1;
 
@@ -168,9 +168,9 @@ static void palm_bk3710_setpiomode(void __iomem *base, ide_drive_t *mate,
 	struct ide_timing *t;
 
 	/* PIO Data Setup */
-	t0 = (cycletime + ide_palm_clk - 1) / ide_palm_clk;
-	t2 = (ide_timing_find_mode(XFER_PIO_0 + mode)->active +
-	      ide_palm_clk - 1) / ide_palm_clk;
+	t0 = DIV_ROUND_UP(cycletime, ide_palm_clk);
+	t2 = DIV_ROUND_UP(ide_timing_find_mode(XFER_PIO_0 + mode)->active,
+			  ide_palm_clk);
 
 	t2i = t0 - t2 - 1;
 	t2 -= 1;
@@ -192,8 +192,8 @@ static void palm_bk3710_setpiomode(void __iomem *base, ide_drive_t *mate,
 
 	/* TASKFILE Setup */
 	t = ide_timing_find_mode(XFER_PIO_0 + mode);
-	t0 = (t->cyc8b + ide_palm_clk - 1) / ide_palm_clk;
-	t2 = (t->act8b + ide_palm_clk - 1) / ide_palm_clk;
+	t0 = DIV_ROUND_UP(t->cyc8b, ide_palm_clk);
+	t2 = DIV_ROUND_UP(t->act8b, ide_palm_clk);
 
 	t2i = t0 - t2 - 1;
 	t2 -= 1;
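All four timing hunks above replace open-coded round-up division with the kernel's DIV_ROUND_UP macro from <linux/kernel.h>, which expands to the same arithmetic the old code spelled out:

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	/* Worked example: with ide_palm_clk == 7 (ns per clock) and a 100 ns
	 * cycle time, DIV_ROUND_UP(100, 7) == (100 + 6) / 7 == 15, the
	 * smallest clock count whose duration covers 100 ns.
	 */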
@@ -317,17 +317,31 @@ static u8 __devinit palm_bk3710_cable_detect(ide_hwif_t *hwif)
 	return ATA_CBL_PATA80;
 }
 
-static void __devinit palm_bk3710_init_hwif(ide_hwif_t *hwif)
+static int __devinit palm_bk3710_init_dma(ide_hwif_t *hwif,
+					  const struct ide_port_info *d)
 {
-	hwif->set_pio_mode = palm_bk3710_set_pio_mode;
-	hwif->set_dma_mode = palm_bk3710_set_dma_mode;
+	unsigned long base =
+		hwif->io_ports.data_addr - IDE_PALM_ATA_PRI_REG_OFFSET;
 
-	hwif->cable_detect = palm_bk3710_cable_detect;
+	printk(KERN_INFO "    %s: MMIO-DMA\n", hwif->name);
+
+	if (ide_allocate_dma_engine(hwif))
+		return -1;
+
+	ide_setup_dma(hwif, base);
+
+	return 0;
 }
 
+static const struct ide_port_ops palm_bk3710_ports_ops = {
+	.set_pio_mode		= palm_bk3710_set_pio_mode,
+	.set_dma_mode		= palm_bk3710_set_dma_mode,
+	.cable_detect		= palm_bk3710_cable_detect,
+};
+
 static const struct ide_port_info __devinitdata palm_bk3710_port_info = {
-	.init_hwif		= palm_bk3710_init_hwif,
-	.host_flags		= IDE_HFLAG_NO_DMA, /* hack (no PCI) */
+	.init_dma		= palm_bk3710_init_dma,
+	.port_ops		= &palm_bk3710_ports_ops,
 	.pio_mask		= ATA_PIO4,
 	.udma_mask		= ATA_UDMA4,	/* (input clk 99MHz) */
 	.mwdma_mask		= ATA_MWDMA2,
@@ -372,30 +386,24 @@ static int __devinit palm_bk3710_probe(struct platform_device *pdev)
 
 	pribase = mem->start + IDE_PALM_ATA_PRI_REG_OFFSET;
 	for (i = 0; i < IDE_NR_PORTS - 2; i++)
-		hw.io_ports[i] = pribase + i;
-	hw.io_ports[IDE_CONTROL_OFFSET] = mem->start +
+		hw.io_ports_array[i] = pribase + i;
+	hw.io_ports.ctl_addr = mem->start +
 			IDE_PALM_ATA_PRI_CTL_OFFSET;
 	hw.irq = irq->start;
 	hw.chipset = ide_palm3710;
 
-	hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
+	hwif = ide_find_port();
 	if (hwif == NULL)
 		goto out;
 
 	i = hwif->index;
 
-	if (hwif->present)
-		ide_unregister(i);
-	else
-		ide_init_port_data(hwif, i);
-
+	ide_init_port_data(hwif, i);
 	ide_init_port_hw(hwif, &hw);
 
 	hwif->mmio = 1;
 	default_hwif_mmiops(hwif);
 
-	ide_setup_dma(hwif, mem->start);
-
 	idx[0] = i;
 
 	ide_device_add(idx, &palm_bk3710_port_info);
diff --git a/drivers/ide/arm/rapide.c b/drivers/ide/arm/rapide.c
index b30adcf321c3..babc1a5e128d 100644
--- a/drivers/ide/arm/rapide.c
+++ b/drivers/ide/arm/rapide.c
@@ -17,11 +17,11 @@ static void rapide_setup_ports(hw_regs_t *hw, void __iomem *base,
 	unsigned long port = (unsigned long)base;
 	int i;
 
-	for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
-		hw->io_ports[i] = port;
+	for (i = 0; i <= 7; i++) {
+		hw->io_ports_array[i] = port;
 		port += sz;
 	}
-	hw->io_ports[IDE_CONTROL_OFFSET] = (unsigned long)ctrl;
+	hw->io_ports.ctl_addr = (unsigned long)ctrl;
 	hw->irq = irq;
 }
 
@@ -44,7 +44,7 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
 		goto release;
 	}
 
-	hwif = ide_find_port((unsigned long)base);
+	hwif = ide_find_port();
 	if (hwif) {
 		memset(&hw, 0, sizeof(hw));
 		rapide_setup_ports(&hw, base, base + 0x818, 1 << 6, ec->irq);
@@ -53,7 +53,6 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
 
 		ide_init_port_hw(hwif, &hw);
 
-		hwif->mmio = 1;
 		default_hwif_mmiops(hwif);
 
 		idx[0] = hwif->index;
@@ -76,7 +75,7 @@ static void __devexit rapide_remove(struct expansion_card *ec)
 
 	ecard_set_drvdata(ec, NULL);
 
-	ide_unregister(hwif->index);
+	ide_unregister(hwif);
 
 	ecard_release_resources(ec);
 }
diff --git a/drivers/ide/cris/ide-cris.c b/drivers/ide/cris/ide-cris.c
index 31266d278095..9df26855bc05 100644
--- a/drivers/ide/cris/ide-cris.c
+++ b/drivers/ide/cris/ide-cris.c
@@ -88,8 +88,8 @@ enum /* Transfer types */
 int
 cris_ide_ack_intr(ide_hwif_t* hwif)
 {
-	reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2,
-					       int, hwif->io_ports[0]);
+	reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2, int,
+					       hwif->io_ports.data_addr);
 	REG_WR_INT(ata, regi_ata, rw_ack_intr, 1 << ctrl2.sel);
 	return 1;
 }
@@ -231,7 +231,7 @@ cris_ide_start_dma(ide_drive_t *drive, cris_dma_descr_type *d, int dir,int type,
 	ide_hwif_t *hwif = drive->hwif;
 
 	reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2, int,
-					       hwif->io_ports[IDE_DATA_OFFSET]);
+					       hwif->io_ports.data_addr);
 	reg_ata_rw_trf_cnt trf_cnt = {0};
 
 	mycontext.saved_data = (dma_descr_data*)virt_to_phys(d);
@@ -271,7 +271,7 @@ static int cris_dma_test_irq(ide_drive_t *drive)
 	int intr = REG_RD_INT(ata, regi_ata, r_intr);
 
 	reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2, int,
-					       hwif->io_ports[IDE_DATA_OFFSET]);
+					       hwif->io_ports.data_addr);
 
 	return intr & (1 << ctrl2.sel) ? 1 : 0;
 }
@@ -531,7 +531,7 @@ static void cris_ide_start_dma(ide_drive_t *drive, cris_dma_descr_type *d, int d
 	*R_ATA_CTRL_DATA =
 		cmd |
 		IO_FIELD(R_ATA_CTRL_DATA, data,
-			 drive->hwif->io_ports[IDE_DATA_OFFSET]) |
+			 drive->hwif->io_ports.data_addr) |
 		IO_STATE(R_ATA_CTRL_DATA, src_dst, dma) |
 		IO_STATE(R_ATA_CTRL_DATA, multi, on) |
 		IO_STATE(R_ATA_CTRL_DATA, dma_size, word);
@@ -550,7 +550,7 @@ static int cris_dma_test_irq(ide_drive_t *drive)
 {
 	int intr = *R_IRQ_MASK0_RD;
 	int bus = IO_EXTRACT(R_ATA_CTRL_DATA, sel,
-			     drive->hwif->io_ports[IDE_DATA_OFFSET]);
+			     drive->hwif->io_ports.data_addr);
 
 	return intr & (1 << (bus + IO_BITNR(R_IRQ_MASK0_RD, ata_irq0))) ? 1 : 0;
 }
@@ -644,7 +644,7 @@ cris_ide_inw(unsigned long reg) {
 	 * call will also timeout on busy, but as long as the
 	 * write is still performed, everything will be fine.
 	 */
-	if (cris_ide_get_reg(reg) == IDE_STATUS_OFFSET)
+	if (cris_ide_get_reg(reg) == 7)
 		return BUSY_STAT;
 	else
 		/* For other rare cases we assume 0 is good enough. */
@@ -673,11 +673,6 @@ cris_ide_inb(unsigned long reg)
 	return (unsigned char)cris_ide_inw(reg);
 }
 
-static int cris_dma_end (ide_drive_t *drive);
-static int cris_dma_setup (ide_drive_t *drive);
-static void cris_dma_exec_cmd (ide_drive_t *drive, u8 command);
-static int cris_dma_test_irq(ide_drive_t *drive);
-static void cris_dma_start(ide_drive_t *drive);
 static void cris_ide_input_data (ide_drive_t *drive, void *, unsigned int);
 static void cris_ide_output_data (ide_drive_t *drive, void *, unsigned int);
 static void cris_atapi_input_bytes(ide_drive_t *drive, void *, unsigned int);
@@ -770,20 +765,29 @@ static void __init cris_setup_ports(hw_regs_t *hw, unsigned long base)
 	memset(hw, 0, sizeof(*hw));
 
 	for (i = 0; i <= 7; i++)
-		hw->io_ports[i] = base + cris_ide_reg_addr(i, 0, 1);
+		hw->io_ports_array[i] = base + cris_ide_reg_addr(i, 0, 1);
 
 	/*
 	 * the IDE control register is at ATA address 6,
 	 * with CS1 active instead of CS0
 	 */
-	hw->io_ports[IDE_CONTROL_OFFSET] = base + cris_ide_reg_addr(6, 1, 0);
+	hw->io_ports.ctl_addr = base + cris_ide_reg_addr(6, 1, 0);
 
 	hw->irq = ide_default_irq(0);
 	hw->ack_intr = cris_ide_ack_intr;
 }
 
+static const struct ide_port_ops cris_port_ops = {
+	.set_pio_mode		= cris_set_pio_mode,
+	.set_dma_mode		= cris_set_dma_mode,
+};
+
+static const struct ide_dma_ops cris_dma_ops;
+
 static const struct ide_port_info cris_port_info __initdata = {
 	.chipset		= ide_etrax100,
+	.port_ops		= &cris_port_ops,
+	.dma_ops		= &cris_dma_ops,
 	.host_flags		= IDE_HFLAG_NO_ATAPI_DMA |
 				  IDE_HFLAG_NO_DMA, /* no SFF-style DMA */
 	.pio_mask		= ATA_PIO4,
@@ -804,24 +808,16 @@ static int __init init_e100_ide(void)
 
 		cris_setup_ports(&hw, cris_ide_base_address(h));
 
-		hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
+		hwif = ide_find_port();
 		if (hwif == NULL)
 			continue;
 		ide_init_port_data(hwif, hwif->index);
 		ide_init_port_hw(hwif, &hw);
-		hwif->mmio = 1;
-		hwif->set_pio_mode = &cris_set_pio_mode;
-		hwif->set_dma_mode = &cris_set_dma_mode;
+
 		hwif->ata_input_data = &cris_ide_input_data;
 		hwif->ata_output_data = &cris_ide_output_data;
 		hwif->atapi_input_bytes = &cris_atapi_input_bytes;
 		hwif->atapi_output_bytes = &cris_atapi_output_bytes;
-		hwif->dma_host_set = &cris_dma_host_set;
-		hwif->ide_dma_end = &cris_dma_end;
-		hwif->dma_setup = &cris_dma_setup;
-		hwif->dma_exec_cmd = &cris_dma_exec_cmd;
-		hwif->ide_dma_test_irq = &cris_dma_test_irq;
-		hwif->dma_start = &cris_dma_start;
 		hwif->OUTB = &cris_ide_outb;
 		hwif->OUTW = &cris_ide_outw;
 		hwif->OUTBSYNC = &cris_ide_outbsync;
@@ -1076,6 +1072,15 @@ static void cris_dma_start(ide_drive_t *drive)
1076 } 1072 }
1077} 1073}
1078 1074
1075static const struct ide_dma_ops cris_dma_ops = {
1076 .dma_host_set = cris_dma_host_set,
1077 .dma_setup = cris_dma_setup,
1078 .dma_exec_cmd = cris_dma_exec_cmd,
1079 .dma_start = cris_dma_start,
1080 .dma_end = cris_dma_end,
1081 .dma_test_irq = cris_dma_test_irq,
1082};
1083
1079module_init(init_e100_ide); 1084module_init(init_e100_ide);
1080 1085
1081MODULE_LICENSE("GPL"); 1086MODULE_LICENSE("GPL");
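Note the ordering trick in the cris hunks: cris_dma_ops is forward-declared before cris_port_info and only filled in after the callbacks it points to. In C, a file-scope static const tentative declaration may be completed later in the same translation unit, which is what makes this layout compile. Compressed sketch with placeholder names:

        static const struct ide_dma_ops foo_dma_ops;    /* completed below */

        static const struct ide_port_info foo_info __initdata = {
                .dma_ops        = &foo_dma_ops,
        };

        static int foo_dma_setup(ide_drive_t *drive)
        {
                return 0;       /* 0 means the DMA engine is programmed */
        }

        static const struct ide_dma_ops foo_dma_ops = {
                .dma_setup      = foo_dma_setup,
        };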
diff --git a/drivers/ide/h8300/ide-h8300.c b/drivers/ide/h8300/ide-h8300.c
index 4108ec4ffa7f..fd23f12e17aa 100644
--- a/drivers/ide/h8300/ide-h8300.c
+++ b/drivers/ide/h8300/ide-h8300.c
@@ -63,9 +63,9 @@ static inline void hw_setup(hw_regs_t *hw)
63 int i; 63 int i;
64 64
65 memset(hw, 0, sizeof(hw_regs_t)); 65 memset(hw, 0, sizeof(hw_regs_t));
66 for (i = 0; i <= IDE_STATUS_OFFSET; i++) 66 for (i = 0; i <= 7; i++)
67 hw->io_ports[i] = CONFIG_H8300_IDE_BASE + H8300_IDE_GAP*i; 67 hw->io_ports_array[i] = CONFIG_H8300_IDE_BASE + H8300_IDE_GAP*i;
68 hw->io_ports[IDE_CONTROL_OFFSET] = CONFIG_H8300_IDE_ALT; 68 hw->io_ports.ctl_addr = CONFIG_H8300_IDE_ALT;
69 hw->irq = EXT_IRQ0 + CONFIG_H8300_IDE_IRQ; 69 hw->irq = EXT_IRQ0 + CONFIG_H8300_IDE_IRQ;
70 hw->chipset = ide_generic; 70 hw->chipset = ide_generic;
71} 71}
@@ -74,7 +74,6 @@ static inline void hwif_setup(ide_hwif_t *hwif)
74{ 74{
75 default_hwif_iops(hwif); 75 default_hwif_iops(hwif);
76 76
77 hwif->mmio = 1;
78 hwif->OUTW = mm_outw; 77 hwif->OUTW = mm_outw;
79 hwif->OUTSW = mm_outsw; 78 hwif->OUTSW = mm_outsw;
80 hwif->INW = mm_inw; 79 hwif->INW = mm_inw;
@@ -99,8 +98,7 @@ static int __init h8300_ide_init(void)
99 98
100 hw_setup(&hw); 99 hw_setup(&hw);
101 100
102 /* register if */ 101 hwif = ide_find_port();
103 hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
104 if (hwif == NULL) { 102 if (hwif == NULL) {
105 printk(KERN_ERR "ide-h8300: IDE I/F register failed\n"); 103 printk(KERN_ERR "ide-h8300: IDE I/F register failed\n");
106 return -ENOENT; 104 return -ENOENT;
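Both conversions above follow the same probe shape: fill a hw_regs_t, grab a free hwif with the now argument-less ide_find_port(), then copy the layout in. A condensed sketch of that sequence — the base address arithmetic and the 0x206 ctl offset are illustrative, not taken from either driver:

        #include <linux/ide.h>

        static int __init foo_probe(unsigned long base)
        {
                hw_regs_t hw;
                ide_hwif_t *hwif;
                int i;

                memset(&hw, 0, sizeof(hw));
                for (i = 0; i <= 7; i++)
                        hw.io_ports_array[i] = base + i;
                hw.io_ports.ctl_addr = base + 0x206;    /* illustrative */

                hwif = ide_find_port();         /* grab a free hwif slot */
                if (hwif == NULL)
                        return -ENOENT;

                ide_init_port_data(hwif, hwif->index);  /* default fields */
                ide_init_port_hw(hwif, &hw);            /* copy register layout */
                return 0;
        }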
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
index 0f6fb6b72dd9..9d3601fa5680 100644
--- a/drivers/ide/ide-acpi.c
+++ b/drivers/ide/ide-acpi.c
@@ -55,14 +55,22 @@ struct ide_acpi_hwif_link {
55/* note: adds function name and KERN_DEBUG */ 55/* note: adds function name and KERN_DEBUG */
56#ifdef DEBUGGING 56#ifdef DEBUGGING
57#define DEBPRINT(fmt, args...) \ 57#define DEBPRINT(fmt, args...) \
58 printk(KERN_DEBUG "%s: " fmt, __FUNCTION__, ## args) 58 printk(KERN_DEBUG "%s: " fmt, __func__, ## args)
59#else 59#else
60#define DEBPRINT(fmt, args...) do {} while (0) 60#define DEBPRINT(fmt, args...) do {} while (0)
61#endif /* DEBUGGING */ 61#endif /* DEBUGGING */
62 62
63extern int ide_noacpi; 63int ide_noacpi;
64extern int ide_noacpitfs; 64module_param_named(noacpi, ide_noacpi, bool, 0);
65extern int ide_noacpionboot; 65MODULE_PARM_DESC(noacpi, "disable IDE ACPI support");
66
67int ide_acpigtf;
68module_param_named(acpigtf, ide_acpigtf, bool, 0);
69MODULE_PARM_DESC(acpigtf, "enable IDE ACPI _GTF support");
70
71int ide_acpionboot;
72module_param_named(acpionboot, ide_acpionboot, bool, 0);
73MODULE_PARM_DESC(acpionboot, "call IDE ACPI methods on boot");
66 74
67static bool ide_noacpi_psx; 75static bool ide_noacpi_psx;
68static int no_acpi_psx(const struct dmi_system_id *id) 76static int no_acpi_psx(const struct dmi_system_id *id)
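The old extern knobs become self-contained module parameters here; note that ide_noacpitfs and ide_noacpionboot also flip polarity to ide_acpigtf and ide_acpionboot, so the later hunks test !ide_acpigtf and !ide_acpionboot. The wiring pattern, reduced to one hypothetical knob:

        #include <linux/module.h>
        #include <linux/moduleparam.h>

        static int foo_enabled;                 /* int storage, parsed as bool */
        module_param_named(enabled, foo_enabled, bool, 0);
        MODULE_PARM_DESC(enabled, "enable the foo feature");

Loaded as a module this would be set with something like `modprobe foo enabled=1`; the exact name under which the ide-acpi knobs surface depends on how ide-core is built, so treat that as an assumption.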
@@ -309,7 +317,7 @@ static int do_drive_get_GTF(ide_drive_t *drive,
309 if (ACPI_FAILURE(status)) { 317 if (ACPI_FAILURE(status)) {
310 printk(KERN_DEBUG 318 printk(KERN_DEBUG
311 "%s: Run _GTF error: status = 0x%x\n", 319 "%s: Run _GTF error: status = 0x%x\n",
312 __FUNCTION__, status); 320 __func__, status);
313 goto out; 321 goto out;
314 } 322 }
315 323
@@ -335,7 +343,7 @@ static int do_drive_get_GTF(ide_drive_t *drive,
335 out_obj->buffer.length % REGS_PER_GTF) { 343 out_obj->buffer.length % REGS_PER_GTF) {
336 printk(KERN_ERR 344 printk(KERN_ERR
337 "%s: unexpected GTF length (%d) or addr (0x%p)\n", 345 "%s: unexpected GTF length (%d) or addr (0x%p)\n",
338 __FUNCTION__, out_obj->buffer.length, 346 __func__, out_obj->buffer.length,
339 out_obj->buffer.pointer); 347 out_obj->buffer.pointer);
340 err = -ENOENT; 348 err = -ENOENT;
341 kfree(output.pointer); 349 kfree(output.pointer);
@@ -376,7 +384,7 @@ static int taskfile_load_raw(ide_drive_t *drive,
376 memcpy(&args.tf_array[7], &gtf->tfa, 7); 384 memcpy(&args.tf_array[7], &gtf->tfa, 7);
377 args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; 385 args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
378 386
379 if (ide_noacpitfs) { 387 if (!ide_acpigtf) {
380 DEBPRINT("_GTF execution disabled\n"); 388 DEBPRINT("_GTF execution disabled\n");
381 return err; 389 return err;
382 } 390 }
@@ -384,7 +392,7 @@ static int taskfile_load_raw(ide_drive_t *drive,
384 err = ide_no_data_taskfile(drive, &args); 392 err = ide_no_data_taskfile(drive, &args);
385 if (err) 393 if (err)
386 printk(KERN_ERR "%s: ide_no_data_taskfile failed: %u\n", 394 printk(KERN_ERR "%s: ide_no_data_taskfile failed: %u\n",
387 __FUNCTION__, err); 395 __func__, err);
388 396
389 return err; 397 return err;
390} 398}
@@ -422,7 +430,7 @@ static int do_drive_set_taskfiles(ide_drive_t *drive,
422 430
423 if (gtf_length % REGS_PER_GTF) { 431 if (gtf_length % REGS_PER_GTF) {
424 printk(KERN_ERR "%s: unexpected GTF length (%d)\n", 432 printk(KERN_ERR "%s: unexpected GTF length (%d)\n",
425 __FUNCTION__, gtf_length); 433 __func__, gtf_length);
426 goto out; 434 goto out;
427 } 435 }
428 436
@@ -547,7 +555,7 @@ void ide_acpi_get_timing(ide_hwif_t *hwif)
547 printk(KERN_ERR 555 printk(KERN_ERR
548 "%s: unexpected _GTM length (0x%x)[should be 0x%zx] or " 556 "%s: unexpected _GTM length (0x%x)[should be 0x%zx] or "
549 "addr (0x%p)\n", 557 "addr (0x%p)\n",
550 __FUNCTION__, out_obj->buffer.length, 558 __func__, out_obj->buffer.length,
551 sizeof(struct GTM_buffer), out_obj->buffer.pointer); 559 sizeof(struct GTM_buffer), out_obj->buffer.pointer);
552 return; 560 return;
553 } 561 }
@@ -721,7 +729,7 @@ void ide_acpi_port_init_devices(ide_hwif_t *hwif)
721 drive->name, err); 729 drive->name, err);
722 } 730 }
723 731
724 if (ide_noacpionboot) { 732 if (!ide_acpionboot) {
725 DEBPRINT("ACPI methods disabled on boot\n"); 733 DEBPRINT("ACPI methods disabled on boot\n");
726 return; 734 return;
727 } 735 }
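All the __FUNCTION__ to __func__ changes in this file are the same mechanical swap: __func__ is the C99 predefined identifier, while __FUNCTION__ was the pre-standard GCC spelling. Both expand to the enclosing function's name:

        #include <linux/kernel.h>

        static void foo_show(void)
        {
                /* prints "foo_show: hello" */
                printk(KERN_DEBUG "%s: hello\n", __func__);
        }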
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index fe5aefbf8339..b34fd2bde96f 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -13,8 +13,8 @@
13 * 13 *
14 * Suggestions are welcome. Patches that work are more welcome though. ;-) 14 * Suggestions are welcome. Patches that work are more welcome though. ;-)
15 * For those wishing to work on this driver, please be sure you download 15 * For those wishing to work on this driver, please be sure you download
16 * and comply with the latest Mt. Fuji (SFF8090 version 4) and ATAPI 16 * and comply with the latest Mt. Fuji (SFF8090 version 4) and ATAPI
17 * (SFF-8020i rev 2.6) standards. These documents can be obtained by 17 * (SFF-8020i rev 2.6) standards. These documents can be obtained by
18 * anonymous ftp from: 18 * anonymous ftp from:
19 * ftp://fission.dt.wdc.com/pub/standards/SFF_atapi/spec/SFF8020-r2.6/PS/8020r26.ps 19 * ftp://fission.dt.wdc.com/pub/standards/SFF_atapi/spec/SFF8020-r2.6/PS/8020r26.ps
20 * ftp://ftp.avc-pioneer.com/Mtfuji4/Spec/Fuji4r10.pdf 20 * ftp://ftp.avc-pioneer.com/Mtfuji4/Spec/Fuji4r10.pdf
@@ -39,19 +39,20 @@
39#include <linux/mutex.h> 39#include <linux/mutex.h>
40#include <linux/bcd.h> 40#include <linux/bcd.h>
41 41
42#include <scsi/scsi.h> /* For SCSI -> ATAPI command conversion */ 42/* For SCSI -> ATAPI command conversion */
43#include <scsi/scsi.h>
43 44
44#include <asm/irq.h> 45#include <linux/irq.h>
45#include <asm/io.h> 46#include <linux/io.h>
46#include <asm/byteorder.h> 47#include <asm/byteorder.h>
47#include <asm/uaccess.h> 48#include <linux/uaccess.h>
48#include <asm/unaligned.h> 49#include <asm/unaligned.h>
49 50
50#include "ide-cd.h" 51#include "ide-cd.h"
51 52
52static DEFINE_MUTEX(idecd_ref_mutex); 53static DEFINE_MUTEX(idecd_ref_mutex);
53 54
54#define to_ide_cd(obj) container_of(obj, struct cdrom_info, kref) 55#define to_ide_cd(obj) container_of(obj, struct cdrom_info, kref)
55 56
56#define ide_cd_g(disk) \ 57#define ide_cd_g(disk) \
57 container_of((disk)->private_data, struct cdrom_info, driver) 58 container_of((disk)->private_data, struct cdrom_info, driver)
@@ -77,19 +78,17 @@ static void ide_cd_put(struct cdrom_info *cd)
77 mutex_unlock(&idecd_ref_mutex); 78 mutex_unlock(&idecd_ref_mutex);
78} 79}
79 80
80/**************************************************************************** 81/*
81 * Generic packet command support and error handling routines. 82 * Generic packet command support and error handling routines.
82 */ 83 */
83 84
84/* Mark that we've seen a media change, and invalidate our internal 85/* Mark that we've seen a media change and invalidate our internal buffers. */
85 buffers. */ 86static void cdrom_saw_media_change(ide_drive_t *drive)
86static void cdrom_saw_media_change (ide_drive_t *drive)
87{ 87{
88 struct cdrom_info *cd = drive->driver_data; 88 struct cdrom_info *cd = drive->driver_data;
89 89
90 cd->cd_flags |= IDE_CD_FLAG_MEDIA_CHANGED; 90 cd->cd_flags |= IDE_CD_FLAG_MEDIA_CHANGED;
91 cd->cd_flags &= ~IDE_CD_FLAG_TOC_VALID; 91 cd->cd_flags &= ~IDE_CD_FLAG_TOC_VALID;
92 cd->nsectors_buffered = 0;
93} 92}
94 93
95static int cdrom_log_sense(ide_drive_t *drive, struct request *rq, 94static int cdrom_log_sense(ide_drive_t *drive, struct request *rq,
@@ -101,44 +100,43 @@ static int cdrom_log_sense(ide_drive_t *drive, struct request *rq,
101 return 0; 100 return 0;
102 101
103 switch (sense->sense_key) { 102 switch (sense->sense_key) {
104 case NO_SENSE: case RECOVERED_ERROR: 103 case NO_SENSE:
105 break; 104 case RECOVERED_ERROR:
106 case NOT_READY: 105 break;
107 /* 106 case NOT_READY:
108 * don't care about tray state messages for 107 /*
109 * e.g. capacity commands or in-progress or 108 * don't care about tray state messages for e.g. capacity
110 * becoming ready 109 * commands or in-progress or becoming ready
111 */ 110 */
112 if (sense->asc == 0x3a || sense->asc == 0x04) 111 if (sense->asc == 0x3a || sense->asc == 0x04)
113 break;
114 log = 1;
115 break;
116 case ILLEGAL_REQUEST:
117 /*
118 * don't log START_STOP unit with LoEj set, since
119 * we cannot reliably check if drive can auto-close
120 */
121 if (rq->cmd[0] == GPCMD_START_STOP_UNIT && sense->asc == 0x24)
122 break;
123 log = 1;
124 break;
125 case UNIT_ATTENTION:
126 /*
127 * Make good and sure we've seen this potential media
128 * change. Some drives (i.e. Creative) fail to present
129 * the correct sense key in the error register.
130 */
131 cdrom_saw_media_change(drive);
132 break; 112 break;
133 default: 113 log = 1;
134 log = 1; 114 break;
115 case ILLEGAL_REQUEST:
116 /*
117 * don't log START_STOP unit with LoEj set, since we cannot
118 * reliably check if drive can auto-close
119 */
120 if (rq->cmd[0] == GPCMD_START_STOP_UNIT && sense->asc == 0x24)
135 break; 121 break;
122 log = 1;
123 break;
124 case UNIT_ATTENTION:
125 /*
126 * Make good and sure we've seen this potential media change.
127 * Some drives (i.e. Creative) fail to present the correct sense
128 * key in the error register.
129 */
130 cdrom_saw_media_change(drive);
131 break;
132 default:
133 log = 1;
134 break;
136 } 135 }
137 return log; 136 return log;
138} 137}
139 138
140static 139static void cdrom_analyze_sense_data(ide_drive_t *drive,
141void cdrom_analyze_sense_data(ide_drive_t *drive,
142 struct request *failed_command, 140 struct request *failed_command,
143 struct request_sense *sense) 141 struct request_sense *sense)
144{ 142{
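The cdrom_log_sense() rewrite above is purely a reindent to the kernel's switch style: case labels at the same depth as the switch itself, with the fall-through cases stacked. The shape, reduced to a sketch (sense-key values per SCSI, function name hypothetical):

        static int foo_log_sense(u8 sense_key)
        {
                int log = 0;

                switch (sense_key) {
                case 0x00:      /* NO_SENSE */
                case 0x01:      /* RECOVERED_ERROR */
                        break;  /* nothing worth logging */
                default:
                        log = 1;
                        break;
                }

                return log;
        }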
@@ -151,16 +149,17 @@ void cdrom_analyze_sense_data(ide_drive_t *drive,
151 return; 149 return;
152 150
153 /* 151 /*
154 * If a read toc is executed for a CD-R or CD-RW medium where 152 * If a read toc is executed for a CD-R or CD-RW medium where the first
155 * the first toc has not been recorded yet, it will fail with 153 * toc has not been recorded yet, it will fail with 05/24/00 (which is a
156 * 05/24/00 (which is a confusing error) 154 * confusing error)
157 */ 155 */
158 if (failed_command && failed_command->cmd[0] == GPCMD_READ_TOC_PMA_ATIP) 156 if (failed_command && failed_command->cmd[0] == GPCMD_READ_TOC_PMA_ATIP)
159 if (sense->sense_key == 0x05 && sense->asc == 0x24) 157 if (sense->sense_key == 0x05 && sense->asc == 0x24)
160 return; 158 return;
161 159
162 if (sense->error_code == 0x70) { /* Current Error */ 160 /* current error */
163 switch(sense->sense_key) { 161 if (sense->error_code == 0x70) {
162 switch (sense->sense_key) {
164 case MEDIUM_ERROR: 163 case MEDIUM_ERROR:
165 case VOLUME_OVERFLOW: 164 case VOLUME_OVERFLOW:
166 case ILLEGAL_REQUEST: 165 case ILLEGAL_REQUEST:
@@ -178,25 +177,23 @@ void cdrom_analyze_sense_data(ide_drive_t *drive,
178 if (bio_sectors < 4) 177 if (bio_sectors < 4)
179 bio_sectors = 4; 178 bio_sectors = 4;
180 if (drive->queue->hardsect_size == 2048) 179 if (drive->queue->hardsect_size == 2048)
181 sector <<= 2; /* Device sector size is 2K */ 180 /* device sector size is 2K */
182 sector &= ~(bio_sectors -1); 181 sector <<= 2;
182 sector &= ~(bio_sectors - 1);
183 valid = (sector - failed_command->sector) << 9; 183 valid = (sector - failed_command->sector) << 9;
184 184
185 if (valid < 0) 185 if (valid < 0)
186 valid = 0; 186 valid = 0;
187 if (sector < get_capacity(info->disk) && 187 if (sector < get_capacity(info->disk) &&
188 drive->probed_capacity - sector < 4 * 75) { 188 drive->probed_capacity - sector < 4 * 75)
189 set_capacity(info->disk, sector); 189 set_capacity(info->disk, sector);
190 } 190 }
191 } 191 }
192 }
193 192
194 ide_cd_log_error(drive->name, failed_command, sense); 193 ide_cd_log_error(drive->name, failed_command, sense);
195} 194}
196 195
197/* 196/* Initialize a ide-cd packet command request */
198 * Initialize a ide-cd packet command request
199 */
200void ide_cd_init_rq(ide_drive_t *drive, struct request *rq) 197void ide_cd_init_rq(ide_drive_t *drive, struct request *rq)
201{ 198{
202 struct cdrom_info *cd = drive->driver_data; 199 struct cdrom_info *cd = drive->driver_data;
@@ -220,7 +217,8 @@ static void cdrom_queue_request_sense(ide_drive_t *drive, void *sense,
220 217
221 rq->data = sense; 218 rq->data = sense;
222 rq->cmd[0] = GPCMD_REQUEST_SENSE; 219 rq->cmd[0] = GPCMD_REQUEST_SENSE;
223 rq->cmd[4] = rq->data_len = 18; 220 rq->cmd[4] = 18;
221 rq->data_len = 18;
224 222
225 rq->cmd_type = REQ_TYPE_SENSE; 223 rq->cmd_type = REQ_TYPE_SENSE;
226 224
@@ -230,7 +228,7 @@ static void cdrom_queue_request_sense(ide_drive_t *drive, void *sense,
230 (void) ide_do_drive_cmd(drive, rq, ide_preempt); 228 (void) ide_do_drive_cmd(drive, rq, ide_preempt);
231} 229}
232 230
233static void cdrom_end_request (ide_drive_t *drive, int uptodate) 231static void cdrom_end_request(ide_drive_t *drive, int uptodate)
234{ 232{
235 struct request *rq = HWGROUP(drive)->rq; 233 struct request *rq = HWGROUP(drive)->rq;
236 int nsectors = rq->hard_cur_sectors; 234 int nsectors = rq->hard_cur_sectors;
@@ -252,7 +250,7 @@ static void cdrom_end_request (ide_drive_t *drive, int uptodate)
252 } 250 }
253 cdrom_analyze_sense_data(drive, failed, sense); 251 cdrom_analyze_sense_data(drive, failed, sense);
254 /* 252 /*
255 * now end failed request 253 * now end the failed request
256 */ 254 */
257 if (blk_fs_request(failed)) { 255 if (blk_fs_request(failed)) {
258 if (ide_end_dequeued_request(drive, failed, 0, 256 if (ide_end_dequeued_request(drive, failed, 0,
@@ -280,21 +278,24 @@ static void cdrom_end_request (ide_drive_t *drive, int uptodate)
280 ide_end_request(drive, uptodate, nsectors); 278 ide_end_request(drive, uptodate, nsectors);
281} 279}
282 280
283static void ide_dump_status_no_sense(ide_drive_t *drive, const char *msg, u8 stat) 281static void ide_dump_status_no_sense(ide_drive_t *drive, const char *msg, u8 st)
284{ 282{
285 if (stat & 0x80) 283 if (st & 0x80)
286 return; 284 return;
287 ide_dump_status(drive, msg, stat); 285 ide_dump_status(drive, msg, st);
288} 286}
289 287
290/* Returns 0 if the request should be continued. 288/*
291 Returns 1 if the request was ended. */ 289 * Returns:
290 * 0: if the request should be continued.
291 * 1: if the request was ended.
292 */
292static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret) 293static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
293{ 294{
294 struct request *rq = HWGROUP(drive)->rq; 295 struct request *rq = HWGROUP(drive)->rq;
295 int stat, err, sense_key; 296 int stat, err, sense_key;
296 297
297 /* Check for errors. */ 298 /* check for errors */
298 stat = ide_read_status(drive); 299 stat = ide_read_status(drive);
299 300
300 if (stat_ret) 301 if (stat_ret)
@@ -303,20 +304,22 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
303 if (OK_STAT(stat, good_stat, BAD_R_STAT)) 304 if (OK_STAT(stat, good_stat, BAD_R_STAT))
304 return 0; 305 return 0;
305 306
306 /* Get the IDE error register. */ 307 /* get the IDE error register */
307 err = ide_read_error(drive); 308 err = ide_read_error(drive);
308 sense_key = err >> 4; 309 sense_key = err >> 4;
309 310
310 if (rq == NULL) { 311 if (rq == NULL) {
311 printk("%s: missing rq in cdrom_decode_status\n", drive->name); 312 printk(KERN_ERR "%s: missing rq in %s\n",
313 drive->name, __func__);
312 return 1; 314 return 1;
313 } 315 }
314 316
315 if (blk_sense_request(rq)) { 317 if (blk_sense_request(rq)) {
316 /* We got an error trying to get sense info 318 /*
317 from the drive (probably while trying 319 * We got an error trying to get sense info from the drive
318 to recover from a former error). Just give up. */ 320 * (probably while trying to recover from a former error).
319 321 * Just give up.
322 */
320 rq->cmd_flags |= REQ_FAILED; 323 rq->cmd_flags |= REQ_FAILED;
321 cdrom_end_request(drive, 0); 324 cdrom_end_request(drive, 0);
322 ide_error(drive, "request sense failure", stat); 325 ide_error(drive, "request sense failure", stat);
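The `err >> 4` above relies on the ATAPI register layout: after a failed packet command, the sense key is reported in the high nibble of the error register. As a one-line sketch:

        static inline u8 atapi_sense_key(u8 err)
        {
                return err >> 4;        /* sense key lives in bits 7-4 */
        }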
@@ -332,28 +335,27 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
332 if (blk_pc_request(rq) && !rq->errors) 335 if (blk_pc_request(rq) && !rq->errors)
333 rq->errors = SAM_STAT_CHECK_CONDITION; 336 rq->errors = SAM_STAT_CHECK_CONDITION;
334 337
335 /* Check for tray open. */ 338 /* check for tray open */
336 if (sense_key == NOT_READY) { 339 if (sense_key == NOT_READY) {
337 cdrom_saw_media_change (drive); 340 cdrom_saw_media_change(drive);
338 } else if (sense_key == UNIT_ATTENTION) { 341 } else if (sense_key == UNIT_ATTENTION) {
339 /* Check for media change. */ 342 /* check for media change */
340 cdrom_saw_media_change (drive); 343 cdrom_saw_media_change(drive);
341 /*printk("%s: media changed\n",drive->name);*/
342 return 0; 344 return 0;
343 } else if ((sense_key == ILLEGAL_REQUEST) && 345 } else if (sense_key == ILLEGAL_REQUEST &&
344 (rq->cmd[0] == GPCMD_START_STOP_UNIT)) { 346 rq->cmd[0] == GPCMD_START_STOP_UNIT) {
345 /* 347 /*
346 * Don't print error message for this condition-- 348 * Don't print error message for this condition--
347 * SFF8090i indicates that 5/24/00 is the correct 349 * SFF8090i indicates that 5/24/00 is the correct
348 * response to a request to close the tray if the 350 * response to a request to close the tray if the
349 * drive doesn't have that capability. 351 * drive doesn't have that capability.
350 * cdrom_log_sense() knows this! 352 * cdrom_log_sense() knows this!
351 */ 353 */
352 } else if (!(rq->cmd_flags & REQ_QUIET)) { 354 } else if (!(rq->cmd_flags & REQ_QUIET)) {
353 /* Otherwise, print an error. */ 355 /* otherwise, print an error */
354 ide_dump_status(drive, "packet command error", stat); 356 ide_dump_status(drive, "packet command error", stat);
355 } 357 }
356 358
357 rq->cmd_flags |= REQ_FAILED; 359 rq->cmd_flags |= REQ_FAILED;
358 360
359 /* 361 /*
@@ -366,27 +368,30 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
366 } else if (blk_fs_request(rq)) { 368 } else if (blk_fs_request(rq)) {
367 int do_end_request = 0; 369 int do_end_request = 0;
368 370
369 /* Handle errors from READ and WRITE requests. */ 371 /* handle errors from READ and WRITE requests */
370 372
371 if (blk_noretry_request(rq)) 373 if (blk_noretry_request(rq))
372 do_end_request = 1; 374 do_end_request = 1;
373 375
374 if (sense_key == NOT_READY) { 376 if (sense_key == NOT_READY) {
375 /* Tray open. */ 377 /* tray open */
376 if (rq_data_dir(rq) == READ) { 378 if (rq_data_dir(rq) == READ) {
377 cdrom_saw_media_change (drive); 379 cdrom_saw_media_change(drive);
378 380
379 /* Fail the request. */ 381 /* fail the request */
380 printk ("%s: tray open\n", drive->name); 382 printk(KERN_ERR "%s: tray open\n", drive->name);
381 do_end_request = 1; 383 do_end_request = 1;
382 } else { 384 } else {
383 struct cdrom_info *info = drive->driver_data; 385 struct cdrom_info *info = drive->driver_data;
384 386
385 /* allow the drive 5 seconds to recover, some 387 /*
388 * Allow the drive 5 seconds to recover, some
386 * devices will return this error while flushing 389 * devices will return this error while flushing
387 * data from cache */ 390 * data from cache.
391 */
388 if (!rq->errors) 392 if (!rq->errors)
389 info->write_timeout = jiffies + ATAPI_WAIT_WRITE_BUSY; 393 info->write_timeout = jiffies +
394 ATAPI_WAIT_WRITE_BUSY;
390 rq->errors = 1; 395 rq->errors = 1;
391 if (time_after(jiffies, info->write_timeout)) 396 if (time_after(jiffies, info->write_timeout))
392 do_end_request = 1; 397 do_end_request = 1;
@@ -394,59 +399,68 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
394 unsigned long flags; 399 unsigned long flags;
395 400
396 /* 401 /*
397 * take a breather relying on the 402 * take a breather relying on the unplug
398 * unplug timer to kick us again 403 * timer to kick us again
399 */ 404 */
400 spin_lock_irqsave(&ide_lock, flags); 405 spin_lock_irqsave(&ide_lock, flags);
401 blk_plug_device(drive->queue); 406 blk_plug_device(drive->queue);
402 spin_unlock_irqrestore(&ide_lock,flags); 407 spin_unlock_irqrestore(&ide_lock,
408 flags);
403 return 1; 409 return 1;
404 } 410 }
405 } 411 }
406 } else if (sense_key == UNIT_ATTENTION) { 412 } else if (sense_key == UNIT_ATTENTION) {
407 /* Media change. */ 413 /* media change */
408 cdrom_saw_media_change (drive); 414 cdrom_saw_media_change(drive);
409 415
410 /* Arrange to retry the request. 416 /*
411 But be sure to give up if we've retried 417 * Arrange to retry the request but be sure to give up
412 too many times. */ 418 * if we've retried too many times.
419 */
413 if (++rq->errors > ERROR_MAX) 420 if (++rq->errors > ERROR_MAX)
414 do_end_request = 1; 421 do_end_request = 1;
415 } else if (sense_key == ILLEGAL_REQUEST || 422 } else if (sense_key == ILLEGAL_REQUEST ||
416 sense_key == DATA_PROTECT) { 423 sense_key == DATA_PROTECT) {
417 /* No point in retrying after an illegal 424 /*
418 request or data protect error.*/ 425 * No point in retrying after an illegal request or data
419 ide_dump_status_no_sense (drive, "command error", stat); 426 * protect error.
427 */
428 ide_dump_status_no_sense(drive, "command error", stat);
420 do_end_request = 1; 429 do_end_request = 1;
421 } else if (sense_key == MEDIUM_ERROR) { 430 } else if (sense_key == MEDIUM_ERROR) {
422 /* No point in re-trying a zillion times on a bad 431 /*
423 * sector... If we got here the error is not correctable */ 432 * No point in re-trying a zillion times on a bad
424 ide_dump_status_no_sense (drive, "media error (bad sector)", stat); 433 * sector. If we got here the error is not correctable.
434 */
435 ide_dump_status_no_sense(drive,
436 "media error (bad sector)",
437 stat);
425 do_end_request = 1; 438 do_end_request = 1;
426 } else if (sense_key == BLANK_CHECK) { 439 } else if (sense_key == BLANK_CHECK) {
427 /* Disk appears blank ?? */ 440 /* disk appears blank ?? */
428 ide_dump_status_no_sense (drive, "media error (blank)", stat); 441 ide_dump_status_no_sense(drive, "media error (blank)",
442 stat);
429 do_end_request = 1; 443 do_end_request = 1;
430 } else if ((err & ~ABRT_ERR) != 0) { 444 } else if ((err & ~ABRT_ERR) != 0) {
431 /* Go to the default handler 445 /* go to the default handler for other errors */
432 for other errors. */
433 ide_error(drive, "cdrom_decode_status", stat); 446 ide_error(drive, "cdrom_decode_status", stat);
434 return 1; 447 return 1;
435 } else if ((++rq->errors > ERROR_MAX)) { 448 } else if ((++rq->errors > ERROR_MAX)) {
436 /* We've racked up too many retries. Abort. */ 449 /* we've racked up too many retries, abort */
437 do_end_request = 1; 450 do_end_request = 1;
438 } 451 }
439 452
440 /* End a request through request sense analysis when we have 453 /*
441 sense data. We need this in order to perform end of media 454 * End a request through request sense analysis when we have
442 processing */ 455 * sense data. We need this in order to perform end of media
443 456 * processing.
457 */
444 if (do_end_request) 458 if (do_end_request)
445 goto end_request; 459 goto end_request;
446 460
447 /* 461 /*
448 * If we got a CHECK_CONDITION status, 462 * If we got a CHECK_CONDITION status, queue
449 * queue a request sense command. 463 * a request sense command.
450 */ 464 */
451 if (stat & ERR_STAT) 465 if (stat & ERR_STAT)
452 cdrom_queue_request_sense(drive, NULL, NULL); 466 cdrom_queue_request_sense(drive, NULL, NULL);
@@ -455,7 +469,7 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
455 cdrom_end_request(drive, 0); 469 cdrom_end_request(drive, 0);
456 } 470 }
457 471
458 /* Retry, or handle the next request. */ 472 /* retry, or handle the next request */
459 return 1; 473 return 1;
460 474
461end_request: 475end_request:
@@ -480,35 +494,37 @@ static int cdrom_timer_expiry(ide_drive_t *drive)
480 unsigned long wait = 0; 494 unsigned long wait = 0;
481 495
482 /* 496 /*
483 * Some commands are *slow* and normally take a long time to 497 * Some commands are *slow* and normally take a long time to complete.
484 * complete. Usually we can use the ATAPI "disconnect" to bypass 498 * Usually we can use the ATAPI "disconnect" to bypass this, but not all
485 * this, but not all commands/drives support that. Let 499 * commands/drives support that. Let ide_timer_expiry keep polling us
486 * ide_timer_expiry keep polling us for these. 500 * for these.
487 */ 501 */
488 switch (rq->cmd[0]) { 502 switch (rq->cmd[0]) {
489 case GPCMD_BLANK: 503 case GPCMD_BLANK:
490 case GPCMD_FORMAT_UNIT: 504 case GPCMD_FORMAT_UNIT:
491 case GPCMD_RESERVE_RZONE_TRACK: 505 case GPCMD_RESERVE_RZONE_TRACK:
492 case GPCMD_CLOSE_TRACK: 506 case GPCMD_CLOSE_TRACK:
493 case GPCMD_FLUSH_CACHE: 507 case GPCMD_FLUSH_CACHE:
494 wait = ATAPI_WAIT_PC; 508 wait = ATAPI_WAIT_PC;
495 break; 509 break;
496 default: 510 default:
497 if (!(rq->cmd_flags & REQ_QUIET)) 511 if (!(rq->cmd_flags & REQ_QUIET))
498 printk(KERN_INFO "ide-cd: cmd 0x%x timed out\n", rq->cmd[0]); 512 printk(KERN_INFO "ide-cd: cmd 0x%x timed out\n",
499 wait = 0; 513 rq->cmd[0]);
500 break; 514 wait = 0;
515 break;
501 } 516 }
502 return wait; 517 return wait;
503} 518}
504 519
505/* Set up the device registers for transferring a packet command on DEV, 520/*
506 expecting to later transfer XFERLEN bytes. HANDLER is the routine 521 * Set up the device registers for transferring a packet command on DEV,
507 which actually transfers the command to the drive. If this is a 522 * expecting to later transfer XFERLEN bytes. HANDLER is the routine
508 drq_interrupt device, this routine will arrange for HANDLER to be 523 * which actually transfers the command to the drive. If this is a
509 called when the interrupt from the drive arrives. Otherwise, HANDLER 524 * drq_interrupt device, this routine will arrange for HANDLER to be
510 will be called immediately after the drive is prepared for the transfer. */ 525 * called when the interrupt from the drive arrives. Otherwise, HANDLER
511 526 * will be called immediately after the drive is prepared for the transfer.
527 */
512static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive, 528static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive,
513 int xferlen, 529 int xferlen,
514 ide_handler_t *handler) 530 ide_handler_t *handler)
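cdrom_timer_expiry() is an ide expiry callback: returning a non-zero wait tells ide_timer_expiry to keep polling that much longer instead of failing the command. A stripped-down sketch of the same idea, using identifiers from the hunk above (ATAPI_WAIT_PC is ide-cd's own constant) with the command list shortened:

        static int foo_expiry(ide_drive_t *drive)
        {
                struct request *rq = HWGROUP(drive)->rq;

                switch (rq->cmd[0]) {
                case GPCMD_FLUSH_CACHE:         /* known to be slow */
                        return ATAPI_WAIT_PC;   /* keep polling */
                default:
                        return 0;               /* normal timeout */
                }
        }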
@@ -517,15 +533,15 @@ static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive,
517 struct cdrom_info *info = drive->driver_data; 533 struct cdrom_info *info = drive->driver_data;
518 ide_hwif_t *hwif = drive->hwif; 534 ide_hwif_t *hwif = drive->hwif;
519 535
520 /* Wait for the controller to be idle. */ 536 /* wait for the controller to be idle */
521 if (ide_wait_stat(&startstop, drive, 0, BUSY_STAT, WAIT_READY)) 537 if (ide_wait_stat(&startstop, drive, 0, BUSY_STAT, WAIT_READY))
522 return startstop; 538 return startstop;
523 539
524 /* FIXME: for Virtual DMA we must check harder */ 540 /* FIXME: for Virtual DMA we must check harder */
525 if (info->dma) 541 if (info->dma)
526 info->dma = !hwif->dma_setup(drive); 542 info->dma = !hwif->dma_ops->dma_setup(drive);
527 543
528 /* Set up the controller registers. */ 544 /* set up the controller registers */
529 ide_pktcmd_tf_load(drive, IDE_TFLAG_OUT_NSECT | IDE_TFLAG_OUT_LBAL | 545 ide_pktcmd_tf_load(drive, IDE_TFLAG_OUT_NSECT | IDE_TFLAG_OUT_LBAL |
530 IDE_TFLAG_NO_SELECT_MASK, xferlen, info->dma); 546 IDE_TFLAG_NO_SELECT_MASK, xferlen, info->dma);
531 547
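This is the consumer side of the ide_dma_ops conversion seen in the cris driver: DMA methods are now reached through hwif->dma_ops rather than per-hwif function pointers. A sketch, assuming dma_setup's 0-on-success convention implied by the `!` above:

        static void foo_start_dma(ide_drive_t *drive)
        {
                ide_hwif_t *hwif = drive->hwif;

                /* dma_setup() returns 0 when the engine is programmed */
                if (hwif->dma_ops->dma_setup(drive) == 0)
                        hwif->dma_ops->dma_start(drive);
        }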
@@ -535,7 +551,8 @@ static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive,
535 drive->waiting_for_dma = 0; 551 drive->waiting_for_dma = 0;
536 552
537 /* packet command */ 553 /* packet command */
538 ide_execute_command(drive, WIN_PACKETCMD, handler, ATAPI_WAIT_PC, cdrom_timer_expiry); 554 ide_execute_command(drive, WIN_PACKETCMD, handler,
555 ATAPI_WAIT_PC, cdrom_timer_expiry);
539 return ide_started; 556 return ide_started;
540 } else { 557 } else {
541 unsigned long flags; 558 unsigned long flags;
@@ -543,7 +560,7 @@ static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive,
543 /* packet command */ 560 /* packet command */
544 spin_lock_irqsave(&ide_lock, flags); 561 spin_lock_irqsave(&ide_lock, flags);
545 hwif->OUTBSYNC(drive, WIN_PACKETCMD, 562 hwif->OUTBSYNC(drive, WIN_PACKETCMD,
546 hwif->io_ports[IDE_COMMAND_OFFSET]); 563 hwif->io_ports.command_addr);
547 ndelay(400); 564 ndelay(400);
548 spin_unlock_irqrestore(&ide_lock, flags); 565 spin_unlock_irqrestore(&ide_lock, flags);
549 566
@@ -551,13 +568,14 @@ static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive,
551 } 568 }
552} 569}
553 570
554/* Send a packet command to DRIVE described by CMD_BUF and CMD_LEN. 571/*
555 The device registers must have already been prepared 572 * Send a packet command to DRIVE described by CMD_BUF and CMD_LEN. The device
556 by cdrom_start_packet_command. 573 * registers must have already been prepared by cdrom_start_packet_command.
557 HANDLER is the interrupt handler to call when the command completes 574 * HANDLER is the interrupt handler to call when the command completes or
558 or there's data ready. */ 575 * there's data ready.
576 */
559#define ATAPI_MIN_CDB_BYTES 12 577#define ATAPI_MIN_CDB_BYTES 12
560static ide_startstop_t cdrom_transfer_packet_command (ide_drive_t *drive, 578static ide_startstop_t cdrom_transfer_packet_command(ide_drive_t *drive,
561 struct request *rq, 579 struct request *rq,
562 ide_handler_t *handler) 580 ide_handler_t *handler)
563{ 581{
@@ -567,24 +585,26 @@ static ide_startstop_t cdrom_transfer_packet_command (ide_drive_t *drive,
567 ide_startstop_t startstop; 585 ide_startstop_t startstop;
568 586
569 if (info->cd_flags & IDE_CD_FLAG_DRQ_INTERRUPT) { 587 if (info->cd_flags & IDE_CD_FLAG_DRQ_INTERRUPT) {
570 /* Here we should have been called after receiving an interrupt 588 /*
571 from the device. DRQ should how be set. */ 589 * Here we should have been called after receiving an interrupt
 571 from the device. DRQ should now be set. */ 589 * Here we should have been called after receiving an interrupt
 590 * from the device. DRQ should now be set.
572 592
573 /* Check for errors. */ 593 /* check for errors */
574 if (cdrom_decode_status(drive, DRQ_STAT, NULL)) 594 if (cdrom_decode_status(drive, DRQ_STAT, NULL))
575 return ide_stopped; 595 return ide_stopped;
576 596
577 /* Ok, next interrupt will be DMA interrupt. */ 597 /* ok, next interrupt will be DMA interrupt */
578 if (info->dma) 598 if (info->dma)
579 drive->waiting_for_dma = 1; 599 drive->waiting_for_dma = 1;
580 } else { 600 } else {
581 /* Otherwise, we must wait for DRQ to get set. */ 601 /* otherwise, we must wait for DRQ to get set */
582 if (ide_wait_stat(&startstop, drive, DRQ_STAT, 602 if (ide_wait_stat(&startstop, drive, DRQ_STAT,
583 BUSY_STAT, WAIT_READY)) 603 BUSY_STAT, WAIT_READY))
584 return startstop; 604 return startstop;
585 } 605 }
586 606
587 /* Arm the interrupt handler. */ 607 /* arm the interrupt handler */
588 ide_set_handler(drive, handler, rq->timeout, cdrom_timer_expiry); 608 ide_set_handler(drive, handler, rq->timeout, cdrom_timer_expiry);
589 609
590 /* ATAPI commands get padded out to 12 bytes minimum */ 610 /* ATAPI commands get padded out to 12 bytes minimum */
@@ -592,20 +612,19 @@ static ide_startstop_t cdrom_transfer_packet_command (ide_drive_t *drive,
592 if (cmd_len < ATAPI_MIN_CDB_BYTES) 612 if (cmd_len < ATAPI_MIN_CDB_BYTES)
593 cmd_len = ATAPI_MIN_CDB_BYTES; 613 cmd_len = ATAPI_MIN_CDB_BYTES;
594 614
595 /* Send the command to the device. */ 615 /* send the command to the device */
596 HWIF(drive)->atapi_output_bytes(drive, rq->cmd, cmd_len); 616 HWIF(drive)->atapi_output_bytes(drive, rq->cmd, cmd_len);
597 617
598 /* Start the DMA if need be */ 618 /* start the DMA if need be */
599 if (info->dma) 619 if (info->dma)
600 hwif->dma_start(drive); 620 hwif->dma_ops->dma_start(drive);
601 621
602 return ide_started; 622 return ide_started;
603} 623}
604 624
605/**************************************************************************** 625/*
606 * Block read functions. 626 * Block read functions.
607 */ 627 */
608
609static void ide_cd_pad_transfer(ide_drive_t *drive, xfer_func_t *xf, int len) 628static void ide_cd_pad_transfer(ide_drive_t *drive, xfer_func_t *xf, int len)
610{ 629{
611 while (len > 0) { 630 while (len > 0) {
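The ATAPI_MIN_CDB_BYTES clamp in the hunk above implements the rule that ATAPI command packets are sent as at least 12 bytes, whatever the nominal SCSI CDB length was. As a small helper (hypothetical name):

        static int atapi_cdb_len(int cmd_len)
        {
                /* ATAPI commands get padded out to 12 bytes minimum */
                return cmd_len < ATAPI_MIN_CDB_BYTES ?
                        ATAPI_MIN_CDB_BYTES : cmd_len;
        }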
@@ -626,47 +645,6 @@ static void ide_cd_drain_data(ide_drive_t *drive, int nsects)
626} 645}
627 646
628/* 647/*
629 * Buffer up to SECTORS_TO_TRANSFER sectors from the drive in our sector
630 * buffer. Once the first sector is added, any subsequent sectors are
631 * assumed to be continuous (until the buffer is cleared). For the first
632 * sector added, SECTOR is its sector number. (SECTOR is then ignored until
633 * the buffer is cleared.)
634 */
635static void cdrom_buffer_sectors (ide_drive_t *drive, unsigned long sector,
636 int sectors_to_transfer)
637{
638 struct cdrom_info *info = drive->driver_data;
639
640 /* Number of sectors to read into the buffer. */
641 int sectors_to_buffer = min_t(int, sectors_to_transfer,
642 (SECTOR_BUFFER_SIZE >> SECTOR_BITS) -
643 info->nsectors_buffered);
644
645 char *dest;
646
647 /* If we couldn't get a buffer, don't try to buffer anything... */
648 if (info->buffer == NULL)
649 sectors_to_buffer = 0;
650
651 /* If this is the first sector in the buffer, remember its number. */
652 if (info->nsectors_buffered == 0)
653 info->sector_buffered = sector;
654
655 /* Read the data into the buffer. */
656 dest = info->buffer + info->nsectors_buffered * SECTOR_SIZE;
657 while (sectors_to_buffer > 0) {
658 HWIF(drive)->atapi_input_bytes(drive, dest, SECTOR_SIZE);
659 --sectors_to_buffer;
660 --sectors_to_transfer;
661 ++info->nsectors_buffered;
662 dest += SECTOR_SIZE;
663 }
664
665 /* Throw away any remaining data. */
666 ide_cd_drain_data(drive, sectors_to_transfer);
667}
668
669/*
670 * Check the contents of the interrupt reason register from the cdrom 648 * Check the contents of the interrupt reason register from the cdrom
671 * and attempt to recover if there are problems. Returns 0 if everything's 649 * and attempt to recover if there are problems. Returns 0 if everything's
672 * ok; nonzero if the request has been terminated. 650 * ok; nonzero if the request has been terminated.
@@ -684,22 +662,23 @@ static int ide_cd_check_ireason(ide_drive_t *drive, struct request *rq,
684 ide_hwif_t *hwif = drive->hwif; 662 ide_hwif_t *hwif = drive->hwif;
685 xfer_func_t *xf; 663 xfer_func_t *xf;
686 664
687 /* Whoops... */ 665 /* whoops... */
688 printk(KERN_ERR "%s: %s: wrong transfer direction!\n", 666 printk(KERN_ERR "%s: %s: wrong transfer direction!\n",
689 drive->name, __FUNCTION__); 667 drive->name, __func__);
690 668
691 xf = rw ? hwif->atapi_output_bytes : hwif->atapi_input_bytes; 669 xf = rw ? hwif->atapi_output_bytes : hwif->atapi_input_bytes;
692 ide_cd_pad_transfer(drive, xf, len); 670 ide_cd_pad_transfer(drive, xf, len);
693 } else if (rw == 0 && ireason == 1) { 671 } else if (rw == 0 && ireason == 1) {
694 /* Some drives (ASUS) seem to tell us that status 672 /*
695 * info is available. just get it and ignore. 673 * Some drives (ASUS) seem to tell us that status info is
674 * available. Just get it and ignore.
696 */ 675 */
697 (void)ide_read_status(drive); 676 (void)ide_read_status(drive);
698 return 0; 677 return 0;
699 } else { 678 } else {
700 /* Drive wants a command packet, or invalid ireason... */ 679 /* drive wants a command packet, or invalid ireason... */
701 printk(KERN_ERR "%s: %s: bad interrupt reason 0x%02x\n", 680 printk(KERN_ERR "%s: %s: bad interrupt reason 0x%02x\n",
702 drive->name, __FUNCTION__, ireason); 681 drive->name, __func__, ireason);
703 } 682 }
704 683
705 if (rq->cmd_type == REQ_TYPE_ATA_PC) 684 if (rq->cmd_type == REQ_TYPE_ATA_PC)
@@ -721,7 +700,7 @@ static int ide_cd_check_transfer_size(ide_drive_t *drive, int len)
721 return 0; 700 return 0;
722 701
723 printk(KERN_ERR "%s: %s: Bad transfer size %d\n", 702 printk(KERN_ERR "%s: %s: Bad transfer size %d\n",
724 drive->name, __FUNCTION__, len); 703 drive->name, __func__, len);
725 704
726 if (cd->cd_flags & IDE_CD_FLAG_LIMIT_NFRAMES) 705 if (cd->cd_flags & IDE_CD_FLAG_LIMIT_NFRAMES)
727 printk(KERN_ERR " This drive is not supported by " 706 printk(KERN_ERR " This drive is not supported by "
@@ -734,72 +713,13 @@ static int ide_cd_check_transfer_size(ide_drive_t *drive, int len)
734 return 1; 713 return 1;
735} 714}
736 715
737/*
738 * Try to satisfy some of the current read request from our cached data.
739 * Returns nonzero if the request has been completed, zero otherwise.
740 */
741static int cdrom_read_from_buffer (ide_drive_t *drive)
742{
743 struct cdrom_info *info = drive->driver_data;
744 struct request *rq = HWGROUP(drive)->rq;
745 unsigned short sectors_per_frame;
746
747 sectors_per_frame = queue_hardsect_size(drive->queue) >> SECTOR_BITS;
748
749 /* Can't do anything if there's no buffer. */
750 if (info->buffer == NULL) return 0;
751
752 /* Loop while this request needs data and the next block is present
753 in our cache. */
754 while (rq->nr_sectors > 0 &&
755 rq->sector >= info->sector_buffered &&
756 rq->sector < info->sector_buffered + info->nsectors_buffered) {
757 if (rq->current_nr_sectors == 0)
758 cdrom_end_request(drive, 1);
759
760 memcpy (rq->buffer,
761 info->buffer +
762 (rq->sector - info->sector_buffered) * SECTOR_SIZE,
763 SECTOR_SIZE);
764 rq->buffer += SECTOR_SIZE;
765 --rq->current_nr_sectors;
766 --rq->nr_sectors;
767 ++rq->sector;
768 }
769
770 /* If we've satisfied the current request,
771 terminate it successfully. */
772 if (rq->nr_sectors == 0) {
773 cdrom_end_request(drive, 1);
774 return -1;
775 }
776
777 /* Move on to the next buffer if needed. */
778 if (rq->current_nr_sectors == 0)
779 cdrom_end_request(drive, 1);
780
781 /* If this condition does not hold, then the kluge i use to
782 represent the number of sectors to skip at the start of a transfer
783 will fail. I think that this will never happen, but let's be
784 paranoid and check. */
785 if (rq->current_nr_sectors < bio_cur_sectors(rq->bio) &&
786 (rq->sector & (sectors_per_frame - 1))) {
787 printk(KERN_ERR "%s: cdrom_read_from_buffer: buffer botch (%ld)\n",
788 drive->name, (long)rq->sector);
789 cdrom_end_request(drive, 0);
790 return -1;
791 }
792
793 return 0;
794}
795
796static ide_startstop_t cdrom_newpc_intr(ide_drive_t *); 716static ide_startstop_t cdrom_newpc_intr(ide_drive_t *);
797 717
798/* 718/*
799 * Routine to send a read/write packet command to the drive. 719 * Routine to send a read/write packet command to the drive. This is usually
800 * This is usually called directly from cdrom_start_{read,write}(). 720 * called directly from cdrom_start_{read,write}(). However, for drq_interrupt
 801 * However, for drq_interrupt devices, it is called from an interrupt 721 * devices, it is called from an interrupt when the drive is ready to accept
 802 * when the drive is ready to accept the command. 722 * the command.
803 */ 723 */
804static ide_startstop_t cdrom_start_rw_cont(ide_drive_t *drive) 724static ide_startstop_t cdrom_start_rw_cont(ide_drive_t *drive)
805{ 725{
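With cdrom_buffer_sectors() and cdrom_read_from_buffer() removed above, data the request cannot hold is no longer cached in a private sector buffer but simply drained, as the later cdrom_newpc_intr() hunk shows. A sketch of what such draining amounts to — scratch size arbitrary, foo_ name hypothetical, SECTOR_SIZE as used by ide-cd:

        static void foo_drain(ide_drive_t *drive, int nsects)
        {
                char scratch[64];
                int len = nsects * SECTOR_SIZE;         /* bytes to discard */

                while (len > 0) {
                        int this = min_t(int, len, (int)sizeof(scratch));

                        HWIF(drive)->atapi_input_bytes(drive, scratch, this);
                        len -= this;
                }
        }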
@@ -821,11 +741,11 @@ static ide_startstop_t cdrom_start_rw_cont(ide_drive_t *drive)
821 * is larger than the buffer size. 741 * is larger than the buffer size.
822 */ 742 */
823 if (nskip > 0) { 743 if (nskip > 0) {
824 /* Sanity check... */ 744 /* sanity check... */
825 if (rq->current_nr_sectors != 745 if (rq->current_nr_sectors !=
826 bio_cur_sectors(rq->bio)) { 746 bio_cur_sectors(rq->bio)) {
827 printk(KERN_ERR "%s: %s: buffer botch (%u)\n", 747 printk(KERN_ERR "%s: %s: buffer botch (%u)\n",
828 drive->name, __FUNCTION__, 748 drive->name, __func__,
829 rq->current_nr_sectors); 749 rq->current_nr_sectors);
830 cdrom_end_request(drive, 0); 750 cdrom_end_request(drive, 0);
831 return ide_stopped; 751 return ide_stopped;
@@ -838,10 +758,10 @@ static ide_startstop_t cdrom_start_rw_cont(ide_drive_t *drive)
838 /* the immediate bit */ 758 /* the immediate bit */
839 rq->cmd[1] = 1 << 3; 759 rq->cmd[1] = 1 << 3;
840#endif 760#endif
841 /* Set up the command */ 761 /* set up the command */
842 rq->timeout = ATAPI_WAIT_PC; 762 rq->timeout = ATAPI_WAIT_PC;
843 763
844 /* Send the command to the drive and return. */ 764 /* send the command to the drive and return */
845 return cdrom_transfer_packet_command(drive, rq, cdrom_newpc_intr); 765 return cdrom_transfer_packet_command(drive, rq, cdrom_newpc_intr);
846} 766}
847 767
@@ -849,7 +769,7 @@ static ide_startstop_t cdrom_start_rw_cont(ide_drive_t *drive)
849#define IDECD_SEEK_TIMER (5 * WAIT_MIN_SLEEP) /* 100 ms */ 769#define IDECD_SEEK_TIMER (5 * WAIT_MIN_SLEEP) /* 100 ms */
850#define IDECD_SEEK_TIMEOUT (2 * WAIT_CMD) /* 20 sec */ 770#define IDECD_SEEK_TIMEOUT (2 * WAIT_CMD) /* 20 sec */
851 771
852static ide_startstop_t cdrom_seek_intr (ide_drive_t *drive) 772static ide_startstop_t cdrom_seek_intr(ide_drive_t *drive)
853{ 773{
854 struct cdrom_info *info = drive->driver_data; 774 struct cdrom_info *info = drive->driver_data;
855 int stat; 775 int stat;
@@ -861,19 +781,13 @@ static ide_startstop_t cdrom_seek_intr (ide_drive_t *drive)
861 info->cd_flags |= IDE_CD_FLAG_SEEKING; 781 info->cd_flags |= IDE_CD_FLAG_SEEKING;
862 782
863 if (retry && time_after(jiffies, info->start_seek + IDECD_SEEK_TIMER)) { 783 if (retry && time_after(jiffies, info->start_seek + IDECD_SEEK_TIMER)) {
864 if (--retry == 0) { 784 if (--retry == 0)
865 /*
866 * this condition is far too common, to bother
867 * users about it
868 */
869 /* printk("%s: disabled DSC seek overlap\n", drive->name);*/
870 drive->dsc_overlap = 0; 785 drive->dsc_overlap = 0;
871 }
872 } 786 }
873 return ide_stopped; 787 return ide_stopped;
874} 788}
875 789
876static ide_startstop_t cdrom_start_seek_continuation (ide_drive_t *drive) 790static ide_startstop_t cdrom_start_seek_continuation(ide_drive_t *drive)
877{ 791{
878 struct request *rq = HWGROUP(drive)->rq; 792 struct request *rq = HWGROUP(drive)->rq;
879 sector_t frame = rq->sector; 793 sector_t frame = rq->sector;
@@ -888,36 +802,40 @@ static ide_startstop_t cdrom_start_seek_continuation (ide_drive_t *drive)
888 return cdrom_transfer_packet_command(drive, rq, &cdrom_seek_intr); 802 return cdrom_transfer_packet_command(drive, rq, &cdrom_seek_intr);
889} 803}
890 804
891static ide_startstop_t cdrom_start_seek (ide_drive_t *drive, unsigned int block) 805static ide_startstop_t cdrom_start_seek(ide_drive_t *drive, unsigned int block)
892{ 806{
893 struct cdrom_info *info = drive->driver_data; 807 struct cdrom_info *info = drive->driver_data;
894 808
895 info->dma = 0; 809 info->dma = 0;
896 info->start_seek = jiffies; 810 info->start_seek = jiffies;
897 return cdrom_start_packet_command(drive, 0, cdrom_start_seek_continuation); 811 return cdrom_start_packet_command(drive, 0,
812 cdrom_start_seek_continuation);
898} 813}
899 814
900/* Fix up a possibly partially-processed request so that we can 815/*
901 start it over entirely, or even put it back on the request queue. */ 816 * Fix up a possibly partially-processed request so that we can start it over
902static void restore_request (struct request *rq) 817 * entirely, or even put it back on the request queue.
818 */
819static void restore_request(struct request *rq)
903{ 820{
904 if (rq->buffer != bio_data(rq->bio)) { 821 if (rq->buffer != bio_data(rq->bio)) {
905 sector_t n = (rq->buffer - (char *) bio_data(rq->bio)) / SECTOR_SIZE; 822 sector_t n =
823 (rq->buffer - (char *)bio_data(rq->bio)) / SECTOR_SIZE;
906 824
907 rq->buffer = bio_data(rq->bio); 825 rq->buffer = bio_data(rq->bio);
908 rq->nr_sectors += n; 826 rq->nr_sectors += n;
909 rq->sector -= n; 827 rq->sector -= n;
910 } 828 }
911 rq->hard_cur_sectors = rq->current_nr_sectors = bio_cur_sectors(rq->bio); 829 rq->current_nr_sectors = bio_cur_sectors(rq->bio);
830 rq->hard_cur_sectors = rq->current_nr_sectors;
912 rq->hard_nr_sectors = rq->nr_sectors; 831 rq->hard_nr_sectors = rq->nr_sectors;
913 rq->hard_sector = rq->sector; 832 rq->hard_sector = rq->sector;
914 rq->q->prep_rq_fn(rq->q, rq); 833 rq->q->prep_rq_fn(rq->q, rq);
915} 834}
916 835
917/**************************************************************************** 836/*
918 * Execute all other packet commands. 837 * All other packet commands.
919 */ 838 */
920
921static void ide_cd_request_sense_fixup(struct request *rq) 839static void ide_cd_request_sense_fixup(struct request *rq)
922{ 840{
923 /* 841 /*
@@ -941,7 +859,7 @@ int ide_cd_queue_pc(ide_drive_t *drive, struct request *rq)
941 if (rq->sense == NULL) 859 if (rq->sense == NULL)
942 rq->sense = &sense; 860 rq->sense = &sense;
943 861
944 /* Start of retry loop. */ 862 /* start of retry loop */
945 do { 863 do {
946 int error; 864 int error;
947 unsigned long time = jiffies; 865 unsigned long time = jiffies;
@@ -950,41 +868,45 @@ int ide_cd_queue_pc(ide_drive_t *drive, struct request *rq)
950 error = ide_do_drive_cmd(drive, rq, ide_wait); 868 error = ide_do_drive_cmd(drive, rq, ide_wait);
951 time = jiffies - time; 869 time = jiffies - time;
952 870
953 /* FIXME: we should probably abort/retry or something 871 /*
954 * in case of failure */ 872 * FIXME: we should probably abort/retry or something in case of
873 * failure.
874 */
955 if (rq->cmd_flags & REQ_FAILED) { 875 if (rq->cmd_flags & REQ_FAILED) {
956 /* The request failed. Retry if it was due to a unit 876 /*
957 attention status 877 * The request failed. Retry if it was due to a unit
958 (usually means media was changed). */ 878 * attention status (usually means media was changed).
879 */
959 struct request_sense *reqbuf = rq->sense; 880 struct request_sense *reqbuf = rq->sense;
960 881
961 if (reqbuf->sense_key == UNIT_ATTENTION) 882 if (reqbuf->sense_key == UNIT_ATTENTION)
962 cdrom_saw_media_change(drive); 883 cdrom_saw_media_change(drive);
963 else if (reqbuf->sense_key == NOT_READY && 884 else if (reqbuf->sense_key == NOT_READY &&
964 reqbuf->asc == 4 && reqbuf->ascq != 4) { 885 reqbuf->asc == 4 && reqbuf->ascq != 4) {
965 /* The drive is in the process of loading 886 /*
966 a disk. Retry, but wait a little to give 887 * The drive is in the process of loading
967 the drive time to complete the load. */ 888 * a disk. Retry, but wait a little to give
889 * the drive time to complete the load.
890 */
968 ssleep(2); 891 ssleep(2);
969 } else { 892 } else {
970 /* Otherwise, don't retry. */ 893 /* otherwise, don't retry */
971 retries = 0; 894 retries = 0;
972 } 895 }
973 --retries; 896 --retries;
974 } 897 }
975 898
976 /* End of retry loop. */ 899 /* end of retry loop */
977 } while ((rq->cmd_flags & REQ_FAILED) && retries >= 0); 900 } while ((rq->cmd_flags & REQ_FAILED) && retries >= 0);
978 901
979 /* Return an error if the command failed. */ 902 /* return an error if the command failed */
980 return (rq->cmd_flags & REQ_FAILED) ? -EIO : 0; 903 return (rq->cmd_flags & REQ_FAILED) ? -EIO : 0;
981} 904}
982 905
983/* 906/*
984 * Called from blk_end_request_callback() after the data of the request 907 * Called from blk_end_request_callback() after the data of the request is
985 * is completed and before the request is completed. 908 * completed and before the request itself is completed. By returning value '1',
986 * By returning value '1', blk_end_request_callback() returns immediately 909 * blk_end_request_callback() returns immediately without completing it.
987 * without completing the request.
988 */ 910 */
989static int cdrom_newpc_intr_dummy_cb(struct request *rq) 911static int cdrom_newpc_intr_dummy_cb(struct request *rq)
990{ 912{
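The retry loop in ide_cd_queue_pc() above boils down to: retry after a unit attention (media change), sleep-and-retry while a disc is loading (NOT_READY, asc 4), and give up on anything else. Compressed into a sketch with placeholder helpers:

        #include <scsi/scsi.h>

        struct foo_sense { u8 sense_key, asc; };

        static int foo_issue(struct foo_sense *s);      /* placeholder */

        static int foo_queue_pc(void)
        {
                int retries = 10;

                do {
                        struct foo_sense s;

                        if (foo_issue(&s) == 0)
                                return 0;               /* success */
                        if (s.sense_key == NOT_READY && s.asc == 4)
                                ssleep(2);              /* disc still loading */
                        else if (s.sense_key != UNIT_ATTENTION)
                                break;                  /* not retryable */
                } while (--retries > 0);

                return -EIO;
        }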
@@ -1003,11 +925,11 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
1003 unsigned int timeout; 925 unsigned int timeout;
1004 u8 lowcyl, highcyl; 926 u8 lowcyl, highcyl;
1005 927
1006 /* Check for errors. */ 928 /* check for errors */
1007 dma = info->dma; 929 dma = info->dma;
1008 if (dma) { 930 if (dma) {
1009 info->dma = 0; 931 info->dma = 0;
1010 dma_error = HWIF(drive)->ide_dma_end(drive); 932 dma_error = hwif->dma_ops->dma_end(drive);
1011 if (dma_error) { 933 if (dma_error) {
1012 printk(KERN_ERR "%s: DMA %s error\n", drive->name, 934 printk(KERN_ERR "%s: DMA %s error\n", drive->name,
1013 write ? "write" : "read"); 935 write ? "write" : "read");
@@ -1018,9 +940,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
1018 if (cdrom_decode_status(drive, 0, &stat)) 940 if (cdrom_decode_status(drive, 0, &stat))
1019 return ide_stopped; 941 return ide_stopped;
1020 942
1021 /* 943 /* using dma, transfer is complete now */
1022 * using dma, transfer is complete now
1023 */
1024 if (dma) { 944 if (dma) {
1025 if (dma_error) 945 if (dma_error)
1026 return ide_error(drive, "dma error", stat); 946 return ide_error(drive, "dma error", stat);
@@ -1031,12 +951,10 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
1031 goto end_request; 951 goto end_request;
1032 } 952 }
1033 953
1034 /* 954 /* ok we fall to pio :/ */
1035 * ok we fall to pio :/ 955 ireason = hwif->INB(hwif->io_ports.nsect_addr) & 0x3;
1036 */ 956 lowcyl = hwif->INB(hwif->io_ports.lbam_addr);
1037 ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]) & 0x3; 957 highcyl = hwif->INB(hwif->io_ports.lbah_addr);
1038 lowcyl = hwif->INB(hwif->io_ports[IDE_BCOUNTL_OFFSET]);
1039 highcyl = hwif->INB(hwif->io_ports[IDE_BCOUNTH_OFFSET]);
1040 958
1041 len = lowcyl + (256 * highcyl); 959 len = lowcyl + (256 * highcyl);
1042 960
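In the PIO fallback above, the ATAPI byte count is read back from the LBA-mid/LBA-high (legacy cylinder low/high) registers, which is why len is reassembled as lowcyl + 256 * highcyl:

        static inline int atapi_byte_count(u8 lbam, u8 lbah)
        {
                return lbam + (lbah << 8);      /* == lowcyl + 256 * highcyl */
        }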
@@ -1044,9 +962,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
1044 if (thislen > len) 962 if (thislen > len)
1045 thislen = len; 963 thislen = len;
1046 964
1047 /* 965 /* If DRQ is clear, the command has completed. */
1048 * If DRQ is clear, the command has completed.
1049 */
1050 if ((stat & DRQ_STAT) == 0) { 966 if ((stat & DRQ_STAT) == 0) {
1051 if (blk_fs_request(rq)) { 967 if (blk_fs_request(rq)) {
1052 /* 968 /*
@@ -1057,7 +973,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
1057 if (rq->current_nr_sectors > 0) { 973 if (rq->current_nr_sectors > 0) {
1058 printk(KERN_ERR "%s: %s: data underrun " 974 printk(KERN_ERR "%s: %s: data underrun "
1059 "(%d blocks)\n", 975 "(%d blocks)\n",
1060 drive->name, __FUNCTION__, 976 drive->name, __func__,
1061 rq->current_nr_sectors); 977 rq->current_nr_sectors);
1062 if (!write) 978 if (!write)
1063 rq->cmd_flags |= REQ_FAILED; 979 rq->cmd_flags |= REQ_FAILED;
@@ -1067,15 +983,13 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
1067 return ide_stopped; 983 return ide_stopped;
1068 } else if (!blk_pc_request(rq)) { 984 } else if (!blk_pc_request(rq)) {
1069 ide_cd_request_sense_fixup(rq); 985 ide_cd_request_sense_fixup(rq);
1070 /* Complain if we still have data left to transfer. */ 986 /* complain if we still have data left to transfer */
1071 uptodate = rq->data_len ? 0 : 1; 987 uptodate = rq->data_len ? 0 : 1;
1072 } 988 }
1073 goto end_request; 989 goto end_request;
1074 } 990 }
1075 991
1076 /* 992 /* check which way to transfer data */
1077 * check which way to transfer data
1078 */
1079 if (ide_cd_check_ireason(drive, rq, len, ireason, write)) 993 if (ide_cd_check_ireason(drive, rq, len, ireason, write))
1080 return ide_stopped; 994 return ide_stopped;
1081 995
@@ -1111,16 +1025,12 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
1111 xferfunc = HWIF(drive)->atapi_input_bytes; 1025 xferfunc = HWIF(drive)->atapi_input_bytes;
1112 } 1026 }
1113 1027
1114 /* 1028 /* transfer data */
1115 * transfer data
1116 */
1117 while (thislen > 0) { 1029 while (thislen > 0) {
1118 u8 *ptr = blk_fs_request(rq) ? NULL : rq->data; 1030 u8 *ptr = blk_fs_request(rq) ? NULL : rq->data;
1119 int blen = rq->data_len; 1031 int blen = rq->data_len;
1120 1032
1121 /* 1033 /* bio backed? */
1122 * bio backed?
1123 */
1124 if (rq->bio) { 1034 if (rq->bio) {
1125 if (blk_fs_request(rq)) { 1035 if (blk_fs_request(rq)) {
1126 ptr = rq->buffer; 1036 ptr = rq->buffer;
@@ -1134,11 +1044,10 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
1134 if (!ptr) { 1044 if (!ptr) {
1135 if (blk_fs_request(rq) && !write) 1045 if (blk_fs_request(rq) && !write)
1136 /* 1046 /*
1137 * If the buffers are full, cache the rest 1047 * If the buffers are full, pipe the rest into
1138 * of the data in our internal buffer. 1048 * oblivion.
1139 */ 1049 */
1140 cdrom_buffer_sectors(drive, rq->sector, 1050 ide_cd_drain_data(drive, thislen >> 9);
1141 thislen >> 9);
1142 else { 1051 else {
1143 printk(KERN_ERR "%s: confused, missing data\n", 1052 printk(KERN_ERR "%s: confused, missing data\n",
1144 drive->name); 1053 drive->name);
@@ -1184,9 +1093,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
1184 rq->sense_len += blen; 1093 rq->sense_len += blen;
1185 } 1094 }
1186 1095
1187 /* 1096 /* pad, if necessary */
1188 * pad, if necessary
1189 */
1190 if (!blk_fs_request(rq) && len > 0) 1097 if (!blk_fs_request(rq) && len > 0)
1191 ide_cd_pad_transfer(drive, xferfunc, len); 1098 ide_cd_pad_transfer(drive, xferfunc, len);
1192 1099
@@ -1230,9 +1137,7 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
1230 queue_hardsect_size(drive->queue) >> SECTOR_BITS; 1137 queue_hardsect_size(drive->queue) >> SECTOR_BITS;
1231 1138
1232 if (write) { 1139 if (write) {
1233 /* 1140 /* disk has become write protected */
1234 * disk has become write protected
1235 */
1236 if (cd->disk->policy) { 1141 if (cd->disk->policy) {
1237 cdrom_end_request(drive, 0); 1142 cdrom_end_request(drive, 0);
1238 return ide_stopped; 1143 return ide_stopped;
@@ -1243,15 +1148,9 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
1243 * weirdness which might be present in the request packet. 1148 * weirdness which might be present in the request packet.
1244 */ 1149 */
1245 restore_request(rq); 1150 restore_request(rq);
1246
1247 /* Satisfy whatever we can of this request from our cache. */
1248 if (cdrom_read_from_buffer(drive))
1249 return ide_stopped;
1250 } 1151 }
1251 1152
1252 /* 1153 /* use DMA, if possible / writes *must* be hardware frame aligned */
1253 * use DMA, if possible / writes *must* be hardware frame aligned
1254 */
1255 if ((rq->nr_sectors & (sectors_per_frame - 1)) || 1154 if ((rq->nr_sectors & (sectors_per_frame - 1)) ||
1256 (rq->sector & (sectors_per_frame - 1))) { 1155 (rq->sector & (sectors_per_frame - 1))) {
1257 if (write) { 1156 if (write) {
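
The mask tests just above rely on sectors_per_frame being a power of two: a data frame is CD_FRAMESIZE (2048) bytes, i.e. four 512-byte sectors, so the request's start and length can be checked for frame alignment without a division. A minimal restatement:

	/* sketch: nonzero when the request is not hardware-frame aligned */
	static int cd_rw_misaligned_sketch(unsigned long sector,
					   unsigned long nr_sectors)
	{
		unsigned long spf = 2048 >> 9;	/* sectors per frame == 4 */

		return (nr_sectors & (spf - 1)) || (sector & (spf - 1));
	}
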
@@ -1262,13 +1161,10 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
1262 } else 1161 } else
1263 cd->dma = drive->using_dma; 1162 cd->dma = drive->using_dma;
1264 1163
1265 /* Clear the local sector buffer. */
1266 cd->nsectors_buffered = 0;
1267
1268 if (write) 1164 if (write)
1269 cd->devinfo.media_written = 1; 1165 cd->devinfo.media_written = 1;
1270 1166
1271 /* Start sending the read/write request to the drive. */ 1167 /* start sending the read/write request to the drive */
1272 return cdrom_start_packet_command(drive, 32768, cdrom_start_rw_cont); 1168 return cdrom_start_packet_command(drive, 32768, cdrom_start_rw_cont);
1273} 1169}
1274 1170
@@ -1293,12 +1189,11 @@ static ide_startstop_t cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
1293 1189
1294 info->dma = 0; 1190 info->dma = 0;
1295 1191
1296 /* 1192 /* sg request */
1297 * sg request
1298 */
1299 if (rq->bio) { 1193 if (rq->bio) {
1300 int mask = drive->queue->dma_alignment; 1194 int mask = drive->queue->dma_alignment;
1301 unsigned long addr = (unsigned long) page_address(bio_page(rq->bio)); 1195 unsigned long addr =
1196 (unsigned long)page_address(bio_page(rq->bio));
1302 1197
1303 info->dma = drive->using_dma; 1198 info->dma = drive->using_dma;
1304 1199
@@ -1312,15 +1207,16 @@ static ide_startstop_t cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
1312 info->dma = 0; 1207 info->dma = 0;
1313 } 1208 }
1314 1209
1315 /* Start sending the command to the drive. */ 1210 /* start sending the command to the drive */
1316 return cdrom_start_packet_command(drive, rq->data_len, cdrom_do_newpc_cont); 1211 return cdrom_start_packet_command(drive, rq->data_len,
1212 cdrom_do_newpc_cont);
1317} 1213}
1318 1214
1319/**************************************************************************** 1215/*
1320 * cdrom driver request routine. 1216 * cdrom driver request routine.
1321 */ 1217 */
1322static ide_startstop_t 1218static ide_startstop_t ide_do_rw_cdrom(ide_drive_t *drive, struct request *rq,
1323ide_do_rw_cdrom (ide_drive_t *drive, struct request *rq, sector_t block) 1219 sector_t block)
1324{ 1220{
1325 ide_startstop_t action; 1221 ide_startstop_t action;
1326 struct cdrom_info *info = drive->driver_data; 1222 struct cdrom_info *info = drive->driver_data;
@@ -1332,16 +1228,21 @@ ide_do_rw_cdrom (ide_drive_t *drive, struct request *rq, sector_t block)
1332 1228
1333 if ((stat & SEEK_STAT) != SEEK_STAT) { 1229 if ((stat & SEEK_STAT) != SEEK_STAT) {
1334 if (elapsed < IDECD_SEEK_TIMEOUT) { 1230 if (elapsed < IDECD_SEEK_TIMEOUT) {
1335 ide_stall_queue(drive, IDECD_SEEK_TIMER); 1231 ide_stall_queue(drive,
1232 IDECD_SEEK_TIMER);
1336 return ide_stopped; 1233 return ide_stopped;
1337 } 1234 }
1338 printk (KERN_ERR "%s: DSC timeout\n", drive->name); 1235 printk(KERN_ERR "%s: DSC timeout\n",
1236 drive->name);
1339 } 1237 }
1340 info->cd_flags &= ~IDE_CD_FLAG_SEEKING; 1238 info->cd_flags &= ~IDE_CD_FLAG_SEEKING;
1341 } 1239 }
1342 if ((rq_data_dir(rq) == READ) && IDE_LARGE_SEEK(info->last_block, block, IDECD_SEEK_THRESHOLD) && drive->dsc_overlap) { 1240 if (rq_data_dir(rq) == READ &&
1241 IDE_LARGE_SEEK(info->last_block, block,
1242 IDECD_SEEK_THRESHOLD) &&
1243 drive->dsc_overlap)
1343 action = cdrom_start_seek(drive, block); 1244 action = cdrom_start_seek(drive, block);
1344 } else 1245 else
1345 action = cdrom_start_rw(drive, rq); 1246 action = cdrom_start_rw(drive, rq);
1346 info->last_block = block; 1247 info->last_block = block;
1347 return action; 1248 return action;
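
The reshaped conditional above decides whether to front-load a SEEK: with dsc_overlap enabled, a read landing far from last_block gets a seek issued first, and the queue is stalled until DSC settles or IDECD_SEEK_TIMEOUT expires. The distance test is a plain threshold comparison; a sketch of what IDE_LARGE_SEEK amounts to, written without signed abs() (the real macro lives in ide-cd.h):

	static int large_seek_sketch(unsigned long last, unsigned long next,
				     unsigned long threshold)
	{
		/* true when the two blocks are more than threshold apart */
		return last > next + threshold || next > last + threshold;
	}
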
@@ -1349,9 +1250,7 @@ ide_do_rw_cdrom (ide_drive_t *drive, struct request *rq, sector_t block)
1349 rq->cmd_type == REQ_TYPE_ATA_PC) { 1250 rq->cmd_type == REQ_TYPE_ATA_PC) {
1350 return cdrom_do_block_pc(drive, rq); 1251 return cdrom_do_block_pc(drive, rq);
1351 } else if (blk_special_request(rq)) { 1252 } else if (blk_special_request(rq)) {
1352 /* 1253 /* right now this can only be a reset... */
1353 * right now this can only be a reset...
1354 */
1355 cdrom_end_request(drive, 1); 1254 cdrom_end_request(drive, 1);
1356 return ide_stopped; 1255 return ide_stopped;
1357 } 1256 }
@@ -1363,18 +1262,16 @@ ide_do_rw_cdrom (ide_drive_t *drive, struct request *rq, sector_t block)
1363 1262
1364 1263
1365 1264
1366/**************************************************************************** 1265/*
1367 * Ioctl handling. 1266 * Ioctl handling.
1368 * 1267 *
1369 * Routines which queue packet commands take as a final argument a pointer 1268 * Routines which queue packet commands take as a final argument a pointer to a
1370 * to a request_sense struct. If execution of the command results 1269 * request_sense struct. If execution of the command results in an error with a
1371 * in an error with a CHECK CONDITION status, this structure will be filled 1270 * CHECK CONDITION status, this structure will be filled with the results of the
1372 * with the results of the subsequent request sense command. The pointer 1271 * subsequent request sense command. The pointer can also be NULL, in which case
1373 * can also be NULL, in which case no sense information is returned. 1272 * no sense information is returned.
1374 */ 1273 */
1375 1274static void msf_from_bcd(struct atapi_msf *msf)
1376static
1377void msf_from_bcd (struct atapi_msf *msf)
1378{ 1275{
1379 msf->minute = BCD2BIN(msf->minute); 1276 msf->minute = BCD2BIN(msf->minute);
1380 msf->second = BCD2BIN(msf->second); 1277 msf->second = BCD2BIN(msf->second);
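
msf_from_bcd() and the msf_to_lba() calls later in ide_cd_read_toc() do the two classic CD address conversions: BCD nibble decoding, and minute/second/frame to logical block. Both are written out below for reference (standard formulas, not copied from the headers); note that msf_to_lba(0, 2, 0) == 0, which is exactly the "0m 2s 0f" default used for last_session_lba further down:

	#define BCD2BIN_SKETCH(x)  ((((x) >> 4) & 0x0f) * 10 + ((x) & 0x0f))

	static int msf_to_lba_sketch(int m, int s, int f)
	{
		/* 60 s per minute, 75 frames per second, 150-frame lead-in */
		return (m * 60 + s) * 75 + f - 150;
	}
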
@@ -1394,8 +1291,8 @@ int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense)
1394 req.cmd_flags |= REQ_QUIET; 1291 req.cmd_flags |= REQ_QUIET;
1395 1292
1396 /* 1293 /*
1397 * Sanyo 3 CD changer uses byte 7 of TEST_UNIT_READY to 1294 * Sanyo 3 CD changer uses byte 7 of TEST_UNIT_READY to switch CDs
1398 * switch CDs instead of supporting the LOAD_UNLOAD opcode. 1295 * instead of supporting the LOAD_UNLOAD opcode.
1399 */ 1296 */
1400 req.cmd[7] = cdi->sanyo_slot % 3; 1297 req.cmd[7] = cdi->sanyo_slot % 3;
1401 1298
@@ -1471,36 +1368,39 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
1471 unsigned long sectors_per_frame = SECTORS_PER_FRAME; 1368 unsigned long sectors_per_frame = SECTORS_PER_FRAME;
1472 1369
1473 if (toc == NULL) { 1370 if (toc == NULL) {
1474 /* Try to allocate space. */ 1371 /* try to allocate space */
1475 toc = kmalloc(sizeof(struct atapi_toc), GFP_KERNEL); 1372 toc = kmalloc(sizeof(struct atapi_toc), GFP_KERNEL);
1476 if (toc == NULL) { 1373 if (toc == NULL) {
1477 printk (KERN_ERR "%s: No cdrom TOC buffer!\n", drive->name); 1374 printk(KERN_ERR "%s: No cdrom TOC buffer!\n",
1375 drive->name);
1478 return -ENOMEM; 1376 return -ENOMEM;
1479 } 1377 }
1480 info->toc = toc; 1378 info->toc = toc;
1481 } 1379 }
1482 1380
1483 /* Check to see if the existing data is still valid. 1381 /*
1484 If it is, just return. */ 1382 * Check to see if the existing data is still valid. If it is,
1383 * just return.
1384 */
1485 (void) cdrom_check_status(drive, sense); 1385 (void) cdrom_check_status(drive, sense);
1486 1386
1487 if (info->cd_flags & IDE_CD_FLAG_TOC_VALID) 1387 if (info->cd_flags & IDE_CD_FLAG_TOC_VALID)
1488 return 0; 1388 return 0;
1489 1389
1490 /* Try to get the total cdrom capacity and sector size. */ 1390 /* try to get the total cdrom capacity and sector size */
1491 stat = cdrom_read_capacity(drive, &toc->capacity, &sectors_per_frame, 1391 stat = cdrom_read_capacity(drive, &toc->capacity, &sectors_per_frame,
1492 sense); 1392 sense);
1493 if (stat) 1393 if (stat)
1494 toc->capacity = 0x1fffff; 1394 toc->capacity = 0x1fffff;
1495 1395
1496 set_capacity(info->disk, toc->capacity * sectors_per_frame); 1396 set_capacity(info->disk, toc->capacity * sectors_per_frame);
1497 /* Save a private copy of te TOC capacity for error handling */ 1397 /* save a private copy of the TOC capacity for error handling */
1498 drive->probed_capacity = toc->capacity * sectors_per_frame; 1398 drive->probed_capacity = toc->capacity * sectors_per_frame;
1499 1399
1500 blk_queue_hardsect_size(drive->queue, 1400 blk_queue_hardsect_size(drive->queue,
1501 sectors_per_frame << SECTOR_BITS); 1401 sectors_per_frame << SECTOR_BITS);
1502 1402
1503 /* First read just the header, so we know how long the TOC is. */ 1403 /* first read just the header, so we know how long the TOC is */
1504 stat = cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr, 1404 stat = cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr,
1505 sizeof(struct atapi_toc_header), sense); 1405 sizeof(struct atapi_toc_header), sense);
1506 if (stat) 1406 if (stat)
@@ -1517,7 +1417,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
1517 if (ntracks > MAX_TRACKS) 1417 if (ntracks > MAX_TRACKS)
1518 ntracks = MAX_TRACKS; 1418 ntracks = MAX_TRACKS;
1519 1419
1520 /* Now read the whole schmeer. */ 1420 /* now read the whole schmeer */
1521 stat = cdrom_read_tocentry(drive, toc->hdr.first_track, 1, 0, 1421 stat = cdrom_read_tocentry(drive, toc->hdr.first_track, 1, 0,
1522 (char *)&toc->hdr, 1422 (char *)&toc->hdr,
1523 sizeof(struct atapi_toc_header) + 1423 sizeof(struct atapi_toc_header) +
@@ -1525,15 +1425,18 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
1525 sizeof(struct atapi_toc_entry), sense); 1425 sizeof(struct atapi_toc_entry), sense);
1526 1426
1527 if (stat && toc->hdr.first_track > 1) { 1427 if (stat && toc->hdr.first_track > 1) {
1528 /* Cds with CDI tracks only don't have any TOC entries, 1428 /*
1529		   despite of this the returned values are		1429		 * CDs with CDI tracks only don't have any TOC entries; despite
1530		   first_track == last_track = number of CDI tracks + 1, 	1430		 * this, the returned values are
1531 so that this case is indistinguishable from the same 1431 * first_track == last_track = number of CDI tracks + 1,
1532 layout plus an additional audio track. 1432 * so that this case is indistinguishable from the same layout
1533 If we get an error for the regular case, we assume 1433 * plus an additional audio track. If we get an error for the
1534 a CDI without additional audio tracks. In this case 1434 * regular case, we assume a CDI without additional audio
1535 the readable TOC is empty (CDI tracks are not included) 1435 * tracks. In this case the readable TOC is empty (CDI tracks
1536 and only holds the Leadout entry. Heiko Eißfeldt */ 1436 * are not included) and only holds the Leadout entry.
1437 *
1438 * Heiko Eißfeldt.
1439 */
1537 ntracks = 0; 1440 ntracks = 0;
1538 stat = cdrom_read_tocentry(drive, CDROM_LEADOUT, 1, 0, 1441 stat = cdrom_read_tocentry(drive, CDROM_LEADOUT, 1, 0,
1539 (char *)&toc->hdr, 1442 (char *)&toc->hdr,
@@ -1569,14 +1472,13 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
1569 toc->ent[i].track = BCD2BIN(toc->ent[i].track); 1472 toc->ent[i].track = BCD2BIN(toc->ent[i].track);
1570 msf_from_bcd(&toc->ent[i].addr.msf); 1473 msf_from_bcd(&toc->ent[i].addr.msf);
1571 } 1474 }
1572 toc->ent[i].addr.lba = msf_to_lba (toc->ent[i].addr.msf.minute, 1475 toc->ent[i].addr.lba = msf_to_lba(toc->ent[i].addr.msf.minute,
1573 toc->ent[i].addr.msf.second, 1476 toc->ent[i].addr.msf.second,
1574 toc->ent[i].addr.msf.frame); 1477 toc->ent[i].addr.msf.frame);
1575 } 1478 }
1576 1479
1577 /* Read the multisession information. */
1578 if (toc->hdr.first_track != CDROM_LEADOUT) { 1480 if (toc->hdr.first_track != CDROM_LEADOUT) {
1579 /* Read the multisession information. */ 1481 /* read the multisession information */
1580 stat = cdrom_read_tocentry(drive, 0, 0, 1, (char *)&ms_tmp, 1482 stat = cdrom_read_tocentry(drive, 0, 0, 1, (char *)&ms_tmp,
1581 sizeof(ms_tmp), sense); 1483 sizeof(ms_tmp), sense);
1582 if (stat) 1484 if (stat)
@@ -1584,26 +1486,27 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
1584 1486
1585 toc->last_session_lba = be32_to_cpu(ms_tmp.ent.addr.lba); 1487 toc->last_session_lba = be32_to_cpu(ms_tmp.ent.addr.lba);
1586 } else { 1488 } else {
1587 ms_tmp.hdr.first_track = ms_tmp.hdr.last_track = CDROM_LEADOUT; 1489 ms_tmp.hdr.last_track = CDROM_LEADOUT;
1490 ms_tmp.hdr.first_track = ms_tmp.hdr.last_track;
1588 toc->last_session_lba = msf_to_lba(0, 2, 0); /* 0m 2s 0f */ 1491 toc->last_session_lba = msf_to_lba(0, 2, 0); /* 0m 2s 0f */
1589 } 1492 }
1590 1493
1591 if (info->cd_flags & IDE_CD_FLAG_TOCADDR_AS_BCD) { 1494 if (info->cd_flags & IDE_CD_FLAG_TOCADDR_AS_BCD) {
1592 /* Re-read multisession information using MSF format */ 1495 /* re-read multisession information using MSF format */
1593 stat = cdrom_read_tocentry(drive, 0, 1, 1, (char *)&ms_tmp, 1496 stat = cdrom_read_tocentry(drive, 0, 1, 1, (char *)&ms_tmp,
1594 sizeof(ms_tmp), sense); 1497 sizeof(ms_tmp), sense);
1595 if (stat) 1498 if (stat)
1596 return stat; 1499 return stat;
1597 1500
1598 msf_from_bcd (&ms_tmp.ent.addr.msf); 1501 msf_from_bcd(&ms_tmp.ent.addr.msf);
1599 toc->last_session_lba = msf_to_lba(ms_tmp.ent.addr.msf.minute, 1502 toc->last_session_lba = msf_to_lba(ms_tmp.ent.addr.msf.minute,
1600 ms_tmp.ent.addr.msf.second, 1503 ms_tmp.ent.addr.msf.second,
1601 ms_tmp.ent.addr.msf.frame); 1504 ms_tmp.ent.addr.msf.frame);
1602 } 1505 }
1603 1506
1604 toc->xa_flag = (ms_tmp.hdr.first_track != ms_tmp.hdr.last_track); 1507 toc->xa_flag = (ms_tmp.hdr.first_track != ms_tmp.hdr.last_track);
1605 1508
1606 /* Now try to get the total cdrom capacity. */ 1509 /* now try to get the total cdrom capacity */
1607 stat = cdrom_get_last_written(cdi, &last_written); 1510 stat = cdrom_get_last_written(cdi, &last_written);
1608 if (!stat && (last_written > toc->capacity)) { 1511 if (!stat && (last_written > toc->capacity)) {
1609 toc->capacity = last_written; 1512 toc->capacity = last_written;
@@ -1628,7 +1531,8 @@ int ide_cdrom_get_capabilities(ide_drive_t *drive, u8 *buf)
1628 size -= ATAPI_CAPABILITIES_PAGE_PAD_SIZE; 1531 size -= ATAPI_CAPABILITIES_PAGE_PAD_SIZE;
1629 1532
1630 init_cdrom_command(&cgc, buf, size, CGC_DATA_UNKNOWN); 1533 init_cdrom_command(&cgc, buf, size, CGC_DATA_UNKNOWN);
1631 do { /* we seem to get stat=0x01,err=0x00 the first time (??) */ 1534 do {
1535 /* we seem to get stat=0x01,err=0x00 the first time (??) */
1632 stat = cdrom_mode_sense(cdi, &cgc, GPMODE_CAPABILITIES_PAGE, 0); 1536 stat = cdrom_mode_sense(cdi, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
1633 if (!stat) 1537 if (!stat)
1634 break; 1538 break;
@@ -1679,7 +1583,7 @@ static struct cdrom_device_ops ide_cdrom_dops = {
1679 .generic_packet = ide_cdrom_packet, 1583 .generic_packet = ide_cdrom_packet,
1680}; 1584};
1681 1585
1682static int ide_cdrom_register (ide_drive_t *drive, int nslots) 1586static int ide_cdrom_register(ide_drive_t *drive, int nslots)
1683{ 1587{
1684 struct cdrom_info *info = drive->driver_data; 1588 struct cdrom_info *info = drive->driver_data;
1685 struct cdrom_device_info *devinfo = &info->devinfo; 1589 struct cdrom_device_info *devinfo = &info->devinfo;
@@ -1697,8 +1601,7 @@ static int ide_cdrom_register (ide_drive_t *drive, int nslots)
1697 return register_cdrom(devinfo); 1601 return register_cdrom(devinfo);
1698} 1602}
1699 1603
1700static 1604static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
1701int ide_cdrom_probe_capabilities (ide_drive_t *drive)
1702{ 1605{
1703 struct cdrom_info *cd = drive->driver_data; 1606 struct cdrom_info *cd = drive->driver_data;
1704 struct cdrom_device_info *cdi = &cd->devinfo; 1607 struct cdrom_device_info *cdi = &cd->devinfo;
@@ -1712,7 +1615,8 @@ int ide_cdrom_probe_capabilities (ide_drive_t *drive)
1712 1615
1713 if (drive->media == ide_optical) { 1616 if (drive->media == ide_optical) {
1714 cdi->mask &= ~(CDC_MO_DRIVE | CDC_RAM); 1617 cdi->mask &= ~(CDC_MO_DRIVE | CDC_RAM);
1715 printk(KERN_ERR "%s: ATAPI magneto-optical drive\n", drive->name); 1618 printk(KERN_ERR "%s: ATAPI magneto-optical drive\n",
1619 drive->name);
1716 return nslots; 1620 return nslots;
1717 } 1621 }
1718 1622
@@ -1723,11 +1627,10 @@ int ide_cdrom_probe_capabilities (ide_drive_t *drive)
1723 } 1627 }
1724 1628
1725 /* 1629 /*
1726 * we have to cheat a little here. the packet will eventually 1630 * We have to cheat a little here. the packet will eventually be queued
1727 * be queued with ide_cdrom_packet(), which extracts the 1631 * with ide_cdrom_packet(), which extracts the drive from cdi->handle.
1728 * drive from cdi->handle. Since this device hasn't been 1632 * Since this device hasn't been registered with the Uniform layer yet,
1729 * registered with the Uniform layer yet, it can't do this. 1633 * it can't do this. Same goes for cdi->ops.
1730 * Same goes for cdi->ops.
1731 */ 1634 */
1732 cdi->handle = drive; 1635 cdi->handle = drive;
1733 cdi->ops = &ide_cdrom_dops; 1636 cdi->ops = &ide_cdrom_dops;
@@ -1796,18 +1699,7 @@ int ide_cdrom_probe_capabilities (ide_drive_t *drive)
1796 return nslots; 1699 return nslots;
1797} 1700}
1798 1701
1799#ifdef CONFIG_IDE_PROC_FS 1702/* standard prep_rq_fn that builds 10 byte cmds */
1800static void ide_cdrom_add_settings(ide_drive_t *drive)
1801{
1802 ide_add_setting(drive, "dsc_overlap", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->dsc_overlap, NULL);
1803}
1804#else
1805static inline void ide_cdrom_add_settings(ide_drive_t *drive) { ; }
1806#endif
1807
1808/*
1809 * standard prep_rq_fn that builds 10 byte cmds
1810 */
1811static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq) 1703static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
1812{ 1704{
1813 int hard_sect = queue_hardsect_size(q); 1705 int hard_sect = queue_hardsect_size(q);
@@ -1846,9 +1738,7 @@ static int ide_cdrom_prep_pc(struct request *rq)
1846{ 1738{
1847 u8 *c = rq->cmd; 1739 u8 *c = rq->cmd;
1848 1740
1849 /* 1741 /* transform 6-byte read/write commands to the 10-byte version */
1850 * Transform 6-byte read/write commands to the 10-byte version
1851 */
1852 if (c[0] == READ_6 || c[0] == WRITE_6) { 1742 if (c[0] == READ_6 || c[0] == WRITE_6) {
1853 c[8] = c[4]; 1743 c[8] = c[4];
1854 c[5] = c[3]; 1744 c[5] = c[3];
@@ -1870,7 +1760,7 @@ static int ide_cdrom_prep_pc(struct request *rq)
1870 rq->errors = ILLEGAL_REQUEST; 1760 rq->errors = ILLEGAL_REQUEST;
1871 return BLKPREP_KILL; 1761 return BLKPREP_KILL;
1872 } 1762 }
1873 1763
1874 return BLKPREP_OK; 1764 return BLKPREP_OK;
1875} 1765}
1876 1766
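
The byte shuffle opening this function (partially visible in the previous hunk) follows directly from the SCSI CDB layouts: READ(6)/WRITE(6) keep a 21-bit LBA in bytes 1-3 and an 8-bit length in byte 4, while the 10-byte forms use bytes 2-5 and 7-8. A sketch of the complete transform, with the opcode fixup being the constant 0x20 gap between the 6- and 10-byte opcodes (0x08 to 0x28, 0x0a to 0x2a):

	static void cdb_6_to_10_sketch(u8 *c)
	{
		c[8] = c[4];		/* 8-bit length -> length low byte */
		c[5] = c[3];		/* LBA low byte */
		c[4] = c[2];		/* LBA mid byte */
		c[3] = c[1] & 0x1f;	/* LBA bits 16..20 sat in byte 1 */
		c[2] = 0;		/* top byte: 6-byte CDBs stop at 21 bits */
		c[1] &= 0xe0;		/* keep only the old LUN bits */
		c[0] += 0x20;		/* READ_6 -> READ_10, WRITE_6 -> WRITE_10 */
	}
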
@@ -1890,6 +1780,41 @@ struct cd_list_entry {
1890 unsigned int cd_flags; 1780 unsigned int cd_flags;
1891}; 1781};
1892 1782
1783#ifdef CONFIG_IDE_PROC_FS
1784static sector_t ide_cdrom_capacity(ide_drive_t *drive)
1785{
1786 unsigned long capacity, sectors_per_frame;
1787
1788 if (cdrom_read_capacity(drive, &capacity, &sectors_per_frame, NULL))
1789 return 0;
1790
1791 return capacity * sectors_per_frame;
1792}
1793
1794static int proc_idecd_read_capacity(char *page, char **start, off_t off,
1795 int count, int *eof, void *data)
1796{
1797 ide_drive_t *drive = data;
1798 int len;
1799
1800 len = sprintf(page, "%llu\n", (long long)ide_cdrom_capacity(drive));
1801 PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
1802}
1803
1804static ide_proc_entry_t idecd_proc[] = {
1805 { "capacity", S_IFREG|S_IRUGO, proc_idecd_read_capacity, NULL },
1806 { NULL, 0, NULL, NULL }
1807};
1808
1809static void ide_cdrom_add_settings(ide_drive_t *drive)
1810{
1811 ide_add_setting(drive, "dsc_overlap", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1,
1812 &drive->dsc_overlap, NULL);
1813}
1814#else
1815static inline void ide_cdrom_add_settings(ide_drive_t *drive) { ; }
1816#endif
1817
1893static const struct cd_list_entry ide_cd_quirks_list[] = { 1818static const struct cd_list_entry ide_cd_quirks_list[] = {
1894 /* Limit transfer size per interrupt. */ 1819 /* Limit transfer size per interrupt. */
1895 { "SAMSUNG CD-ROM SCR-2430", NULL, IDE_CD_FLAG_LIMIT_NFRAMES }, 1820 { "SAMSUNG CD-ROM SCR-2430", NULL, IDE_CD_FLAG_LIMIT_NFRAMES },
@@ -1947,8 +1872,7 @@ static unsigned int ide_cd_flags(struct hd_driveid *id)
1947 return 0; 1872 return 0;
1948} 1873}
1949 1874
1950static 1875static int ide_cdrom_setup(ide_drive_t *drive)
1951int ide_cdrom_setup (ide_drive_t *drive)
1952{ 1876{
1953 struct cdrom_info *cd = drive->driver_data; 1877 struct cdrom_info *cd = drive->driver_data;
1954 struct cdrom_device_info *cdi = &cd->devinfo; 1878 struct cdrom_device_info *cdi = &cd->devinfo;
@@ -1977,21 +1901,19 @@ int ide_cdrom_setup (ide_drive_t *drive)
1977 id->fw_rev[4] == '1' && id->fw_rev[6] <= '2') 1901 id->fw_rev[4] == '1' && id->fw_rev[6] <= '2')
1978 cd->cd_flags |= IDE_CD_FLAG_TOCTRACKS_AS_BCD; 1902 cd->cd_flags |= IDE_CD_FLAG_TOCTRACKS_AS_BCD;
1979 else if (cd->cd_flags & IDE_CD_FLAG_SANYO_3CD) 1903 else if (cd->cd_flags & IDE_CD_FLAG_SANYO_3CD)
1980 cdi->sanyo_slot = 3; /* 3 => use CD in slot 0 */ 1904 /* 3 => use CD in slot 0 */
1905 cdi->sanyo_slot = 3;
1981 1906
1982 nslots = ide_cdrom_probe_capabilities (drive); 1907 nslots = ide_cdrom_probe_capabilities(drive);
1983 1908
1984 /* 1909 /* set correct block size */
1985 * set correct block size
1986 */
1987 blk_queue_hardsect_size(drive->queue, CD_FRAMESIZE); 1910 blk_queue_hardsect_size(drive->queue, CD_FRAMESIZE);
1988 1911
1989 if (drive->autotune == IDE_TUNE_DEFAULT || 1912 drive->dsc_overlap = (drive->next != drive);
1990 drive->autotune == IDE_TUNE_AUTO)
1991 drive->dsc_overlap = (drive->next != drive);
1992 1913
1993 if (ide_cdrom_register(drive, nslots)) { 1914 if (ide_cdrom_register(drive, nslots)) {
1994 printk (KERN_ERR "%s: ide_cdrom_setup failed to register device with the cdrom driver.\n", drive->name); 1915 printk(KERN_ERR "%s: %s failed to register device with the"
1916 " cdrom driver.\n", drive->name, __func__);
1995 cd->devinfo.handle = NULL; 1917 cd->devinfo.handle = NULL;
1996 return 1; 1918 return 1;
1997 } 1919 }
@@ -1999,19 +1921,6 @@ int ide_cdrom_setup (ide_drive_t *drive)
1999 return 0; 1921 return 0;
2000} 1922}
2001 1923
2002#ifdef CONFIG_IDE_PROC_FS
2003static
2004sector_t ide_cdrom_capacity (ide_drive_t *drive)
2005{
2006 unsigned long capacity, sectors_per_frame;
2007
2008 if (cdrom_read_capacity(drive, &capacity, &sectors_per_frame, NULL))
2009 return 0;
2010
2011 return capacity * sectors_per_frame;
2012}
2013#endif
2014
2015static void ide_cd_remove(ide_drive_t *drive) 1924static void ide_cd_remove(ide_drive_t *drive)
2016{ 1925{
2017 struct cdrom_info *info = drive->driver_data; 1926 struct cdrom_info *info = drive->driver_data;
@@ -2030,7 +1939,6 @@ static void ide_cd_release(struct kref *kref)
2030 ide_drive_t *drive = info->drive; 1939 ide_drive_t *drive = info->drive;
2031 struct gendisk *g = info->disk; 1940 struct gendisk *g = info->disk;
2032 1941
2033 kfree(info->buffer);
2034 kfree(info->toc); 1942 kfree(info->toc);
2035 if (devinfo->handle == drive) 1943 if (devinfo->handle == drive)
2036 unregister_cdrom(devinfo); 1944 unregister_cdrom(devinfo);
@@ -2044,23 +1952,6 @@ static void ide_cd_release(struct kref *kref)
2044 1952
2045static int ide_cd_probe(ide_drive_t *); 1953static int ide_cd_probe(ide_drive_t *);
2046 1954
2047#ifdef CONFIG_IDE_PROC_FS
2048static int proc_idecd_read_capacity
2049 (char *page, char **start, off_t off, int count, int *eof, void *data)
2050{
2051 ide_drive_t *drive = data;
2052 int len;
2053
2054 len = sprintf(page,"%llu\n", (long long)ide_cdrom_capacity(drive));
2055 PROC_IDE_READ_RETURN(page,start,off,count,eof,len);
2056}
2057
2058static ide_proc_entry_t idecd_proc[] = {
2059 { "capacity", S_IFREG|S_IRUGO, proc_idecd_read_capacity, NULL },
2060 { NULL, 0, NULL, NULL }
2061};
2062#endif
2063
2064static ide_driver_t ide_cdrom_driver = { 1955static ide_driver_t ide_cdrom_driver = {
2065 .gen_driver = { 1956 .gen_driver = {
2066 .owner = THIS_MODULE, 1957 .owner = THIS_MODULE,
@@ -2081,20 +1972,17 @@ static ide_driver_t ide_cdrom_driver = {
2081#endif 1972#endif
2082}; 1973};
2083 1974
2084static int idecd_open(struct inode * inode, struct file * file) 1975static int idecd_open(struct inode *inode, struct file *file)
2085{ 1976{
2086 struct gendisk *disk = inode->i_bdev->bd_disk; 1977 struct gendisk *disk = inode->i_bdev->bd_disk;
2087 struct cdrom_info *info; 1978 struct cdrom_info *info;
2088 int rc = -ENOMEM; 1979 int rc = -ENOMEM;
2089 1980
2090 if (!(info = ide_cd_get(disk))) 1981 info = ide_cd_get(disk);
1982 if (!info)
2091 return -ENXIO; 1983 return -ENXIO;
2092 1984
2093 if (!info->buffer) 1985 rc = cdrom_open(&info->devinfo, inode, file);
2094 info->buffer = kmalloc(SECTOR_BUFFER_SIZE, GFP_KERNEL|__GFP_REPEAT);
2095
2096 if (info->buffer)
2097 rc = cdrom_open(&info->devinfo, inode, file);
2098 1986
2099 if (rc < 0) 1987 if (rc < 0)
2100 ide_cd_put(info); 1988 ide_cd_put(info);
@@ -2102,12 +1990,12 @@ static int idecd_open(struct inode * inode, struct file * file)
2102 return rc; 1990 return rc;
2103} 1991}
2104 1992
2105static int idecd_release(struct inode * inode, struct file * file) 1993static int idecd_release(struct inode *inode, struct file *file)
2106{ 1994{
2107 struct gendisk *disk = inode->i_bdev->bd_disk; 1995 struct gendisk *disk = inode->i_bdev->bd_disk;
2108 struct cdrom_info *info = ide_cd_g(disk); 1996 struct cdrom_info *info = ide_cd_g(disk);
2109 1997
2110 cdrom_release (&info->devinfo, file); 1998 cdrom_release(&info->devinfo, file);
2111 1999
2112 ide_cd_put(info); 2000 ide_cd_put(info);
2113 2001
@@ -2139,7 +2027,7 @@ static int idecd_get_spindown(struct cdrom_device_info *cdi, unsigned long arg)
2139 struct packet_command cgc; 2027 struct packet_command cgc;
2140 char buffer[16]; 2028 char buffer[16];
2141 int stat; 2029 int stat;
2142 char spindown; 2030 char spindown;
2143 2031
2144 init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_UNKNOWN); 2032 init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_UNKNOWN);
2145 2033
@@ -2148,12 +2036,12 @@ static int idecd_get_spindown(struct cdrom_device_info *cdi, unsigned long arg)
2148 return stat; 2036 return stat;
2149 2037
2150 spindown = buffer[11] & 0x0f; 2038 spindown = buffer[11] & 0x0f;
2151 if (copy_to_user((void __user *)arg, &spindown, sizeof (char))) 2039 if (copy_to_user((void __user *)arg, &spindown, sizeof(char)))
2152 return -EFAULT; 2040 return -EFAULT;
2153 return 0; 2041 return 0;
2154} 2042}
2155 2043
2156static int idecd_ioctl (struct inode *inode, struct file *file, 2044static int idecd_ioctl(struct inode *inode, struct file *file,
2157 unsigned int cmd, unsigned long arg) 2045 unsigned int cmd, unsigned long arg)
2158{ 2046{
2159 struct block_device *bdev = inode->i_bdev; 2047 struct block_device *bdev = inode->i_bdev;
@@ -2161,13 +2049,13 @@ static int idecd_ioctl (struct inode *inode, struct file *file,
2161 int err; 2049 int err;
2162 2050
2163 switch (cmd) { 2051 switch (cmd) {
2164 case CDROMSETSPINDOWN: 2052 case CDROMSETSPINDOWN:
2165 return idecd_set_spindown(&info->devinfo, arg); 2053 return idecd_set_spindown(&info->devinfo, arg);
2166 case CDROMGETSPINDOWN: 2054 case CDROMGETSPINDOWN:
2167 return idecd_get_spindown(&info->devinfo, arg); 2055 return idecd_get_spindown(&info->devinfo, arg);
2168 default: 2056 default:
2169 break; 2057 break;
2170 } 2058 }
2171 2059
2172 err = generic_ide_ioctl(info->drive, file, bdev, cmd, arg); 2060 err = generic_ide_ioctl(info->drive, file, bdev, cmd, arg);
2173 if (err == -EINVAL) 2061 if (err == -EINVAL)
@@ -2193,16 +2081,16 @@ static int idecd_revalidate_disk(struct gendisk *disk)
2193} 2081}
2194 2082
2195static struct block_device_operations idecd_ops = { 2083static struct block_device_operations idecd_ops = {
2196 .owner = THIS_MODULE, 2084 .owner = THIS_MODULE,
2197 .open = idecd_open, 2085 .open = idecd_open,
2198 .release = idecd_release, 2086 .release = idecd_release,
2199 .ioctl = idecd_ioctl, 2087 .ioctl = idecd_ioctl,
2200 .media_changed = idecd_media_changed, 2088 .media_changed = idecd_media_changed,
2201 .revalidate_disk= idecd_revalidate_disk 2089 .revalidate_disk = idecd_revalidate_disk
2202}; 2090};
2203 2091
2204/* options */ 2092/* module options */
2205static char *ignore = NULL; 2093static char *ignore;
2206 2094
2207module_param(ignore, charp, 0400); 2095module_param(ignore, charp, 0400);
2208MODULE_DESCRIPTION("ATAPI CD-ROM Driver"); 2096MODULE_DESCRIPTION("ATAPI CD-ROM Driver");
@@ -2222,17 +2110,20 @@ static int ide_cd_probe(ide_drive_t *drive)
2222 /* skip drives that we were told to ignore */ 2110 /* skip drives that we were told to ignore */
2223 if (ignore != NULL) { 2111 if (ignore != NULL) {
2224 if (strstr(ignore, drive->name)) { 2112 if (strstr(ignore, drive->name)) {
2225 printk(KERN_INFO "ide-cd: ignoring drive %s\n", drive->name); 2113 printk(KERN_INFO "ide-cd: ignoring drive %s\n",
2114 drive->name);
2226 goto failed; 2115 goto failed;
2227 } 2116 }
2228 } 2117 }
2229 if (drive->scsi) { 2118 if (drive->scsi) {
2230 printk(KERN_INFO "ide-cd: passing drive %s to ide-scsi emulation.\n", drive->name); 2119 printk(KERN_INFO "ide-cd: passing drive %s to ide-scsi "
2120 "emulation.\n", drive->name);
2231 goto failed; 2121 goto failed;
2232 } 2122 }
2233 info = kzalloc(sizeof(struct cdrom_info), GFP_KERNEL); 2123 info = kzalloc(sizeof(struct cdrom_info), GFP_KERNEL);
2234 if (info == NULL) { 2124 if (info == NULL) {
2235 printk(KERN_ERR "%s: Can't allocate a cdrom structure\n", drive->name); 2125 printk(KERN_ERR "%s: Can't allocate a cdrom structure\n",
2126 drive->name);
2236 goto failed; 2127 goto failed;
2237 } 2128 }
2238 2129
diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h
index 22e3751a681e..a58801c4484d 100644
--- a/drivers/ide/ide-cd.h
+++ b/drivers/ide/ide-cd.h
@@ -119,10 +119,6 @@ struct cdrom_info {
119 119
120 struct atapi_toc *toc; 120 struct atapi_toc *toc;
121 121
122 unsigned long sector_buffered;
123 unsigned long nsectors_buffered;
124 unsigned char *buffer;
125
126 /* The result of the last successful request sense command 122 /* The result of the last successful request sense command
127 on this device. */ 123 on this device. */
128 struct request_sense sense_data; 124 struct request_sense sense_data;
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 39501d130256..8e08d083fce9 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -16,8 +16,6 @@
16 16
17#define IDEDISK_VERSION "1.18" 17#define IDEDISK_VERSION "1.18"
18 18
19//#define DEBUG
20
21#include <linux/module.h> 19#include <linux/module.h>
22#include <linux/types.h> 20#include <linux/types.h>
23#include <linux/string.h> 21#include <linux/string.h>
@@ -88,7 +86,7 @@ static void ide_disk_put(struct ide_disk_obj *idkp)
88 * 86 *
89 * It is called only once for each drive. 87 * It is called only once for each drive.
90 */ 88 */
91static int lba_capacity_is_ok (struct hd_driveid *id) 89static int lba_capacity_is_ok(struct hd_driveid *id)
92{ 90{
93 unsigned long lba_sects, chs_sects, head, tail; 91 unsigned long lba_sects, chs_sects, head, tail;
94 92
@@ -176,7 +174,8 @@ static void ide_tf_set_cmd(ide_drive_t *drive, ide_task_t *task, u8 dma)
176 * __ide_do_rw_disk() issues READ and WRITE commands to a disk, 174 * __ide_do_rw_disk() issues READ and WRITE commands to a disk,
177 * using LBA if supported, or CHS otherwise, to address sectors. 175 * using LBA if supported, or CHS otherwise, to address sectors.
178 */ 176 */
179static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq, sector_t block) 177static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
178 sector_t block)
180{ 179{
181 ide_hwif_t *hwif = HWIF(drive); 180 ide_hwif_t *hwif = HWIF(drive);
182 unsigned int dma = drive->using_dma; 181 unsigned int dma = drive->using_dma;
@@ -228,7 +227,8 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
228 tf->device = (block >> 8) & 0xf; 227 tf->device = (block >> 8) & 0xf;
229 } 228 }
230 } else { 229 } else {
231 unsigned int sect,head,cyl,track; 230 unsigned int sect, head, cyl, track;
231
232 track = (int)block / drive->sect; 232 track = (int)block / drive->sect;
233 sect = (int)block % drive->sect + 1; 233 sect = (int)block % drive->sect + 1;
234 head = track % drive->head; 234 head = track % drive->head;
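
The CHS fallback above factors the linear block number track-first. A quick worked check with a hypothetical 16-head, 63-sectors-per-track geometry:

	/*
	 * Worked example of the split above (illustrative geometry:
	 * drive->sect = 63, drive->head = 16, block = 5000):
	 *
	 *	track = 5000 / 63     = 79
	 *	sect  = 5000 % 63 + 1 = 24	(CHS sectors are 1-based)
	 *	head  = 79 % 16       = 15
	 *	cyl   = 79 / 16       = 4
	 */
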
@@ -271,7 +271,8 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
271 * 1073741822 == 549756 MB or 48bit addressing fake drive 271 * 1073741822 == 549756 MB or 48bit addressing fake drive
272 */ 272 */
273 273
274static ide_startstop_t ide_do_rw_disk (ide_drive_t *drive, struct request *rq, sector_t block) 274static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
275 sector_t block)
275{ 276{
276 ide_hwif_t *hwif = HWIF(drive); 277 ide_hwif_t *hwif = HWIF(drive);
277 278
@@ -452,7 +453,7 @@ static void idedisk_check_hpa(ide_drive_t *drive)
452 * in above order (i.e., if value of higher priority is available, 453 * in above order (i.e., if value of higher priority is available,
453 * reset will be ignored). 454 * reset will be ignored).
454 */ 455 */
455static void init_idedisk_capacity (ide_drive_t *drive) 456static void init_idedisk_capacity(ide_drive_t *drive)
456{ 457{
457 struct hd_driveid *id = drive->id; 458 struct hd_driveid *id = drive->id;
458 /* 459 /*
@@ -479,7 +480,7 @@ static void init_idedisk_capacity (ide_drive_t *drive)
479 } 480 }
480} 481}
481 482
482static sector_t idedisk_capacity (ide_drive_t *drive) 483static sector_t idedisk_capacity(ide_drive_t *drive)
483{ 484{
484 return drive->capacity64 - drive->sect0; 485 return drive->capacity64 - drive->sect0;
485} 486}
@@ -524,10 +525,11 @@ static int proc_idedisk_read_cache
524 int len; 525 int len;
525 526
526 if (drive->id_read) 527 if (drive->id_read)
527 len = sprintf(out,"%i\n", drive->id->buf_size / 2); 528 len = sprintf(out, "%i\n", drive->id->buf_size / 2);
528 else 529 else
529 len = sprintf(out,"(none)\n"); 530 len = sprintf(out, "(none)\n");
530 PROC_IDE_READ_RETURN(page,start,off,count,eof,len); 531
532 PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
531} 533}
532 534
533static int proc_idedisk_read_capacity 535static int proc_idedisk_read_capacity
@@ -536,54 +538,52 @@ static int proc_idedisk_read_capacity
536 ide_drive_t*drive = (ide_drive_t *)data; 538 ide_drive_t*drive = (ide_drive_t *)data;
537 int len; 539 int len;
538 540
539 len = sprintf(page,"%llu\n", (long long)idedisk_capacity(drive)); 541 len = sprintf(page, "%llu\n", (long long)idedisk_capacity(drive));
540 PROC_IDE_READ_RETURN(page,start,off,count,eof,len); 542
543 PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
541} 544}
542 545
543static int proc_idedisk_read_smart_thresholds 546static int proc_idedisk_read_smart(char *page, char **start, off_t off,
544 (char *page, char **start, off_t off, int count, int *eof, void *data) 547 int count, int *eof, void *data, u8 sub_cmd)
545{ 548{
546 ide_drive_t *drive = (ide_drive_t *)data; 549 ide_drive_t *drive = (ide_drive_t *)data;
547 int len = 0, i = 0; 550 int len = 0, i = 0;
548 551
549 if (get_smart_data(drive, page, SMART_READ_THRESHOLDS) == 0) { 552 if (get_smart_data(drive, page, sub_cmd) == 0) {
550 unsigned short *val = (unsigned short *) page; 553 unsigned short *val = (unsigned short *) page;
551 char *out = ((char *)val) + (SECTOR_WORDS * 4); 554 char *out = ((char *)val) + (SECTOR_WORDS * 4);
552 page = out; 555 page = out;
553 do { 556 do {
554 out += sprintf(out, "%04x%c", le16_to_cpu(*val), (++i & 7) ? ' ' : '\n'); 557 out += sprintf(out, "%04x%c", le16_to_cpu(*val),
558 (++i & 7) ? ' ' : '\n');
555 val += 1; 559 val += 1;
556 } while (i < (SECTOR_WORDS * 2)); 560 } while (i < (SECTOR_WORDS * 2));
557 len = out - page; 561 len = out - page;
558 } 562 }
559 PROC_IDE_READ_RETURN(page,start,off,count,eof,len); 563
564 PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
560} 565}
561 566
562static int proc_idedisk_read_smart_values 567static int proc_idedisk_read_sv
563 (char *page, char **start, off_t off, int count, int *eof, void *data) 568 (char *page, char **start, off_t off, int count, int *eof, void *data)
564{ 569{
565 ide_drive_t *drive = (ide_drive_t *)data; 570 return proc_idedisk_read_smart(page, start, off, count, eof, data,
566 int len = 0, i = 0; 571 SMART_READ_VALUES);
572}
567 573
568 if (get_smart_data(drive, page, SMART_READ_VALUES) == 0) { 574static int proc_idedisk_read_st
569 unsigned short *val = (unsigned short *) page; 575 (char *page, char **start, off_t off, int count, int *eof, void *data)
570 char *out = ((char *)val) + (SECTOR_WORDS * 4); 576{
571 page = out; 577 return proc_idedisk_read_smart(page, start, off, count, eof, data,
572 do { 578 SMART_READ_THRESHOLDS);
573 out += sprintf(out, "%04x%c", le16_to_cpu(*val), (++i & 7) ? ' ' : '\n');
574 val += 1;
575 } while (i < (SECTOR_WORDS * 2));
576 len = out - page;
577 }
578 PROC_IDE_READ_RETURN(page,start,off,count,eof,len);
579} 579}
580 580
581static ide_proc_entry_t idedisk_proc[] = { 581static ide_proc_entry_t idedisk_proc[] = {
582 { "cache", S_IFREG|S_IRUGO, proc_idedisk_read_cache, NULL }, 582 { "cache", S_IFREG|S_IRUGO, proc_idedisk_read_cache, NULL },
583 { "capacity", S_IFREG|S_IRUGO, proc_idedisk_read_capacity, NULL }, 583 { "capacity", S_IFREG|S_IRUGO, proc_idedisk_read_capacity, NULL },
584 { "geometry", S_IFREG|S_IRUGO, proc_ide_read_geometry, NULL }, 584 { "geometry", S_IFREG|S_IRUGO, proc_ide_read_geometry, NULL },
585 { "smart_values", S_IFREG|S_IRUSR, proc_idedisk_read_smart_values, NULL }, 585 { "smart_values", S_IFREG|S_IRUSR, proc_idedisk_read_sv, NULL },
586 { "smart_thresholds", S_IFREG|S_IRUSR, proc_idedisk_read_smart_thresholds, NULL }, 586 { "smart_thresholds", S_IFREG|S_IRUSR, proc_idedisk_read_st, NULL },
587 { NULL, 0, NULL, NULL } 587 { NULL, 0, NULL, NULL }
588}; 588};
589#endif /* CONFIG_IDE_PROC_FS */ 589#endif /* CONFIG_IDE_PROC_FS */
@@ -625,12 +625,13 @@ static int set_multcount(ide_drive_t *drive, int arg)
625 if (drive->special.b.set_multmode) 625 if (drive->special.b.set_multmode)
626 return -EBUSY; 626 return -EBUSY;
627 627
628 ide_init_drive_cmd (&rq); 628 ide_init_drive_cmd(&rq);
629 rq.cmd_type = REQ_TYPE_ATA_TASKFILE; 629 rq.cmd_type = REQ_TYPE_ATA_TASKFILE;
630 630
631 drive->mult_req = arg; 631 drive->mult_req = arg;
632 drive->special.b.set_multmode = 1; 632 drive->special.b.set_multmode = 1;
633 (void) ide_do_drive_cmd (drive, &rq, ide_wait); 633 (void)ide_do_drive_cmd(drive, &rq, ide_wait);
634
634 return (drive->mult_count == arg) ? 0 : -EIO; 635 return (drive->mult_count == arg) ? 0 : -EIO;
635} 636}
636 637
@@ -706,7 +707,7 @@ static int write_cache(ide_drive_t *drive, int arg)
706 return err; 707 return err;
707} 708}
708 709
709static int do_idedisk_flushcache (ide_drive_t *drive) 710static int do_idedisk_flushcache(ide_drive_t *drive)
710{ 711{
711 ide_task_t args; 712 ide_task_t args;
712 713
@@ -719,7 +720,7 @@ static int do_idedisk_flushcache (ide_drive_t *drive)
719 return ide_no_data_taskfile(drive, &args); 720 return ide_no_data_taskfile(drive, &args);
720} 721}
721 722
722static int set_acoustic (ide_drive_t *drive, int arg) 723static int set_acoustic(ide_drive_t *drive, int arg)
723{ 724{
724 ide_task_t args; 725 ide_task_t args;
725 726
@@ -753,7 +754,7 @@ static int set_lba_addressing(ide_drive_t *drive, int arg)
753 return 0; 754 return 0;
754 755
755 if (!idedisk_supports_lba48(drive->id)) 756 if (!idedisk_supports_lba48(drive->id))
756 return -EIO; 757 return -EIO;
757 drive->addressing = arg; 758 drive->addressing = arg;
758 return 0; 759 return 0;
759} 760}
@@ -763,23 +764,35 @@ static void idedisk_add_settings(ide_drive_t *drive)
763{ 764{
764 struct hd_driveid *id = drive->id; 765 struct hd_driveid *id = drive->id;
765 766
766 ide_add_setting(drive, "bios_cyl", SETTING_RW, TYPE_INT, 0, 65535, 1, 1, &drive->bios_cyl, NULL); 767 ide_add_setting(drive, "bios_cyl", SETTING_RW, TYPE_INT, 0, 65535, 1, 1,
767 ide_add_setting(drive, "bios_head", SETTING_RW, TYPE_BYTE, 0, 255, 1, 1, &drive->bios_head, NULL); 768 &drive->bios_cyl, NULL);
768 ide_add_setting(drive, "bios_sect", SETTING_RW, TYPE_BYTE, 0, 63, 1, 1, &drive->bios_sect, NULL); 769 ide_add_setting(drive, "bios_head", SETTING_RW, TYPE_BYTE, 0, 255, 1, 1,
769 ide_add_setting(drive, "address", SETTING_RW, TYPE_BYTE, 0, 2, 1, 1, &drive->addressing, set_lba_addressing); 770 &drive->bios_head, NULL);
770 ide_add_setting(drive, "multcount", SETTING_RW, TYPE_BYTE, 0, id->max_multsect, 1, 1, &drive->mult_count, set_multcount); 771 ide_add_setting(drive, "bios_sect", SETTING_RW, TYPE_BYTE, 0, 63, 1, 1,
771 ide_add_setting(drive, "nowerr", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->nowerr, set_nowerr); 772 &drive->bios_sect, NULL);
772 ide_add_setting(drive, "lun", SETTING_RW, TYPE_INT, 0, 7, 1, 1, &drive->lun, NULL); 773 ide_add_setting(drive, "address", SETTING_RW, TYPE_BYTE, 0, 2, 1, 1,
773 ide_add_setting(drive, "wcache", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->wcache, write_cache); 774 &drive->addressing, set_lba_addressing);
774 ide_add_setting(drive, "acoustic", SETTING_RW, TYPE_BYTE, 0, 254, 1, 1, &drive->acoustic, set_acoustic); 775 ide_add_setting(drive, "multcount", SETTING_RW, TYPE_BYTE, 0,
775 ide_add_setting(drive, "failures", SETTING_RW, TYPE_INT, 0, 65535, 1, 1, &drive->failures, NULL); 776 id->max_multsect, 1, 1, &drive->mult_count,
776 ide_add_setting(drive, "max_failures", SETTING_RW, TYPE_INT, 0, 65535, 1, 1, &drive->max_failures, NULL); 777 set_multcount);
778 ide_add_setting(drive, "nowerr", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1,
779 &drive->nowerr, set_nowerr);
780 ide_add_setting(drive, "lun", SETTING_RW, TYPE_INT, 0, 7, 1, 1,
781 &drive->lun, NULL);
782 ide_add_setting(drive, "wcache", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1,
783 &drive->wcache, write_cache);
784 ide_add_setting(drive, "acoustic", SETTING_RW, TYPE_BYTE, 0, 254, 1, 1,
785 &drive->acoustic, set_acoustic);
786 ide_add_setting(drive, "failures", SETTING_RW, TYPE_INT, 0, 65535, 1, 1,
787 &drive->failures, NULL);
788 ide_add_setting(drive, "max_failures", SETTING_RW, TYPE_INT, 0, 65535,
789 1, 1, &drive->max_failures, NULL);
777} 790}
778#else 791#else
779static inline void idedisk_add_settings(ide_drive_t *drive) { ; } 792static inline void idedisk_add_settings(ide_drive_t *drive) { ; }
780#endif 793#endif
781 794
782static void idedisk_setup (ide_drive_t *drive) 795static void idedisk_setup(ide_drive_t *drive)
783{ 796{
784 ide_hwif_t *hwif = drive->hwif; 797 ide_hwif_t *hwif = drive->hwif;
785 struct hd_driveid *id = drive->id; 798 struct hd_driveid *id = drive->id;
@@ -792,11 +805,10 @@ static void idedisk_setup (ide_drive_t *drive)
792 805
793 if (drive->removable) { 806 if (drive->removable) {
794 /* 807 /*
795 * Removable disks (eg. SYQUEST); ignore 'WD' drives 808 * Removable disks (eg. SYQUEST); ignore 'WD' drives
796 */ 809 */
797 if (id->model[0] != 'W' || id->model[1] != 'D') { 810 if (id->model[0] != 'W' || id->model[1] != 'D')
798 drive->doorlocking = 1; 811 drive->doorlocking = 1;
799 }
800 } 812 }
801 813
802 (void)set_lba_addressing(drive, 1); 814 (void)set_lba_addressing(drive, 1);
@@ -810,10 +822,11 @@ static void idedisk_setup (ide_drive_t *drive)
810 blk_queue_max_sectors(drive->queue, max_s); 822 blk_queue_max_sectors(drive->queue, max_s);
811 } 823 }
812 824
813 printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name, drive->queue->max_sectors / 2); 825 printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name,
826 drive->queue->max_sectors / 2);
814 827
815 /* calculate drive capacity, and select LBA if possible */ 828 /* calculate drive capacity, and select LBA if possible */
816 init_idedisk_capacity (drive); 829 init_idedisk_capacity(drive);
817 830
818 /* limit drive capacity to 137GB if LBA48 cannot be used */ 831 /* limit drive capacity to 137GB if LBA48 cannot be used */
819 if (drive->addressing == 0 && drive->capacity64 > 1ULL << 28) { 832 if (drive->addressing == 0 && drive->capacity64 > 1ULL << 28) {
@@ -826,9 +839,9 @@ static void idedisk_setup (ide_drive_t *drive)
826 839
827 if ((hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) && drive->addressing) { 840 if ((hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) && drive->addressing) {
828 if (drive->capacity64 > 1ULL << 28) { 841 if (drive->capacity64 > 1ULL << 28) {
829 printk(KERN_INFO "%s: cannot use LBA48 DMA - PIO mode will" 842 printk(KERN_INFO "%s: cannot use LBA48 DMA - PIO mode"
830 " be used for accessing sectors > %u\n", 843 " will be used for accessing sectors "
831 drive->name, 1 << 28); 844 "> %u\n", drive->name, 1 << 28);
832 } else 845 } else
833 drive->addressing = 0; 846 drive->addressing = 0;
834 } 847 }
@@ -837,7 +850,8 @@ static void idedisk_setup (ide_drive_t *drive)
837 * if possible, give fdisk access to more of the drive, 850 * if possible, give fdisk access to more of the drive,
838 * by correcting bios_cyls: 851 * by correcting bios_cyls:
839 */ 852 */
840 capacity = idedisk_capacity (drive); 853 capacity = idedisk_capacity(drive);
854
841 if (!drive->forced_geom) { 855 if (!drive->forced_geom) {
842 856
843 if (idedisk_supports_lba48(drive->id)) { 857 if (idedisk_supports_lba48(drive->id)) {
@@ -993,7 +1007,8 @@ static int idedisk_open(struct inode *inode, struct file *filp)
993 struct ide_disk_obj *idkp; 1007 struct ide_disk_obj *idkp;
994 ide_drive_t *drive; 1008 ide_drive_t *drive;
995 1009
996 if (!(idkp = ide_disk_get(disk))) 1010 idkp = ide_disk_get(disk);
1011 if (idkp == NULL)
997 return -ENXIO; 1012 return -ENXIO;
998 1013
999 drive = idkp->drive; 1014 drive = idkp->drive;
@@ -1115,13 +1130,13 @@ static int idedisk_revalidate_disk(struct gendisk *disk)
1115} 1130}
1116 1131
1117static struct block_device_operations idedisk_ops = { 1132static struct block_device_operations idedisk_ops = {
1118 .owner = THIS_MODULE, 1133 .owner = THIS_MODULE,
1119 .open = idedisk_open, 1134 .open = idedisk_open,
1120 .release = idedisk_release, 1135 .release = idedisk_release,
1121 .ioctl = idedisk_ioctl, 1136 .ioctl = idedisk_ioctl,
1122 .getgeo = idedisk_getgeo, 1137 .getgeo = idedisk_getgeo,
1123 .media_changed = idedisk_media_changed, 1138 .media_changed = idedisk_media_changed,
1124 .revalidate_disk= idedisk_revalidate_disk 1139 .revalidate_disk = idedisk_revalidate_disk
1125}; 1140};
1126 1141
1127MODULE_DESCRIPTION("ATA DISK Driver"); 1142MODULE_DESCRIPTION("ATA DISK Driver");
@@ -1184,7 +1199,7 @@ failed:
1184 return -ENODEV; 1199 return -ENODEV;
1185} 1200}
1186 1201
1187static void __exit idedisk_exit (void) 1202static void __exit idedisk_exit(void)
1188{ 1203{
1189 driver_unregister(&idedisk_driver.gen_driver); 1204 driver_unregister(&idedisk_driver.gen_driver);
1190} 1205}
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index d61e5788d310..c352cf27b6e7 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -102,7 +102,7 @@ ide_startstop_t ide_dma_intr (ide_drive_t *drive)
102{ 102{
103 u8 stat = 0, dma_stat = 0; 103 u8 stat = 0, dma_stat = 0;
104 104
105 dma_stat = HWIF(drive)->ide_dma_end(drive); 105 dma_stat = drive->hwif->dma_ops->dma_end(drive);
106 stat = ide_read_status(drive); 106 stat = ide_read_status(drive);
107 107
108 if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) { 108 if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) {
@@ -394,7 +394,7 @@ void ide_dma_off_quietly(ide_drive_t *drive)
394 drive->using_dma = 0; 394 drive->using_dma = 0;
395 ide_toggle_bounce(drive, 0); 395 ide_toggle_bounce(drive, 0);
396 396
397 drive->hwif->dma_host_set(drive, 0); 397 drive->hwif->dma_ops->dma_host_set(drive, 0);
398} 398}
399 399
400EXPORT_SYMBOL(ide_dma_off_quietly); 400EXPORT_SYMBOL(ide_dma_off_quietly);
@@ -427,7 +427,7 @@ void ide_dma_on(ide_drive_t *drive)
427 drive->using_dma = 1; 427 drive->using_dma = 1;
428 ide_toggle_bounce(drive, 1); 428 ide_toggle_bounce(drive, 1);
429 429
430 drive->hwif->dma_host_set(drive, 1); 430 drive->hwif->dma_ops->dma_host_set(drive, 1);
431} 431}
432 432
433#ifdef CONFIG_BLK_DEV_IDEDMA_SFF 433#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
@@ -482,11 +482,12 @@ int ide_dma_setup(ide_drive_t *drive)
482 482
483EXPORT_SYMBOL_GPL(ide_dma_setup); 483EXPORT_SYMBOL_GPL(ide_dma_setup);
484 484
485static void ide_dma_exec_cmd(ide_drive_t *drive, u8 command) 485void ide_dma_exec_cmd(ide_drive_t *drive, u8 command)
486{ 486{
487 /* issue cmd to drive */ 487 /* issue cmd to drive */
488 ide_execute_command(drive, command, &ide_dma_intr, 2*WAIT_CMD, dma_timer_expiry); 488 ide_execute_command(drive, command, &ide_dma_intr, 2*WAIT_CMD, dma_timer_expiry);
489} 489}
490EXPORT_SYMBOL_GPL(ide_dma_exec_cmd);
490 491
491void ide_dma_start(ide_drive_t *drive) 492void ide_dma_start(ide_drive_t *drive)
492{ 493{
@@ -532,7 +533,7 @@ int __ide_dma_end (ide_drive_t *drive)
532EXPORT_SYMBOL(__ide_dma_end); 533EXPORT_SYMBOL(__ide_dma_end);
533 534
534/* returns 1 if dma irq issued, 0 otherwise */ 535/* returns 1 if dma irq issued, 0 otherwise */
535static int __ide_dma_test_irq(ide_drive_t *drive) 536int ide_dma_test_irq(ide_drive_t *drive)
536{ 537{
537 ide_hwif_t *hwif = HWIF(drive); 538 ide_hwif_t *hwif = HWIF(drive);
538 u8 dma_stat = hwif->INB(hwif->dma_status); 539 u8 dma_stat = hwif->INB(hwif->dma_status);
@@ -542,9 +543,10 @@ static int __ide_dma_test_irq(ide_drive_t *drive)
542 return 1; 543 return 1;
543 if (!drive->waiting_for_dma) 544 if (!drive->waiting_for_dma)
544 printk(KERN_WARNING "%s: (%s) called while not waiting\n", 545 printk(KERN_WARNING "%s: (%s) called while not waiting\n",
545 drive->name, __FUNCTION__); 546 drive->name, __func__);
546 return 0; 547 return 0;
547} 548}
549EXPORT_SYMBOL_GPL(ide_dma_test_irq);
548#else 550#else
549static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; } 551static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; }
550#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */ 552#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
@@ -574,6 +576,7 @@ static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
574{ 576{
575 struct hd_driveid *id = drive->id; 577 struct hd_driveid *id = drive->id;
576 ide_hwif_t *hwif = drive->hwif; 578 ide_hwif_t *hwif = drive->hwif;
579 const struct ide_port_ops *port_ops = hwif->port_ops;
577 unsigned int mask = 0; 580 unsigned int mask = 0;
578 581
579 switch(base) { 582 switch(base) {
@@ -581,8 +584,8 @@ static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
581 if ((id->field_valid & 4) == 0) 584 if ((id->field_valid & 4) == 0)
582 break; 585 break;
583 586
584 if (hwif->udma_filter) 587 if (port_ops && port_ops->udma_filter)
585 mask = hwif->udma_filter(drive); 588 mask = port_ops->udma_filter(drive);
586 else 589 else
587 mask = hwif->ultra_mask; 590 mask = hwif->ultra_mask;
588 mask &= id->dma_ultra; 591 mask &= id->dma_ultra;
@@ -598,8 +601,8 @@ static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
598 case XFER_MW_DMA_0: 601 case XFER_MW_DMA_0:
599 if ((id->field_valid & 2) == 0) 602 if ((id->field_valid & 2) == 0)
600 break; 603 break;
601 if (hwif->mdma_filter) 604 if (port_ops && port_ops->mdma_filter)
602 mask = hwif->mdma_filter(drive); 605 mask = port_ops->mdma_filter(drive);
603 else 606 else
604 mask = hwif->mwdma_mask; 607 mask = hwif->mwdma_mask;
605 mask &= id->dma_mword; 608 mask &= id->dma_mword;
@@ -703,17 +706,8 @@ static int ide_tune_dma(ide_drive_t *drive)
703 706
704 speed = ide_max_dma_mode(drive); 707 speed = ide_max_dma_mode(drive);
705 708
706 if (!speed) { 709 if (!speed)
707 /* is this really correct/needed? */ 710 return 0;
708 if ((hwif->host_flags & IDE_HFLAG_CY82C693) &&
709 ide_dma_good_drive(drive))
710 return 1;
711 else
712 return 0;
713 }
714
715 if (hwif->host_flags & IDE_HFLAG_NO_SET_MODE)
716 return 1;
717 711
718 if (ide_set_dma_mode(drive, speed)) 712 if (ide_set_dma_mode(drive, speed))
719 return 0; 713 return 0;
@@ -810,15 +804,15 @@ void ide_dma_timeout (ide_drive_t *drive)
810 804
811 printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name); 805 printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name);
812 806
813 if (hwif->ide_dma_test_irq(drive)) 807 if (hwif->dma_ops->dma_test_irq(drive))
814 return; 808 return;
815 809
816 hwif->ide_dma_end(drive); 810 hwif->dma_ops->dma_end(drive);
817} 811}
818 812
819EXPORT_SYMBOL(ide_dma_timeout); 813EXPORT_SYMBOL(ide_dma_timeout);
820 814
821static void ide_release_dma_engine(ide_hwif_t *hwif) 815void ide_release_dma_engine(ide_hwif_t *hwif)
822{ 816{
823 if (hwif->dmatable_cpu) { 817 if (hwif->dmatable_cpu) {
824 struct pci_dev *pdev = to_pci_dev(hwif->dev); 818 struct pci_dev *pdev = to_pci_dev(hwif->dev);
@@ -829,28 +823,7 @@ static void ide_release_dma_engine(ide_hwif_t *hwif)
829 } 823 }
830} 824}
831 825
832static int ide_release_iomio_dma(ide_hwif_t *hwif) 826int ide_allocate_dma_engine(ide_hwif_t *hwif)
833{
834 release_region(hwif->dma_base, 8);
835 if (hwif->extra_ports)
836 release_region(hwif->extra_base, hwif->extra_ports);
837 return 1;
838}
839
840/*
841 * Needed for allowing full modular support of ide-driver
842 */
843int ide_release_dma(ide_hwif_t *hwif)
844{
845 ide_release_dma_engine(hwif);
846
847 if (hwif->mmio)
848 return 1;
849 else
850 return ide_release_iomio_dma(hwif);
851}
852
853static int ide_allocate_dma_engine(ide_hwif_t *hwif)
854{ 827{
855 struct pci_dev *pdev = to_pci_dev(hwif->dev); 828 struct pci_dev *pdev = to_pci_dev(hwif->dev);
856 829
@@ -862,65 +835,25 @@ static int ide_allocate_dma_engine(ide_hwif_t *hwif)
862 return 0; 835 return 0;
863 836
864 printk(KERN_ERR "%s: -- Error, unable to allocate DMA table.\n", 837 printk(KERN_ERR "%s: -- Error, unable to allocate DMA table.\n",
865 hwif->cds->name); 838 hwif->name);
866 839
867 return 1; 840 return 1;
868} 841}
869 842EXPORT_SYMBOL_GPL(ide_allocate_dma_engine);
870static int ide_mapped_mmio_dma(ide_hwif_t *hwif, unsigned long base) 843
871{ 844static const struct ide_dma_ops sff_dma_ops = {
872 printk(KERN_INFO " %s: MMIO-DMA ", hwif->name); 845 .dma_host_set = ide_dma_host_set,
873 846 .dma_setup = ide_dma_setup,
874 return 0; 847 .dma_exec_cmd = ide_dma_exec_cmd,
875} 848 .dma_start = ide_dma_start,
876 849 .dma_end = __ide_dma_end,
877static int ide_iomio_dma(ide_hwif_t *hwif, unsigned long base) 850 .dma_test_irq = ide_dma_test_irq,
878{ 851 .dma_timeout = ide_dma_timeout,
879 printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx", 852 .dma_lost_irq = ide_dma_lost_irq,
880 hwif->name, base, base + 7); 853};
881
882 if (!request_region(base, 8, hwif->name)) {
883 printk(" -- Error, ports in use.\n");
884 return 1;
885 }
886
887 if (hwif->cds->extra) {
888 hwif->extra_base = base + (hwif->channel ? 8 : 16);
889
890 if (!hwif->mate || !hwif->mate->extra_ports) {
891 if (!request_region(hwif->extra_base,
892 hwif->cds->extra, hwif->cds->name)) {
893 printk(" -- Error, extra ports in use.\n");
894 release_region(base, 8);
895 return 1;
896 }
897 hwif->extra_ports = hwif->cds->extra;
898 }
899 }
900
901 return 0;
902}
903
904static int ide_dma_iobase(ide_hwif_t *hwif, unsigned long base)
905{
906 if (hwif->mmio)
907 return ide_mapped_mmio_dma(hwif, base);
908
909 return ide_iomio_dma(hwif, base);
910}
911 854
912void ide_setup_dma(ide_hwif_t *hwif, unsigned long base) 855void ide_setup_dma(ide_hwif_t *hwif, unsigned long base)
913{ 856{
914 u8 dma_stat;
915
916 if (ide_dma_iobase(hwif, base))
917 return;
918
919 if (ide_allocate_dma_engine(hwif)) {
920 ide_release_dma(hwif);
921 return;
922 }
923
924 hwif->dma_base = base; 857 hwif->dma_base = base;
925 858
926 if (!hwif->dma_command) 859 if (!hwif->dma_command)
@@ -934,27 +867,7 @@ void ide_setup_dma(ide_hwif_t *hwif, unsigned long base)
934 if (!hwif->dma_prdtable) 867 if (!hwif->dma_prdtable)
935 hwif->dma_prdtable = hwif->dma_base + 4; 868 hwif->dma_prdtable = hwif->dma_base + 4;
936 869
937 if (!hwif->dma_host_set) 870 hwif->dma_ops = &sff_dma_ops;
938 hwif->dma_host_set = &ide_dma_host_set;
939 if (!hwif->dma_setup)
940 hwif->dma_setup = &ide_dma_setup;
941 if (!hwif->dma_exec_cmd)
942 hwif->dma_exec_cmd = &ide_dma_exec_cmd;
943 if (!hwif->dma_start)
944 hwif->dma_start = &ide_dma_start;
945 if (!hwif->ide_dma_end)
946 hwif->ide_dma_end = &__ide_dma_end;
947 if (!hwif->ide_dma_test_irq)
948 hwif->ide_dma_test_irq = &__ide_dma_test_irq;
949 if (!hwif->dma_timeout)
950 hwif->dma_timeout = &ide_dma_timeout;
951 if (!hwif->dma_lost_irq)
952 hwif->dma_lost_irq = &ide_dma_lost_irq;
953
954 dma_stat = hwif->INB(hwif->dma_status);
955 printk(KERN_CONT ", BIOS settings: %s:%s, %s:%s\n",
956 hwif->drives[0].name, (dma_stat & 0x20) ? "DMA" : "PIO",
957 hwif->drives[1].name, (dma_stat & 0x40) ? "DMA" : "PIO");
958} 871}
959 872
960EXPORT_SYMBOL_GPL(ide_setup_dma); 873EXPORT_SYMBOL_GPL(ide_setup_dma);
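[editor's note] The two ide-dma.c hunks above replace eight separate per-hwif function-pointer fields with a single dma_ops pointer to one shared, read-only sff_dma_ops table, and delete the if-chain in ide_setup_dma() that used to fill the defaults in field by field. A minimal user-space sketch of the pattern follows; every name and type in it is an illustrative stand-in, not the kernel's API.

	#include <stdio.h>

	struct ifc;                             /* stand-in for ide_hwif_t */

	struct dma_ops {
		void (*dma_start)(struct ifc *);
		int  (*dma_end)(struct ifc *);
	};

	struct ifc {
		const char *name;
		const struct dma_ops *dma_ops;  /* one pointer instead of N fields */
	};

	static void sff_start(struct ifc *i) { printf("%s: DMA start\n", i->name); }
	static int  sff_end(struct ifc *i)   { printf("%s: DMA end\n", i->name); return 0; }

	/* shared by every port that uses the default engine; lives in .rodata */
	static const struct dma_ops sff_ops = {
		.dma_start = sff_start,
		.dma_end   = sff_end,
	};

	int main(void)
	{
		struct ifc port = { .name = "ide0", .dma_ops = &sff_ops };

		port.dma_ops->dma_start(&port);   /* call sites go through the table */
		return port.dma_ops->dma_end(&port);
	}

Besides shrinking the per-port structure, the table can be const, and a host driver needing different behaviour supplies a whole replacement table (d->dma_ops, as the ide-probe.c hunks further down show) instead of patching individual fields.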
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 5f133dfb541c..489079b8ed03 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -396,7 +396,7 @@ static void idefloppy_retry_pc(ide_drive_t *drive)
396} 396}
397 397
398/* The usual interrupt handler called during a packet command. */ 398/* The usual interrupt handler called during a packet command. */
399static ide_startstop_t idefloppy_pc_intr (ide_drive_t *drive) 399static ide_startstop_t idefloppy_pc_intr(ide_drive_t *drive)
400{ 400{
401 idefloppy_floppy_t *floppy = drive->driver_data; 401 idefloppy_floppy_t *floppy = drive->driver_data;
402 ide_hwif_t *hwif = drive->hwif; 402 ide_hwif_t *hwif = drive->hwif;
@@ -411,7 +411,7 @@ static ide_startstop_t idefloppy_pc_intr (ide_drive_t *drive)
411 debug_log("Reached %s interrupt handler\n", __func__); 411 debug_log("Reached %s interrupt handler\n", __func__);
412 412
413 if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) { 413 if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
414 dma_error = hwif->ide_dma_end(drive); 414 dma_error = hwif->dma_ops->dma_end(drive);
415 if (dma_error) { 415 if (dma_error) {
416 printk(KERN_ERR "%s: DMA %s error\n", drive->name, 416 printk(KERN_ERR "%s: DMA %s error\n", drive->name,
417 rq_data_dir(rq) ? "write" : "read"); 417 rq_data_dir(rq) ? "write" : "read");
@@ -465,10 +465,10 @@ static ide_startstop_t idefloppy_pc_intr (ide_drive_t *drive)
465 } 465 }
466 466
467 /* Get the number of bytes to transfer */ 467 /* Get the number of bytes to transfer */
468 bcount = (hwif->INB(hwif->io_ports[IDE_BCOUNTH_OFFSET]) << 8) | 468 bcount = (hwif->INB(hwif->io_ports.lbah_addr) << 8) |
469 hwif->INB(hwif->io_ports[IDE_BCOUNTL_OFFSET]); 469 hwif->INB(hwif->io_ports.lbam_addr);
470 /* on this interrupt */ 470 /* on this interrupt */
471 ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]); 471 ireason = hwif->INB(hwif->io_ports.nsect_addr);
472 472
473 if (ireason & CD) { 473 if (ireason & CD) {
474 printk(KERN_ERR "ide-floppy: CoD != 0 in %s\n", __func__); 474 printk(KERN_ERR "ide-floppy: CoD != 0 in %s\n", __func__);
@@ -539,7 +539,7 @@ static ide_startstop_t idefloppy_transfer_pc(ide_drive_t *drive)
539 "initiated yet DRQ isn't asserted\n"); 539 "initiated yet DRQ isn't asserted\n");
540 return startstop; 540 return startstop;
541 } 541 }
542 ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]); 542 ireason = hwif->INB(hwif->io_ports.nsect_addr);
543 if ((ireason & CD) == 0 || (ireason & IO)) { 543 if ((ireason & CD) == 0 || (ireason & IO)) {
544 printk(KERN_ERR "ide-floppy: (IO,CoD) != (0,1) while " 544 printk(KERN_ERR "ide-floppy: (IO,CoD) != (0,1) while "
545 "issuing a packet command\n"); 545 "issuing a packet command\n");
@@ -586,7 +586,7 @@ static ide_startstop_t idefloppy_transfer_pc1(ide_drive_t *drive)
586 "initiated yet DRQ isn't asserted\n"); 586 "initiated yet DRQ isn't asserted\n");
587 return startstop; 587 return startstop;
588 } 588 }
589 ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]); 589 ireason = hwif->INB(hwif->io_ports.nsect_addr);
590 if ((ireason & CD) == 0 || (ireason & IO)) { 590 if ((ireason & CD) == 0 || (ireason & IO)) {
591 printk(KERN_ERR "ide-floppy: (IO,CoD) != (0,1) " 591 printk(KERN_ERR "ide-floppy: (IO,CoD) != (0,1) "
592 "while issuing a packet command\n"); 592 "while issuing a packet command\n");
@@ -663,7 +663,7 @@ static ide_startstop_t idefloppy_issue_pc(ide_drive_t *drive,
663 dma = 0; 663 dma = 0;
664 664
665 if ((pc->flags & PC_FLAG_DMA_RECOMMENDED) && drive->using_dma) 665 if ((pc->flags & PC_FLAG_DMA_RECOMMENDED) && drive->using_dma)
666 dma = !hwif->dma_setup(drive); 666 dma = !hwif->dma_ops->dma_setup(drive);
667 667
668 ide_pktcmd_tf_load(drive, IDE_TFLAG_NO_SELECT_MASK | 668 ide_pktcmd_tf_load(drive, IDE_TFLAG_NO_SELECT_MASK |
669 IDE_TFLAG_OUT_DEVICE, bcount, dma); 669 IDE_TFLAG_OUT_DEVICE, bcount, dma);
@@ -671,7 +671,7 @@ static ide_startstop_t idefloppy_issue_pc(ide_drive_t *drive,
671 if (dma) { 671 if (dma) {
672 /* Begin DMA, if necessary */ 672 /* Begin DMA, if necessary */
673 pc->flags |= PC_FLAG_DMA_IN_PROGRESS; 673 pc->flags |= PC_FLAG_DMA_IN_PROGRESS;
674 hwif->dma_start(drive); 674 hwif->dma_ops->dma_start(drive);
675 } 675 }
676 676
677 /* Can we transfer the packet when we get the interrupt or wait? */ 677 /* Can we transfer the packet when we get the interrupt or wait? */
@@ -692,7 +692,7 @@ static ide_startstop_t idefloppy_issue_pc(ide_drive_t *drive,
692 return ide_started; 692 return ide_started;
693 } else { 693 } else {
694 /* Issue the packet command */ 694 /* Issue the packet command */
695 hwif->OUTB(WIN_PACKETCMD, hwif->io_ports[IDE_COMMAND_OFFSET]); 695 hwif->OUTB(WIN_PACKETCMD, hwif->io_ports.command_addr);
696 return (*pkt_xfer_routine) (drive); 696 return (*pkt_xfer_routine) (drive);
697 } 697 }
698} 698}
@@ -1596,13 +1596,13 @@ static int idefloppy_revalidate_disk(struct gendisk *disk)
1596} 1596}
1597 1597
1598static struct block_device_operations idefloppy_ops = { 1598static struct block_device_operations idefloppy_ops = {
1599 .owner = THIS_MODULE, 1599 .owner = THIS_MODULE,
1600 .open = idefloppy_open, 1600 .open = idefloppy_open,
1601 .release = idefloppy_release, 1601 .release = idefloppy_release,
1602 .ioctl = idefloppy_ioctl, 1602 .ioctl = idefloppy_ioctl,
1603 .getgeo = idefloppy_getgeo, 1603 .getgeo = idefloppy_getgeo,
1604 .media_changed = idefloppy_media_changed, 1604 .media_changed = idefloppy_media_changed,
1605 .revalidate_disk= idefloppy_revalidate_disk 1605 .revalidate_disk = idefloppy_revalidate_disk
1606}; 1606};
1607 1607
1608static int ide_floppy_probe(ide_drive_t *drive) 1608static int ide_floppy_probe(ide_drive_t *drive)
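[editor's note] The ide-floppy.c conversions above are call-site changes for the same refactoring: hwif->dma_setup, hwif->dma_start and hwif->ide_dma_end become dispatches through hwif->dma_ops, and "dma = !...dma_setup(...)" keeps the existing convention that dma_setup() returns 0 on success. A toy, compilable model of that try-DMA-else-fall-back-to-PIO decision, with stubs standing in for the kernel types:

	#include <stdio.h>

	struct drv { const char *name; int using_dma; };

	struct dma_ops {
		int  (*dma_setup)(struct drv *);
		void (*dma_start)(struct drv *);
	};

	static int  setup_ok(struct drv *d) { (void)d; return 0; }
	static void start(struct drv *d)    { printf("%s: DMA started\n", d->name); }

	static const struct dma_ops ops = { .dma_setup = setup_ok, .dma_start = start };

	static void issue_pc(struct drv *d, const struct dma_ops *o, int dma_recommended)
	{
		int dma = 0;

		if (dma_recommended && d->using_dma)
			dma = !o->dma_setup(d);   /* 0 == success, hence the negation */

		/* ...load task file and byte count here... */

		if (dma)
			o->dma_start(d);          /* engine runs from now on */
		else
			printf("%s: falling back to PIO\n", d->name);
	}

	int main(void)
	{
		struct drv d = { "hdc", 1 };
		issue_pc(&d, &ops, 1);
		return 0;
	}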
diff --git a/drivers/ide/ide-generic.c b/drivers/ide/ide-generic.c
index 25fda0a3263f..a6073e248f45 100644
--- a/drivers/ide/ide-generic.c
+++ b/drivers/ide/ide-generic.c
@@ -33,7 +33,7 @@ static ssize_t store_add(struct class *cls, const char *buf, size_t n)
33 if (sscanf(buf, "%x:%x:%d", &base, &ctl, &irq) != 3) 33 if (sscanf(buf, "%x:%x:%d", &base, &ctl, &irq) != 3)
34 return -EINVAL; 34 return -EINVAL;
35 35
36 hwif = ide_find_port(base); 36 hwif = ide_find_port();
37 if (hwif == NULL) 37 if (hwif == NULL)
38 return -ENOENT; 38 return -ENOENT;
39 39
@@ -90,19 +90,45 @@ static int __init ide_generic_init(void)
90 int i; 90 int i;
91 91
92 for (i = 0; i < MAX_HWIFS; i++) { 92 for (i = 0; i < MAX_HWIFS; i++) {
93 ide_hwif_t *hwif = &ide_hwifs[i]; 93 ide_hwif_t *hwif;
94 unsigned long io_addr = ide_default_io_base(i); 94 unsigned long io_addr = ide_default_io_base(i);
95 hw_regs_t hw; 95 hw_regs_t hw;
96 96
97 if (hwif->chipset == ide_unknown && io_addr) { 97 idx[i] = 0xff;
98
99 if (io_addr) {
100 if (!request_region(io_addr, 8, DRV_NAME)) {
101 printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX "
102 "not free.\n",
103 DRV_NAME, io_addr, io_addr + 7);
104 continue;
105 }
106
107 if (!request_region(io_addr + 0x206, 1, DRV_NAME)) {
108 printk(KERN_ERR "%s: I/O resource 0x%lX "
109 "not free.\n",
110 DRV_NAME, io_addr + 0x206);
111 release_region(io_addr, 8);
112 continue;
113 }
114
115 /*
116 * Skip probing if the corresponding
117 * slot is already occupied.
118 */
119 hwif = ide_find_port();
120 if (hwif == NULL || hwif->index != i) {
121 idx[i] = 0xff;
122 continue;
123 }
124
98 memset(&hw, 0, sizeof(hw)); 125 memset(&hw, 0, sizeof(hw));
99 ide_std_init_ports(&hw, io_addr, io_addr + 0x206); 126 ide_std_init_ports(&hw, io_addr, io_addr + 0x206);
100 hw.irq = ide_default_irq(io_addr); 127 hw.irq = ide_default_irq(io_addr);
101 ide_init_port_hw(hwif, &hw); 128 ide_init_port_hw(hwif, &hw);
102 129
103 idx[i] = i; 130 idx[i] = i;
104 } else 131 }
105 idx[i] = 0xff;
106 } 132 }
107 133
108 ide_device_add_all(idx, NULL); 134 ide_device_add_all(idx, NULL);
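[editor's note] The rewritten ide-generic init loop above claims both I/O ranges of a legacy port (the eight task-file registers at io_addr, plus the control register at io_addr + 0x206) before probing, and releases the first range if the second claim fails; skipping that rollback would leak the region for the rest of the system's uptime. A compilable model of the claim-with-rollback pattern, where claim()/unclaim() stand in for request_region()/release_region():

	#include <stdio.h>

	static int  claim(unsigned long start, unsigned long len)   { (void)start; (void)len; return 1; }
	static void unclaim(unsigned long start, unsigned long len) { (void)start; (void)len; }

	static int probe_one(unsigned long io_addr)
	{
		if (!claim(io_addr, 8))
			return -1;                    /* nothing held yet */

		if (!claim(io_addr + 0x206, 1)) {
			unclaim(io_addr, 8);          /* roll back the first claim */
			return -1;
		}

		/* ...probe the port; on success both regions stay claimed... */
		printf("ide port at 0x%lx claimed\n", io_addr);
		return 0;
	}

	int main(void)
	{
		return probe_one(0x1f0) ? 1 : 0;
	}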
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 31e5afadb7e9..3a2d8930d17f 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -218,7 +218,7 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *
218 * we could be smarter and check for current xfer_speed 218 * we could be smarter and check for current xfer_speed
219 * in struct drive etc... 219 * in struct drive etc...
220 */ 220 */
221 if (drive->hwif->dma_host_set == NULL) 221 if (drive->hwif->dma_ops == NULL)
222 break; 222 break;
223 /* 223 /*
224 * TODO: respect ->using_dma setting 224 * TODO: respect ->using_dma setting
@@ -298,48 +298,43 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
298void ide_tf_read(ide_drive_t *drive, ide_task_t *task) 298void ide_tf_read(ide_drive_t *drive, ide_task_t *task)
299{ 299{
300 ide_hwif_t *hwif = drive->hwif; 300 ide_hwif_t *hwif = drive->hwif;
301 struct ide_io_ports *io_ports = &hwif->io_ports;
301 struct ide_taskfile *tf = &task->tf; 302 struct ide_taskfile *tf = &task->tf;
302 303
303 if (task->tf_flags & IDE_TFLAG_IN_DATA) { 304 if (task->tf_flags & IDE_TFLAG_IN_DATA) {
304 u16 data = hwif->INW(hwif->io_ports[IDE_DATA_OFFSET]); 305 u16 data = hwif->INW(io_ports->data_addr);
305 306
306 tf->data = data & 0xff; 307 tf->data = data & 0xff;
307 tf->hob_data = (data >> 8) & 0xff; 308 tf->hob_data = (data >> 8) & 0xff;
308 } 309 }
309 310
310 /* be sure we're looking at the low order bits */ 311 /* be sure we're looking at the low order bits */
311 hwif->OUTB(drive->ctl & ~0x80, hwif->io_ports[IDE_CONTROL_OFFSET]); 312 hwif->OUTB(drive->ctl & ~0x80, io_ports->ctl_addr);
312 313
313 if (task->tf_flags & IDE_TFLAG_IN_NSECT) 314 if (task->tf_flags & IDE_TFLAG_IN_NSECT)
314 tf->nsect = hwif->INB(hwif->io_ports[IDE_NSECTOR_OFFSET]); 315 tf->nsect = hwif->INB(io_ports->nsect_addr);
315 if (task->tf_flags & IDE_TFLAG_IN_LBAL) 316 if (task->tf_flags & IDE_TFLAG_IN_LBAL)
316 tf->lbal = hwif->INB(hwif->io_ports[IDE_SECTOR_OFFSET]); 317 tf->lbal = hwif->INB(io_ports->lbal_addr);
317 if (task->tf_flags & IDE_TFLAG_IN_LBAM) 318 if (task->tf_flags & IDE_TFLAG_IN_LBAM)
318 tf->lbam = hwif->INB(hwif->io_ports[IDE_LCYL_OFFSET]); 319 tf->lbam = hwif->INB(io_ports->lbam_addr);
319 if (task->tf_flags & IDE_TFLAG_IN_LBAH) 320 if (task->tf_flags & IDE_TFLAG_IN_LBAH)
320 tf->lbah = hwif->INB(hwif->io_ports[IDE_HCYL_OFFSET]); 321 tf->lbah = hwif->INB(io_ports->lbah_addr);
321 if (task->tf_flags & IDE_TFLAG_IN_DEVICE) 322 if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
322 tf->device = hwif->INB(hwif->io_ports[IDE_SELECT_OFFSET]); 323 tf->device = hwif->INB(io_ports->device_addr);
323 324
324 if (task->tf_flags & IDE_TFLAG_LBA48) { 325 if (task->tf_flags & IDE_TFLAG_LBA48) {
325 hwif->OUTB(drive->ctl | 0x80, 326 hwif->OUTB(drive->ctl | 0x80, io_ports->ctl_addr);
326 hwif->io_ports[IDE_CONTROL_OFFSET]);
327 327
328 if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE) 328 if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
329 tf->hob_feature = 329 tf->hob_feature = hwif->INB(io_ports->feature_addr);
330 hwif->INB(hwif->io_ports[IDE_FEATURE_OFFSET]);
331 if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT) 330 if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
332 tf->hob_nsect = 331 tf->hob_nsect = hwif->INB(io_ports->nsect_addr);
333 hwif->INB(hwif->io_ports[IDE_NSECTOR_OFFSET]);
334 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL) 332 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
335 tf->hob_lbal = 333 tf->hob_lbal = hwif->INB(io_ports->lbal_addr);
336 hwif->INB(hwif->io_ports[IDE_SECTOR_OFFSET]);
337 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM) 334 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
338 tf->hob_lbam = 335 tf->hob_lbam = hwif->INB(io_ports->lbam_addr);
339 hwif->INB(hwif->io_ports[IDE_LCYL_OFFSET]);
340 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH) 336 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
341 tf->hob_lbah = 337 tf->hob_lbah = hwif->INB(io_ports->lbah_addr);
342 hwif->INB(hwif->io_ports[IDE_HCYL_OFFSET]);
343 } 338 }
344} 339}
345 340
@@ -454,7 +449,7 @@ static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8
454 if (err == ABRT_ERR) { 449 if (err == ABRT_ERR) {
455 if (drive->select.b.lba && 450 if (drive->select.b.lba &&
456 /* some newer drives don't support WIN_SPECIFY */ 451 /* some newer drives don't support WIN_SPECIFY */
457 hwif->INB(hwif->io_ports[IDE_COMMAND_OFFSET]) == 452 hwif->INB(hwif->io_ports.command_addr) ==
458 WIN_SPECIFY) 453 WIN_SPECIFY)
459 return ide_stopped; 454 return ide_stopped;
460 } else if ((err & BAD_CRC) == BAD_CRC) { 455 } else if ((err & BAD_CRC) == BAD_CRC) {
@@ -507,8 +502,7 @@ static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u
507 502
508 if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT)) 503 if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT))
509 /* force an abort */ 504 /* force an abort */
510 hwif->OUTB(WIN_IDLEIMMEDIATE, 505 hwif->OUTB(WIN_IDLEIMMEDIATE, hwif->io_ports.command_addr);
511 hwif->io_ports[IDE_COMMAND_OFFSET]);
512 506
513 if (rq->errors >= ERROR_MAX) { 507 if (rq->errors >= ERROR_MAX) {
514 ide_kill_rq(drive, rq); 508 ide_kill_rq(drive, rq);
@@ -721,15 +715,12 @@ static ide_startstop_t do_special (ide_drive_t *drive)
721#endif 715#endif
722 if (s->b.set_tune) { 716 if (s->b.set_tune) {
723 ide_hwif_t *hwif = drive->hwif; 717 ide_hwif_t *hwif = drive->hwif;
718 const struct ide_port_ops *port_ops = hwif->port_ops;
724 u8 req_pio = drive->tune_req; 719 u8 req_pio = drive->tune_req;
725 720
726 s->b.set_tune = 0; 721 s->b.set_tune = 0;
727 722
728 if (set_pio_mode_abuse(drive->hwif, req_pio)) { 723 if (set_pio_mode_abuse(drive->hwif, req_pio)) {
729
730 if (hwif->set_pio_mode == NULL)
731 return ide_stopped;
732
733 /* 724 /*
734 * take ide_lock for drive->[no_]unmask/[no_]io_32bit 725 * take ide_lock for drive->[no_]unmask/[no_]io_32bit
735 */ 726 */
@@ -737,10 +728,10 @@ static ide_startstop_t do_special (ide_drive_t *drive)
737 unsigned long flags; 728 unsigned long flags;
738 729
739 spin_lock_irqsave(&ide_lock, flags); 730 spin_lock_irqsave(&ide_lock, flags);
740 hwif->set_pio_mode(drive, req_pio); 731 port_ops->set_pio_mode(drive, req_pio);
741 spin_unlock_irqrestore(&ide_lock, flags); 732 spin_unlock_irqrestore(&ide_lock, flags);
742 } else 733 } else
743 hwif->set_pio_mode(drive, req_pio); 734 port_ops->set_pio_mode(drive, req_pio);
744 } else { 735 } else {
745 int keep_dma = drive->using_dma; 736 int keep_dma = drive->using_dma;
746 737
@@ -1241,12 +1232,12 @@ static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
1241 1232
1242 if (error < 0) { 1233 if (error < 0) {
1243 printk(KERN_WARNING "%s: DMA timeout error\n", drive->name); 1234 printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
1244 (void)HWIF(drive)->ide_dma_end(drive); 1235 (void)hwif->dma_ops->dma_end(drive);
1245 ret = ide_error(drive, "dma timeout error", 1236 ret = ide_error(drive, "dma timeout error",
1246 ide_read_status(drive)); 1237 ide_read_status(drive));
1247 } else { 1238 } else {
1248 printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name); 1239 printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
1249 hwif->dma_timeout(drive); 1240 hwif->dma_ops->dma_timeout(drive);
1250 } 1241 }
1251 1242
1252 /* 1243 /*
@@ -1358,7 +1349,7 @@ void ide_timer_expiry (unsigned long data)
1358 startstop = handler(drive); 1349 startstop = handler(drive);
1359 } else if (drive_is_ready(drive)) { 1350 } else if (drive_is_ready(drive)) {
1360 if (drive->waiting_for_dma) 1351 if (drive->waiting_for_dma)
1361 hwgroup->hwif->dma_lost_irq(drive); 1352 hwif->dma_ops->dma_lost_irq(drive);
1362 (void)ide_ack_intr(hwif); 1353 (void)ide_ack_intr(hwif);
1363 printk(KERN_WARNING "%s: lost interrupt\n", drive->name); 1354 printk(KERN_WARNING "%s: lost interrupt\n", drive->name);
1364 startstop = handler(drive); 1355 startstop = handler(drive);
@@ -1424,7 +1415,7 @@ static void unexpected_intr (int irq, ide_hwgroup_t *hwgroup)
1424 */ 1415 */
1425 do { 1416 do {
1426 if (hwif->irq == irq) { 1417 if (hwif->irq == irq) {
1427 stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]); 1418 stat = hwif->INB(hwif->io_ports.status_addr);
1428 if (!OK_STAT(stat, READY_STAT, BAD_STAT)) { 1419 if (!OK_STAT(stat, READY_STAT, BAD_STAT)) {
1429 /* Try to not flood the console with msgs */ 1420 /* Try to not flood the console with msgs */
1430 static unsigned long last_msgtime, count; 1421 static unsigned long last_msgtime, count;
@@ -1514,7 +1505,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
1514 * Whack the status register, just in case 1505 * Whack the status register, just in case
1515 * we have a leftover pending IRQ. 1506 * we have a leftover pending IRQ.
1516 */ 1507 */
1517 (void) hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]); 1508 (void) hwif->INB(hwif->io_ports.status_addr);
1518#endif /* CONFIG_BLK_DEV_IDEPCI */ 1509#endif /* CONFIG_BLK_DEV_IDEPCI */
1519 } 1510 }
1520 spin_unlock_irqrestore(&ide_lock, flags); 1511 spin_unlock_irqrestore(&ide_lock, flags);
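[editor's note] Most of the ide-io.c hunks are mechanical: hwif->io_ports changes from an array indexed by IDE_*_OFFSET constants to a struct ide_io_ports with one named field per task-file register, which also lets ide_tf_read() take a local io_ports pointer instead of repeating the long expression. A simplified, compilable model of the new layout; the field names follow the diff, the offsets are the standard ATA register block, and the struct below is a sketch rather than the kernel's exact definition:

	#include <stdio.h>

	struct io_ports_model {
		unsigned long data_addr;
		unsigned long error_addr;     /* read: error,  write: feature */
		unsigned long feature_addr;
		unsigned long nsect_addr;
		unsigned long lbal_addr;
		unsigned long lbam_addr;
		unsigned long lbah_addr;
		unsigned long device_addr;
		unsigned long status_addr;    /* read: status, write: command */
		unsigned long command_addr;
		unsigned long ctl_addr;       /* decoded in a separate block */
	};

	static void std_init_ports(struct io_ports_model *p,
				   unsigned long base, unsigned long ctl)
	{
		p->data_addr    = base;
		p->error_addr   = p->feature_addr = base + 1;
		p->nsect_addr   = base + 2;
		p->lbal_addr    = base + 3;
		p->lbam_addr    = base + 4;
		p->lbah_addr    = base + 5;
		p->device_addr  = base + 6;
		p->status_addr  = p->command_addr = base + 7;
		p->ctl_addr     = ctl;
	}

	int main(void)
	{
		struct io_ports_model p;

		std_init_ports(&p, 0x1f0, 0x3f6);   /* the legacy ide0 decode */
		printf("nsect register at 0x%lx\n", p.nsect_addr);
		return 0;
	}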
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index 45944219eea0..5425d3038ec2 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -159,17 +159,20 @@ EXPORT_SYMBOL(default_hwif_mmiops);
159void SELECT_DRIVE (ide_drive_t *drive) 159void SELECT_DRIVE (ide_drive_t *drive)
160{ 160{
161 ide_hwif_t *hwif = drive->hwif; 161 ide_hwif_t *hwif = drive->hwif;
162 const struct ide_port_ops *port_ops = hwif->port_ops;
162 163
163 if (hwif->selectproc) 164 if (port_ops && port_ops->selectproc)
164 hwif->selectproc(drive); 165 port_ops->selectproc(drive);
165 166
166 hwif->OUTB(drive->select.all, hwif->io_ports[IDE_SELECT_OFFSET]); 167 hwif->OUTB(drive->select.all, hwif->io_ports.device_addr);
167} 168}
168 169
169void SELECT_MASK (ide_drive_t *drive, int mask) 170void SELECT_MASK (ide_drive_t *drive, int mask)
170{ 171{
171 if (HWIF(drive)->maskproc) 172 const struct ide_port_ops *port_ops = drive->hwif->port_ops;
172 HWIF(drive)->maskproc(drive, mask); 173
174 if (port_ops && port_ops->maskproc)
175 port_ops->maskproc(drive, mask);
173} 176}
174 177
175/* 178/*
@@ -191,24 +194,22 @@ static void ata_vlb_sync(ide_drive_t *drive, unsigned long port)
191 */ 194 */
192static void ata_input_data(ide_drive_t *drive, void *buffer, u32 wcount) 195static void ata_input_data(ide_drive_t *drive, void *buffer, u32 wcount)
193{ 196{
194 ide_hwif_t *hwif = HWIF(drive); 197 ide_hwif_t *hwif = drive->hwif;
195 u8 io_32bit = drive->io_32bit; 198 struct ide_io_ports *io_ports = &hwif->io_ports;
199 u8 io_32bit = drive->io_32bit;
196 200
197 if (io_32bit) { 201 if (io_32bit) {
198 if (io_32bit & 2) { 202 if (io_32bit & 2) {
199 unsigned long flags; 203 unsigned long flags;
200 204
201 local_irq_save(flags); 205 local_irq_save(flags);
202 ata_vlb_sync(drive, hwif->io_ports[IDE_NSECTOR_OFFSET]); 206 ata_vlb_sync(drive, io_ports->nsect_addr);
203 hwif->INSL(hwif->io_ports[IDE_DATA_OFFSET], buffer, 207 hwif->INSL(io_ports->data_addr, buffer, wcount);
204 wcount);
205 local_irq_restore(flags); 208 local_irq_restore(flags);
206 } else 209 } else
207 hwif->INSL(hwif->io_ports[IDE_DATA_OFFSET], buffer, 210 hwif->INSL(io_ports->data_addr, buffer, wcount);
208 wcount);
209 } else 211 } else
210 hwif->INSW(hwif->io_ports[IDE_DATA_OFFSET], buffer, 212 hwif->INSW(io_ports->data_addr, buffer, wcount << 1);
211 wcount << 1);
212} 213}
213 214
214/* 215/*
@@ -216,24 +217,22 @@ static void ata_input_data(ide_drive_t *drive, void *buffer, u32 wcount)
216 */ 217 */
217static void ata_output_data(ide_drive_t *drive, void *buffer, u32 wcount) 218static void ata_output_data(ide_drive_t *drive, void *buffer, u32 wcount)
218{ 219{
219 ide_hwif_t *hwif = HWIF(drive); 220 ide_hwif_t *hwif = drive->hwif;
220 u8 io_32bit = drive->io_32bit; 221 struct ide_io_ports *io_ports = &hwif->io_ports;
222 u8 io_32bit = drive->io_32bit;
221 223
222 if (io_32bit) { 224 if (io_32bit) {
223 if (io_32bit & 2) { 225 if (io_32bit & 2) {
224 unsigned long flags; 226 unsigned long flags;
225 227
226 local_irq_save(flags); 228 local_irq_save(flags);
227 ata_vlb_sync(drive, hwif->io_ports[IDE_NSECTOR_OFFSET]); 229 ata_vlb_sync(drive, io_ports->nsect_addr);
228 hwif->OUTSL(hwif->io_ports[IDE_DATA_OFFSET], buffer, 230 hwif->OUTSL(io_ports->data_addr, buffer, wcount);
229 wcount);
230 local_irq_restore(flags); 231 local_irq_restore(flags);
231 } else 232 } else
232 hwif->OUTSL(hwif->io_ports[IDE_DATA_OFFSET], buffer, 233 hwif->OUTSL(io_ports->data_addr, buffer, wcount);
233 wcount);
234 } else 234 } else
235 hwif->OUTSW(hwif->io_ports[IDE_DATA_OFFSET], buffer, 235 hwif->OUTSW(io_ports->data_addr, buffer, wcount << 1);
236 wcount << 1);
237} 236}
238 237
239/* 238/*
@@ -252,14 +251,13 @@ static void atapi_input_bytes(ide_drive_t *drive, void *buffer, u32 bytecount)
252#if defined(CONFIG_ATARI) || defined(CONFIG_Q40) 251#if defined(CONFIG_ATARI) || defined(CONFIG_Q40)
253 if (MACH_IS_ATARI || MACH_IS_Q40) { 252 if (MACH_IS_ATARI || MACH_IS_Q40) {
254 /* Atari has a byte-swapped IDE interface */ 253 /* Atari has a byte-swapped IDE interface */
255 insw_swapw(hwif->io_ports[IDE_DATA_OFFSET], buffer, 254 insw_swapw(hwif->io_ports.data_addr, buffer, bytecount / 2);
256 bytecount / 2);
257 return; 255 return;
258 } 256 }
259#endif /* CONFIG_ATARI || CONFIG_Q40 */ 257#endif /* CONFIG_ATARI || CONFIG_Q40 */
260 hwif->ata_input_data(drive, buffer, bytecount / 4); 258 hwif->ata_input_data(drive, buffer, bytecount / 4);
261 if ((bytecount & 0x03) >= 2) 259 if ((bytecount & 0x03) >= 2)
262 hwif->INSW(hwif->io_ports[IDE_DATA_OFFSET], 260 hwif->INSW(hwif->io_ports.data_addr,
263 (u8 *)buffer + (bytecount & ~0x03), 1); 261 (u8 *)buffer + (bytecount & ~0x03), 1);
264} 262}
265 263
@@ -271,14 +269,13 @@ static void atapi_output_bytes(ide_drive_t *drive, void *buffer, u32 bytecount)
271#if defined(CONFIG_ATARI) || defined(CONFIG_Q40) 269#if defined(CONFIG_ATARI) || defined(CONFIG_Q40)
272 if (MACH_IS_ATARI || MACH_IS_Q40) { 270 if (MACH_IS_ATARI || MACH_IS_Q40) {
273 /* Atari has a byte-swapped IDE interface */ 271 /* Atari has a byte-swapped IDE interface */
274 outsw_swapw(hwif->io_ports[IDE_DATA_OFFSET], buffer, 272 outsw_swapw(hwif->io_ports.data_addr, buffer, bytecount / 2);
275 bytecount / 2);
276 return; 273 return;
277 } 274 }
278#endif /* CONFIG_ATARI || CONFIG_Q40 */ 275#endif /* CONFIG_ATARI || CONFIG_Q40 */
279 hwif->ata_output_data(drive, buffer, bytecount / 4); 276 hwif->ata_output_data(drive, buffer, bytecount / 4);
280 if ((bytecount & 0x03) >= 2) 277 if ((bytecount & 0x03) >= 2)
281 hwif->OUTSW(hwif->io_ports[IDE_DATA_OFFSET], 278 hwif->OUTSW(hwif->io_ports.data_addr,
282 (u8 *)buffer + (bytecount & ~0x03), 1); 279 (u8 *)buffer + (bytecount & ~0x03), 1);
283} 280}
284 281
@@ -429,7 +426,7 @@ int drive_is_ready (ide_drive_t *drive)
429 u8 stat = 0; 426 u8 stat = 0;
430 427
431 if (drive->waiting_for_dma) 428 if (drive->waiting_for_dma)
432 return hwif->ide_dma_test_irq(drive); 429 return hwif->dma_ops->dma_test_irq(drive);
433 430
434#if 0 431#if 0
435 /* need to guarantee 400ns since last command was issued */ 432 /* need to guarantee 400ns since last command was issued */
@@ -442,7 +439,7 @@ int drive_is_ready (ide_drive_t *drive)
442 * an interrupt with another pci card/device. We make no assumptions 439 * an interrupt with another pci card/device. We make no assumptions
443 * about possible isa-pnp and pci-pnp issues yet. 440 * about possible isa-pnp and pci-pnp issues yet.
444 */ 441 */
445 if (hwif->io_ports[IDE_CONTROL_OFFSET]) 442 if (hwif->io_ports.ctl_addr)
446 stat = ide_read_altstatus(drive); 443 stat = ide_read_altstatus(drive);
447 else 444 else
448 /* Note: this may clear a pending IRQ!! */ 445 /* Note: this may clear a pending IRQ!! */
@@ -644,7 +641,7 @@ int ide_driveid_update(ide_drive_t *drive)
644 SELECT_MASK(drive, 1); 641 SELECT_MASK(drive, 1);
645 ide_set_irq(drive, 1); 642 ide_set_irq(drive, 1);
646 msleep(50); 643 msleep(50);
647 hwif->OUTB(WIN_IDENTIFY, hwif->io_ports[IDE_COMMAND_OFFSET]); 644 hwif->OUTB(WIN_IDENTIFY, hwif->io_ports.command_addr);
648 timeout = jiffies + WAIT_WORSTCASE; 645 timeout = jiffies + WAIT_WORSTCASE;
649 do { 646 do {
650 if (time_after(jiffies, timeout)) { 647 if (time_after(jiffies, timeout)) {
@@ -693,6 +690,7 @@ int ide_driveid_update(ide_drive_t *drive)
693int ide_config_drive_speed(ide_drive_t *drive, u8 speed) 690int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
694{ 691{
695 ide_hwif_t *hwif = drive->hwif; 692 ide_hwif_t *hwif = drive->hwif;
693 struct ide_io_ports *io_ports = &hwif->io_ports;
696 int error = 0; 694 int error = 0;
697 u8 stat; 695 u8 stat;
698 696
@@ -700,8 +698,8 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
700// msleep(50); 698// msleep(50);
701 699
702#ifdef CONFIG_BLK_DEV_IDEDMA 700#ifdef CONFIG_BLK_DEV_IDEDMA
703 if (hwif->dma_host_set) /* check if host supports DMA */ 701 if (hwif->dma_ops) /* check if host supports DMA */
704 hwif->dma_host_set(drive, 0); 702 hwif->dma_ops->dma_host_set(drive, 0);
705#endif 703#endif
706 704
707 /* Skip setting PIO flow-control modes on pre-EIDE drives */ 705 /* Skip setting PIO flow-control modes on pre-EIDE drives */
@@ -731,10 +729,9 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
731 SELECT_MASK(drive, 0); 729 SELECT_MASK(drive, 0);
732 udelay(1); 730 udelay(1);
733 ide_set_irq(drive, 0); 731 ide_set_irq(drive, 0);
734 hwif->OUTB(speed, hwif->io_ports[IDE_NSECTOR_OFFSET]); 732 hwif->OUTB(speed, io_ports->nsect_addr);
735 hwif->OUTB(SETFEATURES_XFER, hwif->io_ports[IDE_FEATURE_OFFSET]); 733 hwif->OUTB(SETFEATURES_XFER, io_ports->feature_addr);
736 hwif->OUTBSYNC(drive, WIN_SETFEATURES, 734 hwif->OUTBSYNC(drive, WIN_SETFEATURES, io_ports->command_addr);
737 hwif->io_ports[IDE_COMMAND_OFFSET]);
738 if (drive->quirk_list == 2) 735 if (drive->quirk_list == 2)
739 ide_set_irq(drive, 1); 736 ide_set_irq(drive, 1);
740 737
@@ -759,8 +756,8 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
759#ifdef CONFIG_BLK_DEV_IDEDMA 756#ifdef CONFIG_BLK_DEV_IDEDMA
760 if ((speed >= XFER_SW_DMA_0 || (hwif->host_flags & IDE_HFLAG_VDMA)) && 757 if ((speed >= XFER_SW_DMA_0 || (hwif->host_flags & IDE_HFLAG_VDMA)) &&
761 drive->using_dma) 758 drive->using_dma)
762 hwif->dma_host_set(drive, 1); 759 hwif->dma_ops->dma_host_set(drive, 1);
763 else if (hwif->dma_host_set) /* check if host supports DMA */ 760 else if (hwif->dma_ops) /* check if host supports DMA */
764 ide_dma_off_quietly(drive); 761 ide_dma_off_quietly(drive);
765#endif 762#endif
766 763
@@ -842,7 +839,7 @@ void ide_execute_command(ide_drive_t *drive, u8 cmd, ide_handler_t *handler,
842 839
843 spin_lock_irqsave(&ide_lock, flags); 840 spin_lock_irqsave(&ide_lock, flags);
844 __ide_set_handler(drive, handler, timeout, expiry); 841 __ide_set_handler(drive, handler, timeout, expiry);
845 hwif->OUTBSYNC(drive, cmd, hwif->io_ports[IDE_COMMAND_OFFSET]); 842 hwif->OUTBSYNC(drive, cmd, hwif->io_ports.command_addr);
846 /* 843 /*
847 * Drive takes 400nS to respond, we must avoid the IRQ being 844 * Drive takes 400nS to respond, we must avoid the IRQ being
848 * serviced before that. 845 * serviced before that.
@@ -905,10 +902,11 @@ static ide_startstop_t reset_pollfunc (ide_drive_t *drive)
905{ 902{
906 ide_hwgroup_t *hwgroup = HWGROUP(drive); 903 ide_hwgroup_t *hwgroup = HWGROUP(drive);
907 ide_hwif_t *hwif = HWIF(drive); 904 ide_hwif_t *hwif = HWIF(drive);
905 const struct ide_port_ops *port_ops = hwif->port_ops;
908 u8 tmp; 906 u8 tmp;
909 907
910 if (hwif->reset_poll != NULL) { 908 if (port_ops && port_ops->reset_poll) {
911 if (hwif->reset_poll(drive)) { 909 if (port_ops->reset_poll(drive)) {
912 printk(KERN_ERR "%s: host reset_poll failure for %s.\n", 910 printk(KERN_ERR "%s: host reset_poll failure for %s.\n",
913 hwif->name, drive->name); 911 hwif->name, drive->name);
914 return ide_stopped; 912 return ide_stopped;
@@ -974,6 +972,8 @@ static void ide_disk_pre_reset(ide_drive_t *drive)
974 972
975static void pre_reset(ide_drive_t *drive) 973static void pre_reset(ide_drive_t *drive)
976{ 974{
975 const struct ide_port_ops *port_ops = drive->hwif->port_ops;
976
977 if (drive->media == ide_disk) 977 if (drive->media == ide_disk)
978 ide_disk_pre_reset(drive); 978 ide_disk_pre_reset(drive);
979 else 979 else
@@ -994,8 +994,8 @@ static void pre_reset(ide_drive_t *drive)
994 return; 994 return;
995 } 995 }
996 996
997 if (HWIF(drive)->pre_reset != NULL) 997 if (port_ops && port_ops->pre_reset)
998 HWIF(drive)->pre_reset(drive); 998 port_ops->pre_reset(drive);
999 999
1000 if (drive->current_speed != 0xff) 1000 if (drive->current_speed != 0xff)
1001 drive->desired_speed = drive->current_speed; 1001 drive->desired_speed = drive->current_speed;
@@ -1023,12 +1023,16 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
1023 unsigned long flags; 1023 unsigned long flags;
1024 ide_hwif_t *hwif; 1024 ide_hwif_t *hwif;
1025 ide_hwgroup_t *hwgroup; 1025 ide_hwgroup_t *hwgroup;
1026 struct ide_io_ports *io_ports;
1027 const struct ide_port_ops *port_ops;
1026 u8 ctl; 1028 u8 ctl;
1027 1029
1028 spin_lock_irqsave(&ide_lock, flags); 1030 spin_lock_irqsave(&ide_lock, flags);
1029 hwif = HWIF(drive); 1031 hwif = HWIF(drive);
1030 hwgroup = HWGROUP(drive); 1032 hwgroup = HWGROUP(drive);
1031 1033
1034 io_ports = &hwif->io_ports;
1035
1032 /* We must not reset with running handlers */ 1036 /* We must not reset with running handlers */
1033 BUG_ON(hwgroup->handler != NULL); 1037 BUG_ON(hwgroup->handler != NULL);
1034 1038
@@ -1038,8 +1042,7 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
1038 pre_reset(drive); 1042 pre_reset(drive);
1039 SELECT_DRIVE(drive); 1043 SELECT_DRIVE(drive);
1040 udelay (20); 1044 udelay (20);
1041 hwif->OUTBSYNC(drive, WIN_SRST, 1045 hwif->OUTBSYNC(drive, WIN_SRST, io_ports->command_addr);
1042 hwif->io_ports[IDE_COMMAND_OFFSET]);
1043 ndelay(400); 1046 ndelay(400);
1044 hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE; 1047 hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
1045 hwgroup->polling = 1; 1048 hwgroup->polling = 1;
@@ -1055,7 +1058,7 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
1055 for (unit = 0; unit < MAX_DRIVES; ++unit) 1058 for (unit = 0; unit < MAX_DRIVES; ++unit)
1056 pre_reset(&hwif->drives[unit]); 1059 pre_reset(&hwif->drives[unit]);
1057 1060
1058 if (hwif->io_ports[IDE_CONTROL_OFFSET] == 0) { 1061 if (io_ports->ctl_addr == 0) {
1059 spin_unlock_irqrestore(&ide_lock, flags); 1062 spin_unlock_irqrestore(&ide_lock, flags);
1060 return ide_stopped; 1063 return ide_stopped;
1061 } 1064 }
@@ -1070,14 +1073,14 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
1070 * recover from reset very quickly, saving us the first 50ms wait time. 1073 * recover from reset very quickly, saving us the first 50ms wait time.
1071 */ 1074 */
1072 /* set SRST and nIEN */ 1075 /* set SRST and nIEN */
1073 hwif->OUTBSYNC(drive, drive->ctl|6, hwif->io_ports[IDE_CONTROL_OFFSET]); 1076 hwif->OUTBSYNC(drive, drive->ctl|6, io_ports->ctl_addr);
1074 /* more than enough time */ 1077 /* more than enough time */
1075 udelay(10); 1078 udelay(10);
1076 if (drive->quirk_list == 2) 1079 if (drive->quirk_list == 2)
1077 ctl = drive->ctl; /* clear SRST and nIEN */ 1080 ctl = drive->ctl; /* clear SRST and nIEN */
1078 else 1081 else
1079 ctl = drive->ctl | 2; /* clear SRST, leave nIEN */ 1082 ctl = drive->ctl | 2; /* clear SRST, leave nIEN */
1080 hwif->OUTBSYNC(drive, ctl, hwif->io_ports[IDE_CONTROL_OFFSET]); 1083 hwif->OUTBSYNC(drive, ctl, io_ports->ctl_addr);
1081 /* more than enough time */ 1084 /* more than enough time */
1082 udelay(10); 1085 udelay(10);
1083 hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE; 1086 hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
@@ -1089,8 +1092,9 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
1089 * state when the disks are reset this way. At least, the Winbond 1092 * state when the disks are reset this way. At least, the Winbond
1090 * 553 documentation says that 1093 * 553 documentation says that
1091 */ 1094 */
1092 if (hwif->resetproc) 1095 port_ops = hwif->port_ops;
1093 hwif->resetproc(drive); 1096 if (port_ops && port_ops->resetproc)
1097 port_ops->resetproc(drive);
1094 1098
1095 spin_unlock_irqrestore(&ide_lock, flags); 1099 spin_unlock_irqrestore(&ide_lock, flags);
1096 return ide_started; 1100 return ide_started;
@@ -1121,7 +1125,7 @@ int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout)
1121 * about locking issues (2.5 work ?). 1125 * about locking issues (2.5 work ?).
1122 */ 1126 */
1123 mdelay(1); 1127 mdelay(1);
1124 stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]); 1128 stat = hwif->INB(hwif->io_ports.status_addr);
1125 if ((stat & BUSY_STAT) == 0) 1129 if ((stat & BUSY_STAT) == 0)
1126 return 0; 1130 return 0;
1127 /* 1131 /*
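[editor's note] The ide-iops.c changes show the calling convention for the new port_ops table: the pointer itself may be NULL (a port with no chip-specific hooks at all) and each hook may individually be NULL, so every dispatch site checks both before calling. A small model of that convention, with illustrative types:

	#include <stddef.h>
	#include <stdio.h>

	struct drv;

	struct port_ops {
		void (*selectproc)(struct drv *);
		void (*maskproc)(struct drv *, int);
	};

	struct drv {
		const char *name;
		const struct port_ops *port_ops;   /* may be NULL */
	};

	static void select_drive(struct drv *d)
	{
		const struct port_ops *po = d->port_ops;

		if (po && po->selectproc)          /* optional chip-specific hook */
			po->selectproc(d);

		printf("%s: selected\n", d->name); /* then the generic register write */
	}

	int main(void)
	{
		struct drv plain = { "hda", NULL };

		select_drive(&plain);              /* safe: no ops table at all */
		return 0;
	}

One pattern replaces scattered tests on a dozen separate hwif fields, at the cost of a single extra pointer dereference per call.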
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index 7031a8dcf692..6f04ea3e93a8 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -85,7 +85,7 @@ static u8 ide_rate_filter(ide_drive_t *drive, u8 speed)
85 mode = XFER_PIO_4; 85 mode = XFER_PIO_4;
86 } 86 }
87 87
88// printk("%s: mode 0x%02x, speed 0x%02x\n", __FUNCTION__, mode, speed); 88/* printk("%s: mode 0x%02x, speed 0x%02x\n", __func__, mode, speed); */
89 89
90 return min(speed, mode); 90 return min(speed, mode);
91} 91}
@@ -274,16 +274,6 @@ u8 ide_get_best_pio_mode (ide_drive_t *drive, u8 mode_wanted, u8 max_mode)
274 if (overridden) 274 if (overridden)
275 printk(KERN_INFO "%s: tPIO > 2, assuming tPIO = 2\n", 275 printk(KERN_INFO "%s: tPIO > 2, assuming tPIO = 2\n",
276 drive->name); 276 drive->name);
277
278 /*
279 * Conservative "downgrade" for all pre-ATA2 drives
280 */
281 if ((drive->hwif->host_flags & IDE_HFLAG_PIO_NO_DOWNGRADE) == 0 &&
282 pio_mode && pio_mode < 4) {
283 pio_mode--;
284 printk(KERN_INFO "%s: applying conservative "
285 "PIO \"downgrade\"\n", drive->name);
286 }
287 } 277 }
288 278
289 if (pio_mode > max_mode) 279 if (pio_mode > max_mode)
@@ -298,9 +288,11 @@ EXPORT_SYMBOL_GPL(ide_get_best_pio_mode);
298void ide_set_pio(ide_drive_t *drive, u8 req_pio) 288void ide_set_pio(ide_drive_t *drive, u8 req_pio)
299{ 289{
300 ide_hwif_t *hwif = drive->hwif; 290 ide_hwif_t *hwif = drive->hwif;
291 const struct ide_port_ops *port_ops = hwif->port_ops;
301 u8 host_pio, pio; 292 u8 host_pio, pio;
302 293
303 if (hwif->set_pio_mode == NULL) 294 if (port_ops == NULL || port_ops->set_pio_mode == NULL ||
295 (hwif->host_flags & IDE_HFLAG_NO_SET_MODE))
304 return; 296 return;
305 297
306 BUG_ON(hwif->pio_mask == 0x00); 298 BUG_ON(hwif->pio_mask == 0x00);
@@ -352,26 +344,30 @@ void ide_toggle_bounce(ide_drive_t *drive, int on)
352int ide_set_pio_mode(ide_drive_t *drive, const u8 mode) 344int ide_set_pio_mode(ide_drive_t *drive, const u8 mode)
353{ 345{
354 ide_hwif_t *hwif = drive->hwif; 346 ide_hwif_t *hwif = drive->hwif;
347 const struct ide_port_ops *port_ops = hwif->port_ops;
348
349 if (hwif->host_flags & IDE_HFLAG_NO_SET_MODE)
350 return 0;
355 351
356 if (hwif->set_pio_mode == NULL) 352 if (port_ops == NULL || port_ops->set_pio_mode == NULL)
357 return -1; 353 return -1;
358 354
359 /* 355 /*
360 * TODO: temporary hack for some legacy host drivers that didn't 356 * TODO: temporary hack for some legacy host drivers that didn't
361 * set transfer mode on the device in ->set_pio_mode method... 357 * set transfer mode on the device in ->set_pio_mode method...
362 */ 358 */
363 if (hwif->set_dma_mode == NULL) { 359 if (port_ops->set_dma_mode == NULL) {
364 hwif->set_pio_mode(drive, mode - XFER_PIO_0); 360 port_ops->set_pio_mode(drive, mode - XFER_PIO_0);
365 return 0; 361 return 0;
366 } 362 }
367 363
368 if (hwif->host_flags & IDE_HFLAG_POST_SET_MODE) { 364 if (hwif->host_flags & IDE_HFLAG_POST_SET_MODE) {
369 if (ide_config_drive_speed(drive, mode)) 365 if (ide_config_drive_speed(drive, mode))
370 return -1; 366 return -1;
371 hwif->set_pio_mode(drive, mode - XFER_PIO_0); 367 port_ops->set_pio_mode(drive, mode - XFER_PIO_0);
372 return 0; 368 return 0;
373 } else { 369 } else {
374 hwif->set_pio_mode(drive, mode - XFER_PIO_0); 370 port_ops->set_pio_mode(drive, mode - XFER_PIO_0);
375 return ide_config_drive_speed(drive, mode); 371 return ide_config_drive_speed(drive, mode);
376 } 372 }
377} 373}
@@ -379,17 +375,21 @@ int ide_set_pio_mode(ide_drive_t *drive, const u8 mode)
379int ide_set_dma_mode(ide_drive_t *drive, const u8 mode) 375int ide_set_dma_mode(ide_drive_t *drive, const u8 mode)
380{ 376{
381 ide_hwif_t *hwif = drive->hwif; 377 ide_hwif_t *hwif = drive->hwif;
378 const struct ide_port_ops *port_ops = hwif->port_ops;
379
380 if (hwif->host_flags & IDE_HFLAG_NO_SET_MODE)
381 return 0;
382 382
383 if (hwif->set_dma_mode == NULL) 383 if (port_ops == NULL || port_ops->set_dma_mode == NULL)
384 return -1; 384 return -1;
385 385
386 if (hwif->host_flags & IDE_HFLAG_POST_SET_MODE) { 386 if (hwif->host_flags & IDE_HFLAG_POST_SET_MODE) {
387 if (ide_config_drive_speed(drive, mode)) 387 if (ide_config_drive_speed(drive, mode))
388 return -1; 388 return -1;
389 hwif->set_dma_mode(drive, mode); 389 port_ops->set_dma_mode(drive, mode);
390 return 0; 390 return 0;
391 } else { 391 } else {
392 hwif->set_dma_mode(drive, mode); 392 port_ops->set_dma_mode(drive, mode);
393 return ide_config_drive_speed(drive, mode); 393 return ide_config_drive_speed(drive, mode);
394 } 394 }
395} 395}
@@ -409,8 +409,10 @@ EXPORT_SYMBOL_GPL(ide_set_dma_mode);
409int ide_set_xfer_rate(ide_drive_t *drive, u8 rate) 409int ide_set_xfer_rate(ide_drive_t *drive, u8 rate)
410{ 410{
411 ide_hwif_t *hwif = drive->hwif; 411 ide_hwif_t *hwif = drive->hwif;
412 const struct ide_port_ops *port_ops = hwif->port_ops;
412 413
413 if (hwif->set_dma_mode == NULL) 414 if (port_ops == NULL || port_ops->set_dma_mode == NULL ||
415 (hwif->host_flags & IDE_HFLAG_NO_SET_MODE))
414 return -1; 416 return -1;
415 417
416 rate = ide_rate_filter(drive, rate); 418 rate = ide_rate_filter(drive, rate);
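[editor's note] In ide-lib.c the mode setters gain two gates: IDE_HFLAG_NO_SET_MODE turns them into silent no-ops that report success, while a missing port_ops table or hook still returns -1. What remains is an ordering rule: hosts flagged IDE_HFLAG_POST_SET_MODE program the drive (ide_config_drive_speed) before touching the controller timings, everyone else does the reverse. A stub model of that rule; the flag value and all types here are illustrative:

	#include <stdio.h>

	#define HFLAG_POST_SET_MODE 0x1

	struct host {
		unsigned int flags;
		void (*set_pio_mode)(struct host *h, int pio);
	};

	static int config_drive_speed(struct host *h, int mode)
	{
		(void)h;
		printf("drive programmed for mode %d\n", mode);
		return 0;                       /* 0 == success, as in the kernel */
	}

	static int set_pio_mode(struct host *h, int mode)
	{
		if (h->flags & HFLAG_POST_SET_MODE) {
			if (config_drive_speed(h, mode))
				return -1;      /* leave controller untouched on failure */
			h->set_pio_mode(h, mode);
			return 0;
		}
		h->set_pio_mode(h, mode);       /* controller first... */
		return config_drive_speed(h, mode);
	}

	static void ctrl_pio(struct host *h, int pio)
	{
		(void)h;
		printf("controller timing set for PIO %d\n", pio);
	}

	int main(void)
	{
		struct host h = { 0, ctrl_pio };

		return set_pio_mode(&h, 4);
	}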
diff --git a/drivers/ide/ide-pnp.c b/drivers/ide/ide-pnp.c
index 34c2ad36ce54..6a8953f68e9f 100644
--- a/drivers/ide/ide-pnp.c
+++ b/drivers/ide/ide-pnp.c
@@ -11,34 +11,52 @@
11 * 11 *
12 * You should have received a copy of the GNU General Public License 12 * You should have received a copy of the GNU General Public License
13 * (for example /usr/src/linux/COPYING); if not, write to the Free 13 * (for example /usr/src/linux/COPYING); if not, write to the Free
14 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 14 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
15 */ 15 */
16 16
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/pnp.h> 18#include <linux/pnp.h>
19#include <linux/ide.h> 19#include <linux/ide.h>
20 20
21#define DRV_NAME "ide-pnp"
22
21/* Add your devices here :)) */ 23/* Add your devices here :)) */
22static struct pnp_device_id idepnp_devices[] = { 24static struct pnp_device_id idepnp_devices[] = {
23 /* Generic ESDI/IDE/ATA compatible hard disk controller */ 25 /* Generic ESDI/IDE/ATA compatible hard disk controller */
24 {.id = "PNP0600", .driver_data = 0}, 26 {.id = "PNP0600", .driver_data = 0},
25 {.id = ""} 27 {.id = ""}
26}; 28};
27 29
28static int idepnp_probe(struct pnp_dev * dev, const struct pnp_device_id *dev_id) 30static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
29{ 31{
30 hw_regs_t hw; 32 hw_regs_t hw;
31 ide_hwif_t *hwif; 33 ide_hwif_t *hwif;
34 unsigned long base, ctl;
32 35
33 if (!(pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) && pnp_irq_valid(dev, 0))) 36 if (!(pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) && pnp_irq_valid(dev, 0)))
34 return -1; 37 return -1;
35 38
39 base = pnp_port_start(dev, 0);
40 ctl = pnp_port_start(dev, 1);
41
42 if (!request_region(base, 8, DRV_NAME)) {
43 printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
44 DRV_NAME, base, base + 7);
45 return -EBUSY;
46 }
47
48 if (!request_region(ctl, 1, DRV_NAME)) {
49 printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
50 DRV_NAME, ctl);
51 release_region(base, 8);
52 return -EBUSY;
53 }
54
36 memset(&hw, 0, sizeof(hw)); 55 memset(&hw, 0, sizeof(hw));
37 ide_std_init_ports(&hw, pnp_port_start(dev, 0), 56 ide_std_init_ports(&hw, base, ctl);
38 pnp_port_start(dev, 1));
39 hw.irq = pnp_irq(dev, 0); 57 hw.irq = pnp_irq(dev, 0);
40 58
41 hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]); 59 hwif = ide_find_port();
42 if (hwif) { 60 if (hwif) {
43 u8 index = hwif->index; 61 u8 index = hwif->index;
44 u8 idx[4] = { index, 0xff, 0xff, 0xff }; 62 u8 idx[4] = { index, 0xff, 0xff, 0xff };
@@ -47,24 +65,27 @@ static int idepnp_probe(struct pnp_dev * dev, const struct pnp_device_id *dev_id
47 ide_init_port_hw(hwif, &hw); 65 ide_init_port_hw(hwif, &hw);
48 66
49 printk(KERN_INFO "ide%d: generic PnP IDE interface\n", index); 67 printk(KERN_INFO "ide%d: generic PnP IDE interface\n", index);
50 pnp_set_drvdata(dev,hwif); 68 pnp_set_drvdata(dev, hwif);
51 69
52 ide_device_add(idx, NULL); 70 ide_device_add(idx, NULL);
53 71
54 return 0; 72 return 0;
55 } 73 }
56 74
75 release_region(ctl, 1);
76 release_region(base, 8);
77
57 return -1; 78 return -1;
58} 79}
59 80
60static void idepnp_remove(struct pnp_dev * dev) 81static void idepnp_remove(struct pnp_dev *dev)
61{ 82{
62 ide_hwif_t *hwif = pnp_get_drvdata(dev); 83 ide_hwif_t *hwif = pnp_get_drvdata(dev);
63 84
64 if (hwif) 85 ide_unregister(hwif);
65 ide_unregister(hwif->index); 86
66 else 87 release_region(pnp_port_start(dev, 1), 1);
67 printk(KERN_ERR "idepnp: Unable to remove device, please report.\n"); 88 release_region(pnp_port_start(dev, 0), 8);
68} 89}
69 90
70static struct pnp_driver idepnp_driver = { 91static struct pnp_driver idepnp_driver = {
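[editor's note] The ide-pnp.c changes make the driver own its I/O regions explicitly: probe claims both ranges (releasing the first if the second claim fails, and both if no free hwif is found), and remove releases them again after unregistering the interface. A toy probe/remove pair mirroring that discipline; claim()/unclaim() are stand-ins for request_region()/release_region():

	#include <stdio.h>

	static int  claim(unsigned long s, unsigned long n)   { (void)s; (void)n; return 1; }
	static void unclaim(unsigned long s, unsigned long n) { (void)s; (void)n; }

	static int probe_port(unsigned long base, unsigned long ctl)
	{
		if (!claim(base, 8))
			return -1;
		if (!claim(ctl, 1)) {
			unclaim(base, 8);       /* roll back on partial failure */
			return -1;
		}
		printf("port at 0x%lx/0x%lx registered\n", base, ctl);
		return 0;
	}

	static void remove_port(unsigned long base, unsigned long ctl)
	{
		/* mirror of probe: unregister the interface first, then
		 * release the regions in reverse order of the claims */
		unclaim(ctl, 1);
		unclaim(base, 8);
	}

	int main(void)
	{
		if (probe_port(0x1f0, 0x3f6) == 0)
			remove_port(0x1f0, 0x3f6);
		return 0;
	}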
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 6a196c27b0aa..862f02603f9b 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -264,6 +264,7 @@ err_misc:
264static int actual_try_to_identify (ide_drive_t *drive, u8 cmd) 264static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
265{ 265{
266 ide_hwif_t *hwif = HWIF(drive); 266 ide_hwif_t *hwif = HWIF(drive);
267 struct ide_io_ports *io_ports = &hwif->io_ports;
267 int use_altstatus = 0, rc; 268 int use_altstatus = 0, rc;
268 unsigned long timeout; 269 unsigned long timeout;
269 u8 s = 0, a = 0; 270 u8 s = 0, a = 0;
@@ -271,7 +272,7 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
271 /* take a deep breath */ 272 /* take a deep breath */
272 msleep(50); 273 msleep(50);
273 274
274 if (hwif->io_ports[IDE_CONTROL_OFFSET]) { 275 if (io_ports->ctl_addr) {
275 a = ide_read_altstatus(drive); 276 a = ide_read_altstatus(drive);
276 s = ide_read_status(drive); 277 s = ide_read_status(drive);
277 if ((a ^ s) & ~INDEX_STAT) 278 if ((a ^ s) & ~INDEX_STAT)
@@ -289,10 +290,10 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
289 */ 290 */
290 if ((cmd == WIN_PIDENTIFY)) 291 if ((cmd == WIN_PIDENTIFY))
291 /* disable dma & overlap */ 292 /* disable dma & overlap */
292 hwif->OUTB(0, hwif->io_ports[IDE_FEATURE_OFFSET]); 293 hwif->OUTB(0, io_ports->feature_addr);
293 294
294 /* ask drive for ID */ 295 /* ask drive for ID */
295 hwif->OUTB(cmd, hwif->io_ports[IDE_COMMAND_OFFSET]); 296 hwif->OUTB(cmd, io_ports->command_addr);
296 297
297 timeout = ((cmd == WIN_IDENTIFY) ? WAIT_WORSTCASE : WAIT_PIDENTIFY) / 2; 298 timeout = ((cmd == WIN_IDENTIFY) ? WAIT_WORSTCASE : WAIT_PIDENTIFY) / 2;
298 timeout += jiffies; 299 timeout += jiffies;
@@ -353,7 +354,7 @@ static int try_to_identify (ide_drive_t *drive, u8 cmd)
353 * interrupts during the identify-phase that 354 * interrupts during the identify-phase that
354 * the irq handler isn't expecting. 355 * the irq handler isn't expecting.
355 */ 356 */
356 if (hwif->io_ports[IDE_CONTROL_OFFSET]) { 357 if (hwif->io_ports.ctl_addr) {
357 if (!hwif->irq) { 358 if (!hwif->irq) {
358 autoprobe = 1; 359 autoprobe = 1;
359 cookie = probe_irq_on(); 360 cookie = probe_irq_on();
@@ -393,7 +394,7 @@ static int ide_busy_sleep(ide_hwif_t *hwif)
393 394
394 do { 395 do {
395 msleep(50); 396 msleep(50);
396 stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]); 397 stat = hwif->INB(hwif->io_ports.status_addr);
397 if ((stat & BUSY_STAT) == 0) 398 if ((stat & BUSY_STAT) == 0)
398 return 0; 399 return 0;
399 } while (time_before(jiffies, timeout)); 400 } while (time_before(jiffies, timeout));
@@ -425,6 +426,7 @@ static int ide_busy_sleep(ide_hwif_t *hwif)
425static int do_probe (ide_drive_t *drive, u8 cmd) 426static int do_probe (ide_drive_t *drive, u8 cmd)
426{ 427{
427 ide_hwif_t *hwif = HWIF(drive); 428 ide_hwif_t *hwif = HWIF(drive);
429 struct ide_io_ports *io_ports = &hwif->io_ports;
428 int rc; 430 int rc;
429 u8 stat; 431 u8 stat;
430 432
@@ -445,7 +447,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
445 msleep(50); 447 msleep(50);
446 SELECT_DRIVE(drive); 448 SELECT_DRIVE(drive);
447 msleep(50); 449 msleep(50);
448 if (hwif->INB(hwif->io_ports[IDE_SELECT_OFFSET]) != drive->select.all && 450 if (hwif->INB(io_ports->device_addr) != drive->select.all &&
449 !drive->present) { 451 !drive->present) {
450 if (drive->select.b.unit != 0) { 452 if (drive->select.b.unit != 0) {
451 /* exit with drive0 selected */ 453 /* exit with drive0 selected */
@@ -472,17 +474,13 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
472 if (stat == (BUSY_STAT | READY_STAT)) 474 if (stat == (BUSY_STAT | READY_STAT))
473 return 4; 475 return 4;
474 476
475 if ((rc == 1 && cmd == WIN_PIDENTIFY) && 477 if (rc == 1 && cmd == WIN_PIDENTIFY) {
476 ((drive->autotune == IDE_TUNE_DEFAULT) ||
477 (drive->autotune == IDE_TUNE_AUTO))) {
478 printk(KERN_ERR "%s: no response (status = 0x%02x), " 478 printk(KERN_ERR "%s: no response (status = 0x%02x), "
479 "resetting drive\n", drive->name, stat); 479 "resetting drive\n", drive->name, stat);
480 msleep(50); 480 msleep(50);
481 hwif->OUTB(drive->select.all, 481 hwif->OUTB(drive->select.all, io_ports->device_addr);
482 hwif->io_ports[IDE_SELECT_OFFSET]);
483 msleep(50); 482 msleep(50);
484 hwif->OUTB(WIN_SRST, 483 hwif->OUTB(WIN_SRST, io_ports->command_addr);
485 hwif->io_ports[IDE_COMMAND_OFFSET]);
486 (void)ide_busy_sleep(hwif); 484 (void)ide_busy_sleep(hwif);
487 rc = try_to_identify(drive, cmd); 485 rc = try_to_identify(drive, cmd);
488 } 486 }
@@ -518,7 +516,7 @@ static void enable_nest (ide_drive_t *drive)
518 printk("%s: enabling %s -- ", hwif->name, drive->id->model); 516 printk("%s: enabling %s -- ", hwif->name, drive->id->model);
519 SELECT_DRIVE(drive); 517 SELECT_DRIVE(drive);
520 msleep(50); 518 msleep(50);
521 hwif->OUTB(EXABYTE_ENABLE_NEST, hwif->io_ports[IDE_COMMAND_OFFSET]); 519 hwif->OUTB(EXABYTE_ENABLE_NEST, hwif->io_ports.command_addr);
522 520
523 if (ide_busy_sleep(hwif)) { 521 if (ide_busy_sleep(hwif)) {
524 printk(KERN_CONT "failed (timeout)\n"); 522 printk(KERN_CONT "failed (timeout)\n");
@@ -644,7 +642,7 @@ static int ide_register_port(ide_hwif_t *hwif)
644 ret = device_register(&hwif->gendev); 642 ret = device_register(&hwif->gendev);
645 if (ret < 0) { 643 if (ret < 0) {
646 printk(KERN_WARNING "IDE: %s: device_register error: %d\n", 644 printk(KERN_WARNING "IDE: %s: device_register error: %d\n",
647 __FUNCTION__, ret); 645 __func__, ret);
648 goto out; 646 goto out;
649 } 647 }
650 648
@@ -773,8 +771,7 @@ static int ide_probe_port(ide_hwif_t *hwif)
773 771
774 BUG_ON(hwif->present); 772 BUG_ON(hwif->present);
775 773
776 if (hwif->noprobe || 774 if (hwif->drives[0].noprobe && hwif->drives[1].noprobe)
777 (hwif->drives[0].noprobe && hwif->drives[1].noprobe))
778 return -EACCES; 775 return -EACCES;
779 776
780 /* 777 /*
@@ -801,14 +798,9 @@ static int ide_probe_port(ide_hwif_t *hwif)
801 if (drive->present) 798 if (drive->present)
802 rc = 0; 799 rc = 0;
803 } 800 }
804 if (hwif->io_ports[IDE_CONTROL_OFFSET] && hwif->reset) { 801
805 printk(KERN_WARNING "%s: reset\n", hwif->name);
806 hwif->OUTB(12, hwif->io_ports[IDE_CONTROL_OFFSET]);
807 udelay(10);
808 hwif->OUTB(8, hwif->io_ports[IDE_CONTROL_OFFSET]);
809 (void)ide_busy_sleep(hwif);
810 }
811 local_irq_restore(flags); 802 local_irq_restore(flags);
803
812 /* 804 /*
813 * Use cached IRQ number. It might be (and is...) changed by probe 805 * Use cached IRQ number. It might be (and is...) changed by probe
814 * code above 806 * code above
@@ -821,29 +813,25 @@ static int ide_probe_port(ide_hwif_t *hwif)
821 813
822static void ide_port_tune_devices(ide_hwif_t *hwif) 814static void ide_port_tune_devices(ide_hwif_t *hwif)
823{ 815{
816 const struct ide_port_ops *port_ops = hwif->port_ops;
824 int unit; 817 int unit;
825 818
826 for (unit = 0; unit < MAX_DRIVES; unit++) { 819 for (unit = 0; unit < MAX_DRIVES; unit++) {
827 ide_drive_t *drive = &hwif->drives[unit]; 820 ide_drive_t *drive = &hwif->drives[unit];
828 821
829 if (drive->present && hwif->quirkproc) 822 if (drive->present && port_ops && port_ops->quirkproc)
830 hwif->quirkproc(drive); 823 port_ops->quirkproc(drive);
831 } 824 }
832 825
833 for (unit = 0; unit < MAX_DRIVES; ++unit) { 826 for (unit = 0; unit < MAX_DRIVES; ++unit) {
834 ide_drive_t *drive = &hwif->drives[unit]; 827 ide_drive_t *drive = &hwif->drives[unit];
835 828
836 if (drive->present) { 829 if (drive->present) {
837 if (drive->autotune == IDE_TUNE_AUTO) 830 ide_set_max_pio(drive);
838 ide_set_max_pio(drive);
839
840 if (drive->autotune != IDE_TUNE_DEFAULT &&
841 drive->autotune != IDE_TUNE_AUTO)
842 continue;
843 831
844 drive->nice1 = 1; 832 drive->nice1 = 1;
845 833
846 if (hwif->dma_host_set) 834 if (hwif->dma_ops)
847 ide_set_dma(drive); 835 ide_set_dma(drive);
848 } 836 }
849 } 837 }
@@ -994,6 +982,7 @@ static void ide_port_setup_devices(ide_hwif_t *hwif)
994 */ 982 */
995static int init_irq (ide_hwif_t *hwif) 983static int init_irq (ide_hwif_t *hwif)
996{ 984{
985 struct ide_io_ports *io_ports = &hwif->io_ports;
997 unsigned int index; 986 unsigned int index;
998 ide_hwgroup_t *hwgroup; 987 ide_hwgroup_t *hwgroup;
999 ide_hwif_t *match = NULL; 988 ide_hwif_t *match = NULL;
@@ -1077,9 +1066,9 @@ static int init_irq (ide_hwif_t *hwif)
1077 if (IDE_CHIPSET_IS_PCI(hwif->chipset)) 1066 if (IDE_CHIPSET_IS_PCI(hwif->chipset))
1078 sa = IRQF_SHARED; 1067 sa = IRQF_SHARED;
1079 1068
1080 if (hwif->io_ports[IDE_CONTROL_OFFSET]) 1069 if (io_ports->ctl_addr)
1081 /* clear nIEN */ 1070 /* clear nIEN */
1082 hwif->OUTB(0x08, hwif->io_ports[IDE_CONTROL_OFFSET]); 1071 hwif->OUTB(0x08, io_ports->ctl_addr);
1083 1072
1084 if (request_irq(hwif->irq,&ide_intr,sa,hwif->name,hwgroup)) 1073 if (request_irq(hwif->irq,&ide_intr,sa,hwif->name,hwgroup))
1085 goto out_unlink; 1074 goto out_unlink;
@@ -1095,12 +1084,11 @@ static int init_irq (ide_hwif_t *hwif)
1095 1084
1096#if !defined(__mc68000__) 1085#if !defined(__mc68000__)
1097 printk("%s at 0x%03lx-0x%03lx,0x%03lx on irq %d", hwif->name, 1086 printk("%s at 0x%03lx-0x%03lx,0x%03lx on irq %d", hwif->name,
1098 hwif->io_ports[IDE_DATA_OFFSET], 1087 io_ports->data_addr, io_ports->status_addr,
1099 hwif->io_ports[IDE_DATA_OFFSET]+7, 1088 io_ports->ctl_addr, hwif->irq);
1100 hwif->io_ports[IDE_CONTROL_OFFSET], hwif->irq);
1101#else 1089#else
1102 printk("%s at 0x%08lx on irq %d", hwif->name, 1090 printk("%s at 0x%08lx on irq %d", hwif->name,
1103 hwif->io_ports[IDE_DATA_OFFSET], hwif->irq); 1091 io_ports->data_addr, hwif->irq);
1104#endif /* __mc68000__ */ 1092#endif /* __mc68000__ */
1105 if (match) 1093 if (match)
1106 printk(" (%sed with %s)", 1094 printk(" (%sed with %s)",
@@ -1242,8 +1230,8 @@ static int hwif_init(ide_hwif_t *hwif)
1242 int old_irq; 1230 int old_irq;
1243 1231
1244 if (!hwif->irq) { 1232 if (!hwif->irq) {
1245 if (!(hwif->irq = ide_default_irq(hwif->io_ports[IDE_DATA_OFFSET]))) 1233 hwif->irq = ide_default_irq(hwif->io_ports.data_addr);
1246 { 1234 if (!hwif->irq) {
1247 printk("%s: DISABLED, NO IRQ\n", hwif->name); 1235 printk("%s: DISABLED, NO IRQ\n", hwif->name);
1248 return 0; 1236 return 0;
1249 } 1237 }
@@ -1272,7 +1260,8 @@ static int hwif_init(ide_hwif_t *hwif)
1272 * It failed to initialise. Find the default IRQ for 1260 * It failed to initialise. Find the default IRQ for
1273 * this port and try that. 1261 * this port and try that.
1274 */ 1262 */
1275 if (!(hwif->irq = ide_default_irq(hwif->io_ports[IDE_DATA_OFFSET]))) { 1263 hwif->irq = ide_default_irq(hwif->io_ports.data_addr);
1264 if (!hwif->irq) {
1276 printk("%s: Disabled unable to get IRQ %d.\n", 1265 printk("%s: Disabled unable to get IRQ %d.\n",
1277 hwif->name, old_irq); 1266 hwif->name, old_irq);
1278 goto out; 1267 goto out;
@@ -1324,6 +1313,7 @@ static void hwif_register_devices(ide_hwif_t *hwif)
1324 1313
1325static void ide_port_init_devices(ide_hwif_t *hwif) 1314static void ide_port_init_devices(ide_hwif_t *hwif)
1326{ 1315{
1316 const struct ide_port_ops *port_ops = hwif->port_ops;
1327 int i; 1317 int i;
1328 1318
1329 for (i = 0; i < MAX_DRIVES; i++) { 1319 for (i = 0; i < MAX_DRIVES; i++) {
@@ -1335,12 +1325,10 @@ static void ide_port_init_devices(ide_hwif_t *hwif)
1335 drive->unmask = 1; 1325 drive->unmask = 1;
1336 if (hwif->host_flags & IDE_HFLAG_NO_UNMASK_IRQS) 1326 if (hwif->host_flags & IDE_HFLAG_NO_UNMASK_IRQS)
1337 drive->no_unmask = 1; 1327 drive->no_unmask = 1;
1338 if ((hwif->host_flags & IDE_HFLAG_NO_AUTOTUNE) == 0)
1339 drive->autotune = 1;
1340 } 1328 }
1341 1329
1342 if (hwif->port_init_devs) 1330 if (port_ops && port_ops->port_init_devs)
1343 hwif->port_init_devs(hwif); 1331 port_ops->port_init_devs(hwif);
1344} 1332}
1345 1333
1346static void ide_init_port(ide_hwif_t *hwif, unsigned int port, 1334static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
@@ -1355,9 +1343,6 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
1355 if (d->init_iops) 1343 if (d->init_iops)
1356 d->init_iops(hwif); 1344 d->init_iops(hwif);
1357 1345
1358 if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0)
1359 ide_hwif_setup_dma(hwif, d);
1360
1361 if ((!hwif->irq && (d->host_flags & IDE_HFLAG_LEGACY_IRQS)) || 1346 if ((!hwif->irq && (d->host_flags & IDE_HFLAG_LEGACY_IRQS)) ||
1362 (d->host_flags & IDE_HFLAG_FORCE_LEGACY_IRQS)) 1347 (d->host_flags & IDE_HFLAG_FORCE_LEGACY_IRQS))
1363 hwif->irq = port ? 15 : 14; 1348 hwif->irq = port ? 15 : 14;
@@ -1365,16 +1350,36 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
1365 hwif->host_flags = d->host_flags; 1350 hwif->host_flags = d->host_flags;
1366 hwif->pio_mask = d->pio_mask; 1351 hwif->pio_mask = d->pio_mask;
1367 1352
1368 if ((d->host_flags & IDE_HFLAG_SERIALIZE) && hwif->mate) 1353 /* ->set_pio_mode for DTC2278 is currently limited to port 0 */
1369 hwif->mate->serialized = hwif->serialized = 1; 1354 if (hwif->chipset != ide_dtc2278 || hwif->channel == 0)
1355 hwif->port_ops = d->port_ops;
1356
1357 if ((d->host_flags & IDE_HFLAG_SERIALIZE) ||
1358 ((d->host_flags & IDE_HFLAG_SERIALIZE_DMA) && hwif->dma_base)) {
1359 if (hwif->mate)
1360 hwif->mate->serialized = hwif->serialized = 1;
1361 }
1370 1362
1371 hwif->swdma_mask = d->swdma_mask; 1363 hwif->swdma_mask = d->swdma_mask;
1372 hwif->mwdma_mask = d->mwdma_mask; 1364 hwif->mwdma_mask = d->mwdma_mask;
1373 hwif->ultra_mask = d->udma_mask; 1365 hwif->ultra_mask = d->udma_mask;
1374 1366
1375 /* reset DMA masks only for SFF-style DMA controllers */ 1367 if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) {
1376 if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0 && hwif->dma_base == 0) 1368 int rc;
1377 hwif->swdma_mask = hwif->mwdma_mask = hwif->ultra_mask = 0; 1369
1370 if (d->init_dma)
1371 rc = d->init_dma(hwif, d);
1372 else
1373 rc = ide_hwif_setup_dma(hwif, d);
1374
1375 if (rc < 0) {
1376 printk(KERN_INFO "%s: DMA disabled\n", hwif->name);
1377 hwif->swdma_mask = 0;
1378 hwif->mwdma_mask = 0;
1379 hwif->ultra_mask = 0;
1380 } else if (d->dma_ops)
1381 hwif->dma_ops = d->dma_ops;
1382 }
1378 1383
1379 if (d->host_flags & IDE_HFLAG_RQSIZE_256) 1384 if (d->host_flags & IDE_HFLAG_RQSIZE_256)
1380 hwif->rqsize = 256; 1385 hwif->rqsize = 256;
@@ -1386,9 +1391,11 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
1386 1391
1387static void ide_port_cable_detect(ide_hwif_t *hwif) 1392static void ide_port_cable_detect(ide_hwif_t *hwif)
1388{ 1393{
1389 if (hwif->cable_detect && (hwif->ultra_mask & 0x78)) { 1394 const struct ide_port_ops *port_ops = hwif->port_ops;
1395
1396 if (port_ops && port_ops->cable_detect && (hwif->ultra_mask & 0x78)) {
1390 if (hwif->cbl != ATA_CBL_PATA40_SHORT) 1397 if (hwif->cbl != ATA_CBL_PATA40_SHORT)
1391 hwif->cbl = hwif->cable_detect(hwif); 1398 hwif->cbl = port_ops->cable_detect(hwif);
1392 } 1399 }
1393} 1400}
1394 1401
@@ -1444,19 +1451,74 @@ static int ide_sysfs_register_port(ide_hwif_t *hwif)
1444 return rc; 1451 return rc;
1445} 1452}
1446 1453
1454/**
1455 * ide_find_port_slot - find free ide_hwifs[] slot
1456 * @d: IDE port info
1457 *
 1458 * Return the new hwif, or NULL if we are out of free slots.
1459 */
1460
1461ide_hwif_t *ide_find_port_slot(const struct ide_port_info *d)
1462{
1463 ide_hwif_t *hwif;
1464 int i;
1465 u8 bootable = (d && (d->host_flags & IDE_HFLAG_NON_BOOTABLE)) ? 0 : 1;
1466
1467 /*
1468 * Claim an unassigned slot.
1469 *
1470 * Give preference to claiming other slots before claiming ide0/ide1,
1471 * just in case there's another interface yet-to-be-scanned
 1472 * which uses ports 0x1f0/0x170 (the ide0/ide1 defaults).
 1473 *
 1474 * The exception is a bootable card that does not use the standard
 1475 * ports 0x1f0/0x170: such a card may claim ide0/ide1 first.
1476 */
1477 if (bootable) {
1478 i = (d && (d->host_flags & IDE_HFLAG_QD_2ND_PORT)) ? 1 : 0;
1479
1480 for (; i < MAX_HWIFS; i++) {
1481 hwif = &ide_hwifs[i];
1482 if (hwif->chipset == ide_unknown)
1483 return hwif;
1484 }
1485 } else {
1486 for (i = 2; i < MAX_HWIFS; i++) {
1487 hwif = &ide_hwifs[i];
1488 if (hwif->chipset == ide_unknown)
1489 return hwif;
1490 }
1491 for (i = 0; i < 2 && i < MAX_HWIFS; i++) {
1492 hwif = &ide_hwifs[i];
1493 if (hwif->chipset == ide_unknown)
1494 return hwif;
1495 }
1496 }
1497
1498 return NULL;
1499}
1500EXPORT_SYMBOL_GPL(ide_find_port_slot);
1501
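A caller is expected to pair ide_find_port_slot() with ide_init_port_hw() and then pass the claimed index on to ide_device_add(); a minimal sketch for a primary legacy port (the surrounding driver context, including d, is assumed):

	hw_regs_t hw;
	ide_hwif_t *hwif;
	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };

	memset(&hw, 0, sizeof(hw));
	ide_std_init_ports(&hw, 0x1f0, 0x3f6);	/* I/O base and control port */
	hw.irq = 14;

	hwif = ide_find_port_slot(d);	/* d is this driver's ide_port_info */
	if (hwif) {
		ide_init_port_hw(hwif, &hw);
		idx[0] = hwif->index;
	}

	ide_device_add(idx, d);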
1447int ide_device_add_all(u8 *idx, const struct ide_port_info *d) 1502int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
1448{ 1503{
1449 ide_hwif_t *hwif, *mate = NULL; 1504 ide_hwif_t *hwif, *mate = NULL;
1450 int i, rc = 0; 1505 int i, rc = 0;
1451 1506
1452 for (i = 0; i < MAX_HWIFS; i++) { 1507 for (i = 0; i < MAX_HWIFS; i++) {
1453 if (d == NULL || idx[i] == 0xff) { 1508 if (idx[i] == 0xff) {
1454 mate = NULL; 1509 mate = NULL;
1455 continue; 1510 continue;
1456 } 1511 }
1457 1512
1458 hwif = &ide_hwifs[idx[i]]; 1513 hwif = &ide_hwifs[idx[i]];
1459 1514
1515 ide_port_apply_params(hwif);
1516
1517 if (d == NULL) {
1518 mate = NULL;
1519 continue;
1520 }
1521
1460 if (d->chipset != ide_etrax100 && (i & 1) && mate) { 1522 if (d->chipset != ide_etrax100 && (i & 1) && mate) {
1461 hwif->mate = mate; 1523 hwif->mate = mate;
1462 mate->mate = hwif; 1524 mate->mate = hwif;
@@ -1475,25 +1537,15 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
1475 1537
1476 hwif = &ide_hwifs[idx[i]]; 1538 hwif = &ide_hwifs[idx[i]];
1477 1539
1478 if ((hwif->chipset != ide_4drives || !hwif->mate || 1540 if (ide_probe_port(hwif) == 0)
1479 !hwif->mate->present) && ide_hwif_request_regions(hwif)) { 1541 hwif->present = 1;
1480 printk(KERN_ERR "%s: ports already in use, "
1481 "skipping probe\n", hwif->name);
1482 continue;
1483 }
1484
1485 if (ide_probe_port(hwif) < 0) {
1486 ide_hwif_release_regions(hwif);
1487 continue;
1488 }
1489
1490 hwif->present = 1;
1491 1542
1492 if (hwif->chipset != ide_4drives || !hwif->mate || 1543 if (hwif->chipset != ide_4drives || !hwif->mate ||
1493 !hwif->mate->present) 1544 !hwif->mate->present)
1494 ide_register_port(hwif); 1545 ide_register_port(hwif);
1495 1546
1496 ide_port_tune_devices(hwif); 1547 if (hwif->present)
1548 ide_port_tune_devices(hwif);
1497 } 1549 }
1498 1550
1499 for (i = 0; i < MAX_HWIFS; i++) { 1551 for (i = 0; i < MAX_HWIFS; i++) {
@@ -1502,9 +1554,6 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
1502 1554
1503 hwif = &ide_hwifs[idx[i]]; 1555 hwif = &ide_hwifs[idx[i]];
1504 1556
1505 if (!hwif->present)
1506 continue;
1507
1508 if (hwif_init(hwif) == 0) { 1557 if (hwif_init(hwif) == 0) {
1509 printk(KERN_INFO "%s: failed to initialize IDE " 1558 printk(KERN_INFO "%s: failed to initialize IDE "
1510 "interface\n", hwif->name); 1559 "interface\n", hwif->name);
@@ -1513,10 +1562,13 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
1513 continue; 1562 continue;
1514 } 1563 }
1515 1564
1516 ide_port_setup_devices(hwif); 1565 if (hwif->present)
1566 ide_port_setup_devices(hwif);
1517 1567
1518 ide_acpi_init(hwif); 1568 ide_acpi_init(hwif);
1519 ide_acpi_port_init_devices(hwif); 1569
1570 if (hwif->present)
1571 ide_acpi_port_init_devices(hwif);
1520 } 1572 }
1521 1573
1522 for (i = 0; i < MAX_HWIFS; i++) { 1574 for (i = 0; i < MAX_HWIFS; i++) {
@@ -1525,11 +1577,11 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
1525 1577
1526 hwif = &ide_hwifs[idx[i]]; 1578 hwif = &ide_hwifs[idx[i]];
1527 1579
1528 if (hwif->present) { 1580 if (hwif->chipset == ide_unknown)
1529 if (hwif->chipset == ide_unknown) 1581 hwif->chipset = ide_generic;
1530 hwif->chipset = ide_generic; 1582
1583 if (hwif->present)
1531 hwif_register_devices(hwif); 1584 hwif_register_devices(hwif);
1532 }
1533 } 1585 }
1534 1586
1535 for (i = 0; i < MAX_HWIFS; i++) { 1587 for (i = 0; i < MAX_HWIFS; i++) {
@@ -1538,11 +1590,11 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
1538 1590
1539 hwif = &ide_hwifs[idx[i]]; 1591 hwif = &ide_hwifs[idx[i]];
1540 1592
1541 if (hwif->present) { 1593 ide_sysfs_register_port(hwif);
1542 ide_sysfs_register_port(hwif); 1594 ide_proc_register_port(hwif);
1543 ide_proc_register_port(hwif); 1595
1596 if (hwif->present)
1544 ide_proc_port_register_devices(hwif); 1597 ide_proc_port_register_devices(hwif);
1545 }
1546 } 1598 }
1547 1599
1548 return rc; 1600 return rc;
@@ -1563,6 +1615,7 @@ EXPORT_SYMBOL_GPL(ide_device_add);
1563 1615
1564void ide_port_scan(ide_hwif_t *hwif) 1616void ide_port_scan(ide_hwif_t *hwif)
1565{ 1617{
1618 ide_port_apply_params(hwif);
1566 ide_port_cable_detect(hwif); 1619 ide_port_cable_detect(hwif);
1567 ide_port_init_devices(hwif); 1620 ide_port_init_devices(hwif);
1568 1621
@@ -1578,3 +1631,67 @@ void ide_port_scan(ide_hwif_t *hwif)
1578 ide_proc_port_register_devices(hwif); 1631 ide_proc_port_register_devices(hwif);
1579} 1632}
1580EXPORT_SYMBOL_GPL(ide_port_scan); 1633EXPORT_SYMBOL_GPL(ide_port_scan);
1634
1635static void ide_legacy_init_one(u8 *idx, hw_regs_t *hw, u8 port_no,
1636 const struct ide_port_info *d,
1637 unsigned long config)
1638{
1639 ide_hwif_t *hwif;
1640 unsigned long base, ctl;
1641 int irq;
1642
1643 if (port_no == 0) {
1644 base = 0x1f0;
1645 ctl = 0x3f6;
1646 irq = 14;
1647 } else {
1648 base = 0x170;
1649 ctl = 0x376;
1650 irq = 15;
1651 }
1652
1653 if (!request_region(base, 8, d->name)) {
1654 printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
1655 d->name, base, base + 7);
1656 return;
1657 }
1658
1659 if (!request_region(ctl, 1, d->name)) {
1660 printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
1661 d->name, ctl);
1662 release_region(base, 8);
1663 return;
1664 }
1665
1666 ide_std_init_ports(hw, base, ctl);
1667 hw->irq = irq;
1668
1669 hwif = ide_find_port_slot(d);
1670 if (hwif) {
1671 ide_init_port_hw(hwif, hw);
1672 if (config)
1673 hwif->config_data = config;
1674 idx[port_no] = hwif->index;
1675 }
1676}
1677
1678int ide_legacy_device_add(const struct ide_port_info *d, unsigned long config)
1679{
1680 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
1681 hw_regs_t hw[2];
1682
1683 memset(&hw, 0, sizeof(hw));
1684
1685 if ((d->host_flags & IDE_HFLAG_QD_2ND_PORT) == 0)
1686 ide_legacy_init_one(idx, &hw[0], 0, d, config);
1687 ide_legacy_init_one(idx, &hw[1], 1, d, config);
1688
1689 if (idx[0] == 0xff && idx[1] == 0xff &&
1690 (d->host_flags & IDE_HFLAG_SINGLE))
1691 return -ENOENT;
1692
1693 ide_device_add(idx, d);
1694
1695 return 0;
1696}
1697EXPORT_SYMBOL_GPL(ide_legacy_device_add);
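With the two helpers above, a legacy chipset driver's probe reduces to a single call. A sketch, assuming a hypothetical foo driver with its own ide_port_info:

	static int __init foo_init(void)
	{
		/* a non-zero second argument is stored in hwif->config_data */
		return ide_legacy_device_add(&foo_port_info, 0);
	}
	module_init(foo_init);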
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index edd7f186dc4d..7b2f3815a838 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -47,28 +47,28 @@ static int proc_ide_read_imodel
47 const char *name; 47 const char *name;
48 48
49 switch (hwif->chipset) { 49 switch (hwif->chipset) {
50 case ide_generic: name = "generic"; break; 50 case ide_generic: name = "generic"; break;
51 case ide_pci: name = "pci"; break; 51 case ide_pci: name = "pci"; break;
52 case ide_cmd640: name = "cmd640"; break; 52 case ide_cmd640: name = "cmd640"; break;
53 case ide_dtc2278: name = "dtc2278"; break; 53 case ide_dtc2278: name = "dtc2278"; break;
54 case ide_ali14xx: name = "ali14xx"; break; 54 case ide_ali14xx: name = "ali14xx"; break;
55 case ide_qd65xx: name = "qd65xx"; break; 55 case ide_qd65xx: name = "qd65xx"; break;
56 case ide_umc8672: name = "umc8672"; break; 56 case ide_umc8672: name = "umc8672"; break;
57 case ide_ht6560b: name = "ht6560b"; break; 57 case ide_ht6560b: name = "ht6560b"; break;
58 case ide_rz1000: name = "rz1000"; break; 58 case ide_rz1000: name = "rz1000"; break;
59 case ide_trm290: name = "trm290"; break; 59 case ide_trm290: name = "trm290"; break;
60 case ide_cmd646: name = "cmd646"; break; 60 case ide_cmd646: name = "cmd646"; break;
61 case ide_cy82c693: name = "cy82c693"; break; 61 case ide_cy82c693: name = "cy82c693"; break;
62 case ide_4drives: name = "4drives"; break; 62 case ide_4drives: name = "4drives"; break;
63 case ide_pmac: name = "mac-io"; break; 63 case ide_pmac: name = "mac-io"; break;
64 case ide_au1xxx: name = "au1xxx"; break; 64 case ide_au1xxx: name = "au1xxx"; break;
65 case ide_palm3710: name = "palm3710"; break; 65 case ide_palm3710: name = "palm3710"; break;
66 case ide_etrax100: name = "etrax100"; break; 66 case ide_etrax100: name = "etrax100"; break;
67 case ide_acorn: name = "acorn"; break; 67 case ide_acorn: name = "acorn"; break;
68 default: name = "(unknown)"; break; 68 default: name = "(unknown)"; break;
69 } 69 }
70 len = sprintf(page, "%s\n", name); 70 len = sprintf(page, "%s\n", name);
71 PROC_IDE_READ_RETURN(page,start,off,count,eof,len); 71 PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
72} 72}
73 73
74static int proc_ide_read_mate 74static int proc_ide_read_mate
@@ -81,7 +81,7 @@ static int proc_ide_read_mate
81 len = sprintf(page, "%s\n", hwif->mate->name); 81 len = sprintf(page, "%s\n", hwif->mate->name);
82 else 82 else
83 len = sprintf(page, "(none)\n"); 83 len = sprintf(page, "(none)\n");
84 PROC_IDE_READ_RETURN(page,start,off,count,eof,len); 84 PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
85} 85}
86 86
87static int proc_ide_read_channel 87static int proc_ide_read_channel
@@ -93,7 +93,7 @@ static int proc_ide_read_channel
93 page[0] = hwif->channel ? '1' : '0'; 93 page[0] = hwif->channel ? '1' : '0';
94 page[1] = '\n'; 94 page[1] = '\n';
95 len = 2; 95 len = 2;
96 PROC_IDE_READ_RETURN(page,start,off,count,eof,len); 96 PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
97} 97}
98 98
99static int proc_ide_read_identify 99static int proc_ide_read_identify
@@ -120,7 +120,7 @@ static int proc_ide_read_identify
120 len = out - page; 120 len = out - page;
121 } 121 }
122 } 122 }
123 PROC_IDE_READ_RETURN(page,start,off,count,eof,len); 123 PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
124} 124}
125 125
126/** 126/**
@@ -197,7 +197,7 @@ EXPORT_SYMBOL(ide_add_setting);
197 * The caller must hold the setting semaphore. 197 * The caller must hold the setting semaphore.
198 */ 198 */
199 199
200static void __ide_remove_setting (ide_drive_t *drive, char *name) 200static void __ide_remove_setting(ide_drive_t *drive, char *name)
201{ 201{
202 ide_settings_t **p, *setting; 202 ide_settings_t **p, *setting;
203 203
@@ -205,7 +205,8 @@ static void __ide_remove_setting (ide_drive_t *drive, char *name)
205 205
206 while ((*p) && strcmp((*p)->name, name)) 206 while ((*p) && strcmp((*p)->name, name))
207 p = &((*p)->next); 207 p = &((*p)->next);
208 if ((setting = (*p)) == NULL) 208 setting = (*p);
209 if (setting == NULL)
209 return; 210 return;
210 211
211 (*p) = setting->next; 212 (*p) = setting->next;
@@ -223,7 +224,7 @@ static void __ide_remove_setting (ide_drive_t *drive, char *name)
223 * caller must hold ide_setting_mtx. 224 * caller must hold ide_setting_mtx.
224 */ 225 */
225 226
226static void auto_remove_settings (ide_drive_t *drive) 227static void auto_remove_settings(ide_drive_t *drive)
227{ 228{
228 ide_settings_t *setting; 229 ide_settings_t *setting;
229repeat: 230repeat:
@@ -279,16 +280,16 @@ static int ide_read_setting(ide_drive_t *drive, ide_settings_t *setting)
279 280
280 if ((setting->rw & SETTING_READ)) { 281 if ((setting->rw & SETTING_READ)) {
281 spin_lock_irqsave(&ide_lock, flags); 282 spin_lock_irqsave(&ide_lock, flags);
282 switch(setting->data_type) { 283 switch (setting->data_type) {
283 case TYPE_BYTE: 284 case TYPE_BYTE:
284 val = *((u8 *) setting->data); 285 val = *((u8 *) setting->data);
285 break; 286 break;
286 case TYPE_SHORT: 287 case TYPE_SHORT:
287 val = *((u16 *) setting->data); 288 val = *((u16 *) setting->data);
288 break; 289 break;
289 case TYPE_INT: 290 case TYPE_INT:
290 val = *((u32 *) setting->data); 291 val = *((u32 *) setting->data);
291 break; 292 break;
292 } 293 }
293 spin_unlock_irqrestore(&ide_lock, flags); 294 spin_unlock_irqrestore(&ide_lock, flags);
294 } 295 }
@@ -326,15 +327,15 @@ static int ide_write_setting(ide_drive_t *drive, ide_settings_t *setting, int va
326 if (ide_spin_wait_hwgroup(drive)) 327 if (ide_spin_wait_hwgroup(drive))
327 return -EBUSY; 328 return -EBUSY;
328 switch (setting->data_type) { 329 switch (setting->data_type) {
329 case TYPE_BYTE: 330 case TYPE_BYTE:
330 *((u8 *) setting->data) = val; 331 *((u8 *) setting->data) = val;
331 break; 332 break;
332 case TYPE_SHORT: 333 case TYPE_SHORT:
333 *((u16 *) setting->data) = val; 334 *((u16 *) setting->data) = val;
334 break; 335 break;
335 case TYPE_INT: 336 case TYPE_INT:
336 *((u32 *) setting->data) = val; 337 *((u32 *) setting->data) = val;
337 break; 338 break;
338 } 339 }
339 spin_unlock_irq(&ide_lock); 340 spin_unlock_irq(&ide_lock);
340 return 0; 341 return 0;
@@ -390,7 +391,7 @@ void ide_add_generic_settings (ide_drive_t *drive)
390 391
391static void proc_ide_settings_warn(void) 392static void proc_ide_settings_warn(void)
392{ 393{
393 static int warned = 0; 394 static int warned;
394 395
395 if (warned) 396 if (warned)
396 return; 397 return;
@@ -413,11 +414,12 @@ static int proc_ide_read_settings
413 mutex_lock(&ide_setting_mtx); 414 mutex_lock(&ide_setting_mtx);
414 out += sprintf(out, "name\t\t\tvalue\t\tmin\t\tmax\t\tmode\n"); 415 out += sprintf(out, "name\t\t\tvalue\t\tmin\t\tmax\t\tmode\n");
415 out += sprintf(out, "----\t\t\t-----\t\t---\t\t---\t\t----\n"); 416 out += sprintf(out, "----\t\t\t-----\t\t---\t\t---\t\t----\n");
416 while(setting) { 417 while (setting) {
417 mul_factor = setting->mul_factor; 418 mul_factor = setting->mul_factor;
418 div_factor = setting->div_factor; 419 div_factor = setting->div_factor;
419 out += sprintf(out, "%-24s", setting->name); 420 out += sprintf(out, "%-24s", setting->name);
420 if ((rc = ide_read_setting(drive, setting)) >= 0) 421 rc = ide_read_setting(drive, setting);
422 if (rc >= 0)
421 out += sprintf(out, "%-16d", rc * mul_factor / div_factor); 423 out += sprintf(out, "%-16d", rc * mul_factor / div_factor);
422 else 424 else
423 out += sprintf(out, "%-16s", "write-only"); 425 out += sprintf(out, "%-16s", "write-only");
@@ -431,7 +433,7 @@ static int proc_ide_read_settings
431 } 433 }
432 len = out - page; 434 len = out - page;
433 mutex_unlock(&ide_setting_mtx); 435 mutex_unlock(&ide_setting_mtx);
434 PROC_IDE_READ_RETURN(page,start,off,count,eof,len); 436 PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
435} 437}
436 438
437#define MAX_LEN 30 439#define MAX_LEN 30
@@ -512,8 +514,7 @@ static int proc_ide_write_settings(struct file *file, const char __user *buffer,
512 514
513 mutex_lock(&ide_setting_mtx); 515 mutex_lock(&ide_setting_mtx);
514 setting = ide_find_setting_by_name(drive, name); 516 setting = ide_find_setting_by_name(drive, name);
515 if (!setting) 517 if (!setting) {
516 {
517 mutex_unlock(&ide_setting_mtx); 518 mutex_unlock(&ide_setting_mtx);
518 goto parse_error; 519 goto parse_error;
519 } 520 }
@@ -533,8 +534,8 @@ parse_error:
533int proc_ide_read_capacity 534int proc_ide_read_capacity
534 (char *page, char **start, off_t off, int count, int *eof, void *data) 535 (char *page, char **start, off_t off, int count, int *eof, void *data)
535{ 536{
536 int len = sprintf(page,"%llu\n", (long long)0x7fffffff); 537 int len = sprintf(page, "%llu\n", (long long)0x7fffffff);
537 PROC_IDE_READ_RETURN(page,start,off,count,eof,len); 538 PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
538} 539}
539 540
540EXPORT_SYMBOL_GPL(proc_ide_read_capacity); 541EXPORT_SYMBOL_GPL(proc_ide_read_capacity);
@@ -546,13 +547,13 @@ int proc_ide_read_geometry
546 char *out = page; 547 char *out = page;
547 int len; 548 int len;
548 549
549 out += sprintf(out,"physical %d/%d/%d\n", 550 out += sprintf(out, "physical %d/%d/%d\n",
550 drive->cyl, drive->head, drive->sect); 551 drive->cyl, drive->head, drive->sect);
551 out += sprintf(out,"logical %d/%d/%d\n", 552 out += sprintf(out, "logical %d/%d/%d\n",
552 drive->bios_cyl, drive->bios_head, drive->bios_sect); 553 drive->bios_cyl, drive->bios_head, drive->bios_sect);
553 554
554 len = out - page; 555 len = out - page;
555 PROC_IDE_READ_RETURN(page,start,off,count,eof,len); 556 PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
556} 557}
557 558
558EXPORT_SYMBOL(proc_ide_read_geometry); 559EXPORT_SYMBOL(proc_ide_read_geometry);
@@ -566,7 +567,7 @@ static int proc_ide_read_dmodel
566 567
567 len = sprintf(page, "%.40s\n", 568 len = sprintf(page, "%.40s\n",
568 (id && id->model[0]) ? (char *)id->model : "(none)"); 569 (id && id->model[0]) ? (char *)id->model : "(none)");
569 PROC_IDE_READ_RETURN(page,start,off,count,eof,len); 570 PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
570} 571}
571 572
572static int proc_ide_read_driver 573static int proc_ide_read_driver
@@ -583,7 +584,7 @@ static int proc_ide_read_driver
583 dev->driver->name, ide_drv->version); 584 dev->driver->name, ide_drv->version);
584 } else 585 } else
585 len = sprintf(page, "ide-default version 0.9.newide\n"); 586 len = sprintf(page, "ide-default version 0.9.newide\n");
586 PROC_IDE_READ_RETURN(page,start,off,count,eof,len); 587 PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
587} 588}
588 589
589static int ide_replace_subdriver(ide_drive_t *drive, const char *driver) 590static int ide_replace_subdriver(ide_drive_t *drive, const char *driver)
@@ -598,14 +599,14 @@ static int ide_replace_subdriver(ide_drive_t *drive, const char *driver)
598 err = device_attach(dev); 599 err = device_attach(dev);
599 if (err < 0) 600 if (err < 0)
600 printk(KERN_WARNING "IDE: %s: device_attach error: %d\n", 601 printk(KERN_WARNING "IDE: %s: device_attach error: %d\n",
601 __FUNCTION__, err); 602 __func__, err);
602 drive->driver_req[0] = 0; 603 drive->driver_req[0] = 0;
603 if (dev->driver == NULL) { 604 if (dev->driver == NULL) {
604 err = device_attach(dev); 605 err = device_attach(dev);
605 if (err < 0) 606 if (err < 0)
606 printk(KERN_WARNING 607 printk(KERN_WARNING
607 "IDE: %s: device_attach(2) error: %d\n", 608 "IDE: %s: device_attach(2) error: %d\n",
608 __FUNCTION__, err); 609 __func__, err);
609 } 610 }
610 if (dev->driver && !strcmp(dev->driver->name, driver)) 611 if (dev->driver && !strcmp(dev->driver->name, driver))
611 ret = 0; 612 ret = 0;
@@ -639,30 +640,26 @@ static int proc_ide_read_media
639 int len; 640 int len;
640 641
641 switch (drive->media) { 642 switch (drive->media) {
642 case ide_disk: media = "disk\n"; 643 case ide_disk: media = "disk\n"; break;
643 break; 644 case ide_cdrom: media = "cdrom\n"; break;
644 case ide_cdrom: media = "cdrom\n"; 645 case ide_tape: media = "tape\n"; break;
645 break; 646 case ide_floppy: media = "floppy\n"; break;
646 case ide_tape: media = "tape\n"; 647 case ide_optical: media = "optical\n"; break;
647 break; 648 default: media = "UNKNOWN\n"; break;
648 case ide_floppy:media = "floppy\n";
649 break;
650 case ide_optical:media = "optical\n";
651 break;
652 default: media = "UNKNOWN\n";
653 break;
654 } 649 }
655 strcpy(page,media); 650 strcpy(page, media);
656 len = strlen(media); 651 len = strlen(media);
657 PROC_IDE_READ_RETURN(page,start,off,count,eof,len); 652 PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
658} 653}
659 654
660static ide_proc_entry_t generic_drive_entries[] = { 655static ide_proc_entry_t generic_drive_entries[] = {
661 { "driver", S_IFREG|S_IRUGO, proc_ide_read_driver, proc_ide_write_driver }, 656 { "driver", S_IFREG|S_IRUGO, proc_ide_read_driver,
662 { "identify", S_IFREG|S_IRUSR, proc_ide_read_identify, NULL }, 657 proc_ide_write_driver },
663 { "media", S_IFREG|S_IRUGO, proc_ide_read_media, NULL }, 658 { "identify", S_IFREG|S_IRUSR, proc_ide_read_identify, NULL },
664 { "model", S_IFREG|S_IRUGO, proc_ide_read_dmodel, NULL }, 659 { "media", S_IFREG|S_IRUGO, proc_ide_read_media, NULL },
665 { "settings", S_IFREG|S_IRUSR|S_IWUSR,proc_ide_read_settings, proc_ide_write_settings }, 660 { "model", S_IFREG|S_IRUGO, proc_ide_read_dmodel, NULL },
661 { "settings", S_IFREG|S_IRUSR|S_IWUSR, proc_ide_read_settings,
662 proc_ide_write_settings },
666 { NULL, 0, NULL, NULL } 663 { NULL, 0, NULL, NULL }
667}; 664};
668 665
@@ -734,7 +731,6 @@ void ide_proc_unregister_driver(ide_drive_t *drive, ide_driver_t *driver)
734 spin_unlock_irqrestore(&ide_lock, flags); 731 spin_unlock_irqrestore(&ide_lock, flags);
735 mutex_unlock(&ide_setting_mtx); 732 mutex_unlock(&ide_setting_mtx);
736} 733}
737
738EXPORT_SYMBOL(ide_proc_unregister_driver); 734EXPORT_SYMBOL(ide_proc_unregister_driver);
739 735
740void ide_proc_port_register_devices(ide_hwif_t *hwif) 736void ide_proc_port_register_devices(ide_hwif_t *hwif)
@@ -755,7 +751,7 @@ void ide_proc_port_register_devices(ide_hwif_t *hwif)
755 drive->proc = proc_mkdir(drive->name, parent); 751 drive->proc = proc_mkdir(drive->name, parent);
756 if (drive->proc) 752 if (drive->proc)
757 ide_add_proc_entries(drive->proc, generic_drive_entries, drive); 753 ide_add_proc_entries(drive->proc, generic_drive_entries, drive);
758 sprintf(name,"ide%d/%s", (drive->name[2]-'a')/2, drive->name); 754 sprintf(name, "ide%d/%s", (drive->name[2]-'a')/2, drive->name);
759 ent = proc_symlink(drive->name, proc_ide_root, name); 755 ent = proc_symlink(drive->name, proc_ide_root, name);
760 if (!ent) return; 756 if (!ent) return;
761 } 757 }
@@ -790,15 +786,6 @@ void ide_proc_register_port(ide_hwif_t *hwif)
790 } 786 }
791} 787}
792 788
793#ifdef CONFIG_BLK_DEV_IDEPCI
794void ide_pci_create_host_proc(const char *name, get_info_t *get_info)
795{
796 create_proc_info_entry(name, 0, proc_ide_root, get_info);
797}
798
799EXPORT_SYMBOL_GPL(ide_pci_create_host_proc);
800#endif
801
802void ide_proc_unregister_port(ide_hwif_t *hwif) 789void ide_proc_unregister_port(ide_hwif_t *hwif)
803{ 790{
804 if (hwif->proc) { 791 if (hwif->proc) {
@@ -825,7 +812,7 @@ static int ide_drivers_show(struct seq_file *s, void *p)
825 err = bus_for_each_drv(&ide_bus_type, NULL, s, proc_print_driver); 812 err = bus_for_each_drv(&ide_bus_type, NULL, s, proc_print_driver);
826 if (err < 0) 813 if (err < 0)
827 printk(KERN_WARNING "IDE: %s: bus_for_each_drv error: %d\n", 814 printk(KERN_WARNING "IDE: %s: bus_for_each_drv error: %d\n",
828 __FUNCTION__, err); 815 __func__, err);
829 return 0; 816 return 0;
830} 817}
831 818
diff --git a/drivers/ide/ide-scan-pci.c b/drivers/ide/ide-scan-pci.c
index 98888da1b600..0e79efff1deb 100644
--- a/drivers/ide/ide-scan-pci.c
+++ b/drivers/ide/ide-scan-pci.c
@@ -102,7 +102,7 @@ static int __init ide_scan_pcibus(void)
102 if (__pci_register_driver(d, d->driver.owner, 102 if (__pci_register_driver(d, d->driver.owner,
103 d->driver.mod_name)) 103 d->driver.mod_name))
104 printk(KERN_ERR "%s: failed to register %s driver\n", 104 printk(KERN_ERR "%s: failed to register %s driver\n",
105 __FUNCTION__, d->driver.mod_name); 105 __func__, d->driver.mod_name);
106 } 106 }
107 107
108 return 0; 108 return 0;
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index f43fd070f1b6..29870c415110 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -72,26 +72,6 @@ enum {
72#endif 72#endif
73 73
74/**************************** Tunable parameters *****************************/ 74/**************************** Tunable parameters *****************************/
75
76
77/*
78 * Pipelined mode parameters.
79 *
80 * We try to use the minimum number of stages which is enough to keep the tape
81 * constantly streaming. To accomplish that, we implement a feedback loop around
82 * the maximum number of stages:
83 *
84 * We start from MIN maximum stages (we will not even use MIN stages if we don't
85 * need them), increment it by RATE*(MAX-MIN) whenever we sense that the
86 * pipeline is empty, until we reach the optimum value or until we reach MAX.
87 *
88 * Setting the following parameter to 0 is illegal: the pipelined mode cannot be
89 * disabled (idetape_calculate_speeds() divides by tape->max_stages.)
90 */
91#define IDETAPE_MIN_PIPELINE_STAGES 1
92#define IDETAPE_MAX_PIPELINE_STAGES 400
93#define IDETAPE_INCREASE_STAGES_RATE 20
94
95/* 75/*
96 * After each failed packet command we issue a request sense command and retry 76 * After each failed packet command we issue a request sense command and retry
97 * the packet command IDETAPE_MAX_PC_RETRIES times. 77 * the packet command IDETAPE_MAX_PC_RETRIES times.
@@ -224,28 +204,17 @@ enum {
224 /* 0 When the tape position is unknown */ 204 /* 0 When the tape position is unknown */
225 IDETAPE_FLAG_ADDRESS_VALID = (1 << 1), 205 IDETAPE_FLAG_ADDRESS_VALID = (1 << 1),
226 /* Device already opened */ 206 /* Device already opened */
227 IDETAPE_FLAG_BUSY = (1 << 2), 207 IDETAPE_FLAG_BUSY = (1 << 2),
228 /* Error detected in a pipeline stage */
229 IDETAPE_FLAG_PIPELINE_ERR = (1 << 3),
230 /* Attempt to auto-detect the current user block size */ 208 /* Attempt to auto-detect the current user block size */
231 IDETAPE_FLAG_DETECT_BS = (1 << 4), 209 IDETAPE_FLAG_DETECT_BS = (1 << 3),
232 /* Currently on a filemark */ 210 /* Currently on a filemark */
233 IDETAPE_FLAG_FILEMARK = (1 << 5), 211 IDETAPE_FLAG_FILEMARK = (1 << 4),
234 /* DRQ interrupt device */ 212 /* DRQ interrupt device */
235 IDETAPE_FLAG_DRQ_INTERRUPT = (1 << 6), 213 IDETAPE_FLAG_DRQ_INTERRUPT = (1 << 5),
236 /* pipeline active */
237 IDETAPE_FLAG_PIPELINE_ACTIVE = (1 << 7),
238 /* 0 = no tape is loaded, so we don't rewind after ejecting */ 214 /* 0 = no tape is loaded, so we don't rewind after ejecting */
239 IDETAPE_FLAG_MEDIUM_PRESENT = (1 << 8), 215 IDETAPE_FLAG_MEDIUM_PRESENT = (1 << 6),
240}; 216};
241 217
242/* A pipeline stage. */
243typedef struct idetape_stage_s {
244 struct request rq; /* The corresponding request */
245 struct idetape_bh *bh; /* The data buffers */
246 struct idetape_stage_s *next; /* Pointer to the next stage */
247} idetape_stage_t;
248
249/* 218/*
250 * Most of our global data which we need to save even as we leave the driver due 219 * Most of our global data which we need to save even as we leave the driver due
251 * to an interrupt or a timer event is stored in the struct defined below. 220 * to an interrupt or a timer event is stored in the struct defined below.
@@ -289,9 +258,7 @@ typedef struct ide_tape_obj {
289 * While polling for DSC we use postponed_rq to postpone the current 258 * While polling for DSC we use postponed_rq to postpone the current
290 * request so that ide.c will be able to service pending requests on the 259 * request so that ide.c will be able to service pending requests on the
291 * other device. Note that at most we will have only one DSC (usually 260 * other device. Note that at most we will have only one DSC (usually
292 * data transfer) request in the device request queue. Additional 261 * data transfer) request in the device request queue.
293 * requests can be queued in our internal pipeline, but they will be
294 * visible to ide.c only one at a time.
295 */ 262 */
296 struct request *postponed_rq; 263 struct request *postponed_rq;
297 /* The time in which we started polling for DSC */ 264 /* The time in which we started polling for DSC */
@@ -331,43 +298,20 @@ typedef struct ide_tape_obj {
331 * At most, there is only one ide-tape originated data transfer request 298 * At most, there is only one ide-tape originated data transfer request
332 * in the device request queue. This allows ide.c to easily service 299 * in the device request queue. This allows ide.c to easily service
333 * requests from the other device when we postpone our active request. 300 * requests from the other device when we postpone our active request.
334 * In the pipelined operation mode, we use our internal pipeline
335 * structure to hold more data requests. The data buffer size is chosen
336 * based on the tape's recommendation.
337 */ 301 */
338 /* ptr to the request which is waiting in the device request queue */ 302
339 struct request *active_data_rq;
340 /* Data buffer size chosen based on the tape's recommendation */ 303 /* Data buffer size chosen based on the tape's recommendation */
341 int stage_size; 304 int buffer_size;
342 idetape_stage_t *merge_stage; 305 /* merge buffer */
343 int merge_stage_size; 306 struct idetape_bh *merge_bh;
307 /* size of the merge buffer */
308 int merge_bh_size;
309 /* pointer to current buffer head within the merge buffer */
344 struct idetape_bh *bh; 310 struct idetape_bh *bh;
345 char *b_data; 311 char *b_data;
346 int b_count; 312 int b_count;
347 313
348 /* 314 int pages_per_buffer;
349 * Pipeline parameters.
350 *
351 * To accomplish non-pipelined mode, we simply set the following
352 * variables to zero (or NULL, where appropriate).
353 */
354 /* Number of currently used stages */
355 int nr_stages;
356 /* Number of pending stages */
357 int nr_pending_stages;
358 /* We will not allocate more than this number of stages */
359 int max_stages, min_pipeline, max_pipeline;
360 /* The first stage which will be removed from the pipeline */
361 idetape_stage_t *first_stage;
362 /* The currently active stage */
363 idetape_stage_t *active_stage;
364 /* Will be serviced after the currently active request */
365 idetape_stage_t *next_stage;
366 /* New requests will be added to the pipeline here */
367 idetape_stage_t *last_stage;
368 /* Optional free stage which we can use */
369 idetape_stage_t *cache_stage;
370 int pages_per_stage;
371 /* Wasted space in each stage */ 315 /* Wasted space in each stage */
372 int excess_bh_size; 316 int excess_bh_size;
373 317
@@ -388,45 +332,6 @@ typedef struct ide_tape_obj {
388 /* the tape is write protected (hardware or opened as read-only) */ 332 /* the tape is write protected (hardware or opened as read-only) */
389 char write_prot; 333 char write_prot;
390 334
391 /*
392 * Limit the number of times a request can be postponed, to avoid an
393 * infinite postpone deadlock.
394 */
395 int postpone_cnt;
396
397 /*
398 * Measures number of frames:
399 *
400 * 1. written/read to/from the driver pipeline (pipeline_head).
401 * 2. written/read to/from the tape buffers (idetape_bh).
402 * 3. written/read by the tape to/from the media (tape_head).
403 */
404 int pipeline_head;
405 int buffer_head;
406 int tape_head;
407 int last_tape_head;
408
409 /* Speed control at the tape buffers input/output */
410 unsigned long insert_time;
411 int insert_size;
412 int insert_speed;
413 int max_insert_speed;
414 int measure_insert_time;
415
416 /* Speed regulation negative feedback loop */
417 int speed_control;
418 int pipeline_head_speed;
419 int controlled_pipeline_head_speed;
420 int uncontrolled_pipeline_head_speed;
421 int controlled_last_pipeline_head;
422 unsigned long uncontrolled_pipeline_head_time;
423 unsigned long controlled_pipeline_head_time;
424 int controlled_previous_pipeline_head;
425 int uncontrolled_previous_pipeline_head;
426 unsigned long controlled_previous_head_time;
427 unsigned long uncontrolled_previous_head_time;
428 int restart_speed_control_req;
429
430 u32 debug_mask; 335 u32 debug_mask;
431} idetape_tape_t; 336} idetape_tape_t;
432 337
@@ -674,128 +579,36 @@ static void idetape_analyze_error(ide_drive_t *drive, u8 *sense)
674 } 579 }
675} 580}
676 581
677static void idetape_activate_next_stage(ide_drive_t *drive) 582/* Free data buffers completely. */
583static void ide_tape_kfree_buffer(idetape_tape_t *tape)
678{ 584{
679 idetape_tape_t *tape = drive->driver_data; 585 struct idetape_bh *prev_bh, *bh = tape->merge_bh;
680 idetape_stage_t *stage = tape->next_stage;
681 struct request *rq = &stage->rq;
682 586
683 debug_log(DBG_PROCS, "Enter %s\n", __func__); 587 while (bh) {
588 u32 size = bh->b_size;
684 589
685 if (stage == NULL) { 590 while (size) {
686 printk(KERN_ERR "ide-tape: bug: Trying to activate a non" 591 unsigned int order = fls(size >> PAGE_SHIFT)-1;
687 " existing stage\n");
688 return;
689 }
690 592
691 rq->rq_disk = tape->disk; 593 if (bh->b_data)
692 rq->buffer = NULL; 594 free_pages((unsigned long)bh->b_data, order);
693 rq->special = (void *)stage->bh; 595
694 tape->active_data_rq = rq; 596 size &= (order-1);
695 tape->active_stage = stage; 597 bh->b_data += (1 << order) * PAGE_SIZE;
696 tape->next_stage = stage->next;
697}
698
699/* Free a stage along with its related buffers completely. */
700static void __idetape_kfree_stage(idetape_stage_t *stage)
701{
702 struct idetape_bh *prev_bh, *bh = stage->bh;
703 int size;
704
705 while (bh != NULL) {
706 if (bh->b_data != NULL) {
707 size = (int) bh->b_size;
708 while (size > 0) {
709 free_page((unsigned long) bh->b_data);
710 size -= PAGE_SIZE;
711 bh->b_data += PAGE_SIZE;
712 }
713 } 598 }
714 prev_bh = bh; 599 prev_bh = bh;
715 bh = bh->b_reqnext; 600 bh = bh->b_reqnext;
716 kfree(prev_bh); 601 kfree(prev_bh);
717 } 602 }
718 kfree(stage); 603 kfree(tape->merge_bh);
719}
720
721static void idetape_kfree_stage(idetape_tape_t *tape, idetape_stage_t *stage)
722{
723 __idetape_kfree_stage(stage);
724} 604}
725 605
726/*
727 * Remove tape->first_stage from the pipeline. The caller should avoid race
728 * conditions.
729 */
730static void idetape_remove_stage_head(ide_drive_t *drive)
731{
732 idetape_tape_t *tape = drive->driver_data;
733 idetape_stage_t *stage;
734
735 debug_log(DBG_PROCS, "Enter %s\n", __func__);
736
737 if (tape->first_stage == NULL) {
738 printk(KERN_ERR "ide-tape: bug: tape->first_stage is NULL\n");
739 return;
740 }
741 if (tape->active_stage == tape->first_stage) {
742 printk(KERN_ERR "ide-tape: bug: Trying to free our active "
743 "pipeline stage\n");
744 return;
745 }
746 stage = tape->first_stage;
747 tape->first_stage = stage->next;
748 idetape_kfree_stage(tape, stage);
749 tape->nr_stages--;
750 if (tape->first_stage == NULL) {
751 tape->last_stage = NULL;
752 if (tape->next_stage != NULL)
753 printk(KERN_ERR "ide-tape: bug: tape->next_stage !="
754 " NULL\n");
755 if (tape->nr_stages)
756 printk(KERN_ERR "ide-tape: bug: nr_stages should be 0 "
757 "now\n");
758 }
759}
760
761/*
762 * This will free all the pipeline stages starting from new_last_stage->next
763 * to the end of the list, and point tape->last_stage to new_last_stage.
764 */
765static void idetape_abort_pipeline(ide_drive_t *drive,
766 idetape_stage_t *new_last_stage)
767{
768 idetape_tape_t *tape = drive->driver_data;
769 idetape_stage_t *stage = new_last_stage->next;
770 idetape_stage_t *nstage;
771
772 debug_log(DBG_PROCS, "%s: Enter %s\n", tape->name, __func__);
773
774 while (stage) {
775 nstage = stage->next;
776 idetape_kfree_stage(tape, stage);
777 --tape->nr_stages;
778 --tape->nr_pending_stages;
779 stage = nstage;
780 }
781 if (new_last_stage)
782 new_last_stage->next = NULL;
783 tape->last_stage = new_last_stage;
784 tape->next_stage = NULL;
785}
786
787/*
788 * Finish servicing a request and insert a pending pipeline request into the
789 * main device queue.
790 */
791static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects) 606static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects)
792{ 607{
793 struct request *rq = HWGROUP(drive)->rq; 608 struct request *rq = HWGROUP(drive)->rq;
794 idetape_tape_t *tape = drive->driver_data; 609 idetape_tape_t *tape = drive->driver_data;
795 unsigned long flags; 610 unsigned long flags;
796 int error; 611 int error;
797 int remove_stage = 0;
798 idetape_stage_t *active_stage;
799 612
800 debug_log(DBG_PROCS, "Enter %s\n", __func__); 613 debug_log(DBG_PROCS, "Enter %s\n", __func__);
801 614
@@ -815,58 +628,8 @@ static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects)
815 628
816 spin_lock_irqsave(&tape->lock, flags); 629 spin_lock_irqsave(&tape->lock, flags);
817 630
818 /* The request was a pipelined data transfer request */
819 if (tape->active_data_rq == rq) {
820 active_stage = tape->active_stage;
821 tape->active_stage = NULL;
822 tape->active_data_rq = NULL;
823 tape->nr_pending_stages--;
824 if (rq->cmd[0] & REQ_IDETAPE_WRITE) {
825 remove_stage = 1;
826 if (error) {
827 set_bit(IDETAPE_FLAG_PIPELINE_ERR,
828 &tape->flags);
829 if (error == IDETAPE_ERROR_EOD)
830 idetape_abort_pipeline(drive,
831 active_stage);
832 }
833 } else if (rq->cmd[0] & REQ_IDETAPE_READ) {
834 if (error == IDETAPE_ERROR_EOD) {
835 set_bit(IDETAPE_FLAG_PIPELINE_ERR,
836 &tape->flags);
837 idetape_abort_pipeline(drive, active_stage);
838 }
839 }
840 if (tape->next_stage != NULL) {
841 idetape_activate_next_stage(drive);
842
843 /* Insert the next request into the request queue. */
844 (void)ide_do_drive_cmd(drive, tape->active_data_rq,
845 ide_end);
846 } else if (!error) {
847 /*
848 * This is a part of the feedback loop which tries to
849 * find the optimum number of stages. We are starting
850 * from a minimum maximum number of stages, and if we
851 * sense that the pipeline is empty, we try to increase
852 * it, until we reach the user compile time memory
853 * limit.
854 */
855 int i = (tape->max_pipeline - tape->min_pipeline) / 10;
856
857 tape->max_stages += max(i, 1);
858 tape->max_stages = max(tape->max_stages,
859 tape->min_pipeline);
860 tape->max_stages = min(tape->max_stages,
861 tape->max_pipeline);
862 }
863 }
864 ide_end_drive_cmd(drive, 0, 0); 631 ide_end_drive_cmd(drive, 0, 0);
865 632
866 if (remove_stage)
867 idetape_remove_stage_head(drive);
868 if (tape->active_data_rq == NULL)
869 clear_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags);
870 spin_unlock_irqrestore(&tape->lock, flags); 633 spin_unlock_irqrestore(&tape->lock, flags);
871 return 0; 634 return 0;
872} 635}
@@ -993,7 +756,7 @@ static ide_startstop_t idetape_pc_intr(ide_drive_t *drive)
993 stat = ide_read_status(drive); 756 stat = ide_read_status(drive);
994 757
995 if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) { 758 if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
996 if (hwif->ide_dma_end(drive) || (stat & ERR_STAT)) { 759 if (hwif->dma_ops->dma_end(drive) || (stat & ERR_STAT)) {
997 /* 760 /*
998 * A DMA error is sometimes expected. For example, 761 * A DMA error is sometimes expected. For example,
999 * if the tape is crossing a filemark during a 762 * if the tape is crossing a filemark during a
@@ -1083,10 +846,10 @@ static ide_startstop_t idetape_pc_intr(ide_drive_t *drive)
1083 return ide_do_reset(drive); 846 return ide_do_reset(drive);
1084 } 847 }
1085 /* Get the number of bytes to transfer on this interrupt. */ 848 /* Get the number of bytes to transfer on this interrupt. */
1086 bcount = (hwif->INB(hwif->io_ports[IDE_BCOUNTH_OFFSET]) << 8) | 849 bcount = (hwif->INB(hwif->io_ports.lbah_addr) << 8) |
1087 hwif->INB(hwif->io_ports[IDE_BCOUNTL_OFFSET]); 850 hwif->INB(hwif->io_ports.lbam_addr);
1088 851
1089 ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]); 852 ireason = hwif->INB(hwif->io_ports.nsect_addr);
1090 853
1091 if (ireason & CD) { 854 if (ireason & CD) {
1092 printk(KERN_ERR "ide-tape: CoD != 0 in %s\n", __func__); 855 printk(KERN_ERR "ide-tape: CoD != 0 in %s\n", __func__);
@@ -1190,12 +953,12 @@ static ide_startstop_t idetape_transfer_pc(ide_drive_t *drive)
1190 "yet DRQ isn't asserted\n"); 953 "yet DRQ isn't asserted\n");
1191 return startstop; 954 return startstop;
1192 } 955 }
1193 ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]); 956 ireason = hwif->INB(hwif->io_ports.nsect_addr);
1194 while (retries-- && ((ireason & CD) == 0 || (ireason & IO))) { 957 while (retries-- && ((ireason & CD) == 0 || (ireason & IO))) {
1195 printk(KERN_ERR "ide-tape: (IO,CoD != (0,1) while issuing " 958 printk(KERN_ERR "ide-tape: (IO,CoD != (0,1) while issuing "
1196 "a packet command, retrying\n"); 959 "a packet command, retrying\n");
1197 udelay(100); 960 udelay(100);
1198 ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]); 961 ireason = hwif->INB(hwif->io_ports.nsect_addr);
1199 if (retries == 0) { 962 if (retries == 0) {
1200 printk(KERN_ERR "ide-tape: (IO,CoD != (0,1) while " 963 printk(KERN_ERR "ide-tape: (IO,CoD != (0,1) while "
1201 "issuing a packet command, ignoring\n"); 964 "issuing a packet command, ignoring\n");
@@ -1213,7 +976,7 @@ static ide_startstop_t idetape_transfer_pc(ide_drive_t *drive)
1213#ifdef CONFIG_BLK_DEV_IDEDMA 976#ifdef CONFIG_BLK_DEV_IDEDMA
1214 /* Begin DMA, if necessary */ 977 /* Begin DMA, if necessary */
1215 if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) 978 if (pc->flags & PC_FLAG_DMA_IN_PROGRESS)
1216 hwif->dma_start(drive); 979 hwif->dma_ops->dma_start(drive);
1217#endif 980#endif
1218 /* Send the actual packet */ 981 /* Send the actual packet */
1219 HWIF(drive)->atapi_output_bytes(drive, pc->c, 12); 982 HWIF(drive)->atapi_output_bytes(drive, pc->c, 12);
@@ -1279,7 +1042,7 @@ static ide_startstop_t idetape_issue_pc(ide_drive_t *drive,
1279 ide_dma_off(drive); 1042 ide_dma_off(drive);
1280 } 1043 }
1281 if ((pc->flags & PC_FLAG_DMA_RECOMMENDED) && drive->using_dma) 1044 if ((pc->flags & PC_FLAG_DMA_RECOMMENDED) && drive->using_dma)
1282 dma_ok = !hwif->dma_setup(drive); 1045 dma_ok = !hwif->dma_ops->dma_setup(drive);
1283 1046
1284 ide_pktcmd_tf_load(drive, IDE_TFLAG_NO_SELECT_MASK | 1047 ide_pktcmd_tf_load(drive, IDE_TFLAG_NO_SELECT_MASK |
1285 IDE_TFLAG_OUT_DEVICE, bcount, dma_ok); 1048 IDE_TFLAG_OUT_DEVICE, bcount, dma_ok);
@@ -1292,7 +1055,7 @@ static ide_startstop_t idetape_issue_pc(ide_drive_t *drive,
1292 IDETAPE_WAIT_CMD, NULL); 1055 IDETAPE_WAIT_CMD, NULL);
1293 return ide_started; 1056 return ide_started;
1294 } else { 1057 } else {
1295 hwif->OUTB(WIN_PACKETCMD, hwif->io_ports[IDE_COMMAND_OFFSET]); 1058 hwif->OUTB(WIN_PACKETCMD, hwif->io_ports.command_addr);
1296 return idetape_transfer_pc(drive); 1059 return idetape_transfer_pc(drive);
1297 } 1060 }
1298} 1061}
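The dma_setup/dma_start/dma_end calls above reflect the move of the DMA hooks from ide_hwif_t into a shared ops table. Assuming the structure is named ide_dma_ops, as the hwif->dma_ops accesses suggest, a host driver groups its handlers roughly like this (the foo_* functions are assumptions):

	static const struct ide_dma_ops foo_dma_ops = {
		.dma_setup	= foo_dma_setup,
		.dma_start	= foo_dma_start,
		.dma_end	= foo_dma_end,
	};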
@@ -1335,69 +1098,6 @@ static void idetape_create_mode_sense_cmd(struct ide_atapi_pc *pc, u8 page_code)
1335 pc->idetape_callback = &idetape_pc_callback; 1098 pc->idetape_callback = &idetape_pc_callback;
1336} 1099}
1337 1100
1338static void idetape_calculate_speeds(ide_drive_t *drive)
1339{
1340 idetape_tape_t *tape = drive->driver_data;
1341
1342 if (time_after(jiffies,
1343 tape->controlled_pipeline_head_time + 120 * HZ)) {
1344 tape->controlled_previous_pipeline_head =
1345 tape->controlled_last_pipeline_head;
1346 tape->controlled_previous_head_time =
1347 tape->controlled_pipeline_head_time;
1348 tape->controlled_last_pipeline_head = tape->pipeline_head;
1349 tape->controlled_pipeline_head_time = jiffies;
1350 }
1351 if (time_after(jiffies, tape->controlled_pipeline_head_time + 60 * HZ))
1352 tape->controlled_pipeline_head_speed = (tape->pipeline_head -
1353 tape->controlled_last_pipeline_head) * 32 * HZ /
1354 (jiffies - tape->controlled_pipeline_head_time);
1355 else if (time_after(jiffies, tape->controlled_previous_head_time))
1356 tape->controlled_pipeline_head_speed = (tape->pipeline_head -
1357 tape->controlled_previous_pipeline_head) * 32 *
1358 HZ / (jiffies - tape->controlled_previous_head_time);
1359
1360 if (tape->nr_pending_stages < tape->max_stages/*- 1 */) {
1361 /* -1 for read mode error recovery */
1362 if (time_after(jiffies, tape->uncontrolled_previous_head_time +
1363 10 * HZ)) {
1364 tape->uncontrolled_pipeline_head_time = jiffies;
1365 tape->uncontrolled_pipeline_head_speed =
1366 (tape->pipeline_head -
1367 tape->uncontrolled_previous_pipeline_head) *
1368 32 * HZ / (jiffies -
1369 tape->uncontrolled_previous_head_time);
1370 }
1371 } else {
1372 tape->uncontrolled_previous_head_time = jiffies;
1373 tape->uncontrolled_previous_pipeline_head = tape->pipeline_head;
1374 if (time_after(jiffies, tape->uncontrolled_pipeline_head_time +
1375 30 * HZ))
1376 tape->uncontrolled_pipeline_head_time = jiffies;
1377
1378 }
1379 tape->pipeline_head_speed = max(tape->uncontrolled_pipeline_head_speed,
1380 tape->controlled_pipeline_head_speed);
1381
1382 if (tape->speed_control == 1) {
1383 if (tape->nr_pending_stages >= tape->max_stages / 2)
1384 tape->max_insert_speed = tape->pipeline_head_speed +
1385 (1100 - tape->pipeline_head_speed) * 2 *
1386 (tape->nr_pending_stages - tape->max_stages / 2)
1387 / tape->max_stages;
1388 else
1389 tape->max_insert_speed = 500 +
1390 (tape->pipeline_head_speed - 500) * 2 *
1391 tape->nr_pending_stages / tape->max_stages;
1392
1393 if (tape->nr_pending_stages >= tape->max_stages * 99 / 100)
1394 tape->max_insert_speed = 5000;
1395 } else
1396 tape->max_insert_speed = tape->speed_control;
1397
1398 tape->max_insert_speed = max(tape->max_insert_speed, 500);
1399}
1400
1401static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive) 1101static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive)
1402{ 1102{
1403 idetape_tape_t *tape = drive->driver_data; 1103 idetape_tape_t *tape = drive->driver_data;
@@ -1432,17 +1132,7 @@ static ide_startstop_t idetape_rw_callback(ide_drive_t *drive)
1432 int blocks = tape->pc->xferred / tape->blk_size; 1132 int blocks = tape->pc->xferred / tape->blk_size;
1433 1133
1434 tape->avg_size += blocks * tape->blk_size; 1134 tape->avg_size += blocks * tape->blk_size;
1435 tape->insert_size += blocks * tape->blk_size; 1135
1436 if (tape->insert_size > 1024 * 1024)
1437 tape->measure_insert_time = 1;
1438 if (tape->measure_insert_time) {
1439 tape->measure_insert_time = 0;
1440 tape->insert_time = jiffies;
1441 tape->insert_size = 0;
1442 }
1443 if (time_after(jiffies, tape->insert_time))
1444 tape->insert_speed = tape->insert_size / 1024 * HZ /
1445 (jiffies - tape->insert_time);
1446 if (time_after_eq(jiffies, tape->avg_time + HZ)) { 1136 if (time_after_eq(jiffies, tape->avg_time + HZ)) {
1447 tape->avg_speed = tape->avg_size * HZ / 1137 tape->avg_speed = tape->avg_size * HZ /
1448 (jiffies - tape->avg_time) / 1024; 1138 (jiffies - tape->avg_time) / 1024;
@@ -1475,7 +1165,7 @@ static void idetape_create_read_cmd(idetape_tape_t *tape,
1475 pc->buf = NULL; 1165 pc->buf = NULL;
1476 pc->buf_size = length * tape->blk_size; 1166 pc->buf_size = length * tape->blk_size;
1477 pc->req_xfer = pc->buf_size; 1167 pc->req_xfer = pc->buf_size;
1478 if (pc->req_xfer == tape->stage_size) 1168 if (pc->req_xfer == tape->buffer_size)
1479 pc->flags |= PC_FLAG_DMA_RECOMMENDED; 1169 pc->flags |= PC_FLAG_DMA_RECOMMENDED;
1480} 1170}
1481 1171
@@ -1495,7 +1185,7 @@ static void idetape_create_write_cmd(idetape_tape_t *tape,
1495 pc->buf = NULL; 1185 pc->buf = NULL;
1496 pc->buf_size = length * tape->blk_size; 1186 pc->buf_size = length * tape->blk_size;
1497 pc->req_xfer = pc->buf_size; 1187 pc->req_xfer = pc->buf_size;
1498 if (pc->req_xfer == tape->stage_size) 1188 if (pc->req_xfer == tape->buffer_size)
1499 pc->flags |= PC_FLAG_DMA_RECOMMENDED; 1189 pc->flags |= PC_FLAG_DMA_RECOMMENDED;
1500} 1190}
1501 1191
@@ -1547,10 +1237,6 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
1547 drive->post_reset = 0; 1237 drive->post_reset = 0;
1548 } 1238 }
1549 1239
1550 if (time_after(jiffies, tape->insert_time))
1551 tape->insert_speed = tape->insert_size / 1024 * HZ /
1552 (jiffies - tape->insert_time);
1553 idetape_calculate_speeds(drive);
1554 if (!test_and_clear_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags) && 1240 if (!test_and_clear_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags) &&
1555 (stat & SEEK_STAT) == 0) { 1241 (stat & SEEK_STAT) == 0) {
1556 if (postponed_rq == NULL) { 1242 if (postponed_rq == NULL) {
@@ -1574,16 +1260,12 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
1574 return ide_stopped; 1260 return ide_stopped;
1575 } 1261 }
1576 if (rq->cmd[0] & REQ_IDETAPE_READ) { 1262 if (rq->cmd[0] & REQ_IDETAPE_READ) {
1577 tape->buffer_head++;
1578 tape->postpone_cnt = 0;
1579 pc = idetape_next_pc_storage(drive); 1263 pc = idetape_next_pc_storage(drive);
1580 idetape_create_read_cmd(tape, pc, rq->current_nr_sectors, 1264 idetape_create_read_cmd(tape, pc, rq->current_nr_sectors,
1581 (struct idetape_bh *)rq->special); 1265 (struct idetape_bh *)rq->special);
1582 goto out; 1266 goto out;
1583 } 1267 }
1584 if (rq->cmd[0] & REQ_IDETAPE_WRITE) { 1268 if (rq->cmd[0] & REQ_IDETAPE_WRITE) {
1585 tape->buffer_head++;
1586 tape->postpone_cnt = 0;
1587 pc = idetape_next_pc_storage(drive); 1269 pc = idetape_next_pc_storage(drive);
1588 idetape_create_write_cmd(tape, pc, rq->current_nr_sectors, 1270 idetape_create_write_cmd(tape, pc, rq->current_nr_sectors,
1589 (struct idetape_bh *)rq->special); 1271 (struct idetape_bh *)rq->special);
@@ -1604,111 +1286,91 @@ out:
1604 return idetape_issue_pc(drive, pc); 1286 return idetape_issue_pc(drive, pc);
1605} 1287}
1606 1288
1607/* Pipeline related functions */
1608static inline int idetape_pipeline_active(idetape_tape_t *tape)
1609{
1610 int rc1, rc2;
1611
1612 rc1 = test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags);
1613 rc2 = (tape->active_data_rq != NULL);
1614 return rc1;
1615}
1616
1617/* 1289/*
1618 * The function below uses __get_free_page to allocate a pipeline stage, along 1290 * The function below uses __get_free_pages to allocate a data buffer of size
1619 * with all the necessary small buffers which together make a buffer of size 1291 * tape->buffer_size (or a bit more). We attempt to combine sequential pages as
1620 * tape->stage_size (or a bit more). We attempt to combine sequential pages as
1621 * much as possible. 1292 * much as possible.
1622 * 1293 *
1623 * It returns a pointer to the new allocated stage, or NULL if we can't (or 1294 * It returns a pointer to the newly allocated buffer, or NULL in case of
1624 * don't want to) allocate a stage. 1295 * failure.
1625 *
1626 * Pipeline stages are optional and are used to increase performance. If we
1627 * can't allocate them, we'll manage without them.
1628 */ 1296 */
1629static idetape_stage_t *__idetape_kmalloc_stage(idetape_tape_t *tape, int full, 1297static struct idetape_bh *ide_tape_kmalloc_buffer(idetape_tape_t *tape,
1630 int clear) 1298 int full, int clear)
1631{ 1299{
1632 idetape_stage_t *stage; 1300 struct idetape_bh *prev_bh, *bh, *merge_bh;
1633 struct idetape_bh *prev_bh, *bh; 1301 int pages = tape->pages_per_buffer;
1634 int pages = tape->pages_per_stage; 1302 unsigned int order, b_allocd;
1635 char *b_data = NULL; 1303 char *b_data = NULL;
1636 1304
1637 stage = kmalloc(sizeof(idetape_stage_t), GFP_KERNEL); 1305 merge_bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
1638 if (!stage) 1306 bh = merge_bh;
1639 return NULL;
1640 stage->next = NULL;
1641
1642 stage->bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
1643 bh = stage->bh;
1644 if (bh == NULL) 1307 if (bh == NULL)
1645 goto abort; 1308 goto abort;
1646 bh->b_reqnext = NULL; 1309
1647 bh->b_data = (char *) __get_free_page(GFP_KERNEL); 1310 order = fls(pages) - 1;
1311 bh->b_data = (char *) __get_free_pages(GFP_KERNEL, order);
1648 if (!bh->b_data) 1312 if (!bh->b_data)
1649 goto abort; 1313 goto abort;
1314 b_allocd = (1 << order) * PAGE_SIZE;
1315 pages &= (order-1);
1316
1650 if (clear) 1317 if (clear)
1651 memset(bh->b_data, 0, PAGE_SIZE); 1318 memset(bh->b_data, 0, b_allocd);
1652 bh->b_size = PAGE_SIZE; 1319 bh->b_reqnext = NULL;
1320 bh->b_size = b_allocd;
1653 atomic_set(&bh->b_count, full ? bh->b_size : 0); 1321 atomic_set(&bh->b_count, full ? bh->b_size : 0);
1654 1322
1655 while (--pages) { 1323 while (pages) {
1656 b_data = (char *) __get_free_page(GFP_KERNEL); 1324 order = fls(pages) - 1;
1325 b_data = (char *) __get_free_pages(GFP_KERNEL, order);
1657 if (!b_data) 1326 if (!b_data)
1658 goto abort; 1327 goto abort;
1328 b_allocd = (1 << order) * PAGE_SIZE;
1329
1659 if (clear) 1330 if (clear)
1660 memset(b_data, 0, PAGE_SIZE); 1331 memset(b_data, 0, b_allocd);
1661 if (bh->b_data == b_data + PAGE_SIZE) { 1332
1662 bh->b_size += PAGE_SIZE; 1333 /* newly allocated page frames below buffer header or ...*/
1663 bh->b_data -= PAGE_SIZE; 1334 if (bh->b_data == b_data + b_allocd) {
1335 bh->b_size += b_allocd;
1336 bh->b_data -= b_allocd;
1664 if (full) 1337 if (full)
1665 atomic_add(PAGE_SIZE, &bh->b_count); 1338 atomic_add(b_allocd, &bh->b_count);
1666 continue; 1339 continue;
1667 } 1340 }
1341 /* they are above the header */
1668 if (b_data == bh->b_data + bh->b_size) { 1342 if (b_data == bh->b_data + bh->b_size) {
1669 bh->b_size += PAGE_SIZE; 1343 bh->b_size += b_allocd;
1670 if (full) 1344 if (full)
1671 atomic_add(PAGE_SIZE, &bh->b_count); 1345 atomic_add(b_allocd, &bh->b_count);
1672 continue; 1346 continue;
1673 } 1347 }
1674 prev_bh = bh; 1348 prev_bh = bh;
1675 bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL); 1349 bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
1676 if (!bh) { 1350 if (!bh) {
1677 free_page((unsigned long) b_data); 1351 free_pages((unsigned long) b_data, order);
1678 goto abort; 1352 goto abort;
1679 } 1353 }
1680 bh->b_reqnext = NULL; 1354 bh->b_reqnext = NULL;
1681 bh->b_data = b_data; 1355 bh->b_data = b_data;
1682 bh->b_size = PAGE_SIZE; 1356 bh->b_size = b_allocd;
1683 atomic_set(&bh->b_count, full ? bh->b_size : 0); 1357 atomic_set(&bh->b_count, full ? bh->b_size : 0);
1684 prev_bh->b_reqnext = bh; 1358 prev_bh->b_reqnext = bh;
1359
1360 pages &= (order-1);
1685 } 1361 }
1362
1686 bh->b_size -= tape->excess_bh_size; 1363 bh->b_size -= tape->excess_bh_size;
1687 if (full) 1364 if (full)
1688 atomic_sub(tape->excess_bh_size, &bh->b_count); 1365 atomic_sub(tape->excess_bh_size, &bh->b_count);
1689 return stage; 1366 return merge_bh;
1690abort: 1367abort:
1691 __idetape_kfree_stage(stage); 1368 ide_tape_kfree_buffer(tape);
1692 return NULL; 1369 return NULL;
1693} 1370}
1694 1371
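The allocation loop above repeatedly grabs the largest power-of-two block that still fits, deriving the order with fls() and masking off the pages just covered. A small standalone C sketch of the same chunking idea (userspace, with a local fls substitute):

	#include <stdio.h>

	static int fls_(unsigned int x)		/* like the kernel's fls() */
	{
		int r = 0;

		while (x) {
			x >>= 1;
			r++;
		}
		return r;
	}

	int main(void)
	{
		unsigned int pages = 13;	/* e.g. tape->pages_per_buffer */

		while (pages) {
			unsigned int order = fls_(pages) - 1;

			printf("allocate 2^%u = %u pages\n", order, 1u << order);
			pages &= (1u << order) - 1;	/* drop the bit just served */
		}
		return 0;
	}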
1695static idetape_stage_t *idetape_kmalloc_stage(idetape_tape_t *tape)
1696{
1697 idetape_stage_t *cache_stage = tape->cache_stage;
1698
1699 debug_log(DBG_PROCS, "Enter %s\n", __func__);
1700
1701 if (tape->nr_stages >= tape->max_stages)
1702 return NULL;
1703 if (cache_stage != NULL) {
1704 tape->cache_stage = NULL;
1705 return cache_stage;
1706 }
1707 return __idetape_kmalloc_stage(tape, 0, 0);
1708}
1709
1710static int idetape_copy_stage_from_user(idetape_tape_t *tape, 1372static int idetape_copy_stage_from_user(idetape_tape_t *tape,
1711 idetape_stage_t *stage, const char __user *buf, int n) 1373 const char __user *buf, int n)
1712{ 1374{
1713 struct idetape_bh *bh = tape->bh; 1375 struct idetape_bh *bh = tape->bh;
1714 int count; 1376 int count;
@@ -1740,7 +1402,7 @@ static int idetape_copy_stage_from_user(idetape_tape_t *tape,
1740} 1402}
1741 1403
1742static int idetape_copy_stage_to_user(idetape_tape_t *tape, char __user *buf, 1404static int idetape_copy_stage_to_user(idetape_tape_t *tape, char __user *buf,
1743 idetape_stage_t *stage, int n) 1405 int n)
1744{ 1406{
1745 struct idetape_bh *bh = tape->bh; 1407 struct idetape_bh *bh = tape->bh;
1746 int count; 1408 int count;
@@ -1771,11 +1433,11 @@ static int idetape_copy_stage_to_user(idetape_tape_t *tape, char __user *buf,
1771 return ret; 1433 return ret;
1772} 1434}
1773 1435
1774static void idetape_init_merge_stage(idetape_tape_t *tape) 1436static void idetape_init_merge_buffer(idetape_tape_t *tape)
1775{ 1437{
1776 struct idetape_bh *bh = tape->merge_stage->bh; 1438 struct idetape_bh *bh = tape->merge_bh;
1439 tape->bh = tape->merge_bh;
1777 1440
1778 tape->bh = bh;
1779 if (tape->chrdev_dir == IDETAPE_DIR_WRITE) 1441 if (tape->chrdev_dir == IDETAPE_DIR_WRITE)
1780 atomic_set(&bh->b_count, 0); 1442 atomic_set(&bh->b_count, 0);
1781 else { 1443 else {
@@ -1784,61 +1446,6 @@ static void idetape_init_merge_stage(idetape_tape_t *tape)
1784 } 1446 }
1785} 1447}
1786 1448
1787static void idetape_switch_buffers(idetape_tape_t *tape, idetape_stage_t *stage)
1788{
1789 struct idetape_bh *tmp;
1790
1791 tmp = stage->bh;
1792 stage->bh = tape->merge_stage->bh;
1793 tape->merge_stage->bh = tmp;
1794 idetape_init_merge_stage(tape);
1795}
1796
1797/* Add a new stage at the end of the pipeline. */
1798static void idetape_add_stage_tail(ide_drive_t *drive, idetape_stage_t *stage)
1799{
1800 idetape_tape_t *tape = drive->driver_data;
1801 unsigned long flags;
1802
1803 debug_log(DBG_PROCS, "Enter %s\n", __func__);
1804
1805 spin_lock_irqsave(&tape->lock, flags);
1806 stage->next = NULL;
1807 if (tape->last_stage != NULL)
1808 tape->last_stage->next = stage;
1809 else
1810 tape->first_stage = stage;
1811 tape->next_stage = stage;
1812 tape->last_stage = stage;
1813 if (tape->next_stage == NULL)
1814 tape->next_stage = tape->last_stage;
1815 tape->nr_stages++;
1816 tape->nr_pending_stages++;
1817 spin_unlock_irqrestore(&tape->lock, flags);
1818}
1819
1820/* Install a completion in a pending request and sleep until it is serviced. The
1821 * caller should ensure that the request will not be serviced before we install
1822 * the completion (usually by disabling interrupts).
1823 */
1824static void idetape_wait_for_request(ide_drive_t *drive, struct request *rq)
1825{
1826 DECLARE_COMPLETION_ONSTACK(wait);
1827 idetape_tape_t *tape = drive->driver_data;
1828
1829 if (rq == NULL || !blk_special_request(rq)) {
1830 printk(KERN_ERR "ide-tape: bug: Trying to sleep on non-valid"
1831 " request\n");
1832 return;
1833 }
1834 rq->end_io_data = &wait;
1835 rq->end_io = blk_end_sync_rq;
1836 spin_unlock_irq(&tape->lock);
1837 wait_for_completion(&wait);
1838 /* The stage and its struct request have been deallocated */
1839 spin_lock_irq(&tape->lock);
1840}
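
idetape_wait_for_request(), removed above, is a textbook use of the completion hand-off: the submitter parks a completion in the request, and the end_io hook fires it when the request finishes. A sketch of the pattern against the 2.6.25-era block API; my_end_io()/my_wait_for_rq() are illustrative names, and the enqueue step is elided:

#include <linux/blkdev.h>
#include <linux/completion.h>

/* completion side: runs when the request is done, wakes the submitter */
static void my_end_io(struct request *rq, int error)
{
	struct completion *waiting = rq->end_io_data;

	rq->end_io_data = NULL;
	complete(waiting);
}

/* submitter side: park a completion in the request, then sleep on it */
static void my_wait_for_rq(struct request *rq)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	rq->end_io_data = &wait;
	rq->end_io = my_end_io;
	/* ... the already-queued request completes in interrupt context */
	wait_for_completion(&wait);
}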
1841
1842static ide_startstop_t idetape_read_position_callback(ide_drive_t *drive) 1449static ide_startstop_t idetape_read_position_callback(ide_drive_t *drive)
1843{ 1450{
1844 idetape_tape_t *tape = drive->driver_data; 1451 idetape_tape_t *tape = drive->driver_data;
@@ -1907,7 +1514,7 @@ static void idetape_create_test_unit_ready_cmd(struct ide_atapi_pc *pc)
1907 * to the request list without waiting for it to be serviced! In that case, we 1514 * to the request list without waiting for it to be serviced! In that case, we
1908 * usually use idetape_queue_pc_head(). 1515 * usually use idetape_queue_pc_head().
1909 */ 1516 */
1910static int __idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc) 1517static int idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
1911{ 1518{
1912 struct ide_tape_obj *tape = drive->driver_data; 1519 struct ide_tape_obj *tape = drive->driver_data;
1913 struct request rq; 1520 struct request rq;
@@ -1939,7 +1546,7 @@ static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
1939 timeout += jiffies; 1546 timeout += jiffies;
1940 while (time_before(jiffies, timeout)) { 1547 while (time_before(jiffies, timeout)) {
1941 idetape_create_test_unit_ready_cmd(&pc); 1548 idetape_create_test_unit_ready_cmd(&pc);
1942 if (!__idetape_queue_pc_tail(drive, &pc)) 1549 if (!idetape_queue_pc_tail(drive, &pc))
1943 return 0; 1550 return 0;
1944 if ((tape->sense_key == 2 && tape->asc == 4 && tape->ascq == 2) 1551 if ((tape->sense_key == 2 && tape->asc == 4 && tape->ascq == 2)
1945 || (tape->asc == 0x3A)) { 1552 || (tape->asc == 0x3A)) {
@@ -1948,7 +1555,7 @@ static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
1948 return -ENOMEDIUM; 1555 return -ENOMEDIUM;
1949 idetape_create_load_unload_cmd(drive, &pc, 1556 idetape_create_load_unload_cmd(drive, &pc,
1950 IDETAPE_LU_LOAD_MASK); 1557 IDETAPE_LU_LOAD_MASK);
1951 __idetape_queue_pc_tail(drive, &pc); 1558 idetape_queue_pc_tail(drive, &pc);
1952 load_attempted = 1; 1559 load_attempted = 1;
1953 /* not about to be ready */ 1560 /* not about to be ready */
1954 } else if (!(tape->sense_key == 2 && tape->asc == 4 && 1561 } else if (!(tape->sense_key == 2 && tape->asc == 4 &&
@@ -1959,11 +1566,6 @@ static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
1959 return -EIO; 1566 return -EIO;
1960} 1567}
1961 1568
1962static int idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
1963{
1964 return __idetape_queue_pc_tail(drive, pc);
1965}
1966
1967static int idetape_flush_tape_buffers(ide_drive_t *drive) 1569static int idetape_flush_tape_buffers(ide_drive_t *drive)
1968{ 1570{
1969 struct ide_atapi_pc pc; 1571 struct ide_atapi_pc pc;
@@ -2029,50 +1631,21 @@ static int idetape_create_prevent_cmd(ide_drive_t *drive,
2029 return 1; 1631 return 1;
2030} 1632}
2031 1633
2032static int __idetape_discard_read_pipeline(ide_drive_t *drive) 1634static void __ide_tape_discard_merge_buffer(ide_drive_t *drive)
2033{ 1635{
2034 idetape_tape_t *tape = drive->driver_data; 1636 idetape_tape_t *tape = drive->driver_data;
2035 unsigned long flags;
2036 int cnt;
2037 1637
2038 if (tape->chrdev_dir != IDETAPE_DIR_READ) 1638 if (tape->chrdev_dir != IDETAPE_DIR_READ)
2039 return 0; 1639 return;
2040 1640
2041 /* Remove merge stage. */ 1641 clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags);
2042 cnt = tape->merge_stage_size / tape->blk_size; 1642 tape->merge_bh_size = 0;
2043 if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) 1643 if (tape->merge_bh != NULL) {
2044 ++cnt; /* Filemarks count as 1 sector */ 1644 ide_tape_kfree_buffer(tape);
2045 tape->merge_stage_size = 0; 1645 tape->merge_bh = NULL;
2046 if (tape->merge_stage != NULL) {
2047 __idetape_kfree_stage(tape->merge_stage);
2048 tape->merge_stage = NULL;
2049 } 1646 }
2050 1647
2051 /* Clear pipeline flags. */
2052 clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);
2053 tape->chrdev_dir = IDETAPE_DIR_NONE; 1648 tape->chrdev_dir = IDETAPE_DIR_NONE;
2054
2055 /* Remove pipeline stages. */
2056 if (tape->first_stage == NULL)
2057 return 0;
2058
2059 spin_lock_irqsave(&tape->lock, flags);
2060 tape->next_stage = NULL;
2061 if (idetape_pipeline_active(tape))
2062 idetape_wait_for_request(drive, tape->active_data_rq);
2063 spin_unlock_irqrestore(&tape->lock, flags);
2064
2065 while (tape->first_stage != NULL) {
2066 struct request *rq_ptr = &tape->first_stage->rq;
2067
2068 cnt += rq_ptr->nr_sectors - rq_ptr->current_nr_sectors;
2069 if (rq_ptr->errors == IDETAPE_ERROR_FILEMARK)
2070 ++cnt;
2071 idetape_remove_stage_head(drive);
2072 }
2073 tape->nr_pending_stages = 0;
2074 tape->max_stages = tape->min_pipeline;
2075 return cnt;
2076} 1649}
2077 1650
2078/* 1651/*
@@ -2089,7 +1662,7 @@ static int idetape_position_tape(ide_drive_t *drive, unsigned int block,
2089 struct ide_atapi_pc pc; 1662 struct ide_atapi_pc pc;
2090 1663
2091 if (tape->chrdev_dir == IDETAPE_DIR_READ) 1664 if (tape->chrdev_dir == IDETAPE_DIR_READ)
2092 __idetape_discard_read_pipeline(drive); 1665 __ide_tape_discard_merge_buffer(drive);
2093 idetape_wait_ready(drive, 60 * 5 * HZ); 1666 idetape_wait_ready(drive, 60 * 5 * HZ);
2094 idetape_create_locate_cmd(drive, &pc, block, partition, skip); 1667 idetape_create_locate_cmd(drive, &pc, block, partition, skip);
2095 retval = idetape_queue_pc_tail(drive, &pc); 1668 retval = idetape_queue_pc_tail(drive, &pc);
@@ -2100,20 +1673,19 @@ static int idetape_position_tape(ide_drive_t *drive, unsigned int block,
2100 return (idetape_queue_pc_tail(drive, &pc)); 1673 return (idetape_queue_pc_tail(drive, &pc));
2101} 1674}
2102 1675
2103static void idetape_discard_read_pipeline(ide_drive_t *drive, 1676static void ide_tape_discard_merge_buffer(ide_drive_t *drive,
2104 int restore_position) 1677 int restore_position)
2105{ 1678{
2106 idetape_tape_t *tape = drive->driver_data; 1679 idetape_tape_t *tape = drive->driver_data;
2107 int cnt;
2108 int seek, position; 1680 int seek, position;
2109 1681
2110 cnt = __idetape_discard_read_pipeline(drive); 1682 __ide_tape_discard_merge_buffer(drive);
2111 if (restore_position) { 1683 if (restore_position) {
2112 position = idetape_read_position(drive); 1684 position = idetape_read_position(drive);
2113 seek = position > cnt ? position - cnt : 0; 1685 seek = position > 0 ? position : 0;
2114 if (idetape_position_tape(drive, seek, 0, 0)) { 1686 if (idetape_position_tape(drive, seek, 0, 0)) {
2115 printk(KERN_INFO "ide-tape: %s: position_tape failed in" 1687 printk(KERN_INFO "ide-tape: %s: position_tape failed in"
2116 " discard_pipeline()\n", tape->name); 1688 " %s\n", tape->name, __func__);
2117 return; 1689 return;
2118 } 1690 }
2119 } 1691 }
@@ -2131,12 +1703,6 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int blocks,
2131 1703
2132 debug_log(DBG_SENSE, "%s: cmd=%d\n", __func__, cmd); 1704 debug_log(DBG_SENSE, "%s: cmd=%d\n", __func__, cmd);
2133 1705
2134 if (idetape_pipeline_active(tape)) {
2135 printk(KERN_ERR "ide-tape: bug: the pipeline is active in %s\n",
2136 __func__);
2137 return (0);
2138 }
2139
2140 idetape_init_rq(&rq, cmd); 1706 idetape_init_rq(&rq, cmd);
2141 rq.rq_disk = tape->disk; 1707 rq.rq_disk = tape->disk;
2142 rq.special = (void *)bh; 1708 rq.special = (void *)bh;
@@ -2148,27 +1714,13 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int blocks,
2148 if ((cmd & (REQ_IDETAPE_READ | REQ_IDETAPE_WRITE)) == 0) 1714 if ((cmd & (REQ_IDETAPE_READ | REQ_IDETAPE_WRITE)) == 0)
2149 return 0; 1715 return 0;
2150 1716
2151 if (tape->merge_stage) 1717 if (tape->merge_bh)
2152 idetape_init_merge_stage(tape); 1718 idetape_init_merge_buffer(tape);
2153 if (rq.errors == IDETAPE_ERROR_GENERAL) 1719 if (rq.errors == IDETAPE_ERROR_GENERAL)
2154 return -EIO; 1720 return -EIO;
2155 return (tape->blk_size * (blocks-rq.current_nr_sectors)); 1721 return (tape->blk_size * (blocks-rq.current_nr_sectors));
2156} 1722}
2157 1723
2158/* start servicing the pipeline stages, starting from tape->next_stage. */
2159static void idetape_plug_pipeline(ide_drive_t *drive)
2160{
2161 idetape_tape_t *tape = drive->driver_data;
2162
2163 if (tape->next_stage == NULL)
2164 return;
2165 if (!idetape_pipeline_active(tape)) {
2166 set_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags);
2167 idetape_activate_next_stage(drive);
2168 (void) ide_do_drive_cmd(drive, tape->active_data_rq, ide_end);
2169 }
2170}
2171
2172static void idetape_create_inquiry_cmd(struct ide_atapi_pc *pc) 1724static void idetape_create_inquiry_cmd(struct ide_atapi_pc *pc)
2173{ 1725{
2174 idetape_init_pc(pc); 1726 idetape_init_pc(pc);
@@ -2206,135 +1758,39 @@ static void idetape_create_space_cmd(struct ide_atapi_pc *pc, int count, u8 cmd)
2206 pc->idetape_callback = &idetape_pc_callback; 1758 pc->idetape_callback = &idetape_pc_callback;
2207} 1759}
2208 1760
2209static void idetape_wait_first_stage(ide_drive_t *drive) 1761/* Queue up a character device originated write request. */
2210{
2211 idetape_tape_t *tape = drive->driver_data;
2212 unsigned long flags;
2213
2214 if (tape->first_stage == NULL)
2215 return;
2216 spin_lock_irqsave(&tape->lock, flags);
2217 if (tape->active_stage == tape->first_stage)
2218 idetape_wait_for_request(drive, tape->active_data_rq);
2219 spin_unlock_irqrestore(&tape->lock, flags);
2220}
2221
2222/*
2223 * Try to add a character device originated write request to our pipeline. In
2224 * case we don't succeed, we revert to non-pipelined operation mode for this
2225 * request. In order to accomplish that, we
2226 *
2227 * 1. Try to allocate a new pipeline stage.
2228 * 2. If we can't, wait for more and more requests to be serviced and try again
2229 * each time.
2230 * 3. If we still can't allocate a stage, fallback to non-pipelined operation
2231 * mode for this request.
2232 */
2233static int idetape_add_chrdev_write_request(ide_drive_t *drive, int blocks) 1762static int idetape_add_chrdev_write_request(ide_drive_t *drive, int blocks)
2234{ 1763{
2235 idetape_tape_t *tape = drive->driver_data; 1764 idetape_tape_t *tape = drive->driver_data;
2236 idetape_stage_t *new_stage;
2237 unsigned long flags;
2238 struct request *rq;
2239 1765
2240 debug_log(DBG_CHRDEV, "Enter %s\n", __func__); 1766 debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
2241 1767
2242 /* Attempt to allocate a new stage. Beware possible race conditions. */ 1768 return idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
2243 while ((new_stage = idetape_kmalloc_stage(tape)) == NULL) { 1769 blocks, tape->merge_bh);
2244 spin_lock_irqsave(&tape->lock, flags);
2245 if (idetape_pipeline_active(tape)) {
2246 idetape_wait_for_request(drive, tape->active_data_rq);
2247 spin_unlock_irqrestore(&tape->lock, flags);
2248 } else {
2249 spin_unlock_irqrestore(&tape->lock, flags);
2250 idetape_plug_pipeline(drive);
2251 if (idetape_pipeline_active(tape))
2252 continue;
2253 /*
2254 * The machine is short on memory. Fallback to non-
2255 * pipelined operation mode for this request.
2256 */
2257 return idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
2258 blocks, tape->merge_stage->bh);
2259 }
2260 }
2261 rq = &new_stage->rq;
2262 idetape_init_rq(rq, REQ_IDETAPE_WRITE);
2263 /* Doesn't actually matter - We always assume sequential access */
2264 rq->sector = tape->first_frame;
2265 rq->current_nr_sectors = blocks;
2266 rq->nr_sectors = blocks;
2267
2268 idetape_switch_buffers(tape, new_stage);
2269 idetape_add_stage_tail(drive, new_stage);
2270 tape->pipeline_head++;
2271 idetape_calculate_speeds(drive);
2272
2273 /*
2274 * Estimate whether the tape has stopped writing by checking if our
2275 * write pipeline is currently empty. If we are not writing anymore,
2276 * wait for the pipeline to be almost completely full (90%) before
2277 * starting to service requests, so that we will be able to keep up with
2278 * the higher speeds of the tape.
2279 */
2280 if (!idetape_pipeline_active(tape)) {
2281 if (tape->nr_stages >= tape->max_stages * 9 / 10 ||
2282 tape->nr_stages >= tape->max_stages -
2283 tape->uncontrolled_pipeline_head_speed * 3 * 1024 /
2284 tape->blk_size) {
2285 tape->measure_insert_time = 1;
2286 tape->insert_time = jiffies;
2287 tape->insert_size = 0;
2288 tape->insert_speed = 0;
2289 idetape_plug_pipeline(drive);
2290 }
2291 }
2292 if (test_and_clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags))
2293 /* Return a deferred error */
2294 return -EIO;
2295 return blocks;
2296}
2297
2298/*
2299 * Wait until all pending pipeline requests are serviced. Typically called on
2300 * device close.
2301 */
2302static void idetape_wait_for_pipeline(ide_drive_t *drive)
2303{
2304 idetape_tape_t *tape = drive->driver_data;
2305 unsigned long flags;
2306
2307 while (tape->next_stage || idetape_pipeline_active(tape)) {
2308 idetape_plug_pipeline(drive);
2309 spin_lock_irqsave(&tape->lock, flags);
2310 if (idetape_pipeline_active(tape))
2311 idetape_wait_for_request(drive, tape->active_data_rq);
2312 spin_unlock_irqrestore(&tape->lock, flags);
2313 }
2314} 1770}
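
For reference, the numbered comment deleted above (steps 1-3) described an allocate-or-wait-or-fall-back loop; its skeleton, with the locking elided and every helper an illustrative stub rather than a driver function:

struct tape;				/* opaque, illustrative */
void *stage_alloc(struct tape *t);
int pipeline_active(struct tape *t);
void wait_one_request(struct tape *t);
void kick_pipeline(struct tape *t);
int write_synchronously(struct tape *t, int blocks);
void queue_stage(struct tape *t, void *stage);

static int pipelined_write(struct tape *t, int blocks)
{
	void *stage;

	/* 1. try to allocate a new pipeline stage */
	while ((stage = stage_alloc(t)) == NULL) {
		if (pipeline_active(t)) {
			/* 2. wait for a request to finish, then retry */
			wait_one_request(t);
			continue;
		}
		kick_pipeline(t);
		if (pipeline_active(t))
			continue;
		/* 3. short on memory: non-pipelined fallback */
		return write_synchronously(t, blocks);
	}
	queue_stage(t, stage);
	return blocks;
}

The patch replaces all of this with the single synchronous idetape_queue_rw_tail() call in the new idetape_add_chrdev_write_request() above.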
2315 1771
2316static void idetape_empty_write_pipeline(ide_drive_t *drive) 1772static void ide_tape_flush_merge_buffer(ide_drive_t *drive)
2317{ 1773{
2318 idetape_tape_t *tape = drive->driver_data; 1774 idetape_tape_t *tape = drive->driver_data;
2319 int blocks, min; 1775 int blocks, min;
2320 struct idetape_bh *bh; 1776 struct idetape_bh *bh;
2321 1777
2322 if (tape->chrdev_dir != IDETAPE_DIR_WRITE) { 1778 if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
2323 printk(KERN_ERR "ide-tape: bug: Trying to empty write pipeline," 1779 printk(KERN_ERR "ide-tape: bug: Trying to empty merge buffer"
2324 " but we are not writing.\n"); 1780 " but we are not writing.\n");
2325 return; 1781 return;
2326 } 1782 }
2327 if (tape->merge_stage_size > tape->stage_size) { 1783 if (tape->merge_bh_size > tape->buffer_size) {
2328 printk(KERN_ERR "ide-tape: bug: merge_buffer too big\n"); 1784 printk(KERN_ERR "ide-tape: bug: merge_buffer too big\n");
2329 tape->merge_stage_size = tape->stage_size; 1785 tape->merge_bh_size = tape->buffer_size;
2330 } 1786 }
2331 if (tape->merge_stage_size) { 1787 if (tape->merge_bh_size) {
2332 blocks = tape->merge_stage_size / tape->blk_size; 1788 blocks = tape->merge_bh_size / tape->blk_size;
2333 if (tape->merge_stage_size % tape->blk_size) { 1789 if (tape->merge_bh_size % tape->blk_size) {
2334 unsigned int i; 1790 unsigned int i;
2335 1791
2336 blocks++; 1792 blocks++;
2337 i = tape->blk_size - tape->merge_stage_size % 1793 i = tape->blk_size - tape->merge_bh_size %
2338 tape->blk_size; 1794 tape->blk_size;
2339 bh = tape->bh->b_reqnext; 1795 bh = tape->bh->b_reqnext;
2340 while (bh) { 1796 while (bh) {
@@ -2358,74 +1814,33 @@ static void idetape_empty_write_pipeline(ide_drive_t *drive)
2358 } 1814 }
2359 } 1815 }
2360 (void) idetape_add_chrdev_write_request(drive, blocks); 1816 (void) idetape_add_chrdev_write_request(drive, blocks);
2361 tape->merge_stage_size = 0; 1817 tape->merge_bh_size = 0;
2362 } 1818 }
2363 idetape_wait_for_pipeline(drive); 1819 if (tape->merge_bh != NULL) {
2364 if (tape->merge_stage != NULL) { 1820 ide_tape_kfree_buffer(tape);
2365 __idetape_kfree_stage(tape->merge_stage); 1821 tape->merge_bh = NULL;
2366 tape->merge_stage = NULL;
2367 } 1822 }
2368 clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);
2369 tape->chrdev_dir = IDETAPE_DIR_NONE; 1823 tape->chrdev_dir = IDETAPE_DIR_NONE;
2370
2371 /*
2372 * On the next backup, perform the feedback loop again. (I don't want to
2373 * keep sense information between backups, as some systems are
2374 * constantly on, and the system load can be totally different on the
2375 * next backup).
2376 */
2377 tape->max_stages = tape->min_pipeline;
2378 if (tape->first_stage != NULL ||
2379 tape->next_stage != NULL ||
2380 tape->last_stage != NULL ||
2381 tape->nr_stages != 0) {
2382 printk(KERN_ERR "ide-tape: ide-tape pipeline bug, "
2383 "first_stage %p, next_stage %p, "
2384 "last_stage %p, nr_stages %d\n",
2385 tape->first_stage, tape->next_stage,
2386 tape->last_stage, tape->nr_stages);
2387 }
2388} 1824}
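
The tail-block rounding in ide_tape_flush_merge_buffer() above, pulled out as a worked example: a partial last block is counted as a full one and the shortfall is padded with zeros. The 512-byte block size and 1300-byte merge buffer are illustrative values:

#include <stdio.h>

int main(void)
{
	unsigned int blk_size = 512, merge_bh_size = 1300;
	unsigned int blocks = merge_bh_size / blk_size;		/* 2 */
	unsigned int pad = 0;

	if (merge_bh_size % blk_size) {
		blocks++;					/* 3 */
		pad = blk_size - merge_bh_size % blk_size;	/* 236 */
	}
	printf("%u blocks, %u pad bytes\n", blocks, pad);
	return 0;
}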
2389 1825
2390static void idetape_restart_speed_control(ide_drive_t *drive) 1826static int idetape_init_read(ide_drive_t *drive)
2391{ 1827{
2392 idetape_tape_t *tape = drive->driver_data; 1828 idetape_tape_t *tape = drive->driver_data;
2393
2394 tape->restart_speed_control_req = 0;
2395 tape->pipeline_head = 0;
2396 tape->controlled_last_pipeline_head = 0;
2397 tape->controlled_previous_pipeline_head = 0;
2398 tape->uncontrolled_previous_pipeline_head = 0;
2399 tape->controlled_pipeline_head_speed = 5000;
2400 tape->pipeline_head_speed = 5000;
2401 tape->uncontrolled_pipeline_head_speed = 0;
2402 tape->controlled_pipeline_head_time =
2403 tape->uncontrolled_pipeline_head_time = jiffies;
2404 tape->controlled_previous_head_time =
2405 tape->uncontrolled_previous_head_time = jiffies;
2406}
2407
2408static int idetape_init_read(ide_drive_t *drive, int max_stages)
2409{
2410 idetape_tape_t *tape = drive->driver_data;
2411 idetape_stage_t *new_stage;
2412 struct request rq;
2413 int bytes_read; 1829 int bytes_read;
2414 u16 blocks = *(u16 *)&tape->caps[12];
2415 1830
2416 /* Initialize read operation */ 1831 /* Initialize read operation */
2417 if (tape->chrdev_dir != IDETAPE_DIR_READ) { 1832 if (tape->chrdev_dir != IDETAPE_DIR_READ) {
2418 if (tape->chrdev_dir == IDETAPE_DIR_WRITE) { 1833 if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
2419 idetape_empty_write_pipeline(drive); 1834 ide_tape_flush_merge_buffer(drive);
2420 idetape_flush_tape_buffers(drive); 1835 idetape_flush_tape_buffers(drive);
2421 } 1836 }
2422 if (tape->merge_stage || tape->merge_stage_size) { 1837 if (tape->merge_bh || tape->merge_bh_size) {
2423 printk(KERN_ERR "ide-tape: merge_stage_size should be" 1838 printk(KERN_ERR "ide-tape: merge_bh_size should be"
2424 " 0 now\n"); 1839 " 0 now\n");
2425 tape->merge_stage_size = 0; 1840 tape->merge_bh_size = 0;
2426 } 1841 }
2427 tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0); 1842 tape->merge_bh = ide_tape_kmalloc_buffer(tape, 0, 0);
2428 if (!tape->merge_stage) 1843 if (!tape->merge_bh)
2429 return -ENOMEM; 1844 return -ENOMEM;
2430 tape->chrdev_dir = IDETAPE_DIR_READ; 1845 tape->chrdev_dir = IDETAPE_DIR_READ;
2431 1846
@@ -2438,54 +1853,23 @@ static int idetape_init_read(ide_drive_t *drive, int max_stages)
2438 if (drive->dsc_overlap) { 1853 if (drive->dsc_overlap) {
2439 bytes_read = idetape_queue_rw_tail(drive, 1854 bytes_read = idetape_queue_rw_tail(drive,
2440 REQ_IDETAPE_READ, 0, 1855 REQ_IDETAPE_READ, 0,
2441 tape->merge_stage->bh); 1856 tape->merge_bh);
2442 if (bytes_read < 0) { 1857 if (bytes_read < 0) {
2443 __idetape_kfree_stage(tape->merge_stage); 1858 ide_tape_kfree_buffer(tape);
2444 tape->merge_stage = NULL; 1859 tape->merge_bh = NULL;
2445 tape->chrdev_dir = IDETAPE_DIR_NONE; 1860 tape->chrdev_dir = IDETAPE_DIR_NONE;
2446 return bytes_read; 1861 return bytes_read;
2447 } 1862 }
2448 } 1863 }
2449 } 1864 }
2450 if (tape->restart_speed_control_req) 1865
2451 idetape_restart_speed_control(drive);
2452 idetape_init_rq(&rq, REQ_IDETAPE_READ);
2453 rq.sector = tape->first_frame;
2454 rq.nr_sectors = blocks;
2455 rq.current_nr_sectors = blocks;
2456 if (!test_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags) &&
2457 tape->nr_stages < max_stages) {
2458 new_stage = idetape_kmalloc_stage(tape);
2459 while (new_stage != NULL) {
2460 new_stage->rq = rq;
2461 idetape_add_stage_tail(drive, new_stage);
2462 if (tape->nr_stages >= max_stages)
2463 break;
2464 new_stage = idetape_kmalloc_stage(tape);
2465 }
2466 }
2467 if (!idetape_pipeline_active(tape)) {
2468 if (tape->nr_pending_stages >= 3 * max_stages / 4) {
2469 tape->measure_insert_time = 1;
2470 tape->insert_time = jiffies;
2471 tape->insert_size = 0;
2472 tape->insert_speed = 0;
2473 idetape_plug_pipeline(drive);
2474 }
2475 }
2476 return 0; 1866 return 0;
2477} 1867}
2478 1868
2479/* 1869/* called from idetape_chrdev_read() to service a chrdev read request. */
2480 * Called from idetape_chrdev_read() to service a character device read request
2481 * and add read-ahead requests to our pipeline.
2482 */
2483static int idetape_add_chrdev_read_request(ide_drive_t *drive, int blocks) 1870static int idetape_add_chrdev_read_request(ide_drive_t *drive, int blocks)
2484{ 1871{
2485 idetape_tape_t *tape = drive->driver_data; 1872 idetape_tape_t *tape = drive->driver_data;
2486 unsigned long flags;
2487 struct request *rq_ptr;
2488 int bytes_read;
2489 1873
2490 debug_log(DBG_PROCS, "Enter %s, %d blocks\n", __func__, blocks); 1874 debug_log(DBG_PROCS, "Enter %s, %d blocks\n", __func__, blocks);
2491 1875
@@ -2493,39 +1877,10 @@ static int idetape_add_chrdev_read_request(ide_drive_t *drive, int blocks)
2493 if (test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) 1877 if (test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
2494 return 0; 1878 return 0;
2495 1879
2496 /* Wait for the next block to reach the head of the pipeline. */ 1880 idetape_init_read(drive);
2497 idetape_init_read(drive, tape->max_stages);
2498 if (tape->first_stage == NULL) {
2499 if (test_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags))
2500 return 0;
2501 return idetape_queue_rw_tail(drive, REQ_IDETAPE_READ, blocks,
2502 tape->merge_stage->bh);
2503 }
2504 idetape_wait_first_stage(drive);
2505 rq_ptr = &tape->first_stage->rq;
2506 bytes_read = tape->blk_size * (rq_ptr->nr_sectors -
2507 rq_ptr->current_nr_sectors);
2508 rq_ptr->nr_sectors = 0;
2509 rq_ptr->current_nr_sectors = 0;
2510 1881
2511 if (rq_ptr->errors == IDETAPE_ERROR_EOD) 1882 return idetape_queue_rw_tail(drive, REQ_IDETAPE_READ, blocks,
2512 return 0; 1883 tape->merge_bh);
2513 else {
2514 idetape_switch_buffers(tape, tape->first_stage);
2515 if (rq_ptr->errors == IDETAPE_ERROR_FILEMARK)
2516 set_bit(IDETAPE_FLAG_FILEMARK, &tape->flags);
2517 spin_lock_irqsave(&tape->lock, flags);
2518 idetape_remove_stage_head(drive);
2519 spin_unlock_irqrestore(&tape->lock, flags);
2520 tape->pipeline_head++;
2521 idetape_calculate_speeds(drive);
2522 }
2523 if (bytes_read > blocks * tape->blk_size) {
2524 printk(KERN_ERR "ide-tape: bug: trying to return more bytes"
2525 " than requested\n");
2526 bytes_read = blocks * tape->blk_size;
2527 }
2528 return (bytes_read);
2529} 1884}
2530 1885
2531static void idetape_pad_zeros(ide_drive_t *drive, int bcount) 1886static void idetape_pad_zeros(ide_drive_t *drive, int bcount)
@@ -2537,8 +1892,8 @@ static void idetape_pad_zeros(ide_drive_t *drive, int bcount)
2537 while (bcount) { 1892 while (bcount) {
2538 unsigned int count; 1893 unsigned int count;
2539 1894
2540 bh = tape->merge_stage->bh; 1895 bh = tape->merge_bh;
2541 count = min(tape->stage_size, bcount); 1896 count = min(tape->buffer_size, bcount);
2542 bcount -= count; 1897 bcount -= count;
2543 blocks = count / tape->blk_size; 1898 blocks = count / tape->blk_size;
2544 while (count) { 1899 while (count) {
@@ -2549,31 +1904,10 @@ static void idetape_pad_zeros(ide_drive_t *drive, int bcount)
2549 bh = bh->b_reqnext; 1904 bh = bh->b_reqnext;
2550 } 1905 }
2551 idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, blocks, 1906 idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, blocks,
2552 tape->merge_stage->bh); 1907 tape->merge_bh);
2553 } 1908 }
2554} 1909}
2555 1910
2556static int idetape_pipeline_size(ide_drive_t *drive)
2557{
2558 idetape_tape_t *tape = drive->driver_data;
2559 idetape_stage_t *stage;
2560 struct request *rq;
2561 int size = 0;
2562
2563 idetape_wait_for_pipeline(drive);
2564 stage = tape->first_stage;
2565 while (stage != NULL) {
2566 rq = &stage->rq;
2567 size += tape->blk_size * (rq->nr_sectors -
2568 rq->current_nr_sectors);
2569 if (rq->errors == IDETAPE_ERROR_FILEMARK)
2570 size += tape->blk_size;
2571 stage = stage->next;
2572 }
2573 size += tape->merge_stage_size;
2574 return size;
2575}
2576
2577/* 1911/*
2578 * Rewinds the tape to the Beginning Of the current Partition (BOP). We 1912 * Rewinds the tape to the Beginning Of the current Partition (BOP). We
2579 * currently support only one partition. 1913 * currently support only one partition.
@@ -2619,11 +1953,10 @@ static int idetape_blkdev_ioctl(ide_drive_t *drive, unsigned int cmd,
2619 if (copy_from_user(&config, argp, sizeof(config))) 1953 if (copy_from_user(&config, argp, sizeof(config)))
2620 return -EFAULT; 1954 return -EFAULT;
2621 tape->best_dsc_rw_freq = config.dsc_rw_frequency; 1955 tape->best_dsc_rw_freq = config.dsc_rw_frequency;
2622 tape->max_stages = config.nr_stages;
2623 break; 1956 break;
2624 case 0x0350: 1957 case 0x0350:
2625 config.dsc_rw_frequency = (int) tape->best_dsc_rw_freq; 1958 config.dsc_rw_frequency = (int) tape->best_dsc_rw_freq;
2626 config.nr_stages = tape->max_stages; 1959 config.nr_stages = 1;
2627 if (copy_to_user(argp, &config, sizeof(config))) 1960 if (copy_to_user(argp, &config, sizeof(config)))
2628 return -EFAULT; 1961 return -EFAULT;
2629 break; 1962 break;
@@ -2633,19 +1966,11 @@ static int idetape_blkdev_ioctl(ide_drive_t *drive, unsigned int cmd,
2633 return 0; 1966 return 0;
2634} 1967}
2635 1968
2636/*
2637 * The function below is now a bit more complicated than just passing the
2638 * command to the tape since we may have crossed some filemarks during our
2639 * pipelined read-ahead mode. As a minor side effect, the pipeline enables us to
2640 * support MTFSFM when the filemark is in our internal pipeline even if the tape
2641 * doesn't support spacing over filemarks in the reverse direction.
2642 */
2643static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op, 1969static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op,
2644 int mt_count) 1970 int mt_count)
2645{ 1971{
2646 idetape_tape_t *tape = drive->driver_data; 1972 idetape_tape_t *tape = drive->driver_data;
2647 struct ide_atapi_pc pc; 1973 struct ide_atapi_pc pc;
2648 unsigned long flags;
2649 int retval, count = 0; 1974 int retval, count = 0;
2650 int sprev = !!(tape->caps[4] & 0x20); 1975 int sprev = !!(tape->caps[4] & 0x20);
2651 1976
@@ -2658,48 +1983,12 @@ static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op,
2658 } 1983 }
2659 1984
2660 if (tape->chrdev_dir == IDETAPE_DIR_READ) { 1985 if (tape->chrdev_dir == IDETAPE_DIR_READ) {
2661 /* its a read-ahead buffer, scan it for crossed filemarks. */ 1986 tape->merge_bh_size = 0;
2662 tape->merge_stage_size = 0;
2663 if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) 1987 if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
2664 ++count; 1988 ++count;
2665 while (tape->first_stage != NULL) { 1989 ide_tape_discard_merge_buffer(drive, 0);
2666 if (count == mt_count) {
2667 if (mt_op == MTFSFM)
2668 set_bit(IDETAPE_FLAG_FILEMARK,
2669 &tape->flags);
2670 return 0;
2671 }
2672 spin_lock_irqsave(&tape->lock, flags);
2673 if (tape->first_stage == tape->active_stage) {
2674 /*
2675 * We have reached the active stage in the read
2676 * pipeline. There is no point in allowing the
2677 * drive to continue reading any farther, so we
2678 * stop the pipeline.
2679 *
2680 * This section should be moved to a separate
2681 * subroutine because similar operations are
2682 * done in __idetape_discard_read_pipeline(),
2683 * for example.
2684 */
2685 tape->next_stage = NULL;
2686 spin_unlock_irqrestore(&tape->lock, flags);
2687 idetape_wait_first_stage(drive);
2688 tape->next_stage = tape->first_stage->next;
2689 } else
2690 spin_unlock_irqrestore(&tape->lock, flags);
2691 if (tape->first_stage->rq.errors ==
2692 IDETAPE_ERROR_FILEMARK)
2693 ++count;
2694 idetape_remove_stage_head(drive);
2695 }
2696 idetape_discard_read_pipeline(drive, 0);
2697 } 1990 }
2698 1991
2699 /*
2700 * The filemark was not found in our internal pipeline; now we can issue
2701 * the space command.
2702 */
2703 switch (mt_op) { 1992 switch (mt_op) {
2704 case MTFSF: 1993 case MTFSF:
2705 case MTBSF: 1994 case MTBSF:
@@ -2755,27 +2044,25 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
2755 (count % tape->blk_size) == 0) 2044 (count % tape->blk_size) == 0)
2756 tape->user_bs_factor = count / tape->blk_size; 2045 tape->user_bs_factor = count / tape->blk_size;
2757 } 2046 }
2758 rc = idetape_init_read(drive, tape->max_stages); 2047 rc = idetape_init_read(drive);
2759 if (rc < 0) 2048 if (rc < 0)
2760 return rc; 2049 return rc;
2761 if (count == 0) 2050 if (count == 0)
2762 return (0); 2051 return (0);
2763 if (tape->merge_stage_size) { 2052 if (tape->merge_bh_size) {
2764 actually_read = min((unsigned int)(tape->merge_stage_size), 2053 actually_read = min((unsigned int)(tape->merge_bh_size),
2765 (unsigned int)count); 2054 (unsigned int)count);
2766 if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage, 2055 if (idetape_copy_stage_to_user(tape, buf, actually_read))
2767 actually_read))
2768 ret = -EFAULT; 2056 ret = -EFAULT;
2769 buf += actually_read; 2057 buf += actually_read;
2770 tape->merge_stage_size -= actually_read; 2058 tape->merge_bh_size -= actually_read;
2771 count -= actually_read; 2059 count -= actually_read;
2772 } 2060 }
2773 while (count >= tape->stage_size) { 2061 while (count >= tape->buffer_size) {
2774 bytes_read = idetape_add_chrdev_read_request(drive, ctl); 2062 bytes_read = idetape_add_chrdev_read_request(drive, ctl);
2775 if (bytes_read <= 0) 2063 if (bytes_read <= 0)
2776 goto finish; 2064 goto finish;
2777 if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage, 2065 if (idetape_copy_stage_to_user(tape, buf, bytes_read))
2778 bytes_read))
2779 ret = -EFAULT; 2066 ret = -EFAULT;
2780 buf += bytes_read; 2067 buf += bytes_read;
2781 count -= bytes_read; 2068 count -= bytes_read;
@@ -2786,11 +2073,10 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
2786 if (bytes_read <= 0) 2073 if (bytes_read <= 0)
2787 goto finish; 2074 goto finish;
2788 temp = min((unsigned long)count, (unsigned long)bytes_read); 2075 temp = min((unsigned long)count, (unsigned long)bytes_read);
2789 if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage, 2076 if (idetape_copy_stage_to_user(tape, buf, temp))
2790 temp))
2791 ret = -EFAULT; 2077 ret = -EFAULT;
2792 actually_read += temp; 2078 actually_read += temp;
2793 tape->merge_stage_size = bytes_read-temp; 2079 tape->merge_bh_size = bytes_read-temp;
2794 } 2080 }
2795finish: 2081finish:
2796 if (!actually_read && test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) { 2082 if (!actually_read && test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) {
@@ -2821,17 +2107,17 @@ static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
2821 /* Initialize write operation */ 2107 /* Initialize write operation */
2822 if (tape->chrdev_dir != IDETAPE_DIR_WRITE) { 2108 if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
2823 if (tape->chrdev_dir == IDETAPE_DIR_READ) 2109 if (tape->chrdev_dir == IDETAPE_DIR_READ)
2824 idetape_discard_read_pipeline(drive, 1); 2110 ide_tape_discard_merge_buffer(drive, 1);
2825 if (tape->merge_stage || tape->merge_stage_size) { 2111 if (tape->merge_bh || tape->merge_bh_size) {
2826 printk(KERN_ERR "ide-tape: merge_stage_size " 2112 printk(KERN_ERR "ide-tape: merge_bh_size "
2827 "should be 0 now\n"); 2113 "should be 0 now\n");
2828 tape->merge_stage_size = 0; 2114 tape->merge_bh_size = 0;
2829 } 2115 }
2830 tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0); 2116 tape->merge_bh = ide_tape_kmalloc_buffer(tape, 0, 0);
2831 if (!tape->merge_stage) 2117 if (!tape->merge_bh)
2832 return -ENOMEM; 2118 return -ENOMEM;
2833 tape->chrdev_dir = IDETAPE_DIR_WRITE; 2119 tape->chrdev_dir = IDETAPE_DIR_WRITE;
2834 idetape_init_merge_stage(tape); 2120 idetape_init_merge_buffer(tape);
2835 2121
2836 /* 2122 /*
2837 * Issue a write 0 command to ensure that DSC handshake is 2123 * Issue a write 0 command to ensure that DSC handshake is
@@ -2842,10 +2128,10 @@ static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
2842 if (drive->dsc_overlap) { 2128 if (drive->dsc_overlap) {
2843 ssize_t retval = idetape_queue_rw_tail(drive, 2129 ssize_t retval = idetape_queue_rw_tail(drive,
2844 REQ_IDETAPE_WRITE, 0, 2130 REQ_IDETAPE_WRITE, 0,
2845 tape->merge_stage->bh); 2131 tape->merge_bh);
2846 if (retval < 0) { 2132 if (retval < 0) {
2847 __idetape_kfree_stage(tape->merge_stage); 2133 ide_tape_kfree_buffer(tape);
2848 tape->merge_stage = NULL; 2134 tape->merge_bh = NULL;
2849 tape->chrdev_dir = IDETAPE_DIR_NONE; 2135 tape->chrdev_dir = IDETAPE_DIR_NONE;
2850 return retval; 2136 return retval;
2851 } 2137 }
@@ -2853,49 +2139,44 @@ static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
2853 } 2139 }
2854 if (count == 0) 2140 if (count == 0)
2855 return (0); 2141 return (0);
2856 if (tape->restart_speed_control_req) 2142 if (tape->merge_bh_size) {
2857 idetape_restart_speed_control(drive); 2143 if (tape->merge_bh_size >= tape->buffer_size) {
2858 if (tape->merge_stage_size) {
2859 if (tape->merge_stage_size >= tape->stage_size) {
2860 printk(KERN_ERR "ide-tape: bug: merge buf too big\n"); 2144 printk(KERN_ERR "ide-tape: bug: merge buf too big\n");
2861 tape->merge_stage_size = 0; 2145 tape->merge_bh_size = 0;
2862 } 2146 }
2863 actually_written = min((unsigned int) 2147 actually_written = min((unsigned int)
2864 (tape->stage_size - tape->merge_stage_size), 2148 (tape->buffer_size - tape->merge_bh_size),
2865 (unsigned int)count); 2149 (unsigned int)count);
2866 if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf, 2150 if (idetape_copy_stage_from_user(tape, buf, actually_written))
2867 actually_written))
2868 ret = -EFAULT; 2151 ret = -EFAULT;
2869 buf += actually_written; 2152 buf += actually_written;
2870 tape->merge_stage_size += actually_written; 2153 tape->merge_bh_size += actually_written;
2871 count -= actually_written; 2154 count -= actually_written;
2872 2155
2873 if (tape->merge_stage_size == tape->stage_size) { 2156 if (tape->merge_bh_size == tape->buffer_size) {
2874 ssize_t retval; 2157 ssize_t retval;
2875 tape->merge_stage_size = 0; 2158 tape->merge_bh_size = 0;
2876 retval = idetape_add_chrdev_write_request(drive, ctl); 2159 retval = idetape_add_chrdev_write_request(drive, ctl);
2877 if (retval <= 0) 2160 if (retval <= 0)
2878 return (retval); 2161 return (retval);
2879 } 2162 }
2880 } 2163 }
2881 while (count >= tape->stage_size) { 2164 while (count >= tape->buffer_size) {
2882 ssize_t retval; 2165 ssize_t retval;
2883 if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf, 2166 if (idetape_copy_stage_from_user(tape, buf, tape->buffer_size))
2884 tape->stage_size))
2885 ret = -EFAULT; 2167 ret = -EFAULT;
2886 buf += tape->stage_size; 2168 buf += tape->buffer_size;
2887 count -= tape->stage_size; 2169 count -= tape->buffer_size;
2888 retval = idetape_add_chrdev_write_request(drive, ctl); 2170 retval = idetape_add_chrdev_write_request(drive, ctl);
2889 actually_written += tape->stage_size; 2171 actually_written += tape->buffer_size;
2890 if (retval <= 0) 2172 if (retval <= 0)
2891 return (retval); 2173 return (retval);
2892 } 2174 }
2893 if (count) { 2175 if (count) {
2894 actually_written += count; 2176 actually_written += count;
2895 if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf, 2177 if (idetape_copy_stage_from_user(tape, buf, count))
2896 count))
2897 ret = -EFAULT; 2178 ret = -EFAULT;
2898 tape->merge_stage_size += count; 2179 tape->merge_bh_size += count;
2899 } 2180 }
2900 return ret ? ret : actually_written; 2181 return ret ? ret : actually_written;
2901} 2182}
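
Stripped of driver detail, idetape_chrdev_write() above is a three-phase buffered write: top up a partial merge buffer, stream full buffer_size chunks, then stash the tail for the next call. A userspace sketch of that control flow; buffered_write() and its flush callback are illustrative, not driver API:

#include <string.h>

static size_t buffered_write(char *merge, size_t *merge_sz, size_t buf_sz,
			     const char *src, size_t count,
			     int (*flush)(const char *, size_t))
{
	size_t written = 0, n;

	if (*merge_sz) {			/* phase 1: top up */
		n = buf_sz - *merge_sz;
		if (n > count)
			n = count;
		memcpy(merge + *merge_sz, src, n);
		*merge_sz += n;
		src += n; count -= n; written += n;
		if (*merge_sz == buf_sz) {
			if (flush(merge, buf_sz) < 0)
				return written;
			*merge_sz = 0;
		}
	}
	while (count >= buf_sz) {		/* phase 2: full chunks */
		memcpy(merge, src, buf_sz);
		if (flush(merge, buf_sz) < 0)
			return written;
		src += buf_sz; count -= buf_sz; written += buf_sz;
	}
	if (count) {				/* phase 3: stash the tail */
		memcpy(merge, src, count);
		*merge_sz = count;
		written += count;
	}
	return written;
}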
@@ -2919,8 +2200,7 @@ static int idetape_write_filemark(ide_drive_t *drive)
2919 * 2200 *
2920 * Note: MTBSF and MTBSFM are not supported when the tape doesn't support 2201 * Note: MTBSF and MTBSFM are not supported when the tape doesn't support
2921 * spacing over filemarks in the reverse direction. In this case, MTFSFM is also 2202 * spacing over filemarks in the reverse direction. In this case, MTFSFM is also
2922 * usually not supported (it is supported in the rare case in which we crossed 2203 * usually not supported.
2923 * the filemark during our read-ahead pipelined operation mode).
2924 * 2204 *
2925 * The following commands are currently not supported: 2205 * The following commands are currently not supported:
2926 * 2206 *
@@ -2936,7 +2216,6 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
2936 debug_log(DBG_ERR, "Handling MTIOCTOP ioctl: mt_op=%d, mt_count=%d\n", 2216 debug_log(DBG_ERR, "Handling MTIOCTOP ioctl: mt_op=%d, mt_count=%d\n",
2937 mt_op, mt_count); 2217 mt_op, mt_count);
2938 2218
2939 /* Commands which need our pipelined read-ahead stages. */
2940 switch (mt_op) { 2219 switch (mt_op) {
2941 case MTFSF: 2220 case MTFSF:
2942 case MTFSFM: 2221 case MTFSFM:
@@ -2953,7 +2232,7 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
2953 case MTWEOF: 2232 case MTWEOF:
2954 if (tape->write_prot) 2233 if (tape->write_prot)
2955 return -EACCES; 2234 return -EACCES;
2956 idetape_discard_read_pipeline(drive, 1); 2235 ide_tape_discard_merge_buffer(drive, 1);
2957 for (i = 0; i < mt_count; i++) { 2236 for (i = 0; i < mt_count; i++) {
2958 retval = idetape_write_filemark(drive); 2237 retval = idetape_write_filemark(drive);
2959 if (retval) 2238 if (retval)
@@ -2961,12 +2240,12 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
2961 } 2240 }
2962 return 0; 2241 return 0;
2963 case MTREW: 2242 case MTREW:
2964 idetape_discard_read_pipeline(drive, 0); 2243 ide_tape_discard_merge_buffer(drive, 0);
2965 if (idetape_rewind_tape(drive)) 2244 if (idetape_rewind_tape(drive))
2966 return -EIO; 2245 return -EIO;
2967 return 0; 2246 return 0;
2968 case MTLOAD: 2247 case MTLOAD:
2969 idetape_discard_read_pipeline(drive, 0); 2248 ide_tape_discard_merge_buffer(drive, 0);
2970 idetape_create_load_unload_cmd(drive, &pc, 2249 idetape_create_load_unload_cmd(drive, &pc,
2971 IDETAPE_LU_LOAD_MASK); 2250 IDETAPE_LU_LOAD_MASK);
2972 return idetape_queue_pc_tail(drive, &pc); 2251 return idetape_queue_pc_tail(drive, &pc);
@@ -2981,7 +2260,7 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
2981 if (!idetape_queue_pc_tail(drive, &pc)) 2260 if (!idetape_queue_pc_tail(drive, &pc))
2982 tape->door_locked = DOOR_UNLOCKED; 2261 tape->door_locked = DOOR_UNLOCKED;
2983 } 2262 }
2984 idetape_discard_read_pipeline(drive, 0); 2263 ide_tape_discard_merge_buffer(drive, 0);
2985 idetape_create_load_unload_cmd(drive, &pc, 2264 idetape_create_load_unload_cmd(drive, &pc,
2986 !IDETAPE_LU_LOAD_MASK); 2265 !IDETAPE_LU_LOAD_MASK);
2987 retval = idetape_queue_pc_tail(drive, &pc); 2266 retval = idetape_queue_pc_tail(drive, &pc);
@@ -2989,10 +2268,10 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
2989 clear_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags); 2268 clear_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags);
2990 return retval; 2269 return retval;
2991 case MTNOP: 2270 case MTNOP:
2992 idetape_discard_read_pipeline(drive, 0); 2271 ide_tape_discard_merge_buffer(drive, 0);
2993 return idetape_flush_tape_buffers(drive); 2272 return idetape_flush_tape_buffers(drive);
2994 case MTRETEN: 2273 case MTRETEN:
2995 idetape_discard_read_pipeline(drive, 0); 2274 ide_tape_discard_merge_buffer(drive, 0);
2996 idetape_create_load_unload_cmd(drive, &pc, 2275 idetape_create_load_unload_cmd(drive, &pc,
2997 IDETAPE_LU_RETENSION_MASK | IDETAPE_LU_LOAD_MASK); 2276 IDETAPE_LU_RETENSION_MASK | IDETAPE_LU_LOAD_MASK);
2998 return idetape_queue_pc_tail(drive, &pc); 2277 return idetape_queue_pc_tail(drive, &pc);
@@ -3014,11 +2293,11 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
3014 set_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags); 2293 set_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags);
3015 return 0; 2294 return 0;
3016 case MTSEEK: 2295 case MTSEEK:
3017 idetape_discard_read_pipeline(drive, 0); 2296 ide_tape_discard_merge_buffer(drive, 0);
3018 return idetape_position_tape(drive, 2297 return idetape_position_tape(drive,
3019 mt_count * tape->user_bs_factor, tape->partition, 0); 2298 mt_count * tape->user_bs_factor, tape->partition, 0);
3020 case MTSETPART: 2299 case MTSETPART:
3021 idetape_discard_read_pipeline(drive, 0); 2300 ide_tape_discard_merge_buffer(drive, 0);
3022 return idetape_position_tape(drive, 0, mt_count, 0); 2301 return idetape_position_tape(drive, 0, mt_count, 0);
3023 case MTFSR: 2302 case MTFSR:
3024 case MTBSR: 2303 case MTBSR:
@@ -3063,13 +2342,12 @@ static int idetape_chrdev_ioctl(struct inode *inode, struct file *file,
3063 2342
3064 debug_log(DBG_CHRDEV, "Enter %s, cmd=%u\n", __func__, cmd); 2343 debug_log(DBG_CHRDEV, "Enter %s, cmd=%u\n", __func__, cmd);
3065 2344
3066 tape->restart_speed_control_req = 1;
3067 if (tape->chrdev_dir == IDETAPE_DIR_WRITE) { 2345 if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
3068 idetape_empty_write_pipeline(drive); 2346 ide_tape_flush_merge_buffer(drive);
3069 idetape_flush_tape_buffers(drive); 2347 idetape_flush_tape_buffers(drive);
3070 } 2348 }
3071 if (cmd == MTIOCGET || cmd == MTIOCPOS) { 2349 if (cmd == MTIOCGET || cmd == MTIOCPOS) {
3072 block_offset = idetape_pipeline_size(drive) / 2350 block_offset = tape->merge_bh_size /
3073 (tape->blk_size * tape->user_bs_factor); 2351 (tape->blk_size * tape->user_bs_factor);
3074 position = idetape_read_position(drive); 2352 position = idetape_read_position(drive);
3075 if (position < 0) 2353 if (position < 0)
@@ -3101,7 +2379,7 @@ static int idetape_chrdev_ioctl(struct inode *inode, struct file *file,
3101 return 0; 2379 return 0;
3102 default: 2380 default:
3103 if (tape->chrdev_dir == IDETAPE_DIR_READ) 2381 if (tape->chrdev_dir == IDETAPE_DIR_READ)
3104 idetape_discard_read_pipeline(drive, 1); 2382 ide_tape_discard_merge_buffer(drive, 1);
3105 return idetape_blkdev_ioctl(drive, cmd, arg); 2383 return idetape_blkdev_ioctl(drive, cmd, arg);
3106 } 2384 }
3107} 2385}
@@ -3175,9 +2453,6 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp)
3175 if (!test_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags)) 2453 if (!test_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags))
3176 (void)idetape_rewind_tape(drive); 2454 (void)idetape_rewind_tape(drive);
3177 2455
3178 if (tape->chrdev_dir != IDETAPE_DIR_READ)
3179 clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);
3180
3181 /* Read block size and write protect status from drive. */ 2456 /* Read block size and write protect status from drive. */
3182 ide_tape_get_bsize_from_bdesc(drive); 2457 ide_tape_get_bsize_from_bdesc(drive);
3183 2458
@@ -3206,8 +2481,6 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp)
3206 } 2481 }
3207 } 2482 }
3208 } 2483 }
3209 idetape_restart_speed_control(drive);
3210 tape->restart_speed_control_req = 0;
3211 return 0; 2484 return 0;
3212 2485
3213out_put_tape: 2486out_put_tape:
@@ -3219,13 +2492,13 @@ static void idetape_write_release(ide_drive_t *drive, unsigned int minor)
3219{ 2492{
3220 idetape_tape_t *tape = drive->driver_data; 2493 idetape_tape_t *tape = drive->driver_data;
3221 2494
3222 idetape_empty_write_pipeline(drive); 2495 ide_tape_flush_merge_buffer(drive);
3223 tape->merge_stage = __idetape_kmalloc_stage(tape, 1, 0); 2496 tape->merge_bh = ide_tape_kmalloc_buffer(tape, 1, 0);
3224 if (tape->merge_stage != NULL) { 2497 if (tape->merge_bh != NULL) {
3225 idetape_pad_zeros(drive, tape->blk_size * 2498 idetape_pad_zeros(drive, tape->blk_size *
3226 (tape->user_bs_factor - 1)); 2499 (tape->user_bs_factor - 1));
3227 __idetape_kfree_stage(tape->merge_stage); 2500 ide_tape_kfree_buffer(tape);
3228 tape->merge_stage = NULL; 2501 tape->merge_bh = NULL;
3229 } 2502 }
3230 idetape_write_filemark(drive); 2503 idetape_write_filemark(drive);
3231 idetape_flush_tape_buffers(drive); 2504 idetape_flush_tape_buffers(drive);
@@ -3248,14 +2521,9 @@ static int idetape_chrdev_release(struct inode *inode, struct file *filp)
3248 idetape_write_release(drive, minor); 2521 idetape_write_release(drive, minor);
3249 if (tape->chrdev_dir == IDETAPE_DIR_READ) { 2522 if (tape->chrdev_dir == IDETAPE_DIR_READ) {
3250 if (minor < 128) 2523 if (minor < 128)
3251 idetape_discard_read_pipeline(drive, 1); 2524 ide_tape_discard_merge_buffer(drive, 1);
3252 else
3253 idetape_wait_for_pipeline(drive);
3254 }
3255 if (tape->cache_stage != NULL) {
3256 __idetape_kfree_stage(tape->cache_stage);
3257 tape->cache_stage = NULL;
3258 } 2525 }
2526
3259 if (minor < 128 && test_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags)) 2527 if (minor < 128 && test_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags))
3260 (void) idetape_rewind_tape(drive); 2528 (void) idetape_rewind_tape(drive);
3261 if (tape->chrdev_dir == IDETAPE_DIR_NONE) { 2529 if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
@@ -3392,33 +2660,15 @@ static void idetape_add_settings(ide_drive_t *drive)
3392 2660
3393 ide_add_setting(drive, "buffer", SETTING_READ, TYPE_SHORT, 0, 0xffff, 2661 ide_add_setting(drive, "buffer", SETTING_READ, TYPE_SHORT, 0, 0xffff,
3394 1, 2, (u16 *)&tape->caps[16], NULL); 2662 1, 2, (u16 *)&tape->caps[16], NULL);
3395 ide_add_setting(drive, "pipeline_min", SETTING_RW, TYPE_INT, 1, 0xffff,
3396 tape->stage_size / 1024, 1, &tape->min_pipeline, NULL);
3397 ide_add_setting(drive, "pipeline", SETTING_RW, TYPE_INT, 1, 0xffff,
3398 tape->stage_size / 1024, 1, &tape->max_stages, NULL);
3399 ide_add_setting(drive, "pipeline_max", SETTING_RW, TYPE_INT, 1, 0xffff,
3400 tape->stage_size / 1024, 1, &tape->max_pipeline, NULL);
3401 ide_add_setting(drive, "pipeline_used", SETTING_READ, TYPE_INT, 0,
3402 0xffff, tape->stage_size / 1024, 1, &tape->nr_stages,
3403 NULL);
3404 ide_add_setting(drive, "pipeline_pending", SETTING_READ, TYPE_INT, 0,
3405 0xffff, tape->stage_size / 1024, 1,
3406 &tape->nr_pending_stages, NULL);
3407 ide_add_setting(drive, "speed", SETTING_READ, TYPE_SHORT, 0, 0xffff, 2663 ide_add_setting(drive, "speed", SETTING_READ, TYPE_SHORT, 0, 0xffff,
3408 1, 1, (u16 *)&tape->caps[14], NULL); 2664 1, 1, (u16 *)&tape->caps[14], NULL);
3409 ide_add_setting(drive, "stage", SETTING_READ, TYPE_INT, 0, 0xffff, 1, 2665 ide_add_setting(drive, "buffer_size", SETTING_READ, TYPE_INT, 0, 0xffff,
3410 1024, &tape->stage_size, NULL); 2666 1, 1024, &tape->buffer_size, NULL);
3411 ide_add_setting(drive, "tdsc", SETTING_RW, TYPE_INT, IDETAPE_DSC_RW_MIN, 2667 ide_add_setting(drive, "tdsc", SETTING_RW, TYPE_INT, IDETAPE_DSC_RW_MIN,
3412 IDETAPE_DSC_RW_MAX, 1000, HZ, &tape->best_dsc_rw_freq, 2668 IDETAPE_DSC_RW_MAX, 1000, HZ, &tape->best_dsc_rw_freq,
3413 NULL); 2669 NULL);
3414 ide_add_setting(drive, "dsc_overlap", SETTING_RW, TYPE_BYTE, 0, 1, 1, 2670 ide_add_setting(drive, "dsc_overlap", SETTING_RW, TYPE_BYTE, 0, 1, 1,
3415 1, &drive->dsc_overlap, NULL); 2671 1, &drive->dsc_overlap, NULL);
3416 ide_add_setting(drive, "pipeline_head_speed_c", SETTING_READ, TYPE_INT,
3417 0, 0xffff, 1, 1, &tape->controlled_pipeline_head_speed,
3418 NULL);
3419 ide_add_setting(drive, "pipeline_head_speed_u", SETTING_READ, TYPE_INT,
3420 0, 0xffff, 1, 1,
3421 &tape->uncontrolled_pipeline_head_speed, NULL);
3422 ide_add_setting(drive, "avg_speed", SETTING_READ, TYPE_INT, 0, 0xffff, 2672 ide_add_setting(drive, "avg_speed", SETTING_READ, TYPE_INT, 0, 0xffff,
3423 1, 1, &tape->avg_speed, NULL); 2673 1, 1, &tape->avg_speed, NULL);
3424 ide_add_setting(drive, "debug_mask", SETTING_RW, TYPE_INT, 0, 0xffff, 1, 2674 ide_add_setting(drive, "debug_mask", SETTING_RW, TYPE_INT, 0, 0xffff, 1,
@@ -3441,11 +2691,10 @@ static inline void idetape_add_settings(ide_drive_t *drive) { ; }
3441 */ 2691 */
3442static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor) 2692static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
3443{ 2693{
3444 unsigned long t1, tmid, tn, t; 2694 unsigned long t;
3445 int speed; 2695 int speed;
3446 int stage_size; 2696 int buffer_size;
3447 u8 gcw[2]; 2697 u8 gcw[2];
3448 struct sysinfo si;
3449 u16 *ctl = (u16 *)&tape->caps[12]; 2698 u16 *ctl = (u16 *)&tape->caps[12];
3450 2699
3451 spin_lock_init(&tape->lock); 2700 spin_lock_init(&tape->lock);
@@ -3464,65 +2713,33 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
3464 tape->name[2] = '0' + minor; 2713 tape->name[2] = '0' + minor;
3465 tape->chrdev_dir = IDETAPE_DIR_NONE; 2714 tape->chrdev_dir = IDETAPE_DIR_NONE;
3466 tape->pc = tape->pc_stack; 2715 tape->pc = tape->pc_stack;
3467 tape->max_insert_speed = 10000;
3468 tape->speed_control = 1;
3469 *((unsigned short *) &gcw) = drive->id->config; 2716 *((unsigned short *) &gcw) = drive->id->config;
3470 2717
3471 /* Command packet DRQ type */ 2718 /* Command packet DRQ type */
3472 if (((gcw[0] & 0x60) >> 5) == 1) 2719 if (((gcw[0] & 0x60) >> 5) == 1)
3473 set_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags); 2720 set_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags);
3474 2721
3475 tape->min_pipeline = 10;
3476 tape->max_pipeline = 10;
3477 tape->max_stages = 10;
3478
3479 idetape_get_inquiry_results(drive); 2722 idetape_get_inquiry_results(drive);
3480 idetape_get_mode_sense_results(drive); 2723 idetape_get_mode_sense_results(drive);
3481 ide_tape_get_bsize_from_bdesc(drive); 2724 ide_tape_get_bsize_from_bdesc(drive);
3482 tape->user_bs_factor = 1; 2725 tape->user_bs_factor = 1;
3483 tape->stage_size = *ctl * tape->blk_size; 2726 tape->buffer_size = *ctl * tape->blk_size;
3484 while (tape->stage_size > 0xffff) { 2727 while (tape->buffer_size > 0xffff) {
3485 printk(KERN_NOTICE "ide-tape: decreasing stage size\n"); 2728 printk(KERN_NOTICE "ide-tape: decreasing stage size\n");
3486 *ctl /= 2; 2729 *ctl /= 2;
3487 tape->stage_size = *ctl * tape->blk_size; 2730 tape->buffer_size = *ctl * tape->blk_size;
3488 } 2731 }
3489 stage_size = tape->stage_size; 2732 buffer_size = tape->buffer_size;
3490 tape->pages_per_stage = stage_size / PAGE_SIZE; 2733 tape->pages_per_buffer = buffer_size / PAGE_SIZE;
3491 if (stage_size % PAGE_SIZE) { 2734 if (buffer_size % PAGE_SIZE) {
3492 tape->pages_per_stage++; 2735 tape->pages_per_buffer++;
3493 tape->excess_bh_size = PAGE_SIZE - stage_size % PAGE_SIZE; 2736 tape->excess_bh_size = PAGE_SIZE - buffer_size % PAGE_SIZE;
3494 } 2737 }
3495 2738
3496 /* Select the "best" DSC read/write polling freq and pipeline size. */ 2739 /* select the "best" DSC read/write polling freq */
3497 speed = max(*(u16 *)&tape->caps[14], *(u16 *)&tape->caps[8]); 2740 speed = max(*(u16 *)&tape->caps[14], *(u16 *)&tape->caps[8]);
3498 2741
3499 tape->max_stages = speed * 1000 * 10 / tape->stage_size; 2742 t = (IDETAPE_FIFO_THRESHOLD * tape->buffer_size * HZ) / (speed * 1000);
3500
3501 /* Limit memory use for pipeline to 10% of physical memory */
3502 si_meminfo(&si);
3503 if (tape->max_stages * tape->stage_size >
3504 si.totalram * si.mem_unit / 10)
3505 tape->max_stages =
3506 si.totalram * si.mem_unit / (10 * tape->stage_size);
3507
3508 tape->max_stages = min(tape->max_stages, IDETAPE_MAX_PIPELINE_STAGES);
3509 tape->min_pipeline = min(tape->max_stages, IDETAPE_MIN_PIPELINE_STAGES);
3510 tape->max_pipeline =
3511 min(tape->max_stages * 2, IDETAPE_MAX_PIPELINE_STAGES);
3512 if (tape->max_stages == 0) {
3513 tape->max_stages = 1;
3514 tape->min_pipeline = 1;
3515 tape->max_pipeline = 1;
3516 }
3517
3518 t1 = (tape->stage_size * HZ) / (speed * 1000);
3519 tmid = (*(u16 *)&tape->caps[16] * 32 * HZ) / (speed * 125);
3520 tn = (IDETAPE_FIFO_THRESHOLD * tape->stage_size * HZ) / (speed * 1000);
3521
3522 if (tape->max_stages)
3523 t = tn;
3524 else
3525 t = t1;
3526 2743
3527 /* 2744 /*
3528 * Ensure that the number we got makes sense; limit it within 2745 * Ensure that the number we got makes sense; limit it within
@@ -3532,11 +2749,10 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
3532 min_t(unsigned long, t, IDETAPE_DSC_RW_MAX), 2749 min_t(unsigned long, t, IDETAPE_DSC_RW_MAX),
3533 IDETAPE_DSC_RW_MIN); 2750 IDETAPE_DSC_RW_MIN);
3534 printk(KERN_INFO "ide-tape: %s <-> %s: %dKBps, %d*%dkB buffer, " 2751 printk(KERN_INFO "ide-tape: %s <-> %s: %dKBps, %d*%dkB buffer, "
3535 "%dkB pipeline, %lums tDSC%s\n", 2752 "%lums tDSC%s\n",
3536 drive->name, tape->name, *(u16 *)&tape->caps[14], 2753 drive->name, tape->name, *(u16 *)&tape->caps[14],
3537 (*(u16 *)&tape->caps[16] * 512) / tape->stage_size, 2754 (*(u16 *)&tape->caps[16] * 512) / tape->buffer_size,
3538 tape->stage_size / 1024, 2755 tape->buffer_size / 1024,
3539 tape->max_stages * tape->stage_size / 1024,
3540 tape->best_dsc_rw_freq * 1000 / HZ, 2756 tape->best_dsc_rw_freq * 1000 / HZ,
3541 drive->using_dma ? ", DMA":""); 2757 drive->using_dma ? ", DMA":"");
3542 2758
@@ -3560,7 +2776,7 @@ static void ide_tape_release(struct kref *kref)
3560 ide_drive_t *drive = tape->drive; 2776 ide_drive_t *drive = tape->drive;
3561 struct gendisk *g = tape->disk; 2777 struct gendisk *g = tape->disk;
3562 2778
3563 BUG_ON(tape->first_stage != NULL || tape->merge_stage_size); 2779 BUG_ON(tape->merge_bh_size);
3564 2780
3565 drive->dsc_overlap = 0; 2781 drive->dsc_overlap = 0;
3566 drive->driver_data = NULL; 2782 drive->driver_data = NULL;
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 155cc904f4eb..9f9ad9fb6b89 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -36,6 +36,7 @@
36void ide_tf_load(ide_drive_t *drive, ide_task_t *task) 36void ide_tf_load(ide_drive_t *drive, ide_task_t *task)
37{ 37{
38 ide_hwif_t *hwif = drive->hwif; 38 ide_hwif_t *hwif = drive->hwif;
39 struct ide_io_ports *io_ports = &hwif->io_ports;
39 struct ide_taskfile *tf = &task->tf; 40 struct ide_taskfile *tf = &task->tf;
40 u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF; 41 u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;
41 42
@@ -59,34 +60,33 @@ void ide_tf_load(ide_drive_t *drive, ide_task_t *task)
59 SELECT_MASK(drive, 0); 60 SELECT_MASK(drive, 0);
60 61
61 if (task->tf_flags & IDE_TFLAG_OUT_DATA) 62 if (task->tf_flags & IDE_TFLAG_OUT_DATA)
62 hwif->OUTW((tf->hob_data << 8) | tf->data, 63 hwif->OUTW((tf->hob_data << 8) | tf->data, io_ports->data_addr);
63 hwif->io_ports[IDE_DATA_OFFSET]);
64 64
65 if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE) 65 if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
66 hwif->OUTB(tf->hob_feature, hwif->io_ports[IDE_FEATURE_OFFSET]); 66 hwif->OUTB(tf->hob_feature, io_ports->feature_addr);
67 if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT) 67 if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
68 hwif->OUTB(tf->hob_nsect, hwif->io_ports[IDE_NSECTOR_OFFSET]); 68 hwif->OUTB(tf->hob_nsect, io_ports->nsect_addr);
69 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL) 69 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
70 hwif->OUTB(tf->hob_lbal, hwif->io_ports[IDE_SECTOR_OFFSET]); 70 hwif->OUTB(tf->hob_lbal, io_ports->lbal_addr);
71 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM) 71 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
72 hwif->OUTB(tf->hob_lbam, hwif->io_ports[IDE_LCYL_OFFSET]); 72 hwif->OUTB(tf->hob_lbam, io_ports->lbam_addr);
73 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH) 73 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
74 hwif->OUTB(tf->hob_lbah, hwif->io_ports[IDE_HCYL_OFFSET]); 74 hwif->OUTB(tf->hob_lbah, io_ports->lbah_addr);
75 75
76 if (task->tf_flags & IDE_TFLAG_OUT_FEATURE) 76 if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
77 hwif->OUTB(tf->feature, hwif->io_ports[IDE_FEATURE_OFFSET]); 77 hwif->OUTB(tf->feature, io_ports->feature_addr);
78 if (task->tf_flags & IDE_TFLAG_OUT_NSECT) 78 if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
79 hwif->OUTB(tf->nsect, hwif->io_ports[IDE_NSECTOR_OFFSET]); 79 hwif->OUTB(tf->nsect, io_ports->nsect_addr);
80 if (task->tf_flags & IDE_TFLAG_OUT_LBAL) 80 if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
81 hwif->OUTB(tf->lbal, hwif->io_ports[IDE_SECTOR_OFFSET]); 81 hwif->OUTB(tf->lbal, io_ports->lbal_addr);
82 if (task->tf_flags & IDE_TFLAG_OUT_LBAM) 82 if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
83 hwif->OUTB(tf->lbam, hwif->io_ports[IDE_LCYL_OFFSET]); 83 hwif->OUTB(tf->lbam, io_ports->lbam_addr);
84 if (task->tf_flags & IDE_TFLAG_OUT_LBAH) 84 if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
85 hwif->OUTB(tf->lbah, hwif->io_ports[IDE_HCYL_OFFSET]); 85 hwif->OUTB(tf->lbah, io_ports->lbah_addr);
86 86
87 if (task->tf_flags & IDE_TFLAG_OUT_DEVICE) 87 if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
88 hwif->OUTB((tf->device & HIHI) | drive->select.all, 88 hwif->OUTB((tf->device & HIHI) | drive->select.all,
89 hwif->io_ports[IDE_SELECT_OFFSET]); 89 io_ports->device_addr);
90} 90}
91 91
92int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf) 92int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf)
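Note: the hunk above is part of a tree-wide switch from indexing hwif->io_ports[] with IDE_*_OFFSET constants to named members of a struct ide_io_ports. A sketch of the layout this implies is below; the field names are taken from this series, but the exact member order and read/write pairings are assumptions -- the authoritative definition lives in <linux/ide.h>.

struct ide_io_ports {
	unsigned long	data_addr;	/* was io_ports[IDE_DATA_OFFSET] */
	union {
		unsigned long error_addr;	/* read:  error */
		unsigned long feature_addr;	/* write: feature */
	};
	unsigned long	nsect_addr;	/* was io_ports[IDE_NSECTOR_OFFSET] */
	unsigned long	lbal_addr;	/* was io_ports[IDE_SECTOR_OFFSET] */
	unsigned long	lbam_addr;	/* was io_ports[IDE_LCYL_OFFSET] */
	unsigned long	lbah_addr;	/* was io_ports[IDE_HCYL_OFFSET] */
	unsigned long	device_addr;	/* was io_ports[IDE_SELECT_OFFSET] */
	union {
		unsigned long status_addr;	/* read:  status */
		unsigned long command_addr;	/* write: command */
	};
	unsigned long	ctl_addr;	/* was io_ports[IDE_CONTROL_OFFSET] */
	unsigned long	irq_addr;	/* was io_ports[IDE_IRQ_OFFSET] */
};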
@@ -135,6 +135,7 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
135 ide_hwif_t *hwif = HWIF(drive); 135 ide_hwif_t *hwif = HWIF(drive);
136 struct ide_taskfile *tf = &task->tf; 136 struct ide_taskfile *tf = &task->tf;
137 ide_handler_t *handler = NULL; 137 ide_handler_t *handler = NULL;
138 const struct ide_dma_ops *dma_ops = hwif->dma_ops;
138 139
139 if (task->data_phase == TASKFILE_MULTI_IN || 140 if (task->data_phase == TASKFILE_MULTI_IN ||
140 task->data_phase == TASKFILE_MULTI_OUT) { 141 task->data_phase == TASKFILE_MULTI_OUT) {
@@ -154,8 +155,7 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
154 switch (task->data_phase) { 155 switch (task->data_phase) {
155 case TASKFILE_MULTI_OUT: 156 case TASKFILE_MULTI_OUT:
156 case TASKFILE_OUT: 157 case TASKFILE_OUT:
157 hwif->OUTBSYNC(drive, tf->command, 158 hwif->OUTBSYNC(drive, tf->command, hwif->io_ports.command_addr);
158 hwif->io_ports[IDE_COMMAND_OFFSET]);
159 ndelay(400); /* FIXME */ 159 ndelay(400); /* FIXME */
160 return pre_task_out_intr(drive, task->rq); 160 return pre_task_out_intr(drive, task->rq);
161 case TASKFILE_MULTI_IN: 161 case TASKFILE_MULTI_IN:
@@ -178,10 +178,10 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
178 return ide_started; 178 return ide_started;
179 default: 179 default:
180 if (task_dma_ok(task) == 0 || drive->using_dma == 0 || 180 if (task_dma_ok(task) == 0 || drive->using_dma == 0 ||
181 hwif->dma_setup(drive)) 181 dma_ops->dma_setup(drive))
182 return ide_stopped; 182 return ide_stopped;
183 hwif->dma_exec_cmd(drive, tf->command); 183 dma_ops->dma_exec_cmd(drive, tf->command);
184 hwif->dma_start(drive); 184 dma_ops->dma_start(drive);
185 return ide_started; 185 return ide_started;
186 } 186 }
187} 187}
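Note: the DMA methods move off the hwif (hwif->dma_setup() and friends) into a shared const method table reached through hwif->dma_ops. Below is a partial sketch covering only the three hooks this hunk exercises; the real struct ide_dma_ops in <linux/ide.h> carries the full set.

struct ide_dma_ops {
	int	(*dma_setup)(ide_drive_t *);
	void	(*dma_exec_cmd)(ide_drive_t *, u8);
	void	(*dma_start)(ide_drive_t *);
	/* ...remaining DMA hooks elided... */
};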
@@ -455,7 +455,7 @@ static ide_startstop_t task_in_intr(ide_drive_t *drive)
455 455
456 /* Error? */ 456 /* Error? */
457 if (stat & ERR_STAT) 457 if (stat & ERR_STAT)
458 return task_error(drive, rq, __FUNCTION__, stat); 458 return task_error(drive, rq, __func__, stat);
459 459
460 /* Didn't want any data? Odd. */ 460 /* Didn't want any data? Odd. */
461 if (!(stat & DRQ_STAT)) 461 if (!(stat & DRQ_STAT))
@@ -467,7 +467,7 @@ static ide_startstop_t task_in_intr(ide_drive_t *drive)
467 if (!hwif->nleft) { 467 if (!hwif->nleft) {
468 stat = wait_drive_not_busy(drive); 468 stat = wait_drive_not_busy(drive);
469 if (!OK_STAT(stat, 0, BAD_STAT)) 469 if (!OK_STAT(stat, 0, BAD_STAT))
470 return task_error(drive, rq, __FUNCTION__, stat); 470 return task_error(drive, rq, __func__, stat);
471 task_end_request(drive, rq, stat); 471 task_end_request(drive, rq, stat);
472 return ide_stopped; 472 return ide_stopped;
473 } 473 }
@@ -488,11 +488,11 @@ static ide_startstop_t task_out_intr (ide_drive_t *drive)
488 u8 stat = ide_read_status(drive); 488 u8 stat = ide_read_status(drive);
489 489
490 if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat)) 490 if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat))
491 return task_error(drive, rq, __FUNCTION__, stat); 491 return task_error(drive, rq, __func__, stat);
492 492
493 /* Deal with unexpected ATA data phase. */ 493 /* Deal with unexpected ATA data phase. */
494 if (((stat & DRQ_STAT) == 0) ^ !hwif->nleft) 494 if (((stat & DRQ_STAT) == 0) ^ !hwif->nleft)
495 return task_error(drive, rq, __FUNCTION__, stat); 495 return task_error(drive, rq, __func__, stat);
496 496
497 if (!hwif->nleft) { 497 if (!hwif->nleft) {
498 task_end_request(drive, rq, stat); 498 task_end_request(drive, rq, stat);
@@ -675,7 +675,7 @@ int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
675 /* (hs): give up if multcount is not set */ 675 /* (hs): give up if multcount is not set */
676 printk(KERN_ERR "%s: %s Multimode Write " \ 676 printk(KERN_ERR "%s: %s Multimode Write " \
677 "multcount is not set\n", 677 "multcount is not set\n",
678 drive->name, __FUNCTION__); 678 drive->name, __func__);
679 err = -EPERM; 679 err = -EPERM;
680 goto abort; 680 goto abort;
681 } 681 }
@@ -692,7 +692,7 @@ int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
692 /* (hs): give up if multcount is not set */ 692 /* (hs): give up if multcount is not set */
693 printk(KERN_ERR "%s: %s Multimode Read failure " \ 693 printk(KERN_ERR "%s: %s Multimode Read failure " \
694 "multcount is not set\n", 694 "multcount is not set\n",
695 drive->name, __FUNCTION__); 695 drive->name, __func__);
696 err = -EPERM; 696 err = -EPERM;
697 goto abort; 697 goto abort;
698 } 698 }
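Note: the __FUNCTION__ -> __func__ substitutions recurring through this file (and the rest of the series) are purely mechanical: __FUNCTION__ is a GCC extension, while __func__ is the equivalent C99 predefined identifier. A standalone illustration:

#include <stdio.h>

static void show_caller(void)
{
	/* __func__ expands to the enclosing function's name, "show_caller" */
	printf("%s\n", __func__);
}

int main(void)
{
	show_caller();
	return 0;
}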
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 917c72dcd33d..999584c03d97 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -94,19 +94,8 @@ DEFINE_MUTEX(ide_cfg_mtx);
94 94
95int noautodma = 0; 95int noautodma = 0;
96 96
97#ifdef CONFIG_BLK_DEV_IDEACPI
98int ide_noacpi = 0;
99int ide_noacpitfs = 1;
100int ide_noacpionboot = 1;
101#endif
102
103/*
104 * This is declared extern in ide.h, for access by other IDE modules:
105 */
106ide_hwif_t ide_hwifs[MAX_HWIFS]; /* master data repository */ 97ide_hwif_t ide_hwifs[MAX_HWIFS]; /* master data repository */
107 98
108EXPORT_SYMBOL(ide_hwifs);
109
110static void ide_port_init_devices_data(ide_hwif_t *); 99static void ide_port_init_devices_data(ide_hwif_t *);
111 100
112/* 101/*
@@ -232,117 +221,6 @@ static int ide_system_bus_speed(void)
232 return pci_dev_present(pci_default) ? 33 : 50; 221 return pci_dev_present(pci_default) ? 33 : 50;
233} 222}
234 223
235ide_hwif_t * ide_find_port(unsigned long base)
236{
237 ide_hwif_t *hwif;
238 int i;
239
240 for (i = 0; i < MAX_HWIFS; i++) {
241 hwif = &ide_hwifs[i];
242 if (hwif->io_ports[IDE_DATA_OFFSET] == base)
243 goto found;
244 }
245
246 for (i = 0; i < MAX_HWIFS; i++) {
247 hwif = &ide_hwifs[i];
248 if (hwif->chipset == ide_unknown)
249 goto found;
250 }
251
252 hwif = NULL;
253found:
254 return hwif;
255}
256
257EXPORT_SYMBOL_GPL(ide_find_port);
258
259static struct resource* hwif_request_region(ide_hwif_t *hwif,
260 unsigned long addr, int num)
261{
262 struct resource *res = request_region(addr, num, hwif->name);
263
264 if (!res)
265 printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
266 hwif->name, addr, addr+num-1);
267 return res;
268}
269
270/**
271 * ide_hwif_request_regions - request resources for IDE
272 * @hwif: interface to use
273 *
274 * Requests all the needed resources for an interface.
275 * Right now core IDE code does this work which is deeply wrong.
276 * MMIO leaves it to the controller driver,
277 * PIO will migrate this way over time.
278 */
279
280int ide_hwif_request_regions(ide_hwif_t *hwif)
281{
282 unsigned long addr;
283 unsigned int i;
284
285 if (hwif->mmio)
286 return 0;
287 addr = hwif->io_ports[IDE_CONTROL_OFFSET];
288 if (addr && !hwif_request_region(hwif, addr, 1))
289 goto control_region_busy;
290 hwif->straight8 = 0;
291 addr = hwif->io_ports[IDE_DATA_OFFSET];
292 if ((addr | 7) == hwif->io_ports[IDE_STATUS_OFFSET]) {
293 if (!hwif_request_region(hwif, addr, 8))
294 goto data_region_busy;
295 hwif->straight8 = 1;
296 return 0;
297 }
298 for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
299 addr = hwif->io_ports[i];
300 if (!hwif_request_region(hwif, addr, 1)) {
301 while (--i)
302 release_region(addr, 1);
303 goto data_region_busy;
304 }
305 }
306 return 0;
307
308data_region_busy:
309 addr = hwif->io_ports[IDE_CONTROL_OFFSET];
310 if (addr)
311 release_region(addr, 1);
312control_region_busy:
313 /* If any errors are returned, we drop the hwif interface. */
314 return -EBUSY;
315}
316
317/**
318 * ide_hwif_release_regions - free IDE resources
319 *
320 * Note that we only release the standard ports,
321 * and do not even try to handle any extra ports
322 * allocated for weird IDE interface chipsets.
323 *
324 * Note also that we don't yet handle mmio resources here. More
325 * importantly our caller should be doing this so we need to
326 * restructure this as a helper function for drivers.
327 */
328
329void ide_hwif_release_regions(ide_hwif_t *hwif)
330{
331 u32 i = 0;
332
333 if (hwif->mmio)
334 return;
335 if (hwif->io_ports[IDE_CONTROL_OFFSET])
336 release_region(hwif->io_ports[IDE_CONTROL_OFFSET], 1);
337 if (hwif->straight8) {
338 release_region(hwif->io_ports[IDE_DATA_OFFSET], 8);
339 return;
340 }
341 for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++)
342 if (hwif->io_ports[i])
343 release_region(hwif->io_ports[i], 1);
344}
345
346void ide_remove_port_from_hwgroup(ide_hwif_t *hwif) 224void ide_remove_port_from_hwgroup(ide_hwif_t *hwif)
347{ 225{
348 ide_hwgroup_t *hwgroup = hwif->hwgroup; 226 ide_hwgroup_t *hwgroup = hwif->hwgroup;
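Note: with ide_hwif_request_regions()/ide_hwif_release_regions() deleted from the core, reserving the I/O resources becomes each host driver's job, as the ide-4drives and falconide hunks later in this diff show. The idiom is the usual request_region() dance (fragment; base and DRV_NAME are per-driver):

	if (!request_region(base, 8, DRV_NAME)) {	/* taskfile block */
		printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
		       DRV_NAME, base, base + 7);
		return -EBUSY;
	}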
@@ -409,7 +287,7 @@ EXPORT_SYMBOL_GPL(ide_port_unregister_devices);
409 287
410/** 288/**
411 * ide_unregister - free an IDE interface 289 * ide_unregister - free an IDE interface
412 * @index: index of interface (will change soon to a pointer) 290 * @hwif: IDE interface
413 * 291 *
414 * Perform the final unregister of an IDE interface. At the moment 292 * Perform the final unregister of an IDE interface. At the moment
415 * we don't refcount interfaces so this will also get split up. 293 * we don't refcount interfaces so this will also get split up.
@@ -429,19 +307,16 @@ EXPORT_SYMBOL_GPL(ide_port_unregister_devices);
429 * This is raving bonkers. 307 * This is raving bonkers.
430 */ 308 */
431 309
432void ide_unregister(unsigned int index) 310void ide_unregister(ide_hwif_t *hwif)
433{ 311{
434 ide_hwif_t *hwif, *g; 312 ide_hwif_t *g;
435 ide_hwgroup_t *hwgroup; 313 ide_hwgroup_t *hwgroup;
436 int irq_count = 0; 314 int irq_count = 0;
437 315
438 BUG_ON(index >= MAX_HWIFS);
439
440 BUG_ON(in_interrupt()); 316 BUG_ON(in_interrupt());
441 BUG_ON(irqs_disabled()); 317 BUG_ON(irqs_disabled());
442 mutex_lock(&ide_cfg_mtx); 318 mutex_lock(&ide_cfg_mtx);
443 spin_lock_irq(&ide_lock); 319 spin_lock_irq(&ide_lock);
444 hwif = &ide_hwifs[index];
445 if (!hwif->present) 320 if (!hwif->present)
446 goto abort; 321 goto abort;
447 __ide_port_unregister_devices(hwif); 322 __ide_port_unregister_devices(hwif);
@@ -479,12 +354,10 @@ void ide_unregister(unsigned int index)
479 spin_lock_irq(&ide_lock); 354 spin_lock_irq(&ide_lock);
480 355
481 if (hwif->dma_base) 356 if (hwif->dma_base)
482 (void)ide_release_dma(hwif); 357 ide_release_dma_engine(hwif);
483
484 ide_hwif_release_regions(hwif);
485 358
486 /* restore hwif data to pristine status */ 359 /* restore hwif data to pristine status */
487 ide_init_port_data(hwif, index); 360 ide_init_port_data(hwif, hwif->index);
488 361
489abort: 362abort:
490 spin_unlock_irq(&ide_lock); 363 spin_unlock_irq(&ide_lock);
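Note: ide_unregister() now takes the ide_hwif_t itself instead of an index into the global ide_hwifs[] array, which is why the BUG_ON() range check and the array lookup disappear. For any external caller the conversion is mechanical (hypothetical call site):

	ide_unregister(2);		/* before: index into ide_hwifs[] */
	ide_unregister(&ide_hwifs[2]);	/* after:  the hwif itself */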
@@ -495,9 +368,8 @@ EXPORT_SYMBOL(ide_unregister);
495 368
496void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw) 369void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw)
497{ 370{
498 memcpy(hwif->io_ports, hw->io_ports, sizeof(hwif->io_ports)); 371 memcpy(&hwif->io_ports, &hw->io_ports, sizeof(hwif->io_ports));
499 hwif->irq = hw->irq; 372 hwif->irq = hw->irq;
500 hwif->noprobe = 0;
501 hwif->chipset = hw->chipset; 373 hwif->chipset = hw->chipset;
502 hwif->gendev.parent = hw->dev; 374 hwif->gendev.parent = hw->dev;
503 hwif->ack_intr = hw->ack_intr; 375 hwif->ack_intr = hw->ack_intr;
@@ -588,7 +460,7 @@ int set_using_dma(ide_drive_t *drive, int arg)
588 if (!drive->id || !(drive->id->capability & 1)) 460 if (!drive->id || !(drive->id->capability & 1))
589 goto out; 461 goto out;
590 462
591 if (hwif->dma_host_set == NULL) 463 if (hwif->dma_ops == NULL)
592 goto out; 464 goto out;
593 465
594 err = -EBUSY; 466 err = -EBUSY;
@@ -627,11 +499,14 @@ out:
627int set_pio_mode(ide_drive_t *drive, int arg) 499int set_pio_mode(ide_drive_t *drive, int arg)
628{ 500{
629 struct request rq; 501 struct request rq;
502 ide_hwif_t *hwif = drive->hwif;
503 const struct ide_port_ops *port_ops = hwif->port_ops;
630 504
631 if (arg < 0 || arg > 255) 505 if (arg < 0 || arg > 255)
632 return -EINVAL; 506 return -EINVAL;
633 507
634 if (drive->hwif->set_pio_mode == NULL) 508 if (port_ops == NULL || port_ops->set_pio_mode == NULL ||
509 (hwif->host_flags & IDE_HFLAG_NO_SET_MODE))
635 return -ENOSYS; 510 return -ENOSYS;
636 511
637 if (drive->special.b.set_tune) 512 if (drive->special.b.set_tune)
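Note: set_pio_mode moves behind hwif->port_ops, the tuning counterpart of the dma_ops split above. Judging from the legacy-driver hunks below (ali14xx, dtc2278, ht6560b), the table carries at least these hooks; this is a partial sketch, and the full definition is in <linux/ide.h>:

struct ide_port_ops {
	void	(*port_init_devs)(ide_hwif_t *);
	void	(*set_pio_mode)(ide_drive_t *, const u8);
	void	(*selectproc)(ide_drive_t *);
	/* ...further port hooks elided... */
};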
@@ -953,16 +828,6 @@ static int __init match_parm (char *s, const char *keywords[], int vals[], int m
953 return 0; /* zero = nothing matched */ 828 return 0; /* zero = nothing matched */
954} 829}
955 830
956extern int probe_ali14xx;
957extern int probe_umc8672;
958extern int probe_dtc2278;
959extern int probe_ht6560b;
960extern int probe_qd65xx;
961extern int cmd640_vlb;
962extern int probe_4drives;
963
964static int __initdata is_chipset_set;
965
966/* 831/*
967 * ide_setup() gets called VERY EARLY during initialization, 832 * ide_setup() gets called VERY EARLY during initialization,
968 * to handle kernel "command line" strings beginning with "hdx=" or "ide". 833 * to handle kernel "command line" strings beginning with "hdx=" or "ide".
@@ -971,14 +836,12 @@ static int __initdata is_chipset_set;
971 */ 836 */
972static int __init ide_setup(char *s) 837static int __init ide_setup(char *s)
973{ 838{
974 int i, vals[3];
975 ide_hwif_t *hwif; 839 ide_hwif_t *hwif;
976 ide_drive_t *drive; 840 ide_drive_t *drive;
977 unsigned int hw, unit; 841 unsigned int hw, unit;
842 int vals[3];
978 const char max_drive = 'a' + ((MAX_HWIFS * MAX_DRIVES) - 1); 843 const char max_drive = 'a' + ((MAX_HWIFS * MAX_DRIVES) - 1);
979 const char max_hwif = '0' + (MAX_HWIFS - 1);
980 844
981
982 if (strncmp(s,"hd",2) == 0 && s[2] == '=') /* hd= is for hd.c */ 845 if (strncmp(s,"hd",2) == 0 && s[2] == '=') /* hd= is for hd.c */
983 return 0; /* driver and not us */ 846 return 0; /* driver and not us */
984 847
@@ -994,7 +857,7 @@ static int __init ide_setup(char *s)
994 857
995 printk(" : Enabled support for IDE doublers\n"); 858 printk(" : Enabled support for IDE doublers\n");
996 ide_doubler = 1; 859 ide_doubler = 1;
997 return 1; 860 goto obsolete_option;
998 } 861 }
999#endif /* CONFIG_BLK_DEV_IDEDOUBLER */ 862#endif /* CONFIG_BLK_DEV_IDEDOUBLER */
1000 863
@@ -1008,17 +871,17 @@ static int __init ide_setup(char *s)
1008 if (!strcmp(s, "ide=noacpi")) { 871 if (!strcmp(s, "ide=noacpi")) {
1009 //printk(" : Disable IDE ACPI support.\n"); 872 //printk(" : Disable IDE ACPI support.\n");
1010 ide_noacpi = 1; 873 ide_noacpi = 1;
1011 return 1; 874 goto obsolete_option;
1012 } 875 }
1013 if (!strcmp(s, "ide=acpigtf")) { 876 if (!strcmp(s, "ide=acpigtf")) {
1014 //printk(" : Enable IDE ACPI _GTF support.\n"); 877 //printk(" : Enable IDE ACPI _GTF support.\n");
1015 ide_noacpitfs = 0; 878 ide_acpigtf = 1;
1016 return 1; 879 goto obsolete_option;
1017 } 880 }
1018 if (!strcmp(s, "ide=acpionboot")) { 881 if (!strcmp(s, "ide=acpionboot")) {
1019 //printk(" : Call IDE ACPI methods on boot.\n"); 882 //printk(" : Call IDE ACPI methods on boot.\n");
1020 ide_noacpionboot = 0; 883 ide_acpionboot = 1;
1021 return 1; 884 goto obsolete_option;
1022 } 885 }
1023#endif /* CONFIG_BLK_DEV_IDEACPI */ 886#endif /* CONFIG_BLK_DEV_IDEACPI */
1024 887
@@ -1028,7 +891,7 @@ static int __init ide_setup(char *s)
1028 if (s[0] == 'h' && s[1] == 'd' && s[2] >= 'a' && s[2] <= max_drive) { 891 if (s[0] == 'h' && s[1] == 'd' && s[2] >= 'a' && s[2] <= max_drive) {
1029 const char *hd_words[] = { 892 const char *hd_words[] = {
1030 "none", "noprobe", "nowerr", "cdrom", "nodma", 893 "none", "noprobe", "nowerr", "cdrom", "nodma",
1031 "autotune", "noautotune", "-8", "-9", "-10", 894 "-6", "-7", "-8", "-9", "-10",
1032 "noflush", "remap", "remap63", "scsi", NULL }; 895 "noflush", "remap", "remap63", "scsi", NULL };
1033 unit = s[2] - 'a'; 896 unit = s[2] - 'a';
1034 hw = unit / MAX_DRIVES; 897 hw = unit / MAX_DRIVES;
@@ -1043,30 +906,22 @@ static int __init ide_setup(char *s)
1043 case -1: /* "none" */ 906 case -1: /* "none" */
1044 case -2: /* "noprobe" */ 907 case -2: /* "noprobe" */
1045 drive->noprobe = 1; 908 drive->noprobe = 1;
1046 goto done; 909 goto obsolete_option;
1047 case -3: /* "nowerr" */ 910 case -3: /* "nowerr" */
1048 drive->bad_wstat = BAD_R_STAT; 911 drive->bad_wstat = BAD_R_STAT;
1049 hwif->noprobe = 0; 912 goto obsolete_option;
1050 goto done;
1051 case -4: /* "cdrom" */ 913 case -4: /* "cdrom" */
1052 drive->present = 1; 914 drive->present = 1;
1053 drive->media = ide_cdrom; 915 drive->media = ide_cdrom;
1054 /* an ATAPI device ignores DRDY */ 916 /* an ATAPI device ignores DRDY */
1055 drive->ready_stat = 0; 917 drive->ready_stat = 0;
1056 hwif->noprobe = 0; 918 goto obsolete_option;
1057 goto done;
1058 case -5: /* nodma */ 919 case -5: /* nodma */
1059 drive->nodma = 1; 920 drive->nodma = 1;
1060 goto done;
1061 case -6: /* "autotune" */
1062 drive->autotune = IDE_TUNE_AUTO;
1063 goto obsolete_option;
1064 case -7: /* "noautotune" */
1065 drive->autotune = IDE_TUNE_NOAUTO;
1066 goto obsolete_option; 921 goto obsolete_option;
1067 case -11: /* noflush */ 922 case -11: /* noflush */
1068 drive->noflush = 1; 923 drive->noflush = 1;
1069 goto done; 924 goto obsolete_option;
1070 case -12: /* "remap" */ 925 case -12: /* "remap" */
1071 drive->remap_0_to_1 = 1; 926 drive->remap_0_to_1 = 1;
1072 goto obsolete_option; 927 goto obsolete_option;
@@ -1084,8 +939,7 @@ static int __init ide_setup(char *s)
1084 drive->sect = drive->bios_sect = vals[2]; 939 drive->sect = drive->bios_sect = vals[2];
1085 drive->present = 1; 940 drive->present = 1;
1086 drive->forced_geom = 1; 941 drive->forced_geom = 1;
1087 hwif->noprobe = 0; 942 goto obsolete_option;
1088 goto done;
1089 default: 943 default:
1090 goto bad_option; 944 goto bad_option;
1091 } 945 }
@@ -1103,126 +957,15 @@ static int __init ide_setup(char *s)
1103 idebus_parameter = vals[0]; 957 idebus_parameter = vals[0];
1104 } else 958 } else
1105 printk(" -- BAD BUS SPEED! Expected value from 20 to 66"); 959 printk(" -- BAD BUS SPEED! Expected value from 20 to 66");
1106 goto done; 960 goto obsolete_option;
1107 } 961 }
1108 /*
1109 * Look for interface options: "idex="
1110 */
1111 if (s[3] >= '0' && s[3] <= max_hwif) {
1112 /*
1113 * Be VERY CAREFUL changing this: note hardcoded indexes below
1114 * (-8, -9, -10) are reserved to ease the hardcoding.
1115 */
1116 static const char *ide_words[] = {
1117 "minus1", "serialize", "minus3", "minus4",
1118 "reset", "minus6", "ata66", "minus8", "minus9",
1119 "minus10", "four", "qd65xx", "ht6560b", "cmd640_vlb",
1120 "dtc2278", "umc8672", "ali14xx", NULL };
1121
1122 hw = s[3] - '0';
1123 hwif = &ide_hwifs[hw];
1124 i = match_parm(&s[4], ide_words, vals, 3);
1125 962
1126 /*
1127 * Cryptic check to ensure chipset not already set for hwif.
1128 * Note: we can't depend on hwif->chipset here.
1129 */
1130 if (i >= -18 && i <= -11) {
1131 /* chipset already specified */
1132 if (is_chipset_set)
1133 goto bad_option;
1134 /* these drivers are for "ide0=" only */
1135 if (hw != 0)
1136 goto bad_hwif;
1137 is_chipset_set = 1;
1138 printk("\n");
1139 }
1140
1141 switch (i) {
1142#ifdef CONFIG_BLK_DEV_ALI14XX
1143 case -17: /* "ali14xx" */
1144 probe_ali14xx = 1;
1145 goto obsolete_option;
1146#endif
1147#ifdef CONFIG_BLK_DEV_UMC8672
1148 case -16: /* "umc8672" */
1149 probe_umc8672 = 1;
1150 goto obsolete_option;
1151#endif
1152#ifdef CONFIG_BLK_DEV_DTC2278
1153 case -15: /* "dtc2278" */
1154 probe_dtc2278 = 1;
1155 goto obsolete_option;
1156#endif
1157#ifdef CONFIG_BLK_DEV_CMD640
1158 case -14: /* "cmd640_vlb" */
1159 cmd640_vlb = 1;
1160 goto obsolete_option;
1161#endif
1162#ifdef CONFIG_BLK_DEV_HT6560B
1163 case -13: /* "ht6560b" */
1164 probe_ht6560b = 1;
1165 goto obsolete_option;
1166#endif
1167#ifdef CONFIG_BLK_DEV_QD65XX
1168 case -12: /* "qd65xx" */
1169 probe_qd65xx = 1;
1170 goto obsolete_option;
1171#endif
1172#ifdef CONFIG_BLK_DEV_4DRIVES
1173 case -11: /* "four" drives on one set of ports */
1174 probe_4drives = 1;
1175 goto obsolete_option;
1176#endif
1177 case -10: /* minus10 */
1178 case -9: /* minus9 */
1179 case -8: /* minus8 */
1180 case -6:
1181 case -4:
1182 case -3:
1183 goto bad_option;
1184 case -7: /* ata66 */
1185#ifdef CONFIG_BLK_DEV_IDEPCI
1186 /*
1187 * Use ATA_CBL_PATA40_SHORT so drive side
1188 * cable detection is also overriden.
1189 */
1190 hwif->cbl = ATA_CBL_PATA40_SHORT;
1191 goto obsolete_option;
1192#else
1193 goto bad_hwif;
1194#endif
1195 case -5: /* "reset" */
1196 hwif->reset = 1;
1197 goto obsolete_option;
1198 case -2: /* "serialize" */
1199 hwif->mate = &ide_hwifs[hw^1];
1200 hwif->mate->mate = hwif;
1201 hwif->serialized = hwif->mate->serialized = 1;
1202 goto obsolete_option;
1203
1204 case -1:
1205 case 0:
1206 case 1:
1207 case 2:
1208 case 3:
1209 goto bad_option;
1210 default:
1211 printk(" -- SUPPORT NOT CONFIGURED IN THIS KERNEL\n");
1212 return 1;
1213 }
1214 }
1215bad_option: 963bad_option:
1216 printk(" -- BAD OPTION\n"); 964 printk(" -- BAD OPTION\n");
1217 return 1; 965 return 1;
1218obsolete_option: 966obsolete_option:
1219 printk(" -- OBSOLETE OPTION, WILL BE REMOVED SOON!\n"); 967 printk(" -- OBSOLETE OPTION, WILL BE REMOVED SOON!\n");
1220 return 1; 968 return 1;
1221bad_hwif:
1222 printk("-- NOT SUPPORTED ON ide%d", hw);
1223done:
1224 printk("\n");
1225 return 1;
1226} 969}
1227 970
1228EXPORT_SYMBOL(ide_lock); 971EXPORT_SYMBOL(ide_lock);
@@ -1358,6 +1101,185 @@ static void ide_port_class_release(struct device *portdev)
1358 put_device(&hwif->gendev); 1101 put_device(&hwif->gendev);
1359} 1102}
1360 1103
1104int ide_vlb_clk;
1105EXPORT_SYMBOL_GPL(ide_vlb_clk);
1106
1107module_param_named(vlb_clock, ide_vlb_clk, int, 0);
1108MODULE_PARM_DESC(vlb_clock, "VLB clock frequency (in MHz)");
1109
1110int ide_pci_clk;
1111EXPORT_SYMBOL_GPL(ide_pci_clk);
1112
1113module_param_named(pci_clock, ide_pci_clk, int, 0);
1114MODULE_PARM_DESC(pci_clock, "PCI bus clock frequency (in MHz)");
1115
1116static int ide_set_dev_param_mask(const char *s, struct kernel_param *kp)
1117{
1118 int a, b, i, j = 1;
1119 unsigned int *dev_param_mask = (unsigned int *)kp->arg;
1120
1121 if (sscanf(s, "%d.%d:%d", &a, &b, &j) != 3 &&
1122 sscanf(s, "%d.%d", &a, &b) != 2)
1123 return -EINVAL;
1124
1125 i = a * MAX_DRIVES + b;
1126
1127 if (i >= MAX_HWIFS * MAX_DRIVES || j < 0 || j > 1)
1128 return -EINVAL;
1129
1130 if (j)
1131 *dev_param_mask |= (1 << i);
1132 else
1133 *dev_param_mask &= ~(1 << i);
1134
1135 return 0;
1136}
1137
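Note: ide_set_dev_param_mask() parses the "<port>.<unit>" addressing (with an optional ":0"/":1" to clear or set) that replaces the old per-device hdX= flags; device 0.1 is port 0, unit 1, i.e. what used to be hdb. Assuming the core module keeps the name ide-core, the old "hdb=nodma" becomes "modprobe ide-core nodma=0.1" when modular, or ide_core.nodma=0.1 on the kernel command line when built in (both invocations hypothetical).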
1138static unsigned int ide_nodma;
1139
1140module_param_call(nodma, ide_set_dev_param_mask, NULL, &ide_nodma, 0);
1141MODULE_PARM_DESC(nodma, "disallow DMA for a device");
1142
1143static unsigned int ide_noflush;
1144
1145module_param_call(noflush, ide_set_dev_param_mask, NULL, &ide_noflush, 0);
1146MODULE_PARM_DESC(noflush, "disable flush requests for a device");
1147
1148static unsigned int ide_noprobe;
1149
1150module_param_call(noprobe, ide_set_dev_param_mask, NULL, &ide_noprobe, 0);
1151MODULE_PARM_DESC(noprobe, "skip probing for a device");
1152
1153static unsigned int ide_nowerr;
1154
1155module_param_call(nowerr, ide_set_dev_param_mask, NULL, &ide_nowerr, 0);
1156MODULE_PARM_DESC(nowerr, "ignore the WRERR_STAT bit for a device");
1157
1158static unsigned int ide_cdroms;
1159
1160module_param_call(cdrom, ide_set_dev_param_mask, NULL, &ide_cdroms, 0);
1161MODULE_PARM_DESC(cdrom, "force device as a CD-ROM");
1162
1163struct chs_geom {
1164 unsigned int cyl;
1165 u8 head;
1166 u8 sect;
1167};
1168
1169static unsigned int ide_disks;
1170static struct chs_geom ide_disks_chs[MAX_HWIFS * MAX_DRIVES];
1171
1172static int ide_set_disk_chs(const char *str, struct kernel_param *kp)
1173{
1174 int a, b, c = 0, h = 0, s = 0, i, j = 1;
1175
1176 if (sscanf(str, "%d.%d:%d,%d,%d", &a, &b, &c, &h, &s) != 5 &&
1177 sscanf(str, "%d.%d:%d", &a, &b, &j) != 3)
1178 return -EINVAL;
1179
1180 i = a * MAX_DRIVES + b;
1181
1182 if (i >= MAX_HWIFS * MAX_DRIVES || j < 0 || j > 1)
1183 return -EINVAL;
1184
1185 if (c > INT_MAX || h > 255 || s > 255)
1186 return -EINVAL;
1187
1188 if (j)
1189 ide_disks |= (1 << i);
1190 else
1191 ide_disks &= ~(1 << i);
1192
1193 ide_disks_chs[i].cyl = c;
1194 ide_disks_chs[i].head = h;
1195 ide_disks_chs[i].sect = s;
1196
1197 return 0;
1198}
1199
1200module_param_call(chs, ide_set_disk_chs, NULL, NULL, 0);
1201MODULE_PARM_DESC(chs, "force device as a disk (using CHS)");
1202
1203static void ide_dev_apply_params(ide_drive_t *drive)
1204{
1205 int i = drive->hwif->index * MAX_DRIVES + drive->select.b.unit;
1206
1207 if (ide_nodma & (1 << i)) {
1208 printk(KERN_INFO "ide: disallowing DMA for %s\n", drive->name);
1209 drive->nodma = 1;
1210 }
1211 if (ide_noflush & (1 << i)) {
1212 printk(KERN_INFO "ide: disabling flush requests for %s\n",
1213 drive->name);
1214 drive->noflush = 1;
1215 }
1216 if (ide_noprobe & (1 << i)) {
1217 printk(KERN_INFO "ide: skipping probe for %s\n", drive->name);
1218 drive->noprobe = 1;
1219 }
1220 if (ide_nowerr & (1 << i)) {
1221 printk(KERN_INFO "ide: ignoring the WRERR_STAT bit for %s\n",
1222 drive->name);
1223 drive->bad_wstat = BAD_R_STAT;
1224 }
1225 if (ide_cdroms & (1 << i)) {
1226 printk(KERN_INFO "ide: forcing %s as a CD-ROM\n", drive->name);
1227 drive->present = 1;
1228 drive->media = ide_cdrom;
1229 /* an ATAPI device ignores DRDY */
1230 drive->ready_stat = 0;
1231 }
1232 if (ide_disks & (1 << i)) {
1233 drive->cyl = drive->bios_cyl = ide_disks_chs[i].cyl;
1234 drive->head = drive->bios_head = ide_disks_chs[i].head;
1235 drive->sect = drive->bios_sect = ide_disks_chs[i].sect;
1236 drive->forced_geom = 1;
1237 printk(KERN_INFO "ide: forcing %s as a disk (%d/%d/%d)\n",
1238 drive->name,
1239 drive->cyl, drive->head, drive->sect);
1240 drive->present = 1;
1241 drive->media = ide_disk;
1242 drive->ready_stat = READY_STAT;
1243 }
1244}
1245
1246static unsigned int ide_ignore_cable;
1247
1248static int ide_set_ignore_cable(const char *s, struct kernel_param *kp)
1249{
1250 int i, j = 1;
1251
1252 if (sscanf(s, "%d:%d", &i, &j) != 2 && sscanf(s, "%d", &i) != 1)
1253 return -EINVAL;
1254
1255 if (i >= MAX_HWIFS || j < 0 || j > 1)
1256 return -EINVAL;
1257
1258 if (j)
1259 ide_ignore_cable |= (1 << i);
1260 else
1261 ide_ignore_cable &= (1 << i);
1262
1263 return 0;
1264}
1265
1266module_param_call(ignore_cable, ide_set_ignore_cable, NULL, NULL, 0);
1267MODULE_PARM_DESC(ignore_cable, "ignore cable detection");
1268
1269void ide_port_apply_params(ide_hwif_t *hwif)
1270{
1271 int i;
1272
1273 if (ide_ignore_cable & (1 << hwif->index)) {
1274 printk(KERN_INFO "ide: ignoring cable detection for %s\n",
1275 hwif->name);
1276 hwif->cbl = ATA_CBL_PATA40_SHORT;
1277 }
1278
1279 for (i = 0; i < MAX_DRIVES; i++)
1280 ide_dev_apply_params(&hwif->drives[i]);
1281}
1282
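Note: ignore_cable takes a bare port number (again with an optional ":0"/":1"), so e.g. ide_core.ignore_cable=0 (module name assumed, as above) forces the ATA_CBL_PATA40_SHORT override on port 0 -- the module-parameter replacement for the "idex=ata66" option removed from ide_setup() earlier in this file.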
1361/* 1283/*
1362 * This gets invoked once during initialization, to set *everything* up 1284 * This gets invoked once during initialization, to set *everything* up
1363 */ 1285 */
@@ -1424,11 +1346,6 @@ int __init init_module (void)
1424 1346
1425void __exit cleanup_module (void) 1347void __exit cleanup_module (void)
1426{ 1348{
1427 int index;
1428
1429 for (index = 0; index < MAX_HWIFS; ++index)
1430 ide_unregister(index);
1431
1432 proc_ide_destroy(); 1349 proc_ide_destroy();
1433 1350
1434 class_destroy(ide_port_class); 1351 class_destroy(ide_port_class);
diff --git a/drivers/ide/legacy/ali14xx.c b/drivers/ide/legacy/ali14xx.c
index bc8b1f8de614..90c65cf97448 100644
--- a/drivers/ide/legacy/ali14xx.c
+++ b/drivers/ide/legacy/ali14xx.c
@@ -49,6 +49,8 @@
49 49
50#include <asm/io.h> 50#include <asm/io.h>
51 51
52#define DRV_NAME "ali14xx"
53
52/* port addresses for auto-detection */ 54/* port addresses for auto-detection */
53#define ALI_NUM_PORTS 4 55#define ALI_NUM_PORTS 4
54static const int ports[ALI_NUM_PORTS] __initdata = 56static const int ports[ALI_NUM_PORTS] __initdata =
@@ -86,7 +88,7 @@ static u8 regOff; /* output to base port to close registers */
86/* 88/*
87 * Read a controller register. 89 * Read a controller register.
88 */ 90 */
89static inline u8 inReg (u8 reg) 91static inline u8 inReg(u8 reg)
90{ 92{
91 outb_p(reg, regPort); 93 outb_p(reg, regPort);
92 return inb(dataPort); 94 return inb(dataPort);
@@ -95,7 +97,7 @@ static inline u8 inReg (u8 reg)
95/* 97/*
96 * Write a controller register. 98 * Write a controller register.
97 */ 99 */
98static void outReg (u8 data, u8 reg) 100static void outReg(u8 data, u8 reg)
99{ 101{
100 outb_p(reg, regPort); 102 outb_p(reg, regPort);
101 outb_p(data, dataPort); 103 outb_p(data, dataPort);
@@ -114,7 +116,7 @@ static void ali14xx_set_pio_mode(ide_drive_t *drive, const u8 pio)
114 int time1, time2; 116 int time1, time2;
115 u8 param1, param2, param3, param4; 117 u8 param1, param2, param3, param4;
116 unsigned long flags; 118 unsigned long flags;
117 int bus_speed = system_bus_clock(); 119 int bus_speed = ide_vlb_clk ? ide_vlb_clk : system_bus_clock();
118 120
119 /* calculate timing, according to PIO mode */ 121 /* calculate timing, according to PIO mode */
120 time1 = ide_pio_cycle_time(drive, pio); 122 time1 = ide_pio_cycle_time(drive, pio);
@@ -143,7 +145,7 @@ static void ali14xx_set_pio_mode(ide_drive_t *drive, const u8 pio)
143/* 145/*
144 * Auto-detect the IDE controller port. 146 * Auto-detect the IDE controller port.
145 */ 147 */
146static int __init findPort (void) 148static int __init findPort(void)
147{ 149{
148 int i; 150 int i;
149 u8 t; 151 u8 t;
@@ -175,7 +177,8 @@ static int __init findPort (void)
175/* 177/*
176 * Initialize controller registers with default values. 178 * Initialize controller registers with default values.
177 */ 179 */
178static int __init initRegisters (void) { 180static int __init initRegisters(void)
181{
179 const RegInitializer *p; 182 const RegInitializer *p;
180 u8 t; 183 u8 t;
181 unsigned long flags; 184 unsigned long flags;
@@ -191,17 +194,20 @@ static int __init initRegisters (void) {
191 return t; 194 return t;
192} 195}
193 196
197static const struct ide_port_ops ali14xx_port_ops = {
198 .set_pio_mode = ali14xx_set_pio_mode,
199};
200
194static const struct ide_port_info ali14xx_port_info = { 201static const struct ide_port_info ali14xx_port_info = {
202 .name = DRV_NAME,
195 .chipset = ide_ali14xx, 203 .chipset = ide_ali14xx,
196 .host_flags = IDE_HFLAG_NO_DMA | IDE_HFLAG_NO_AUTOTUNE, 204 .port_ops = &ali14xx_port_ops,
205 .host_flags = IDE_HFLAG_NO_DMA,
197 .pio_mask = ATA_PIO4, 206 .pio_mask = ATA_PIO4,
198}; 207};
199 208
200static int __init ali14xx_probe(void) 209static int __init ali14xx_probe(void)
201{ 210{
202 static u8 idx[4] = { 0, 1, 0xff, 0xff };
203 hw_regs_t hw[2];
204
205 printk(KERN_DEBUG "ali14xx: base=0x%03x, regOn=0x%02x.\n", 211 printk(KERN_DEBUG "ali14xx: base=0x%03x, regOn=0x%02x.\n",
206 basePort, regOn); 212 basePort, regOn);
207 213
@@ -211,26 +217,10 @@ static int __init ali14xx_probe(void)
211 return 1; 217 return 1;
212 } 218 }
213 219
214 memset(&hw, 0, sizeof(hw)); 220 return ide_legacy_device_add(&ali14xx_port_info, 0);
215
216 ide_std_init_ports(&hw[0], 0x1f0, 0x3f6);
217 hw[0].irq = 14;
218
219 ide_std_init_ports(&hw[1], 0x170, 0x376);
220 hw[1].irq = 15;
221
222 ide_init_port_hw(&ide_hwifs[0], &hw[0]);
223 ide_init_port_hw(&ide_hwifs[1], &hw[1]);
224
225 ide_hwifs[0].set_pio_mode = &ali14xx_set_pio_mode;
226 ide_hwifs[1].set_pio_mode = &ali14xx_set_pio_mode;
227
228 ide_device_add(idx, &ali14xx_port_info);
229
230 return 0;
231} 221}
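Note: the boilerplate deleted here (standard ports 0x1f0/0x3f6 and 0x170/0x376, IRQs 14/15, ide_init_port_hw() on two hwifs, then ide_device_add()) is what the new ide_legacy_device_add() helper factors out for the VLB/ISA drivers in this series. Its real body is not part of this diff; the outline below is a hypothetical reconstruction from the removed code, and the meaning of the helper's second argument (0 at every call site shown) is not visible here.

static int ide_legacy_device_add_sketch(const struct ide_port_info *d)
{
	u8 idx[4] = { 0, 1, 0xff, 0xff };
	hw_regs_t hw[2];

	memset(&hw, 0, sizeof(hw));

	ide_std_init_ports(&hw[0], 0x1f0, 0x3f6);
	hw[0].irq = 14;
	ide_std_init_ports(&hw[1], 0x170, 0x376);
	hw[1].irq = 15;

	ide_init_port_hw(&ide_hwifs[0], &hw[0]);
	ide_init_port_hw(&ide_hwifs[1], &hw[1]);

	ide_device_add(idx, d);

	return 0;
}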
232 222
233int probe_ali14xx = 0; 223static int probe_ali14xx;
234 224
235module_param_named(probe, probe_ali14xx, bool, 0); 225module_param_named(probe, probe_ali14xx, bool, 0);
236MODULE_PARM_DESC(probe, "probe for ALI M14xx chipsets"); 226MODULE_PARM_DESC(probe, "probe for ALI M14xx chipsets");
diff --git a/drivers/ide/legacy/buddha.c b/drivers/ide/legacy/buddha.c
index fdd3791e465f..5c730e4dd735 100644
--- a/drivers/ide/legacy/buddha.c
+++ b/drivers/ide/legacy/buddha.c
@@ -102,7 +102,7 @@ static int buddha_ack_intr(ide_hwif_t *hwif)
102{ 102{
103 unsigned char ch; 103 unsigned char ch;
104 104
105 ch = z_readb(hwif->io_ports[IDE_IRQ_OFFSET]); 105 ch = z_readb(hwif->io_ports.irq_addr);
106 if (!(ch & 0x80)) 106 if (!(ch & 0x80))
107 return 0; 107 return 0;
108 return 1; 108 return 1;
@@ -112,9 +112,9 @@ static int xsurf_ack_intr(ide_hwif_t *hwif)
112{ 112{
113 unsigned char ch; 113 unsigned char ch;
114 114
115 ch = z_readb(hwif->io_ports[IDE_IRQ_OFFSET]); 115 ch = z_readb(hwif->io_ports.irq_addr);
116 /* X-Surf needs a 0 written to IRQ register to ensure ISA bit A11 stays at 0 */ 116 /* X-Surf needs a 0 written to IRQ register to ensure ISA bit A11 stays at 0 */
117 z_writeb(0, hwif->io_ports[IDE_IRQ_OFFSET]); 117 z_writeb(0, hwif->io_ports.irq_addr);
118 if (!(ch & 0x80)) 118 if (!(ch & 0x80))
119 return 0; 119 return 0;
120 return 1; 120 return 1;
@@ -128,13 +128,13 @@ static void __init buddha_setup_ports(hw_regs_t *hw, unsigned long base,
128 128
129 memset(hw, 0, sizeof(*hw)); 129 memset(hw, 0, sizeof(*hw));
130 130
131 hw->io_ports[IDE_DATA_OFFSET] = base; 131 hw->io_ports.data_addr = base;
132 132
133 for (i = 1; i < 8; i++) 133 for (i = 1; i < 8; i++)
134 hw->io_ports[i] = base + 2 + i * 4; 134 hw->io_ports_array[i] = base + 2 + i * 4;
135 135
136 hw->io_ports[IDE_CONTROL_OFFSET] = ctl; 136 hw->io_ports.ctl_addr = ctl;
137 hw->io_ports[IDE_IRQ_OFFSET] = irq_port; 137 hw->io_ports.irq_addr = irq_port;
138 138
139 hw->irq = IRQ_AMIGA_PORTS; 139 hw->irq = IRQ_AMIGA_PORTS;
140 hw->ack_intr = ack_intr; 140 hw->ack_intr = ack_intr;
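Note: buddha_setup_ports() mixes the named fields with hw->io_ports_array[i] in its fill loop, which only works if the array is a union overlay of struct ide_io_ports (cf. the sketch near the ide-taskfile.c hunks above), so indexes 1..7 land on the error/feature through status/command slots. A reduced standalone illustration of that assumed aliasing:

#include <assert.h>

struct io_ports_sketch {
	unsigned long data_addr;
	unsigned long error_addr;
	/* ...six more register addresses in the real struct... */
};

struct hw_regs_sketch {
	union {		/* anonymous union: both names alias one storage */
		struct io_ports_sketch io_ports;
		unsigned long io_ports_array[2];
	};
};

int main(void)
{
	struct hw_regs_sketch hw = { .io_ports_array = { 0x1f0, 0x1f1 } };

	/* buddha_setup_ports() relies on exactly this overlay */
	assert(hw.io_ports.data_addr  == 0x1f0);
	assert(hw.io_ports.error_addr == 0x1f1);
	return 0;
}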
@@ -221,15 +221,13 @@ fail_base2:
221 221
222 buddha_setup_ports(&hw, base, ctl, irq_port, ack_intr); 222 buddha_setup_ports(&hw, base, ctl, irq_port, ack_intr);
223 223
224 hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]); 224 hwif = ide_find_port();
225 if (hwif) { 225 if (hwif) {
226 u8 index = hwif->index; 226 u8 index = hwif->index;
227 227
228 ide_init_port_data(hwif, index); 228 ide_init_port_data(hwif, index);
229 ide_init_port_hw(hwif, &hw); 229 ide_init_port_hw(hwif, &hw);
230 230
231 hwif->mmio = 1;
232
233 idx[i] = index; 231 idx[i] = index;
234 } 232 }
235 } 233 }
diff --git a/drivers/ide/legacy/dtc2278.c b/drivers/ide/legacy/dtc2278.c
index 5f69cd2ea6f7..af791a02a120 100644
--- a/drivers/ide/legacy/dtc2278.c
+++ b/drivers/ide/legacy/dtc2278.c
@@ -16,6 +16,8 @@
16 16
17#include <asm/io.h> 17#include <asm/io.h>
18 18
19#define DRV_NAME "dtc2278"
20
19/* 21/*
20 * Changing this #undef to #define may solve start up problems in some systems. 22 * Changing this #undef to #define may solve start up problems in some systems.
21 */ 23 */
@@ -86,30 +88,26 @@ static void dtc2278_set_pio_mode(ide_drive_t *drive, const u8 pio)
86 } 88 }
87} 89}
88 90
91static const struct ide_port_ops dtc2278_port_ops = {
92 .set_pio_mode = dtc2278_set_pio_mode,
93};
94
89static const struct ide_port_info dtc2278_port_info __initdata = { 95static const struct ide_port_info dtc2278_port_info __initdata = {
96 .name = DRV_NAME,
90 .chipset = ide_dtc2278, 97 .chipset = ide_dtc2278,
98 .port_ops = &dtc2278_port_ops,
91 .host_flags = IDE_HFLAG_SERIALIZE | 99 .host_flags = IDE_HFLAG_SERIALIZE |
92 IDE_HFLAG_NO_UNMASK_IRQS | 100 IDE_HFLAG_NO_UNMASK_IRQS |
93 IDE_HFLAG_IO_32BIT | 101 IDE_HFLAG_IO_32BIT |
94 /* disallow ->io_32bit changes */ 102 /* disallow ->io_32bit changes */
95 IDE_HFLAG_NO_IO_32BIT | 103 IDE_HFLAG_NO_IO_32BIT |
96 IDE_HFLAG_NO_DMA | 104 IDE_HFLAG_NO_DMA,
97 IDE_HFLAG_NO_AUTOTUNE,
98 .pio_mask = ATA_PIO4, 105 .pio_mask = ATA_PIO4,
99}; 106};
100 107
101static int __init dtc2278_probe(void) 108static int __init dtc2278_probe(void)
102{ 109{
103 unsigned long flags; 110 unsigned long flags;
104 ide_hwif_t *hwif, *mate;
105 static u8 idx[4] = { 0, 1, 0xff, 0xff };
106 hw_regs_t hw[2];
107
108 hwif = &ide_hwifs[0];
109 mate = &ide_hwifs[1];
110
111 if (hwif->chipset != ide_unknown || mate->chipset != ide_unknown)
112 return 1;
113 111
114 local_irq_save(flags); 112 local_irq_save(flags);
115 /* 113 /*
@@ -129,25 +127,10 @@ static int __init dtc2278_probe(void)
129#endif 127#endif
130 local_irq_restore(flags); 128 local_irq_restore(flags);
131 129
132 memset(&hw, 0, sizeof(hw)); 130 return ide_legacy_device_add(&dtc2278_port_info, 0);
133
134 ide_std_init_ports(&hw[0], 0x1f0, 0x3f6);
135 hw[0].irq = 14;
136
137 ide_std_init_ports(&hw[1], 0x170, 0x376);
138 hw[1].irq = 15;
139
140 ide_init_port_hw(hwif, &hw[0]);
141 ide_init_port_hw(mate, &hw[1]);
142
143 hwif->set_pio_mode = &dtc2278_set_pio_mode;
144
145 ide_device_add(idx, &dtc2278_port_info);
146
147 return 0;
148} 131}
149 132
150int probe_dtc2278 = 0; 133static int probe_dtc2278;
151 134
152module_param_named(probe, probe_dtc2278, bool, 0); 135module_param_named(probe, probe_dtc2278, bool, 0);
153MODULE_PARM_DESC(probe, "probe for DTC2278xx chipsets"); 136MODULE_PARM_DESC(probe, "probe for DTC2278xx chipsets");
diff --git a/drivers/ide/legacy/falconide.c b/drivers/ide/legacy/falconide.c
index e950afa5939c..56cdaa0eeea5 100644
--- a/drivers/ide/legacy/falconide.c
+++ b/drivers/ide/legacy/falconide.c
@@ -22,6 +22,7 @@
22#include <asm/atariints.h> 22#include <asm/atariints.h>
23#include <asm/atari_stdma.h> 23#include <asm/atari_stdma.h>
24 24
25#define DRV_NAME "falconide"
25 26
26 /* 27 /*
27 * Base of the IDE interface 28 * Base of the IDE interface
@@ -49,12 +50,12 @@ static void __init falconide_setup_ports(hw_regs_t *hw)
49 50
50 memset(hw, 0, sizeof(*hw)); 51 memset(hw, 0, sizeof(*hw));
51 52
52 hw->io_ports[IDE_DATA_OFFSET] = ATA_HD_BASE; 53 hw->io_ports.data_addr = ATA_HD_BASE;
53 54
54 for (i = 1; i < 8; i++) 55 for (i = 1; i < 8; i++)
55 hw->io_ports[i] = ATA_HD_BASE + 1 + i * 4; 56 hw->io_ports_array[i] = ATA_HD_BASE + 1 + i * 4;
56 57
57 hw->io_ports[IDE_CONTROL_OFFSET] = ATA_HD_BASE + ATA_HD_CONTROL; 58 hw->io_ports.ctl_addr = ATA_HD_BASE + ATA_HD_CONTROL;
58 59
59 hw->irq = IRQ_MFP_IDE; 60 hw->irq = IRQ_MFP_IDE;
60 hw->ack_intr = NULL; 61 hw->ack_intr = NULL;
@@ -74,9 +75,14 @@ static int __init falconide_init(void)
74 75
75 printk(KERN_INFO "ide: Falcon IDE controller\n"); 76 printk(KERN_INFO "ide: Falcon IDE controller\n");
76 77
78 if (!request_mem_region(ATA_HD_BASE, 0x40, DRV_NAME)) {
79 printk(KERN_ERR "%s: resources busy\n", DRV_NAME);
80 return -EBUSY;
81 }
82
77 falconide_setup_ports(&hw); 83 falconide_setup_ports(&hw);
78 84
79 hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]); 85 hwif = ide_find_port();
80 if (hwif) { 86 if (hwif) {
81 u8 index = hwif->index; 87 u8 index = hwif->index;
82 u8 idx[4] = { index, 0xff, 0xff, 0xff }; 88 u8 idx[4] = { index, 0xff, 0xff, 0xff };
diff --git a/drivers/ide/legacy/gayle.c b/drivers/ide/legacy/gayle.c
index e3b4638cc883..a9c2593a898c 100644
--- a/drivers/ide/legacy/gayle.c
+++ b/drivers/ide/legacy/gayle.c
@@ -63,6 +63,8 @@
63#define GAYLE_HAS_CONTROL_REG (!ide_doubler) 63#define GAYLE_HAS_CONTROL_REG (!ide_doubler)
64#define GAYLE_IDEREG_SIZE (ide_doubler ? 0x1000 : 0x2000) 64#define GAYLE_IDEREG_SIZE (ide_doubler ? 0x1000 : 0x2000)
65int ide_doubler = 0; /* support IDE doublers? */ 65int ide_doubler = 0; /* support IDE doublers? */
66module_param_named(doubler, ide_doubler, bool, 0);
67MODULE_PARM_DESC(doubler, "enable support for IDE doublers");
66#endif /* CONFIG_BLK_DEV_IDEDOUBLER */ 68#endif /* CONFIG_BLK_DEV_IDEDOUBLER */
67 69
68 70
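Note: the "ide=doubler" option handled in ide_setup() is marked obsolete above; gayle now exposes the setting directly, so a hypothetical modular invocation would be "modprobe gayle doubler=1".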
@@ -74,7 +76,7 @@ static int gayle_ack_intr_a4000(ide_hwif_t *hwif)
74{ 76{
75 unsigned char ch; 77 unsigned char ch;
76 78
77 ch = z_readb(hwif->io_ports[IDE_IRQ_OFFSET]); 79 ch = z_readb(hwif->io_ports.irq_addr);
78 if (!(ch & GAYLE_IRQ_IDE)) 80 if (!(ch & GAYLE_IRQ_IDE))
79 return 0; 81 return 0;
80 return 1; 82 return 1;
@@ -84,11 +86,11 @@ static int gayle_ack_intr_a1200(ide_hwif_t *hwif)
84{ 86{
85 unsigned char ch; 87 unsigned char ch;
86 88
87 ch = z_readb(hwif->io_ports[IDE_IRQ_OFFSET]); 89 ch = z_readb(hwif->io_ports.irq_addr);
88 if (!(ch & GAYLE_IRQ_IDE)) 90 if (!(ch & GAYLE_IRQ_IDE))
89 return 0; 91 return 0;
90 (void)z_readb(hwif->io_ports[IDE_STATUS_OFFSET]); 92 (void)z_readb(hwif->io_ports.status_addr);
91 z_writeb(0x7c, hwif->io_ports[IDE_IRQ_OFFSET]); 93 z_writeb(0x7c, hwif->io_ports.irq_addr);
92 return 1; 94 return 1;
93} 95}
94 96
@@ -100,13 +102,13 @@ static void __init gayle_setup_ports(hw_regs_t *hw, unsigned long base,
100 102
101 memset(hw, 0, sizeof(*hw)); 103 memset(hw, 0, sizeof(*hw));
102 104
103 hw->io_ports[IDE_DATA_OFFSET] = base; 105 hw->io_ports.data_addr = base;
104 106
105 for (i = 1; i < 8; i++) 107 for (i = 1; i < 8; i++)
106 hw->io_ports[i] = base + 2 + i * 4; 108 hw->io_ports_array[i] = base + 2 + i * 4;
107 109
108 hw->io_ports[IDE_CONTROL_OFFSET] = ctl; 110 hw->io_ports.ctl_addr = ctl;
109 hw->io_ports[IDE_IRQ_OFFSET] = irq_port; 111 hw->io_ports.irq_addr = irq_port;
110 112
111 hw->irq = IRQ_AMIGA_PORTS; 113 hw->irq = IRQ_AMIGA_PORTS;
112 hw->ack_intr = ack_intr; 114 hw->ack_intr = ack_intr;
@@ -175,15 +177,13 @@ found:
175 177
176 gayle_setup_ports(&hw, base, ctrlport, irqport, ack_intr); 178 gayle_setup_ports(&hw, base, ctrlport, irqport, ack_intr);
177 179
178 hwif = ide_find_port(base); 180 hwif = ide_find_port();
179 if (hwif) { 181 if (hwif) {
180 u8 index = hwif->index; 182 u8 index = hwif->index;
181 183
182 ide_init_port_data(hwif, index); 184 ide_init_port_data(hwif, index);
183 ide_init_port_hw(hwif, &hw); 185 ide_init_port_hw(hwif, &hw);
184 186
185 hwif->mmio = 1;
186
187 idx[i] = index; 187 idx[i] = index;
188 } else 188 } else
189 release_mem_region(res_start, res_n); 189 release_mem_region(res_start, res_n);
diff --git a/drivers/ide/legacy/hd.c b/drivers/ide/legacy/hd.c
index 0b0d86731927..abdedf56643e 100644
--- a/drivers/ide/legacy/hd.c
+++ b/drivers/ide/legacy/hd.c
@@ -122,12 +122,12 @@ static int hd_error;
122 * This struct defines the HD's and their types. 122 * This struct defines the HD's and their types.
123 */ 123 */
124struct hd_i_struct { 124struct hd_i_struct {
125 unsigned int head,sect,cyl,wpcom,lzone,ctl; 125 unsigned int head, sect, cyl, wpcom, lzone, ctl;
126 int unit; 126 int unit;
127 int recalibrate; 127 int recalibrate;
128 int special_op; 128 int special_op;
129}; 129};
130 130
131#ifdef HD_TYPE 131#ifdef HD_TYPE
132static struct hd_i_struct hd_info[] = { HD_TYPE }; 132static struct hd_i_struct hd_info[] = { HD_TYPE };
133static int NR_HD = ARRAY_SIZE(hd_info); 133static int NR_HD = ARRAY_SIZE(hd_info);
@@ -168,7 +168,7 @@ unsigned long read_timer(void)
168 168
169 spin_lock_irqsave(&i8253_lock, flags); 169 spin_lock_irqsave(&i8253_lock, flags);
170 t = jiffies * 11932; 170 t = jiffies * 11932;
171 outb_p(0, 0x43); 171 outb_p(0, 0x43);
172 i = inb_p(0x40); 172 i = inb_p(0x40);
173 i |= inb(0x40) << 8; 173 i |= inb(0x40) << 8;
174 spin_unlock_irqrestore(&i8253_lock, flags); 174 spin_unlock_irqrestore(&i8253_lock, flags);
@@ -183,7 +183,7 @@ static void __init hd_setup(char *str, int *ints)
183 if (ints[0] != 3) 183 if (ints[0] != 3)
184 return; 184 return;
185 if (hd_info[0].head != 0) 185 if (hd_info[0].head != 0)
186 hdind=1; 186 hdind = 1;
187 hd_info[hdind].head = ints[2]; 187 hd_info[hdind].head = ints[2];
188 hd_info[hdind].sect = ints[3]; 188 hd_info[hdind].sect = ints[3];
189 hd_info[hdind].cyl = ints[1]; 189 hd_info[hdind].cyl = ints[1];
@@ -193,7 +193,7 @@ static void __init hd_setup(char *str, int *ints)
193 NR_HD = hdind+1; 193 NR_HD = hdind+1;
194} 194}
195 195
196static void dump_status (const char *msg, unsigned int stat) 196static void dump_status(const char *msg, unsigned int stat)
197{ 197{
198 char *name = "hd?"; 198 char *name = "hd?";
199 if (CURRENT) 199 if (CURRENT)
@@ -291,7 +291,6 @@ static int controller_ready(unsigned int drive, unsigned int head)
291 return 0; 291 return 0;
292} 292}
293 293
294
295static void hd_out(struct hd_i_struct *disk, 294static void hd_out(struct hd_i_struct *disk,
296 unsigned int nsect, 295 unsigned int nsect,
297 unsigned int sect, 296 unsigned int sect,
@@ -313,15 +312,15 @@ static void hd_out(struct hd_i_struct *disk,
313 return; 312 return;
314 } 313 }
315 SET_HANDLER(intr_addr); 314 SET_HANDLER(intr_addr);
316 outb_p(disk->ctl,HD_CMD); 315 outb_p(disk->ctl, HD_CMD);
317 port=HD_DATA; 316 port = HD_DATA;
318 outb_p(disk->wpcom>>2,++port); 317 outb_p(disk->wpcom >> 2, ++port);
319 outb_p(nsect,++port); 318 outb_p(nsect, ++port);
320 outb_p(sect,++port); 319 outb_p(sect, ++port);
321 outb_p(cyl,++port); 320 outb_p(cyl, ++port);
322 outb_p(cyl>>8,++port); 321 outb_p(cyl >> 8, ++port);
323 outb_p(0xA0|(disk->unit<<4)|head,++port); 322 outb_p(0xA0 | (disk->unit << 4) | head, ++port);
324 outb_p(cmd,++port); 323 outb_p(cmd, ++port);
325} 324}
326 325
327static void hd_request (void); 326static void hd_request (void);
@@ -344,14 +343,14 @@ static void reset_controller(void)
344{ 343{
345 int i; 344 int i;
346 345
347 outb_p(4,HD_CMD); 346 outb_p(4, HD_CMD);
348 for(i = 0; i < 1000; i++) barrier(); 347 for (i = 0; i < 1000; i++) barrier();
349 outb_p(hd_info[0].ctl & 0x0f,HD_CMD); 348 outb_p(hd_info[0].ctl & 0x0f, HD_CMD);
350 for(i = 0; i < 1000; i++) barrier(); 349 for (i = 0; i < 1000; i++) barrier();
351 if (drive_busy()) 350 if (drive_busy())
352 printk("hd: controller still busy\n"); 351 printk("hd: controller still busy\n");
353 else if ((hd_error = inb(HD_ERROR)) != 1) 352 else if ((hd_error = inb(HD_ERROR)) != 1)
354 printk("hd: controller reset failed: %02x\n",hd_error); 353 printk("hd: controller reset failed: %02x\n", hd_error);
355} 354}
356 355
357static void reset_hd(void) 356static void reset_hd(void)
@@ -371,8 +370,8 @@ repeat:
371 if (++i < NR_HD) { 370 if (++i < NR_HD) {
372 struct hd_i_struct *disk = &hd_info[i]; 371 struct hd_i_struct *disk = &hd_info[i];
373 disk->special_op = disk->recalibrate = 1; 372 disk->special_op = disk->recalibrate = 1;
374 hd_out(disk,disk->sect,disk->sect,disk->head-1, 373 hd_out(disk, disk->sect, disk->sect, disk->head-1,
375 disk->cyl,WIN_SPECIFY,&reset_hd); 374 disk->cyl, WIN_SPECIFY, &reset_hd);
376 if (reset) 375 if (reset)
377 goto repeat; 376 goto repeat;
378 } else 377 } else
@@ -393,7 +392,7 @@ static void unexpected_hd_interrupt(void)
393 unsigned int stat = inb_p(HD_STATUS); 392 unsigned int stat = inb_p(HD_STATUS);
394 393
395 if (stat & (BUSY_STAT|DRQ_STAT|ECC_STAT|ERR_STAT)) { 394 if (stat & (BUSY_STAT|DRQ_STAT|ECC_STAT|ERR_STAT)) {
396 dump_status ("unexpected interrupt", stat); 395 dump_status("unexpected interrupt", stat);
397 SET_TIMER; 396 SET_TIMER;
398 } 397 }
399} 398}
@@ -453,7 +452,7 @@ static void read_intr(void)
453 return; 452 return;
454ok_to_read: 453ok_to_read:
455 req = CURRENT; 454 req = CURRENT;
456 insw(HD_DATA,req->buffer,256); 455 insw(HD_DATA, req->buffer, 256);
457 req->sector++; 456 req->sector++;
458 req->buffer += 512; 457 req->buffer += 512;
459 req->errors = 0; 458 req->errors = 0;
@@ -507,7 +506,7 @@ ok_to_write:
507 end_request(req, 1); 506 end_request(req, 1);
508 if (i > 0) { 507 if (i > 0) {
509 SET_HANDLER(&write_intr); 508 SET_HANDLER(&write_intr);
510 outsw(HD_DATA,req->buffer,256); 509 outsw(HD_DATA, req->buffer, 256);
511 local_irq_enable(); 510 local_irq_enable();
512 } else { 511 } else {
513#if (HD_DELAY > 0) 512#if (HD_DELAY > 0)
@@ -560,11 +559,11 @@ static int do_special_op(struct hd_i_struct *disk, struct request *req)
560{ 559{
561 if (disk->recalibrate) { 560 if (disk->recalibrate) {
562 disk->recalibrate = 0; 561 disk->recalibrate = 0;
563 hd_out(disk,disk->sect,0,0,0,WIN_RESTORE,&recal_intr); 562 hd_out(disk, disk->sect, 0, 0, 0, WIN_RESTORE, &recal_intr);
564 return reset; 563 return reset;
565 } 564 }
566 if (disk->head > 16) { 565 if (disk->head > 16) {
567 printk ("%s: cannot handle device with more than 16 heads - giving up\n", req->rq_disk->disk_name); 566 printk("%s: cannot handle device with more than 16 heads - giving up\n", req->rq_disk->disk_name);
568 end_request(req, 0); 567 end_request(req, 0);
569 } 568 }
570 disk->special_op = 0; 569 disk->special_op = 0;
@@ -633,19 +632,21 @@ repeat:
633 if (blk_fs_request(req)) { 632 if (blk_fs_request(req)) {
634 switch (rq_data_dir(req)) { 633 switch (rq_data_dir(req)) {
635 case READ: 634 case READ:
636 hd_out(disk,nsect,sec,head,cyl,WIN_READ,&read_intr); 635 hd_out(disk, nsect, sec, head, cyl, WIN_READ,
636 &read_intr);
637 if (reset) 637 if (reset)
638 goto repeat; 638 goto repeat;
639 break; 639 break;
640 case WRITE: 640 case WRITE:
641 hd_out(disk,nsect,sec,head,cyl,WIN_WRITE,&write_intr); 641 hd_out(disk, nsect, sec, head, cyl, WIN_WRITE,
642 &write_intr);
642 if (reset) 643 if (reset)
643 goto repeat; 644 goto repeat;
644 if (wait_DRQ()) { 645 if (wait_DRQ()) {
645 bad_rw_intr(); 646 bad_rw_intr();
646 goto repeat; 647 goto repeat;
647 } 648 }
648 outsw(HD_DATA,req->buffer,256); 649 outsw(HD_DATA, req->buffer, 256);
649 break; 650 break;
650 default: 651 default:
651 printk("unknown hd-command\n"); 652 printk("unknown hd-command\n");
@@ -655,7 +656,7 @@ repeat:
655 } 656 }
656} 657}
657 658
658static void do_hd_request (struct request_queue * q) 659static void do_hd_request(struct request_queue *q)
659{ 660{
660 disable_irq(HD_IRQ); 661 disable_irq(HD_IRQ);
661 hd_request(); 662 hd_request();
@@ -708,12 +709,12 @@ static int __init hd_init(void)
708{ 709{
709 int drive; 710 int drive;
710 711
711 if (register_blkdev(MAJOR_NR,"hd")) 712 if (register_blkdev(MAJOR_NR, "hd"))
712 return -1; 713 return -1;
713 714
714 hd_queue = blk_init_queue(do_hd_request, &hd_lock); 715 hd_queue = blk_init_queue(do_hd_request, &hd_lock);
715 if (!hd_queue) { 716 if (!hd_queue) {
716 unregister_blkdev(MAJOR_NR,"hd"); 717 unregister_blkdev(MAJOR_NR, "hd");
717 return -ENOMEM; 718 return -ENOMEM;
718 } 719 }
719 720
@@ -742,7 +743,7 @@ static int __init hd_init(void)
742 goto out; 743 goto out;
743 } 744 }
744 745
745 for (drive=0 ; drive < NR_HD ; drive++) { 746 for (drive = 0 ; drive < NR_HD ; drive++) {
746 struct gendisk *disk = alloc_disk(64); 747 struct gendisk *disk = alloc_disk(64);
747 struct hd_i_struct *p = &hd_info[drive]; 748 struct hd_i_struct *p = &hd_info[drive];
748 if (!disk) 749 if (!disk)
@@ -756,7 +757,7 @@ static int __init hd_init(void)
756 disk->queue = hd_queue; 757 disk->queue = hd_queue;
757 p->unit = drive; 758 p->unit = drive;
758 hd_gendisk[drive] = disk; 759 hd_gendisk[drive] = disk;
759 printk ("%s: %luMB, CHS=%d/%d/%d\n", 760 printk("%s: %luMB, CHS=%d/%d/%d\n",
760 disk->disk_name, (unsigned long)get_capacity(disk)/2048, 761 disk->disk_name, (unsigned long)get_capacity(disk)/2048,
761 p->cyl, p->head, p->sect); 762 p->cyl, p->head, p->sect);
762 } 763 }
@@ -776,7 +777,7 @@ static int __init hd_init(void)
776 } 777 }
777 778
778 /* Let them fly */ 779 /* Let them fly */
779 for(drive=0; drive < NR_HD; drive++) 780 for (drive = 0; drive < NR_HD; drive++)
780 add_disk(hd_gendisk[drive]); 781 add_disk(hd_gendisk[drive]);
781 782
782 return 0; 783 return 0;
@@ -791,7 +792,7 @@ out1:
791 NR_HD = 0; 792 NR_HD = 0;
792out: 793out:
793 del_timer(&device_timer); 794 del_timer(&device_timer);
794 unregister_blkdev(MAJOR_NR,"hd"); 795 unregister_blkdev(MAJOR_NR, "hd");
795 blk_cleanup_queue(hd_queue); 796 blk_cleanup_queue(hd_queue);
796 return -1; 797 return -1;
797Enomem: 798Enomem:
@@ -800,7 +801,8 @@ Enomem:
800 goto out; 801 goto out;
801} 802}
802 803
803static int __init parse_hd_setup (char *line) { 804static int __init parse_hd_setup(char *line)
805{
804 int ints[6]; 806 int ints[6];
805 807
806 (void) get_options(line, ARRAY_SIZE(ints), ints); 808 (void) get_options(line, ARRAY_SIZE(ints), ints);
diff --git a/drivers/ide/legacy/ht6560b.c b/drivers/ide/legacy/ht6560b.c
index 88fe9070c9c3..4fe516df9f74 100644
--- a/drivers/ide/legacy/ht6560b.c
+++ b/drivers/ide/legacy/ht6560b.c
@@ -35,6 +35,7 @@
35 * Try: http://www.maf.iki.fi/~maf/ht6560b/ 35 * Try: http://www.maf.iki.fi/~maf/ht6560b/
36 */ 36 */
37 37
38#define DRV_NAME "ht6560b"
38#define HT6560B_VERSION "v0.08" 39#define HT6560B_VERSION "v0.08"
39 40
40#include <linux/module.h> 41#include <linux/module.h>
@@ -156,8 +157,8 @@ static void ht6560b_selectproc (ide_drive_t *drive)
156 /* 157 /*
157 * Set timing for this drive: 158 * Set timing for this drive:
158 */ 159 */
159 outb(timing, hwif->io_ports[IDE_SELECT_OFFSET]); 160 outb(timing, hwif->io_ports.device_addr);
160 (void)inb(hwif->io_ports[IDE_STATUS_OFFSET]); 161 (void)inb(hwif->io_ports.status_addr);
161#ifdef DEBUG 162#ifdef DEBUG
162 printk("ht6560b: %s: select=%#x timing=%#x\n", 163 printk("ht6560b: %s: select=%#x timing=%#x\n",
163 drive->name, select, timing); 164 drive->name, select, timing);
@@ -211,8 +212,8 @@ static u8 ht_pio2timings(ide_drive_t *drive, const u8 pio)
211{ 212{
212 int active_time, recovery_time; 213 int active_time, recovery_time;
213 int active_cycles, recovery_cycles; 214 int active_cycles, recovery_cycles;
214 int bus_speed = system_bus_clock(); 215 int bus_speed = ide_vlb_clk ? ide_vlb_clk : system_bus_clock();
215 216
216 if (pio) { 217 if (pio) {
217 unsigned int cycle_time; 218 unsigned int cycle_time;
218 219
@@ -322,66 +323,44 @@ static void __init ht6560b_port_init_devs(ide_hwif_t *hwif)
322 hwif->drives[1].drive_data = t; 323 hwif->drives[1].drive_data = t;
323} 324}
324 325
325int probe_ht6560b = 0; 326static int probe_ht6560b;
326 327
327module_param_named(probe, probe_ht6560b, bool, 0); 328module_param_named(probe, probe_ht6560b, bool, 0);
328MODULE_PARM_DESC(probe, "probe for HT6560B chipset"); 329MODULE_PARM_DESC(probe, "probe for HT6560B chipset");
329 330
331static const struct ide_port_ops ht6560b_port_ops = {
332 .port_init_devs = ht6560b_port_init_devs,
333 .set_pio_mode = ht6560b_set_pio_mode,
334 .selectproc = ht6560b_selectproc,
335};
336
330static const struct ide_port_info ht6560b_port_info __initdata = { 337static const struct ide_port_info ht6560b_port_info __initdata = {
338 .name = DRV_NAME,
331 .chipset = ide_ht6560b, 339 .chipset = ide_ht6560b,
340 .port_ops = &ht6560b_port_ops,
332 .host_flags = IDE_HFLAG_SERIALIZE | /* is this needed? */ 341 .host_flags = IDE_HFLAG_SERIALIZE | /* is this needed? */
333 IDE_HFLAG_NO_DMA | 342 IDE_HFLAG_NO_DMA |
334 IDE_HFLAG_NO_AUTOTUNE |
335 IDE_HFLAG_ABUSE_PREFETCH, 343 IDE_HFLAG_ABUSE_PREFETCH,
336 .pio_mask = ATA_PIO4, 344 .pio_mask = ATA_PIO4,
337}; 345};
338 346
339static int __init ht6560b_init(void) 347static int __init ht6560b_init(void)
340{ 348{
341 ide_hwif_t *hwif, *mate;
342 static u8 idx[4] = { 0, 1, 0xff, 0xff };
343 hw_regs_t hw[2];
344
345 if (probe_ht6560b == 0) 349 if (probe_ht6560b == 0)
346 return -ENODEV; 350 return -ENODEV;
347 351
348 hwif = &ide_hwifs[0]; 352 if (!request_region(HT_CONFIG_PORT, 1, DRV_NAME)) {
349 mate = &ide_hwifs[1];
350
351 if (!request_region(HT_CONFIG_PORT, 1, hwif->name)) {
352 printk(KERN_NOTICE "%s: HT_CONFIG_PORT not found\n", 353 printk(KERN_NOTICE "%s: HT_CONFIG_PORT not found\n",
353 __FUNCTION__); 354 __func__);
354 return -ENODEV; 355 return -ENODEV;
355 } 356 }
356 357
357 if (!try_to_init_ht6560b()) { 358 if (!try_to_init_ht6560b()) {
358 printk(KERN_NOTICE "%s: HBA not found\n", __FUNCTION__); 359 printk(KERN_NOTICE "%s: HBA not found\n", __func__);
359 goto release_region; 360 goto release_region;
360 } 361 }
361 362
362 memset(&hw, 0, sizeof(hw)); 363 return ide_legacy_device_add(&ht6560b_port_info, 0);
363
364 ide_std_init_ports(&hw[0], 0x1f0, 0x3f6);
365 hw[0].irq = 14;
366
367 ide_std_init_ports(&hw[1], 0x170, 0x376);
368 hw[1].irq = 15;
369
370 ide_init_port_hw(hwif, &hw[0]);
371 ide_init_port_hw(mate, &hw[1]);
372
373 hwif->selectproc = &ht6560b_selectproc;
374 hwif->set_pio_mode = &ht6560b_set_pio_mode;
375
376 mate->selectproc = &ht6560b_selectproc;
377 mate->set_pio_mode = &ht6560b_set_pio_mode;
378
379 hwif->port_init_devs = ht6560b_port_init_devs;
380 mate->port_init_devs = ht6560b_port_init_devs;
381
382 ide_device_add(idx, &ht6560b_port_info);
383
384 return 0;
385 364
386release_region: 365release_region:
387 release_region(HT_CONFIG_PORT, 1); 366 release_region(HT_CONFIG_PORT, 1);
diff --git a/drivers/ide/legacy/ide-4drives.c b/drivers/ide/legacy/ide-4drives.c
index ecd7f3553554..ecae916a3385 100644
--- a/drivers/ide/legacy/ide-4drives.c
+++ b/drivers/ide/legacy/ide-4drives.c
@@ -4,7 +4,9 @@
4#include <linux/module.h> 4#include <linux/module.h>
5#include <linux/ide.h> 5#include <linux/ide.h>
6 6
7int probe_4drives = 0; 7#define DRV_NAME "ide-4drives"
8
9static int probe_4drives;
8 10
9module_param_named(probe, probe_4drives, bool, 0); 11module_param_named(probe, probe_4drives, bool, 0);
10MODULE_PARM_DESC(probe, "probe for generic IDE chipset with 4 drives/port"); 12MODULE_PARM_DESC(probe, "probe for generic IDE chipset with 4 drives/port");
@@ -12,31 +14,51 @@ MODULE_PARM_DESC(probe, "probe for generic IDE chipset with 4 drives/port");
12static int __init ide_4drives_init(void) 14static int __init ide_4drives_init(void)
13{ 15{
14 ide_hwif_t *hwif, *mate; 16 ide_hwif_t *hwif, *mate;
15 u8 idx[4] = { 0, 1, 0xff, 0xff }; 17 unsigned long base = 0x1f0, ctl = 0x3f6;
18 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
16 hw_regs_t hw; 19 hw_regs_t hw;
17 20
18 if (probe_4drives == 0) 21 if (probe_4drives == 0)
19 return -ENODEV; 22 return -ENODEV;
20 23
21 hwif = &ide_hwifs[0]; 24 if (!request_region(base, 8, DRV_NAME)) {
22 mate = &ide_hwifs[1]; 25 printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
26 DRV_NAME, base, base + 7);
27 return -EBUSY;
28 }
29
30 if (!request_region(ctl, 1, DRV_NAME)) {
31 printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
32 DRV_NAME, ctl);
33 release_region(base, 8);
34 return -EBUSY;
35 }
23 36
24 memset(&hw, 0, sizeof(hw)); 37 memset(&hw, 0, sizeof(hw));
25 38
26 ide_std_init_ports(&hw, 0x1f0, 0x3f6); 39 ide_std_init_ports(&hw, base, ctl);
27 hw.irq = 14; 40 hw.irq = 14;
28 hw.chipset = ide_4drives; 41 hw.chipset = ide_4drives;
29 42
30 ide_init_port_hw(hwif, &hw); 43 hwif = ide_find_port();
31 ide_init_port_hw(mate, &hw); 44 if (hwif) {
32 45 ide_init_port_hw(hwif, &hw);
33 mate->drives[0].select.all ^= 0x20; 46 idx[0] = hwif->index;
34 mate->drives[1].select.all ^= 0x20; 47 }
35 48
36 hwif->mate = mate; 49 mate = ide_find_port();
37 mate->mate = hwif; 50 if (mate) {
38 51 ide_init_port_hw(mate, &hw);
39 hwif->serialized = mate->serialized = 1; 52 mate->drives[0].select.all ^= 0x20;
53 mate->drives[1].select.all ^= 0x20;
54 idx[1] = mate->index;
55
56 if (hwif) {
57 hwif->mate = mate;
58 mate->mate = hwif;
59 hwif->serialized = mate->serialized = 1;
60 }
61 }
40 62
41 ide_device_add(idx, NULL); 63 ide_device_add(idx, NULL);
42 64
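Note: two things stand out in the ide-4drives change. The I/O regions are now claimed with request_region() before use, and the hardwired ide_hwifs[0]/[1] slots give way to ide_find_port(), with the idx[] array preset to 0xff sentinels so only ports that were actually found get registered. A standalone sketch of that sentinel convention follows; device_add() here is a stand-in for ide_device_add(), whose skip-on-0xff behaviour is inferred from its use in this patch.

#include <stdio.h>

#define EMPTY 0xff

static void device_add(const unsigned char idx[4])
{
        for (int i = 0; i < 4; i++)
                if (idx[i] != EMPTY)            /* 0xff slots are skipped */
                        printf("probing hwif %u\n", idx[i]);
}

int main(void)
{
        unsigned char idx[4] = { EMPTY, EMPTY, EMPTY, EMPTY };

        idx[0] = 0;     /* primary port found */
        idx[1] = 1;     /* mate found; shares IRQ/regs, select bit flipped */
        device_add(idx);
        return 0;
}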
diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c
index 9a23b94f2939..aa2ea3deac85 100644
--- a/drivers/ide/legacy/ide-cs.c
+++ b/drivers/ide/legacy/ide-cs.c
@@ -51,6 +51,8 @@
51#include <pcmcia/cisreg.h> 51#include <pcmcia/cisreg.h>
52#include <pcmcia/ciscode.h> 52#include <pcmcia/ciscode.h>
53 53
54#define DRV_NAME "ide-cs"
55
54/*====================================================================*/ 56/*====================================================================*/
55 57
56/* Module parameters */ 58/* Module parameters */
@@ -72,16 +74,11 @@ static char *version =
72 74
73/*====================================================================*/ 75/*====================================================================*/
74 76
75static const char ide_major[] = {
76 IDE0_MAJOR, IDE1_MAJOR, IDE2_MAJOR, IDE3_MAJOR,
77 IDE4_MAJOR, IDE5_MAJOR
78};
79
80typedef struct ide_info_t { 77typedef struct ide_info_t {
81 struct pcmcia_device *p_dev; 78 struct pcmcia_device *p_dev;
79 ide_hwif_t *hwif;
82 int ndev; 80 int ndev;
83 dev_node_t node; 81 dev_node_t node;
84 int hd;
85} ide_info_t; 82} ide_info_t;
86 83
87static void ide_release(struct pcmcia_device *); 84static void ide_release(struct pcmcia_device *);
@@ -136,45 +133,71 @@ static int ide_probe(struct pcmcia_device *link)
136 133
137static void ide_detach(struct pcmcia_device *link) 134static void ide_detach(struct pcmcia_device *link)
138{ 135{
136 ide_info_t *info = link->priv;
137 ide_hwif_t *hwif = info->hwif;
138
139 DEBUG(0, "ide_detach(0x%p)\n", link); 139 DEBUG(0, "ide_detach(0x%p)\n", link);
140 140
141 ide_release(link); 141 ide_release(link);
142 142
143 kfree(link->priv); 143 release_region(hwif->io_ports.ctl_addr, 1);
144 release_region(hwif->io_ports.data_addr, 8);
145
146 kfree(info);
144} /* ide_detach */ 147} /* ide_detach */
145 148
146static int idecs_register(unsigned long io, unsigned long ctl, unsigned long irq, struct pcmcia_device *handle) 149static const struct ide_port_ops idecs_port_ops = {
150 .quirkproc = ide_undecoded_slave,
151};
152
153static ide_hwif_t *idecs_register(unsigned long io, unsigned long ctl,
154 unsigned long irq, struct pcmcia_device *handle)
147{ 155{
148 ide_hwif_t *hwif; 156 ide_hwif_t *hwif;
149 hw_regs_t hw; 157 hw_regs_t hw;
150 int i; 158 int i;
151 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 159 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
152 160
161 if (!request_region(io, 8, DRV_NAME)) {
162 printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
163 DRV_NAME, io, io + 7);
164 return NULL;
165 }
166
167 if (!request_region(ctl, 1, DRV_NAME)) {
168 printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
169 DRV_NAME, ctl);
170 release_region(io, 8);
171 return NULL;
172 }
173
153 memset(&hw, 0, sizeof(hw)); 174 memset(&hw, 0, sizeof(hw));
154 ide_std_init_ports(&hw, io, ctl); 175 ide_std_init_ports(&hw, io, ctl);
155 hw.irq = irq; 176 hw.irq = irq;
156 hw.chipset = ide_pci; 177 hw.chipset = ide_pci;
157 hw.dev = &handle->dev; 178 hw.dev = &handle->dev;
158 179
159 hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]); 180 hwif = ide_find_port();
160 if (hwif == NULL) 181 if (hwif == NULL)
161 return -1; 182 goto out_release;
162 183
163 i = hwif->index; 184 i = hwif->index;
164 185
165 if (hwif->present) 186 ide_init_port_data(hwif, i);
166 ide_unregister(i);
167 else
168 ide_init_port_data(hwif, i);
169
170 ide_init_port_hw(hwif, &hw); 187 ide_init_port_hw(hwif, &hw);
171 hwif->quirkproc = &ide_undecoded_slave; 188 hwif->port_ops = &idecs_port_ops;
172 189
173 idx[0] = i; 190 idx[0] = i;
174 191
175 ide_device_add(idx, NULL); 192 ide_device_add(idx, NULL);
176 193
177 return hwif->present ? i : -1; 194 if (hwif->present)
195 return hwif;
196
197out_release:
198 release_region(ctl, 1);
199 release_region(io, 8);
200 return NULL;
178} 201}
179 202
180/*====================================================================== 203/*======================================================================
@@ -199,8 +222,9 @@ static int ide_config(struct pcmcia_device *link)
199 cistpl_cftable_entry_t dflt; 222 cistpl_cftable_entry_t dflt;
200 } *stk = NULL; 223 } *stk = NULL;
201 cistpl_cftable_entry_t *cfg; 224 cistpl_cftable_entry_t *cfg;
202 int i, pass, last_ret = 0, last_fn = 0, hd, is_kme = 0; 225 int i, pass, last_ret = 0, last_fn = 0, is_kme = 0;
203 unsigned long io_base, ctl_base; 226 unsigned long io_base, ctl_base;
227 ide_hwif_t *hwif;
204 228
205 DEBUG(0, "ide_config(0x%p)\n", link); 229 DEBUG(0, "ide_config(0x%p)\n", link);
206 230
@@ -296,14 +320,15 @@ static int ide_config(struct pcmcia_device *link)
296 outb(0x81, ctl_base+1); 320 outb(0x81, ctl_base+1);
297 321
298 /* retry registration in case device is still spinning up */ 322 /* retry registration in case device is still spinning up */
299 for (hd = -1, i = 0; i < 10; i++) { 323 for (i = 0; i < 10; i++) {
300 hd = idecs_register(io_base, ctl_base, link->irq.AssignedIRQ, link); 324 hwif = idecs_register(io_base, ctl_base, link->irq.AssignedIRQ, link);
301 if (hd >= 0) break; 325 if (hwif)
326 break;
302 if (link->io.NumPorts1 == 0x20) { 327 if (link->io.NumPorts1 == 0x20) {
303 outb(0x02, ctl_base + 0x10); 328 outb(0x02, ctl_base + 0x10);
304 hd = idecs_register(io_base + 0x10, ctl_base + 0x10, 329 hwif = idecs_register(io_base + 0x10, ctl_base + 0x10,
305 link->irq.AssignedIRQ, link); 330 link->irq.AssignedIRQ, link);
306 if (hd >= 0) { 331 if (hwif) {
307 io_base += 0x10; 332 io_base += 0x10;
308 ctl_base += 0x10; 333 ctl_base += 0x10;
309 break; 334 break;
@@ -312,7 +337,7 @@ static int ide_config(struct pcmcia_device *link)
312 msleep(100); 337 msleep(100);
313 } 338 }
314 339
315 if (hd < 0) { 340 if (hwif == NULL) {
316 printk(KERN_NOTICE "ide-cs: ide_register() at 0x%3lx & 0x%3lx" 341 printk(KERN_NOTICE "ide-cs: ide_register() at 0x%3lx & 0x%3lx"
317 ", irq %u failed\n", io_base, ctl_base, 342 ", irq %u failed\n", io_base, ctl_base,
318 link->irq.AssignedIRQ); 343 link->irq.AssignedIRQ);
@@ -320,10 +345,10 @@ static int ide_config(struct pcmcia_device *link)
320 } 345 }
321 346
322 info->ndev = 1; 347 info->ndev = 1;
323 sprintf(info->node.dev_name, "hd%c", 'a' + (hd * 2)); 348 sprintf(info->node.dev_name, "hd%c", 'a' + hwif->index * 2);
324 info->node.major = ide_major[hd]; 349 info->node.major = hwif->major;
325 info->node.minor = 0; 350 info->node.minor = 0;
326 info->hd = hd; 351 info->hwif = hwif;
327 link->dev_node = &info->node; 352 link->dev_node = &info->node;
328 printk(KERN_INFO "ide-cs: %s: Vpp = %d.%d\n", 353 printk(KERN_INFO "ide-cs: %s: Vpp = %d.%d\n",
329 info->node.dev_name, link->conf.Vpp / 10, link->conf.Vpp % 10); 354 info->node.dev_name, link->conf.Vpp / 10, link->conf.Vpp % 10);
@@ -354,13 +379,14 @@ failed:
354void ide_release(struct pcmcia_device *link) 379void ide_release(struct pcmcia_device *link)
355{ 380{
356 ide_info_t *info = link->priv; 381 ide_info_t *info = link->priv;
382 ide_hwif_t *hwif = info->hwif;
357 383
358 DEBUG(0, "ide_release(0x%p)\n", link); 384 DEBUG(0, "ide_release(0x%p)\n", link);
359 385
360 if (info->ndev) { 386 if (info->ndev) {
361 /* FIXME: if this fails we need to queue the cleanup somehow 387 /* FIXME: if this fails we need to queue the cleanup somehow
362 -- need to investigate the required PCMCIA magic */ 388 -- need to investigate the required PCMCIA magic */
363 ide_unregister(info->hd); 389 ide_unregister(hwif);
364 } 390 }
365 info->ndev = 0; 391 info->ndev = 0;
366 392
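Note: idecs_register() now returns the hwif pointer instead of a magic index (which is also what lets the ide_major[] table go away), and it gains a classic goto-based unwind so the control region is released before the data region on failure, the reverse of the claim order. A minimal sketch of the idiom, with invented claim_region()/free_region() helpers standing in for request_region()/release_region():

#include <stdbool.h>
#include <stdio.h>

static bool claim_region(unsigned long start, unsigned long n)
{
        printf("claim 0x%lx-0x%lx\n", start, start + n - 1);
        return true;    /* pretend it always succeeds */
}

static void free_region(unsigned long start, unsigned long n)
{
        printf("free  0x%lx-0x%lx\n", start, start + n - 1);
}

static void *register_port(unsigned long io, unsigned long ctl)
{
        static int port;                /* stand-in for the found hwif */

        if (!claim_region(io, 8))
                return NULL;
        if (!claim_region(ctl, 1))
                goto out_io;
        if (0 /* probe failed in the real driver */)
                goto out_ctl;
        return &port;

out_ctl:                                /* release in reverse claim order */
        free_region(ctl, 1);
out_io:
        free_region(io, 8);
        return NULL;
}

int main(void)
{
        return register_port(0x1f0, 0x3f6) ? 0 : 1;
}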
diff --git a/drivers/ide/legacy/ide_platform.c b/drivers/ide/legacy/ide_platform.c
index 361b1bb544bf..8279dc7ca4c0 100644
--- a/drivers/ide/legacy/ide_platform.c
+++ b/drivers/ide/legacy/ide_platform.c
@@ -30,14 +30,14 @@ static void __devinit plat_ide_setup_ports(hw_regs_t *hw,
30 unsigned long port = (unsigned long)base; 30 unsigned long port = (unsigned long)base;
31 int i; 31 int i;
32 32
33 hw->io_ports[IDE_DATA_OFFSET] = port; 33 hw->io_ports.data_addr = port;
34 34
35 port += (1 << pdata->ioport_shift); 35 port += (1 << pdata->ioport_shift);
36 for (i = IDE_ERROR_OFFSET; i <= IDE_STATUS_OFFSET; 36 for (i = 1; i <= 7;
37 i++, port += (1 << pdata->ioport_shift)) 37 i++, port += (1 << pdata->ioport_shift))
38 hw->io_ports[i] = port; 38 hw->io_ports_array[i] = port;
39 39
40 hw->io_ports[IDE_CONTROL_OFFSET] = (unsigned long)ctrl; 40 hw->io_ports.ctl_addr = (unsigned long)ctrl;
41 41
42 hw->irq = irq; 42 hw->irq = irq;
43 43
@@ -89,7 +89,7 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
89 res_alt->start, res_alt->end - res_alt->start + 1); 89 res_alt->start, res_alt->end - res_alt->start + 1);
90 } 90 }
91 91
92 hwif = ide_find_port((unsigned long)base); 92 hwif = ide_find_port();
93 if (!hwif) { 93 if (!hwif) {
94 ret = -ENODEV; 94 ret = -ENODEV;
95 goto out; 95 goto out;
@@ -101,10 +101,8 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
101 101
102 ide_init_port_hw(hwif, &hw); 102 ide_init_port_hw(hwif, &hw);
103 103
104 if (mmio) { 104 if (mmio)
105 hwif->mmio = 1;
106 default_hwif_mmiops(hwif); 105 default_hwif_mmiops(hwif);
107 }
108 106
109 idx[0] = hwif->index; 107 idx[0] = hwif->index;
110 108
@@ -122,7 +120,7 @@ static int __devexit plat_ide_remove(struct platform_device *pdev)
122{ 120{
123 ide_hwif_t *hwif = pdev->dev.driver_data; 121 ide_hwif_t *hwif = pdev->dev.driver_data;
124 122
125 ide_unregister(hwif->index); 123 ide_unregister(hwif);
126 124
127 return 0; 125 return 0;
128} 126}
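Note: the hw->io_ports[IDE_*_OFFSET] indexing is replaced by named members here, with io_ports_array[] kept for loops. A plausible model is a union of an array view and a struct view over the same storage; this layout is an assumption for illustration only (the real definition lives in <linux/ide.h> and carries more fields, e.g. an IRQ slot).

#include <stdio.h>

union io_ports_model {                  /* assumed shape, not the kernel's */
        unsigned long io_ports_array[9];
        struct {
                unsigned long data_addr;        /* [0] */
                unsigned long error_addr;       /* [1] */
                unsigned long nsect_addr;       /* [2] */
                unsigned long lbal_addr;        /* [3] */
                unsigned long lbam_addr;        /* [4] */
                unsigned long lbah_addr;        /* [5] */
                unsigned long device_addr;      /* [6] */
                unsigned long status_addr;      /* [7] */
                unsigned long ctl_addr;         /* [8] */
        } io_ports;
};

int main(void)
{
        union io_ports_model hw = { { 0 } };

        for (int i = 0; i < 8; i++)             /* numbered view for loops */
                hw.io_ports_array[i] = 0x1f0 + i;
        hw.io_ports.ctl_addr = 0x3f6;           /* named view for clarity */

        printf("data=%#lx status=%#lx ctl=%#lx\n",
               hw.io_ports.data_addr, hw.io_ports.status_addr,
               hw.io_ports.ctl_addr);
        return 0;
}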
diff --git a/drivers/ide/legacy/macide.c b/drivers/ide/legacy/macide.c
index eaf5dbe58bc2..1f527bbf8d96 100644
--- a/drivers/ide/legacy/macide.c
+++ b/drivers/ide/legacy/macide.c
@@ -72,9 +72,9 @@ static void __init macide_setup_ports(hw_regs_t *hw, unsigned long base,
72 memset(hw, 0, sizeof(*hw)); 72 memset(hw, 0, sizeof(*hw));
73 73
74 for (i = 0; i < 8; i++) 74 for (i = 0; i < 8; i++)
75 hw->io_ports[i] = base + i * 4; 75 hw->io_ports_array[i] = base + i * 4;
76 76
77 hw->io_ports[IDE_CONTROL_OFFSET] = base + IDE_CONTROL; 77 hw->io_ports.ctl_addr = base + IDE_CONTROL;
78 78
79 hw->irq = irq; 79 hw->irq = irq;
80 hw->ack_intr = ack_intr; 80 hw->ack_intr = ack_intr;
@@ -120,7 +120,7 @@ static int __init macide_init(void)
120 120
121 macide_setup_ports(&hw, base, irq, ack_intr); 121 macide_setup_ports(&hw, base, irq, ack_intr);
122 122
123 hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]); 123 hwif = ide_find_port();
124 if (hwif) { 124 if (hwif) {
125 u8 index = hwif->index; 125 u8 index = hwif->index;
126 u8 idx[4] = { index, 0xff, 0xff, 0xff }; 126 u8 idx[4] = { index, 0xff, 0xff, 0xff };
@@ -128,8 +128,6 @@ static int __init macide_init(void)
128 ide_init_port_data(hwif, index); 128 ide_init_port_data(hwif, index);
129 ide_init_port_hw(hwif, &hw); 129 ide_init_port_hw(hwif, &hw);
130 130
131 hwif->mmio = 1;
132
133 ide_device_add(idx, NULL); 131 ide_device_add(idx, NULL);
134 } 132 }
135 133
diff --git a/drivers/ide/legacy/q40ide.c b/drivers/ide/legacy/q40ide.c
index 2da28759686e..a3573d40b4b7 100644
--- a/drivers/ide/legacy/q40ide.c
+++ b/drivers/ide/legacy/q40ide.c
@@ -80,10 +80,10 @@ void q40_ide_setup_ports ( hw_regs_t *hw,
80 for (i = 0; i < IDE_NR_PORTS; i++) { 80 for (i = 0; i < IDE_NR_PORTS; i++) {
81 /* BIG FAT WARNING: 81 /* BIG FAT WARNING:
82 assumption: only DATA port is ever used in 16 bit mode */ 82 assumption: only DATA port is ever used in 16 bit mode */
83 if ( i==0 ) 83 if (i == 0)
84 hw->io_ports[i] = Q40_ISA_IO_W(base + offsets[i]); 84 hw->io_ports_array[i] = Q40_ISA_IO_W(base + offsets[i]);
85 else 85 else
86 hw->io_ports[i] = Q40_ISA_IO_B(base + offsets[i]); 86 hw->io_ports_array[i] = Q40_ISA_IO_B(base + offsets[i]);
87 } 87 }
88 88
89 hw->irq = irq; 89 hw->irq = irq;
@@ -137,11 +137,10 @@ static int __init q40ide_init(void)
137// m68kide_iops, 137// m68kide_iops,
138 q40ide_default_irq(pcide_bases[i])); 138 q40ide_default_irq(pcide_bases[i]));
139 139
140 hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]); 140 hwif = ide_find_port();
141 if (hwif) { 141 if (hwif) {
142 ide_init_port_data(hwif, hwif->index); 142 ide_init_port_data(hwif, hwif->index);
143 ide_init_port_hw(hwif, &hw); 143 ide_init_port_hw(hwif, &hw);
144 hwif->mmio = 1;
145 144
146 idx[i] = hwif->index; 145 idx[i] = hwif->index;
147 } 146 }
diff --git a/drivers/ide/legacy/qd65xx.c b/drivers/ide/legacy/qd65xx.c
index 7016bdf4fcc1..6424af154325 100644
--- a/drivers/ide/legacy/qd65xx.c
+++ b/drivers/ide/legacy/qd65xx.c
@@ -11,11 +11,7 @@
11 * 11 *
12 * QDI QD6500/QD6580 EIDE controller fast support 12 * QDI QD6500/QD6580 EIDE controller fast support
13 * 13 *
14 * Please set local bus speed using kernel parameter idebus
15 * for example, "idebus=33" stands for 33Mhz VLbus
16 * To activate controller support, use "ide0=qd65xx" 14 * To activate controller support, use "ide0=qd65xx"
17 * To enable tuning, use "hda=autotune hdb=autotune"
18 * To enable 2nd channel tuning (qd6580 only), use "hdc=autotune hdd=autotune"
19 */ 15 */
20 16
21/* 17/*
@@ -37,6 +33,8 @@
37#include <asm/system.h> 33#include <asm/system.h>
38#include <asm/io.h> 34#include <asm/io.h>
39 35
36#define DRV_NAME "qd65xx"
37
40#include "qd65xx.h" 38#include "qd65xx.h"
41 39
42/* 40/*
@@ -88,12 +86,12 @@
88static int timings[4]={-1,-1,-1,-1}; /* stores current timing for each timer */ 86static int timings[4]={-1,-1,-1,-1}; /* stores current timing for each timer */
89 87
90/* 88/*
91 * qd_select: 89 * qd65xx_select:
92 * 90 *
93 * This routine is invoked from ide.c to prepare for access to a given drive. 91 * This routine is invoked to prepare for access to a given drive.
94 */ 92 */
95 93
96static void qd_select (ide_drive_t *drive) 94static void qd65xx_select(ide_drive_t *drive)
97{ 95{
98 u8 index = (( (QD_TIMREG(drive)) & 0x80 ) >> 7) | 96 u8 index = (( (QD_TIMREG(drive)) & 0x80 ) >> 7) |
99 (QD_TIMREG(drive) & 0x02); 97 (QD_TIMREG(drive) & 0x02);
@@ -112,17 +110,18 @@ static void qd_select (ide_drive_t *drive)
112 110
113static u8 qd6500_compute_timing (ide_hwif_t *hwif, int active_time, int recovery_time) 111static u8 qd6500_compute_timing (ide_hwif_t *hwif, int active_time, int recovery_time)
114{ 112{
115 u8 active_cycle,recovery_cycle; 113 int clk = ide_vlb_clk ? ide_vlb_clk : system_bus_clock();
114 u8 act_cyc, rec_cyc;
116 115
117 if (system_bus_clock()<=33) { 116 if (clk <= 33) {
118 active_cycle = 9 - IDE_IN(active_time * system_bus_clock() / 1000 + 1, 2, 9); 117 act_cyc = 9 - IDE_IN(active_time * clk / 1000 + 1, 2, 9);
119 recovery_cycle = 15 - IDE_IN(recovery_time * system_bus_clock() / 1000 + 1, 0, 15); 118 rec_cyc = 15 - IDE_IN(recovery_time * clk / 1000 + 1, 0, 15);
120 } else { 119 } else {
121 active_cycle = 8 - IDE_IN(active_time * system_bus_clock() / 1000 + 1, 1, 8); 120 act_cyc = 8 - IDE_IN(active_time * clk / 1000 + 1, 1, 8);
122 recovery_cycle = 18 - IDE_IN(recovery_time * system_bus_clock() / 1000 + 1, 3, 18); 121 rec_cyc = 18 - IDE_IN(recovery_time * clk / 1000 + 1, 3, 18);
123 } 122 }
124 123
125 return((recovery_cycle<<4) | 0x08 | active_cycle); 124 return (rec_cyc << 4) | 0x08 | act_cyc;
126} 125}
127 126
128/* 127/*
@@ -133,10 +132,13 @@ static u8 qd6500_compute_timing (ide_hwif_t *hwif, int active_time, int recovery
133 132
134static u8 qd6580_compute_timing (int active_time, int recovery_time) 133static u8 qd6580_compute_timing (int active_time, int recovery_time)
135{ 134{
136 u8 active_cycle = 17 - IDE_IN(active_time * system_bus_clock() / 1000 + 1, 2, 17); 135 int clk = ide_vlb_clk ? ide_vlb_clk : system_bus_clock();
137 u8 recovery_cycle = 15 - IDE_IN(recovery_time * system_bus_clock() / 1000 + 1, 2, 15); 136 u8 act_cyc, rec_cyc;
137
138 act_cyc = 17 - IDE_IN(active_time * clk / 1000 + 1, 2, 17);
139 rec_cyc = 15 - IDE_IN(recovery_time * clk / 1000 + 1, 2, 15);
138 140
139 return((recovery_cycle<<4) | active_cycle); 141 return (rec_cyc << 4) | act_cyc;
140} 142}
141 143
142/* 144/*
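Note: both compute_timing helpers now honour an ide_vlb_clk override before falling back to system_bus_clock(). The arithmetic converts nanoseconds to bus cycles (ns * MHz / 1000), clamps the count, and encodes it downward from a register maximum. A standalone re-statement of the qd6500 case, assuming IDE_IN(x, lo, hi) is a clamp macro, as its bounds suggest:

#include <stdio.h>

#define IDE_IN(x, lo, hi) ((x) < (lo) ? (lo) : ((x) > (hi) ? (hi) : (x)))

static unsigned char qd6500_timing(int clk, int active_ns, int recovery_ns)
{
        int act_cyc, rec_cyc;           /* clk in MHz, times in ns */

        if (clk <= 33) {
                act_cyc = 9 - IDE_IN(active_ns * clk / 1000 + 1, 2, 9);
                rec_cyc = 15 - IDE_IN(recovery_ns * clk / 1000 + 1, 0, 15);
        } else {
                act_cyc = 8 - IDE_IN(active_ns * clk / 1000 + 1, 1, 8);
                rec_cyc = 18 - IDE_IN(recovery_ns * clk / 1000 + 1, 3, 18);
        }
        return (rec_cyc << 4) | 0x08 | act_cyc;
}

int main(void)
{
        /* the driver's worst-case defaults: 175 ns active, 415 ns recovery */
        printf("timing byte @33MHz = %#x\n", qd6500_timing(33, 175, 415));
        return 0;
}

With those defaults this yields 0x1b at 33 MHz: 3 active cycles encoded as 9 - 6, and 1 recovery cycle encoded as 15 - 14.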
@@ -168,36 +170,15 @@ static int qd_find_disk_type (ide_drive_t *drive,
168} 170}
169 171
170/* 172/*
171 * qd_timing_ok:
172 *
173 * check whether timings don't conflict
174 */
175
176static int qd_timing_ok (ide_drive_t drives[])
177{
178 return (IDE_IMPLY(drives[0].present && drives[1].present,
179 IDE_IMPLY(QD_TIMREG(drives) == QD_TIMREG(drives+1),
180 QD_TIMING(drives) == QD_TIMING(drives+1))));
181 /* if same timing register, must be same timing */
182}
183
184/*
185 * qd_set_timing: 173 * qd_set_timing:
186 * 174 *
187 * records the timing, and enables selectproc as needed 175 * records the timing
188 */ 176 */
189 177
190static void qd_set_timing (ide_drive_t *drive, u8 timing) 178static void qd_set_timing (ide_drive_t *drive, u8 timing)
191{ 179{
192 ide_hwif_t *hwif = HWIF(drive);
193
194 drive->drive_data &= 0xff00; 180 drive->drive_data &= 0xff00;
195 drive->drive_data |= timing; 181 drive->drive_data |= timing;
196 if (qd_timing_ok(hwif->drives)) {
197 qd_select(drive); /* selects once */
198 hwif->selectproc = NULL;
199 } else
200 hwif->selectproc = &qd_select;
201 182
202 printk(KERN_DEBUG "%s: %#x\n", drive->name, timing); 183 printk(KERN_DEBUG "%s: %#x\n", drive->name, timing);
203} 184}
@@ -225,10 +206,11 @@ static void qd6500_set_pio_mode(ide_drive_t *drive, const u8 pio)
225 206
226static void qd6580_set_pio_mode(ide_drive_t *drive, const u8 pio) 207static void qd6580_set_pio_mode(ide_drive_t *drive, const u8 pio)
227{ 208{
228 int base = HWIF(drive)->select_data; 209 ide_hwif_t *hwif = drive->hwif;
229 unsigned int cycle_time; 210 unsigned int cycle_time;
230 int active_time = 175; 211 int active_time = 175;
231 int recovery_time = 415; /* worst case values from the dos driver */ 212 int recovery_time = 415; /* worst case values from the dos driver */
213 u8 base = (hwif->config_data & 0xff00) >> 8;
232 214
233 if (drive->id && !qd_find_disk_type(drive, &active_time, &recovery_time)) { 215 if (drive->id && !qd_find_disk_type(drive, &active_time, &recovery_time)) {
234 cycle_time = ide_pio_cycle_time(drive, pio); 216 cycle_time = ide_pio_cycle_time(drive, pio);
@@ -299,21 +281,10 @@ static int __init qd_testreg(int port)
299 return (readreg != QD_TESTVAL); 281 return (readreg != QD_TESTVAL);
300} 282}
301 283
302/*
303 * qd_setup:
304 *
305 * called to setup an ata channel : adjusts attributes & links for tuning
306 */
307
308static void __init qd_setup(ide_hwif_t *hwif, int base, int config)
309{
310 hwif->select_data = base;
311 hwif->config_data = config;
312}
313
314static void __init qd6500_port_init_devs(ide_hwif_t *hwif) 284static void __init qd6500_port_init_devs(ide_hwif_t *hwif)
315{ 285{
316 u8 base = hwif->select_data, config = QD_CONFIG(hwif); 286 u8 base = (hwif->config_data & 0xff00) >> 8;
287 u8 config = QD_CONFIG(hwif);
317 288
318 hwif->drives[0].drive_data = QD6500_DEF_DATA; 289 hwif->drives[0].drive_data = QD6500_DEF_DATA;
319 hwif->drives[1].drive_data = QD6500_DEF_DATA; 290 hwif->drives[1].drive_data = QD6500_DEF_DATA;
@@ -322,9 +293,10 @@ static void __init qd6500_port_init_devs(ide_hwif_t *hwif)
322static void __init qd6580_port_init_devs(ide_hwif_t *hwif) 293static void __init qd6580_port_init_devs(ide_hwif_t *hwif)
323{ 294{
324 u16 t1, t2; 295 u16 t1, t2;
325 u8 base = hwif->select_data, config = QD_CONFIG(hwif); 296 u8 base = (hwif->config_data & 0xff00) >> 8;
297 u8 config = QD_CONFIG(hwif);
326 298
327 if (QD_CONTROL(hwif) & QD_CONTR_SEC_DISABLED) { 299 if (hwif->host_flags & IDE_HFLAG_SINGLE) {
328 t1 = QD6580_DEF_DATA; 300 t1 = QD6580_DEF_DATA;
329 t2 = QD6580_DEF_DATA2; 301 t2 = QD6580_DEF_DATA2;
330 } else 302 } else
@@ -334,11 +306,23 @@ static void __init qd6580_port_init_devs(ide_hwif_t *hwif)
334 hwif->drives[1].drive_data = t2; 306 hwif->drives[1].drive_data = t2;
335} 307}
336 308
309static const struct ide_port_ops qd6500_port_ops = {
310 .port_init_devs = qd6500_port_init_devs,
311 .set_pio_mode = qd6500_set_pio_mode,
312 .selectproc = qd65xx_select,
313};
314
315static const struct ide_port_ops qd6580_port_ops = {
316 .port_init_devs = qd6580_port_init_devs,
317 .set_pio_mode = qd6580_set_pio_mode,
318 .selectproc = qd65xx_select,
319};
320
337static const struct ide_port_info qd65xx_port_info __initdata = { 321static const struct ide_port_info qd65xx_port_info __initdata = {
322 .name = DRV_NAME,
338 .chipset = ide_qd65xx, 323 .chipset = ide_qd65xx,
339 .host_flags = IDE_HFLAG_IO_32BIT | 324 .host_flags = IDE_HFLAG_IO_32BIT |
340 IDE_HFLAG_NO_DMA | 325 IDE_HFLAG_NO_DMA,
341 IDE_HFLAG_NO_AUTOTUNE,
342 .pio_mask = ATA_PIO4, 326 .pio_mask = ATA_PIO4,
343}; 327};
344 328
@@ -351,65 +335,41 @@ static const struct ide_port_info qd65xx_port_info __initdata = {
351 335
352static int __init qd_probe(int base) 336static int __init qd_probe(int base)
353{ 337{
354 ide_hwif_t *hwif; 338 int rc;
355 u8 config, unit; 339 u8 config, unit, control;
356 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 340 struct ide_port_info d = qd65xx_port_info;
357 hw_regs_t hw[2];
358 341
359 config = inb(QD_CONFIG_PORT); 342 config = inb(QD_CONFIG_PORT);
360 343
361 if (! ((config & QD_CONFIG_BASEPORT) >> 1 == (base == 0xb0)) ) 344 if (! ((config & QD_CONFIG_BASEPORT) >> 1 == (base == 0xb0)) )
362 return 1; 345 return -ENODEV;
363 346
364 unit = ! (config & QD_CONFIG_IDE_BASEPORT); 347 unit = ! (config & QD_CONFIG_IDE_BASEPORT);
365 348
366 memset(&hw, 0, sizeof(hw)); 349 if (unit)
350 d.host_flags |= IDE_HFLAG_QD_2ND_PORT;
367 351
368 ide_std_init_ports(&hw[0], 0x1f0, 0x3f6); 352 switch (config & 0xf0) {
369 hw[0].irq = 14; 353 case QD_CONFIG_QD6500:
354 if (qd_testreg(base))
355 return -ENODEV; /* bad register */
370 356
371 ide_std_init_ports(&hw[1], 0x170, 0x376);
372 hw[1].irq = 15;
373
374 if ((config & 0xf0) == QD_CONFIG_QD6500) {
375
376 if (qd_testreg(base)) return 1; /* bad register */
377
378 /* qd6500 found */
379
380 hwif = &ide_hwifs[unit];
381 printk(KERN_NOTICE "%s: qd6500 at %#x\n", hwif->name, base);
382 printk(KERN_DEBUG "qd6500: config=%#x, ID3=%u\n",
383 config, QD_ID3);
384
385 if (config & QD_CONFIG_DISABLED) { 357 if (config & QD_CONFIG_DISABLED) {
386 printk(KERN_WARNING "qd6500 is disabled !\n"); 358 printk(KERN_WARNING "qd6500 is disabled !\n");
387 return 1; 359 return -ENODEV;
388 } 360 }
389 361
390 ide_init_port_hw(hwif, &hw[unit]); 362 printk(KERN_NOTICE "qd6500 at %#x\n", base);
391 363 printk(KERN_DEBUG "qd6500: config=%#x, ID3=%u\n",
392 qd_setup(hwif, base, config); 364 config, QD_ID3);
393
394 hwif->port_init_devs = qd6500_port_init_devs;
395 hwif->set_pio_mode = &qd6500_set_pio_mode;
396
397 idx[unit] = unit;
398
399 ide_device_add(idx, &qd65xx_port_info);
400
401 return 1;
402 }
403
404 if (((config & 0xf0) == QD_CONFIG_QD6580_A) ||
405 ((config & 0xf0) == QD_CONFIG_QD6580_B)) {
406
407 u8 control;
408
409 if (qd_testreg(base) || qd_testreg(base+0x02)) return 1;
410 /* bad registers */
411 365
412 /* qd6580 found */ 366 d.port_ops = &qd6500_port_ops;
367 d.host_flags |= IDE_HFLAG_SINGLE;
368 break;
369 case QD_CONFIG_QD6580_A:
370 case QD_CONFIG_QD6580_B:
371 if (qd_testreg(base) || qd_testreg(base + 0x02))
372 return -ENODEV; /* bad registers */
413 373
414 control = inb(QD_CONTROL_PORT); 374 control = inb(QD_CONTROL_PORT);
415 375
@@ -419,74 +379,44 @@ static int __init qd_probe(int base)
419 379
420 outb(QD_DEF_CONTR, QD_CONTROL_PORT); 380 outb(QD_DEF_CONTR, QD_CONTROL_PORT);
421 381
422 if (control & QD_CONTR_SEC_DISABLED) { 382 d.port_ops = &qd6580_port_ops;
423 /* secondary disabled */ 383 if (control & QD_CONTR_SEC_DISABLED)
424 384 d.host_flags |= IDE_HFLAG_SINGLE;
425 hwif = &ide_hwifs[unit];
426 printk(KERN_INFO "%s: qd6580: single IDE board\n",
427 hwif->name);
428
429 ide_init_port_hw(hwif, &hw[unit]);
430
431 qd_setup(hwif, base, config | (control << 8));
432
433 hwif->port_init_devs = qd6580_port_init_devs;
434 hwif->set_pio_mode = &qd6580_set_pio_mode;
435
436 idx[unit] = unit;
437 385
438 ide_device_add(idx, &qd65xx_port_info); 386 printk(KERN_INFO "qd6580: %s IDE board\n",
439 387 (control & QD_CONTR_SEC_DISABLED) ? "single" : "dual");
440 return 1; 388 break;
441 } else { 389 default:
442 ide_hwif_t *mate; 390 return -ENODEV;
443 391 }
444 hwif = &ide_hwifs[0];
445 mate = &ide_hwifs[1];
446 /* secondary enabled */
447 printk(KERN_INFO "%s&%s: qd6580: dual IDE board\n",
448 hwif->name, mate->name);
449
450 ide_init_port_hw(hwif, &hw[0]);
451 ide_init_port_hw(mate, &hw[1]);
452
453 qd_setup(hwif, base, config | (control << 8));
454
455 hwif->port_init_devs = qd6580_port_init_devs;
456 hwif->set_pio_mode = &qd6580_set_pio_mode;
457
458 qd_setup(mate, base, config | (control << 8));
459
460 mate->port_init_devs = qd6580_port_init_devs;
461 mate->set_pio_mode = &qd6580_set_pio_mode;
462 392
463 idx[0] = 0; 393 rc = ide_legacy_device_add(&d, (base << 8) | config);
464 idx[1] = 1;
465 394
466 ide_device_add(idx, &qd65xx_port_info); 395 if (d.host_flags & IDE_HFLAG_SINGLE)
396 return (rc == 0) ? 1 : rc;
467 397
468 return 0; /* no other qd65xx possible */ 398 return rc;
469 }
470 }
471 /* no qd65xx found */
472 return 1;
473} 399}
474 400
475int probe_qd65xx = 0; 401static int probe_qd65xx;
476 402
477module_param_named(probe, probe_qd65xx, bool, 0); 403module_param_named(probe, probe_qd65xx, bool, 0);
478MODULE_PARM_DESC(probe, "probe for QD65xx chipsets"); 404MODULE_PARM_DESC(probe, "probe for QD65xx chipsets");
479 405
480static int __init qd65xx_init(void) 406static int __init qd65xx_init(void)
481{ 407{
408 int rc1, rc2 = -ENODEV;
409
482 if (probe_qd65xx == 0) 410 if (probe_qd65xx == 0)
483 return -ENODEV; 411 return -ENODEV;
484 412
485 if (qd_probe(0x30)) 413 rc1 = qd_probe(0x30);
486 qd_probe(0xb0); 414 if (rc1)
487 if (ide_hwifs[0].chipset != ide_qd65xx && 415 rc2 = qd_probe(0xb0);
488 ide_hwifs[1].chipset != ide_qd65xx) 416
417 if (rc1 < 0 && rc2 < 0)
489 return -ENODEV; 418 return -ENODEV;
419
490 return 0; 420 return 0;
491} 421}
492 422
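Note: with qd_setup() gone, qd_probe() passes both bytes it used to store separately through the single config argument of ide_legacy_device_add() as (base << 8) | config. The port_init_devs and set_pio_mode hooks then unpack them as (hwif->config_data & 0xff00) >> 8 and QD_CONFIG(hwif). A round-trip check of that packing, assuming (as the call sites imply) that the argument ends up in hwif->config_data:

#include <assert.h>

int main(void)
{
        unsigned char base = 0x30, config = 0xa5;
        unsigned long config_data = ((unsigned long)base << 8) | config;

        assert(((config_data & 0xff00) >> 8) == base);  /* base back out  */
        assert((config_data & 0x00ff) == config);       /* cf. QD_CONFIG() */
        return 0;
}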
diff --git a/drivers/ide/legacy/qd65xx.h b/drivers/ide/legacy/qd65xx.h
index 28dd50a15d55..c83dea85e621 100644
--- a/drivers/ide/legacy/qd65xx.h
+++ b/drivers/ide/legacy/qd65xx.h
@@ -30,7 +30,6 @@
30#define QD_ID3 ((config & QD_CONFIG_ID3)!=0) 30#define QD_ID3 ((config & QD_CONFIG_ID3)!=0)
31 31
32#define QD_CONFIG(hwif) ((hwif)->config_data & 0x00ff) 32#define QD_CONFIG(hwif) ((hwif)->config_data & 0x00ff)
33#define QD_CONTROL(hwif) (((hwif)->config_data & 0xff00) >> 8)
34 33
35#define QD_TIMING(drive) (byte)(((drive)->drive_data) & 0x00ff) 34#define QD_TIMING(drive) (byte)(((drive)->drive_data) & 0x00ff)
36#define QD_TIMREG(drive) (byte)((((drive)->drive_data) & 0xff00) >> 8) 35#define QD_TIMREG(drive) (byte)((((drive)->drive_data) & 0xff00) >> 8)
diff --git a/drivers/ide/legacy/umc8672.c b/drivers/ide/legacy/umc8672.c
index bc1944811b99..b54a14a57755 100644
--- a/drivers/ide/legacy/umc8672.c
+++ b/drivers/ide/legacy/umc8672.c
@@ -19,7 +19,7 @@
19 */ 19 */
20 20
21/* 21/*
22 * VLB Controller Support from 22 * VLB Controller Support from
23 * Wolfram Podien 23 * Wolfram Podien
24 * Rohoefe 3 24 * Rohoefe 3
25 * D28832 Achim 25 * D28832 Achim
@@ -32,7 +32,7 @@
32 * #define UMC_DRIVE0 11 32 * #define UMC_DRIVE0 11
33 * in the beginning of the driver, which sets the speed of drive 0 to 11 (there 33 * in the beginning of the driver, which sets the speed of drive 0 to 11 (there
34 * are some lines present). 0 - 11 are allowed speed values. These values are 34 * are some lines present). 0 - 11 are allowed speed values. These values are
35 * the results from the DOS speed test program supplied from UMC. 11 is the 35 * the results from the DOS speed test program supplied from UMC. 11 is the
36 * highest speed (about PIO mode 3) 36 * highest speed (about PIO mode 3)
37 */ 37 */
38#define REALLY_SLOW_IO /* some systems can safely undef this */ 38#define REALLY_SLOW_IO /* some systems can safely undef this */
@@ -51,6 +51,8 @@
51 51
52#include <asm/io.h> 52#include <asm/io.h>
53 53
54#define DRV_NAME "umc8672"
55
54/* 56/*
55 * Default speeds. These can be changed with "auto-tune" and/or hdparm. 57 * Default speeds. These can be changed with "auto-tune" and/or hdparm.
56 */ 58 */
@@ -60,115 +62,103 @@
60#define UMC_DRIVE3 1 /* In case of crash reduce speed */ 62#define UMC_DRIVE3 1 /* In case of crash reduce speed */
61 63
62static u8 current_speeds[4] = {UMC_DRIVE0, UMC_DRIVE1, UMC_DRIVE2, UMC_DRIVE3}; 64static u8 current_speeds[4] = {UMC_DRIVE0, UMC_DRIVE1, UMC_DRIVE2, UMC_DRIVE3};
63static const u8 pio_to_umc [5] = {0,3,7,10,11}; /* rough guesses */ 65static const u8 pio_to_umc [5] = {0, 3, 7, 10, 11}; /* rough guesses */
64 66
65/* 0 1 2 3 4 5 6 7 8 9 10 11 */ 67/* 0 1 2 3 4 5 6 7 8 9 10 11 */
66static const u8 speedtab [3][12] = { 68static const u8 speedtab [3][12] = {
67 {0xf, 0xb, 0x2, 0x2, 0x2, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1 }, 69 {0x0f, 0x0b, 0x02, 0x02, 0x02, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x1},
68 {0x3, 0x2, 0x2, 0x2, 0x2, 0x2, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1 }, 70 {0x03, 0x02, 0x02, 0x02, 0x02, 0x02, 0x01, 0x01, 0x01, 0x01, 0x01, 0x1},
69 {0xff,0xcb,0xc0,0x58,0x36,0x33,0x23,0x22,0x21,0x11,0x10,0x0}}; 71 {0xff, 0xcb, 0xc0, 0x58, 0x36, 0x33, 0x23, 0x22, 0x21, 0x11, 0x10, 0x0}
72};
70 73
71static void out_umc (char port,char wert) 74static void out_umc(char port, char wert)
72{ 75{
73 outb_p(port,0x108); 76 outb_p(port, 0x108);
74 outb_p(wert,0x109); 77 outb_p(wert, 0x109);
75} 78}
76 79
77static inline u8 in_umc (char port) 80static inline u8 in_umc(char port)
78{ 81{
79 outb_p(port,0x108); 82 outb_p(port, 0x108);
80 return inb_p(0x109); 83 return inb_p(0x109);
81} 84}
82 85
83static void umc_set_speeds (u8 speeds[]) 86static void umc_set_speeds(u8 speeds[])
84{ 87{
85 int i, tmp; 88 int i, tmp;
86 89
87 outb_p(0x5A,0x108); /* enable umc */ 90 outb_p(0x5A, 0x108); /* enable umc */
88 91
89 out_umc (0xd7,(speedtab[0][speeds[2]] | (speedtab[0][speeds[3]]<<4))); 92 out_umc(0xd7, (speedtab[0][speeds[2]] | (speedtab[0][speeds[3]]<<4)));
90 out_umc (0xd6,(speedtab[0][speeds[0]] | (speedtab[0][speeds[1]]<<4))); 93 out_umc(0xd6, (speedtab[0][speeds[0]] | (speedtab[0][speeds[1]]<<4)));
91 tmp = 0; 94 tmp = 0;
92 for (i = 3; i >= 0; i--) { 95 for (i = 3; i >= 0; i--)
93 tmp = (tmp << 2) | speedtab[1][speeds[i]]; 96 tmp = (tmp << 2) | speedtab[1][speeds[i]];
97 out_umc(0xdc, tmp);
98 for (i = 0; i < 4; i++) {
99 out_umc(0xd0 + i, speedtab[2][speeds[i]]);
100 out_umc(0xd8 + i, speedtab[2][speeds[i]]);
94 } 101 }
95 out_umc (0xdc,tmp); 102 outb_p(0xa5, 0x108); /* disable umc */
96 for (i = 0;i < 4; i++) {
97 out_umc (0xd0+i,speedtab[2][speeds[i]]);
98 out_umc (0xd8+i,speedtab[2][speeds[i]]);
99 }
100 outb_p(0xa5,0x108); /* disable umc */
101 103
102 printk ("umc8672: drive speeds [0 to 11]: %d %d %d %d\n", 104 printk("umc8672: drive speeds [0 to 11]: %d %d %d %d\n",
103 speeds[0], speeds[1], speeds[2], speeds[3]); 105 speeds[0], speeds[1], speeds[2], speeds[3]);
104} 106}
105 107
106static void umc_set_pio_mode(ide_drive_t *drive, const u8 pio) 108static void umc_set_pio_mode(ide_drive_t *drive, const u8 pio)
107{ 109{
110 ide_hwif_t *hwif = drive->hwif;
108 unsigned long flags; 111 unsigned long flags;
109 ide_hwgroup_t *hwgroup = ide_hwifs[HWIF(drive)->index^1].hwgroup;
110 112
111 printk("%s: setting umc8672 to PIO mode%d (speed %d)\n", 113 printk("%s: setting umc8672 to PIO mode%d (speed %d)\n",
112 drive->name, pio, pio_to_umc[pio]); 114 drive->name, pio, pio_to_umc[pio]);
113 spin_lock_irqsave(&ide_lock, flags); 115 spin_lock_irqsave(&ide_lock, flags);
114 if (hwgroup && hwgroup->handler != NULL) { 116 if (hwif->mate && hwif->mate->hwgroup->handler) {
115 printk(KERN_ERR "umc8672: other interface is busy: exiting tune_umc()\n"); 117 printk(KERN_ERR "umc8672: other interface is busy: exiting tune_umc()\n");
116 } else { 118 } else {
117 current_speeds[drive->name[2] - 'a'] = pio_to_umc[pio]; 119 current_speeds[drive->name[2] - 'a'] = pio_to_umc[pio];
118 umc_set_speeds (current_speeds); 120 umc_set_speeds(current_speeds);
119 } 121 }
120 spin_unlock_irqrestore(&ide_lock, flags); 122 spin_unlock_irqrestore(&ide_lock, flags);
121} 123}
122 124
125static const struct ide_port_ops umc8672_port_ops = {
126 .set_pio_mode = umc_set_pio_mode,
127};
128
123static const struct ide_port_info umc8672_port_info __initdata = { 129static const struct ide_port_info umc8672_port_info __initdata = {
130 .name = DRV_NAME,
124 .chipset = ide_umc8672, 131 .chipset = ide_umc8672,
125 .host_flags = IDE_HFLAG_NO_DMA | IDE_HFLAG_NO_AUTOTUNE, 132 .port_ops = &umc8672_port_ops,
133 .host_flags = IDE_HFLAG_NO_DMA,
126 .pio_mask = ATA_PIO4, 134 .pio_mask = ATA_PIO4,
127}; 135};
128 136
129static int __init umc8672_probe(void) 137static int __init umc8672_probe(void)
130{ 138{
131 unsigned long flags; 139 unsigned long flags;
132 static u8 idx[4] = { 0, 1, 0xff, 0xff };
133 hw_regs_t hw[2];
134 140
135 if (!request_region(0x108, 2, "umc8672")) { 141 if (!request_region(0x108, 2, "umc8672")) {
136 printk(KERN_ERR "umc8672: ports 0x108-0x109 already in use.\n"); 142 printk(KERN_ERR "umc8672: ports 0x108-0x109 already in use.\n");
137 return 1; 143 return 1;
138 } 144 }
139 local_irq_save(flags); 145 local_irq_save(flags);
140 outb_p(0x5A,0x108); /* enable umc */ 146 outb_p(0x5A, 0x108); /* enable umc */
141 if (in_umc (0xd5) != 0xa0) { 147 if (in_umc (0xd5) != 0xa0) {
142 local_irq_restore(flags); 148 local_irq_restore(flags);
143 printk(KERN_ERR "umc8672: not found\n"); 149 printk(KERN_ERR "umc8672: not found\n");
144 release_region(0x108, 2); 150 release_region(0x108, 2);
145 return 1; 151 return 1;
146 } 152 }
147 outb_p(0xa5,0x108); /* disable umc */ 153 outb_p(0xa5, 0x108); /* disable umc */
148 154
149 umc_set_speeds (current_speeds); 155 umc_set_speeds(current_speeds);
150 local_irq_restore(flags); 156 local_irq_restore(flags);
151 157
152 memset(&hw, 0, sizeof(hw)); 158 return ide_legacy_device_add(&umc8672_port_info, 0);
153
154 ide_std_init_ports(&hw[0], 0x1f0, 0x3f6);
155 hw[0].irq = 14;
156
157 ide_std_init_ports(&hw[1], 0x170, 0x376);
158 hw[1].irq = 15;
159
160 ide_init_port_hw(&ide_hwifs[0], &hw[0]);
161 ide_init_port_hw(&ide_hwifs[1], &hw[1]);
162
163 ide_hwifs[0].set_pio_mode = &umc_set_pio_mode;
164 ide_hwifs[1].set_pio_mode = &umc_set_pio_mode;
165
166 ide_device_add(idx, &umc8672_port_info);
167
168 return 0;
169} 159}
170 160
171int probe_umc8672 = 0; 161static int probe_umc8672;
172 162
173module_param_named(probe, probe_umc8672, bool, 0); 163module_param_named(probe, probe_umc8672, bool, 0);
174MODULE_PARM_DESC(probe, "probe for UMC8672 chipset"); 164MODULE_PARM_DESC(probe, "probe for UMC8672 chipset");
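Note: umc_set_speeds() packs per-drive speed codes into shared registers, two 4-bit nibbles each for 0xd6/0xd7 and four 2-bit fields for 0xdc, with drive 0 landing in the lowest bits because the loop runs i = 3 down to 0. The 2-bit pack in isolation:

#include <stdio.h>

int main(void)
{
        unsigned char codes[4] = { 3, 2, 1, 0 };  /* speedtab[1][...] values */
        unsigned int tmp = 0;

        for (int i = 3; i >= 0; i--)
                tmp = (tmp << 2) | codes[i];      /* drive 0 ends up lowest */

        printf("0xdc = %#04x\n", tmp);            /* 0b00011011 = 0x1b */
        return 0;
}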
diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/mips/au1xxx-ide.c
index 9b628248f2f4..296b9c674bae 100644
--- a/drivers/ide/mips/au1xxx-ide.c
+++ b/drivers/ide/mips/au1xxx-ide.c
@@ -47,7 +47,6 @@
47#define IDE_AU1XXX_BURSTMODE 1 47#define IDE_AU1XXX_BURSTMODE 1
48 48
49static _auide_hwif auide_hwif; 49static _auide_hwif auide_hwif;
50static int dbdma_init_done;
51 50
52static int auide_ddma_init(_auide_hwif *auide); 51static int auide_ddma_init(_auide_hwif *auide);
53 52
@@ -61,7 +60,7 @@ void auide_insw(unsigned long port, void *addr, u32 count)
61 60
62 if(!put_dest_flags(ahwif->rx_chan, (void*)addr, count << 1, 61 if(!put_dest_flags(ahwif->rx_chan, (void*)addr, count << 1,
63 DDMA_FLAGS_NOIE)) { 62 DDMA_FLAGS_NOIE)) {
64 printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__); 63 printk(KERN_ERR "%s failed %d\n", __func__, __LINE__);
65 return; 64 return;
66 } 65 }
67 ctp = *((chan_tab_t **)ahwif->rx_chan); 66 ctp = *((chan_tab_t **)ahwif->rx_chan);
@@ -79,7 +78,7 @@ void auide_outsw(unsigned long port, void *addr, u32 count)
79 78
80 if(!put_source_flags(ahwif->tx_chan, (void*)addr, 79 if(!put_source_flags(ahwif->tx_chan, (void*)addr,
81 count << 1, DDMA_FLAGS_NOIE)) { 80 count << 1, DDMA_FLAGS_NOIE)) {
82 printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__); 81 printk(KERN_ERR "%s failed %d\n", __func__, __LINE__);
83 return; 82 return;
84 } 83 }
85 ctp = *((chan_tab_t **)ahwif->tx_chan); 84 ctp = *((chan_tab_t **)ahwif->tx_chan);
@@ -250,7 +249,7 @@ static int auide_build_dmatable(ide_drive_t *drive)
250 (void*) sg_virt(sg), 249 (void*) sg_virt(sg),
251 tc, flags)) { 250 tc, flags)) {
252 printk(KERN_ERR "%s failed %d\n", 251 printk(KERN_ERR "%s failed %d\n",
253 __FUNCTION__, __LINE__); 252 __func__, __LINE__);
254 } 253 }
255 } else 254 } else
256 { 255 {
@@ -258,7 +257,7 @@ static int auide_build_dmatable(ide_drive_t *drive)
258 (void*) sg_virt(sg), 257 (void*) sg_virt(sg),
259 tc, flags)) { 258 tc, flags)) {
260 printk(KERN_ERR "%s failed %d\n", 259 printk(KERN_ERR "%s failed %d\n",
261 __FUNCTION__, __LINE__); 260 __func__, __LINE__);
262 } 261 }
263 } 262 }
264 263
@@ -315,35 +314,6 @@ static int auide_dma_setup(ide_drive_t *drive)
315 return 0; 314 return 0;
316} 315}
317 316
318static u8 auide_mdma_filter(ide_drive_t *drive)
319{
320 /*
321 * FIXME: ->white_list and ->black_list are based on completely bogus
322 * ->ide_dma_check implementation which didn't set neither the host
323 * controller timings nor the device for the desired transfer mode.
324 *
325 * They should be either removed or 0x00 MWDMA mask should be
326 * returned for devices on the ->black_list.
327 */
328
329 if (dbdma_init_done == 0) {
330 auide_hwif.white_list = ide_in_drive_list(drive->id,
331 dma_white_list);
332 auide_hwif.black_list = ide_in_drive_list(drive->id,
333 dma_black_list);
334 auide_hwif.drive = drive;
335 auide_ddma_init(&auide_hwif);
336 dbdma_init_done = 1;
337 }
338
339 /* Is the drive in our DMA black list? */
340 if (auide_hwif.black_list)
341 printk(KERN_WARNING "%s: Disabling DMA for %s (blacklisted)\n",
342 drive->name, drive->id->model);
343
344 return drive->hwif->mwdma_mask;
345}
346
347static int auide_dma_test_irq(ide_drive_t *drive) 317static int auide_dma_test_irq(ide_drive_t *drive)
348{ 318{
349 if (drive->waiting_for_dma == 0) 319 if (drive->waiting_for_dma == 0)
@@ -396,41 +366,41 @@ static void auide_init_dbdma_dev(dbdev_tab_t *dev, u32 dev_id, u32 tsize, u32 de
396 dev->dev_devwidth = devwidth; 366 dev->dev_devwidth = devwidth;
397 dev->dev_flags = flags; 367 dev->dev_flags = flags;
398} 368}
399
400#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
401 369
370#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
402static void auide_dma_timeout(ide_drive_t *drive) 371static void auide_dma_timeout(ide_drive_t *drive)
403{ 372{
404 ide_hwif_t *hwif = HWIF(drive); 373 ide_hwif_t *hwif = HWIF(drive);
405 374
406 printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name); 375 printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name);
407 376
408 if (hwif->ide_dma_test_irq(drive)) 377 if (auide_dma_test_irq(drive))
409 return; 378 return;
410 379
411 hwif->ide_dma_end(drive); 380 auide_dma_end(drive);
412} 381}
413
414 382
415static int auide_ddma_init(_auide_hwif *auide) { 383static const struct ide_dma_ops au1xxx_dma_ops = {
416 384 .dma_host_set = auide_dma_host_set,
385 .dma_setup = auide_dma_setup,
386 .dma_exec_cmd = auide_dma_exec_cmd,
387 .dma_start = auide_dma_start,
388 .dma_end = auide_dma_end,
389 .dma_test_irq = auide_dma_test_irq,
390 .dma_lost_irq = auide_dma_lost_irq,
391 .dma_timeout = auide_dma_timeout,
392};
393
394static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
395{
396 _auide_hwif *auide = (_auide_hwif *)hwif->hwif_data;
417 dbdev_tab_t source_dev_tab, target_dev_tab; 397 dbdev_tab_t source_dev_tab, target_dev_tab;
418 u32 dev_id, tsize, devwidth, flags; 398 u32 dev_id, tsize, devwidth, flags;
419 ide_hwif_t *hwif = auide->hwif;
420 399
421 dev_id = AU1XXX_ATA_DDMA_REQ; 400 dev_id = AU1XXX_ATA_DDMA_REQ;
422 401
423 if (auide->white_list || auide->black_list) { 402 tsize = 8; /* 1 */
424 tsize = 8; 403 devwidth = 32; /* 16 */
425 devwidth = 32;
426 }
427 else {
428 tsize = 1;
429 devwidth = 16;
430
431 printk(KERN_ERR "au1xxx-ide: %s is not on ide driver whitelist.\n",auide_hwif.drive->id->model);
432 printk(KERN_ERR " please read 'Documentation/mips/AU1xxx_IDE.README'");
433 }
434 404
435#ifdef IDE_AU1XXX_BURSTMODE 405#ifdef IDE_AU1XXX_BURSTMODE
436 flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE; 406 flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
@@ -482,9 +452,9 @@ static int auide_ddma_init(_auide_hwif *auide) {
482 return 0; 452 return 0;
483} 453}
484#else 454#else
485 455static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
486static int auide_ddma_init( _auide_hwif *auide )
487{ 456{
457 _auide_hwif *auide = (_auide_hwif *)hwif->hwif_data;
488 dbdev_tab_t source_dev_tab; 458 dbdev_tab_t source_dev_tab;
489 int flags; 459 int flags;
490 460
@@ -532,20 +502,28 @@ static int auide_ddma_init( _auide_hwif *auide )
532static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif) 502static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif)
533{ 503{
534 int i; 504 int i;
535 unsigned long *ata_regs = hw->io_ports; 505 unsigned long *ata_regs = hw->io_ports_array;
536 506
537 /* FIXME? */ 507 /* FIXME? */
538 for (i = 0; i < IDE_CONTROL_OFFSET; i++) { 508 for (i = 0; i < 8; i++)
539 *ata_regs++ = ahwif->regbase + (i << AU1XXX_ATA_REG_OFFSET); 509 *ata_regs++ = ahwif->regbase + (i << AU1XXX_ATA_REG_OFFSET);
540 }
541 510
542 /* set the Alternative Status register */ 511 /* set the Alternative Status register */
543 *ata_regs = ahwif->regbase + (14 << AU1XXX_ATA_REG_OFFSET); 512 *ata_regs = ahwif->regbase + (14 << AU1XXX_ATA_REG_OFFSET);
544} 513}
545 514
515static const struct ide_port_ops au1xxx_port_ops = {
516 .set_pio_mode = au1xxx_set_pio_mode,
517 .set_dma_mode = auide_set_dma_mode,
518};
519
546static const struct ide_port_info au1xxx_port_info = { 520static const struct ide_port_info au1xxx_port_info = {
521 .init_dma = auide_ddma_init,
522 .port_ops = &au1xxx_port_ops,
523#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
524 .dma_ops = &au1xxx_dma_ops,
525#endif
547 .host_flags = IDE_HFLAG_POST_SET_MODE | 526 .host_flags = IDE_HFLAG_POST_SET_MODE |
548 IDE_HFLAG_NO_DMA | /* no SFF-style DMA */
549 IDE_HFLAG_NO_IO_32BIT | 527 IDE_HFLAG_NO_IO_32BIT |
550 IDE_HFLAG_UNMASK_IRQS, 528 IDE_HFLAG_UNMASK_IRQS,
551 .pio_mask = ATA_PIO4, 529 .pio_mask = ATA_PIO4,
@@ -599,9 +577,11 @@ static int au_ide_probe(struct device *dev)
599 goto out; 577 goto out;
600 } 578 }
601 579
602 /* FIXME: This might possibly break PCMCIA IDE devices */ 580 hwif = ide_find_port();
603 581 if (hwif == NULL) {
604 hwif = &ide_hwifs[pdev->id]; 582 ret = -ENOENT;
583 goto out;
584 }
605 585
606 memset(&hw, 0, sizeof(hw)); 586 memset(&hw, 0, sizeof(hw));
607 auide_setup_ports(&hw, ahwif); 587 auide_setup_ports(&hw, ahwif);
@@ -613,8 +593,6 @@ static int au_ide_probe(struct device *dev)
613 593
614 hwif->dev = dev; 594 hwif->dev = dev;
615 595
616 hwif->mmio = 1;
617
618 /* If the user has selected DDMA assisted copies, 596 /* If the user has selected DDMA assisted copies,
619 then set up a few local I/O function entry points 597 then set up a few local I/O function entry points
620 */ 598 */
@@ -623,34 +601,12 @@ static int au_ide_probe(struct device *dev)
623 hwif->INSW = auide_insw; 601 hwif->INSW = auide_insw;
624 hwif->OUTSW = auide_outsw; 602 hwif->OUTSW = auide_outsw;
625#endif 603#endif
626
627 hwif->set_pio_mode = &au1xxx_set_pio_mode;
628 hwif->set_dma_mode = &auide_set_dma_mode;
629
630#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
631 hwif->dma_timeout = &auide_dma_timeout;
632
633 hwif->mdma_filter = &auide_mdma_filter;
634
635 hwif->dma_host_set = &auide_dma_host_set;
636 hwif->dma_exec_cmd = &auide_dma_exec_cmd;
637 hwif->dma_start = &auide_dma_start;
638 hwif->ide_dma_end = &auide_dma_end;
639 hwif->dma_setup = &auide_dma_setup;
640 hwif->ide_dma_test_irq = &auide_dma_test_irq;
641 hwif->dma_lost_irq = &auide_dma_lost_irq;
642#endif
643 hwif->select_data = 0; /* no chipset-specific code */ 604 hwif->select_data = 0; /* no chipset-specific code */
644 hwif->config_data = 0; /* no chipset-specific code */ 605 hwif->config_data = 0; /* no chipset-specific code */
645 606
646 auide_hwif.hwif = hwif; 607 auide_hwif.hwif = hwif;
647 hwif->hwif_data = &auide_hwif; 608 hwif->hwif_data = &auide_hwif;
648 609
649#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
650 auide_ddma_init(&auide_hwif);
651 dbdma_init_done = 1;
652#endif
653
654 idx[0] = hwif->index; 610 idx[0] = hwif->index;
655 611
656 ide_device_add(idx, &au1xxx_port_info); 612 ide_device_add(idx, &au1xxx_port_info);
@@ -670,7 +626,7 @@ static int au_ide_remove(struct device *dev)
670 ide_hwif_t *hwif = dev_get_drvdata(dev); 626 ide_hwif_t *hwif = dev_get_drvdata(dev);
671 _auide_hwif *ahwif = &auide_hwif; 627 _auide_hwif *ahwif = &auide_hwif;
672 628
673 ide_unregister(hwif->index); 629 ide_unregister(hwif);
674 630
675 iounmap((void *)ahwif->regbase); 631 iounmap((void *)ahwif->regbase);
676 632
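Note: the au1xxx conversion folds eight hwif->dma_* assignments into one const struct ide_dma_ops hung off the port_info (plus an init_dma hook), mirroring what port_ops did for the tuning callbacks. A reduced dispatch model with stub types, not the kernel's definitions:

#include <stdio.h>

struct dma_ops_model {                  /* cf. struct ide_dma_ops above */
        int  (*dma_setup)(void *drive);
        void (*dma_start)(void *drive);
        int  (*dma_end)(void *drive);
};

static int  demo_setup(void *drive) { (void)drive; puts("setup"); return 0; }
static void demo_start(void *drive) { (void)drive; puts("start"); }
static int  demo_end(void *drive)   { (void)drive; puts("end");   return 0; }

static const struct dma_ops_model demo_dma_ops = {
        .dma_setup = demo_setup,
        .dma_start = demo_start,
        .dma_end   = demo_end,
};

int main(void)
{
        const struct dma_ops_model *ops = &demo_dma_ops;  /* hwif->dma_ops */

        if (ops->dma_setup(NULL) == 0) {
                ops->dma_start(NULL);
                ops->dma_end(NULL);
        }
        return 0;
}

One shared read-only table per host type replaces a pointer assignment per hook per hwif, which is also what makes the direct auide_dma_test_irq()/auide_dma_end() calls in the timeout path possible.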
diff --git a/drivers/ide/mips/swarm.c b/drivers/ide/mips/swarm.c
index 956259fc09ba..68947626e4aa 100644
--- a/drivers/ide/mips/swarm.c
+++ b/drivers/ide/mips/swarm.c
@@ -76,17 +76,12 @@ static int __devinit swarm_ide_probe(struct device *dev)
76 if (!SIBYTE_HAVE_IDE) 76 if (!SIBYTE_HAVE_IDE)
77 return -ENODEV; 77 return -ENODEV;
78 78
79 /* Find an empty slot. */ 79 hwif = ide_find_port();
80 for (i = 0; i < MAX_HWIFS; i++) 80 if (hwif == NULL) {
81 if (!ide_hwifs[i].io_ports[IDE_DATA_OFFSET])
82 break;
83 if (i >= MAX_HWIFS) {
84 printk(KERN_ERR DRV_NAME ": no free slot for interface\n"); 81 printk(KERN_ERR DRV_NAME ": no free slot for interface\n");
85 return -ENOMEM; 82 return -ENOMEM;
86 } 83 }
87 84
88 hwif = ide_hwifs + i;
89
90 base = ioremap(A_IO_EXT_BASE, 0x800); 85 base = ioremap(A_IO_EXT_BASE, 0x800);
91 offset = __raw_readq(base + R_IO_EXT_REG(R_IO_EXT_START_ADDR, IDE_CS)); 86 offset = __raw_readq(base + R_IO_EXT_REG(R_IO_EXT_START_ADDR, IDE_CS));
92 size = __raw_readq(base + R_IO_EXT_REG(R_IO_EXT_MULT_SIZE, IDE_CS)); 87 size = __raw_readq(base + R_IO_EXT_REG(R_IO_EXT_MULT_SIZE, IDE_CS));
@@ -115,15 +110,13 @@ static int __devinit swarm_ide_probe(struct device *dev)
115 110
116 /* Setup MMIO ops. */ 111 /* Setup MMIO ops. */
117 default_hwif_mmiops(hwif); 112 default_hwif_mmiops(hwif);
118 /* Prevent resource map manipulation. */ 113
119 hwif->mmio = 1;
120 hwif->chipset = ide_generic; 114 hwif->chipset = ide_generic;
121 hwif->noprobe = 0;
122 115
123 for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) 116 for (i = 0; i <= 7; i++)
124 hwif->io_ports[i] = 117 hwif->io_ports_array[i] =
125 (unsigned long)(base + ((0x1f0 + i) << 5)); 118 (unsigned long)(base + ((0x1f0 + i) << 5));
126 hwif->io_ports[IDE_CONTROL_OFFSET] = 119 hwif->io_ports.ctl_addr =
127 (unsigned long)(base + (0x3f6 << 5)); 120 (unsigned long)(base + (0x3f6 << 5));
128 hwif->irq = K_INT_GB_IDE; 121 hwif->irq = K_INT_GB_IDE;
129 122
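Note: the Swarm port setup recreates the legacy 0x1f0-0x1f7/0x3f6 register file inside an ioremap()ed window by shifting each legacy port number left by 5, i.e. a 32-byte stride per register on the generic bus. The offsets, spelled out:

#include <stdio.h>

int main(void)
{
        unsigned long base = 0;         /* the ioremap()ed window in-driver */

        for (int i = 0; i <= 7; i++)
                printf("reg %d at base+0x%05lx\n", i,
                       base + ((0x1f0UL + i) << 5));
        printf("ctl   at base+0x%05lx\n", base + (0x3f6UL << 5));
        return 0;
}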
diff --git a/drivers/ide/pci/aec62xx.c b/drivers/ide/pci/aec62xx.c
index cfb3265bc1a8..7f46c224b7c4 100644
--- a/drivers/ide/pci/aec62xx.c
+++ b/drivers/ide/pci/aec62xx.c
@@ -135,12 +135,12 @@ static void aec6260_set_mode(ide_drive_t *drive, const u8 speed)
135 135
136static void aec_set_pio_mode(ide_drive_t *drive, const u8 pio) 136static void aec_set_pio_mode(ide_drive_t *drive, const u8 pio)
137{ 137{
138 drive->hwif->set_dma_mode(drive, pio + XFER_PIO_0); 138 drive->hwif->port_ops->set_dma_mode(drive, pio + XFER_PIO_0);
139} 139}
140 140
141static unsigned int __devinit init_chipset_aec62xx(struct pci_dev *dev, const char *name) 141static unsigned int __devinit init_chipset_aec62xx(struct pci_dev *dev, const char *name)
142{ 142{
143 int bus_speed = system_bus_clock(); 143 int bus_speed = ide_pci_clk ? ide_pci_clk : system_bus_clock();
144 144
145 if (bus_speed <= 33) 145 if (bus_speed <= 33)
146 pci_set_drvdata(dev, (void *) aec6xxx_33_base); 146 pci_set_drvdata(dev, (void *) aec6xxx_33_base);
@@ -175,27 +175,23 @@ static u8 __devinit atp86x_cable_detect(ide_hwif_t *hwif)
175 return (ata66 & mask) ? ATA_CBL_PATA40 : ATA_CBL_PATA80; 175 return (ata66 & mask) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
176} 176}
177 177
178static void __devinit init_hwif_aec62xx(ide_hwif_t *hwif) 178static const struct ide_port_ops atp850_port_ops = {
179{ 179 .set_pio_mode = aec_set_pio_mode,
180 struct pci_dev *dev = to_pci_dev(hwif->dev); 180 .set_dma_mode = aec6210_set_mode,
181 181};
182 hwif->set_pio_mode = &aec_set_pio_mode;
183
184 if (dev->device == PCI_DEVICE_ID_ARTOP_ATP850UF)
185 hwif->set_dma_mode = &aec6210_set_mode;
186 else {
187 hwif->set_dma_mode = &aec6260_set_mode;
188 182
189 hwif->cable_detect = atp86x_cable_detect; 183static const struct ide_port_ops atp86x_port_ops = {
190 } 184 .set_pio_mode = aec_set_pio_mode,
191} 185 .set_dma_mode = aec6260_set_mode,
186 .cable_detect = atp86x_cable_detect,
187};
192 188
193static const struct ide_port_info aec62xx_chipsets[] __devinitdata = { 189static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
194 { /* 0 */ 190 { /* 0 */
195 .name = "AEC6210", 191 .name = "AEC6210",
196 .init_chipset = init_chipset_aec62xx, 192 .init_chipset = init_chipset_aec62xx,
197 .init_hwif = init_hwif_aec62xx,
198 .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}}, 193 .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
194 .port_ops = &atp850_port_ops,
199 .host_flags = IDE_HFLAG_SERIALIZE | 195 .host_flags = IDE_HFLAG_SERIALIZE |
200 IDE_HFLAG_NO_ATAPI_DMA | 196 IDE_HFLAG_NO_ATAPI_DMA |
201 IDE_HFLAG_NO_DSC | 197 IDE_HFLAG_NO_DSC |
@@ -207,7 +203,7 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
207 },{ /* 1 */ 203 },{ /* 1 */
208 .name = "AEC6260", 204 .name = "AEC6260",
209 .init_chipset = init_chipset_aec62xx, 205 .init_chipset = init_chipset_aec62xx,
210 .init_hwif = init_hwif_aec62xx, 206 .port_ops = &atp86x_port_ops,
211 .host_flags = IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_NO_AUTODMA | 207 .host_flags = IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_NO_AUTODMA |
212 IDE_HFLAG_ABUSE_SET_DMA_MODE | 208 IDE_HFLAG_ABUSE_SET_DMA_MODE |
213 IDE_HFLAG_OFF_BOARD, 209 IDE_HFLAG_OFF_BOARD,
@@ -217,17 +213,18 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
217 },{ /* 2 */ 213 },{ /* 2 */
218 .name = "AEC6260R", 214 .name = "AEC6260R",
219 .init_chipset = init_chipset_aec62xx, 215 .init_chipset = init_chipset_aec62xx,
220 .init_hwif = init_hwif_aec62xx,
221 .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}}, 216 .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
217 .port_ops = &atp86x_port_ops,
222 .host_flags = IDE_HFLAG_NO_ATAPI_DMA | 218 .host_flags = IDE_HFLAG_NO_ATAPI_DMA |
223 IDE_HFLAG_ABUSE_SET_DMA_MODE, 219 IDE_HFLAG_ABUSE_SET_DMA_MODE |
220 IDE_HFLAG_NON_BOOTABLE,
224 .pio_mask = ATA_PIO4, 221 .pio_mask = ATA_PIO4,
225 .mwdma_mask = ATA_MWDMA2, 222 .mwdma_mask = ATA_MWDMA2,
226 .udma_mask = ATA_UDMA4, 223 .udma_mask = ATA_UDMA4,
227 },{ /* 3 */ 224 },{ /* 3 */
228 .name = "AEC6280", 225 .name = "AEC6280",
229 .init_chipset = init_chipset_aec62xx, 226 .init_chipset = init_chipset_aec62xx,
230 .init_hwif = init_hwif_aec62xx, 227 .port_ops = &atp86x_port_ops,
231 .host_flags = IDE_HFLAG_NO_ATAPI_DMA | 228 .host_flags = IDE_HFLAG_NO_ATAPI_DMA |
232 IDE_HFLAG_ABUSE_SET_DMA_MODE | 229 IDE_HFLAG_ABUSE_SET_DMA_MODE |
233 IDE_HFLAG_OFF_BOARD, 230 IDE_HFLAG_OFF_BOARD,
@@ -237,8 +234,8 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
237 },{ /* 4 */ 234 },{ /* 4 */
238 .name = "AEC6280R", 235 .name = "AEC6280R",
239 .init_chipset = init_chipset_aec62xx, 236 .init_chipset = init_chipset_aec62xx,
240 .init_hwif = init_hwif_aec62xx,
241 .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}}, 237 .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
238 .port_ops = &atp86x_port_ops,
242 .host_flags = IDE_HFLAG_NO_ATAPI_DMA | 239 .host_flags = IDE_HFLAG_NO_ATAPI_DMA |
243 IDE_HFLAG_ABUSE_SET_DMA_MODE | 240 IDE_HFLAG_ABUSE_SET_DMA_MODE |
244 IDE_HFLAG_OFF_BOARD, 241 IDE_HFLAG_OFF_BOARD,
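Note: two small idioms recur in the aec62xx hunks. The new ide_pci_clk parameter overrides the measured bus clock only when set, and aec_set_pio_mode() funnels PIO tuning through set_dma_mode() by biasing with XFER_PIO_0, so one hook handles both mode families. Illustrative stand-ins below; XFER_PIO_0 as 0x08 matches the usual ATA transfer-mode encoding, but treat the values as examples.

#include <stdio.h>

#define XFER_PIO_0 0x08

static int ide_pci_clk_param;           /* 0 = unset, like ide_pci_clk */

static int measured_bus_clock(void)
{
        return 33;                      /* stub for system_bus_clock() */
}

static int bus_speed(void)
{
        /* parameter wins when given, measurement is the fallback */
        return ide_pci_clk_param ? ide_pci_clk_param : measured_bus_clock();
}

int main(void)
{
        printf("bus %d MHz, PIO4 -> xfer mode %#x\n",
               bus_speed(), XFER_PIO_0 + 4);
        return 0;
}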
diff --git a/drivers/ide/pci/alim15x3.c b/drivers/ide/pci/alim15x3.c
index b3b6f514ce2d..b36a22b8c213 100644
--- a/drivers/ide/pci/alim15x3.c
+++ b/drivers/ide/pci/alim15x3.c
@@ -38,8 +38,6 @@
 
 #include <asm/io.h>
 
-#define DISPLAY_ALI_TIMINGS
-
 /*
  * ALi devices are not plug in. Otherwise these static values would
  * need to go. They ought to go away anyway
@@ -49,236 +47,6 @@ static u8 m5229_revision;
 static u8 chip_is_1543c_e;
 static struct pci_dev *isa_dev;
 
-#if defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_IDE_PROC_FS)
-#include <linux/stat.h>
-#include <linux/proc_fs.h>
-
-static u8 ali_proc = 0;
-
-static struct pci_dev *bmide_dev;
-
-static char *fifo[4] = {
-	"FIFO Off",
-	"FIFO On ",
-	"DMA mode",
-	"PIO mode" };
-
-static char *udmaT[8] = {
-	"1.5T",
-	" 2T",
-	"2.5T",
-	" 3T",
-	"3.5T",
-	" 4T",
-	" 6T",
-	" 8T"
-};
-
-static char *channel_status[8] = {
-	"OK ",
-	"busy ",
-	"DRQ ",
-	"DRQ busy ",
-	"error ",
-	"error busy ",
-	"error DRQ ",
-	"error DRQ busy"
-};
-
-/**
- * ali_get_info - generate proc file for ALi IDE
- * @buffer: buffer to fill
- * @addr: address of user start in buffer
- * @offset: offset into 'file'
- * @count: buffer count
- *
- * Walks the Ali devices and outputs summary data on the tuning and
- * anything else that will help with debugging
- */
-
-static int ali_get_info (char *buffer, char **addr, off_t offset, int count)
-{
-	unsigned long bibma;
-	u8 reg53h, reg5xh, reg5yh, reg5xh1, reg5yh1, c0, c1, rev, tmp;
-	char *q, *p = buffer;
-
-	/* fetch rev. */
-	pci_read_config_byte(bmide_dev, 0x08, &rev);
-	if (rev >= 0xc1)	/* M1543C or newer */
-		udmaT[7] = " ???";
-	else
-		fifo[3] = " ??? ";
-
-	/* first fetch bibma: */
-
-	bibma = pci_resource_start(bmide_dev, 4);
-
-	/*
-	 * at that point bibma+0x2 et bibma+0xa are byte
-	 * registers to investigate:
-	 */
-	c0 = inb(bibma + 0x02);
-	c1 = inb(bibma + 0x0a);
-
-	p += sprintf(p,
-		"\n Ali M15x3 Chipset.\n");
-	p += sprintf(p,
-		" ------------------\n");
-	pci_read_config_byte(bmide_dev, 0x78, &reg53h);
-	p += sprintf(p, "PCI Clock: %d.\n", reg53h);
-
-	pci_read_config_byte(bmide_dev, 0x53, &reg53h);
-	p += sprintf(p,
-		"CD_ROM FIFO:%s, CD_ROM DMA:%s\n",
-		(reg53h & 0x02) ? "Yes" : "No ",
-		(reg53h & 0x01) ? "Yes" : "No " );
-	pci_read_config_byte(bmide_dev, 0x74, &reg53h);
-	p += sprintf(p,
-		"FIFO Status: contains %d Words, runs%s%s\n\n",
-		(reg53h & 0x3f),
-		(reg53h & 0x40) ? " OVERWR" : "",
-		(reg53h & 0x80) ? " OVERRD." : "." );
-
-	p += sprintf(p,
-		"-------------------primary channel"
-		"-------------------secondary channel"
-		"---------\n\n");
-
-	pci_read_config_byte(bmide_dev, 0x09, &reg53h);
-	p += sprintf(p,
-		"channel status: %s"
-		" %s\n",
-		(reg53h & 0x20) ? "On " : "Off",
-		(reg53h & 0x10) ? "On " : "Off" );
-
-	p += sprintf(p,
-		"both channels togth: %s"
-		" %s\n",
-		(c0&0x80) ? "No " : "Yes",
-		(c1&0x80) ? "No " : "Yes" );
-
-	pci_read_config_byte(bmide_dev, 0x76, &reg53h);
-	p += sprintf(p,
-		"Channel state: %s %s\n",
-		channel_status[reg53h & 0x07],
-		channel_status[(reg53h & 0x70) >> 4] );
-
-	pci_read_config_byte(bmide_dev, 0x58, &reg5xh);
-	pci_read_config_byte(bmide_dev, 0x5c, &reg5yh);
-	p += sprintf(p,
-		"Add. Setup Timing: %dT"
-		" %dT\n",
-		(reg5xh & 0x07) ? (reg5xh & 0x07) : 8,
-		(reg5yh & 0x07) ? (reg5yh & 0x07) : 8 );
-
-	pci_read_config_byte(bmide_dev, 0x59, &reg5xh);
-	pci_read_config_byte(bmide_dev, 0x5d, &reg5yh);
-	p += sprintf(p,
-		"Command Act. Count: %dT"
-		" %dT\n"
-		"Command Rec. Count: %dT"
-		" %dT\n\n",
-		(reg5xh & 0x70) ? ((reg5xh & 0x70) >> 4) : 8,
-		(reg5yh & 0x70) ? ((reg5yh & 0x70) >> 4) : 8,
-		(reg5xh & 0x0f) ? (reg5xh & 0x0f) : 16,
-		(reg5yh & 0x0f) ? (reg5yh & 0x0f) : 16 );
-
-	p += sprintf(p,
-		"----------------drive0-----------drive1"
-		"------------drive0-----------drive1------\n\n");
-	p += sprintf(p,
-		"DMA enabled: %s %s"
-		" %s %s\n",
-		(c0&0x20) ? "Yes" : "No ",
-		(c0&0x40) ? "Yes" : "No ",
-		(c1&0x20) ? "Yes" : "No ",
-		(c1&0x40) ? "Yes" : "No " );
-
-	pci_read_config_byte(bmide_dev, 0x54, &reg5xh);
-	pci_read_config_byte(bmide_dev, 0x55, &reg5yh);
-	q = "FIFO threshold: %2d Words %2d Words"
-	    " %2d Words %2d Words\n";
-	if (rev < 0xc1) {
-		if ((rev == 0x20) &&
-		    (pci_read_config_byte(bmide_dev, 0x4f, &tmp), (tmp &= 0x20))) {
-			p += sprintf(p, q, 8, 8, 8, 8);
-		} else {
-			p += sprintf(p, q,
-				(reg5xh & 0x03) + 12,
-				((reg5xh & 0x30)>>4) + 12,
-				(reg5yh & 0x03) + 12,
-				((reg5yh & 0x30)>>4) + 12 );
-		}
-	} else {
-		int t1 = (tmp = (reg5xh & 0x03)) ? (tmp << 3) : 4;
-		int t2 = (tmp = ((reg5xh & 0x30)>>4)) ? (tmp << 3) : 4;
-		int t3 = (tmp = (reg5yh & 0x03)) ? (tmp << 3) : 4;
-		int t4 = (tmp = ((reg5yh & 0x30)>>4)) ? (tmp << 3) : 4;
-		p += sprintf(p, q, t1, t2, t3, t4);
-	}
-
-#if 0
-	p += sprintf(p,
-		"FIFO threshold: %2d Words %2d Words"
-		" %2d Words %2d Words\n",
-		(reg5xh & 0x03) + 12,
-		((reg5xh & 0x30)>>4) + 12,
-		(reg5yh & 0x03) + 12,
-		((reg5yh & 0x30)>>4) + 12 );
-#endif
-
-	p += sprintf(p,
-		"FIFO mode: %s %s %s %s\n",
-		fifo[((reg5xh & 0x0c) >> 2)],
-		fifo[((reg5xh & 0xc0) >> 6)],
-		fifo[((reg5yh & 0x0c) >> 2)],
-		fifo[((reg5yh & 0xc0) >> 6)] );
-
-	pci_read_config_byte(bmide_dev, 0x5a, &reg5xh);
-	pci_read_config_byte(bmide_dev, 0x5b, &reg5xh1);
-	pci_read_config_byte(bmide_dev, 0x5e, &reg5yh);
-	pci_read_config_byte(bmide_dev, 0x5f, &reg5yh1);
-
-	p += sprintf(p,/*
-		"------------------drive0-----------drive1"
-		"------------drive0-----------drive1------\n")*/
-		"Dt RW act. Cnt %2dT %2dT"
-		" %2dT %2dT\n"
-		"Dt RW rec. Cnt %2dT %2dT"
-		" %2dT %2dT\n\n",
-		(reg5xh & 0x70) ? ((reg5xh & 0x70) >> 4) : 8,
-		(reg5xh1 & 0x70) ? ((reg5xh1 & 0x70) >> 4) : 8,
-		(reg5yh & 0x70) ? ((reg5yh & 0x70) >> 4) : 8,
-		(reg5yh1 & 0x70) ? ((reg5yh1 & 0x70) >> 4) : 8,
-		(reg5xh & 0x0f) ? (reg5xh & 0x0f) : 16,
-		(reg5xh1 & 0x0f) ? (reg5xh1 & 0x0f) : 16,
-		(reg5yh & 0x0f) ? (reg5yh & 0x0f) : 16,
-		(reg5yh1 & 0x0f) ? (reg5yh1 & 0x0f) : 16 );
-
-	p += sprintf(p,
-		"-----------------------------------UDMA Timings"
-		"--------------------------------\n\n");
-
-	pci_read_config_byte(bmide_dev, 0x56, &reg5xh);
-	pci_read_config_byte(bmide_dev, 0x57, &reg5yh);
-	p += sprintf(p,
-		"UDMA: %s %s"
-		" %s %s\n"
-		"UDMA timings: %s %s"
-		" %s %s\n\n",
-		(reg5xh & 0x08) ? "OK" : "No",
-		(reg5xh & 0x80) ? "OK" : "No",
-		(reg5yh & 0x08) ? "OK" : "No",
-		(reg5yh & 0x80) ? "OK" : "No",
-		udmaT[(reg5xh & 0x07)],
-		udmaT[(reg5xh & 0x70) >> 4],
-		udmaT[reg5yh & 0x07],
-		udmaT[(reg5yh & 0x70) >> 4] );
-
-	return p-buffer; /* => must be less than 4k! */
-}
-#endif /* defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_IDE_PROC_FS) */
-
 /**
  * ali_set_pio_mode - set host controller for PIO mode
  * @drive: drive
@@ -294,7 +62,7 @@ static void ali_set_pio_mode(ide_drive_t *drive, const u8 pio)
 	int s_time, a_time, c_time;
 	u8 s_clc, a_clc, r_clc;
 	unsigned long flags;
-	int bus_speed = system_bus_clock();
+	int bus_speed = ide_pci_clk ? ide_pci_clk : system_bus_clock();
 	int port = hwif->channel ? 0x5c : 0x58;
 	int portFIFO = hwif->channel ? 0x55 : 0x54;
 	u8 cd_dma_fifo = 0;
@@ -465,14 +233,6 @@ static unsigned int __devinit init_chipset_ali15x3 (struct pci_dev *dev, const c
 
 	isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
 
-#if defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_IDE_PROC_FS)
-	if (!ali_proc) {
-		ali_proc = 1;
-		bmide_dev = dev;
-		ide_pci_create_host_proc("ali", ali_get_info);
-	}
-#endif /* defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_IDE_PROC_FS) */
-
 	local_irq_save(flags);
 
 	if (m5229_revision < 0xC2) {
@@ -610,7 +370,7 @@ static int ali_cable_override(struct pci_dev *pdev)
 }
 
 /**
- * ata66_ali15x3 - check for UDMA 66 support
+ * ali_cable_detect - cable detection
  * @hwif: IDE interface
 *
 * This checks if the controller and the cable are capable
@@ -620,7 +380,7 @@ static int ali_cable_override(struct pci_dev *pdev)
 * FIXME: frobs bits that are not defined on newer ALi devicea
 */
 
-static u8 __devinit ata66_ali15x3(ide_hwif_t *hwif)
+static u8 __devinit ali_cable_detect(ide_hwif_t *hwif)
 {
 	struct pci_dev *dev = to_pci_dev(hwif->dev);
 	unsigned long flags;
@@ -652,27 +412,7 @@ static u8 __devinit ata66_ali15x3(ide_hwif_t *hwif)
 	return cbl;
 }
 
-/**
- * init_hwif_common_ali15x3 - Set up ALI IDE hardware
- * @hwif: IDE interface
- *
- * Initialize the IDE structure side of the ALi 15x3 driver.
- */
-
-static void __devinit init_hwif_common_ali15x3 (ide_hwif_t *hwif)
-{
-	hwif->set_pio_mode = &ali_set_pio_mode;
-	hwif->set_dma_mode = &ali_set_dma_mode;
-	hwif->udma_filter = &ali_udma_filter;
-
-	hwif->cable_detect = ata66_ali15x3;
-
-	if (hwif->dma_base == 0)
-		return;
-
-	hwif->dma_setup = &ali15x3_dma_setup;
-}
-
+#ifndef CONFIG_SPARC64
 /**
  * init_hwif_ali15x3 - Initialize the ALI IDE x86 stuff
  * @hwif: interface to configure
@@ -722,35 +462,66 @@ static void __devinit init_hwif_ali15x3 (ide_hwif_t *hwif)
 		if(irq >= 0)
 			hwif->irq = irq;
 	}
-
-	init_hwif_common_ali15x3(hwif);
 }
+#endif
 
 /**
 * init_dma_ali15x3 - set up DMA on ALi15x3
 * @hwif: IDE interface
- * @dmabase: DMA interface base PCI address
+ * @d: IDE port info
 *
- * Set up the DMA functionality on the ALi 15x3. For the ALi
- * controllers this is generic so we can let the generic code do
- * the actual work.
+ * Set up the DMA functionality on the ALi 15x3.
 */
 
-static void __devinit init_dma_ali15x3 (ide_hwif_t *hwif, unsigned long dmabase)
+static int __devinit init_dma_ali15x3(ide_hwif_t *hwif,
+				      const struct ide_port_info *d)
 {
-	if (m5229_revision < 0x20)
-		return;
+	struct pci_dev *dev = to_pci_dev(hwif->dev);
+	unsigned long base = ide_pci_dma_base(hwif, d);
+
+	if (base == 0 || ide_pci_set_master(dev, d->name) < 0)
+		return -1;
+
 	if (!hwif->channel)
-		outb(inb(dmabase + 2) & 0x60, dmabase + 2);
-	ide_setup_dma(hwif, dmabase);
+		outb(inb(base + 2) & 0x60, base + 2);
+
+	printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n",
+	       hwif->name, base, base + 7);
+
+	if (ide_allocate_dma_engine(hwif))
+		return -1;
+
+	ide_setup_dma(hwif, base);
+
+	return 0;
 }
 
+static const struct ide_port_ops ali_port_ops = {
+	.set_pio_mode	= ali_set_pio_mode,
+	.set_dma_mode	= ali_set_dma_mode,
+	.udma_filter	= ali_udma_filter,
+	.cable_detect	= ali_cable_detect,
+};
+
+static const struct ide_dma_ops ali_dma_ops = {
+	.dma_host_set	= ide_dma_host_set,
+	.dma_setup	= ali15x3_dma_setup,
+	.dma_exec_cmd	= ide_dma_exec_cmd,
+	.dma_start	= ide_dma_start,
+	.dma_end	= __ide_dma_end,
+	.dma_test_irq	= ide_dma_test_irq,
+	.dma_lost_irq	= ide_dma_lost_irq,
+	.dma_timeout	= ide_dma_timeout,
+};
+
 static const struct ide_port_info ali15x3_chipset __devinitdata = {
 	.name		= "ALI15X3",
 	.init_chipset	= init_chipset_ali15x3,
+#ifndef CONFIG_SPARC64
 	.init_hwif	= init_hwif_ali15x3,
+#endif
 	.init_dma	= init_dma_ali15x3,
-	.host_flags	= IDE_HFLAG_BOOTABLE,
+	.port_ops	= &ali_port_ops,
 	.pio_mask	= ATA_PIO5,
 	.swdma_mask	= ATA_SWDMA2,
 	.mwdma_mask	= ATA_MWDMA2,
@@ -793,14 +564,17 @@ static int __devinit alim15x3_init_one(struct pci_dev *dev, const struct pci_dev
 			d.udma_mask = ATA_UDMA5;
 		else
 			d.udma_mask = ATA_UDMA6;
+
+		d.dma_ops = &ali_dma_ops;
+	} else {
+		d.host_flags |= IDE_HFLAG_NO_DMA;
+
+		d.mwdma_mask = d.swdma_mask = 0;
 	}
 
 	if (idx == 0)
 		d.host_flags |= IDE_HFLAG_CLEAR_SIMPLEX;
 
-#if defined(CONFIG_SPARC64)
-	d.init_hwif = init_hwif_common_ali15x3;
-#endif /* CONFIG_SPARC64 */
 	return ide_setup_pci_device(dev, &d);
 }
 
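The alim15x3 hunks above show the conversion this series applies to every driver in the directory: per-port hook assignments made in an init_hwif function are replaced by one shared, read-only ops table referenced from the port info. The following is a minimal standalone sketch of that pattern, not the kernel's actual code; the stand-in types (struct hwif, struct port_ops) are drastically simplified and the demo_* names are invented for illustration.

	#include <stdio.h>

	struct hwif;				/* stand-in for ide_hwif_t */

	struct port_ops {			/* stand-in for struct ide_port_ops */
		void (*set_pio_mode)(struct hwif *, unsigned char pio);
		unsigned char (*cable_detect)(struct hwif *);
	};

	struct hwif {
		const char *name;
		const struct port_ops *port_ops;	/* wired up once by the core */
	};

	static void demo_set_pio_mode(struct hwif *h, unsigned char pio)
	{
		printf("%s: PIO mode %u\n", h->name, pio);
	}

	static unsigned char demo_cable_detect(struct hwif *h)
	{
		return 80;	/* pretend an 80-wire cable was found */
	}

	/* one const table shared by every port the driver registers */
	static const struct port_ops demo_port_ops = {
		.set_pio_mode	= demo_set_pio_mode,
		.cable_detect	= demo_cable_detect,
	};

	int main(void)
	{
		struct hwif h = { .name = "ide0", .port_ops = &demo_port_ops };

		if (h.port_ops->set_pio_mode)
			h.port_ops->set_pio_mode(&h, 4);
		return 0;
	}

The const table can live in read-only memory and be shared across all ports, which is why the per-hwif assignments (and the SPARC64-only init_hwif variant) could be deleted above.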
diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/pci/amd74xx.c
index 2ef890ce8097..efcf54338be7 100644
--- a/drivers/ide/pci/amd74xx.c
+++ b/drivers/ide/pci/amd74xx.c
@@ -179,7 +179,7 @@ static unsigned int __devinit init_chipset_amd74xx(struct pci_dev *dev,
 	 * Determine the system bus clock.
 	 */
 
-	amd_clock = system_bus_clock() * 1000;
+	amd_clock = (ide_pci_clk ? ide_pci_clk : system_bus_clock()) * 1000;
 
 	switch (amd_clock) {
 	case 33000: amd_clock = 33333; break;
@@ -210,21 +210,20 @@ static void __devinit init_hwif_amd74xx(ide_hwif_t *hwif)
 
 	if (hwif->irq == 0) /* 0 is bogus but will do for now */
 		hwif->irq = pci_get_legacy_ide_irq(dev, hwif->channel);
-
-	hwif->set_pio_mode = &amd_set_pio_mode;
-	hwif->set_dma_mode = &amd_set_drive;
-
-	hwif->cable_detect = amd_cable_detect;
 }
 
+static const struct ide_port_ops amd_port_ops = {
+	.set_pio_mode	= amd_set_pio_mode,
+	.set_dma_mode	= amd_set_drive,
+	.cable_detect	= amd_cable_detect,
+};
+
 #define IDE_HFLAGS_AMD \
 	(IDE_HFLAG_PIO_NO_BLACKLIST | \
-	 IDE_HFLAG_PIO_NO_DOWNGRADE | \
 	 IDE_HFLAG_ABUSE_SET_DMA_MODE | \
 	 IDE_HFLAG_POST_SET_MODE | \
 	 IDE_HFLAG_IO_32BIT | \
-	 IDE_HFLAG_UNMASK_IRQS | \
-	 IDE_HFLAG_BOOTABLE)
+	 IDE_HFLAG_UNMASK_IRQS)
 
 #define DECLARE_AMD_DEV(name_str, swdma, udma) \
 	{ \
@@ -232,6 +231,7 @@ static void __devinit init_hwif_amd74xx(ide_hwif_t *hwif)
 		.init_chipset	= init_chipset_amd74xx, \
 		.init_hwif	= init_hwif_amd74xx, \
 		.enablebits	= {{0x40,0x02,0x02}, {0x40,0x01,0x01}}, \
+		.port_ops	= &amd_port_ops, \
 		.host_flags	= IDE_HFLAGS_AMD, \
 		.pio_mask	= ATA_PIO5, \
 		.swdma_mask	= swdma, \
@@ -245,6 +245,7 @@ static void __devinit init_hwif_amd74xx(ide_hwif_t *hwif)
 		.init_chipset	= init_chipset_amd74xx, \
 		.init_hwif	= init_hwif_amd74xx, \
 		.enablebits	= {{0x50,0x02,0x02}, {0x50,0x01,0x01}}, \
+		.port_ops	= &amd_port_ops, \
 		.host_flags	= IDE_HFLAGS_AMD, \
 		.pio_mask	= ATA_PIO5, \
 		.swdma_mask	= ATA_SWDMA2, \
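The expression `ide_pci_clk ? ide_pci_clk : system_bus_clock()` recurs in the alim15x3, amd74xx and cmd64x hunks: a user- or platform-supplied PCI clock value takes precedence, and only when it is unset (zero) does the driver fall back to the legacy probed bus clock. A hedged sketch of the idiom, with illustrative values only (the real variables live in the IDE core, not here):

	#include <stdio.h>

	static int ide_pci_clk;			/* 0 = not set by any parameter */

	static int system_bus_clock(void)	/* legacy fallback, MHz */
	{
		return 33;
	}

	int main(void)
	{
		int bus_speed = ide_pci_clk ? ide_pci_clk : system_bus_clock();

		/* e.g. amd74xx above scales this to kHz: bus_speed * 1000 */
		printf("computing timings against a %d MHz clock\n", bus_speed);
		return 0;
	}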
diff --git a/drivers/ide/pci/atiixp.c b/drivers/ide/pci/atiixp.c
index 7e037c880cb0..8b637181681a 100644
--- a/drivers/ide/pci/atiixp.c
+++ b/drivers/ide/pci/atiixp.c
@@ -130,37 +130,26 @@ static u8 __devinit atiixp_cable_detect(ide_hwif_t *hwif)
 	return ATA_CBL_PATA40;
 }
 
-/**
- * init_hwif_atiixp - fill in the hwif for the ATIIXP
- * @hwif: IDE interface
- *
- * Set up the ide_hwif_t for the ATIIXP interface according to the
- * capabilities of the hardware.
- */
-
-static void __devinit init_hwif_atiixp(ide_hwif_t *hwif)
-{
-	hwif->set_pio_mode = &atiixp_set_pio_mode;
-	hwif->set_dma_mode = &atiixp_set_dma_mode;
-
-	hwif->cable_detect = atiixp_cable_detect;
-}
+static const struct ide_port_ops atiixp_port_ops = {
+	.set_pio_mode	= atiixp_set_pio_mode,
+	.set_dma_mode	= atiixp_set_dma_mode,
+	.cable_detect	= atiixp_cable_detect,
+};
 
 static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
 	{ /* 0 */
 		.name		= "ATIIXP",
-		.init_hwif	= init_hwif_atiixp,
 		.enablebits	= {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
-		.host_flags	= IDE_HFLAG_LEGACY_IRQS | IDE_HFLAG_BOOTABLE,
+		.port_ops	= &atiixp_port_ops,
+		.host_flags	= IDE_HFLAG_LEGACY_IRQS,
 		.pio_mask	= ATA_PIO4,
 		.mwdma_mask	= ATA_MWDMA2,
 		.udma_mask	= ATA_UDMA5,
 	},{ /* 1 */
 		.name		= "SB600_PATA",
-		.init_hwif	= init_hwif_atiixp,
 		.enablebits	= {{0x48,0x01,0x00}, {0x00,0x00,0x00}},
-		.host_flags	= IDE_HFLAG_SINGLE | IDE_HFLAG_LEGACY_IRQS |
-				  IDE_HFLAG_BOOTABLE,
+		.port_ops	= &atiixp_port_ops,
+		.host_flags	= IDE_HFLAG_SINGLE | IDE_HFLAG_LEGACY_IRQS,
 		.pio_mask	= ATA_PIO4,
 		.mwdma_mask	= ATA_MWDMA2,
 		.udma_mask	= ATA_UDMA5,
diff --git a/drivers/ide/pci/cmd640.c b/drivers/ide/pci/cmd640.c
index a1cfe033a55f..aaf38109eaec 100644
--- a/drivers/ide/pci/cmd640.c
+++ b/drivers/ide/pci/cmd640.c
@@ -4,7 +4,7 @@
 
 /*
  * Original authors:	abramov@cecmow.enet.dec.com (Igor Abramov)
- *                      mlord@pobox.com (Mark Lord)
+ *			mlord@pobox.com (Mark Lord)
 *
 * See linux/MAINTAINERS for address of current maintainer.
 *
@@ -98,7 +98,7 @@
 
 #define CMD640_PREFETCH_MASKS 1
 
-//#define CMD640_DUMP_REGS
+/*#define CMD640_DUMP_REGS */
 
 #include <linux/types.h>
 #include <linux/kernel.h>
@@ -109,10 +109,9 @@
 
 #include <asm/io.h>
 
-/*
- * This flag is set in ide.c by the parameter: ide0=cmd640_vlb
- */
-int cmd640_vlb = 0;
+#define DRV_NAME "cmd640"
+
+static int cmd640_vlb;
 
 /*
 * CMD640 specific registers definition.
@@ -185,7 +184,6 @@ static DEFINE_SPINLOCK(cmd640_lock);
 * These are initialized to point at the devices we control
 */
 static ide_hwif_t *cmd_hwif0, *cmd_hwif1;
-static ide_drive_t *cmd_drives[4];
 
 /*
 * Interface to access cmd640x registers
@@ -207,13 +205,13 @@ static unsigned int cmd640_chip_version;
 
 /* PCI method 1 access */
 
-static void put_cmd640_reg_pci1 (u16 reg, u8 val)
+static void put_cmd640_reg_pci1(u16 reg, u8 val)
 {
 	outl_p((reg & 0xfc) | cmd640_key, 0xcf8);
 	outb_p(val, (reg & 3) | 0xcfc);
 }
 
-static u8 get_cmd640_reg_pci1 (u16 reg)
+static u8 get_cmd640_reg_pci1(u16 reg)
 {
 	outl_p((reg & 0xfc) | cmd640_key, 0xcf8);
 	return inb_p((reg & 3) | 0xcfc);
@@ -221,14 +219,14 @@ static u8 get_cmd640_reg_pci1 (u16 reg)
 
 /* PCI method 2 access (from CMD datasheet) */
 
-static void put_cmd640_reg_pci2 (u16 reg, u8 val)
+static void put_cmd640_reg_pci2(u16 reg, u8 val)
 {
 	outb_p(0x10, 0xcf8);
 	outb_p(val, cmd640_key + reg);
 	outb_p(0, 0xcf8);
 }
 
-static u8 get_cmd640_reg_pci2 (u16 reg)
+static u8 get_cmd640_reg_pci2(u16 reg)
 {
 	u8 b;
 
@@ -240,13 +238,13 @@ static u8 get_cmd640_reg_pci2 (u16 reg)
 
 /* VLB access */
 
-static void put_cmd640_reg_vlb (u16 reg, u8 val)
+static void put_cmd640_reg_vlb(u16 reg, u8 val)
 {
 	outb_p(reg, cmd640_key);
 	outb_p(val, cmd640_key + 4);
 }
 
-static u8 get_cmd640_reg_vlb (u16 reg)
+static u8 get_cmd640_reg_vlb(u16 reg)
 {
 	outb_p(reg, cmd640_key);
 	return inb_p(cmd640_key + 4);
@@ -268,11 +266,11 @@ static void put_cmd640_reg(u16 reg, u8 val)
 	unsigned long flags;
 
 	spin_lock_irqsave(&cmd640_lock, flags);
-	__put_cmd640_reg(reg,val);
+	__put_cmd640_reg(reg, val);
 	spin_unlock_irqrestore(&cmd640_lock, flags);
 }
 
-static int __init match_pci_cmd640_device (void)
+static int __init match_pci_cmd640_device(void)
 {
 	const u8 ven_dev[4] = {0x95, 0x10, 0x40, 0x06};
 	unsigned int i;
@@ -292,7 +290,7 @@ static int __init match_pci_cmd640_device (void)
 /*
 * Probe for CMD640x -- pci method 1
 */
-static int __init probe_for_cmd640_pci1 (void)
+static int __init probe_for_cmd640_pci1(void)
 {
 	__get_cmd640_reg = get_cmd640_reg_pci1;
 	__put_cmd640_reg = put_cmd640_reg_pci1;
@@ -308,7 +306,7 @@ static int __init probe_for_cmd640_pci1 (void)
 /*
 * Probe for CMD640x -- pci method 2
 */
-static int __init probe_for_cmd640_pci2 (void)
+static int __init probe_for_cmd640_pci2(void)
 {
 	__get_cmd640_reg = get_cmd640_reg_pci2;
 	__put_cmd640_reg = put_cmd640_reg_pci2;
@@ -322,7 +320,7 @@ static int __init probe_for_cmd640_pci2 (void)
 /*
 * Probe for CMD640x -- vlb
 */
-static int __init probe_for_cmd640_vlb (void)
+static int __init probe_for_cmd640_vlb(void)
 {
 	u8 b;
 
@@ -343,18 +341,18 @@ static int __init probe_for_cmd640_vlb (void)
 * Returns 1 if an IDE interface/drive exists at 0x170,
 * Returns 0 otherwise.
 */
-static int __init secondary_port_responding (void)
+static int __init secondary_port_responding(void)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&cmd640_lock, flags);
 
-	outb_p(0x0a, 0x170 + IDE_SELECT_OFFSET);	/* select drive0 */
+	outb_p(0x0a, 0x176);	/* select drive0 */
 	udelay(100);
-	if ((inb_p(0x170 + IDE_SELECT_OFFSET) & 0x1f) != 0x0a) {
-		outb_p(0x1a, 0x170 + IDE_SELECT_OFFSET); /* select drive1 */
+	if ((inb_p(0x176) & 0x1f) != 0x0a) {
+		outb_p(0x1a, 0x176);	/* select drive1 */
 		udelay(100);
-		if ((inb_p(0x170 + IDE_SELECT_OFFSET) & 0x1f) != 0x1a) {
+		if ((inb_p(0x176) & 0x1f) != 0x1a) {
 			spin_unlock_irqrestore(&cmd640_lock, flags);
 			return 0; /* nothing responded */
 		}
@@ -367,7 +365,7 @@ static int __init secondary_port_responding (void)
 /*
 * Dump out all cmd640 registers. May be called from ide.c
 */
-static void cmd640_dump_regs (void)
+static void cmd640_dump_regs(void)
 {
 	unsigned int reg = cmd640_vlb ? 0x50 : 0x00;
 
@@ -382,13 +380,13 @@ static void cmd640_dump_regs (void)
 }
 #endif
 
+#ifndef CONFIG_BLK_DEV_CMD640_ENHANCED
 /*
 * Check whether prefetch is on for a drive,
 * and initialize the unmask flags for safe operation.
 */
-static void __init check_prefetch (unsigned int index)
+static void __init check_prefetch(ide_drive_t *drive, unsigned int index)
 {
-	ide_drive_t *drive = cmd_drives[index];
 	u8 b = get_cmd640_reg(prefetch_regs[index]);
 
 	if (b & prefetch_masks[index]) {	/* is prefetch off? */
@@ -403,29 +401,12 @@ static void __init check_prefetch (unsigned int index)
 		drive->no_io_32bit = 0;
 	}
 }
-
-/*
- * Figure out which devices we control
- */
-static void __init setup_device_ptrs (void)
-{
-	cmd_hwif0 = &ide_hwifs[0];
-	cmd_hwif1 = &ide_hwifs[1];
-
-	cmd_drives[0] = &cmd_hwif0->drives[0];
-	cmd_drives[1] = &cmd_hwif0->drives[1];
-	cmd_drives[2] = &cmd_hwif1->drives[0];
-	cmd_drives[3] = &cmd_hwif1->drives[1];
-}
-
-#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
-
+#else
 /*
 * Sets prefetch mode for a drive.
 */
-static void set_prefetch_mode (unsigned int index, int mode)
+static void set_prefetch_mode(ide_drive_t *drive, unsigned int index, int mode)
 {
-	ide_drive_t *drive = cmd_drives[index];
 	unsigned long flags;
 	int reg = prefetch_regs[index];
 	u8 b;
@@ -452,7 +433,7 @@ static void set_prefetch_mode (unsigned int index, int mode)
 /*
 * Dump out current drive clocks settings
 */
-static void display_clocks (unsigned int index)
+static void display_clocks(unsigned int index)
 {
 	u8 active_count, recovery_count;
 
@@ -471,44 +452,16 @@ static void display_clocks (unsigned int index)
 * Pack active and recovery counts into single byte representation
 * used by controller
 */
-static inline u8 pack_nibbles (u8 upper, u8 lower)
+static inline u8 pack_nibbles(u8 upper, u8 lower)
 {
 	return ((upper & 0x0f) << 4) | (lower & 0x0f);
 }
 
 /*
- * This routine retrieves the initial drive timings from the chipset.
- */
-static void __init retrieve_drive_counts (unsigned int index)
-{
-	u8 b;
-
-	/*
-	 * Get the internal setup timing, and convert to clock count
-	 */
-	b = get_cmd640_reg(arttim_regs[index]) & ~0x3f;
-	switch (b) {
-	case 0x00: b = 4; break;
-	case 0x80: b = 3; break;
-	case 0x40: b = 2; break;
-	default: b = 5; break;
-	}
-	setup_counts[index] = b;
-
-	/*
-	 * Get the active/recovery counts
-	 */
-	b = get_cmd640_reg(drwtim_regs[index]);
-	active_counts[index] = (b >> 4) ? (b >> 4) : 0x10;
-	recovery_counts[index] = (b & 0x0f) ? (b & 0x0f) : 0x10;
-}
-
-
-/*
 * This routine writes the prepared setup/active/recovery counts
 * for a drive into the cmd640 chipset registers to active them.
 */
-static void program_drive_counts (unsigned int index)
+static void program_drive_counts(ide_drive_t *drive, unsigned int index)
 {
 	unsigned long flags;
 	u8 setup_count = setup_counts[index];
@@ -522,8 +475,11 @@ static void program_drive_counts (unsigned int index)
 	 * so we merge the timings, using the slowest value for each timing.
 	 */
 	if (index > 1) {
-		unsigned int mate;
-		if (cmd_drives[mate = index ^ 1]->present) {
+		ide_hwif_t *hwif = drive->hwif;
+		ide_drive_t *peer = &hwif->drives[!drive->select.b.unit];
+		unsigned int mate = index ^ 1;
+
+		if (peer->present) {
 			if (setup_count < setup_counts[mate])
 				setup_count = setup_counts[mate];
 			if (active_count < active_counts[mate])
@@ -537,11 +493,11 @@ static void program_drive_counts (unsigned int index)
 	 * Convert setup_count to internal chipset representation
 	 */
 	switch (setup_count) {
-	case 4:	 setup_count = 0x00; break;
-	case 3:	 setup_count = 0x80; break;
-	case 1:
-	case 2:	 setup_count = 0x40; break;
-	default: setup_count = 0xc0; /* case 5 */
+	case 4: setup_count = 0x00; break;
+	case 3: setup_count = 0x80; break;
+	case 1:
+	case 2: setup_count = 0x40; break;
+	default: setup_count = 0xc0; /* case 5 */
 	}
 
 	/*
@@ -562,11 +518,19 @@ static void program_drive_counts (unsigned int index)
 /*
 * Set a specific pio_mode for a drive
 */
-static void cmd640_set_mode (unsigned int index, u8 pio_mode, unsigned int cycle_time)
+static void cmd640_set_mode(ide_drive_t *drive, unsigned int index,
+			    u8 pio_mode, unsigned int cycle_time)
 {
 	int setup_time, active_time, recovery_time, clock_time;
 	u8 setup_count, active_count, recovery_count, recovery_count2, cycle_count;
-	int bus_speed = system_bus_clock();
+	int bus_speed;
+
+	if (cmd640_vlb && ide_vlb_clk)
+		bus_speed = ide_vlb_clk;
+	else if (!cmd640_vlb && ide_pci_clk)
+		bus_speed = ide_pci_clk;
+	else
+		bus_speed = system_bus_clock();
 
 	if (pio_mode > 5)
 		pio_mode = 5;
@@ -574,15 +538,15 @@ static void cmd640_set_mode (unsigned int index, u8 pio_mode, unsigned int cycle
 	active_time = ide_pio_timings[pio_mode].active_time;
 	recovery_time = cycle_time - (setup_time + active_time);
 	clock_time = 1000 / bus_speed;
-	cycle_count = (cycle_time + clock_time - 1) / clock_time;
+	cycle_count = DIV_ROUND_UP(cycle_time, clock_time);
 
-	setup_count = (setup_time + clock_time - 1) / clock_time;
+	setup_count = DIV_ROUND_UP(setup_time, clock_time);
 
-	active_count = (active_time + clock_time - 1) / clock_time;
+	active_count = DIV_ROUND_UP(active_time, clock_time);
 	if (active_count < 2)
 		active_count = 2; /* minimum allowed by cmd640 */
 
-	recovery_count = (recovery_time + clock_time - 1) / clock_time;
+	recovery_count = DIV_ROUND_UP(recovery_time, clock_time);
 	recovery_count2 = cycle_count - (setup_count + active_count);
 	if (recovery_count2 > recovery_count)
 		recovery_count = recovery_count2;
@@ -611,7 +575,7 @@ static void cmd640_set_mode (unsigned int index, u8 pio_mode, unsigned int cycle
 	 * 1) this is the wrong place to do it (proper is do_special() in ide.c)
 	 * 2) in practice this is rarely, if ever, necessary
 	 */
-	program_drive_counts (index);
+	program_drive_counts(drive, index);
 }
 
 static void cmd640_set_pio_mode(ide_drive_t *drive, const u8 pio)
@@ -619,32 +583,26 @@ static void cmd640_set_pio_mode(ide_drive_t *drive, const u8 pio)
 	unsigned int index = 0, cycle_time;
 	u8 b;
 
-	while (drive != cmd_drives[index]) {
-		if (++index > 3) {
-			printk(KERN_ERR "%s: bad news in %s\n",
-					drive->name, __FUNCTION__);
-			return;
-		}
-	}
 	switch (pio) {
 	case 6: /* set fast-devsel off */
 	case 7: /* set fast-devsel on */
 		b = get_cmd640_reg(CNTRL) & ~0x27;
 		if (pio & 1)
 			b |= 0x27;
 		put_cmd640_reg(CNTRL, b);
-		printk("%s: %sabled cmd640 fast host timing (devsel)\n", drive->name, (pio & 1) ? "en" : "dis");
-		return;
-
+		printk("%s: %sabled cmd640 fast host timing (devsel)\n",
+			drive->name, (pio & 1) ? "en" : "dis");
+		return;
 	case 8: /* set prefetch off */
 	case 9: /* set prefetch on */
-		set_prefetch_mode(index, pio & 1);
-		printk("%s: %sabled cmd640 prefetch\n", drive->name, (pio & 1) ? "en" : "dis");
-		return;
+		set_prefetch_mode(drive, index, pio & 1);
+		printk("%s: %sabled cmd640 prefetch\n",
			drive->name, (pio & 1) ? "en" : "dis");
+		return;
 	}
 
 	cycle_time = ide_pio_cycle_time(drive, pio);
-	cmd640_set_mode(index, pio, cycle_time);
+	cmd640_set_mode(drive, index, pio, cycle_time);
 
 	printk("%s: selected cmd640 PIO mode%d (%dns)",
 		drive->name, pio, cycle_time);
@@ -652,6 +610,9 @@ static void cmd640_set_pio_mode(ide_drive_t *drive, const u8 pio)
 	display_clocks(index);
 }
 
+static const struct ide_port_ops cmd640_port_ops = {
+	.set_pio_mode	= cmd640_set_pio_mode,
+};
 #endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
 
 static int pci_conf1(void)
@@ -693,14 +654,32 @@ static const struct ide_port_info cmd640_port_info __initdata = {
 	.chipset		= ide_cmd640,
 	.host_flags		= IDE_HFLAG_SERIALIZE |
 				  IDE_HFLAG_NO_DMA |
-				  IDE_HFLAG_NO_AUTOTUNE |
 				  IDE_HFLAG_ABUSE_PREFETCH |
 				  IDE_HFLAG_ABUSE_FAST_DEVSEL,
 #ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
+	.port_ops		= &cmd640_port_ops,
 	.pio_mask		= ATA_PIO5,
 #endif
 };
 
+static int cmd640x_init_one(unsigned long base, unsigned long ctl)
+{
+	if (!request_region(base, 8, DRV_NAME)) {
+		printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
+				DRV_NAME, base, base + 7);
+		return -EBUSY;
+	}
+
+	if (!request_region(ctl, 1, DRV_NAME)) {
+		printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
+				DRV_NAME, ctl);
+		release_region(base, 8);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
 /*
 * Probe for a cmd640 chipset, and initialize it if found.
 */
@@ -709,7 +688,7 @@ static int __init cmd640x_init(void)
 #ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
 	int second_port_toggled = 0;
 #endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
-	int second_port_cmd640 = 0;
+	int second_port_cmd640 = 0, rc;
 	const char *bus_type, *port2;
 	unsigned int index;
 	u8 b, cfr;
@@ -749,10 +728,21 @@ static int __init cmd640x_init(void)
 	cfr = get_cmd640_reg(CFR);
 	cmd640_chip_version = cfr & CFR_DEVREV;
 	if (cmd640_chip_version == 0) {
-		printk ("ide: bad cmd640 revision: %d\n", cmd640_chip_version);
+		printk("ide: bad cmd640 revision: %d\n", cmd640_chip_version);
 		return 0;
 	}
 
+	rc = cmd640x_init_one(0x1f0, 0x3f6);
+	if (rc)
+		return rc;
+
+	rc = cmd640x_init_one(0x170, 0x376);
+	if (rc) {
+		release_region(0x3f6, 1);
+		release_region(0x1f0, 8);
+		return rc;
+	}
+
 	memset(&hw, 0, sizeof(hw));
 
 	ide_std_init_ports(&hw[0], 0x1f0, 0x3f6);
@@ -764,17 +754,15 @@ static int __init cmd640x_init(void)
 	printk(KERN_INFO "cmd640: buggy cmd640%c interface on %s, config=0x%02x"
 			 "\n", 'a' + cmd640_chip_version - 1, bus_type, cfr);
 
+	cmd_hwif0 = ide_find_port();
+
 	/*
 	 * Initialize data for primary port
 	 */
-	setup_device_ptrs ();
-
-	ide_init_port_hw(cmd_hwif0, &hw[0]);
-#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
-	cmd_hwif0->set_pio_mode = &cmd640_set_pio_mode;
-#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
-
-	idx[0] = cmd_hwif0->index;
+	if (cmd_hwif0) {
+		ide_init_port_hw(cmd_hwif0, &hw[0]);
+		idx[0] = cmd_hwif0->index;
+	}
 
 	/*
 	 * Ensure compatibility by always using the slowest timings
@@ -786,10 +774,13 @@ static int __init cmd640x_init(void)
 	put_cmd640_reg(CMDTIM, 0);
 	put_cmd640_reg(BRST, 0x40);
 
+	cmd_hwif1 = ide_find_port();
+
 	/*
 	 * Try to enable the secondary interface, if not already enabled
 	 */
-	if (cmd_hwif1->drives[0].noprobe && cmd_hwif1->drives[1].noprobe) {
+	if (cmd_hwif1 &&
+	    cmd_hwif1->drives[0].noprobe && cmd_hwif1->drives[1].noprobe) {
 		port2 = "not probed";
 	} else {
 		b = get_cmd640_reg(CNTRL);
@@ -820,15 +811,11 @@ static int __init cmd640x_init(void)
 	/*
 	 * Initialize data for secondary cmd640 port, if enabled
 	 */
-	if (second_port_cmd640) {
+	if (second_port_cmd640 && cmd_hwif1) {
 		ide_init_port_hw(cmd_hwif1, &hw[1]);
-#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
-		cmd_hwif1->set_pio_mode = &cmd640_set_pio_mode;
-#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
-
 		idx[1] = cmd_hwif1->index;
 	}
-	printk(KERN_INFO "%s: %sserialized, secondary interface %s\n", cmd_hwif1->name,
+	printk(KERN_INFO "cmd640: %sserialized, secondary interface %s\n",
 			 second_port_cmd640 ? "" : "not ", port2);
 
 	/*
@@ -836,35 +823,34 @@ static int __init cmd640x_init(void)
 	 * Do not unnecessarily disturb any prior BIOS setup of these.
 	 */
 	for (index = 0; index < (2 + (second_port_cmd640 << 1)); index++) {
-		ide_drive_t *drive = cmd_drives[index];
-#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
-		if (drive->autotune || ((index > 1) && second_port_toggled)) {
-			/*
-			 * Reset timing to the slowest speed and turn off prefetch.
-			 * This way, the drive identify code has a better chance.
-			 */
-			setup_counts [index] = 4;	/* max possible */
-			active_counts [index] = 16;	/* max possible */
-			recovery_counts [index] = 16;	/* max possible */
-			program_drive_counts (index);
-			set_prefetch_mode (index, 0);
-			printk("cmd640: drive%d timings/prefetch cleared\n", index);
-		} else {
-			/*
-			 * Record timings/prefetch without changing them.
-			 * This preserves any prior BIOS setup.
-			 */
-			retrieve_drive_counts (index);
-			check_prefetch (index);
-			printk("cmd640: drive%d timings/prefetch(%s) preserved",
-				index, drive->no_io_32bit ? "off" : "on");
-			display_clocks(index);
-		}
+		ide_drive_t *drive;
+
+		if (index > 1) {
+			if (cmd_hwif1 == NULL)
+				continue;
+			drive = &cmd_hwif1->drives[index & 1];
+		} else {
+			if (cmd_hwif0 == NULL)
+				continue;
+			drive = &cmd_hwif0->drives[index & 1];
+		}
+
+#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
+		/*
+		 * Reset timing to the slowest speed and turn off prefetch.
+		 * This way, the drive identify code has a better chance.
+		 */
+		setup_counts [index] = 4;	/* max possible */
+		active_counts [index] = 16;	/* max possible */
+		recovery_counts [index] = 16;	/* max possible */
+		program_drive_counts(drive, index);
+		set_prefetch_mode(drive, index, 0);
+		printk("cmd640: drive%d timings/prefetch cleared\n", index);
 #else
 		/*
 		 * Set the drive unmask flags to match the prefetch setting
 		 */
-		check_prefetch (index);
+		check_prefetch(drive, index);
 		printk("cmd640: drive%d timings/prefetch(%s) preserved\n",
 			index, drive->no_io_32bit ? "off" : "on");
 #endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
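The cmd640_set_mode hunk above converts open-coded round-up divisions to DIV_ROUND_UP, which the kernel defines as ((n) + (d) - 1) / (d), so the rewrite is behaviour-preserving. A worked sketch of that count computation, with made-up PIO-mode-4-ish numbers chosen only for illustration:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		int bus_speed = 33;			/* MHz */
		int clock_time = 1000 / bus_speed;	/* ns per clock: 30 */
		int setup_time = 25, active_time = 70, cycle_time = 120;
		int recovery_time = cycle_time - (setup_time + active_time);

		int cycle_count    = DIV_ROUND_UP(cycle_time, clock_time);
		int setup_count    = DIV_ROUND_UP(setup_time, clock_time);
		int active_count   = DIV_ROUND_UP(active_time, clock_time);
		int recovery_count = DIV_ROUND_UP(recovery_time, clock_time);

		if (active_count < 2)
			active_count = 2;	/* minimum allowed by cmd640 */
		/* recovery must absorb whatever the cycle count leaves over */
		if (cycle_count - (setup_count + active_count) > recovery_count)
			recovery_count = cycle_count - (setup_count + active_count);

		printf("setup=%dT active=%dT recovery=%dT\n",
		       setup_count, active_count, recovery_count);
		return 0;
	}

Rounding up is the safe direction here: a count that is one clock too long merely wastes time, while one clock too short would violate the drive's minimum timings.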
diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/pci/cmd64x.c
index edabe6299efd..08674711d089 100644
--- a/drivers/ide/pci/cmd64x.c
+++ b/drivers/ide/pci/cmd64x.c
@@ -68,8 +68,8 @@ static u8 quantize_timing(int timing, int quant)
 */
 static void program_cycle_times (ide_drive_t *drive, int cycle_time, int active_time)
 {
 	struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
-	int clock_time = 1000 / system_bus_clock();
+	int clock_time = 1000 / (ide_pci_clk ? ide_pci_clk : system_bus_clock());
 	u8 cycle_count, active_count, recovery_count, drwtim;
 	static const u8 recovery_values[] =
 		{15, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 0};
@@ -128,7 +128,7 @@ static void cmd64x_tune_pio(ide_drive_t *drive, const u8 pio)
 				  ide_pio_timings[pio].active_time);
 
 	setup_count = quantize_timing(ide_pio_timings[pio].setup_time,
-				      1000 / system_bus_clock());
+			1000 / (ide_pci_clk ? ide_pci_clk : system_bus_clock()));
 
 	/*
 	 * The primary channel has individual address setup timing registers
@@ -223,7 +223,7 @@ static void cmd64x_set_dma_mode(ide_drive_t *drive, const u8 speed)
 	(void) pci_write_config_byte(dev, pciU, regU);
 }
 
-static int cmd648_ide_dma_end (ide_drive_t *drive)
+static int cmd648_dma_end(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = HWIF(drive);
 	unsigned long base = hwif->dma_base - (hwif->channel * 8);
@@ -239,7 +239,7 @@ static int cmd648_ide_dma_end (ide_drive_t *drive)
 	return err;
 }
 
-static int cmd64x_ide_dma_end (ide_drive_t *drive)
+static int cmd64x_dma_end(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = HWIF(drive);
 	struct pci_dev *dev = to_pci_dev(hwif->dev);
@@ -256,7 +256,7 @@ static int cmd64x_ide_dma_end (ide_drive_t *drive)
 	return err;
 }
 
-static int cmd648_ide_dma_test_irq (ide_drive_t *drive)
+static int cmd648_dma_test_irq(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = HWIF(drive);
 	unsigned long base = hwif->dma_base - (hwif->channel * 8);
@@ -279,7 +279,7 @@ static int cmd648_ide_dma_test_irq (ide_drive_t *drive)
 	return 0;
 }
 
-static int cmd64x_ide_dma_test_irq (ide_drive_t *drive)
+static int cmd64x_dma_test_irq(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = HWIF(drive);
 	struct pci_dev *dev = to_pci_dev(hwif->dev);
@@ -310,7 +310,7 @@ static int cmd64x_ide_dma_test_irq (ide_drive_t *drive)
 * event order for DMA transfers.
 */
 
-static int cmd646_1_ide_dma_end (ide_drive_t *drive)
+static int cmd646_1_dma_end(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = HWIF(drive);
 	u8 dma_stat = 0, dma_cmd = 0;
@@ -370,7 +370,7 @@ static unsigned int __devinit init_chipset_cmd64x(struct pci_dev *dev, const cha
 	return 0;
 }
 
-static u8 __devinit ata66_cmd64x(ide_hwif_t *hwif)
+static u8 __devinit cmd64x_cable_detect(ide_hwif_t *hwif)
 {
 	struct pci_dev *dev = to_pci_dev(hwif->dev);
 	u8 bmidecsr = 0, mask = hwif->channel ? 0x02 : 0x01;
@@ -385,91 +385,85 @@ static u8 __devinit ata66_cmd64x(ide_hwif_t *hwif)
 	}
 }
 
-static void __devinit init_hwif_cmd64x(ide_hwif_t *hwif)
-{
-	struct pci_dev *dev = to_pci_dev(hwif->dev);
-
-	hwif->set_pio_mode = &cmd64x_set_pio_mode;
-	hwif->set_dma_mode = &cmd64x_set_dma_mode;
-
-	hwif->cable_detect = ata66_cmd64x;
+static const struct ide_port_ops cmd64x_port_ops = {
+	.set_pio_mode	= cmd64x_set_pio_mode,
+	.set_dma_mode	= cmd64x_set_dma_mode,
+	.cable_detect	= cmd64x_cable_detect,
+};
 
-	if (!hwif->dma_base)
-		return;
+static const struct ide_dma_ops cmd64x_dma_ops = {
+	.dma_host_set	= ide_dma_host_set,
+	.dma_setup	= ide_dma_setup,
+	.dma_exec_cmd	= ide_dma_exec_cmd,
+	.dma_start	= ide_dma_start,
+	.dma_end	= cmd64x_dma_end,
+	.dma_test_irq	= cmd64x_dma_test_irq,
+	.dma_lost_irq	= ide_dma_lost_irq,
+	.dma_timeout	= ide_dma_timeout,
+};
 
-	/*
-	 * UltraDMA only supported on PCI646U and PCI646U2, which
-	 * correspond to revisions 0x03, 0x05 and 0x07 respectively.
-	 * Actually, although the CMD tech support people won't
-	 * tell me the details, the 0x03 revision cannot support
-	 * UDMA correctly without hardware modifications, and even
-	 * then it only works with Quantum disks due to some
-	 * hold time assumptions in the 646U part which are fixed
-	 * in the 646U2.
-	 *
-	 * So we only do UltraDMA on revision 0x05 and 0x07 chipsets.
-	 */
-	if (dev->device == PCI_DEVICE_ID_CMD_646 && dev->revision < 5)
-		hwif->ultra_mask = 0x00;
+static const struct ide_dma_ops cmd646_rev1_dma_ops = {
+	.dma_host_set	= ide_dma_host_set,
+	.dma_setup	= ide_dma_setup,
+	.dma_exec_cmd	= ide_dma_exec_cmd,
+	.dma_start	= ide_dma_start,
+	.dma_end	= cmd646_1_dma_end,
+	.dma_test_irq	= ide_dma_test_irq,
+	.dma_lost_irq	= ide_dma_lost_irq,
+	.dma_timeout	= ide_dma_timeout,
+};
 
-	switch (dev->device) {
-	case PCI_DEVICE_ID_CMD_648:
-	case PCI_DEVICE_ID_CMD_649:
-	alt_irq_bits:
-		hwif->ide_dma_end = &cmd648_ide_dma_end;
-		hwif->ide_dma_test_irq = &cmd648_ide_dma_test_irq;
-		break;
-	case PCI_DEVICE_ID_CMD_646:
-		if (dev->revision == 0x01) {
-			hwif->ide_dma_end = &cmd646_1_ide_dma_end;
-			break;
-		} else if (dev->revision >= 0x03)
-			goto alt_irq_bits;
-		/* fall thru */
-	default:
-		hwif->ide_dma_end = &cmd64x_ide_dma_end;
-		hwif->ide_dma_test_irq = &cmd64x_ide_dma_test_irq;
-		break;
-	}
-}
+static const struct ide_dma_ops cmd648_dma_ops = {
+	.dma_host_set	= ide_dma_host_set,
+	.dma_setup	= ide_dma_setup,
+	.dma_exec_cmd	= ide_dma_exec_cmd,
+	.dma_start	= ide_dma_start,
+	.dma_end	= cmd648_dma_end,
+	.dma_test_irq	= cmd648_dma_test_irq,
+	.dma_lost_irq	= ide_dma_lost_irq,
+	.dma_timeout	= ide_dma_timeout,
+};
 
 static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
 	{ /* 0 */
 		.name		= "CMD643",
 		.init_chipset	= init_chipset_cmd64x,
-		.init_hwif	= init_hwif_cmd64x,
 		.enablebits	= {{0x00,0x00,0x00}, {0x51,0x08,0x08}},
+		.port_ops	= &cmd64x_port_ops,
+		.dma_ops	= &cmd64x_dma_ops,
 		.host_flags	= IDE_HFLAG_CLEAR_SIMPLEX |
-				  IDE_HFLAG_ABUSE_PREFETCH |
-				  IDE_HFLAG_BOOTABLE,
+				  IDE_HFLAG_ABUSE_PREFETCH,
 		.pio_mask	= ATA_PIO5,
 		.mwdma_mask	= ATA_MWDMA2,
 		.udma_mask	= 0x00, /* no udma */
 	},{ /* 1 */
 		.name		= "CMD646",
 		.init_chipset	= init_chipset_cmd64x,
-		.init_hwif	= init_hwif_cmd64x,
 		.enablebits	= {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
 		.chipset	= ide_cmd646,
-		.host_flags	= IDE_HFLAG_ABUSE_PREFETCH | IDE_HFLAG_BOOTABLE,
+		.port_ops	= &cmd64x_port_ops,
+		.dma_ops	= &cmd648_dma_ops,
+		.host_flags	= IDE_HFLAG_ABUSE_PREFETCH,
 		.pio_mask	= ATA_PIO5,
 		.mwdma_mask	= ATA_MWDMA2,
 		.udma_mask	= ATA_UDMA2,
 	},{ /* 2 */
 		.name		= "CMD648",
 		.init_chipset	= init_chipset_cmd64x,
-		.init_hwif	= init_hwif_cmd64x,
 		.enablebits	= {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
-		.host_flags	= IDE_HFLAG_ABUSE_PREFETCH | IDE_HFLAG_BOOTABLE,
+		.port_ops	= &cmd64x_port_ops,
+		.dma_ops	= &cmd648_dma_ops,
+		.host_flags	= IDE_HFLAG_ABUSE_PREFETCH,
 		.pio_mask	= ATA_PIO5,
 		.mwdma_mask	= ATA_MWDMA2,
 		.udma_mask	= ATA_UDMA4,
 	},{ /* 3 */
 		.name		= "CMD649",
 		.init_chipset	= init_chipset_cmd64x,
-		.init_hwif	= init_hwif_cmd64x,
 		.enablebits	= {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
-		.host_flags	= IDE_HFLAG_ABUSE_PREFETCH | IDE_HFLAG_BOOTABLE,
+		.port_ops	= &cmd64x_port_ops,
+		.dma_ops	= &cmd648_dma_ops,
+		.host_flags	= IDE_HFLAG_ABUSE_PREFETCH,
 		.pio_mask	= ATA_PIO5,
 		.mwdma_mask	= ATA_MWDMA2,
 		.udma_mask	= ATA_UDMA5,
@@ -483,12 +477,35 @@ static int __devinit cmd64x_init_one(struct pci_dev *dev, const struct pci_devic
 
 	d = cmd64x_chipsets[idx];
 
-	/*
-	 * The original PCI0646 didn't have the primary channel enable bit,
-	 * it appeared starting with PCI0646U (i.e. revision ID 3).
-	 */
-	if (idx == 1 && dev->revision < 3)
-		d.enablebits[0].reg = 0;
+	if (idx == 1) {
+		/*
+		 * UltraDMA only supported on PCI646U and PCI646U2, which
+		 * correspond to revisions 0x03, 0x05 and 0x07 respectively.
+		 * Actually, although the CMD tech support people won't
+		 * tell me the details, the 0x03 revision cannot support
+		 * UDMA correctly without hardware modifications, and even
+		 * then it only works with Quantum disks due to some
+		 * hold time assumptions in the 646U part which are fixed
+		 * in the 646U2.
+		 *
+		 * So we only do UltraDMA on revision 0x05 and 0x07 chipsets.
+		 */
+		if (dev->revision < 5) {
+			d.udma_mask = 0x00;
+			/*
+			 * The original PCI0646 didn't have the primary
+			 * channel enable bit, it appeared starting with
+			 * PCI0646U (i.e. revision ID 3).
+			 */
+			if (dev->revision < 3) {
+				d.enablebits[0].reg = 0;
+				if (dev->revision == 1)
+					d.dma_ops = &cmd646_rev1_dma_ops;
+				else
+					d.dma_ops = &cmd64x_dma_ops;
+			}
+		}
+	}
 
 	return ide_setup_pci_device(dev, &d);
 }
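cmd64x_init_one above illustrates the copy-then-patch idiom: the const port_info template is copied by value onto the stack and only the copy is adjusted for the PCI revision actually found, so the shared tables stay read-only. A simplified standalone sketch of that selection logic follows; the struct layouts are invented stand-ins, while the revision cut-offs and table names mirror the hunk above.

	#include <stdio.h>

	struct dma_ops { const char *label; };

	static const struct dma_ops cmd64x_dma_ops      = { "cmd64x" };
	static const struct dma_ops cmd646_rev1_dma_ops = { "cmd646 rev1" };
	static const struct dma_ops cmd648_dma_ops      = { "cmd648" };

	struct port_info {
		unsigned char udma_mask;
		const struct dma_ops *dma_ops;
	};

	static struct port_info pick_cmd646(unsigned char revision)
	{
		/* copy of the const template; only the copy is patched */
		struct port_info d = { .udma_mask = 0x07, .dma_ops = &cmd648_dma_ops };

		if (revision < 5) {
			d.udma_mask = 0x00;	/* no trustworthy UDMA before rev 5 */
			if (revision < 3)
				d.dma_ops = (revision == 1) ? &cmd646_rev1_dma_ops
							    : &cmd64x_dma_ops;
		}
		return d;
	}

	int main(void)
	{
		for (unsigned char rev = 1; rev <= 5; rev++) {
			struct port_info d = pick_cmd646(rev);
			printf("rev %u -> %s ops, udma mask 0x%02x\n",
			       rev, d.dma_ops->label, d.udma_mask);
		}
		return 0;
	}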
diff --git a/drivers/ide/pci/cs5520.c b/drivers/ide/pci/cs5520.c
index 1c163e4ef03f..17669a434438 100644
--- a/drivers/ide/pci/cs5520.c
+++ b/drivers/ide/pci/cs5520.c
@@ -103,27 +103,32 @@ static void cs5520_dma_host_set(ide_drive_t *drive, int on)
 	ide_dma_host_set(drive, on);
 }
 
-static void __devinit init_hwif_cs5520(ide_hwif_t *hwif)
-{
-	hwif->set_pio_mode = &cs5520_set_pio_mode;
-	hwif->set_dma_mode = &cs5520_set_dma_mode;
-
-	if (hwif->dma_base == 0)
-		return;
+static const struct ide_port_ops cs5520_port_ops = {
+	.set_pio_mode	= cs5520_set_pio_mode,
+	.set_dma_mode	= cs5520_set_dma_mode,
+};
 
-	hwif->dma_host_set = &cs5520_dma_host_set;
-}
+static const struct ide_dma_ops cs5520_dma_ops = {
+	.dma_host_set	= cs5520_dma_host_set,
+	.dma_setup	= ide_dma_setup,
+	.dma_exec_cmd	= ide_dma_exec_cmd,
+	.dma_start	= ide_dma_start,
+	.dma_end	= __ide_dma_end,
+	.dma_test_irq	= ide_dma_test_irq,
+	.dma_lost_irq	= ide_dma_lost_irq,
+	.dma_timeout	= ide_dma_timeout,
+};
 
 #define DECLARE_CS_DEV(name_str)	\
 	{				\
 		.name		= name_str,	\
-		.init_hwif	= init_hwif_cs5520,	\
+		.port_ops	= &cs5520_port_ops,	\
+		.dma_ops	= &cs5520_dma_ops,	\
 		.host_flags	= IDE_HFLAG_ISA_PORTS |	\
 				  IDE_HFLAG_CS5520 |	\
 				  IDE_HFLAG_VDMA |	\
 				  IDE_HFLAG_NO_ATAPI_DMA |	\
-				  IDE_HFLAG_ABUSE_SET_DMA_MODE |\
-				  IDE_HFLAG_BOOTABLE,	\
+				  IDE_HFLAG_ABUSE_SET_DMA_MODE,	\
 		.pio_mask	= ATA_PIO4,	\
 	}
 
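cs5520_dma_ops is a good example of "override one hook, delegate the rest": every field points at a generic ide_dma_* helper except dma_host_set, where the driver's own wrapper (cs5520_dma_host_set, partially visible at the top of the hunk, which itself ends by calling ide_dma_host_set) can apply chip-specific checks first. A hedged sketch of the shape of that pattern; the types, the vdma_ok field and the refusal condition here are all invented for illustration, not taken from the driver.

	#include <stdio.h>

	struct drive { const char *name; int vdma_ok; };

	/* stand-in for the generic helper every other field delegates to */
	static void generic_dma_host_set(struct drive *d, int on)
	{
		printf("%s: DMA %s\n", d->name, on ? "on" : "off");
	}

	/* driver wrapper: do a chip-specific check, then delegate */
	static void wrapped_dma_host_set(struct drive *d, int on)
	{
		if (on && !d->vdma_ok) {
			printf("%s: VDMA not set up, refusing DMA\n", d->name);
			return;
		}
		generic_dma_host_set(d, on);
	}

	struct dma_ops { void (*dma_host_set)(struct drive *, int); };

	static const struct dma_ops demo_dma_ops = {
		.dma_host_set = wrapped_dma_host_set,	/* the one override */
	};

	int main(void)
	{
		struct drive d = { "hda", 0 };

		demo_dma_ops.dma_host_set(&d, 1);	/* refused */
		d.vdma_ok = 1;
		demo_dma_ops.dma_host_set(&d, 1);	/* delegates */
		return 0;
	}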
diff --git a/drivers/ide/pci/cs5530.c b/drivers/ide/pci/cs5530.c
index 941a1344820b..f5534c1ff349 100644
--- a/drivers/ide/pci/cs5530.c
+++ b/drivers/ide/pci/cs5530.c
@@ -228,29 +228,27 @@ static void __devinit init_hwif_cs5530 (ide_hwif_t *hwif)
 	unsigned long basereg;
 	u32 d0_timings;
 
-	hwif->set_pio_mode = &cs5530_set_pio_mode;
-	hwif->set_dma_mode = &cs5530_set_dma_mode;
-
 	basereg = CS5530_BASEREG(hwif);
 	d0_timings = inl(basereg + 0);
 	if (CS5530_BAD_PIO(d0_timings))
 		outl(cs5530_pio_timings[(d0_timings >> 31) & 1][0], basereg + 0);
 	if (CS5530_BAD_PIO(inl(basereg + 8)))
 		outl(cs5530_pio_timings[(d0_timings >> 31) & 1][0], basereg + 8);
-
-	if (hwif->dma_base == 0)
-		return;
-
-	hwif->udma_filter = cs5530_udma_filter;
 }
 
+static const struct ide_port_ops cs5530_port_ops = {
+	.set_pio_mode	= cs5530_set_pio_mode,
+	.set_dma_mode	= cs5530_set_dma_mode,
+	.udma_filter	= cs5530_udma_filter,
+};
+
 static const struct ide_port_info cs5530_chipset __devinitdata = {
 	.name		= "CS5530",
 	.init_chipset	= init_chipset_cs5530,
 	.init_hwif	= init_hwif_cs5530,
+	.port_ops	= &cs5530_port_ops,
 	.host_flags	= IDE_HFLAG_SERIALIZE |
-			  IDE_HFLAG_POST_SET_MODE |
-			  IDE_HFLAG_BOOTABLE,
+			  IDE_HFLAG_POST_SET_MODE,
 	.pio_mask	= ATA_PIO4,
 	.mwdma_mask	= ATA_MWDMA2,
 	.udma_mask	= ATA_UDMA2,
diff --git a/drivers/ide/pci/cs5535.c b/drivers/ide/pci/cs5535.c
index d7b5ea992e94..99fe91a191b8 100644
--- a/drivers/ide/pci/cs5535.c
+++ b/drivers/ide/pci/cs5535.c
@@ -166,27 +166,17 @@ static u8 __devinit cs5535_cable_detect(ide_hwif_t *hwif)
 	return (bit & 1) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
 }
 
-/****
- * init_hwif_cs5535 - Initialize one ide cannel
- * @hwif: Channel descriptor
- *
- * This gets invoked by the IDE driver once for each channel. It
- * performs channel-specific pre-initialization before drive probing.
- *
- */
-static void __devinit init_hwif_cs5535(ide_hwif_t *hwif)
-{
-	hwif->set_pio_mode = &cs5535_set_pio_mode;
-	hwif->set_dma_mode = &cs5535_set_dma_mode;
-
-	hwif->cable_detect = cs5535_cable_detect;
-}
+static const struct ide_port_ops cs5535_port_ops = {
+	.set_pio_mode	= cs5535_set_pio_mode,
+	.set_dma_mode	= cs5535_set_dma_mode,
+	.cable_detect	= cs5535_cable_detect,
+};
 
 static const struct ide_port_info cs5535_chipset __devinitdata = {
 	.name		= "CS5535",
-	.init_hwif	= init_hwif_cs5535,
+	.port_ops	= &cs5535_port_ops,
 	.host_flags	= IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE |
-			  IDE_HFLAG_ABUSE_SET_DMA_MODE | IDE_HFLAG_BOOTABLE,
+			  IDE_HFLAG_ABUSE_SET_DMA_MODE,
 	.pio_mask	= ATA_PIO4,
 	.mwdma_mask	= ATA_MWDMA2,
 	.udma_mask	= ATA_UDMA4,
diff --git a/drivers/ide/pci/cy82c693.c b/drivers/ide/pci/cy82c693.c
index 724cbacf4e5b..77cc22c2ad45 100644
--- a/drivers/ide/pci/cy82c693.c
+++ b/drivers/ide/pci/cy82c693.c
@@ -6,7 +6,7 @@
6 * 6 *
7 * The CY82C693 chipset is used on Digital's PC-Alpha 164SX boards. 7 * The CY82C693 chipset is used on Digital's PC-Alpha 164SX boards.
8 * Writing the driver was quite simple, since most of the job is 8 * Writing the driver was quite simple, since most of the job is
9 * done by the generic pci-ide support. 9 * done by the generic pci-ide support.
10 * The hard part was finding the CY82C693's datasheet on Cypress's 10 * The hard part was finding the CY82C693's datasheet on Cypress's
11 * web page :-(. But Altavista solved this problem :-). 11 * web page :-(. But Altavista solved this problem :-).
12 * 12 *
@@ -15,12 +15,10 @@
15 * - I recently got a 16.8G IBM DTTA, so I was able to test it with 15 * - I recently got a 16.8G IBM DTTA, so I was able to test it with
16 * a large and fast disk - the results look great, so I'd say the 16 * a large and fast disk - the results look great, so I'd say the
17 * driver is working fine :-) 17 * driver is working fine :-)
18 * hdparm -t reports 8.17 MB/sec at about 6% CPU usage for the DTTA 18 * hdparm -t reports 8.17 MB/sec at about 6% CPU usage for the DTTA
19 * - this is my first linux driver, so there's probably a lot of room 19 * - this is my first linux driver, so there's probably a lot of room
20 * for optimizations and bug fixing, so feel free to do it. 20 * for optimizations and bug fixing, so feel free to do it.
21 * - use idebus=xx parameter to set PCI bus speed - needed to calc 21 * - if using PIO mode it's a good idea to set the PIO mode and
22 * timings for PIO modes (default will be 40)
23 * - if using PIO mode it's a good idea to set the PIO mode and
24 * 32-bit I/O support (if possible), e.g. hdparm -p2 -c1 /dev/hda 22 * 32-bit I/O support (if possible), e.g. hdparm -p2 -c1 /dev/hda
25 * - I had some problems with my IBM DHEA with PIO modes < 2 23 * - I had some problems with my IBM DHEA with PIO modes < 2
26 * (lost interrupts) ????? 24 * (lost interrupts) ?????
@@ -110,11 +108,11 @@ typedef struct pio_clocks_s {
110 * calc clocks using bus_speed 108 * calc clocks using bus_speed
111 * returns (rounded up) time in bus clocks for time in ns 109 * returns (rounded up) time in bus clocks for time in ns
112 */ 110 */
113static int calc_clk (int time, int bus_speed) 111static int calc_clk(int time, int bus_speed)
114{ 112{
115 int clocks; 113 int clocks;
116 114
117 clocks = (time*bus_speed+999)/1000 -1; 115 clocks = (time*bus_speed+999)/1000 - 1;
118 116
119 if (clocks < 0) 117 if (clocks < 0)
120 clocks = 0; 118 clocks = 0;
@@ -132,11 +130,11 @@ static int calc_clk (int time, int bus_speed)
132 * NOTE: for mode 0,1 and 2 drives 8-bit IDE command control registers are used 130 * NOTE: for mode 0,1 and 2 drives 8-bit IDE command control registers are used
133 * for mode 3 and 4 drives 8 and 16-bit timings are the same 131 * for mode 3 and 4 drives 8 and 16-bit timings are the same
134 * 132 *
135 */ 133 */
136static void compute_clocks (u8 pio, pio_clocks_t *p_pclk) 134static void compute_clocks(u8 pio, pio_clocks_t *p_pclk)
137{ 135{
138 int clk1, clk2; 136 int clk1, clk2;
139 int bus_speed = system_bus_clock(); /* get speed of PCI bus */ 137 int bus_speed = ide_pci_clk ? ide_pci_clk : system_bus_clock();
140 138
141 /* we don't check against CY82C693's min and max speed, 139 /* we don't check against CY82C693's min and max speed,
142 * so you can play with the idebus=xx parameter 140 * so you can play with the idebus=xx parameter
@@ -158,7 +156,7 @@ static void compute_clocks (u8 pio, pio_clocks_t *p_pclk)
158 clk1 = (clk1<<4)|clk2; /* combine active and recovery clocks */ 156 clk1 = (clk1<<4)|clk2; /* combine active and recovery clocks */
159 157
160 /* note: we use the same values for 16bit IOR and IOW 158 /* note: we use the same values for 16bit IOR and IOW
161 * those are all the same, since I don't have other 159 * those are all the same, since I don't have other
162 * timings than those from ide-lib.c 160 * timings than those from ide-lib.c
163 */ 161 */
164 162
@@ -186,7 +184,7 @@ static void cy82c693_set_dma_mode(ide_drive_t *drive, const u8 mode)
186 outb(index, CY82_INDEX_PORT); 184 outb(index, CY82_INDEX_PORT);
187 data = inb(CY82_DATA_PORT); 185 data = inb(CY82_DATA_PORT);
188 186
189 printk (KERN_INFO "%s (ch=%d, dev=%d): DMA mode is %d (single=%d)\n", 187 printk(KERN_INFO "%s (ch=%d, dev=%d): DMA mode is %d (single=%d)\n",
190 drive->name, HWIF(drive)->channel, drive->select.b.unit, 188 drive->name, HWIF(drive)->channel, drive->select.b.unit,
191 (data&0x3), ((data>>2)&1)); 189 (data&0x3), ((data>>2)&1));
192#endif /* CY82C693_DEBUG_LOGS */ 190#endif /* CY82C693_DEBUG_LOGS */
@@ -202,7 +200,7 @@ static void cy82c693_set_dma_mode(ide_drive_t *drive, const u8 mode)
202 mode & 3, single); 200 mode & 3, single);
203#endif /* CY82C693_DEBUG_INFO */ 201#endif /* CY82C693_DEBUG_INFO */
204 202
205 /* 203 /*
206 * note: below we set the value for Bus Master IDE TimeOut Register 204 * note: below we set the value for Bus Master IDE TimeOut Register
207 * I'm not absolutely sure what this does, but it solved my problem 205 * I'm not absolutely sure what this does, but it solved my problem
208 * with IDE DMA and sound, so I now can play sound and work with 206 * with IDE DMA and sound, so I now can play sound and work with
@@ -216,8 +214,8 @@ static void cy82c693_set_dma_mode(ide_drive_t *drive, const u8 mode)
216 outb(CY82_INDEX_TIMEOUT, CY82_INDEX_PORT); 214 outb(CY82_INDEX_TIMEOUT, CY82_INDEX_PORT);
217 outb(data, CY82_DATA_PORT); 215 outb(data, CY82_DATA_PORT);
218 216
219#if CY82C693_DEBUG_INFO 217#if CY82C693_DEBUG_INFO
220 printk (KERN_INFO "%s: Set IDE Bus Master TimeOut Register to 0x%X\n", 218 printk(KERN_INFO "%s: Set IDE Bus Master TimeOut Register to 0x%X\n",
221 drive->name, data); 219 drive->name, data);
222#endif /* CY82C693_DEBUG_INFO */ 220#endif /* CY82C693_DEBUG_INFO */
223} 221}
@@ -242,14 +240,14 @@ static void cy82c693_set_pio_mode(ide_drive_t *drive, const u8 pio)
242 240
243#if CY82C693_DEBUG_LOGS 241#if CY82C693_DEBUG_LOGS
244 /* for debug let's show the register values */ 242 /* for debug let's show the register values */
245 243
246 if (drive->select.b.unit == 0) { 244 if (drive->select.b.unit == 0) {
247 /* 245 /*
248 * get master drive registers 246 * get master drive registers
249 * address setup control register 247 * address setup control register
250 * is 32 bit !!! 248 * is 32 bit !!!
251 */ 249 */
252 pci_read_config_dword(dev, CY82_IDE_ADDRSETUP, &addrCtrl); 250 pci_read_config_dword(dev, CY82_IDE_ADDRSETUP, &addrCtrl);
253 addrCtrl &= 0x0F; 251 addrCtrl &= 0x0F;
254 252
255 /* now let's get the remaining registers */ 253 /* now let's get the remaining registers */
@@ -261,7 +259,7 @@ static void cy82c693_set_pio_mode(ide_drive_t *drive, const u8 pio)
261 * set slave drive registers 259 * set slave drive registers
262 * address setup control register 260 * address setup control register
263 * is 32 bit !!! 261 * is 32 bit !!!
264 */ 262 */
265 pci_read_config_dword(dev, CY82_IDE_ADDRSETUP, &addrCtrl); 263 pci_read_config_dword(dev, CY82_IDE_ADDRSETUP, &addrCtrl);
266 264
267 addrCtrl &= 0xF0; 265 addrCtrl &= 0xF0;
@@ -288,9 +286,9 @@ static void cy82c693_set_pio_mode(ide_drive_t *drive, const u8 pio)
288 * set master drive 286 * set master drive
289 * address setup control register 287 * address setup control register
290 * is 32 bit !!! 288 * is 32 bit !!!
291 */ 289 */
292 pci_read_config_dword(dev, CY82_IDE_ADDRSETUP, &addrCtrl); 290 pci_read_config_dword(dev, CY82_IDE_ADDRSETUP, &addrCtrl);
293 291
294 addrCtrl &= (~0xF); 292 addrCtrl &= (~0xF);
295 addrCtrl |= (unsigned int)pclk.address_time; 293 addrCtrl |= (unsigned int)pclk.address_time;
296 pci_write_config_dword(dev, CY82_IDE_ADDRSETUP, addrCtrl); 294 pci_write_config_dword(dev, CY82_IDE_ADDRSETUP, addrCtrl);
@@ -299,14 +297,14 @@ static void cy82c693_set_pio_mode(ide_drive_t *drive, const u8 pio)
299 pci_write_config_byte(dev, CY82_IDE_MASTER_IOR, pclk.time_16r); 297 pci_write_config_byte(dev, CY82_IDE_MASTER_IOR, pclk.time_16r);
300 pci_write_config_byte(dev, CY82_IDE_MASTER_IOW, pclk.time_16w); 298 pci_write_config_byte(dev, CY82_IDE_MASTER_IOW, pclk.time_16w);
301 pci_write_config_byte(dev, CY82_IDE_MASTER_8BIT, pclk.time_8); 299 pci_write_config_byte(dev, CY82_IDE_MASTER_8BIT, pclk.time_8);
302 300
303 addrCtrl &= 0xF; 301 addrCtrl &= 0xF;
304 } else { 302 } else {
305 /* 303 /*
306 * set slave drive 304 * set slave drive
307 * address setup control register 305 * address setup control register
308 * is 32 bit !!! 306 * is 32 bit !!!
309 */ 307 */
310 pci_read_config_dword(dev, CY82_IDE_ADDRSETUP, &addrCtrl); 308 pci_read_config_dword(dev, CY82_IDE_ADDRSETUP, &addrCtrl);
311 309
312 addrCtrl &= (~0xF0); 310 addrCtrl &= (~0xF0);
@@ -320,7 +318,7 @@ static void cy82c693_set_pio_mode(ide_drive_t *drive, const u8 pio)
320 318
321 addrCtrl >>= 4; 319 addrCtrl >>= 4;
322 addrCtrl &= 0xF; 320 addrCtrl &= 0xF;
323 } 321 }
324 322
325#if CY82C693_DEBUG_INFO 323#if CY82C693_DEBUG_INFO
326 printk(KERN_INFO "%s (ch=%d, dev=%d): set PIO timing to " 324 printk(KERN_INFO "%s (ch=%d, dev=%d): set PIO timing to "
@@ -340,41 +338,41 @@ static unsigned int __devinit init_chipset_cy82c693(struct pci_dev *dev, const c
340 338
341#ifdef CY82C693_SETDMA_CLOCK 339#ifdef CY82C693_SETDMA_CLOCK
342 u8 data = 0; 340 u8 data = 0;
343#endif /* CY82C693_SETDMA_CLOCK */ 341#endif /* CY82C693_SETDMA_CLOCK */
344 342
345 /* write info about this version of the driver */ 343 /* write info about this version of the driver */
346 printk(KERN_INFO CY82_VERSION "\n"); 344 printk(KERN_INFO CY82_VERSION "\n");
347 345
348#ifdef CY82C693_SETDMA_CLOCK 346#ifdef CY82C693_SETDMA_CLOCK
349 /* okay let's set the DMA clock speed */ 347 /* okay let's set the DMA clock speed */
350 348
351 outb(CY82_INDEX_CTRLREG1, CY82_INDEX_PORT); 349 outb(CY82_INDEX_CTRLREG1, CY82_INDEX_PORT);
352 data = inb(CY82_DATA_PORT); 350 data = inb(CY82_DATA_PORT);
353 351
354#if CY82C693_DEBUG_INFO 352#if CY82C693_DEBUG_INFO
355 printk(KERN_INFO "%s: Peripheral Configuration Register: 0x%X\n", 353 printk(KERN_INFO "%s: Peripheral Configuration Register: 0x%X\n",
356 name, data); 354 name, data);
357#endif /* CY82C693_DEBUG_INFO */ 355#endif /* CY82C693_DEBUG_INFO */
358 356
359 /* 357 /*
360 * for some reason sometimes the DMA controller 358 * for some reason sometimes the DMA controller
361 * speed is set to ATCLK/2 ???? - we fix this here 359 * speed is set to ATCLK/2 ???? - we fix this here
362 * 360 *
363 * note: I don't know what causes this strange behaviour, 361 * note: I don't know what causes this strange behaviour,
364 * but even changing the dma speed doesn't solve it :-( 362 * but even changing the dma speed doesn't solve it :-(
365 * the ide performance is still only half the normal speed 363 * the ide performance is still only half the normal speed
366 * 364 *
367 * if anybody knows what goes wrong with my machine, please 365 * if anybody knows what goes wrong with my machine, please
368 * let me know - ASK 366 * let me know - ASK
369 */ 367 */
370 368
371 data |= 0x03; 369 data |= 0x03;
372 370
373 outb(CY82_INDEX_CTRLREG1, CY82_INDEX_PORT); 371 outb(CY82_INDEX_CTRLREG1, CY82_INDEX_PORT);
374 outb(data, CY82_DATA_PORT); 372 outb(data, CY82_DATA_PORT);
375 373
376#if CY82C693_DEBUG_INFO 374#if CY82C693_DEBUG_INFO
377 printk (KERN_INFO "%s: New Peripheral Configuration Register: 0x%X\n", 375 printk(KERN_INFO "%s: New Peripheral Configuration Register: 0x%X\n",
378 name, data); 376 name, data);
379#endif /* CY82C693_DEBUG_INFO */ 377#endif /* CY82C693_DEBUG_INFO */
380 378
@@ -382,15 +380,6 @@ static unsigned int __devinit init_chipset_cy82c693(struct pci_dev *dev, const c
382 return 0; 380 return 0;
383} 381}
384 382
385/*
386 * the init function - called for each ide channel once
387 */
388static void __devinit init_hwif_cy82c693(ide_hwif_t *hwif)
389{
390 hwif->set_pio_mode = &cy82c693_set_pio_mode;
391 hwif->set_dma_mode = &cy82c693_set_dma_mode;
392}
393
394static void __devinit init_iops_cy82c693(ide_hwif_t *hwif) 383static void __devinit init_iops_cy82c693(ide_hwif_t *hwif)
395{ 384{
396 static ide_hwif_t *primary; 385 static ide_hwif_t *primary;
@@ -404,14 +393,18 @@ static void __devinit init_iops_cy82c693(ide_hwif_t *hwif)
404 } 393 }
405} 394}
406 395
396static const struct ide_port_ops cy82c693_port_ops = {
397 .set_pio_mode = cy82c693_set_pio_mode,
398 .set_dma_mode = cy82c693_set_dma_mode,
399};
400
407static const struct ide_port_info cy82c693_chipset __devinitdata = { 401static const struct ide_port_info cy82c693_chipset __devinitdata = {
408 .name = "CY82C693", 402 .name = "CY82C693",
409 .init_chipset = init_chipset_cy82c693, 403 .init_chipset = init_chipset_cy82c693,
410 .init_iops = init_iops_cy82c693, 404 .init_iops = init_iops_cy82c693,
411 .init_hwif = init_hwif_cy82c693, 405 .port_ops = &cy82c693_port_ops,
412 .chipset = ide_cy82c693, 406 .chipset = ide_cy82c693,
413 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_CY82C693 | 407 .host_flags = IDE_HFLAG_SINGLE,
414 IDE_HFLAG_BOOTABLE,
415 .pio_mask = ATA_PIO4, 408 .pio_mask = ATA_PIO4,
416 .swdma_mask = ATA_SWDMA2, 409 .swdma_mask = ATA_SWDMA2,
417 .mwdma_mask = ATA_MWDMA2, 410 .mwdma_mask = ATA_MWDMA2,
@@ -424,7 +417,7 @@ static int __devinit cy82c693_init_one(struct pci_dev *dev, const struct pci_dev
424 417
425 /* CY82C693 is more than just an IDE controller. 418 /* CY82C693 is more than just an IDE controller.
426 Function 1 is the primary IDE channel, function 2 the secondary. */ 419 Function 1 is the primary IDE channel, function 2 the secondary. */
427 if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE && 420 if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE &&
428 PCI_FUNC(dev->devfn) == 1) { 421 PCI_FUNC(dev->devfn) == 1) {
429 dev2 = pci_get_slot(dev->bus, dev->devfn + 1); 422 dev2 = pci_get_slot(dev->bus, dev->devfn + 1);
430 ret = ide_setup_pci_devices(dev, dev2, &cy82c693_chipset); 423 ret = ide_setup_pci_devices(dev, dev2, &cy82c693_chipset);
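The cy82c693 timing math is worth spelling out: with time in nanoseconds and the bus clock in MHz, time * bus_speed / 1000 is the cycle count, the + 999 makes the integer division round up, and the trailing - 1 converts to the register's zero-based encoding; the patch additionally prefers the ide_pci_clk parameter over the measured system_bus_clock(). A standalone sketch (the 33 MHz figure is just an example value):

#include <stdio.h>

/*
 * Round a time in ns up to PCI bus clocks, as calc_clk() does:
 * ceil(time * bus_speed / 1000), stored zero-based and clamped at 0.
 */
static int calc_clk(int time, int bus_speed)
{
	int clocks = (time * bus_speed + 999) / 1000 - 1;

	return clocks < 0 ? 0 : clocks;
}

int main(void)
{
	/* 70 ns active time on a 33 MHz bus: 2.31 clocks -> 3 -> encoded 2 */
	printf("%d\n", calc_clk(70, 33));
	return 0;
}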
diff --git a/drivers/ide/pci/delkin_cb.c b/drivers/ide/pci/delkin_cb.c
index 961698d655eb..b9e457996d0e 100644
--- a/drivers/ide/pci/delkin_cb.c
+++ b/drivers/ide/pci/delkin_cb.c
@@ -43,6 +43,10 @@ static const u8 setup[] = {
43 0x00, 0x00, 0x00, 0x00, 0xa4, 0x83, 0x02, 0x13, 43 0x00, 0x00, 0x00, 0x00, 0xa4, 0x83, 0x02, 0x13,
44}; 44};
45 45
46static const struct ide_port_ops delkin_cb_port_ops = {
47 .quirkproc = ide_undecoded_slave,
48};
49
46static int __devinit 50static int __devinit
47delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id) 51delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id)
48{ 52{
@@ -71,26 +75,21 @@ delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id)
71 if (setup[i]) 75 if (setup[i])
72 outb(setup[i], base + i); 76 outb(setup[i], base + i);
73 } 77 }
74 pci_release_regions(dev); /* IDE layer handles regions itself */
75 78
76 memset(&hw, 0, sizeof(hw)); 79 memset(&hw, 0, sizeof(hw));
77 ide_std_init_ports(&hw, base + 0x10, base + 0x1e); 80 ide_std_init_ports(&hw, base + 0x10, base + 0x1e);
78 hw.irq = dev->irq; 81 hw.irq = dev->irq;
79 hw.chipset = ide_pci; /* this enables IRQ sharing */ 82 hw.chipset = ide_pci; /* this enables IRQ sharing */
80 83
81 hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]); 84 hwif = ide_find_port();
82 if (hwif == NULL) 85 if (hwif == NULL)
83 goto out_disable; 86 goto out_disable;
84 87
85 i = hwif->index; 88 i = hwif->index;
86 89
87 if (hwif->present) 90 ide_init_port_data(hwif, i);
88 ide_unregister(i);
89 else
90 ide_init_port_data(hwif, i);
91
92 ide_init_port_hw(hwif, &hw); 91 ide_init_port_hw(hwif, &hw);
93 hwif->quirkproc = &ide_undecoded_slave; 92 hwif->port_ops = &delkin_cb_port_ops;
94 93
95 idx[0] = i; 94 idx[0] = i;
96 95
@@ -110,6 +109,7 @@ delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id)
110 109
111out_disable: 110out_disable:
112 printk(KERN_ERR "delkin_cb: no IDE devices found\n"); 111 printk(KERN_ERR "delkin_cb: no IDE devices found\n");
112 pci_release_regions(dev);
113 pci_disable_device(dev); 113 pci_disable_device(dev);
114 return -ENODEV; 114 return -ENODEV;
115} 115}
@@ -119,9 +119,9 @@ delkin_cb_remove (struct pci_dev *dev)
119{ 119{
120 ide_hwif_t *hwif = pci_get_drvdata(dev); 120 ide_hwif_t *hwif = pci_get_drvdata(dev);
121 121
122 if (hwif) 122 ide_unregister(hwif);
123 ide_unregister(hwif->index);
124 123
124 pci_release_regions(dev);
125 pci_disable_device(dev); 125 pci_disable_device(dev);
126} 126}
127 127
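The delkin_cb change is a lifetime fix as much as a cleanup: probe used to hand its PCI regions back immediately ("IDE layer handles regions itself"), whereas now the driver keeps them for as long as it is bound, releasing them on the probe error path and in remove. A sketch of the balanced pairing; only the pci_* helpers are real kernel API, the demo_* names are hypothetical:

#include <linux/errno.h>
#include <linux/pci.h>

static int demo_setup_port(struct pci_dev *dev) { return 0; }	/* hypothetical */
static void demo_teardown_port(struct pci_dev *dev) { }		/* hypothetical */

static int demo_probe(struct pci_dev *dev)
{
	int rc = pci_enable_device(dev);

	if (rc)
		return rc;

	rc = pci_request_regions(dev, "demo");
	if (rc)
		goto out_disable;

	if (demo_setup_port(dev) < 0)
		goto out_release;

	return 0;			/* regions stay held while bound */

out_release:
	pci_release_regions(dev);
out_disable:
	pci_disable_device(dev);
	return -ENODEV;
}

static void demo_remove(struct pci_dev *dev)
{
	demo_teardown_port(dev);
	pci_release_regions(dev);	/* paired with remove, not probe */
	pci_disable_device(dev);
}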
diff --git a/drivers/ide/pci/generic.c b/drivers/ide/pci/generic.c
index 7fd83a9d4dee..041720e22762 100644
--- a/drivers/ide/pci/generic.c
+++ b/drivers/ide/pci/generic.c
@@ -38,8 +38,7 @@ MODULE_PARM_DESC(all_generic_ide, "IDE generic will claim all unknown PCI IDE st
38 { \ 38 { \
39 .name = name_str, \ 39 .name = name_str, \
40 .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA | \ 40 .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA | \
41 extra_flags | \ 41 extra_flags, \
42 IDE_HFLAG_BOOTABLE, \
43 .swdma_mask = ATA_SWDMA2, \ 42 .swdma_mask = ATA_SWDMA2, \
44 .mwdma_mask = ATA_MWDMA2, \ 43 .mwdma_mask = ATA_MWDMA2, \
45 .udma_mask = ATA_UDMA6, \ 44 .udma_mask = ATA_UDMA6, \
@@ -50,9 +49,8 @@ static const struct ide_port_info generic_chipsets[] __devinitdata = {
50 49
51 { /* 1 */ 50 { /* 1 */
52 .name = "NS87410", 51 .name = "NS87410",
53 .enablebits = {{0x43,0x08,0x08}, {0x47,0x08,0x08}}, 52 .enablebits = { {0x43, 0x08, 0x08}, {0x47, 0x08, 0x08} },
54 .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA | 53 .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA,
55 IDE_HFLAG_BOOTABLE,
56 .swdma_mask = ATA_SWDMA2, 54 .swdma_mask = ATA_SWDMA2,
57 .mwdma_mask = ATA_MWDMA2, 55 .mwdma_mask = ATA_MWDMA2,
58 .udma_mask = ATA_UDMA6, 56 .udma_mask = ATA_UDMA6,
@@ -99,7 +97,7 @@ static const struct ide_port_info generic_chipsets[] __devinitdata = {
99 * Called when the PCI registration layer (or the IDE initialization) 97 * Called when the PCI registration layer (or the IDE initialization)
100 * finds a device matching our IDE device tables. 98 * finds a device matching our IDE device tables.
101 */ 99 */
102 100
103static int __devinit generic_init_one(struct pci_dev *dev, const struct pci_device_id *id) 101static int __devinit generic_init_one(struct pci_dev *dev, const struct pci_device_id *id)
104{ 102{
105 const struct ide_port_info *d = &generic_chipsets[id->driver_data]; 103 const struct ide_port_info *d = &generic_chipsets[id->driver_data];
diff --git a/drivers/ide/pci/hpt34x.c b/drivers/ide/pci/hpt34x.c
index 9f01da46b016..84c36c117194 100644
--- a/drivers/ide/pci/hpt34x.c
+++ b/drivers/ide/pci/hpt34x.c
@@ -115,11 +115,10 @@ static unsigned int __devinit init_chipset_hpt34x(struct pci_dev *dev, const cha
115 return dev->irq; 115 return dev->irq;
116} 116}
117 117
118static void __devinit init_hwif_hpt34x(ide_hwif_t *hwif) 118static const struct ide_port_ops hpt34x_port_ops = {
119{ 119 .set_pio_mode = hpt34x_set_pio_mode,
120 hwif->set_pio_mode = &hpt34x_set_pio_mode; 120 .set_dma_mode = hpt34x_set_mode,
121 hwif->set_dma_mode = &hpt34x_set_mode; 121};
122}
123 122
124#define IDE_HFLAGS_HPT34X \ 123#define IDE_HFLAGS_HPT34X \
125 (IDE_HFLAG_NO_ATAPI_DMA | \ 124 (IDE_HFLAG_NO_ATAPI_DMA | \
@@ -131,16 +130,14 @@ static const struct ide_port_info hpt34x_chipsets[] __devinitdata = {
131 { /* 0 */ 130 { /* 0 */
132 .name = "HPT343", 131 .name = "HPT343",
133 .init_chipset = init_chipset_hpt34x, 132 .init_chipset = init_chipset_hpt34x,
134 .init_hwif = init_hwif_hpt34x, 133 .port_ops = &hpt34x_port_ops,
135 .extra = 16, 134 .host_flags = IDE_HFLAGS_HPT34X | IDE_HFLAG_NON_BOOTABLE,
136 .host_flags = IDE_HFLAGS_HPT34X,
137 .pio_mask = ATA_PIO5, 135 .pio_mask = ATA_PIO5,
138 }, 136 },
139 { /* 1 */ 137 { /* 1 */
140 .name = "HPT345", 138 .name = "HPT345",
141 .init_chipset = init_chipset_hpt34x, 139 .init_chipset = init_chipset_hpt34x,
142 .init_hwif = init_hwif_hpt34x, 140 .port_ops = &hpt34x_port_ops,
143 .extra = 16,
144 .host_flags = IDE_HFLAGS_HPT34X | IDE_HFLAG_OFF_BOARD, 141 .host_flags = IDE_HFLAGS_HPT34X | IDE_HFLAG_OFF_BOARD,
145 .pio_mask = ATA_PIO5, 142 .pio_mask = ATA_PIO5,
146#ifdef CONFIG_HPT34X_AUTODMA 143#ifdef CONFIG_HPT34X_AUTODMA
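Note the flag polarity flip in this hpt34x hunk: the driver now asks for IDE_HFLAG_NON_BOOTABLE instead of the old opt-in IDE_HFLAG_BOOTABLE, so bootable becomes the default and only the exceptions are marked; the hpt366 hunk further down correspondingly turns "d.host_flags |= IDE_HFLAG_BOOTABLE" into "d.host_flags &= ~IDE_HFLAG_NON_BOOTABLE". A toy illustration of the equivalence (the bit values are made up):

#include <stdio.h>

#define DEMO_HFLAG_NON_BOOTABLE	(1u << 0)	/* illustrative bits */
#define DEMO_HFLAG_OFF_BOARD	(1u << 1)

int main(void)
{
	unsigned int host_flags =
		DEMO_HFLAG_OFF_BOARD | DEMO_HFLAG_NON_BOOTABLE;

	/* probe-time fixup: clear the negative flag to make the port
	 * bootable, where the old code set a positive BOOTABLE bit */
	host_flags &= ~DEMO_HFLAG_NON_BOOTABLE;

	printf("bootable: %s\n",
	       (host_flags & DEMO_HFLAG_NON_BOOTABLE) ? "no" : "yes");
	return 0;
}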
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c
index 82d0e318a1fe..c929dadaaaff 100644
--- a/drivers/ide/pci/hpt366.c
+++ b/drivers/ide/pci/hpt366.c
@@ -760,7 +760,7 @@ static void hpt3xx_maskproc(ide_drive_t *drive, int mask)
760 } 760 }
761 } else 761 } else
762 outb(mask ? (drive->ctl | 2) : (drive->ctl & ~2), 762 outb(mask ? (drive->ctl | 2) : (drive->ctl & ~2),
763 hwif->io_ports[IDE_CONTROL_OFFSET]); 763 hwif->io_ports.ctl_addr);
764} 764}
765 765
766/* 766/*
@@ -776,7 +776,7 @@ static void hpt366_dma_lost_irq(ide_drive_t *drive)
776 pci_read_config_byte(dev, 0x52, &mcr3); 776 pci_read_config_byte(dev, 0x52, &mcr3);
777 pci_read_config_byte(dev, 0x5a, &scr1); 777 pci_read_config_byte(dev, 0x5a, &scr1);
778 printk("%s: (%s) mcr1=0x%02x, mcr3=0x%02x, scr1=0x%02x\n", 778 printk("%s: (%s) mcr1=0x%02x, mcr3=0x%02x, scr1=0x%02x\n",
779 drive->name, __FUNCTION__, mcr1, mcr3, scr1); 779 drive->name, __func__, mcr1, mcr3, scr1);
780 if (scr1 & 0x10) 780 if (scr1 & 0x10)
781 pci_write_config_byte(dev, 0x5a, scr1 & ~0x10); 781 pci_write_config_byte(dev, 0x5a, scr1 & ~0x10);
782 ide_dma_lost_irq(drive); 782 ide_dma_lost_irq(drive);
@@ -808,7 +808,7 @@ static void hpt370_irq_timeout(ide_drive_t *drive)
808 hpt370_clear_engine(drive); 808 hpt370_clear_engine(drive);
809} 809}
810 810
811static void hpt370_ide_dma_start(ide_drive_t *drive) 811static void hpt370_dma_start(ide_drive_t *drive)
812{ 812{
813#ifdef HPT_RESET_STATE_ENGINE 813#ifdef HPT_RESET_STATE_ENGINE
814 hpt370_clear_engine(drive); 814 hpt370_clear_engine(drive);
@@ -816,7 +816,7 @@ static void hpt370_ide_dma_start(ide_drive_t *drive)
816 ide_dma_start(drive); 816 ide_dma_start(drive);
817} 817}
818 818
819static int hpt370_ide_dma_end(ide_drive_t *drive) 819static int hpt370_dma_end(ide_drive_t *drive)
820{ 820{
821 ide_hwif_t *hwif = HWIF(drive); 821 ide_hwif_t *hwif = HWIF(drive);
822 u8 dma_stat = inb(hwif->dma_status); 822 u8 dma_stat = inb(hwif->dma_status);
@@ -838,7 +838,7 @@ static void hpt370_dma_timeout(ide_drive_t *drive)
838} 838}
839 839
840/* returns 1 if DMA IRQ issued, 0 otherwise */ 840/* returns 1 if DMA IRQ issued, 0 otherwise */
841static int hpt374_ide_dma_test_irq(ide_drive_t *drive) 841static int hpt374_dma_test_irq(ide_drive_t *drive)
842{ 842{
843 ide_hwif_t *hwif = HWIF(drive); 843 ide_hwif_t *hwif = HWIF(drive);
844 struct pci_dev *dev = to_pci_dev(hwif->dev); 844 struct pci_dev *dev = to_pci_dev(hwif->dev);
@@ -858,11 +858,11 @@ static int hpt374_ide_dma_test_irq(ide_drive_t *drive)
858 858
859 if (!drive->waiting_for_dma) 859 if (!drive->waiting_for_dma)
860 printk(KERN_WARNING "%s: (%s) called while not waiting\n", 860 printk(KERN_WARNING "%s: (%s) called while not waiting\n",
861 drive->name, __FUNCTION__); 861 drive->name, __func__);
862 return 0; 862 return 0;
863} 863}
864 864
865static int hpt374_ide_dma_end(ide_drive_t *drive) 865static int hpt374_dma_end(ide_drive_t *drive)
866{ 866{
867 ide_hwif_t *hwif = HWIF(drive); 867 ide_hwif_t *hwif = HWIF(drive);
868 struct pci_dev *dev = to_pci_dev(hwif->dev); 868 struct pci_dev *dev = to_pci_dev(hwif->dev);
@@ -1271,17 +1271,6 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
1271 /* Cache the channel's MISC. control registers' offset */ 1271 /* Cache the channel's MISC. control registers' offset */
1272 hwif->select_data = hwif->channel ? 0x54 : 0x50; 1272 hwif->select_data = hwif->channel ? 0x54 : 0x50;
1273 1273
1274 hwif->set_pio_mode = &hpt3xx_set_pio_mode;
1275 hwif->set_dma_mode = &hpt3xx_set_mode;
1276
1277 hwif->quirkproc = &hpt3xx_quirkproc;
1278 hwif->maskproc = &hpt3xx_maskproc;
1279
1280 hwif->udma_filter = &hpt3xx_udma_filter;
1281 hwif->mdma_filter = &hpt3xx_mdma_filter;
1282
1283 hwif->cable_detect = hpt3xx_cable_detect;
1284
1285 /* 1274 /*
1286 * HPT3xxN chips have some complications: 1275 * HPT3xxN chips have some complications:
1287 * 1276 *
@@ -1323,29 +1312,19 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
1323 1312
1324 if (new_mcr != old_mcr) 1313 if (new_mcr != old_mcr)
1325 pci_write_config_byte(dev, hwif->select_data + 1, new_mcr); 1314 pci_write_config_byte(dev, hwif->select_data + 1, new_mcr);
1326
1327 if (hwif->dma_base == 0)
1328 return;
1329
1330 if (chip_type >= HPT374) {
1331 hwif->ide_dma_test_irq = &hpt374_ide_dma_test_irq;
1332 hwif->ide_dma_end = &hpt374_ide_dma_end;
1333 } else if (chip_type >= HPT370) {
1334 hwif->dma_start = &hpt370_ide_dma_start;
1335 hwif->ide_dma_end = &hpt370_ide_dma_end;
1336 hwif->dma_timeout = &hpt370_dma_timeout;
1337 } else
1338 hwif->dma_lost_irq = &hpt366_dma_lost_irq;
1339} 1315}
1340 1316
1341static void __devinit init_dma_hpt366(ide_hwif_t *hwif, unsigned long dmabase) 1317static int __devinit init_dma_hpt366(ide_hwif_t *hwif,
1318 const struct ide_port_info *d)
1342{ 1319{
1343 struct pci_dev *dev = to_pci_dev(hwif->dev); 1320 struct pci_dev *dev = to_pci_dev(hwif->dev);
1344 u8 masterdma = 0, slavedma = 0; 1321 unsigned long flags, base = ide_pci_dma_base(hwif, d);
1345 u8 dma_new = 0, dma_old = 0; 1322 u8 dma_old, dma_new, masterdma = 0, slavedma = 0;
1346 unsigned long flags;
1347 1323
1348 dma_old = inb(dmabase + 2); 1324 if (base == 0 || ide_pci_set_master(dev, d->name) < 0)
1325 return -1;
1326
1327 dma_old = inb(base + 2);
1349 1328
1350 local_irq_save(flags); 1329 local_irq_save(flags);
1351 1330
@@ -1356,11 +1335,21 @@ static void __devinit init_dma_hpt366(ide_hwif_t *hwif, unsigned long dmabase)
1356 if (masterdma & 0x30) dma_new |= 0x20; 1335 if (masterdma & 0x30) dma_new |= 0x20;
1357 if ( slavedma & 0x30) dma_new |= 0x40; 1336 if ( slavedma & 0x30) dma_new |= 0x40;
1358 if (dma_new != dma_old) 1337 if (dma_new != dma_old)
1359 outb(dma_new, dmabase + 2); 1338 outb(dma_new, base + 2);
1360 1339
1361 local_irq_restore(flags); 1340 local_irq_restore(flags);
1362 1341
1363 ide_setup_dma(hwif, dmabase); 1342 printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n",
1343 hwif->name, base, base + 7);
1344
1345 hwif->extra_base = base + (hwif->channel ? 8 : 16);
1346
1347 if (ide_allocate_dma_engine(hwif))
1348 return -1;
1349
1350 ide_setup_dma(hwif, base);
1351
1352 return 0;
1364} 1353}
1365 1354
1366static void __devinit hpt374_init(struct pci_dev *dev, struct pci_dev *dev2) 1355static void __devinit hpt374_init(struct pci_dev *dev, struct pci_dev *dev2)
@@ -1416,6 +1405,49 @@ static int __devinit hpt36x_init(struct pci_dev *dev, struct pci_dev *dev2)
1416 IDE_HFLAG_ABUSE_SET_DMA_MODE | \ 1405 IDE_HFLAG_ABUSE_SET_DMA_MODE | \
1417 IDE_HFLAG_OFF_BOARD) 1406 IDE_HFLAG_OFF_BOARD)
1418 1407
1408static const struct ide_port_ops hpt3xx_port_ops = {
1409 .set_pio_mode = hpt3xx_set_pio_mode,
1410 .set_dma_mode = hpt3xx_set_mode,
1411 .quirkproc = hpt3xx_quirkproc,
1412 .maskproc = hpt3xx_maskproc,
1413 .mdma_filter = hpt3xx_mdma_filter,
1414 .udma_filter = hpt3xx_udma_filter,
1415 .cable_detect = hpt3xx_cable_detect,
1416};
1417
1418static const struct ide_dma_ops hpt37x_dma_ops = {
1419 .dma_host_set = ide_dma_host_set,
1420 .dma_setup = ide_dma_setup,
1421 .dma_exec_cmd = ide_dma_exec_cmd,
1422 .dma_start = ide_dma_start,
1423 .dma_end = hpt374_dma_end,
1424 .dma_test_irq = hpt374_dma_test_irq,
1425 .dma_lost_irq = ide_dma_lost_irq,
1426 .dma_timeout = ide_dma_timeout,
1427};
1428
1429static const struct ide_dma_ops hpt370_dma_ops = {
1430 .dma_host_set = ide_dma_host_set,
1431 .dma_setup = ide_dma_setup,
1432 .dma_exec_cmd = ide_dma_exec_cmd,
1433 .dma_start = hpt370_dma_start,
1434 .dma_end = hpt370_dma_end,
1435 .dma_test_irq = ide_dma_test_irq,
1436 .dma_lost_irq = ide_dma_lost_irq,
1437 .dma_timeout = hpt370_dma_timeout,
1438};
1439
1440static const struct ide_dma_ops hpt36x_dma_ops = {
1441 .dma_host_set = ide_dma_host_set,
1442 .dma_setup = ide_dma_setup,
1443 .dma_exec_cmd = ide_dma_exec_cmd,
1444 .dma_start = ide_dma_start,
1445 .dma_end = __ide_dma_end,
1446 .dma_test_irq = ide_dma_test_irq,
1447 .dma_lost_irq = hpt366_dma_lost_irq,
1448 .dma_timeout = ide_dma_timeout,
1449};
1450
1419static const struct ide_port_info hpt366_chipsets[] __devinitdata = { 1451static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
1420 { /* 0 */ 1452 { /* 0 */
1421 .name = "HPT36x", 1453 .name = "HPT36x",
@@ -1429,7 +1461,8 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
1429 * Bit 4 is for the primary channel, bit 5 for the secondary. 1461 * Bit 4 is for the primary channel, bit 5 for the secondary.
1430 */ 1462 */
1431 .enablebits = {{0x50,0x10,0x10}, {0x54,0x04,0x04}}, 1463 .enablebits = {{0x50,0x10,0x10}, {0x54,0x04,0x04}},
1432 .extra = 240, 1464 .port_ops = &hpt3xx_port_ops,
1465 .dma_ops = &hpt36x_dma_ops,
1433 .host_flags = IDE_HFLAGS_HPT3XX | IDE_HFLAG_SINGLE, 1466 .host_flags = IDE_HFLAGS_HPT3XX | IDE_HFLAG_SINGLE,
1434 .pio_mask = ATA_PIO4, 1467 .pio_mask = ATA_PIO4,
1435 .mwdma_mask = ATA_MWDMA2, 1468 .mwdma_mask = ATA_MWDMA2,
@@ -1439,7 +1472,8 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
1439 .init_hwif = init_hwif_hpt366, 1472 .init_hwif = init_hwif_hpt366,
1440 .init_dma = init_dma_hpt366, 1473 .init_dma = init_dma_hpt366,
1441 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}}, 1474 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
1442 .extra = 240, 1475 .port_ops = &hpt3xx_port_ops,
1476 .dma_ops = &hpt37x_dma_ops,
1443 .host_flags = IDE_HFLAGS_HPT3XX, 1477 .host_flags = IDE_HFLAGS_HPT3XX,
1444 .pio_mask = ATA_PIO4, 1478 .pio_mask = ATA_PIO4,
1445 .mwdma_mask = ATA_MWDMA2, 1479 .mwdma_mask = ATA_MWDMA2,
@@ -1449,7 +1483,8 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
1449 .init_hwif = init_hwif_hpt366, 1483 .init_hwif = init_hwif_hpt366,
1450 .init_dma = init_dma_hpt366, 1484 .init_dma = init_dma_hpt366,
1451 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}}, 1485 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
1452 .extra = 240, 1486 .port_ops = &hpt3xx_port_ops,
1487 .dma_ops = &hpt37x_dma_ops,
1453 .host_flags = IDE_HFLAGS_HPT3XX, 1488 .host_flags = IDE_HFLAGS_HPT3XX,
1454 .pio_mask = ATA_PIO4, 1489 .pio_mask = ATA_PIO4,
1455 .mwdma_mask = ATA_MWDMA2, 1490 .mwdma_mask = ATA_MWDMA2,
@@ -1459,7 +1494,8 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
1459 .init_hwif = init_hwif_hpt366, 1494 .init_hwif = init_hwif_hpt366,
1460 .init_dma = init_dma_hpt366, 1495 .init_dma = init_dma_hpt366,
1461 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}}, 1496 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
1462 .extra = 240, 1497 .port_ops = &hpt3xx_port_ops,
1498 .dma_ops = &hpt37x_dma_ops,
1463 .host_flags = IDE_HFLAGS_HPT3XX, 1499 .host_flags = IDE_HFLAGS_HPT3XX,
1464 .pio_mask = ATA_PIO4, 1500 .pio_mask = ATA_PIO4,
1465 .mwdma_mask = ATA_MWDMA2, 1501 .mwdma_mask = ATA_MWDMA2,
@@ -1470,7 +1506,8 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
1470 .init_dma = init_dma_hpt366, 1506 .init_dma = init_dma_hpt366,
1471 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}}, 1507 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
1472 .udma_mask = ATA_UDMA5, 1508 .udma_mask = ATA_UDMA5,
1473 .extra = 240, 1509 .port_ops = &hpt3xx_port_ops,
1510 .dma_ops = &hpt37x_dma_ops,
1474 .host_flags = IDE_HFLAGS_HPT3XX, 1511 .host_flags = IDE_HFLAGS_HPT3XX,
1475 .pio_mask = ATA_PIO4, 1512 .pio_mask = ATA_PIO4,
1476 .mwdma_mask = ATA_MWDMA2, 1513 .mwdma_mask = ATA_MWDMA2,
@@ -1480,7 +1517,8 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
1480 .init_hwif = init_hwif_hpt366, 1517 .init_hwif = init_hwif_hpt366,
1481 .init_dma = init_dma_hpt366, 1518 .init_dma = init_dma_hpt366,
1482 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}}, 1519 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
1483 .extra = 240, 1520 .port_ops = &hpt3xx_port_ops,
1521 .dma_ops = &hpt37x_dma_ops,
1484 .host_flags = IDE_HFLAGS_HPT3XX, 1522 .host_flags = IDE_HFLAGS_HPT3XX,
1485 .pio_mask = ATA_PIO4, 1523 .pio_mask = ATA_PIO4,
1486 .mwdma_mask = ATA_MWDMA2, 1524 .mwdma_mask = ATA_MWDMA2,
@@ -1543,6 +1581,10 @@ static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_devic
1543 d.name = info->chip_name; 1581 d.name = info->chip_name;
1544 d.udma_mask = info->udma_mask; 1582 d.udma_mask = info->udma_mask;
1545 1583
1584 /* fixup ->dma_ops for HPT370/HPT370A */
1585 if (info == &hpt370 || info == &hpt370a)
1586 d.dma_ops = &hpt370_dma_ops;
1587
1546 pci_set_drvdata(dev, (void *)info); 1588 pci_set_drvdata(dev, (void *)info);
1547 1589
1548 if (info == &hpt36x || info == &hpt374) 1590 if (info == &hpt36x || info == &hpt374)
@@ -1557,7 +1599,7 @@ static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_devic
1557 hpt374_init(dev, dev2); 1599 hpt374_init(dev, dev2);
1558 else { 1600 else {
1559 if (hpt36x_init(dev, dev2)) 1601 if (hpt36x_init(dev, dev2))
1560 d.host_flags |= IDE_HFLAG_BOOTABLE; 1602 d.host_flags &= ~IDE_HFLAG_NON_BOOTABLE;
1561 } 1603 }
1562 1604
1563 ret = ide_setup_pci_devices(dev, dev2, &d); 1605 ret = ide_setup_pci_devices(dev, dev2, &d);
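hpt366 is the most involved conversion: instead of picking DMA methods inside init_hwif, it now defines three const ide_dma_ops tables (HPT36x, HPT370, HPT37x/374), each mixing the generic ide_dma_* helpers with the chip-specific start/end/test_irq methods, and hpt366_init_one() swaps the HPT370 table in at probe time. A user-space sketch of selecting a whole ops table by detected revision (names illustrative):

#include <stdio.h>

struct dma_ops {
	void (*dma_start)(void);
	int  (*dma_end)(void);
};

static void generic_dma_start(void) { puts("generic start"); }
static int  generic_dma_end(void)   { puts("generic end"); return 0; }
static void gen370_dma_start(void)  { puts("clear engine, then start"); }

/* one table per generation; shared slots reuse the generic helpers */
static const struct dma_ops gen37x_dma_ops = {
	.dma_start	= generic_dma_start,
	.dma_end	= generic_dma_end,
};

static const struct dma_ops gen370_dma_ops = {
	.dma_start	= gen370_dma_start,	/* only this slot differs */
	.dma_end	= generic_dma_end,
};

int main(void)
{
	int chip_is_370 = 1;	/* pretend probe found an HPT370 */
	const struct dma_ops *ops =
		chip_is_370 ? &gen370_dma_ops : &gen37x_dma_ops;

	ops->dma_start();
	ops->dma_end();
	return 0;
}

Swapping a single pointer at probe time keeps every table const, which the old per-hwif method assignments could not.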
diff --git a/drivers/ide/pci/it8213.c b/drivers/ide/pci/it8213.c
index e3427eaab430..9053c8771e6e 100644
--- a/drivers/ide/pci/it8213.c
+++ b/drivers/ide/pci/it8213.c
@@ -35,7 +35,7 @@ static void it8213_set_pio_mode(ide_drive_t *drive, const u8 pio)
35 static DEFINE_SPINLOCK(tune_lock); 35 static DEFINE_SPINLOCK(tune_lock);
36 int control = 0; 36 int control = 0;
37 37
38 static const u8 timings[][2]= { 38 static const u8 timings[][2] = {
39 { 0, 0 }, 39 { 0, 0 },
40 { 0, 0 }, 40 { 0, 0 },
41 { 1, 0 }, 41 { 1, 0 },
@@ -105,11 +105,10 @@ static void it8213_set_dma_mode(ide_drive_t *drive, const u8 speed)
105 105
106 if (!(reg48 & u_flag)) 106 if (!(reg48 & u_flag))
107 pci_write_config_byte(dev, 0x48, reg48 | u_flag); 107 pci_write_config_byte(dev, 0x48, reg48 | u_flag);
108 if (speed >= XFER_UDMA_5) { 108 if (speed >= XFER_UDMA_5)
109 pci_write_config_byte(dev, 0x55, (u8) reg55|w_flag); 109 pci_write_config_byte(dev, 0x55, (u8) reg55|w_flag);
110 } else { 110 else
111 pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag); 111 pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
112 }
113 112
114 if ((reg4a & a_speed) != u_speed) 113 if ((reg4a & a_speed) != u_speed)
115 pci_write_config_word(dev, 0x4a, (reg4a & ~a_speed) | u_speed); 114 pci_write_config_word(dev, 0x4a, (reg4a & ~a_speed) | u_speed);
@@ -150,29 +149,18 @@ static u8 __devinit it8213_cable_detect(ide_hwif_t *hwif)
150 return (reg42h & 0x02) ? ATA_CBL_PATA40 : ATA_CBL_PATA80; 149 return (reg42h & 0x02) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
151} 150}
152 151
153/** 152static const struct ide_port_ops it8213_port_ops = {
154 * init_hwif_it8213 - set up hwif structs 153 .set_pio_mode = it8213_set_pio_mode,
155 * @hwif: interface to set up 154 .set_dma_mode = it8213_set_dma_mode,
156 * 155 .cable_detect = it8213_cable_detect,
157 * We do the basic set up of the interface structure. 156};
158 */
159
160static void __devinit init_hwif_it8213(ide_hwif_t *hwif)
161{
162 hwif->set_dma_mode = &it8213_set_dma_mode;
163 hwif->set_pio_mode = &it8213_set_pio_mode;
164
165 hwif->cable_detect = it8213_cable_detect;
166}
167
168 157
169#define DECLARE_ITE_DEV(name_str) \ 158#define DECLARE_ITE_DEV(name_str) \
170 { \ 159 { \
171 .name = name_str, \ 160 .name = name_str, \
172 .init_hwif = init_hwif_it8213, \ 161 .enablebits = { {0x41, 0x80, 0x80} }, \
173 .enablebits = {{0x41,0x80,0x80}}, \ 162 .port_ops = &it8213_port_ops, \
174 .host_flags = IDE_HFLAG_SINGLE | \ 163 .host_flags = IDE_HFLAG_SINGLE, \
175 IDE_HFLAG_BOOTABLE, \
176 .pio_mask = ATA_PIO4, \ 164 .pio_mask = ATA_PIO4, \
177 .swdma_mask = ATA_SWDMA2_ONLY, \ 165 .swdma_mask = ATA_SWDMA2_ONLY, \
178 .mwdma_mask = ATA_MWDMA12_ONLY, \ 166 .mwdma_mask = ATA_MWDMA12_ONLY, \
diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/pci/it821x.c
index d8a167451fd6..6ab04115286b 100644
--- a/drivers/ide/pci/it821x.c
+++ b/drivers/ide/pci/it821x.c
@@ -418,7 +418,7 @@ static void it821x_set_dma_mode(ide_drive_t *drive, const u8 speed)
418} 418}
419 419
420/** 420/**
421 * ata66_it821x - check for 80 pin cable 421 * it821x_cable_detect - cable detection
422 * @hwif: interface to check 422 * @hwif: interface to check
423 * 423 *
424 * Check for the presence of an ATA66 capable cable on the 424 * Check for the presence of an ATA66 capable cable on the
@@ -426,7 +426,7 @@ static void it821x_set_dma_mode(ide_drive_t *drive, const u8 speed)
426 * the needed logic onboard. 426 * the needed logic onboard.
427 */ 427 */
428 428
429static u8 __devinit ata66_it821x(ide_hwif_t *hwif) 429static u8 __devinit it821x_cable_detect(ide_hwif_t *hwif)
430{ 430{
431 /* The reference driver also only does disk side */ 431 /* The reference driver also only does disk side */
432 return ATA_CBL_PATA80; 432 return ATA_CBL_PATA80;
@@ -511,6 +511,11 @@ static void __devinit it821x_quirkproc(ide_drive_t *drive)
511 511
512} 512}
513 513
514static struct ide_dma_ops it821x_pass_through_dma_ops = {
515 .dma_start = it821x_dma_start,
516 .dma_end = it821x_dma_end,
517};
518
514/** 519/**
515 * init_hwif_it821x - set up hwif structs 520 * init_hwif_it821x - set up hwif structs
516 * @hwif: interface to set up 521 * @hwif: interface to set up
@@ -523,16 +528,10 @@ static void __devinit it821x_quirkproc(ide_drive_t *drive)
523static void __devinit init_hwif_it821x(ide_hwif_t *hwif) 528static void __devinit init_hwif_it821x(ide_hwif_t *hwif)
524{ 529{
525 struct pci_dev *dev = to_pci_dev(hwif->dev); 530 struct pci_dev *dev = to_pci_dev(hwif->dev);
526 struct it821x_dev *idev = kzalloc(sizeof(struct it821x_dev), GFP_KERNEL); 531 struct it821x_dev **itdevs = (struct it821x_dev **)pci_get_drvdata(dev);
532 struct it821x_dev *idev = itdevs[hwif->channel];
527 u8 conf; 533 u8 conf;
528 534
529 hwif->quirkproc = &it821x_quirkproc;
530
531 if (idev == NULL) {
532 printk(KERN_ERR "it821x: out of memory, falling back to legacy behaviour.\n");
533 return;
534 }
535
536 ide_set_hwifdata(hwif, idev); 535 ide_set_hwifdata(hwif, idev);
537 536
538 pci_read_config_byte(dev, 0x50, &conf); 537 pci_read_config_byte(dev, 0x50, &conf);
@@ -567,17 +566,11 @@ static void __devinit init_hwif_it821x(ide_hwif_t *hwif)
567 } 566 }
568 567
569 if (idev->smart == 0) { 568 if (idev->smart == 0) {
570 hwif->set_pio_mode = &it821x_set_pio_mode;
571 hwif->set_dma_mode = &it821x_set_dma_mode;
572
573 /* MWDMA/PIO clock switching for pass through mode */ 569 /* MWDMA/PIO clock switching for pass through mode */
574 hwif->dma_start = &it821x_dma_start; 570 hwif->dma_ops = &it821x_pass_through_dma_ops;
575 hwif->ide_dma_end = &it821x_dma_end;
576 } else 571 } else
577 hwif->host_flags |= IDE_HFLAG_NO_SET_MODE; 572 hwif->host_flags |= IDE_HFLAG_NO_SET_MODE;
578 573
579 hwif->cable_detect = ata66_it821x;
580
581 if (hwif->dma_base == 0) 574 if (hwif->dma_base == 0)
582 return; 575 return;
583 576
@@ -617,13 +610,20 @@ static unsigned int __devinit init_chipset_it821x(struct pci_dev *dev, const cha
617 return 0; 610 return 0;
618} 611}
619 612
613static const struct ide_port_ops it821x_port_ops = {
614 /* it821x_set_{pio,dma}_mode() are only used in pass-through mode */
615 .set_pio_mode = it821x_set_pio_mode,
616 .set_dma_mode = it821x_set_dma_mode,
617 .quirkproc = it821x_quirkproc,
618 .cable_detect = it821x_cable_detect,
619};
620 620
621#define DECLARE_ITE_DEV(name_str) \ 621#define DECLARE_ITE_DEV(name_str) \
622 { \ 622 { \
623 .name = name_str, \ 623 .name = name_str, \
624 .init_chipset = init_chipset_it821x, \ 624 .init_chipset = init_chipset_it821x, \
625 .init_hwif = init_hwif_it821x, \ 625 .init_hwif = init_hwif_it821x, \
626 .host_flags = IDE_HFLAG_BOOTABLE, \ 626 .port_ops = &it821x_port_ops, \
627 .pio_mask = ATA_PIO4, \ 627 .pio_mask = ATA_PIO4, \
628 } 628 }
629 629
@@ -642,6 +642,22 @@ static const struct ide_port_info it821x_chipsets[] __devinitdata = {
642 642
643static int __devinit it821x_init_one(struct pci_dev *dev, const struct pci_device_id *id) 643static int __devinit it821x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
644{ 644{
645 struct it821x_dev *itdevs[2] = { NULL, NULL }, *itdev;
646 unsigned int i;
647
648 for (i = 0; i < 2; i++) {
649 itdev = kzalloc(sizeof(*itdev), GFP_KERNEL);
650 if (itdev == NULL) {
651 kfree(itdevs[0]);
652 printk(KERN_ERR "it821x: out of memory\n");
653 return -ENOMEM;
654 }
655
656 itdevs[i] = itdev;
657 }
658
659 pci_set_drvdata(dev, itdevs);
660
645 return ide_setup_pci_device(dev, &it821x_chipsets[id->driver_data]); 661 return ide_setup_pci_device(dev, &it821x_chipsets[id->driver_data]);
646} 662}
647 663
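it821x previously allocated its per-channel state inside init_hwif, where an allocation failure could only be logged and papered over; moving the two kzalloc() calls into it821x_init_one() lets the driver unwind and return -ENOMEM before anything is registered, handing the pointers to init_hwif via pci_set_drvdata(). A user-space sketch of the allocate-all-or-roll-back step:

#include <stdio.h>
#include <stdlib.h>

struct chan_state { int clock; };

/* allocate one state object per channel; on failure free what was
 * already allocated, as the new it821x_init_one() error path does */
static int alloc_channel_states(struct chan_state *states[], int n)
{
	int i;

	for (i = 0; i < n; i++) {
		states[i] = calloc(1, sizeof(*states[i]));
		if (states[i] == NULL) {
			while (--i >= 0)
				free(states[i]);
			return -1;	/* the kernel code returns -ENOMEM */
		}
	}
	return 0;
}

int main(void)
{
	struct chan_state *states[2] = { NULL, NULL };

	if (alloc_channel_states(states, 2) < 0) {
		fprintf(stderr, "out of memory\n");
		return 1;
	}
	printf("both channels allocated\n");
	free(states[0]);
	free(states[1]);
	return 0;
}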
diff --git a/drivers/ide/pci/jmicron.c b/drivers/ide/pci/jmicron.c
index a56bcb4f22f4..96ef7394f283 100644
--- a/drivers/ide/pci/jmicron.c
+++ b/drivers/ide/pci/jmicron.c
@@ -19,13 +19,13 @@ typedef enum {
19} port_type; 19} port_type;
20 20
21/** 21/**
22 * ata66_jmicron - Cable check 22 * jmicron_cable_detect - cable detection
23 * @hwif: IDE port 23 * @hwif: IDE port
24 * 24 *
25 * Returns the cable type. 25 * Returns the cable type.
26 */ 26 */
27 27
28static u8 __devinit ata66_jmicron(ide_hwif_t *hwif) 28static u8 __devinit jmicron_cable_detect(ide_hwif_t *hwif)
29{ 29{
30 struct pci_dev *pdev = to_pci_dev(hwif->dev); 30 struct pci_dev *pdev = to_pci_dev(hwif->dev);
31 31
@@ -63,8 +63,7 @@ static u8 __devinit ata66_jmicron(ide_hwif_t *hwif)
63 * actually do our cable checking etc. Thankfully we don't need 63 * actually do our cable checking etc. Thankfully we don't need
64 * to do the plumbing for other cases. 64 * to do the plumbing for other cases.
65 */ 65 */
66 switch (port_map[port]) 66 switch (port_map[port]) {
67 {
68 case PORT_PATA0: 67 case PORT_PATA0:
69 if (control & (1 << 3)) /* 40/80 pin primary */ 68 if (control & (1 << 3)) /* 40/80 pin primary */
70 return ATA_CBL_PATA40; 69 return ATA_CBL_PATA40;
@@ -96,26 +95,16 @@ static void jmicron_set_dma_mode(ide_drive_t *drive, const u8 mode)
96{ 95{
97} 96}
98 97
99/** 98static const struct ide_port_ops jmicron_port_ops = {
100 * init_hwif_jmicron - set up hwif structs 99 .set_pio_mode = jmicron_set_pio_mode,
101 * @hwif: interface to set up 100 .set_dma_mode = jmicron_set_dma_mode,
102 * 101 .cable_detect = jmicron_cable_detect,
103 * Minimal set up is required for the Jmicron hardware. 102};
104 */
105
106static void __devinit init_hwif_jmicron(ide_hwif_t *hwif)
107{
108 hwif->set_pio_mode = &jmicron_set_pio_mode;
109 hwif->set_dma_mode = &jmicron_set_dma_mode;
110
111 hwif->cable_detect = ata66_jmicron;
112}
113 103
114static const struct ide_port_info jmicron_chipset __devinitdata = { 104static const struct ide_port_info jmicron_chipset __devinitdata = {
115 .name = "JMB", 105 .name = "JMB",
116 .init_hwif = init_hwif_jmicron,
117 .host_flags = IDE_HFLAG_BOOTABLE,
118 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } }, 106 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
107 .port_ops = &jmicron_port_ops,
119 .pio_mask = ATA_PIO5, 108 .pio_mask = ATA_PIO5,
120 .mwdma_mask = ATA_MWDMA2, 109 .mwdma_mask = ATA_MWDMA2,
121 .udma_mask = ATA_UDMA6, 110 .udma_mask = ATA_UDMA6,
diff --git a/drivers/ide/pci/ns87415.c b/drivers/ide/pci/ns87415.c
index 75513320aad9..c13e299077ec 100644
--- a/drivers/ide/pci/ns87415.c
+++ b/drivers/ide/pci/ns87415.c
@@ -72,8 +72,8 @@ static void __devinit superio_ide_init_iops (struct hwif_s *hwif)
72 base = pci_resource_start(pdev, port * 2) & ~3; 72 base = pci_resource_start(pdev, port * 2) & ~3;
73 dmabase = pci_resource_start(pdev, 4) & ~3; 73 dmabase = pci_resource_start(pdev, 4) & ~3;
74 74
75 superio_ide_status[port] = base + IDE_STATUS_OFFSET; 75 superio_ide_status[port] = base + 7;
76 superio_ide_select[port] = base + IDE_SELECT_OFFSET; 76 superio_ide_select[port] = base + 6;
77 superio_ide_dma_status[port] = dmabase + (!port ? 2 : 0xa); 77 superio_ide_dma_status[port] = dmabase + (!port ? 2 : 0xa);
78 78
79 /* Clear error/interrupt, enable dma */ 79 /* Clear error/interrupt, enable dma */
@@ -150,7 +150,7 @@ static void ns87415_selectproc (ide_drive_t *drive)
150 ns87415_prepare_drive (drive, drive->using_dma); 150 ns87415_prepare_drive (drive, drive->using_dma);
151} 151}
152 152
153static int ns87415_ide_dma_end (ide_drive_t *drive) 153static int ns87415_dma_end(ide_drive_t *drive)
154{ 154{
155 ide_hwif_t *hwif = HWIF(drive); 155 ide_hwif_t *hwif = HWIF(drive);
156 u8 dma_stat = 0, dma_cmd = 0; 156 u8 dma_stat = 0, dma_cmd = 0;
@@ -170,7 +170,7 @@ static int ns87415_ide_dma_end (ide_drive_t *drive)
170 return (dma_stat & 7) != 4; 170 return (dma_stat & 7) != 4;
171} 171}
172 172
173static int ns87415_ide_dma_setup(ide_drive_t *drive) 173static int ns87415_dma_setup(ide_drive_t *drive)
174{ 174{
175 /* select DMA xfer */ 175 /* select DMA xfer */
176 ns87415_prepare_drive(drive, 1); 176 ns87415_prepare_drive(drive, 1);
@@ -195,8 +195,6 @@ static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif)
195 u8 stat; 195 u8 stat;
196#endif 196#endif
197 197
198 hwif->selectproc = &ns87415_selectproc;
199
200 /* 198 /*
201 * We cannot probe for IRQ: both ports share common IRQ on INTA. 199 * We cannot probe for IRQ: both ports share common IRQ on INTA.
202 * Also, leave IRQ masked during drive probing, to prevent infinite 200 * Also, leave IRQ masked during drive probing, to prevent infinite
@@ -233,12 +231,12 @@ static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif)
233 * SELECT_DRIVE() properly during first ide_probe_port(). 231 * SELECT_DRIVE() properly during first ide_probe_port().
234 */ 232 */
235 timeout = 10000; 233 timeout = 10000;
236 outb(12, hwif->io_ports[IDE_CONTROL_OFFSET]); 234 outb(12, hwif->io_ports.ctl_addr);
237 udelay(10); 235 udelay(10);
238 outb(8, hwif->io_ports[IDE_CONTROL_OFFSET]); 236 outb(8, hwif->io_ports.ctl_addr);
239 do { 237 do {
240 udelay(50); 238 udelay(50);
241 stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]); 239 stat = hwif->INB(hwif->io_ports.status_addr);
242 if (stat == 0xff) 240 if (stat == 0xff)
243 break; 241 break;
244 } while ((stat & BUSY_STAT) && --timeout); 242 } while ((stat & BUSY_STAT) && --timeout);
@@ -246,7 +244,7 @@ static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif)
246 } 244 }
247 245
248 if (!using_inta) 246 if (!using_inta)
249 hwif->irq = ide_default_irq(hwif->io_ports[IDE_DATA_OFFSET]); 247 hwif->irq = ide_default_irq(hwif->io_ports.data_addr);
250 else if (!hwif->irq && hwif->mate && hwif->mate->irq) 248 else if (!hwif->irq && hwif->mate && hwif->mate->irq)
251 hwif->irq = hwif->mate->irq; /* share IRQ with mate */ 249 hwif->irq = hwif->mate->irq; /* share IRQ with mate */
252 250
@@ -254,19 +252,33 @@ static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif)
254 return; 252 return;
255 253
256 outb(0x60, hwif->dma_status); 254 outb(0x60, hwif->dma_status);
257 hwif->dma_setup = &ns87415_ide_dma_setup;
258 hwif->ide_dma_end = &ns87415_ide_dma_end;
259} 255}
260 256
257static const struct ide_port_ops ns87415_port_ops = {
258 .selectproc = ns87415_selectproc,
259};
260
261static const struct ide_dma_ops ns87415_dma_ops = {
262 .dma_host_set = ide_dma_host_set,
263 .dma_setup = ns87415_dma_setup,
264 .dma_exec_cmd = ide_dma_exec_cmd,
265 .dma_start = ide_dma_start,
266 .dma_end = ns87415_dma_end,
267 .dma_test_irq = ide_dma_test_irq,
268 .dma_lost_irq = ide_dma_lost_irq,
269 .dma_timeout = ide_dma_timeout,
270};
271
261static const struct ide_port_info ns87415_chipset __devinitdata = { 272static const struct ide_port_info ns87415_chipset __devinitdata = {
262 .name = "NS87415", 273 .name = "NS87415",
263#ifdef CONFIG_SUPERIO 274#ifdef CONFIG_SUPERIO
264 .init_iops = init_iops_ns87415, 275 .init_iops = init_iops_ns87415,
265#endif 276#endif
266 .init_hwif = init_hwif_ns87415, 277 .init_hwif = init_hwif_ns87415,
278 .port_ops = &ns87415_port_ops,
279 .dma_ops = &ns87415_dma_ops,
267 .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA | 280 .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA |
268 IDE_HFLAG_NO_ATAPI_DMA | 281 IDE_HFLAG_NO_ATAPI_DMA,
269 IDE_HFLAG_BOOTABLE,
270}; 282};
271 283
272static int __devinit ns87415_init_one(struct pci_dev *dev, const struct pci_device_id *id) 284static int __devinit ns87415_init_one(struct pci_dev *dev, const struct pci_device_id *id)
diff --git a/drivers/ide/pci/opti621.c b/drivers/ide/pci/opti621.c
index 46e8748f507e..6e99080497bf 100644
--- a/drivers/ide/pci/opti621.c
+++ b/drivers/ide/pci/opti621.c
@@ -53,13 +53,12 @@
53 * If you then set the second drive to another PIO, the old value 53 * If you then set the second drive to another PIO, the old value
54 * (automatically selected) will be overridden by yours. 54 * (automatically selected) will be overridden by yours.
55 * There is a 25/33MHz switch in configuration 55 * There is a 25/33MHz switch in configuration
56 * register, but driver is written for use at any frequency which get 56 * register, but driver is written for use at any frequency.
57 * (use idebus=xx to select PCI bus speed).
58 * 57 *
59 * Version 0.1, Nov 8, 1996 58 * Version 0.1, Nov 8, 1996
60 * by Jaromir Koutek, for 2.1.8. 59 * by Jaromir Koutek, for 2.1.8.
61 * Initial version of driver. 60 * Initial version of driver.
62 * 61 *
63 * Version 0.2 62 * Version 0.2
64 * Number 0.2 skipped. 63 * Number 0.2 skipped.
65 * 64 *
@@ -75,7 +74,7 @@
75 * by Jaromir Koutek 74 * by Jaromir Koutek
76 * Updates for use with (again) new IDE block driver. 75 * Updates for use with (again) new IDE block driver.
77 * Update of documentation. 76 * Update of documentation.
78 * 77 *
79 * Version 0.6, Jan 2, 1999 78 * Version 0.6, Jan 2, 1999
80 * by Jaromir Koutek 79 * by Jaromir Koutek
81 * Reverted to version 0.3 of the driver, because 80 * Reverted to version 0.3 of the driver, because
@@ -208,29 +207,34 @@ typedef struct pio_clocks_s {
208 207
209static void compute_clocks(int pio, pio_clocks_t *clks) 208static void compute_clocks(int pio, pio_clocks_t *clks)
210{ 209{
211 if (pio != PIO_NOT_EXIST) { 210 if (pio != PIO_NOT_EXIST) {
212 int adr_setup, data_pls; 211 int adr_setup, data_pls;
213 int bus_speed = system_bus_clock(); 212 int bus_speed = ide_pci_clk ? ide_pci_clk : system_bus_clock();
214 213
215 adr_setup = ide_pio_timings[pio].setup_time; 214 adr_setup = ide_pio_timings[pio].setup_time;
216 data_pls = ide_pio_timings[pio].active_time; 215 data_pls = ide_pio_timings[pio].active_time;
217 clks->address_time = cmpt_clk(adr_setup, bus_speed); 216 clks->address_time = cmpt_clk(adr_setup, bus_speed);
218 clks->data_time = cmpt_clk(data_pls, bus_speed); 217 clks->data_time = cmpt_clk(data_pls, bus_speed);
219 clks->recovery_time = cmpt_clk(ide_pio_timings[pio].cycle_time 218 clks->recovery_time = cmpt_clk(ide_pio_timings[pio].cycle_time
220 - adr_setup-data_pls, bus_speed); 219 - adr_setup-data_pls, bus_speed);
221 if (clks->address_time<1) clks->address_time = 1; 220 if (clks->address_time < 1)
222 if (clks->address_time>4) clks->address_time = 4; 221 clks->address_time = 1;
223 if (clks->data_time<1) clks->data_time = 1; 222 if (clks->address_time > 4)
224 if (clks->data_time>16) clks->data_time = 16; 223 clks->address_time = 4;
225 if (clks->recovery_time<2) clks->recovery_time = 2; 224 if (clks->data_time < 1)
226 if (clks->recovery_time>17) clks->recovery_time = 17; 225 clks->data_time = 1;
226 if (clks->data_time > 16)
227 clks->data_time = 16;
228 if (clks->recovery_time < 2)
229 clks->recovery_time = 2;
230 if (clks->recovery_time > 17)
231 clks->recovery_time = 17;
227 } else { 232 } else {
228 clks->address_time = 1; 233 clks->address_time = 1;
229 clks->data_time = 1; 234 clks->data_time = 1;
230 clks->recovery_time = 2; 235 clks->recovery_time = 2;
231 /* minimal values */ 236 /* minimal values */
232 } 237 }
233
234} 238}
235 239
236static void opti621_set_pio_mode(ide_drive_t *drive, const u8 pio) 240static void opti621_set_pio_mode(ide_drive_t *drive, const u8 pio)
@@ -247,8 +251,8 @@ static void opti621_set_pio_mode(ide_drive_t *drive, const u8 pio)
247 251
248 /* sets drive->drive_data for both drives */ 252 /* sets drive->drive_data for both drives */
249 compute_pios(drive, pio); 253 compute_pios(drive, pio);
250 pio1 = hwif->drives[0].drive_data; 254 pio1 = hwif->drives[0].drive_data;
251 pio2 = hwif->drives[1].drive_data; 255 pio2 = hwif->drives[1].drive_data;
252 256
253 compute_clocks(pio1, &first); 257 compute_clocks(pio1, &first);
254 compute_clocks(pio2, &second); 258 compute_clocks(pio2, &second);
@@ -275,7 +279,7 @@ static void opti621_set_pio_mode(ide_drive_t *drive, const u8 pio)
275 279
276 spin_lock_irqsave(&opti621_lock, flags); 280 spin_lock_irqsave(&opti621_lock, flags);
277 281
278 reg_base = hwif->io_ports[IDE_DATA_OFFSET]; 282 reg_base = hwif->io_ports.data_addr;
279 283
280 /* allow Register-B */ 284 /* allow Register-B */
281 outb(0xc0, reg_base + CNTRL_REG); 285 outb(0xc0, reg_base + CNTRL_REG);
@@ -321,31 +325,25 @@ static void __devinit opti621_port_init_devs(ide_hwif_t *hwif)
321 hwif->drives[1].drive_data = PIO_DONT_KNOW; 325 hwif->drives[1].drive_data = PIO_DONT_KNOW;
322} 326}
323 327
324/* 328static const struct ide_port_ops opti621_port_ops = {
325 * init_hwif_opti621() is called once for each hwif found at boot. 329 .port_init_devs = opti621_port_init_devs,
326 */ 330 .set_pio_mode = opti621_set_pio_mode,
327static void __devinit init_hwif_opti621 (ide_hwif_t *hwif) 331};
328{
329 hwif->port_init_devs = opti621_port_init_devs;
330 hwif->set_pio_mode = &opti621_set_pio_mode;
331}
332 332
333static const struct ide_port_info opti621_chipsets[] __devinitdata = { 333static const struct ide_port_info opti621_chipsets[] __devinitdata = {
334 { /* 0 */ 334 { /* 0 */
335 .name = "OPTI621", 335 .name = "OPTI621",
336 .init_hwif = init_hwif_opti621, 336 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
337 .enablebits = {{0x45,0x80,0x00}, {0x40,0x08,0x00}}, 337 .port_ops = &opti621_port_ops,
338 .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA | 338 .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA,
339 IDE_HFLAG_BOOTABLE,
340 .pio_mask = ATA_PIO3, 339 .pio_mask = ATA_PIO3,
341 .swdma_mask = ATA_SWDMA2, 340 .swdma_mask = ATA_SWDMA2,
342 .mwdma_mask = ATA_MWDMA2, 341 .mwdma_mask = ATA_MWDMA2,
343 },{ /* 1 */ 342 }, { /* 1 */
344 .name = "OPTI621X", 343 .name = "OPTI621X",
345 .init_hwif = init_hwif_opti621, 344 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
346 .enablebits = {{0x45,0x80,0x00}, {0x40,0x08,0x00}}, 345 .port_ops = &opti621_port_ops,
347 .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA | 346 .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA,
348 IDE_HFLAG_BOOTABLE,
349 .pio_mask = ATA_PIO3, 347 .pio_mask = ATA_PIO3,
350 .swdma_mask = ATA_SWDMA2, 348 .swdma_mask = ATA_SWDMA2,
351 .mwdma_mask = ATA_MWDMA2, 349 .mwdma_mask = ATA_MWDMA2,
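compute_clocks() in opti621 turns the standard PIO timing numbers (setup, active and recovery times in ns) into clock counts and then clamps each to what the OPTi 621 registers can encode: 1-4 address clocks, 1-16 data clocks, 2-17 recovery clocks; the hunk above only unfolds the one-line clamps into kernel coding style. A tiny helper expressing the same bounds (the test values are arbitrary):

#include <stdio.h>

static int clamp_int(int v, int lo, int hi)
{
	if (v < lo)
		return lo;
	if (v > hi)
		return hi;
	return v;
}

int main(void)
{
	/* register limits: address 1-4, data 1-16, recovery 2-17 clocks */
	printf("address:  %d\n", clamp_int(0, 1, 4));
	printf("data:     %d\n", clamp_int(20, 1, 16));
	printf("recovery: %d\n", clamp_int(5, 2, 17));
	return 0;
}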
diff --git a/drivers/ide/pci/pdc202xx_new.c b/drivers/ide/pci/pdc202xx_new.c
index 1c8cb7797a4a..ec9bd7b352fc 100644
--- a/drivers/ide/pci/pdc202xx_new.c
+++ b/drivers/ide/pci/pdc202xx_new.c
@@ -34,7 +34,7 @@
34#undef DEBUG 34#undef DEBUG
35 35
36#ifdef DEBUG 36#ifdef DEBUG
37#define DBG(fmt, args...) printk("%s: " fmt, __FUNCTION__, ## args) 37#define DBG(fmt, args...) printk("%s: " fmt, __func__, ## args)
38#else 38#else
39#define DBG(fmt, args...) 39#define DBG(fmt, args...)
40#endif 40#endif
@@ -442,17 +442,6 @@ static unsigned int __devinit init_chipset_pdcnew(struct pci_dev *dev, const cha
442 return dev->irq; 442 return dev->irq;
443} 443}
444 444
445static void __devinit init_hwif_pdc202new(ide_hwif_t *hwif)
446{
447 hwif->set_pio_mode = &pdcnew_set_pio_mode;
448 hwif->set_dma_mode = &pdcnew_set_dma_mode;
449
450 hwif->quirkproc = &pdcnew_quirkproc;
451 hwif->resetproc = &pdcnew_reset;
452
453 hwif->cable_detect = pdcnew_cable_detect;
454}
455
456static struct pci_dev * __devinit pdc20270_get_dev2(struct pci_dev *dev) 445static struct pci_dev * __devinit pdc20270_get_dev2(struct pci_dev *dev)
457{ 446{
458 struct pci_dev *dev2; 447 struct pci_dev *dev2;
@@ -476,11 +465,19 @@ static struct pci_dev * __devinit pdc20270_get_dev2(struct pci_dev *dev)
476 return NULL; 465 return NULL;
477} 466}
478 467
468static const struct ide_port_ops pdcnew_port_ops = {
469 .set_pio_mode = pdcnew_set_pio_mode,
470 .set_dma_mode = pdcnew_set_dma_mode,
471 .quirkproc = pdcnew_quirkproc,
472 .resetproc = pdcnew_reset,
473 .cable_detect = pdcnew_cable_detect,
474};
475
479#define DECLARE_PDCNEW_DEV(name_str, udma) \ 476#define DECLARE_PDCNEW_DEV(name_str, udma) \
480 { \ 477 { \
481 .name = name_str, \ 478 .name = name_str, \
482 .init_chipset = init_chipset_pdcnew, \ 479 .init_chipset = init_chipset_pdcnew, \
483 .init_hwif = init_hwif_pdc202new, \ 480 .port_ops = &pdcnew_port_ops, \
484 .host_flags = IDE_HFLAG_POST_SET_MODE | \ 481 .host_flags = IDE_HFLAG_POST_SET_MODE | \
485 IDE_HFLAG_ERROR_STOPS_FIFO | \ 482 IDE_HFLAG_ERROR_STOPS_FIFO | \
486 IDE_HFLAG_OFF_BOARD, \ 483 IDE_HFLAG_OFF_BOARD, \
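
The DBG() hunk swaps the GCC-specific __FUNCTION__ for C99's standard __func__ predefined identifier (the named-variadic "args..." form stays GNU C, which the kernel builds with). A standalone sketch of the same macro shape:

#include <stdio.h>

/* __func__ is standard C99; __FUNCTION__ was the old GCC spelling. */
#define DBG(fmt, args...) printf("%s: " fmt, __func__, ## args)

static void pdcnew_probe(void)		/* hypothetical caller */
{
	DBG("IRQ %d\n", 14);		/* prints "pdcnew_probe: IRQ 14" */
}

int main(void)
{
	pdcnew_probe();
	return 0;
}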
diff --git a/drivers/ide/pci/pdc202xx_old.c b/drivers/ide/pci/pdc202xx_old.c
index 150422ec3cfa..fca89eda5c02 100644
--- a/drivers/ide/pci/pdc202xx_old.c
+++ b/drivers/ide/pci/pdc202xx_old.c
@@ -115,7 +115,7 @@ static void pdc202xx_set_pio_mode(ide_drive_t *drive, const u8 pio)
115 pdc202xx_set_mode(drive, XFER_PIO_0 + pio); 115 pdc202xx_set_mode(drive, XFER_PIO_0 + pio);
116} 116}
117 117
118static u8 __devinit pdc2026x_old_cable_detect(ide_hwif_t *hwif) 118static u8 __devinit pdc2026x_cable_detect(ide_hwif_t *hwif)
119{ 119{
120 struct pci_dev *dev = to_pci_dev(hwif->dev); 120 struct pci_dev *dev = to_pci_dev(hwif->dev);
121 u16 CIS, mask = hwif->channel ? (1 << 11) : (1 << 10); 121 u16 CIS, mask = hwif->channel ? (1 << 11) : (1 << 10);
@@ -163,7 +163,7 @@ static void pdc202xx_quirkproc(ide_drive_t *drive)
163 drive->quirk_list = 0; 163 drive->quirk_list = 0;
164} 164}
165 165
166static void pdc202xx_old_ide_dma_start(ide_drive_t *drive) 166static void pdc202xx_dma_start(ide_drive_t *drive)
167{ 167{
168 if (drive->current_speed > XFER_UDMA_2) 168 if (drive->current_speed > XFER_UDMA_2)
169 pdc_old_enable_66MHz_clock(drive->hwif); 169 pdc_old_enable_66MHz_clock(drive->hwif);
@@ -185,7 +185,7 @@ static void pdc202xx_old_ide_dma_start(ide_drive_t *drive)
185 ide_dma_start(drive); 185 ide_dma_start(drive);
186} 186}
187 187
188static int pdc202xx_old_ide_dma_end(ide_drive_t *drive) 188static int pdc202xx_dma_end(ide_drive_t *drive)
189{ 189{
190 if (drive->media != ide_disk || drive->addressing == 1) { 190 if (drive->media != ide_disk || drive->addressing == 1) {
191 ide_hwif_t *hwif = HWIF(drive); 191 ide_hwif_t *hwif = HWIF(drive);
@@ -202,7 +202,7 @@ static int pdc202xx_old_ide_dma_end(ide_drive_t *drive)
202 return __ide_dma_end(drive); 202 return __ide_dma_end(drive);
203} 203}
204 204
205static int pdc202xx_old_ide_dma_test_irq(ide_drive_t *drive) 205static int pdc202xx_dma_test_irq(ide_drive_t *drive)
206{ 206{
207 ide_hwif_t *hwif = HWIF(drive); 207 ide_hwif_t *hwif = HWIF(drive);
208 unsigned long high_16 = hwif->extra_base - 16; 208 unsigned long high_16 = hwif->extra_base - 16;
@@ -226,26 +226,6 @@ somebody_else:
226 return (dma_stat & 4) == 4; /* return 1 if INTR asserted */ 226 return (dma_stat & 4) == 4; /* return 1 if INTR asserted */
227} 227}
228 228
229static void pdc202xx_dma_lost_irq(ide_drive_t *drive)
230{
231 ide_hwif_t *hwif = HWIF(drive);
232
233 if (hwif->resetproc != NULL)
234 hwif->resetproc(drive);
235
236 ide_dma_lost_irq(drive);
237}
238
239static void pdc202xx_dma_timeout(ide_drive_t *drive)
240{
241 ide_hwif_t *hwif = HWIF(drive);
242
243 if (hwif->resetproc != NULL)
244 hwif->resetproc(drive);
245
246 ide_dma_timeout(drive);
247}
248
249static void pdc202xx_reset_host (ide_hwif_t *hwif) 229static void pdc202xx_reset_host (ide_hwif_t *hwif)
250{ 230{
251 unsigned long high_16 = hwif->extra_base - 16; 231 unsigned long high_16 = hwif->extra_base - 16;
@@ -271,68 +251,46 @@ static void pdc202xx_reset (ide_drive_t *drive)
 	ide_set_max_pio(drive);
 }
 
-static unsigned int __devinit init_chipset_pdc202xx(struct pci_dev *dev,
-						    const char *name)
+static void pdc202xx_dma_lost_irq(ide_drive_t *drive)
 {
-	return dev->irq;
+	pdc202xx_reset(drive);
+	ide_dma_lost_irq(drive);
 }
 
-static void __devinit init_hwif_pdc202xx(ide_hwif_t *hwif)
+static void pdc202xx_dma_timeout(ide_drive_t *drive)
 {
-	struct pci_dev *dev = to_pci_dev(hwif->dev);
-
-	hwif->set_pio_mode = &pdc202xx_set_pio_mode;
-	hwif->set_dma_mode = &pdc202xx_set_mode;
-
-	hwif->quirkproc = &pdc202xx_quirkproc;
-
-	if (dev->device != PCI_DEVICE_ID_PROMISE_20246) {
-		hwif->resetproc = &pdc202xx_reset;
-
-		hwif->cable_detect = pdc2026x_old_cable_detect;
-	}
-
-	if (hwif->dma_base == 0)
-		return;
-
-	hwif->dma_lost_irq = &pdc202xx_dma_lost_irq;
-	hwif->dma_timeout = &pdc202xx_dma_timeout;
-
-	if (dev->device != PCI_DEVICE_ID_PROMISE_20246) {
-		hwif->dma_start = &pdc202xx_old_ide_dma_start;
-		hwif->ide_dma_end = &pdc202xx_old_ide_dma_end;
-	}
-	hwif->ide_dma_test_irq = &pdc202xx_old_ide_dma_test_irq;
+	pdc202xx_reset(drive);
+	ide_dma_timeout(drive);
 }
 
-static void __devinit init_dma_pdc202xx(ide_hwif_t *hwif, unsigned long dmabase)
+static unsigned int __devinit init_chipset_pdc202xx(struct pci_dev *dev,
+						    const char *name)
 {
+	unsigned long dmabase = pci_resource_start(dev, 4);
 	u8 udma_speed_flag = 0, primary_mode = 0, secondary_mode = 0;
 
-	if (hwif->channel) {
-		ide_setup_dma(hwif, dmabase);
-		return;
-	}
+	if (dmabase == 0)
+		goto out;
 
 	udma_speed_flag = inb(dmabase | 0x1f);
 	primary_mode = inb(dmabase | 0x1a);
 	secondary_mode = inb(dmabase | 0x1b);
 	printk(KERN_INFO "%s: (U)DMA Burst Bit %sABLED " \
 		"Primary %s Mode " \
-		"Secondary %s Mode.\n", hwif->cds->name,
+		"Secondary %s Mode.\n", pci_name(dev),
 		(udma_speed_flag & 1) ? "EN" : "DIS",
 		(primary_mode & 1) ? "MASTER" : "PCI",
 		(secondary_mode & 1) ? "MASTER" : "PCI" );
 
 	if (!(udma_speed_flag & 1)) {
 		printk(KERN_INFO "%s: FORCING BURST BIT 0x%02x->0x%02x ",
-			hwif->cds->name, udma_speed_flag,
+			pci_name(dev), udma_speed_flag,
 			(udma_speed_flag|1));
 		outb(udma_speed_flag | 1, dmabase | 0x1f);
 		printk("%sACTIVE\n", (inb(dmabase | 0x1f) & 1) ? "" : "IN");
 	}
-
-	ide_setup_dma(hwif, dmabase);
+out:
+	return dev->irq;
 }
 
 static void __devinit pdc202ata4_fixup_irq(struct pci_dev *dev,
@@ -357,13 +315,48 @@ static void __devinit pdc202ata4_fixup_irq(struct pci_dev *dev,
357 IDE_HFLAG_ABUSE_SET_DMA_MODE | \ 315 IDE_HFLAG_ABUSE_SET_DMA_MODE | \
358 IDE_HFLAG_OFF_BOARD) 316 IDE_HFLAG_OFF_BOARD)
359 317
318static const struct ide_port_ops pdc20246_port_ops = {
319 .set_pio_mode = pdc202xx_set_pio_mode,
320 .set_dma_mode = pdc202xx_set_mode,
321 .quirkproc = pdc202xx_quirkproc,
322};
323
324static const struct ide_port_ops pdc2026x_port_ops = {
325 .set_pio_mode = pdc202xx_set_pio_mode,
326 .set_dma_mode = pdc202xx_set_mode,
327 .quirkproc = pdc202xx_quirkproc,
328 .resetproc = pdc202xx_reset,
329 .cable_detect = pdc2026x_cable_detect,
330};
331
332static const struct ide_dma_ops pdc20246_dma_ops = {
333 .dma_host_set = ide_dma_host_set,
334 .dma_setup = ide_dma_setup,
335 .dma_exec_cmd = ide_dma_exec_cmd,
336 .dma_start = ide_dma_start,
337 .dma_end = __ide_dma_end,
338 .dma_test_irq = pdc202xx_dma_test_irq,
339 .dma_lost_irq = pdc202xx_dma_lost_irq,
340 .dma_timeout = pdc202xx_dma_timeout,
341};
342
343static const struct ide_dma_ops pdc2026x_dma_ops = {
344 .dma_host_set = ide_dma_host_set,
345 .dma_setup = ide_dma_setup,
346 .dma_exec_cmd = ide_dma_exec_cmd,
347 .dma_start = pdc202xx_dma_start,
348 .dma_end = pdc202xx_dma_end,
349 .dma_test_irq = pdc202xx_dma_test_irq,
350 .dma_lost_irq = pdc202xx_dma_lost_irq,
351 .dma_timeout = pdc202xx_dma_timeout,
352};
353
360#define DECLARE_PDC2026X_DEV(name_str, udma, extra_flags) \ 354#define DECLARE_PDC2026X_DEV(name_str, udma, extra_flags) \
361 { \ 355 { \
362 .name = name_str, \ 356 .name = name_str, \
363 .init_chipset = init_chipset_pdc202xx, \ 357 .init_chipset = init_chipset_pdc202xx, \
364 .init_hwif = init_hwif_pdc202xx, \ 358 .port_ops = &pdc2026x_port_ops, \
365 .init_dma = init_dma_pdc202xx, \ 359 .dma_ops = &pdc2026x_dma_ops, \
366 .extra = 48, \
367 .host_flags = IDE_HFLAGS_PDC202XX | extra_flags, \ 360 .host_flags = IDE_HFLAGS_PDC202XX | extra_flags, \
368 .pio_mask = ATA_PIO4, \ 361 .pio_mask = ATA_PIO4, \
369 .mwdma_mask = ATA_MWDMA2, \ 362 .mwdma_mask = ATA_MWDMA2, \
@@ -374,9 +367,8 @@ static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
374 { /* 0 */ 367 { /* 0 */
375 .name = "PDC20246", 368 .name = "PDC20246",
376 .init_chipset = init_chipset_pdc202xx, 369 .init_chipset = init_chipset_pdc202xx,
377 .init_hwif = init_hwif_pdc202xx, 370 .port_ops = &pdc20246_port_ops,
378 .init_dma = init_dma_pdc202xx, 371 .dma_ops = &pdc20246_dma_ops,
379 .extra = 16,
380 .host_flags = IDE_HFLAGS_PDC202XX, 372 .host_flags = IDE_HFLAGS_PDC202XX,
381 .pio_mask = ATA_PIO4, 373 .pio_mask = ATA_PIO4,
382 .mwdma_mask = ATA_MWDMA2, 374 .mwdma_mask = ATA_MWDMA2,
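
pdc202xx_old ends up with two DMA method tables: pdc20246_dma_ops keeps the generic ide_dma_start/__ide_dma_end helpers, while pdc2026x_dma_ops overrides them with the chip's clock-aware variants, and dma_lost_irq/dma_timeout now call pdc202xx_reset() directly instead of indirecting through hwif->resetproc. The old "if (dev->device != PCI_DEVICE_ID_PROMISE_20246)" tests vanish because each ide_port_info entry simply names the right table. A sketch of that override pattern, with toy types and stub bodies:

#include <stdio.h>

struct dma_ops {
	void (*dma_start)(void);
	int  (*dma_end)(void);
};

/* generic helpers, shared by both chips */
static void generic_dma_start(void) { puts("start DMA"); }
static int  generic_dma_end(void)   { puts("stop DMA"); return 0; }

/* chip-specific variant: kick the 66 MHz clock first */
static void pdc2026x_dma_start(void)
{
	puts("enable 66MHz clock");
	generic_dma_start();
}

static const struct dma_ops pdc20246_dma_ops = {
	.dma_start = generic_dma_start,
	.dma_end   = generic_dma_end,
};

static const struct dma_ops pdc2026x_dma_ops = {
	.dma_start = pdc2026x_dma_start,
	.dma_end   = generic_dma_end,
};

int main(void)
{
	/* the device table picks the ops; no device-id test at probe */
	const struct dma_ops *ops = &pdc2026x_dma_ops;

	ops->dma_start();
	return ops->dma_end();
}

Selection happens once, in data, rather than on every probe path.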
diff --git a/drivers/ide/pci/piix.c b/drivers/ide/pci/piix.c
index decef0f47674..21c5dd23f928 100644
--- a/drivers/ide/pci/piix.c
+++ b/drivers/ide/pci/piix.c
@@ -285,11 +285,6 @@ static u8 __devinit piix_cable_detect(ide_hwif_t *hwif)
285 285
286static void __devinit init_hwif_piix(ide_hwif_t *hwif) 286static void __devinit init_hwif_piix(ide_hwif_t *hwif)
287{ 287{
288 hwif->set_pio_mode = &piix_set_pio_mode;
289 hwif->set_dma_mode = &piix_set_dma_mode;
290
291 hwif->cable_detect = piix_cable_detect;
292
293 if (!hwif->dma_base) 288 if (!hwif->dma_base)
294 return; 289 return;
295 290
@@ -306,10 +301,16 @@ static void __devinit init_hwif_ich(ide_hwif_t *hwif)
306 hwif->ide_dma_clear_irq = &piix_dma_clear_irq; 301 hwif->ide_dma_clear_irq = &piix_dma_clear_irq;
307} 302}
308 303
304static const struct ide_port_ops piix_port_ops = {
305 .set_pio_mode = piix_set_pio_mode,
306 .set_dma_mode = piix_set_dma_mode,
307 .cable_detect = piix_cable_detect,
308};
309
309#ifndef CONFIG_IA64 310#ifndef CONFIG_IA64
310 #define IDE_HFLAGS_PIIX (IDE_HFLAG_LEGACY_IRQS | IDE_HFLAG_BOOTABLE) 311 #define IDE_HFLAGS_PIIX IDE_HFLAG_LEGACY_IRQS
311#else 312#else
312 #define IDE_HFLAGS_PIIX IDE_HFLAG_BOOTABLE 313 #define IDE_HFLAGS_PIIX 0
313#endif 314#endif
314 315
315#define DECLARE_PIIX_DEV(name_str, udma) \ 316#define DECLARE_PIIX_DEV(name_str, udma) \
@@ -317,6 +318,7 @@ static void __devinit init_hwif_ich(ide_hwif_t *hwif)
317 .name = name_str, \ 318 .name = name_str, \
318 .init_hwif = init_hwif_piix, \ 319 .init_hwif = init_hwif_piix, \
319 .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \ 320 .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \
321 .port_ops = &piix_port_ops, \
320 .host_flags = IDE_HFLAGS_PIIX, \ 322 .host_flags = IDE_HFLAGS_PIIX, \
321 .pio_mask = ATA_PIO4, \ 323 .pio_mask = ATA_PIO4, \
322 .swdma_mask = ATA_SWDMA2_ONLY, \ 324 .swdma_mask = ATA_SWDMA2_ONLY, \
@@ -330,6 +332,7 @@ static void __devinit init_hwif_ich(ide_hwif_t *hwif)
330 .init_chipset = init_chipset_ich, \ 332 .init_chipset = init_chipset_ich, \
331 .init_hwif = init_hwif_ich, \ 333 .init_hwif = init_hwif_ich, \
332 .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \ 334 .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \
335 .port_ops = &piix_port_ops, \
333 .host_flags = IDE_HFLAGS_PIIX, \ 336 .host_flags = IDE_HFLAGS_PIIX, \
334 .pio_mask = ATA_PIO4, \ 337 .pio_mask = ATA_PIO4, \
335 .swdma_mask = ATA_SWDMA2_ONLY, \ 338 .swdma_mask = ATA_SWDMA2_ONLY, \
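
DECLARE_PIIX_DEV() stamps out one designated-initializer table entry per chip variant, and IDE_HFLAGS_PIIX now resolves to IDE_HFLAG_LEGACY_IRQS or, on ia64, to plain 0 once IDE_HFLAG_BOOTABLE is gone. A sketch of the same macro-stamped-table technique (flag bits hypothetical):

#include <stdio.h>

#define HFLAG_LEGACY_IRQS	(1 << 0)	/* hypothetical bit value */

#ifndef CONFIG_IA64
# define HFLAGS_PIIX	HFLAG_LEGACY_IRQS
#else
# define HFLAGS_PIIX	0
#endif

struct port_info {
	const char	*name;
	unsigned long	host_flags;
	unsigned char	udma_mask;
};

/* one macro, many table entries: the DECLARE_PIIX_DEV() idea */
#define DECLARE_DEV(name_str, udma) \
	{ .name = name_str, .host_flags = HFLAGS_PIIX, .udma_mask = udma }

static const struct port_info piix_chipsets[] = {
	DECLARE_DEV("PIIX3", 0x00),
	DECLARE_DEV("ICH4",  0x3f),
};

int main(void)
{
	for (unsigned i = 0; i < sizeof(piix_chipsets) / sizeof(piix_chipsets[0]); i++)
		printf("%s: flags=%#lx udma=%#x\n", piix_chipsets[i].name,
		       piix_chipsets[i].host_flags, piix_chipsets[i].udma_mask);
	return 0;
}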
diff --git a/drivers/ide/pci/rz1000.c b/drivers/ide/pci/rz1000.c
index 51676612f78f..532154adba29 100644
--- a/drivers/ide/pci/rz1000.c
+++ b/drivers/ide/pci/rz1000.c
@@ -43,7 +43,7 @@ static const struct ide_port_info rz1000_chipset __devinitdata = {
43 .name = "RZ100x", 43 .name = "RZ100x",
44 .init_hwif = init_hwif_rz1000, 44 .init_hwif = init_hwif_rz1000,
45 .chipset = ide_rz1000, 45 .chipset = ide_rz1000,
46 .host_flags = IDE_HFLAG_NO_DMA | IDE_HFLAG_BOOTABLE, 46 .host_flags = IDE_HFLAG_NO_DMA,
47}; 47};
48 48
49static int __devinit rz1000_init_one(struct pci_dev *dev, const struct pci_device_id *id) 49static int __devinit rz1000_init_one(struct pci_dev *dev, const struct pci_device_id *id)
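
rz1000 merely loses IDE_HFLAG_BOOTABLE, but read together with the serverworks hunk below, where the probe code sets IDE_HFLAG_NON_BOOTABLE, the series inverts the flag's polarity: ports are assumed bootable and must opt out. What that means at a test site, with a hypothetical bit value:

#include <stdio.h>

#define IDE_HFLAG_NON_BOOTABLE	(1 << 0)	/* hypothetical bit */

/* before: bootable only if IDE_HFLAG_BOOTABLE was set;
 * after: bootable unless the port explicitly opts out. */
static int port_is_bootable(unsigned long host_flags)
{
	return !(host_flags & IDE_HFLAG_NON_BOOTABLE);
}

int main(void)
{
	printf("default: %d\n", port_is_bootable(0));			/* 1 */
	printf("opt-out: %d\n", port_is_bootable(IDE_HFLAG_NON_BOOTABLE)); /* 0 */
	return 0;
}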
diff --git a/drivers/ide/pci/sc1200.c b/drivers/ide/pci/sc1200.c
index 561aa47c7720..14c787b5d95f 100644
--- a/drivers/ide/pci/sc1200.c
+++ b/drivers/ide/pci/sc1200.c
@@ -165,7 +165,7 @@ static void sc1200_set_dma_mode(ide_drive_t *drive, const u8 mode)
165 * 165 *
166 * returns 1 on error, 0 otherwise 166 * returns 1 on error, 0 otherwise
167 */ 167 */
168static int sc1200_ide_dma_end (ide_drive_t *drive) 168static int sc1200_dma_end(ide_drive_t *drive)
169{ 169{
170 ide_hwif_t *hwif = HWIF(drive); 170 ide_hwif_t *hwif = HWIF(drive);
171 unsigned long dma_base = hwif->dma_base; 171 unsigned long dma_base = hwif->dma_base;
@@ -214,7 +214,7 @@ static void sc1200_set_pio_mode(ide_drive_t *drive, const u8 pio)
214 printk("SC1200: %s: changing (U)DMA mode\n", drive->name); 214 printk("SC1200: %s: changing (U)DMA mode\n", drive->name);
215 ide_dma_off_quietly(drive); 215 ide_dma_off_quietly(drive);
216 if (ide_set_dma_mode(drive, mode) == 0 && drive->using_dma) 216 if (ide_set_dma_mode(drive, mode) == 0 && drive->using_dma)
217 hwif->dma_host_set(drive, 1); 217 hwif->dma_ops->dma_host_set(drive, 1);
218 return; 218 return;
219 } 219 }
220 220
@@ -286,29 +286,30 @@ static int sc1200_resume (struct pci_dev *dev)
 }
 #endif
 
-/*
- * This gets invoked by the IDE driver once for each channel,
- * and performs channel-specific pre-initialization before drive probing.
- */
-static void __devinit init_hwif_sc1200 (ide_hwif_t *hwif)
-{
-	hwif->set_pio_mode = &sc1200_set_pio_mode;
-	hwif->set_dma_mode = &sc1200_set_dma_mode;
-
-	if (hwif->dma_base == 0)
-		return;
+static const struct ide_port_ops sc1200_port_ops = {
+	.set_pio_mode = sc1200_set_pio_mode,
+	.set_dma_mode = sc1200_set_dma_mode,
+	.udma_filter = sc1200_udma_filter,
+};
 
-	hwif->udma_filter = sc1200_udma_filter;
-	hwif->ide_dma_end = &sc1200_ide_dma_end;
-}
+static const struct ide_dma_ops sc1200_dma_ops = {
+	.dma_host_set = ide_dma_host_set,
+	.dma_setup = ide_dma_setup,
+	.dma_exec_cmd = ide_dma_exec_cmd,
+	.dma_start = ide_dma_start,
+	.dma_end = sc1200_dma_end,
+	.dma_test_irq = ide_dma_test_irq,
+	.dma_lost_irq = ide_dma_lost_irq,
+	.dma_timeout = ide_dma_timeout,
+};
 
 static const struct ide_port_info sc1200_chipset __devinitdata = {
 	.name = "SC1200",
-	.init_hwif = init_hwif_sc1200,
+	.port_ops = &sc1200_port_ops,
+	.dma_ops = &sc1200_dma_ops,
 	.host_flags = IDE_HFLAG_SERIALIZE |
 		      IDE_HFLAG_POST_SET_MODE |
-		      IDE_HFLAG_ABUSE_DMA_MODES |
-		      IDE_HFLAG_BOOTABLE,
+		      IDE_HFLAG_ABUSE_DMA_MODES,
 	.pio_mask = ATA_PIO4,
 	.mwdma_mask = ATA_MWDMA2,
 	.udma_mask = ATA_UDMA2,
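
Besides growing the sc1200_port_ops/sc1200_dma_ops tables, note the call-site change inside sc1200_set_pio_mode(): hwif->dma_host_set(drive, 1) becomes hwif->dma_ops->dma_host_set(drive, 1), one more indirection through the shared table. A toy sketch of that call path:

#include <stdio.h>

struct dma_ops {
	void (*dma_host_set)(int on);
};

static void ide_dma_host_set(int on)	/* generic helper, as in the table */
{
	printf("host DMA %s\n", on ? "on" : "off");
}

static const struct dma_ops sc1200_dma_ops = {
	.dma_host_set = ide_dma_host_set,
};

struct hwif {
	const struct dma_ops *dma_ops;	/* was: a per-hwif function pointer */
};

int main(void)
{
	struct hwif hwif = { .dma_ops = &sc1200_dma_ops };

	hwif.dma_ops->dma_host_set(1);	/* was: hwif.dma_host_set(1) */
	return 0;
}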
diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/pci/scc_pata.c
index ef07c7a8b97a..ad7cdf9060ca 100644
--- a/drivers/ide/pci/scc_pata.c
+++ b/drivers/ide/pci/scc_pata.c
@@ -65,7 +65,7 @@
65 65
66static struct scc_ports { 66static struct scc_ports {
67 unsigned long ctl, dma; 67 unsigned long ctl, dma;
68 unsigned char hwif_id; /* for removing hwif from system */ 68 ide_hwif_t *hwif; /* for removing port from system */
69} scc_ports[MAX_HWIFS]; 69} scc_ports[MAX_HWIFS];
70 70
71/* PIO transfer mode table */ 71/* PIO transfer mode table */
@@ -317,14 +317,14 @@ static int scc_dma_setup(ide_drive_t *drive)
317 317
318 318
319/** 319/**
320 * scc_ide_dma_end - Stop DMA 320 * scc_dma_end - Stop DMA
321 * @drive: IDE drive 321 * @drive: IDE drive
322 * 322 *
323 * Check and clear INT Status register. 323 * Check and clear INT Status register.
324 * Then call __ide_dma_end(). 324 * Then call __ide_dma_end().
325 */ 325 */
326 326
327static int scc_ide_dma_end(ide_drive_t * drive) 327static int scc_dma_end(ide_drive_t *drive)
328{ 328{
329 ide_hwif_t *hwif = HWIF(drive); 329 ide_hwif_t *hwif = HWIF(drive);
330 unsigned long intsts_port = hwif->dma_base + 0x014; 330 unsigned long intsts_port = hwif->dma_base + 0x014;
@@ -334,7 +334,7 @@ static int scc_ide_dma_end(ide_drive_t * drive)
334 334
335 /* errata A308 workaround: Step5 (check data loss) */ 335 /* errata A308 workaround: Step5 (check data loss) */
336 /* We don't check non ide_disk because it is limited to UDMA4 */ 336 /* We don't check non ide_disk because it is limited to UDMA4 */
337 if (!(in_be32((void __iomem *)hwif->io_ports[IDE_ALTSTATUS_OFFSET]) 337 if (!(in_be32((void __iomem *)hwif->io_ports.ctl_addr)
338 & ERR_STAT) && 338 & ERR_STAT) &&
339 drive->media == ide_disk && drive->current_speed > XFER_UDMA_4) { 339 drive->media == ide_disk && drive->current_speed > XFER_UDMA_4) {
340 reg = in_be32((void __iomem *)intsts_port); 340 reg = in_be32((void __iomem *)intsts_port);
@@ -438,7 +438,7 @@ static int scc_dma_test_irq(ide_drive_t *drive)
438 u32 int_stat = in_be32((void __iomem *)hwif->dma_base + 0x014); 438 u32 int_stat = in_be32((void __iomem *)hwif->dma_base + 0x014);
439 439
440 /* SCC errata A252,A308 workaround: Step4 */ 440 /* SCC errata A252,A308 workaround: Step4 */
441 if ((in_be32((void __iomem *)hwif->io_ports[IDE_ALTSTATUS_OFFSET]) 441 if ((in_be32((void __iomem *)hwif->io_ports.ctl_addr)
442 & ERR_STAT) && 442 & ERR_STAT) &&
443 (int_stat & INTSTS_INTRQ)) 443 (int_stat & INTSTS_INTRQ))
444 return 1; 444 return 1;
@@ -449,7 +449,7 @@ static int scc_dma_test_irq(ide_drive_t *drive)
449 449
450 if (!drive->waiting_for_dma) 450 if (!drive->waiting_for_dma)
451 printk(KERN_WARNING "%s: (%s) called while not waiting\n", 451 printk(KERN_WARNING "%s: (%s) called while not waiting\n",
452 drive->name, __FUNCTION__); 452 drive->name, __func__);
453 return 0; 453 return 0;
454} 454}
455 455
@@ -483,7 +483,7 @@ static int setup_mmio_scc (struct pci_dev *dev, const char *name)
483 unsigned long dma_size = pci_resource_len(dev, 1); 483 unsigned long dma_size = pci_resource_len(dev, 1);
484 void __iomem *ctl_addr; 484 void __iomem *ctl_addr;
485 void __iomem *dma_addr; 485 void __iomem *dma_addr;
486 int i; 486 int i, ret;
487 487
488 for (i = 0; i < MAX_HWIFS; i++) { 488 for (i = 0; i < MAX_HWIFS; i++) {
489 if (scc_ports[i].ctl == 0) 489 if (scc_ports[i].ctl == 0)
@@ -492,21 +492,17 @@ static int setup_mmio_scc (struct pci_dev *dev, const char *name)
492 if (i >= MAX_HWIFS) 492 if (i >= MAX_HWIFS)
493 return -ENOMEM; 493 return -ENOMEM;
494 494
495 if (!request_mem_region(ctl_base, ctl_size, name)) { 495 ret = pci_request_selected_regions(dev, (1 << 2) - 1, name);
496 printk(KERN_WARNING "%s: IDE controller MMIO ports not available.\n", SCC_PATA_NAME); 496 if (ret < 0) {
497 goto fail_0; 497 printk(KERN_ERR "%s: can't reserve resources\n", name);
498 } 498 return ret;
499
500 if (!request_mem_region(dma_base, dma_size, name)) {
501 printk(KERN_WARNING "%s: IDE controller MMIO ports not available.\n", SCC_PATA_NAME);
502 goto fail_1;
503 } 499 }
504 500
505 if ((ctl_addr = ioremap(ctl_base, ctl_size)) == NULL) 501 if ((ctl_addr = ioremap(ctl_base, ctl_size)) == NULL)
506 goto fail_2; 502 goto fail_0;
507 503
508 if ((dma_addr = ioremap(dma_base, dma_size)) == NULL) 504 if ((dma_addr = ioremap(dma_base, dma_size)) == NULL)
509 goto fail_3; 505 goto fail_1;
510 506
511 pci_set_master(dev); 507 pci_set_master(dev);
512 scc_ports[i].ctl = (unsigned long)ctl_addr; 508 scc_ports[i].ctl = (unsigned long)ctl_addr;
@@ -515,12 +511,8 @@ static int setup_mmio_scc (struct pci_dev *dev, const char *name)
515 511
516 return 1; 512 return 1;
517 513
518 fail_3:
519 iounmap(ctl_addr);
520 fail_2:
521 release_mem_region(dma_base, dma_size);
522 fail_1: 514 fail_1:
523 release_mem_region(ctl_base, ctl_size); 515 iounmap(ctl_addr);
524 fail_0: 516 fail_0:
525 return -ENOMEM; 517 return -ENOMEM;
526} 518}
@@ -534,26 +526,21 @@ static int scc_ide_setup_pci_device(struct pci_dev *dev,
534 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 526 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
535 int i; 527 int i;
536 528
537 for (i = 0; i < MAX_HWIFS; i++) { 529 hwif = ide_find_port();
538 hwif = &ide_hwifs[i]; 530 if (hwif == NULL) {
539 if (hwif->chipset == ide_unknown)
540 break; /* pick an unused entry */
541 }
542 if (i == MAX_HWIFS) {
543 printk(KERN_ERR "%s: too many IDE interfaces, " 531 printk(KERN_ERR "%s: too many IDE interfaces, "
544 "no room in table\n", SCC_PATA_NAME); 532 "no room in table\n", SCC_PATA_NAME);
545 return -ENOMEM; 533 return -ENOMEM;
546 } 534 }
547 535
548 memset(&hw, 0, sizeof(hw)); 536 memset(&hw, 0, sizeof(hw));
549 for (i = IDE_DATA_OFFSET; i <= IDE_CONTROL_OFFSET; i++) 537 for (i = 0; i <= 8; i++)
550 hw.io_ports[i] = ports->dma + 0x20 + i * 4; 538 hw.io_ports_array[i] = ports->dma + 0x20 + i * 4;
551 hw.irq = dev->irq; 539 hw.irq = dev->irq;
552 hw.dev = &dev->dev; 540 hw.dev = &dev->dev;
553 hw.chipset = ide_pci; 541 hw.chipset = ide_pci;
554 ide_init_port_hw(hwif, &hw); 542 ide_init_port_hw(hwif, &hw);
555 hwif->dev = &dev->dev; 543 hwif->dev = &dev->dev;
556 hwif->cds = d;
557 544
558 idx[0] = hwif->index; 545 idx[0] = hwif->index;
559 546
@@ -696,7 +683,7 @@ static void __devinit init_hwif_scc(ide_hwif_t *hwif)
696{ 683{
697 struct scc_ports *ports = ide_get_hwifdata(hwif); 684 struct scc_ports *ports = ide_get_hwifdata(hwif);
698 685
699 ports->hwif_id = hwif->index; 686 ports->hwif = hwif;
700 687
701 hwif->dma_command = hwif->dma_base; 688 hwif->dma_command = hwif->dma_base;
702 hwif->dma_status = hwif->dma_base + 0x04; 689 hwif->dma_status = hwif->dma_base + 0x04;
@@ -705,28 +692,38 @@ static void __devinit init_hwif_scc(ide_hwif_t *hwif)
705 /* PTERADD */ 692 /* PTERADD */
706 out_be32((void __iomem *)(hwif->dma_base + 0x018), hwif->dmatable_dma); 693 out_be32((void __iomem *)(hwif->dma_base + 0x018), hwif->dmatable_dma);
707 694
708 hwif->dma_setup = scc_dma_setup;
709 hwif->ide_dma_end = scc_ide_dma_end;
710 hwif->set_pio_mode = scc_set_pio_mode;
711 hwif->set_dma_mode = scc_set_dma_mode;
712 hwif->ide_dma_test_irq = scc_dma_test_irq;
713 hwif->udma_filter = scc_udma_filter;
714
715 if (in_be32((void __iomem *)(hwif->config_data + 0xff0)) & CCKCTRL_ATACLKOEN) 695 if (in_be32((void __iomem *)(hwif->config_data + 0xff0)) & CCKCTRL_ATACLKOEN)
716 hwif->ultra_mask = ATA_UDMA6; /* 133MHz */ 696 hwif->ultra_mask = ATA_UDMA6; /* 133MHz */
717 else 697 else
718 hwif->ultra_mask = ATA_UDMA5; /* 100MHz */ 698 hwif->ultra_mask = ATA_UDMA5; /* 100MHz */
719
720 hwif->cable_detect = scc_cable_detect;
721} 699}
722 700
701static const struct ide_port_ops scc_port_ops = {
702 .set_pio_mode = scc_set_pio_mode,
703 .set_dma_mode = scc_set_dma_mode,
704 .udma_filter = scc_udma_filter,
705 .cable_detect = scc_cable_detect,
706};
707
708static const struct ide_dma_ops scc_dma_ops = {
709 .dma_host_set = ide_dma_host_set,
710 .dma_setup = scc_dma_setup,
711 .dma_exec_cmd = ide_dma_exec_cmd,
712 .dma_start = ide_dma_start,
713 .dma_end = scc_dma_end,
714 .dma_test_irq = scc_dma_test_irq,
715 .dma_lost_irq = ide_dma_lost_irq,
716 .dma_timeout = ide_dma_timeout,
717};
718
723#define DECLARE_SCC_DEV(name_str) \ 719#define DECLARE_SCC_DEV(name_str) \
724 { \ 720 { \
725 .name = name_str, \ 721 .name = name_str, \
726 .init_iops = init_iops_scc, \ 722 .init_iops = init_iops_scc, \
727 .init_hwif = init_hwif_scc, \ 723 .init_hwif = init_hwif_scc, \
728 .host_flags = IDE_HFLAG_SINGLE | \ 724 .port_ops = &scc_port_ops, \
729 IDE_HFLAG_BOOTABLE, \ 725 .dma_ops = &scc_dma_ops, \
726 .host_flags = IDE_HFLAG_SINGLE, \
730 .pio_mask = ATA_PIO4, \ 727 .pio_mask = ATA_PIO4, \
731 } 728 }
732 729
@@ -758,11 +755,7 @@ static int __devinit scc_init_one(struct pci_dev *dev, const struct pci_device_i
758static void __devexit scc_remove(struct pci_dev *dev) 755static void __devexit scc_remove(struct pci_dev *dev)
759{ 756{
760 struct scc_ports *ports = pci_get_drvdata(dev); 757 struct scc_ports *ports = pci_get_drvdata(dev);
761 ide_hwif_t *hwif = &ide_hwifs[ports->hwif_id]; 758 ide_hwif_t *hwif = ports->hwif;
762 unsigned long ctl_base = pci_resource_start(dev, 0);
763 unsigned long dma_base = pci_resource_start(dev, 1);
764 unsigned long ctl_size = pci_resource_len(dev, 0);
765 unsigned long dma_size = pci_resource_len(dev, 1);
766 759
767 if (hwif->dmatable_cpu) { 760 if (hwif->dmatable_cpu) {
768 pci_free_consistent(dev, PRD_ENTRIES * PRD_BYTES, 761 pci_free_consistent(dev, PRD_ENTRIES * PRD_BYTES,
@@ -770,13 +763,11 @@ static void __devexit scc_remove(struct pci_dev *dev)
770 hwif->dmatable_cpu = NULL; 763 hwif->dmatable_cpu = NULL;
771 } 764 }
772 765
773 ide_unregister(hwif->index); 766 ide_unregister(hwif);
774 767
775 hwif->chipset = ide_unknown;
776 iounmap((void*)ports->dma); 768 iounmap((void*)ports->dma);
777 iounmap((void*)ports->ctl); 769 iounmap((void*)ports->ctl);
778 release_mem_region(dma_base, dma_size); 770 pci_release_selected_regions(dev, (1 << 2) - 1);
779 release_mem_region(ctl_base, ctl_size);
780 memset(ports, 0, sizeof(*ports)); 771 memset(ports, 0, sizeof(*ports));
781} 772}
782 773
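
In setup_mmio_scc() the two request_mem_region()/release_mem_region() pairs collapse into pci_request_selected_regions(dev, (1 << 2) - 1, name) with the matching pci_release_selected_regions() in scc_remove(); the mask (1 << 2) - 1 == 0x3 covers BARs 0 and 1 in one call, which also shortens the error unwinding to two labels. A quick standalone check of the mask arithmetic:

#include <stdio.h>

int main(void)
{
	int bars = (1 << 2) - 1;	/* 0b11: BAR 0 and BAR 1 */

	for (int i = 0; i < 6; i++)
		if (bars & (1 << i))
			printf("request BAR %d\n", i);
	return 0;
}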
diff --git a/drivers/ide/pci/serverworks.c b/drivers/ide/pci/serverworks.c
index c11880b0709f..a1fb20826a5b 100644
--- a/drivers/ide/pci/serverworks.c
+++ b/drivers/ide/pci/serverworks.c
@@ -312,7 +312,7 @@ static u8 __devinit ata66_svwks_cobalt(ide_hwif_t *hwif)
312 return ATA_CBL_PATA40; 312 return ATA_CBL_PATA40;
313} 313}
314 314
315static u8 __devinit ata66_svwks(ide_hwif_t *hwif) 315static u8 __devinit svwks_cable_detect(ide_hwif_t *hwif)
316{ 316{
317 struct pci_dev *dev = to_pci_dev(hwif->dev); 317 struct pci_dev *dev = to_pci_dev(hwif->dev);
318 318
@@ -336,28 +336,28 @@ static u8 __devinit ata66_svwks(ide_hwif_t *hwif)
 	return ATA_CBL_PATA40;
 }
 
-static void __devinit init_hwif_svwks (ide_hwif_t *hwif)
-{
-	struct pci_dev *dev = to_pci_dev(hwif->dev);
-
-	hwif->set_pio_mode = &svwks_set_pio_mode;
-	hwif->set_dma_mode = &svwks_set_dma_mode;
-	hwif->udma_filter = &svwks_udma_filter;
+static const struct ide_port_ops osb4_port_ops = {
+	.set_pio_mode = svwks_set_pio_mode,
+	.set_dma_mode = svwks_set_dma_mode,
+	.udma_filter = svwks_udma_filter,
+};
 
-	if (dev->device != PCI_DEVICE_ID_SERVERWORKS_OSB4IDE)
-		hwif->cable_detect = ata66_svwks;
-}
+static const struct ide_port_ops svwks_port_ops = {
+	.set_pio_mode = svwks_set_pio_mode,
+	.set_dma_mode = svwks_set_dma_mode,
+	.udma_filter = svwks_udma_filter,
+	.cable_detect = svwks_cable_detect,
+};
 
 #define IDE_HFLAGS_SVWKS \
 	(IDE_HFLAG_LEGACY_IRQS | \
-	 IDE_HFLAG_ABUSE_SET_DMA_MODE | \
-	 IDE_HFLAG_BOOTABLE)
+	 IDE_HFLAG_ABUSE_SET_DMA_MODE)
 
 static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
 	{ /* 0 */
 		.name = "SvrWks OSB4",
 		.init_chipset = init_chipset_svwks,
-		.init_hwif = init_hwif_svwks,
+		.port_ops = &osb4_port_ops,
 		.host_flags = IDE_HFLAGS_SVWKS,
 		.pio_mask = ATA_PIO4,
 		.mwdma_mask = ATA_MWDMA2,
@@ -365,7 +365,7 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
365 },{ /* 1 */ 365 },{ /* 1 */
366 .name = "SvrWks CSB5", 366 .name = "SvrWks CSB5",
367 .init_chipset = init_chipset_svwks, 367 .init_chipset = init_chipset_svwks,
368 .init_hwif = init_hwif_svwks, 368 .port_ops = &svwks_port_ops,
369 .host_flags = IDE_HFLAGS_SVWKS, 369 .host_flags = IDE_HFLAGS_SVWKS,
370 .pio_mask = ATA_PIO4, 370 .pio_mask = ATA_PIO4,
371 .mwdma_mask = ATA_MWDMA2, 371 .mwdma_mask = ATA_MWDMA2,
@@ -373,7 +373,7 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
373 },{ /* 2 */ 373 },{ /* 2 */
374 .name = "SvrWks CSB6", 374 .name = "SvrWks CSB6",
375 .init_chipset = init_chipset_svwks, 375 .init_chipset = init_chipset_svwks,
376 .init_hwif = init_hwif_svwks, 376 .port_ops = &svwks_port_ops,
377 .host_flags = IDE_HFLAGS_SVWKS, 377 .host_flags = IDE_HFLAGS_SVWKS,
378 .pio_mask = ATA_PIO4, 378 .pio_mask = ATA_PIO4,
379 .mwdma_mask = ATA_MWDMA2, 379 .mwdma_mask = ATA_MWDMA2,
@@ -381,7 +381,7 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
381 },{ /* 3 */ 381 },{ /* 3 */
382 .name = "SvrWks CSB6", 382 .name = "SvrWks CSB6",
383 .init_chipset = init_chipset_svwks, 383 .init_chipset = init_chipset_svwks,
384 .init_hwif = init_hwif_svwks, 384 .port_ops = &svwks_port_ops,
385 .host_flags = IDE_HFLAGS_SVWKS | IDE_HFLAG_SINGLE, 385 .host_flags = IDE_HFLAGS_SVWKS | IDE_HFLAG_SINGLE,
386 .pio_mask = ATA_PIO4, 386 .pio_mask = ATA_PIO4,
387 .mwdma_mask = ATA_MWDMA2, 387 .mwdma_mask = ATA_MWDMA2,
@@ -389,7 +389,7 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
389 },{ /* 4 */ 389 },{ /* 4 */
390 .name = "SvrWks HT1000", 390 .name = "SvrWks HT1000",
391 .init_chipset = init_chipset_svwks, 391 .init_chipset = init_chipset_svwks,
392 .init_hwif = init_hwif_svwks, 392 .port_ops = &svwks_port_ops,
393 .host_flags = IDE_HFLAGS_SVWKS | IDE_HFLAG_SINGLE, 393 .host_flags = IDE_HFLAGS_SVWKS | IDE_HFLAG_SINGLE,
394 .pio_mask = ATA_PIO4, 394 .pio_mask = ATA_PIO4,
395 .mwdma_mask = ATA_MWDMA2, 395 .mwdma_mask = ATA_MWDMA2,
@@ -418,7 +418,7 @@ static int __devinit svwks_init_one(struct pci_dev *dev, const struct pci_device
418 else if (idx == 2 || idx == 3) { 418 else if (idx == 2 || idx == 3) {
419 if ((PCI_FUNC(dev->devfn) & 1) == 0) { 419 if ((PCI_FUNC(dev->devfn) & 1) == 0) {
420 if (pci_resource_start(dev, 0) != 0x01f1) 420 if (pci_resource_start(dev, 0) != 0x01f1)
421 d.host_flags &= ~IDE_HFLAG_BOOTABLE; 421 d.host_flags |= IDE_HFLAG_NON_BOOTABLE;
422 d.host_flags |= IDE_HFLAG_SINGLE; 422 d.host_flags |= IDE_HFLAG_SINGLE;
423 } else 423 } else
424 d.host_flags &= ~IDE_HFLAG_SINGLE; 424 d.host_flags &= ~IDE_HFLAG_SINGLE;
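
serverworks needs two tables because the OSB4 must not get a cable_detect method; the old runtime test "if (dev->device != PCI_DEVICE_ID_SERVERWORKS_OSB4IDE)" moves out of the probe path and into the device table, where entry 0 uses osb4_port_ops and the rest use svwks_port_ops. A sketch of trading that runtime branch for table data, with toy types:

#include <stdio.h>

struct port_ops {
	const char *(*cable_detect)(void);
};

static const char *svwks_cable_detect(void) { return "80-wire"; }

static const struct port_ops osb4_port_ops  = { 0 };	/* no cable_detect */
static const struct port_ops svwks_port_ops = {
	.cable_detect = svwks_cable_detect,
};

struct port_info {
	const char *name;
	const struct port_ops *port_ops;
};

static const struct port_info chipsets[] = {
	{ "SvrWks OSB4", &osb4_port_ops },	/* the quirk is data now */
	{ "SvrWks CSB5", &svwks_port_ops },
};

int main(void)
{
	for (unsigned i = 0; i < 2; i++)
		printf("%s: %s\n", chipsets[i].name,
		       chipsets[i].port_ops->cable_detect ?
		       chipsets[i].port_ops->cable_detect() : "n/a");
	return 0;
}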
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c
index 9d1a3038af9b..63e28f4e6d3b 100644
--- a/drivers/ide/pci/sgiioc4.c
+++ b/drivers/ide/pci/sgiioc4.c
@@ -98,28 +98,28 @@ sgiioc4_init_hwif_ports(hw_regs_t * hw, unsigned long data_port,
98 int i; 98 int i;
99 99
100 /* Registers are word (32 bit) aligned */ 100 /* Registers are word (32 bit) aligned */
101 for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) 101 for (i = 0; i <= 7; i++)
102 hw->io_ports[i] = reg + i * 4; 102 hw->io_ports_array[i] = reg + i * 4;
103 103
104 if (ctrl_port) 104 if (ctrl_port)
105 hw->io_ports[IDE_CONTROL_OFFSET] = ctrl_port; 105 hw->io_ports.ctl_addr = ctrl_port;
106 106
107 if (irq_port) 107 if (irq_port)
108 hw->io_ports[IDE_IRQ_OFFSET] = irq_port; 108 hw->io_ports.irq_addr = irq_port;
109} 109}
110 110
111static void 111static void
112sgiioc4_maskproc(ide_drive_t * drive, int mask) 112sgiioc4_maskproc(ide_drive_t * drive, int mask)
113{ 113{
114 writeb(mask ? (drive->ctl | 2) : (drive->ctl & ~2), 114 writeb(mask ? (drive->ctl | 2) : (drive->ctl & ~2),
115 (void __iomem *)drive->hwif->io_ports[IDE_CONTROL_OFFSET]); 115 (void __iomem *)drive->hwif->io_ports.ctl_addr);
116} 116}
117 117
118static int 118static int
119sgiioc4_checkirq(ide_hwif_t * hwif) 119sgiioc4_checkirq(ide_hwif_t * hwif)
120{ 120{
121 unsigned long intr_addr = 121 unsigned long intr_addr =
122 hwif->io_ports[IDE_IRQ_OFFSET] + IOC4_INTR_REG * 4; 122 hwif->io_ports.irq_addr + IOC4_INTR_REG * 4;
123 123
124 if ((u8)readl((void __iomem *)intr_addr) & 0x03) 124 if ((u8)readl((void __iomem *)intr_addr) & 0x03)
125 return 1; 125 return 1;
@@ -134,8 +134,8 @@ sgiioc4_clearirq(ide_drive_t * drive)
134{ 134{
135 u32 intr_reg; 135 u32 intr_reg;
136 ide_hwif_t *hwif = HWIF(drive); 136 ide_hwif_t *hwif = HWIF(drive);
137 unsigned long other_ir = 137 struct ide_io_ports *io_ports = &hwif->io_ports;
138 hwif->io_ports[IDE_IRQ_OFFSET] + (IOC4_INTR_REG << 2); 138 unsigned long other_ir = io_ports->irq_addr + (IOC4_INTR_REG << 2);
139 139
140 /* Code to check for PCI error conditions */ 140 /* Code to check for PCI error conditions */
141 intr_reg = readl((void __iomem *)other_ir); 141 intr_reg = readl((void __iomem *)other_ir);
@@ -147,12 +147,12 @@ sgiioc4_clearirq(ide_drive_t * drive)
147 * a "clear" status if it got cleared. If not, then spin 147 * a "clear" status if it got cleared. If not, then spin
148 * for a bit trying to clear it. 148 * for a bit trying to clear it.
149 */ 149 */
150 u8 stat = sgiioc4_INB(hwif->io_ports[IDE_STATUS_OFFSET]); 150 u8 stat = sgiioc4_INB(io_ports->status_addr);
151 int count = 0; 151 int count = 0;
152 stat = sgiioc4_INB(hwif->io_ports[IDE_STATUS_OFFSET]); 152 stat = sgiioc4_INB(io_ports->status_addr);
153 while ((stat & 0x80) && (count++ < 100)) { 153 while ((stat & 0x80) && (count++ < 100)) {
154 udelay(1); 154 udelay(1);
155 stat = sgiioc4_INB(hwif->io_ports[IDE_STATUS_OFFSET]); 155 stat = sgiioc4_INB(io_ports->status_addr);
156 } 156 }
157 157
158 if (intr_reg & 0x02) { 158 if (intr_reg & 0x02) {
@@ -162,18 +162,18 @@ sgiioc4_clearirq(ide_drive_t * drive)
162 pci_stat_cmd_reg; 162 pci_stat_cmd_reg;
163 163
164 pci_err_addr_low = 164 pci_err_addr_low =
165 readl((void __iomem *)hwif->io_ports[IDE_IRQ_OFFSET]); 165 readl((void __iomem *)io_ports->irq_addr);
166 pci_err_addr_high = 166 pci_err_addr_high =
167 readl((void __iomem *)(hwif->io_ports[IDE_IRQ_OFFSET] + 4)); 167 readl((void __iomem *)(io_ports->irq_addr + 4));
168 pci_read_config_dword(dev, PCI_COMMAND, 168 pci_read_config_dword(dev, PCI_COMMAND,
169 &pci_stat_cmd_reg); 169 &pci_stat_cmd_reg);
170 printk(KERN_ERR 170 printk(KERN_ERR
171 "%s(%s) : PCI Bus Error when doing DMA:" 171 "%s(%s) : PCI Bus Error when doing DMA:"
172 " status-cmd reg is 0x%x\n", 172 " status-cmd reg is 0x%x\n",
173 __FUNCTION__, drive->name, pci_stat_cmd_reg); 173 __func__, drive->name, pci_stat_cmd_reg);
174 printk(KERN_ERR 174 printk(KERN_ERR
175 "%s(%s) : PCI Error Address is 0x%x%x\n", 175 "%s(%s) : PCI Error Address is 0x%x%x\n",
176 __FUNCTION__, drive->name, 176 __func__, drive->name,
177 pci_err_addr_high, pci_err_addr_low); 177 pci_err_addr_high, pci_err_addr_low);
178 /* Clear the PCI Error indicator */ 178 /* Clear the PCI Error indicator */
179 pci_write_config_dword(dev, PCI_COMMAND, 0x00000146); 179 pci_write_config_dword(dev, PCI_COMMAND, 0x00000146);
@@ -188,7 +188,7 @@ sgiioc4_clearirq(ide_drive_t * drive)
188 return intr_reg & 3; 188 return intr_reg & 3;
189} 189}
190 190
191static void sgiioc4_ide_dma_start(ide_drive_t * drive) 191static void sgiioc4_dma_start(ide_drive_t *drive)
192{ 192{
193 ide_hwif_t *hwif = HWIF(drive); 193 ide_hwif_t *hwif = HWIF(drive);
194 unsigned long ioc4_dma_addr = hwif->dma_base + IOC4_DMA_CTRL * 4; 194 unsigned long ioc4_dma_addr = hwif->dma_base + IOC4_DMA_CTRL * 4;
@@ -215,8 +215,7 @@ sgiioc4_ide_dma_stop(ide_hwif_t *hwif, u64 dma_base)
215} 215}
216 216
217/* Stops the IOC4 DMA Engine */ 217/* Stops the IOC4 DMA Engine */
218static int 218static int sgiioc4_dma_end(ide_drive_t *drive)
219sgiioc4_ide_dma_end(ide_drive_t * drive)
220{ 219{
221 u32 ioc4_dma, bc_dev, bc_mem, num, valid = 0, cnt = 0; 220 u32 ioc4_dma, bc_dev, bc_mem, num, valid = 0, cnt = 0;
222 ide_hwif_t *hwif = HWIF(drive); 221 ide_hwif_t *hwif = HWIF(drive);
@@ -232,7 +231,7 @@ sgiioc4_ide_dma_end(ide_drive_t * drive)
232 printk(KERN_ERR 231 printk(KERN_ERR
233 "%s(%s): IOC4 DMA STOP bit is still 1 :" 232 "%s(%s): IOC4 DMA STOP bit is still 1 :"
234 "ioc4_dma_reg 0x%x\n", 233 "ioc4_dma_reg 0x%x\n",
235 __FUNCTION__, drive->name, ioc4_dma); 234 __func__, drive->name, ioc4_dma);
236 dma_stat = 1; 235 dma_stat = 1;
237 } 236 }
238 237
@@ -251,7 +250,7 @@ sgiioc4_ide_dma_end(ide_drive_t * drive)
251 udelay(1); 250 udelay(1);
252 } 251 }
253 if (!valid) { 252 if (!valid) {
254 printk(KERN_ERR "%s(%s) : DMA incomplete\n", __FUNCTION__, 253 printk(KERN_ERR "%s(%s) : DMA incomplete\n", __func__,
255 drive->name); 254 drive->name);
256 dma_stat = 1; 255 dma_stat = 1;
257 } 256 }
@@ -264,7 +263,7 @@ sgiioc4_ide_dma_end(ide_drive_t * drive)
264 printk(KERN_ERR 263 printk(KERN_ERR
265 "%s(%s): WARNING!! byte_count_dev %d " 264 "%s(%s): WARNING!! byte_count_dev %d "
266 "!= byte_count_mem %d\n", 265 "!= byte_count_mem %d\n",
267 __FUNCTION__, drive->name, bc_dev, bc_mem); 266 __func__, drive->name, bc_dev, bc_mem);
268 } 267 }
269 } 268 }
270 269
@@ -279,8 +278,7 @@ static void sgiioc4_set_dma_mode(ide_drive_t *drive, const u8 speed)
279} 278}
280 279
281/* returns 1 if dma irq issued, 0 otherwise */ 280/* returns 1 if dma irq issued, 0 otherwise */
282static int 281static int sgiioc4_dma_test_irq(ide_drive_t *drive)
283sgiioc4_ide_dma_test_irq(ide_drive_t * drive)
284{ 282{
285 return sgiioc4_checkirq(HWIF(drive)); 283 return sgiioc4_checkirq(HWIF(drive));
286} 284}
@@ -294,7 +292,7 @@ static void sgiioc4_dma_host_set(ide_drive_t *drive, int on)
294static void 292static void
295sgiioc4_resetproc(ide_drive_t * drive) 293sgiioc4_resetproc(ide_drive_t * drive)
296{ 294{
297 sgiioc4_ide_dma_end(drive); 295 sgiioc4_dma_end(drive);
298 sgiioc4_clearirq(drive); 296 sgiioc4_clearirq(drive);
299} 297}
300 298
@@ -329,13 +327,17 @@ sgiioc4_INB(unsigned long port)
329 327
330/* Creates a dma map for the scatter-gather list entries */ 328/* Creates a dma map for the scatter-gather list entries */
331static int __devinit 329static int __devinit
332ide_dma_sgiioc4(ide_hwif_t * hwif, unsigned long dma_base) 330ide_dma_sgiioc4(ide_hwif_t *hwif, const struct ide_port_info *d)
333{ 331{
334 struct pci_dev *dev = to_pci_dev(hwif->dev); 332 struct pci_dev *dev = to_pci_dev(hwif->dev);
333 unsigned long dma_base = pci_resource_start(dev, 0) + IOC4_DMA_OFFSET;
335 void __iomem *virt_dma_base; 334 void __iomem *virt_dma_base;
336 int num_ports = sizeof (ioc4_dma_regs_t); 335 int num_ports = sizeof (ioc4_dma_regs_t);
337 void *pad; 336 void *pad;
338 337
338 if (dma_base == 0)
339 return -1;
340
339 printk(KERN_INFO "%s: BM-DMA at 0x%04lx-0x%04lx\n", hwif->name, 341 printk(KERN_INFO "%s: BM-DMA at 0x%04lx-0x%04lx\n", hwif->name,
340 dma_base, dma_base + num_ports - 1); 342 dma_base, dma_base + num_ports - 1);
341 343
@@ -343,7 +345,7 @@ ide_dma_sgiioc4(ide_hwif_t * hwif, unsigned long dma_base)
343 printk(KERN_ERR 345 printk(KERN_ERR
344 "%s(%s) -- ERROR, Addresses 0x%p to 0x%p " 346 "%s(%s) -- ERROR, Addresses 0x%p to 0x%p "
345 "ALREADY in use\n", 347 "ALREADY in use\n",
346 __FUNCTION__, hwif->name, (void *) dma_base, 348 __func__, hwif->name, (void *) dma_base,
347 (void *) dma_base + num_ports - 1); 349 (void *) dma_base + num_ports - 1);
348 return -1; 350 return -1;
349 } 351 }
@@ -352,7 +354,7 @@ ide_dma_sgiioc4(ide_hwif_t * hwif, unsigned long dma_base)
352 if (virt_dma_base == NULL) { 354 if (virt_dma_base == NULL) {
353 printk(KERN_ERR 355 printk(KERN_ERR
354 "%s(%s) -- ERROR, Unable to map addresses 0x%lx to 0x%lx\n", 356 "%s(%s) -- ERROR, Unable to map addresses 0x%lx to 0x%lx\n",
355 __FUNCTION__, hwif->name, dma_base, dma_base + num_ports - 1); 357 __func__, hwif->name, dma_base, dma_base + num_ports - 1);
356 goto dma_remap_failure; 358 goto dma_remap_failure;
357 } 359 }
358 hwif->dma_base = (unsigned long) virt_dma_base; 360 hwif->dma_base = (unsigned long) virt_dma_base;
@@ -378,7 +380,7 @@ ide_dma_sgiioc4(ide_hwif_t * hwif, unsigned long dma_base)
378 hwif->dmatable_cpu, hwif->dmatable_dma); 380 hwif->dmatable_cpu, hwif->dmatable_dma);
379 printk(KERN_INFO 381 printk(KERN_INFO
380 "%s() -- Error! Unable to allocate DMA Maps for drive %s\n", 382 "%s() -- Error! Unable to allocate DMA Maps for drive %s\n",
381 __FUNCTION__, hwif->name); 383 __func__, hwif->name);
382 printk(KERN_INFO 384 printk(KERN_INFO
383 "Changing from DMA to PIO mode for Drive %s\n", hwif->name); 385 "Changing from DMA to PIO mode for Drive %s\n", hwif->name);
384 386
@@ -406,14 +408,14 @@ sgiioc4_configure_for_dma(int dma_direction, ide_drive_t * drive)
406 if (ioc4_dma & IOC4_S_DMA_ACTIVE) { 408 if (ioc4_dma & IOC4_S_DMA_ACTIVE) {
407 printk(KERN_WARNING 409 printk(KERN_WARNING
408 "%s(%s):Warning!! DMA from previous transfer was still active\n", 410 "%s(%s):Warning!! DMA from previous transfer was still active\n",
409 __FUNCTION__, drive->name); 411 __func__, drive->name);
410 writel(IOC4_S_DMA_STOP, (void __iomem *)ioc4_dma_addr); 412 writel(IOC4_S_DMA_STOP, (void __iomem *)ioc4_dma_addr);
411 ioc4_dma = sgiioc4_ide_dma_stop(hwif, dma_base); 413 ioc4_dma = sgiioc4_ide_dma_stop(hwif, dma_base);
412 414
413 if (ioc4_dma & IOC4_S_DMA_STOP) 415 if (ioc4_dma & IOC4_S_DMA_STOP)
414 printk(KERN_ERR 416 printk(KERN_ERR
415 "%s(%s) : IOC4 Dma STOP bit is still 1\n", 417 "%s(%s) : IOC4 Dma STOP bit is still 1\n",
416 __FUNCTION__, drive->name); 418 __func__, drive->name);
417 } 419 }
418 420
419 ioc4_dma = readl((void __iomem *)ioc4_dma_addr); 421 ioc4_dma = readl((void __iomem *)ioc4_dma_addr);
@@ -421,14 +423,14 @@ sgiioc4_configure_for_dma(int dma_direction, ide_drive_t * drive)
421 printk(KERN_WARNING 423 printk(KERN_WARNING
422 "%s(%s) : Warning!! - DMA Error during Previous" 424 "%s(%s) : Warning!! - DMA Error during Previous"
423 " transfer | status 0x%x\n", 425 " transfer | status 0x%x\n",
424 __FUNCTION__, drive->name, ioc4_dma); 426 __func__, drive->name, ioc4_dma);
425 writel(IOC4_S_DMA_STOP, (void __iomem *)ioc4_dma_addr); 427 writel(IOC4_S_DMA_STOP, (void __iomem *)ioc4_dma_addr);
426 ioc4_dma = sgiioc4_ide_dma_stop(hwif, dma_base); 428 ioc4_dma = sgiioc4_ide_dma_stop(hwif, dma_base);
427 429
428 if (ioc4_dma & IOC4_S_DMA_STOP) 430 if (ioc4_dma & IOC4_S_DMA_STOP)
429 printk(KERN_ERR 431 printk(KERN_ERR
430 "%s(%s) : IOC4 DMA STOP bit is still 1\n", 432 "%s(%s) : IOC4 DMA STOP bit is still 1\n",
431 __FUNCTION__, drive->name); 433 __func__, drive->name);
432 } 434 }
433 435
434 /* Address of the Scatter Gather List */ 436 /* Address of the Scatter Gather List */
@@ -519,7 +521,7 @@ use_pio_instead:
519 return 0; /* revert to PIO for this request */ 521 return 0; /* revert to PIO for this request */
520} 522}
521 523
522static int sgiioc4_ide_dma_setup(ide_drive_t *drive) 524static int sgiioc4_dma_setup(ide_drive_t *drive)
523{ 525{
524 struct request *rq = HWGROUP(drive)->rq; 526 struct request *rq = HWGROUP(drive)->rq;
525 unsigned int count = 0; 527 unsigned int count = 0;
@@ -548,62 +550,45 @@ static int sgiioc4_ide_dma_setup(ide_drive_t *drive)
 	return 0;
 }
 
-static void __devinit
-ide_init_sgiioc4(ide_hwif_t * hwif)
-{
-	hwif->mmio = 1;
-	hwif->set_pio_mode = NULL; /* Sets timing for PIO mode */
-	hwif->set_dma_mode = &sgiioc4_set_dma_mode;
-	hwif->selectproc = NULL;/* Use the default routine to select drive */
-	hwif->reset_poll = NULL;/* No HBA specific reset_poll needed */
-	hwif->pre_reset = NULL;	/* No HBA specific pre_set needed */
-	hwif->resetproc = &sgiioc4_resetproc;/* Reset DMA engine,
-						clear interrupts */
-	hwif->maskproc = &sgiioc4_maskproc;	/* Mask on/off NIEN register */
-	hwif->quirkproc = NULL;
-
-	hwif->INB = &sgiioc4_INB;
-
-	if (hwif->dma_base == 0)
-		return;
+static const struct ide_port_ops sgiioc4_port_ops = {
+	.set_dma_mode = sgiioc4_set_dma_mode,
+	/* reset DMA engine, clear IRQs */
+	.resetproc = sgiioc4_resetproc,
+	/* mask on/off NIEN register */
+	.maskproc = sgiioc4_maskproc,
+};
 
-	hwif->dma_host_set = &sgiioc4_dma_host_set;
-	hwif->dma_setup = &sgiioc4_ide_dma_setup;
-	hwif->dma_start = &sgiioc4_ide_dma_start;
-	hwif->ide_dma_end = &sgiioc4_ide_dma_end;
-	hwif->ide_dma_test_irq = &sgiioc4_ide_dma_test_irq;
-	hwif->dma_lost_irq = &sgiioc4_dma_lost_irq;
-	hwif->dma_timeout = &ide_dma_timeout;
-}
+static const struct ide_dma_ops sgiioc4_dma_ops = {
+	.dma_host_set = sgiioc4_dma_host_set,
+	.dma_setup = sgiioc4_dma_setup,
+	.dma_start = sgiioc4_dma_start,
+	.dma_end = sgiioc4_dma_end,
+	.dma_test_irq = sgiioc4_dma_test_irq,
+	.dma_lost_irq = sgiioc4_dma_lost_irq,
+	.dma_timeout = ide_dma_timeout,
+};
 
 static const struct ide_port_info sgiioc4_port_info __devinitdata = {
 	.chipset = ide_pci,
-	.host_flags = IDE_HFLAG_NO_DMA |	/* no SFF-style DMA */
-		      IDE_HFLAG_NO_AUTOTUNE,
+	.init_dma = ide_dma_sgiioc4,
+	.port_ops = &sgiioc4_port_ops,
+	.dma_ops = &sgiioc4_dma_ops,
 	.mwdma_mask = ATA_MWDMA2_ONLY,
 };
 
 static int __devinit
 sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
 {
-	unsigned long cmd_base, dma_base, irqport;
+	unsigned long cmd_base, irqport;
 	unsigned long bar0, cmd_phys_base, ctl;
 	void __iomem *virt_base;
 	ide_hwif_t *hwif;
-	int h;
 	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
 	hw_regs_t hw;
 	struct ide_port_info d = sgiioc4_port_info;
 
-	/*
-	 * Find an empty HWIF; if none available, return -ENOMEM.
-	 */
-	for (h = 0; h < MAX_HWIFS; ++h) {
-		hwif = &ide_hwifs[h];
-		if (hwif->chipset == ide_unknown)
-			break;
-	}
-	if (h == MAX_HWIFS) {
+	hwif = ide_find_port();
+	if (hwif == NULL) {
 		printk(KERN_ERR "%s: too many IDE interfaces, no room in table\n",
 		       DRV_NAME);
 		return -ENOMEM;
@@ -620,7 +605,6 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
620 cmd_base = (unsigned long) virt_base + IOC4_CMD_OFFSET; 605 cmd_base = (unsigned long) virt_base + IOC4_CMD_OFFSET;
621 ctl = (unsigned long) virt_base + IOC4_CTRL_OFFSET; 606 ctl = (unsigned long) virt_base + IOC4_CTRL_OFFSET;
622 irqport = (unsigned long) virt_base + IOC4_INTR_OFFSET; 607 irqport = (unsigned long) virt_base + IOC4_INTR_OFFSET;
623 dma_base = pci_resource_start(dev, 0) + IOC4_DMA_OFFSET;
624 608
625 cmd_phys_base = bar0 + IOC4_CMD_OFFSET; 609 cmd_phys_base = bar0 + IOC4_CMD_OFFSET;
626 if (!request_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE, 610 if (!request_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE,
@@ -628,7 +612,7 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
628 printk(KERN_ERR 612 printk(KERN_ERR
629 "%s : %s -- ERROR, Addresses " 613 "%s : %s -- ERROR, Addresses "
630 "0x%p to 0x%p ALREADY in use\n", 614 "0x%p to 0x%p ALREADY in use\n",
631 __FUNCTION__, hwif->name, (void *) cmd_phys_base, 615 __func__, hwif->name, (void *) cmd_phys_base,
632 (void *) cmd_phys_base + IOC4_CMD_CTL_BLK_SIZE); 616 (void *) cmd_phys_base + IOC4_CMD_CTL_BLK_SIZE);
633 return -ENOMEM; 617 return -ENOMEM;
634 } 618 }
@@ -649,13 +633,7 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
649 /* Initializing chipset IRQ Registers */ 633 /* Initializing chipset IRQ Registers */
650 writel(0x03, (void __iomem *)(irqport + IOC4_INTR_SET * 4)); 634 writel(0x03, (void __iomem *)(irqport + IOC4_INTR_SET * 4));
651 635
652 if (dma_base == 0 || ide_dma_sgiioc4(hwif, dma_base)) { 636 hwif->INB = &sgiioc4_INB;
653 printk(KERN_INFO "%s: %s Bus-Master DMA disabled\n",
654 hwif->name, DRV_NAME);
655 d.mwdma_mask = 0;
656 }
657
658 ide_init_sgiioc4(hwif);
659 637
660 idx[0] = hwif->index; 638 idx[0] = hwif->index;
661 639
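
The io_ports[IDE_*_OFFSET] to io_ports.*_addr renames running through sgiioc4 (and opti621, scc_pata and siimage above) come from the flat array being replaced by struct ide_io_ports with named fields, while loops such as sgiioc4_init_hwif_ports() index the io_ports_array[] alias instead. A sketch assuming the two views share storage through a union, along the lines of the kernel's hw_regs_t at the time:

#include <stdio.h>
#include <string.h>

#define NR_PORTS 10

typedef struct hw_regs {
	union {
		struct {
			unsigned long data_addr;   /* was io_ports[IDE_DATA_OFFSET] */
			unsigned long error_addr;
			unsigned long nsect_addr;
			unsigned long lbal_addr;
			unsigned long lbam_addr;
			unsigned long lbah_addr;
			unsigned long device_addr;
			unsigned long status_addr; /* was io_ports[IDE_STATUS_OFFSET] */
			unsigned long ctl_addr;    /* was io_ports[IDE_CONTROL_OFFSET] */
			unsigned long irq_addr;    /* was io_ports[IDE_IRQ_OFFSET] */
		} io_ports;
		unsigned long io_ports_array[NR_PORTS];
	};
} hw_regs_t;

int main(void)
{
	hw_regs_t hw;

	memset(&hw, 0, sizeof(hw));

	/* registers word (32 bit) aligned, as in sgiioc4_init_hwif_ports() */
	for (int i = 0; i <= 7; i++)
		hw.io_ports_array[i] = 0x900 + i * 4;

	printf("status reg at %#lx\n", hw.io_ports.status_addr);
	return 0;
}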
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c
index b6be1b45f329..c2040a017f47 100644
--- a/drivers/ide/pci/siimage.c
+++ b/drivers/ide/pci/siimage.c
@@ -301,7 +301,7 @@ static void sil_set_dma_mode(ide_drive_t *drive, const u8 speed)
301} 301}
302 302
303/* returns 1 if dma irq issued, 0 otherwise */ 303/* returns 1 if dma irq issued, 0 otherwise */
304static int siimage_io_ide_dma_test_irq (ide_drive_t *drive) 304static int siimage_io_dma_test_irq(ide_drive_t *drive)
305{ 305{
306 ide_hwif_t *hwif = HWIF(drive); 306 ide_hwif_t *hwif = HWIF(drive);
307 struct pci_dev *dev = to_pci_dev(hwif->dev); 307 struct pci_dev *dev = to_pci_dev(hwif->dev);
@@ -320,14 +320,14 @@ static int siimage_io_ide_dma_test_irq (ide_drive_t *drive)
320} 320}
321 321
322/** 322/**
323 * siimage_mmio_ide_dma_test_irq - check we caused an IRQ 323 * siimage_mmio_dma_test_irq - check we caused an IRQ
324 * @drive: drive we are testing 324 * @drive: drive we are testing
325 * 325 *
326 * Check if we caused an IDE DMA interrupt. We may also have caused 326 * Check if we caused an IDE DMA interrupt. We may also have caused
327 * SATA status interrupts, if so we clean them up and continue. 327 * SATA status interrupts, if so we clean them up and continue.
328 */ 328 */
329 329
330static int siimage_mmio_ide_dma_test_irq (ide_drive_t *drive) 330static int siimage_mmio_dma_test_irq(ide_drive_t *drive)
331{ 331{
332 ide_hwif_t *hwif = HWIF(drive); 332 ide_hwif_t *hwif = HWIF(drive);
333 unsigned long addr = siimage_selreg(hwif, 0x1); 333 unsigned long addr = siimage_selreg(hwif, 0x1);
@@ -347,7 +347,7 @@ static int siimage_mmio_ide_dma_test_irq (ide_drive_t *drive)
347 printk(KERN_WARNING "%s: sata_error = 0x%08x, " 347 printk(KERN_WARNING "%s: sata_error = 0x%08x, "
348 "watchdog = %d, %s\n", 348 "watchdog = %d, %s\n",
349 drive->name, sata_error, watchdog, 349 drive->name, sata_error, watchdog,
350 __FUNCTION__); 350 __func__);
351 351
352 } else { 352 } else {
353 watchdog = (ext_stat & 0x8000) ? 1 : 0; 353 watchdog = (ext_stat & 0x8000) ? 1 : 0;
@@ -369,6 +369,14 @@ static int siimage_mmio_ide_dma_test_irq (ide_drive_t *drive)
369 return 0; 369 return 0;
370} 370}
371 371
372static int siimage_dma_test_irq(ide_drive_t *drive)
373{
374 if (drive->hwif->mmio)
375 return siimage_mmio_dma_test_irq(drive);
376 else
377 return siimage_io_dma_test_irq(drive);
378}
379
372/** 380/**
373 * sil_sata_reset_poll - wait for SATA reset 381 * sil_sata_reset_poll - wait for SATA reset
374 * @drive: drive we are resetting 382 * @drive: drive we are resetting
@@ -614,9 +622,10 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
614 struct pci_dev *dev = to_pci_dev(hwif->dev); 622 struct pci_dev *dev = to_pci_dev(hwif->dev);
615 void *addr = pci_get_drvdata(dev); 623 void *addr = pci_get_drvdata(dev);
616 u8 ch = hwif->channel; 624 u8 ch = hwif->channel;
617 hw_regs_t hw;
618 unsigned long base; 625 unsigned long base;
619 626
627 struct ide_io_ports *io_ports = &hwif->io_ports;
628
620 /* 629 /*
621 * Fill in the basic HWIF bits 630 * Fill in the basic HWIF bits
622 */ 631 */
@@ -630,7 +639,7 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
630 * based I/O 639 * based I/O
631 */ 640 */
632 641
633 memset(&hw, 0, sizeof(hw_regs_t)); 642 memset(io_ports, 0, sizeof(*io_ports));
634 643
635 base = (unsigned long)addr; 644 base = (unsigned long)addr;
636 if (ch) 645 if (ch)
@@ -643,17 +652,15 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
643 * so we can't currently use it sanely since we want to 652 * so we can't currently use it sanely since we want to
644 * use LBA48 mode. 653 * use LBA48 mode.
645 */ 654 */
646 hw.io_ports[IDE_DATA_OFFSET] = base; 655 io_ports->data_addr = base;
647 hw.io_ports[IDE_ERROR_OFFSET] = base + 1; 656 io_ports->error_addr = base + 1;
648 hw.io_ports[IDE_NSECTOR_OFFSET] = base + 2; 657 io_ports->nsect_addr = base + 2;
649 hw.io_ports[IDE_SECTOR_OFFSET] = base + 3; 658 io_ports->lbal_addr = base + 3;
650 hw.io_ports[IDE_LCYL_OFFSET] = base + 4; 659 io_ports->lbam_addr = base + 4;
651 hw.io_ports[IDE_HCYL_OFFSET] = base + 5; 660 io_ports->lbah_addr = base + 5;
652 hw.io_ports[IDE_SELECT_OFFSET] = base + 6; 661 io_ports->device_addr = base + 6;
653 hw.io_ports[IDE_STATUS_OFFSET] = base + 7; 662 io_ports->status_addr = base + 7;
654 hw.io_ports[IDE_CONTROL_OFFSET] = base + 10; 663 io_ports->ctl_addr = base + 10;
655
656 hw.io_ports[IDE_IRQ_OFFSET] = 0;
657 664
658 if (pdev_is_sata(dev)) { 665 if (pdev_is_sata(dev)) {
659 base = (unsigned long)addr; 666 base = (unsigned long)addr;
@@ -664,8 +671,6 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
664 hwif->sata_scr[SATA_CONTROL_OFFSET] = base + 0x100; 671 hwif->sata_scr[SATA_CONTROL_OFFSET] = base + 0x100;
665 } 672 }
666 673
667 memcpy(hwif->io_ports, hw.io_ports, sizeof(hwif->io_ports));
668
669 hwif->irq = dev->irq; 674 hwif->irq = dev->irq;
670 675
671 hwif->dma_base = (unsigned long)addr + (ch ? 0x08 : 0x00); 676 hwif->dma_base = (unsigned long)addr + (ch ? 0x08 : 0x00);
@@ -735,14 +740,14 @@ static void __devinit init_iops_siimage(ide_hwif_t *hwif)
735} 740}
736 741
737/** 742/**
738 * ata66_siimage - check for 80 pin cable 743 * sil_cable_detect - cable detection
739 * @hwif: interface to check 744 * @hwif: interface to check
740 * 745 *
741 * Check for the presence of an ATA66 capable cable on the 746 * Check for the presence of an ATA66 capable cable on the
742 * interface. 747 * interface.
743 */ 748 */
744 749
745static u8 __devinit ata66_siimage(ide_hwif_t *hwif) 750static u8 __devinit sil_cable_detect(ide_hwif_t *hwif)
746{ 751{
747 struct pci_dev *dev = to_pci_dev(hwif->dev); 752 struct pci_dev *dev = to_pci_dev(hwif->dev);
748 unsigned long addr = siimage_selreg(hwif, 0); 753 unsigned long addr = siimage_selreg(hwif, 0);
@@ -756,68 +761,44 @@ static u8 __devinit ata66_siimage(ide_hwif_t *hwif)
 	return (ata66 & 0x01) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
 }
 
-/**
- * init_hwif_siimage - set up hwif structs
- * @hwif: interface to set up
- *
- * We do the basic set up of the interface structure. The SIIMAGE
- * requires several custom handlers so we override the default
- * ide DMA handlers appropriately
- */
-
-static void __devinit init_hwif_siimage(ide_hwif_t *hwif)
-{
-	u8 sata = is_sata(hwif);
-
-	hwif->set_pio_mode = &sil_set_pio_mode;
-	hwif->set_dma_mode = &sil_set_dma_mode;
-	hwif->quirkproc = &sil_quirkproc;
-
-	if (sata) {
-		static int first = 1;
-
-		hwif->reset_poll = &sil_sata_reset_poll;
-		hwif->pre_reset = &sil_sata_pre_reset;
-		hwif->udma_filter = &sil_sata_udma_filter;
-
-		if (first) {
-			printk(KERN_INFO "siimage: For full SATA support you should use the libata sata_sil module.\n");
-			first = 0;
-		}
-	} else
-		hwif->udma_filter = &sil_pata_udma_filter;
-
-	hwif->cable_detect = ata66_siimage;
-
-	if (hwif->dma_base == 0)
-		return;
+static const struct ide_port_ops sil_pata_port_ops = {
+	.set_pio_mode = sil_set_pio_mode,
+	.set_dma_mode = sil_set_dma_mode,
+	.quirkproc = sil_quirkproc,
+	.udma_filter = sil_pata_udma_filter,
+	.cable_detect = sil_cable_detect,
+};
 
-	if (sata)
-		hwif->host_flags |= IDE_HFLAG_NO_ATAPI_DMA;
+static const struct ide_port_ops sil_sata_port_ops = {
+	.set_pio_mode = sil_set_pio_mode,
+	.set_dma_mode = sil_set_dma_mode,
+	.reset_poll = sil_sata_reset_poll,
+	.pre_reset = sil_sata_pre_reset,
+	.quirkproc = sil_quirkproc,
+	.udma_filter = sil_sata_udma_filter,
+	.cable_detect = sil_cable_detect,
+};
 
-	if (hwif->mmio) {
-		hwif->ide_dma_test_irq = &siimage_mmio_ide_dma_test_irq;
-	} else {
-		hwif->ide_dma_test_irq = & siimage_io_ide_dma_test_irq;
-	}
-}
+static struct ide_dma_ops sil_dma_ops = {
+	.dma_test_irq = siimage_dma_test_irq,
+};
 
-#define DECLARE_SII_DEV(name_str) \
+#define DECLARE_SII_DEV(name_str, p_ops) \
 	{ \
 		.name		= name_str, \
 		.init_chipset	= init_chipset_siimage, \
 		.init_iops	= init_iops_siimage, \
-		.init_hwif	= init_hwif_siimage, \
-		.host_flags	= IDE_HFLAG_BOOTABLE, \
+		.port_ops	= p_ops, \
+		.dma_ops	= &sil_dma_ops, \
 		.pio_mask	= ATA_PIO4, \
 		.mwdma_mask	= ATA_MWDMA2, \
 		.udma_mask	= ATA_UDMA6, \
 	}
 
 static const struct ide_port_info siimage_chipsets[] __devinitdata = {
-	/* 0 */ DECLARE_SII_DEV("SiI680"),
-	/* 1 */ DECLARE_SII_DEV("SiI3112 Serial ATA"),
-	/* 2 */ DECLARE_SII_DEV("Adaptec AAR-1210SA")
+	/* 0 */ DECLARE_SII_DEV("SiI680", &sil_pata_port_ops),
+	/* 1 */ DECLARE_SII_DEV("SiI3112 Serial ATA", &sil_sata_port_ops),
+	/* 2 */ DECLARE_SII_DEV("Adaptec AAR-1210SA", &sil_sata_port_ops)
 };
 
 /**
@@ -831,7 +812,24 @@ static const struct ide_port_info siimage_chipsets[] __devinitdata = {
831 812
832static int __devinit siimage_init_one(struct pci_dev *dev, const struct pci_device_id *id) 813static int __devinit siimage_init_one(struct pci_dev *dev, const struct pci_device_id *id)
833{ 814{
834 return ide_setup_pci_device(dev, &siimage_chipsets[id->driver_data]); 815 struct ide_port_info d;
816 u8 idx = id->driver_data;
817
818 d = siimage_chipsets[idx];
819
820 if (idx) {
821 static int first = 1;
822
823 if (first) {
824 printk(KERN_INFO "siimage: For full SATA support you "
825 "should use the libata sata_sil module.\n");
826 first = 0;
827 }
828
829 d.host_flags |= IDE_HFLAG_NO_ATAPI_DMA;
830 }
831
832 return ide_setup_pci_device(dev, &d);
835} 833}
836 834
837static const struct pci_device_id siimage_pci_tbl[] = { 835static const struct pci_device_id siimage_pci_tbl[] = {
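
The siimage hunks above capture the conversion pattern repeated through the rest of this series: hooks that init_hwif_siimage() used to assign to each hwif at probe time become shared const ide_port_ops/ide_dma_ops tables referenced from ide_port_info, and per-variant adjustments move into the PCI probe routine, which copies the template before handing it to ide_setup_pci_device(). A minimal sketch of that shape, using a hypothetical "foo" driver (all foo_* names are placeholders, not part of the patch):

#include <linux/pci.h>
#include <linux/ide.h>

static void foo_set_pio_mode(ide_drive_t *drive, const u8 pio) { /* ... */ }
static void foo_set_dma_mode(ide_drive_t *drive, const u8 speed) { /* ... */ }

/* hooks live in one shared const table instead of per-hwif pointers */
static const struct ide_port_ops foo_port_ops = {
	.set_pio_mode	= foo_set_pio_mode,
	.set_dma_mode	= foo_set_dma_mode,
};

static const struct ide_port_info foo_chipset __devinitdata = {
	.name		= "FOO",
	.port_ops	= &foo_port_ops,
	.pio_mask	= ATA_PIO4,
};

static int __devinit foo_init_one(struct pci_dev *dev,
				  const struct pci_device_id *id)
{
	struct ide_port_info d = foo_chipset;	/* copy the template */

	if (id->driver_data)		/* per-variant tweak at probe time */
		d.host_flags |= IDE_HFLAG_NO_ATAPI_DMA;

	return ide_setup_pci_device(dev, &d);
}

One practical effect is that variant-specific flags such as IDE_HFLAG_NO_ATAPI_DMA are now applied once to the copied template rather than to each hwif, which is why the SATA warning and its "first" latch moved from init_hwif into init_one above.
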
diff --git a/drivers/ide/pci/sis5513.c b/drivers/ide/pci/sis5513.c
index 512bb4c1fd5c..4b0b85d8faf5 100644
--- a/drivers/ide/pci/sis5513.c
+++ b/drivers/ide/pci/sis5513.c
@@ -59,10 +59,10 @@
59#define ATA_16 0x01 59#define ATA_16 0x01
60#define ATA_33 0x02 60#define ATA_33 0x02
61#define ATA_66 0x03 61#define ATA_66 0x03
62#define ATA_100a 0x04 // SiS730/SiS550 is ATA100 with ATA66 layout 62#define ATA_100a 0x04 /* SiS730/SiS550 is ATA100 with ATA66 layout */
63#define ATA_100 0x05 63#define ATA_100 0x05
64#define ATA_133a 0x06 // SiS961b with 133 support 64#define ATA_133a 0x06 /* SiS961b with 133 support */
65#define ATA_133 0x07 // SiS962/963 65#define ATA_133 0x07 /* SiS962/963 */
66 66
67static u8 chipset_family; 67static u8 chipset_family;
68 68
@@ -111,69 +111,70 @@ static const struct {
111 Indexed by chipset_family and (dma_mode - XFER_UDMA_0) */ 111 Indexed by chipset_family and (dma_mode - XFER_UDMA_0) */
112 112
113/* {0, ATA_16, ATA_33, ATA_66, ATA_100a, ATA_100, ATA_133} */ 113/* {0, ATA_16, ATA_33, ATA_66, ATA_100a, ATA_100, ATA_133} */
114static u8 cycle_time_offset[] = {0,0,5,4,4,0,0}; 114static u8 cycle_time_offset[] = { 0, 0, 5, 4, 4, 0, 0 };
115static u8 cycle_time_range[] = {0,0,2,3,3,4,4}; 115static u8 cycle_time_range[] = { 0, 0, 2, 3, 3, 4, 4 };
116static u8 cycle_time_value[][XFER_UDMA_6 - XFER_UDMA_0 + 1] = { 116static u8 cycle_time_value[][XFER_UDMA_6 - XFER_UDMA_0 + 1] = {
117 {0,0,0,0,0,0,0}, /* no udma */ 117 { 0, 0, 0, 0, 0, 0, 0 }, /* no UDMA */
118 {0,0,0,0,0,0,0}, /* no udma */ 118 { 0, 0, 0, 0, 0, 0, 0 }, /* no UDMA */
119 {3,2,1,0,0,0,0}, /* ATA_33 */ 119 { 3, 2, 1, 0, 0, 0, 0 }, /* ATA_33 */
120 {7,5,3,2,1,0,0}, /* ATA_66 */ 120 { 7, 5, 3, 2, 1, 0, 0 }, /* ATA_66 */
121 {7,5,3,2,1,0,0}, /* ATA_100a (730 specific), differences are on cycle_time range and offset */ 121 { 7, 5, 3, 2, 1, 0, 0 }, /* ATA_100a (730 specific),
122 {11,7,5,4,2,1,0}, /* ATA_100 */ 122 different cycle_time range and offset */
123 {15,10,7,5,3,2,1}, /* ATA_133a (earliest 691 southbridges) */ 123 { 11, 7, 5, 4, 2, 1, 0 }, /* ATA_100 */
124 {15,10,7,5,3,2,1}, /* ATA_133 */ 124 { 15, 10, 7, 5, 3, 2, 1 }, /* ATA_133a (earliest 691 southbridges) */
125 { 15, 10, 7, 5, 3, 2, 1 }, /* ATA_133 */
125}; 126};
126/* CRC Valid Setup Time varies across IDE clock settings 33/66/100/133 127/* CRC Valid Setup Time varies across IDE clock settings 33/66/100/133
127 See SiS962 data sheet for more detail */ 128 See SiS962 data sheet for more detail */
128static u8 cvs_time_value[][XFER_UDMA_6 - XFER_UDMA_0 + 1] = { 129static u8 cvs_time_value[][XFER_UDMA_6 - XFER_UDMA_0 + 1] = {
129 {0,0,0,0,0,0,0}, /* no udma */ 130 { 0, 0, 0, 0, 0, 0, 0 }, /* no UDMA */
130 {0,0,0,0,0,0,0}, /* no udma */ 131 { 0, 0, 0, 0, 0, 0, 0 }, /* no UDMA */
131 {2,1,1,0,0,0,0}, 132 { 2, 1, 1, 0, 0, 0, 0 },
132 {4,3,2,1,0,0,0}, 133 { 4, 3, 2, 1, 0, 0, 0 },
133 {4,3,2,1,0,0,0}, 134 { 4, 3, 2, 1, 0, 0, 0 },
134 {6,4,3,1,1,1,0}, 135 { 6, 4, 3, 1, 1, 1, 0 },
135 {9,6,4,2,2,2,2}, 136 { 9, 6, 4, 2, 2, 2, 2 },
136 {9,6,4,2,2,2,2}, 137 { 9, 6, 4, 2, 2, 2, 2 },
137}; 138};
138/* Initialize time, Active time, Recovery time vary across 139/* Initialize time, Active time, Recovery time vary across
139 IDE clock settings. These 3 arrays hold the register value 140 IDE clock settings. These 3 arrays hold the register value
140 for PIO0/1/2/3/4 and DMA0/1/2 mode in order */ 141 for PIO0/1/2/3/4 and DMA0/1/2 mode in order */
141static u8 ini_time_value[][8] = { 142static u8 ini_time_value[][8] = {
142 {0,0,0,0,0,0,0,0}, 143 { 0, 0, 0, 0, 0, 0, 0, 0 },
143 {0,0,0,0,0,0,0,0}, 144 { 0, 0, 0, 0, 0, 0, 0, 0 },
144 {2,1,0,0,0,1,0,0}, 145 { 2, 1, 0, 0, 0, 1, 0, 0 },
145 {4,3,1,1,1,3,1,1}, 146 { 4, 3, 1, 1, 1, 3, 1, 1 },
146 {4,3,1,1,1,3,1,1}, 147 { 4, 3, 1, 1, 1, 3, 1, 1 },
147 {6,4,2,2,2,4,2,2}, 148 { 6, 4, 2, 2, 2, 4, 2, 2 },
148 {9,6,3,3,3,6,3,3}, 149 { 9, 6, 3, 3, 3, 6, 3, 3 },
149 {9,6,3,3,3,6,3,3}, 150 { 9, 6, 3, 3, 3, 6, 3, 3 },
150}; 151};
151static u8 act_time_value[][8] = { 152static u8 act_time_value[][8] = {
152 {0,0,0,0,0,0,0,0}, 153 { 0, 0, 0, 0, 0, 0, 0, 0 },
153 {0,0,0,0,0,0,0,0}, 154 { 0, 0, 0, 0, 0, 0, 0, 0 },
154 {9,9,9,2,2,7,2,2}, 155 { 9, 9, 9, 2, 2, 7, 2, 2 },
155 {19,19,19,5,4,14,5,4}, 156 { 19, 19, 19, 5, 4, 14, 5, 4 },
156 {19,19,19,5,4,14,5,4}, 157 { 19, 19, 19, 5, 4, 14, 5, 4 },
157 {28,28,28,7,6,21,7,6}, 158 { 28, 28, 28, 7, 6, 21, 7, 6 },
158 {38,38,38,10,9,28,10,9}, 159 { 38, 38, 38, 10, 9, 28, 10, 9 },
159 {38,38,38,10,9,28,10,9}, 160 { 38, 38, 38, 10, 9, 28, 10, 9 },
160}; 161};
161static u8 rco_time_value[][8] = { 162static u8 rco_time_value[][8] = {
162 {0,0,0,0,0,0,0,0}, 163 { 0, 0, 0, 0, 0, 0, 0, 0 },
163 {0,0,0,0,0,0,0,0}, 164 { 0, 0, 0, 0, 0, 0, 0, 0 },
164 {9,2,0,2,0,7,1,1}, 165 { 9, 2, 0, 2, 0, 7, 1, 1 },
165 {19,5,1,5,2,16,3,2}, 166 { 19, 5, 1, 5, 2, 16, 3, 2 },
166 {19,5,1,5,2,16,3,2}, 167 { 19, 5, 1, 5, 2, 16, 3, 2 },
167 {30,9,3,9,4,25,6,4}, 168 { 30, 9, 3, 9, 4, 25, 6, 4 },
168 {40,12,4,12,5,34,12,5}, 169 { 40, 12, 4, 12, 5, 34, 12, 5 },
169 {40,12,4,12,5,34,12,5}, 170 { 40, 12, 4, 12, 5, 34, 12, 5 },
170}; 171};
171 172
172/* 173/*
173 * Printing configuration 174 * Printing configuration
174 */ 175 */
175/* Used for chipset type printing at boot time */ 176/* Used for chipset type printing at boot time */
176static char* chipset_capability[] = { 177static char *chipset_capability[] = {
177 "ATA", "ATA 16", 178 "ATA", "ATA 16",
178 "ATA 33", "ATA 66", 179 "ATA 33", "ATA 66",
179 "ATA 100 (1st gen)", "ATA 100 (2nd gen)", 180 "ATA 100 (1st gen)", "ATA 100 (2nd gen)",
@@ -272,7 +273,7 @@ static void sis_program_timings(ide_drive_t *drive, const u8 mode)
272 sis_ata133_program_timings(drive, mode); 273 sis_ata133_program_timings(drive, mode);
273} 274}
274 275
275static void config_drive_art_rwp (ide_drive_t *drive) 276static void config_drive_art_rwp(ide_drive_t *drive)
276{ 277{
277 ide_hwif_t *hwif = HWIF(drive); 278 ide_hwif_t *hwif = HWIF(drive);
278 struct pci_dev *dev = to_pci_dev(hwif->dev); 279 struct pci_dev *dev = to_pci_dev(hwif->dev);
@@ -346,7 +347,7 @@ static void sis_set_dma_mode(ide_drive_t *drive, const u8 speed)
346 sis_program_timings(drive, speed); 347 sis_program_timings(drive, speed);
347} 348}
348 349
349static u8 sis5513_ata133_udma_filter(ide_drive_t *drive) 350static u8 sis_ata133_udma_filter(ide_drive_t *drive)
350{ 351{
351 struct pci_dev *dev = to_pci_dev(drive->hwif->dev); 352 struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
352 u32 regdw = 0; 353 u32 regdw = 0;
@@ -358,8 +359,7 @@ static u8 sis5513_ata133_udma_filter(ide_drive_t *drive)
358 return (regdw & 0x08) ? ATA_UDMA6 : ATA_UDMA5; 359 return (regdw & 0x08) ? ATA_UDMA6 : ATA_UDMA5;
359} 360}
360 361
361/* Chip detection and general config */ 362static int __devinit sis_find_family(struct pci_dev *dev)
362static unsigned int __devinit init_chipset_sis5513 (struct pci_dev *dev, const char *name)
363{ 363{
364 struct pci_dev *host; 364 struct pci_dev *host;
365 int i = 0; 365 int i = 0;
@@ -381,7 +381,7 @@ static unsigned int __devinit init_chipset_sis5513 (struct pci_dev *dev, const c
381 chipset_family = ATA_100a; 381 chipset_family = ATA_100a;
382 } 382 }
383 pci_dev_put(host); 383 pci_dev_put(host);
384 384
385 printk(KERN_INFO "SIS5513: %s %s controller\n", 385 printk(KERN_INFO "SIS5513: %s %s controller\n",
386 SiSHostChipInfo[i].name, chipset_capability[chipset_family]); 386 SiSHostChipInfo[i].name, chipset_capability[chipset_family]);
387 } 387 }
@@ -440,63 +440,60 @@ static unsigned int __devinit init_chipset_sis5513 (struct pci_dev *dev, const c
440 } 440 }
441 } 441 }
442 442
443 if (!chipset_family) 443 return chipset_family;
444 return -1; 444}
445 445
446static unsigned int __devinit init_chipset_sis5513(struct pci_dev *dev,
447 const char *name)
448{
446 /* Make general config ops here 449 /* Make general config ops here
447 1/ tell IDE channels to operate in Compatibility mode only 450 1/ tell IDE channels to operate in Compatibility mode only
448 2/ tell old chips to allow per drive IDE timings */ 451 2/ tell old chips to allow per drive IDE timings */
449 452
450 { 453 u8 reg;
451 u8 reg; 454 u16 regw;
452 u16 regw; 455
453 456 switch (chipset_family) {
454 switch(chipset_family) { 457 case ATA_133:
455 case ATA_133: 458 /* SiS962 operation mode */
456 /* SiS962 operation mode */ 459 pci_read_config_word(dev, 0x50, &regw);
457 pci_read_config_word(dev, 0x50, &regw); 460 if (regw & 0x08)
458 if (regw & 0x08) 461 pci_write_config_word(dev, 0x50, regw&0xfff7);
459 pci_write_config_word(dev, 0x50, regw&0xfff7); 462 pci_read_config_word(dev, 0x52, &regw);
460 pci_read_config_word(dev, 0x52, &regw); 463 if (regw & 0x08)
461 if (regw & 0x08) 464 pci_write_config_word(dev, 0x52, regw&0xfff7);
462 pci_write_config_word(dev, 0x52, regw&0xfff7); 465 break;
463 break; 466 case ATA_133a:
464 case ATA_133a: 467 case ATA_100:
465 case ATA_100: 468 /* Fixup latency */
466 /* Fixup latency */ 469 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x80);
467 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x80); 470 /* Set compatibility bit */
468 /* Set compatibility bit */ 471 pci_read_config_byte(dev, 0x49, &reg);
469 pci_read_config_byte(dev, 0x49, &reg); 472 if (!(reg & 0x01))
470 if (!(reg & 0x01)) { 473 pci_write_config_byte(dev, 0x49, reg|0x01);
471 pci_write_config_byte(dev, 0x49, reg|0x01); 474 break;
472 } 475 case ATA_100a:
473 break; 476 case ATA_66:
474 case ATA_100a: 477 /* Fixup latency */
475 case ATA_66: 478 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x10);
476 /* Fixup latency */ 479
477 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x10); 480 /* On ATA_66 chips the bit was elsewhere */
478 481 pci_read_config_byte(dev, 0x52, &reg);
479 /* On ATA_66 chips the bit was elsewhere */ 482 if (!(reg & 0x04))
480 pci_read_config_byte(dev, 0x52, &reg); 483 pci_write_config_byte(dev, 0x52, reg|0x04);
481 if (!(reg & 0x04)) { 484 break;
482 pci_write_config_byte(dev, 0x52, reg|0x04); 485 case ATA_33:
483 } 486 /* On ATA_33 we didn't have a single bit to set */
484 break; 487 pci_read_config_byte(dev, 0x09, &reg);
485 case ATA_33: 488 if ((reg & 0x0f) != 0x00)
486 /* On ATA_33 we didn't have a single bit to set */ 489 pci_write_config_byte(dev, 0x09, reg&0xf0);
487 pci_read_config_byte(dev, 0x09, &reg); 490 case ATA_16:
488 if ((reg & 0x0f) != 0x00) { 491 /* force per drive recovery and active timings
489 pci_write_config_byte(dev, 0x09, reg&0xf0); 492 needed on ATA_33 and below chips */
490 } 493 pci_read_config_byte(dev, 0x52, &reg);
491 case ATA_16: 494 if (!(reg & 0x08))
492 /* force per drive recovery and active timings 495 pci_write_config_byte(dev, 0x52, reg|0x08);
493 needed on ATA_33 and below chips */ 496 break;
494 pci_read_config_byte(dev, 0x52, &reg);
495 if (!(reg & 0x08)) {
496 pci_write_config_byte(dev, 0x52, reg|0x08);
497 }
498 break;
499 }
500 } 497 }
501 498
502 return 0; 499 return 0;
@@ -517,7 +514,7 @@ static const struct sis_laptop sis_laptop[] = {
517 { 0, } 514 { 0, }
518}; 515};
519 516
520static u8 __devinit ata66_sis5513(ide_hwif_t *hwif) 517static u8 __devinit sis_cable_detect(ide_hwif_t *hwif)
521{ 518{
522 struct pci_dev *pdev = to_pci_dev(hwif->dev); 519 struct pci_dev *pdev = to_pci_dev(hwif->dev);
523 const struct sis_laptop *lap = &sis_laptop[0]; 520 const struct sis_laptop *lap = &sis_laptop[0];
@@ -546,38 +543,44 @@ static u8 __devinit ata66_sis5513(ide_hwif_t *hwif)
546 return ata66 ? ATA_CBL_PATA80 : ATA_CBL_PATA40; 543 return ata66 ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
547} 544}
548 545
549static void __devinit init_hwif_sis5513 (ide_hwif_t *hwif) 546static const struct ide_port_ops sis_port_ops = {
550{ 547 .set_pio_mode = sis_set_pio_mode,
551 u8 udma_rates[] = { 0x00, 0x00, 0x07, 0x1f, 0x3f, 0x3f, 0x7f, 0x7f }; 548 .set_dma_mode = sis_set_dma_mode,
552 549 .cable_detect = sis_cable_detect,
553 hwif->set_pio_mode = &sis_set_pio_mode; 550};
554 hwif->set_dma_mode = &sis_set_dma_mode;
555
556 if (chipset_family >= ATA_133)
557 hwif->udma_filter = sis5513_ata133_udma_filter;
558
559 hwif->cable_detect = ata66_sis5513;
560
561 if (hwif->dma_base == 0)
562 return;
563 551
564 hwif->ultra_mask = udma_rates[chipset_family]; 552static const struct ide_port_ops sis_ata133_port_ops = {
565} 553 .set_pio_mode = sis_set_pio_mode,
554 .set_dma_mode = sis_set_dma_mode,
555 .udma_filter = sis_ata133_udma_filter,
556 .cable_detect = sis_cable_detect,
557};
566 558
567static const struct ide_port_info sis5513_chipset __devinitdata = { 559static const struct ide_port_info sis5513_chipset __devinitdata = {
568 .name = "SIS5513", 560 .name = "SIS5513",
569 .init_chipset = init_chipset_sis5513, 561 .init_chipset = init_chipset_sis5513,
570 .init_hwif = init_hwif_sis5513, 562 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
571 .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}}, 563 .host_flags = IDE_HFLAG_LEGACY_IRQS | IDE_HFLAG_NO_AUTODMA,
572 .host_flags = IDE_HFLAG_LEGACY_IRQS | IDE_HFLAG_NO_AUTODMA |
573 IDE_HFLAG_BOOTABLE,
574 .pio_mask = ATA_PIO4, 564 .pio_mask = ATA_PIO4,
575 .mwdma_mask = ATA_MWDMA2, 565 .mwdma_mask = ATA_MWDMA2,
576}; 566};
577 567
578static int __devinit sis5513_init_one(struct pci_dev *dev, const struct pci_device_id *id) 568static int __devinit sis5513_init_one(struct pci_dev *dev, const struct pci_device_id *id)
579{ 569{
580 return ide_setup_pci_device(dev, &sis5513_chipset); 570 struct ide_port_info d = sis5513_chipset;
571 u8 udma_rates[] = { 0x00, 0x00, 0x07, 0x1f, 0x3f, 0x3f, 0x7f, 0x7f };
572
573 if (sis_find_family(dev) == 0)
574 return -ENOTSUPP;
575
576 if (chipset_family >= ATA_133)
577 d.port_ops = &sis_ata133_port_ops;
578 else
579 d.port_ops = &sis_port_ops;
580
581 d.udma_mask = udma_rates[chipset_family];
582
583 return ide_setup_pci_device(dev, &d);
581} 584}
582 585
583static const struct pci_device_id sis5513_pci_tbl[] = { 586static const struct pci_device_id sis5513_pci_tbl[] = {
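
The sis5513 variant of the same pattern adds probe-time detection: sis_find_family() now runs from the probe routine, so an unrecognized host bridge fails with -ENOTSUPP before anything is registered, and the detected family selects both the port_ops table and the UDMA mask. A condensed sketch (hypothetical bar_* names mirror the hunk above):

static int __devinit bar_init_one(struct pci_dev *dev,
				  const struct pci_device_id *id)
{
	static const u8 udma_rates[] = {
		0x00, 0x00, 0x07, 0x1f, 0x3f, 0x3f, 0x7f, 0x7f
	};
	struct ide_port_info d = bar_chipset;
	int family = bar_find_family(dev);

	if (family == 0)		/* no known host bridge found */
		return -ENOTSUPP;

	d.port_ops = (family >= ATA_133) ? &bar_ata133_port_ops
					 : &bar_port_ops;
	d.udma_mask = udma_rates[family];

	return ide_setup_pci_device(dev, &d);
}
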
diff --git a/drivers/ide/pci/sl82c105.c b/drivers/ide/pci/sl82c105.c
index 1f00251a4a87..ce84fa045d39 100644
--- a/drivers/ide/pci/sl82c105.c
+++ b/drivers/ide/pci/sl82c105.c
@@ -179,7 +179,7 @@ static void sl82c105_dma_start(ide_drive_t *drive)
179 struct pci_dev *dev = to_pci_dev(hwif->dev); 179 struct pci_dev *dev = to_pci_dev(hwif->dev);
180 int reg = 0x44 + drive->dn * 4; 180 int reg = 0x44 + drive->dn * 4;
181 181
182 DBG(("%s(drive:%s)\n", __FUNCTION__, drive->name)); 182 DBG(("%s(drive:%s)\n", __func__, drive->name));
183 183
184 pci_write_config_word(dev, reg, drive->drive_data >> 16); 184 pci_write_config_word(dev, reg, drive->drive_data >> 16);
185 185
@@ -203,7 +203,7 @@ static int sl82c105_dma_end(ide_drive_t *drive)
203 int reg = 0x44 + drive->dn * 4; 203 int reg = 0x44 + drive->dn * 4;
204 int ret; 204 int ret;
205 205
206 DBG(("%s(drive:%s)\n", __FUNCTION__, drive->name)); 206 DBG(("%s(drive:%s)\n", __func__, drive->name));
207 207
208 ret = __ide_dma_end(drive); 208 ret = __ide_dma_end(drive);
209 209
@@ -232,7 +232,7 @@ static void sl82c105_resetproc(ide_drive_t *drive)
232 * Return the revision of the Winbond bridge 232 * Return the revision of the Winbond bridge
233 * which this function is part of. 233 * which this function is part of.
234 */ 234 */
235static unsigned int sl82c105_bridge_revision(struct pci_dev *dev) 235static u8 sl82c105_bridge_revision(struct pci_dev *dev)
236{ 236{
237 struct pci_dev *bridge; 237 struct pci_dev *bridge;
238 238
@@ -282,64 +282,59 @@ static unsigned int __devinit init_chipset_sl82c105(struct pci_dev *dev, const c
282 return dev->irq; 282 return dev->irq;
283} 283}
284 284
285/* 285static const struct ide_port_ops sl82c105_port_ops = {
286 * Initialise IDE channel 286 .set_pio_mode = sl82c105_set_pio_mode,
287 */ 287 .set_dma_mode = sl82c105_set_dma_mode,
288static void __devinit init_hwif_sl82c105(ide_hwif_t *hwif) 288 .resetproc = sl82c105_resetproc,
289{ 289};
290 struct pci_dev *dev = to_pci_dev(hwif->dev);
291 unsigned int rev;
292
293 DBG(("init_hwif_sl82c105(hwif: ide%d)\n", hwif->index));
294
295 hwif->set_pio_mode = &sl82c105_set_pio_mode;
296 hwif->set_dma_mode = &sl82c105_set_dma_mode;
297 hwif->resetproc = &sl82c105_resetproc;
298
299 if (!hwif->dma_base)
300 return;
301
302 rev = sl82c105_bridge_revision(dev);
303 if (rev <= 5) {
304 /*
305 * Never ever EVER under any circumstances enable
306 * DMA when the bridge is this old.
307 */
308 printk(" %s: Winbond W83C553 bridge revision %d, "
309 "BM-DMA disabled\n", hwif->name, rev);
310 return;
311 }
312
313 hwif->mwdma_mask = ATA_MWDMA2;
314
315 hwif->dma_lost_irq = &sl82c105_dma_lost_irq;
316 hwif->dma_start = &sl82c105_dma_start;
317 hwif->ide_dma_end = &sl82c105_dma_end;
318 hwif->dma_timeout = &sl82c105_dma_timeout;
319 290
320 if (hwif->mate) 291static const struct ide_dma_ops sl82c105_dma_ops = {
321 hwif->serialized = hwif->mate->serialized = 1; 292 .dma_host_set = ide_dma_host_set,
322} 293 .dma_setup = ide_dma_setup,
294 .dma_exec_cmd = ide_dma_exec_cmd,
295 .dma_start = sl82c105_dma_start,
296 .dma_end = sl82c105_dma_end,
297 .dma_test_irq = ide_dma_test_irq,
298 .dma_lost_irq = sl82c105_dma_lost_irq,
299 .dma_timeout = sl82c105_dma_timeout,
300};
323 301
324static const struct ide_port_info sl82c105_chipset __devinitdata = { 302static const struct ide_port_info sl82c105_chipset __devinitdata = {
325 .name = "W82C105", 303 .name = "W82C105",
326 .init_chipset = init_chipset_sl82c105, 304 .init_chipset = init_chipset_sl82c105,
327 .init_hwif = init_hwif_sl82c105,
328 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}}, 305 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
306 .port_ops = &sl82c105_port_ops,
307 .dma_ops = &sl82c105_dma_ops,
329 .host_flags = IDE_HFLAG_IO_32BIT | 308 .host_flags = IDE_HFLAG_IO_32BIT |
330 IDE_HFLAG_UNMASK_IRQS | 309 IDE_HFLAG_UNMASK_IRQS |
331/* FIXME: check for Compatibility mode in generic IDE PCI code */ 310/* FIXME: check for Compatibility mode in generic IDE PCI code */
332#if defined(CONFIG_LOPEC) || defined(CONFIG_SANDPOINT) 311#if defined(CONFIG_LOPEC) || defined(CONFIG_SANDPOINT)
333 IDE_HFLAG_FORCE_LEGACY_IRQS | 312 IDE_HFLAG_FORCE_LEGACY_IRQS |
334#endif 313#endif
335 IDE_HFLAG_NO_AUTODMA | 314 IDE_HFLAG_SERIALIZE_DMA |
336 IDE_HFLAG_BOOTABLE, 315 IDE_HFLAG_NO_AUTODMA,
337 .pio_mask = ATA_PIO5, 316 .pio_mask = ATA_PIO5,
317 .mwdma_mask = ATA_MWDMA2,
338}; 318};
339 319
340static int __devinit sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id) 320static int __devinit sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id)
341{ 321{
342 return ide_setup_pci_device(dev, &sl82c105_chipset); 322 struct ide_port_info d = sl82c105_chipset;
323 u8 rev = sl82c105_bridge_revision(dev);
324
325 if (rev <= 5) {
326 /*
327 * Never ever EVER under any circumstances enable
328 * DMA when the bridge is this old.
329 */
330 printk(KERN_INFO "W82C105_IDE: Winbond W83C553 bridge "
331 "revision %d, BM-DMA disabled\n", rev);
332 d.dma_ops = NULL;
333 d.mwdma_mask = 0;
334 d.host_flags &= ~IDE_HFLAG_SERIALIZE_DMA;
335 }
336
337 return ide_setup_pci_device(dev, &d);
343} 338}
344 339
345static const struct pci_device_id sl82c105_pci_tbl[] = { 340static const struct pci_device_id sl82c105_pci_tbl[] = {
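
Two details in the sl82c105 conversion are worth flagging: the new ide_dma_ops table mixes the generic SFF helpers (ide_dma_host_set, ide_dma_setup, ide_dma_test_irq) with the driver's own start/end/lost-irq/timeout hooks, and the old "bridge too old, bail out of init_hwif" check becomes probe-time gating that strips DMA from the copied template. A sketch of that gating, with a hypothetical baz_bridge_revision() standing in for the Winbond check:

static int __devinit baz_init_one(struct pci_dev *dev,
				  const struct pci_device_id *id)
{
	struct ide_port_info d = baz_chipset;

	if (baz_bridge_revision(dev) <= 5) {
		/* too old for bus-master DMA: run the ports PIO-only */
		d.dma_ops = NULL;
		d.mwdma_mask = 0;
		d.host_flags &= ~IDE_HFLAG_SERIALIZE_DMA;
	}

	return ide_setup_pci_device(dev, &d);
}
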
diff --git a/drivers/ide/pci/slc90e66.c b/drivers/ide/pci/slc90e66.c
index 65f4c2ffaa59..dae6e2c94d86 100644
--- a/drivers/ide/pci/slc90e66.c
+++ b/drivers/ide/pci/slc90e66.c
@@ -27,9 +27,9 @@ static void slc90e66_set_pio_mode(ide_drive_t *drive, const u8 pio)
27 unsigned long flags; 27 unsigned long flags;
28 u16 master_data; 28 u16 master_data;
29 u8 slave_data; 29 u8 slave_data;
30 int control = 0; 30 int control = 0;
31 /* ISP RTC */ 31 /* ISP RTC */
32 static const u8 timings[][2]= { 32 static const u8 timings[][2] = {
33 { 0, 0 }, 33 { 0, 0 },
34 { 0, 0 }, 34 { 0, 0 },
35 { 1, 0 }, 35 { 1, 0 },
@@ -125,19 +125,17 @@ static u8 __devinit slc90e66_cable_detect(ide_hwif_t *hwif)
125 return (reg47 & mask) ? ATA_CBL_PATA40 : ATA_CBL_PATA80; 125 return (reg47 & mask) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
126} 126}
127 127
128static void __devinit init_hwif_slc90e66(ide_hwif_t *hwif) 128static const struct ide_port_ops slc90e66_port_ops = {
129{ 129 .set_pio_mode = slc90e66_set_pio_mode,
130 hwif->set_pio_mode = &slc90e66_set_pio_mode; 130 .set_dma_mode = slc90e66_set_dma_mode,
131 hwif->set_dma_mode = &slc90e66_set_dma_mode; 131 .cable_detect = slc90e66_cable_detect,
132 132};
133 hwif->cable_detect = slc90e66_cable_detect;
134}
135 133
136static const struct ide_port_info slc90e66_chipset __devinitdata = { 134static const struct ide_port_info slc90e66_chipset __devinitdata = {
137 .name = "SLC90E66", 135 .name = "SLC90E66",
138 .init_hwif = init_hwif_slc90e66, 136 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
139 .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, 137 .port_ops = &slc90e66_port_ops,
140 .host_flags = IDE_HFLAG_LEGACY_IRQS | IDE_HFLAG_BOOTABLE, 138 .host_flags = IDE_HFLAG_LEGACY_IRQS,
141 .pio_mask = ATA_PIO4, 139 .pio_mask = ATA_PIO4,
142 .swdma_mask = ATA_SWDMA2_ONLY, 140 .swdma_mask = ATA_SWDMA2_ONLY,
143 .mwdma_mask = ATA_MWDMA12_ONLY, 141 .mwdma_mask = ATA_MWDMA12_ONLY,
diff --git a/drivers/ide/pci/tc86c001.c b/drivers/ide/pci/tc86c001.c
index 1e4a6262bcef..9b4b27a4c711 100644
--- a/drivers/ide/pci/tc86c001.c
+++ b/drivers/ide/pci/tc86c001.c
@@ -18,20 +18,20 @@ static void tc86c001_set_mode(ide_drive_t *drive, const u8 speed)
18 u16 mode, scr = inw(scr_port); 18 u16 mode, scr = inw(scr_port);
19 19
20 switch (speed) { 20 switch (speed) {
21 case XFER_UDMA_4: mode = 0x00c0; break; 21 case XFER_UDMA_4: mode = 0x00c0; break;
22 case XFER_UDMA_3: mode = 0x00b0; break; 22 case XFER_UDMA_3: mode = 0x00b0; break;
23 case XFER_UDMA_2: mode = 0x00a0; break; 23 case XFER_UDMA_2: mode = 0x00a0; break;
24 case XFER_UDMA_1: mode = 0x0090; break; 24 case XFER_UDMA_1: mode = 0x0090; break;
25 case XFER_UDMA_0: mode = 0x0080; break; 25 case XFER_UDMA_0: mode = 0x0080; break;
26 case XFER_MW_DMA_2: mode = 0x0070; break; 26 case XFER_MW_DMA_2: mode = 0x0070; break;
27 case XFER_MW_DMA_1: mode = 0x0060; break; 27 case XFER_MW_DMA_1: mode = 0x0060; break;
28 case XFER_MW_DMA_0: mode = 0x0050; break; 28 case XFER_MW_DMA_0: mode = 0x0050; break;
29 case XFER_PIO_4: mode = 0x0400; break; 29 case XFER_PIO_4: mode = 0x0400; break;
30 case XFER_PIO_3: mode = 0x0300; break; 30 case XFER_PIO_3: mode = 0x0300; break;
31 case XFER_PIO_2: mode = 0x0200; break; 31 case XFER_PIO_2: mode = 0x0200; break;
32 case XFER_PIO_1: mode = 0x0100; break; 32 case XFER_PIO_1: mode = 0x0100; break;
33 case XFER_PIO_0: 33 case XFER_PIO_0:
34 default: mode = 0x0000; break; 34 default: mode = 0x0000; break;
35 } 35 }
36 36
37 scr &= (speed < XFER_MW_DMA_0) ? 0xf8ff : 0xff0f; 37 scr &= (speed < XFER_MW_DMA_0) ? 0xf8ff : 0xff0f;
@@ -157,11 +157,6 @@ static void __devinit init_hwif_tc86c001(ide_hwif_t *hwif)
157 /* Store the system control register base for convenience... */ 157 /* Store the system control register base for convenience... */
158 hwif->config_data = sc_base; 158 hwif->config_data = sc_base;
159 159
160 hwif->set_pio_mode = &tc86c001_set_pio_mode;
161 hwif->set_dma_mode = &tc86c001_set_mode;
162
163 hwif->cable_detect = tc86c001_cable_detect;
164
165 if (!hwif->dma_base) 160 if (!hwif->dma_base)
166 return; 161 return;
167 162
@@ -173,8 +168,6 @@ static void __devinit init_hwif_tc86c001(ide_hwif_t *hwif)
173 168
174 /* Sector Count Register limit */ 169 /* Sector Count Register limit */
175 hwif->rqsize = 0xffff; 170 hwif->rqsize = 0xffff;
176
177 hwif->dma_start = &tc86c001_dma_start;
178} 171}
179 172
180static unsigned int __devinit init_chipset_tc86c001(struct pci_dev *dev, 173static unsigned int __devinit init_chipset_tc86c001(struct pci_dev *dev,
@@ -187,10 +180,29 @@ static unsigned int __devinit init_chipset_tc86c001(struct pci_dev *dev,
187 return err; 180 return err;
188} 181}
189 182
183static const struct ide_port_ops tc86c001_port_ops = {
184 .set_pio_mode = tc86c001_set_pio_mode,
185 .set_dma_mode = tc86c001_set_mode,
186 .cable_detect = tc86c001_cable_detect,
187};
188
189static const struct ide_dma_ops tc86c001_dma_ops = {
190 .dma_host_set = ide_dma_host_set,
191 .dma_setup = ide_dma_setup,
192 .dma_exec_cmd = ide_dma_exec_cmd,
193 .dma_start = tc86c001_dma_start,
194 .dma_end = __ide_dma_end,
195 .dma_test_irq = ide_dma_test_irq,
196 .dma_lost_irq = ide_dma_lost_irq,
197 .dma_timeout = ide_dma_timeout,
198};
199
190static const struct ide_port_info tc86c001_chipset __devinitdata = { 200static const struct ide_port_info tc86c001_chipset __devinitdata = {
191 .name = "TC86C001", 201 .name = "TC86C001",
192 .init_chipset = init_chipset_tc86c001, 202 .init_chipset = init_chipset_tc86c001,
193 .init_hwif = init_hwif_tc86c001, 203 .init_hwif = init_hwif_tc86c001,
204 .port_ops = &tc86c001_port_ops,
205 .dma_ops = &tc86c001_dma_ops,
194 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_OFF_BOARD | 206 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_OFF_BOARD |
195 IDE_HFLAG_ABUSE_SET_DMA_MODE, 207 IDE_HFLAG_ABUSE_SET_DMA_MODE,
196 .pio_mask = ATA_PIO4, 208 .pio_mask = ATA_PIO4,
diff --git a/drivers/ide/pci/triflex.c b/drivers/ide/pci/triflex.c
index a67d02a3f96e..db65a558d4ec 100644
--- a/drivers/ide/pci/triflex.c
+++ b/drivers/ide/pci/triflex.c
@@ -87,17 +87,15 @@ static void triflex_set_pio_mode(ide_drive_t *drive, const u8 pio)
87 triflex_set_mode(drive, XFER_PIO_0 + pio); 87 triflex_set_mode(drive, XFER_PIO_0 + pio);
88} 88}
89 89
90static void __devinit init_hwif_triflex(ide_hwif_t *hwif) 90static const struct ide_port_ops triflex_port_ops = {
91{ 91 .set_pio_mode = triflex_set_pio_mode,
92 hwif->set_pio_mode = &triflex_set_pio_mode; 92 .set_dma_mode = triflex_set_mode,
93 hwif->set_dma_mode = &triflex_set_mode; 93};
94}
95 94
96static const struct ide_port_info triflex_device __devinitdata = { 95static const struct ide_port_info triflex_device __devinitdata = {
97 .name = "TRIFLEX", 96 .name = "TRIFLEX",
98 .init_hwif = init_hwif_triflex,
99 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}}, 97 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
100 .host_flags = IDE_HFLAG_BOOTABLE, 98 .port_ops = &triflex_port_ops,
101 .pio_mask = ATA_PIO4, 99 .pio_mask = ATA_PIO4,
102 .swdma_mask = ATA_SWDMA2, 100 .swdma_mask = ATA_SWDMA2,
103 .mwdma_mask = ATA_MWDMA2, 101 .mwdma_mask = ATA_MWDMA2,
diff --git a/drivers/ide/pci/trm290.c b/drivers/ide/pci/trm290.c
index de750f7a43e9..a8a3138682ef 100644
--- a/drivers/ide/pci/trm290.c
+++ b/drivers/ide/pci/trm290.c
@@ -214,7 +214,7 @@ static void trm290_dma_start(ide_drive_t *drive)
214{ 214{
215} 215}
216 216
217static int trm290_ide_dma_end (ide_drive_t *drive) 217static int trm290_dma_end(ide_drive_t *drive)
218{ 218{
219 u16 status; 219 u16 status;
220 220
@@ -225,7 +225,7 @@ static int trm290_ide_dma_end (ide_drive_t *drive)
225 return status != 0x00ff; 225 return status != 0x00ff;
226} 226}
227 227
228static int trm290_ide_dma_test_irq (ide_drive_t *drive) 228static int trm290_dma_test_irq(ide_drive_t *drive)
229{ 229{
230 u16 status; 230 u16 status;
231 231
@@ -254,22 +254,11 @@ static void __devinit init_hwif_trm290(ide_hwif_t *hwif)
254 hwif->config_data = cfg_base; 254 hwif->config_data = cfg_base;
255 hwif->dma_base = (cfg_base + 4) ^ (hwif->channel ? 0x80 : 0); 255 hwif->dma_base = (cfg_base + 4) ^ (hwif->channel ? 0x80 : 0);
256 256
257 printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx", 257 printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n",
258 hwif->name, hwif->dma_base, hwif->dma_base + 3); 258 hwif->name, hwif->dma_base, hwif->dma_base + 3);
259 259
260 if (!request_region(hwif->dma_base, 4, hwif->name)) { 260 if (ide_allocate_dma_engine(hwif))
261 printk(KERN_CONT " -- Error, ports in use.\n");
262 return; 261 return;
263 }
264
265 hwif->dmatable_cpu = pci_alloc_consistent(dev, PRD_ENTRIES * PRD_BYTES,
266 &hwif->dmatable_dma);
267 if (!hwif->dmatable_cpu) {
268 printk(KERN_CONT " -- Error, unable to allocate DMA table.\n");
269 release_region(hwif->dma_base, 4);
270 return;
271 }
272 printk(KERN_CONT "\n");
273 262
274 local_irq_save(flags); 263 local_irq_save(flags);
275 /* put config reg into first byte of hwif->select_data */ 264 /* put config reg into first byte of hwif->select_data */
@@ -291,14 +280,6 @@ static void __devinit init_hwif_trm290(ide_hwif_t *hwif)
291 /* sharing IRQ with mate */ 280 /* sharing IRQ with mate */
292 hwif->irq = hwif->mate->irq; 281 hwif->irq = hwif->mate->irq;
293 282
294 hwif->dma_host_set = &trm290_dma_host_set;
295 hwif->dma_setup = &trm290_dma_setup;
296 hwif->dma_exec_cmd = &trm290_dma_exec_cmd;
297 hwif->dma_start = &trm290_dma_start;
298 hwif->ide_dma_end = &trm290_ide_dma_end;
299 hwif->ide_dma_test_irq = &trm290_ide_dma_test_irq;
300
301 hwif->selectproc = &trm290_selectproc;
302#if 1 283#if 1
303 { 284 {
304 /* 285 /*
@@ -317,7 +298,7 @@ static void __devinit init_hwif_trm290(ide_hwif_t *hwif)
317 if (old != compat && old_mask == 0xff) { 298 if (old != compat && old_mask == 0xff) {
318 /* leave lower 10 bits untouched */ 299 /* leave lower 10 bits untouched */
319 compat += (next_offset += 0x400); 300 compat += (next_offset += 0x400);
320 hwif->io_ports[IDE_CONTROL_OFFSET] = compat + 2; 301 hwif->io_ports.ctl_addr = compat + 2;
321 outw(compat | 1, hwif->config_data); 302 outw(compat | 1, hwif->config_data);
322 new = inw(hwif->config_data); 303 new = inw(hwif->config_data);
323 printk(KERN_INFO "%s: control basereg workaround: " 304 printk(KERN_INFO "%s: control basereg workaround: "
@@ -328,16 +309,32 @@ static void __devinit init_hwif_trm290(ide_hwif_t *hwif)
328#endif 309#endif
329} 310}
330 311
312static const struct ide_port_ops trm290_port_ops = {
313 .selectproc = trm290_selectproc,
314};
315
316static struct ide_dma_ops trm290_dma_ops = {
317 .dma_host_set = trm290_dma_host_set,
318 .dma_setup = trm290_dma_setup,
319 .dma_exec_cmd = trm290_dma_exec_cmd,
320 .dma_start = trm290_dma_start,
321 .dma_end = trm290_dma_end,
322 .dma_test_irq = trm290_dma_test_irq,
323 .dma_lost_irq = ide_dma_lost_irq,
324 .dma_timeout = ide_dma_timeout,
325};
326
331static const struct ide_port_info trm290_chipset __devinitdata = { 327static const struct ide_port_info trm290_chipset __devinitdata = {
332 .name = "TRM290", 328 .name = "TRM290",
333 .init_hwif = init_hwif_trm290, 329 .init_hwif = init_hwif_trm290,
334 .chipset = ide_trm290, 330 .chipset = ide_trm290,
331 .port_ops = &trm290_port_ops,
332 .dma_ops = &trm290_dma_ops,
335 .host_flags = IDE_HFLAG_NO_ATAPI_DMA | 333 .host_flags = IDE_HFLAG_NO_ATAPI_DMA |
336#if 0 /* play it safe for now */ 334#if 0 /* play it safe for now */
337 IDE_HFLAG_TRUST_BIOS_FOR_DMA | 335 IDE_HFLAG_TRUST_BIOS_FOR_DMA |
338#endif 336#endif
339 IDE_HFLAG_NO_AUTODMA | 337 IDE_HFLAG_NO_AUTODMA |
340 IDE_HFLAG_BOOTABLE |
341 IDE_HFLAG_NO_LBA48, 338 IDE_HFLAG_NO_LBA48,
342}; 339};
343 340
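
Note also how the trm290 hunk replaces the open-coded BMDMA setup — request_region() on the DMA ports plus pci_alloc_consistent() for the PRD table, each with its own error path — with a single ide_allocate_dma_engine() call. A sketch of the resulting init_hwif shape (hypothetical qux_* names; the dma_base computation is illustrative only):

static void __devinit qux_init_hwif(ide_hwif_t *hwif)
{
	/* hardware-specific discovery still lives in init_hwif ... */
	hwif->dma_base = hwif->config_data + 4;

	/* ... but region + PRD-table bookkeeping is now one core call */
	if (ide_allocate_dma_engine(hwif))
		return;

	/* remaining per-port setup continues here */
}
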
diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/pci/via82cxxx.c
index 9004e7521889..566e0ecb8db1 100644
--- a/drivers/ide/pci/via82cxxx.c
+++ b/drivers/ide/pci/via82cxxx.c
@@ -340,7 +340,7 @@ static unsigned int __devinit init_chipset_via82cxxx(struct pci_dev *dev, const
340 * Determine system bus clock. 340 * Determine system bus clock.
341 */ 341 */
342 342
343 via_clock = system_bus_clock() * 1000; 343 via_clock = (ide_pci_clk ? ide_pci_clk : system_bus_clock()) * 1000;
344 344
345 switch (via_clock) { 345 switch (via_clock) {
346 case 33000: via_clock = 33333; break; 346 case 33000: via_clock = 33333; break;
@@ -415,25 +415,21 @@ static u8 __devinit via82cxxx_cable_detect(ide_hwif_t *hwif)
415 return ATA_CBL_PATA40; 415 return ATA_CBL_PATA40;
416} 416}
417 417
418static void __devinit init_hwif_via82cxxx(ide_hwif_t *hwif) 418static const struct ide_port_ops via_port_ops = {
419{ 419 .set_pio_mode = via_set_pio_mode,
420 hwif->set_pio_mode = &via_set_pio_mode; 420 .set_dma_mode = via_set_drive,
421 hwif->set_dma_mode = &via_set_drive; 421 .cable_detect = via82cxxx_cable_detect,
422 422};
423 hwif->cable_detect = via82cxxx_cable_detect;
424}
425 423
426static const struct ide_port_info via82cxxx_chipset __devinitdata = { 424static const struct ide_port_info via82cxxx_chipset __devinitdata = {
427 .name = "VP_IDE", 425 .name = "VP_IDE",
428 .init_chipset = init_chipset_via82cxxx, 426 .init_chipset = init_chipset_via82cxxx,
429 .init_hwif = init_hwif_via82cxxx,
430 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } }, 427 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
428 .port_ops = &via_port_ops,
431 .host_flags = IDE_HFLAG_PIO_NO_BLACKLIST | 429 .host_flags = IDE_HFLAG_PIO_NO_BLACKLIST |
432 IDE_HFLAG_PIO_NO_DOWNGRADE |
433 IDE_HFLAG_ABUSE_SET_DMA_MODE | 430 IDE_HFLAG_ABUSE_SET_DMA_MODE |
434 IDE_HFLAG_POST_SET_MODE | 431 IDE_HFLAG_POST_SET_MODE |
435 IDE_HFLAG_IO_32BIT | 432 IDE_HFLAG_IO_32BIT,
436 IDE_HFLAG_BOOTABLE,
437 .pio_mask = ATA_PIO5, 433 .pio_mask = ATA_PIO5,
438 .swdma_mask = ATA_SWDMA2, 434 .swdma_mask = ATA_SWDMA2,
439 .mwdma_mask = ATA_MWDMA2, 435 .mwdma_mask = ATA_MWDMA2,
diff --git a/drivers/ide/ppc/mpc8xx.c b/drivers/ide/ppc/mpc8xx.c
index a784a97ca7ec..f0e638dcc3ab 100644
--- a/drivers/ide/ppc/mpc8xx.c
+++ b/drivers/ide/ppc/mpc8xx.c
@@ -36,6 +36,8 @@
36#include <asm/machdep.h> 36#include <asm/machdep.h>
37#include <asm/irq.h> 37#include <asm/irq.h>
38 38
39#define DRV_NAME "ide-mpc8xx"
40
39static int identify (volatile u8 *p); 41static int identify (volatile u8 *p);
40static void print_fixed (volatile u8 *p); 42static void print_fixed (volatile u8 *p);
41static void print_funcid (int func); 43static void print_funcid (int func);
@@ -127,9 +129,9 @@ static int pcmcia_schlvl = PCMCIA_SCHLVL;
127 * MPC8xx's internal PCMCIA interface 129 * MPC8xx's internal PCMCIA interface
128 */ 130 */
129#if defined(CONFIG_IDE_8xx_PCCARD) || defined(CONFIG_IDE_8xx_DIRECT) 131#if defined(CONFIG_IDE_8xx_PCCARD) || defined(CONFIG_IDE_8xx_DIRECT)
130static void __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port) 132static int __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port)
131{ 133{
132 unsigned long *p = hw->io_ports; 134 unsigned long *p = hw->io_ports_array;
133 int i; 135 int i;
134 136
135 typedef struct { 137 typedef struct {
@@ -182,6 +184,13 @@ static void __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port)
182 pcmcia_phy_base, pcmcia_phy_end, 184 pcmcia_phy_base, pcmcia_phy_end,
183 pcmcia_phy_end - pcmcia_phy_base); 185 pcmcia_phy_end - pcmcia_phy_base);
184 186
187 if (!request_mem_region(pcmcia_phy_base,
188 pcmcia_phy_end - pcmcia_phy_base,
189 DRV_NAME)) {
190 printk(KERN_ERR "%s: resources busy\n", DRV_NAME);
191 return -EBUSY;
192 }
193
185 pcmcia_base=(unsigned long)ioremap(pcmcia_phy_base, 194 pcmcia_base=(unsigned long)ioremap(pcmcia_phy_base,
186 pcmcia_phy_end-pcmcia_phy_base); 195 pcmcia_phy_end-pcmcia_phy_base);
187 196
@@ -236,7 +245,7 @@ static void __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port)
236 if (pcmp->pcmc_pipr & (M8XX_PCMCIA_CD1(_slot_)|M8XX_PCMCIA_CD2(_slot_))) { 245 if (pcmp->pcmc_pipr & (M8XX_PCMCIA_CD1(_slot_)|M8XX_PCMCIA_CD2(_slot_))) {
237 printk ("No card in slot %c: PIPR=%08x\n", 246 printk ("No card in slot %c: PIPR=%08x\n",
238 'A' + _slot_, (u32) pcmp->pcmc_pipr); 247 'A' + _slot_, (u32) pcmp->pcmc_pipr);
239 return; /* No card in slot */ 248 return -ENODEV; /* No card in slot */
240 } 249 }
241 250
242 check_ide_device (pcmcia_base); 251 check_ide_device (pcmcia_base);
@@ -279,9 +288,6 @@ static void __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port)
279 } 288 }
280#endif /* CONFIG_IDE_8xx_PCCARD */ 289#endif /* CONFIG_IDE_8xx_PCCARD */
281 290
282 ide_hwifs[data_port].pio_mask = ATA_PIO4;
283 ide_hwifs[data_port].set_pio_mode = m8xx_ide_set_pio_mode;
284
285 /* Enable Harddisk Interrupt, 291 /* Enable Harddisk Interrupt,
286 * and make it edge sensitive 292 * and make it edge sensitive
287 */ 293 */
@@ -296,6 +302,8 @@ static void __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port)
296 /* Enable falling edge irq */ 302 /* Enable falling edge irq */
297 pcmp->pcmc_per = 0x100000 >> (16 * _slot_); 303 pcmp->pcmc_per = 0x100000 >> (16 * _slot_);
298#endif /* CONFIG_IDE_8xx_PCCARD */ 304#endif /* CONFIG_IDE_8xx_PCCARD */
305
306 return 0;
299} 307}
300#endif /* CONFIG_IDE_8xx_PCCARD || CONFIG_IDE_8xx_DIRECT */ 308#endif /* CONFIG_IDE_8xx_PCCARD || CONFIG_IDE_8xx_DIRECT */
301 309
@@ -304,9 +312,9 @@ static void __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port)
304 * MPC8xx's internal PCMCIA interface 312 * MPC8xx's internal PCMCIA interface
305 */ 313 */
306#if defined(CONFIG_IDE_EXT_DIRECT) 314#if defined(CONFIG_IDE_EXT_DIRECT)
307static void __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port) 315static int __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port)
308{ 316{
309 unsigned long *p = hw->io_ports; 317 unsigned long *p = hw->io_ports_array;
310 int i; 318 int i;
311 319
312 u32 ide_phy_base; 320 u32 ide_phy_base;
@@ -327,7 +335,12 @@ static void __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port)
327 printk ("IDE phys mem : %08x...%08x (size %08x)\n", 335 printk ("IDE phys mem : %08x...%08x (size %08x)\n",
328 ide_phy_base, ide_phy_end, 336 ide_phy_base, ide_phy_end,
329 ide_phy_end - ide_phy_base); 337 ide_phy_end - ide_phy_base);
330 338
339 if (!request_mem_region(ide_phy_base, 0x200, DRV_NAME)) {
340 printk(KERN_ERR "%s: resources busy\n", DRV_NAME);
341 return -EBUSY;
342 }
343
331 ide_base=(unsigned long)ioremap(ide_phy_base, 344 ide_base=(unsigned long)ioremap(ide_phy_base,
332 ide_phy_end-ide_phy_base); 345 ide_phy_end-ide_phy_base);
333 346
@@ -357,15 +370,14 @@ static void __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port)
357 hw->irq = ioport_dsc[data_port].irq; 370 hw->irq = ioport_dsc[data_port].irq;
358 hw->ack_intr = (ide_ack_intr_t *)ide_interrupt_ack; 371 hw->ack_intr = (ide_ack_intr_t *)ide_interrupt_ack;
359 372
360 ide_hwifs[data_port].pio_mask = ATA_PIO4;
361 ide_hwifs[data_port].set_pio_mode = m8xx_ide_set_pio_mode;
362
363 /* Enable Harddisk Interrupt, 373 /* Enable Harddisk Interrupt,
364 * and make it edge sensitive 374 * and make it edge sensitive
365 */ 375 */
366 /* (11-18) Set edge detect for irq, no wakeup from low power mode */ 376 /* (11-18) Set edge detect for irq, no wakeup from low power mode */
367 ((immap_t *) IMAP_ADDR)->im_siu_conf.sc_siel |= 377 ((immap_t *) IMAP_ADDR)->im_siu_conf.sc_siel |=
368 (0x80000000 >> ioport_dsc[data_port].irq); 378 (0x80000000 >> ioport_dsc[data_port].irq);
379
380 return 0;
369} 381}
370#endif /* CONFIG_IDE_8xx_DIRECT */ 382#endif /* CONFIG_IDE_8xx_DIRECT */
371 383
@@ -426,10 +438,14 @@ static void m8xx_ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
426#elif defined(CONFIG_IDE_EXT_DIRECT) 438#elif defined(CONFIG_IDE_EXT_DIRECT)
427 439
428 printk("%s[%d] %s: not implemented yet!\n", 440 printk("%s[%d] %s: not implemented yet!\n",
429 __FILE__,__LINE__,__FUNCTION__); 441 __FILE__, __LINE__, __func__);
430#endif /* defined(CONFIG_IDE_8xx_PCCARD) || defined(CONFIG_IDE_8xx_PCMCIA */ 442#endif /* defined(CONFIG_IDE_8xx_PCCARD) || defined(CONFIG_IDE_8xx_PCMCIA */
431} 443}
432 444
445static const struct ide_port_ops m8xx_port_ops = {
446 .set_pio_mode = m8xx_ide_set_pio_mode,
447};
448
433static void 449static void
434ide_interrupt_ack (void *dev) 450ide_interrupt_ack (void *dev)
435{ 451{
@@ -794,14 +810,30 @@ static int __init mpc8xx_ide_probe(void)
794 810
795#ifdef IDE0_BASE_OFFSET 811#ifdef IDE0_BASE_OFFSET
796 memset(&hw, 0, sizeof(hw)); 812 memset(&hw, 0, sizeof(hw));
797 m8xx_ide_init_ports(&hw, 0); 813 if (!m8xx_ide_init_ports(&hw, 0)) {
798 ide_init_port_hw(&ide_hwifs[0], &hw); 814 ide_hwif_t *hwif = ide_find_port();
799 idx[0] = 0; 815
816 if (hwif) {
817 ide_init_port_hw(hwif, &hw);
818 hwif->pio_mask = ATA_PIO4;
819 hwif->port_ops = &m8xx_port_ops;
820
821 idx[0] = hwif->index;
822 }
823 }
800#ifdef IDE1_BASE_OFFSET 824#ifdef IDE1_BASE_OFFSET
801 memset(&hw, 0, sizeof(hw)); 825 memset(&hw, 0, sizeof(hw));
802 m8xx_ide_init_ports(&hw, 1); 826 if (!m8xx_ide_init_ports(&hw, 1)) {
803 ide_init_port_hw(&ide_hwifs[1], &hw); 827 ide_hwif_t *mate = ide_find_port();
804 idx[1] = 1; 828
829 if (mate) {
830 ide_init_port_hw(mate, &hw);
831 mate->pio_mask = ATA_PIO4;
832 mate->port_ops = &m8xx_port_ops;
833
834 idx[1] = mate->index;
835 }
836 }
805#endif 837#endif
806#endif 838#endif
807 839
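
On mpc8xx the port-init helpers now claim their memory window with request_mem_region() before ioremap() and return an error code, so the probe path stops indexing ide_hwifs[] directly: it checks the result and asks the core for a free slot instead. A sketch of the new flow, assuming a hypothetical quux_init_ports() with the same contract as m8xx_ide_init_ports() above (0 on success, -EBUSY/-ENODEV on failure):

static int __init quux_probe(void)
{
	hw_regs_t hw;
	ide_hwif_t *hwif;

	memset(&hw, 0, sizeof(hw));
	if (quux_init_ports(&hw, 0))
		return -ENODEV;

	hwif = ide_find_port();		/* first unclaimed slot, or NULL */
	if (hwif == NULL)
		return -ENODEV;

	ide_init_port_hw(hwif, &hw);
	hwif->pio_mask = ATA_PIO4;
	hwif->port_ops = &quux_port_ops;

	return 0;
}
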
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c
index 88619b50d9ef..3cac6b2790dd 100644
--- a/drivers/ide/ppc/pmac.c
+++ b/drivers/ide/ppc/pmac.c
@@ -79,8 +79,6 @@ typedef struct pmac_ide_hwif {
79 79
80} pmac_ide_hwif_t; 80} pmac_ide_hwif_t;
81 81
82static pmac_ide_hwif_t pmac_ide[MAX_HWIFS];
83
84enum { 82enum {
85 controller_ohare, /* OHare based */ 83 controller_ohare, /* OHare based */
86 controller_heathrow, /* Heathrow/Paddington */ 84 controller_heathrow, /* Heathrow/Paddington */
@@ -411,7 +409,7 @@ kauai_lookup_timing(struct kauai_timing* table, int cycle_time)
411 */ 409 */
412#define IDE_WAKEUP_DELAY (1*HZ) 410#define IDE_WAKEUP_DELAY (1*HZ)
413 411
414static int pmac_ide_setup_dma(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif); 412static int pmac_ide_init_dma(ide_hwif_t *, const struct ide_port_info *);
415static int pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq); 413static int pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq);
416static void pmac_ide_selectproc(ide_drive_t *drive); 414static void pmac_ide_selectproc(ide_drive_t *drive);
417static void pmac_ide_kauai_selectproc(ide_drive_t *drive); 415static void pmac_ide_kauai_selectproc(ide_drive_t *drive);
@@ -419,7 +417,7 @@ static void pmac_ide_kauai_selectproc(ide_drive_t *drive);
419#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */ 417#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
420 418
421#define PMAC_IDE_REG(x) \ 419#define PMAC_IDE_REG(x) \
422 ((void __iomem *)((drive)->hwif->io_ports[IDE_DATA_OFFSET] + (x))) 420 ((void __iomem *)((drive)->hwif->io_ports.data_addr + (x)))
423 421
424/* 422/*
425 * Apply the timings of the proper unit (master/slave) to the shared 423 * Apply the timings of the proper unit (master/slave) to the shared
@@ -920,12 +918,29 @@ pmac_ide_do_resume(ide_hwif_t *hwif)
920 return 0; 918 return 0;
921} 919}
922 920
921static const struct ide_port_ops pmac_ide_ata6_port_ops = {
922 .set_pio_mode = pmac_ide_set_pio_mode,
923 .set_dma_mode = pmac_ide_set_dma_mode,
924 .selectproc = pmac_ide_kauai_selectproc,
925};
926
927static const struct ide_port_ops pmac_ide_port_ops = {
928 .set_pio_mode = pmac_ide_set_pio_mode,
929 .set_dma_mode = pmac_ide_set_dma_mode,
930 .selectproc = pmac_ide_selectproc,
931};
932
933static const struct ide_dma_ops pmac_dma_ops;
934
923static const struct ide_port_info pmac_port_info = { 935static const struct ide_port_info pmac_port_info = {
936 .init_dma = pmac_ide_init_dma,
924 .chipset = ide_pmac, 937 .chipset = ide_pmac,
938#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
939 .dma_ops = &pmac_dma_ops,
940#endif
941 .port_ops = &pmac_ide_port_ops,
925 .host_flags = IDE_HFLAG_SET_PIO_MODE_KEEP_DMA | 942 .host_flags = IDE_HFLAG_SET_PIO_MODE_KEEP_DMA |
926 IDE_HFLAG_PIO_NO_DOWNGRADE |
927 IDE_HFLAG_POST_SET_MODE | 943 IDE_HFLAG_POST_SET_MODE |
928 IDE_HFLAG_NO_DMA | /* no SFF-style DMA */
929 IDE_HFLAG_UNMASK_IRQS, 944 IDE_HFLAG_UNMASK_IRQS,
930 .pio_mask = ATA_PIO4, 945 .pio_mask = ATA_PIO4,
931 .mwdma_mask = ATA_MWDMA2, 946 .mwdma_mask = ATA_MWDMA2,
@@ -950,12 +965,15 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif, hw_regs_t *hw)
950 pmif->broken_dma = pmif->broken_dma_warn = 0; 965 pmif->broken_dma = pmif->broken_dma_warn = 0;
951 if (of_device_is_compatible(np, "shasta-ata")) { 966 if (of_device_is_compatible(np, "shasta-ata")) {
952 pmif->kind = controller_sh_ata6; 967 pmif->kind = controller_sh_ata6;
968 d.port_ops = &pmac_ide_ata6_port_ops;
953 d.udma_mask = ATA_UDMA6; 969 d.udma_mask = ATA_UDMA6;
954 } else if (of_device_is_compatible(np, "kauai-ata")) { 970 } else if (of_device_is_compatible(np, "kauai-ata")) {
955 pmif->kind = controller_un_ata6; 971 pmif->kind = controller_un_ata6;
972 d.port_ops = &pmac_ide_ata6_port_ops;
956 d.udma_mask = ATA_UDMA5; 973 d.udma_mask = ATA_UDMA5;
957 } else if (of_device_is_compatible(np, "K2-UATA")) { 974 } else if (of_device_is_compatible(np, "K2-UATA")) {
958 pmif->kind = controller_k2_ata6; 975 pmif->kind = controller_k2_ata6;
976 d.port_ops = &pmac_ide_ata6_port_ops;
959 d.udma_mask = ATA_UDMA5; 977 d.udma_mask = ATA_UDMA5;
960 } else if (of_device_is_compatible(np, "keylargo-ata")) { 978 } else if (of_device_is_compatible(np, "keylargo-ata")) {
961 if (strcmp(np->name, "ata-4") == 0) { 979 if (strcmp(np->name, "ata-4") == 0) {
@@ -1032,37 +1050,29 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif, hw_regs_t *hw)
1032 default_hwif_mmiops(hwif); 1050 default_hwif_mmiops(hwif);
1033 hwif->OUTBSYNC = pmac_outbsync; 1051 hwif->OUTBSYNC = pmac_outbsync;
1034 1052
1035 /* Tell common code _not_ to mess with resources */
1036 hwif->mmio = 1;
1037 hwif->hwif_data = pmif; 1053 hwif->hwif_data = pmif;
1038 ide_init_port_hw(hwif, hw); 1054 ide_init_port_hw(hwif, hw);
1039 hwif->noprobe = pmif->mediabay;
1040 hwif->cbl = pmif->cable_80 ? ATA_CBL_PATA80 : ATA_CBL_PATA40; 1055 hwif->cbl = pmif->cable_80 ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
1041 hwif->set_pio_mode = pmac_ide_set_pio_mode;
1042 if (pmif->kind == controller_un_ata6
1043 || pmif->kind == controller_k2_ata6
1044 || pmif->kind == controller_sh_ata6)
1045 hwif->selectproc = pmac_ide_kauai_selectproc;
1046 else
1047 hwif->selectproc = pmac_ide_selectproc;
1048 hwif->set_dma_mode = pmac_ide_set_dma_mode;
1049 1056
1050 printk(KERN_INFO "ide%d: Found Apple %s controller, bus ID %d%s, irq %d\n", 1057 printk(KERN_INFO "ide%d: Found Apple %s controller, bus ID %d%s, irq %d\n",
1051 hwif->index, model_name[pmif->kind], pmif->aapl_bus_id, 1058 hwif->index, model_name[pmif->kind], pmif->aapl_bus_id,
1052 pmif->mediabay ? " (mediabay)" : "", hwif->irq); 1059 pmif->mediabay ? " (mediabay)" : "", hwif->irq);
1053 1060
1061 if (pmif->mediabay) {
1054#ifdef CONFIG_PMAC_MEDIABAY 1062#ifdef CONFIG_PMAC_MEDIABAY
1055 if (pmif->mediabay && check_media_bay_by_base(pmif->regbase, MB_CD) == 0) 1063 if (check_media_bay_by_base(pmif->regbase, MB_CD)) {
1056 hwif->noprobe = 0; 1064#else
1057#endif /* CONFIG_PMAC_MEDIABAY */ 1065 if (1) {
1066#endif
1067 hwif->drives[0].noprobe = 1;
1068 hwif->drives[1].noprobe = 1;
1069 }
1070 }
1058 1071
1059#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC 1072#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
1060 if (pmif->cable_80 == 0) 1073 if (pmif->cable_80 == 0)
1061 d.udma_mask &= ATA_UDMA2; 1074 d.udma_mask &= ATA_UDMA2;
1062 /* has a DBDMA controller channel */
1063 if (pmif->dma_regs == 0 || pmac_ide_setup_dma(pmif, hwif) < 0)
1064#endif 1075#endif
1065 d.udma_mask = d.mwdma_mask = 0;
1066 1076
1067 idx[0] = hwif->index; 1077 idx[0] = hwif->index;
1068 1078
@@ -1076,8 +1086,9 @@ static void __devinit pmac_ide_init_ports(hw_regs_t *hw, unsigned long base)
1076 int i; 1086 int i;
1077 1087
1078 for (i = 0; i < 8; ++i) 1088 for (i = 0; i < 8; ++i)
1079 hw->io_ports[i] = base + i * 0x10; 1089 hw->io_ports_array[i] = base + i * 0x10;
1080 hw->io_ports[8] = base + 0x160; 1090
1091 hw->io_ports.ctl_addr = base + 0x160;
1081} 1092}
1082 1093
1083/* 1094/*
@@ -1088,35 +1099,36 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
1088{ 1099{
1089 void __iomem *base; 1100 void __iomem *base;
1090 unsigned long regbase; 1101 unsigned long regbase;
1091 int irq;
1092 ide_hwif_t *hwif; 1102 ide_hwif_t *hwif;
1093 pmac_ide_hwif_t *pmif; 1103 pmac_ide_hwif_t *pmif;
1094 int i, rc; 1104 int irq, rc;
1095 hw_regs_t hw; 1105 hw_regs_t hw;
1096 1106
1097 i = 0; 1107 pmif = kzalloc(sizeof(*pmif), GFP_KERNEL);
1098 while (i < MAX_HWIFS && (ide_hwifs[i].io_ports[IDE_DATA_OFFSET] != 0 1108 if (pmif == NULL)
1099 || pmac_ide[i].node != NULL)) 1109 return -ENOMEM;
1100 ++i; 1110
1101 if (i >= MAX_HWIFS) { 1111 hwif = ide_find_port();
1112 if (hwif == NULL) {
1102 printk(KERN_ERR "ide-pmac: MacIO interface attach with no slot\n"); 1113 printk(KERN_ERR "ide-pmac: MacIO interface attach with no slot\n");
1103 printk(KERN_ERR " %s\n", mdev->ofdev.node->full_name); 1114 printk(KERN_ERR " %s\n", mdev->ofdev.node->full_name);
1104 return -ENODEV; 1115 rc = -ENODEV;
1116 goto out_free_pmif;
1105 } 1117 }
1106 1118
1107 pmif = &pmac_ide[i];
1108 hwif = &ide_hwifs[i];
1109
1110 if (macio_resource_count(mdev) == 0) { 1119 if (macio_resource_count(mdev) == 0) {
1111 printk(KERN_WARNING "ide%d: no address for %s\n", 1120 printk(KERN_WARNING "ide-pmac: no address for %s\n",
1112 i, mdev->ofdev.node->full_name); 1121 mdev->ofdev.node->full_name);
1113 return -ENXIO; 1122 rc = -ENXIO;
1123 goto out_free_pmif;
1114 } 1124 }
1115 1125
1116 /* Request memory resource for IO ports */ 1126 /* Request memory resource for IO ports */
1117 if (macio_request_resource(mdev, 0, "ide-pmac (ports)")) { 1127 if (macio_request_resource(mdev, 0, "ide-pmac (ports)")) {
1118 printk(KERN_ERR "ide%d: can't request mmio resource !\n", i); 1128 printk(KERN_ERR "ide-pmac: can't request MMIO resource for "
1119 return -EBUSY; 1129 "%s!\n", mdev->ofdev.node->full_name);
1130 rc = -EBUSY;
1131 goto out_free_pmif;
1120 } 1132 }
1121 1133
1122 /* XXX This is bogus. Should be fixed in the registry by checking 1134 /* XXX This is bogus. Should be fixed in the registry by checking
@@ -1125,8 +1137,8 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
1125 * where that happens though... 1137 * where that happens though...
1126 */ 1138 */
1127 if (macio_irq_count(mdev) == 0) { 1139 if (macio_irq_count(mdev) == 0) {
1128 printk(KERN_WARNING "ide%d: no intrs for device %s, using 13\n", 1140 printk(KERN_WARNING "ide-pmac: no intrs for device %s, using "
1129 i, mdev->ofdev.node->full_name); 1141 "13\n", mdev->ofdev.node->full_name);
1130 irq = irq_create_mapping(NULL, 13); 1142 irq = irq_create_mapping(NULL, 13);
1131 } else 1143 } else
1132 irq = macio_irq(mdev, 0); 1144 irq = macio_irq(mdev, 0);
@@ -1144,7 +1156,9 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
1144#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC 1156#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
1145 if (macio_resource_count(mdev) >= 2) { 1157 if (macio_resource_count(mdev) >= 2) {
1146 if (macio_request_resource(mdev, 1, "ide-pmac (dma)")) 1158 if (macio_request_resource(mdev, 1, "ide-pmac (dma)"))
1147 printk(KERN_WARNING "ide%d: can't request DMA resource !\n", i); 1159 printk(KERN_WARNING "ide-pmac: can't request DMA "
1160 "resource for %s!\n",
1161 mdev->ofdev.node->full_name);
1148 else 1162 else
1149 pmif->dma_regs = ioremap(macio_resource_start(mdev, 1), 0x1000); 1163 pmif->dma_regs = ioremap(macio_resource_start(mdev, 1), 0x1000);
1150 } else 1164 } else
@@ -1166,11 +1180,15 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
1166 iounmap(pmif->dma_regs); 1180 iounmap(pmif->dma_regs);
1167 macio_release_resource(mdev, 1); 1181 macio_release_resource(mdev, 1);
1168 } 1182 }
1169 memset(pmif, 0, sizeof(*pmif));
1170 macio_release_resource(mdev, 0); 1183 macio_release_resource(mdev, 0);
1184 kfree(pmif);
1171 } 1185 }
1172 1186
1173 return rc; 1187 return rc;
1188
1189out_free_pmif:
1190 kfree(pmif);
1191 return rc;
1174} 1192}
1175 1193
1176static int 1194static int
@@ -1215,7 +1233,7 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1215 pmac_ide_hwif_t *pmif; 1233 pmac_ide_hwif_t *pmif;
1216 void __iomem *base; 1234 void __iomem *base;
1217 unsigned long rbase, rlen; 1235 unsigned long rbase, rlen;
1218 int i, rc; 1236 int rc;
1219 hw_regs_t hw; 1237 hw_regs_t hw;
1220 1238
1221 np = pci_device_to_OF_node(pdev); 1239 np = pci_device_to_OF_node(pdev);
@@ -1223,30 +1241,32 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1223 printk(KERN_ERR "ide-pmac: cannot find MacIO node for Kauai ATA interface\n"); 1241 printk(KERN_ERR "ide-pmac: cannot find MacIO node for Kauai ATA interface\n");
1224 return -ENODEV; 1242 return -ENODEV;
1225 } 1243 }
1226 i = 0; 1244
1227 while (i < MAX_HWIFS && (ide_hwifs[i].io_ports[IDE_DATA_OFFSET] != 0 1245 pmif = kzalloc(sizeof(*pmif), GFP_KERNEL);
1228 || pmac_ide[i].node != NULL)) 1246 if (pmif == NULL)
1229 ++i; 1247 return -ENOMEM;
1230 if (i >= MAX_HWIFS) { 1248
1249 hwif = ide_find_port();
1250 if (hwif == NULL) {
1231 printk(KERN_ERR "ide-pmac: PCI interface attach with no slot\n"); 1251 printk(KERN_ERR "ide-pmac: PCI interface attach with no slot\n");
1232 printk(KERN_ERR " %s\n", np->full_name); 1252 printk(KERN_ERR " %s\n", np->full_name);
1233 return -ENODEV; 1253 rc = -ENODEV;
1254 goto out_free_pmif;
1234 } 1255 }
1235 1256
1236 pmif = &pmac_ide[i];
1237 hwif = &ide_hwifs[i];
1238
1239 if (pci_enable_device(pdev)) { 1257 if (pci_enable_device(pdev)) {
1240 printk(KERN_WARNING "ide%i: Can't enable PCI device for %s\n", 1258 printk(KERN_WARNING "ide-pmac: Can't enable PCI device for "
1241 i, np->full_name); 1259 "%s\n", np->full_name);
1242 return -ENXIO; 1260 rc = -ENXIO;
1261 goto out_free_pmif;
1243 } 1262 }
1244 pci_set_master(pdev); 1263 pci_set_master(pdev);
1245 1264
1246 if (pci_request_regions(pdev, "Kauai ATA")) { 1265 if (pci_request_regions(pdev, "Kauai ATA")) {
1247 printk(KERN_ERR "ide%d: Cannot obtain PCI resources for %s\n", 1266 printk(KERN_ERR "ide-pmac: Cannot obtain PCI resources for "
1248 i, np->full_name); 1267 "%s\n", np->full_name);
1249 return -ENXIO; 1268 rc = -ENXIO;
1269 goto out_free_pmif;
1250 } 1270 }
1251 1271
1252 hwif->dev = &pdev->dev; 1272 hwif->dev = &pdev->dev;
@@ -1276,11 +1296,15 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1276 /* The interface is released to the common IDE layer */ 1296 /* The interface is released to the common IDE layer */
1277 pci_set_drvdata(pdev, NULL); 1297 pci_set_drvdata(pdev, NULL);
1278 iounmap(base); 1298 iounmap(base);
1279 memset(pmif, 0, sizeof(*pmif));
1280 pci_release_regions(pdev); 1299 pci_release_regions(pdev);
1300 kfree(pmif);
1281 } 1301 }
1282 1302
1283 return rc; 1303 return rc;
1304
1305out_free_pmif:
1306 kfree(pmif);
1307 return rc;
1284} 1308}
1285 1309
1286static int 1310static int
@@ -1652,18 +1676,31 @@ pmac_ide_dma_lost_irq (ide_drive_t *drive)
1652 printk(KERN_ERR "ide-pmac lost interrupt, dma status: %lx\n", status); 1676 printk(KERN_ERR "ide-pmac lost interrupt, dma status: %lx\n", status);
1653} 1677}
1654 1678
1679static const struct ide_dma_ops pmac_dma_ops = {
1680 .dma_host_set = pmac_ide_dma_host_set,
1681 .dma_setup = pmac_ide_dma_setup,
1682 .dma_exec_cmd = pmac_ide_dma_exec_cmd,
1683 .dma_start = pmac_ide_dma_start,
1684 .dma_end = pmac_ide_dma_end,
1685 .dma_test_irq = pmac_ide_dma_test_irq,
1686 .dma_timeout = ide_dma_timeout,
1687 .dma_lost_irq = pmac_ide_dma_lost_irq,
1688};
1689
1655/* 1690/*
1656 * Allocate the data structures needed for using DMA with an interface 1691 * Allocate the data structures needed for using DMA with an interface
1657 * and fill the proper list of functions pointers 1692 * and fill the proper list of functions pointers
1658 */ 1693 */
1659static int __devinit pmac_ide_setup_dma(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif) 1694static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif,
1695 const struct ide_port_info *d)
1660{ 1696{
1697 pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
1661 struct pci_dev *dev = to_pci_dev(hwif->dev); 1698 struct pci_dev *dev = to_pci_dev(hwif->dev);
1662 1699
1663 /* We won't need pci_dev if we switch to generic consistent 1700 /* We won't need pci_dev if we switch to generic consistent
1664 * DMA routines ... 1701 * DMA routines ...
1665 */ 1702 */
1666 if (dev == NULL) 1703 if (dev == NULL || pmif->dma_regs == 0)
1667 return -ENODEV; 1704 return -ENODEV;
1668 /* 1705 /*
1669 * Allocate space for the DBDMA commands. 1706 * Allocate space for the DBDMA commands.
@@ -1682,18 +1719,14 @@ static int __devinit pmac_ide_setup_dma(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
1682 1719
1683 hwif->sg_max_nents = MAX_DCMDS; 1720 hwif->sg_max_nents = MAX_DCMDS;
1684 1721
1685 hwif->dma_host_set = &pmac_ide_dma_host_set;
1686 hwif->dma_setup = &pmac_ide_dma_setup;
1687 hwif->dma_exec_cmd = &pmac_ide_dma_exec_cmd;
1688 hwif->dma_start = &pmac_ide_dma_start;
1689 hwif->ide_dma_end = &pmac_ide_dma_end;
1690 hwif->ide_dma_test_irq = &pmac_ide_dma_test_irq;
1691 hwif->dma_timeout = &ide_dma_timeout;
1692 hwif->dma_lost_irq = &pmac_ide_dma_lost_irq;
1693
1694 return 0; 1722 return 0;
1695} 1723}
1696 1724#else
1725static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif,
1726 const struct ide_port_info *d)
1727{
1728 return -EOPNOTSUPP;
1729}
1697#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */ 1730#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
1698 1731
1699module_init(pmac_ide_probe); 1732module_init(pmac_ide_probe);
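
[Editor's note] These hunks replace eight per-port function-pointer stores with one shared `const struct ide_dma_ops` table; each port then carries a single pointer to a read-only table the compiler can place in .rodata, and the table itself documents exactly which hooks the driver implements. A generic sketch of the pattern (the ops type here is illustrative, not the exact ide.h declaration):

```c
/* Illustrative const-ops table; not the real ide.h declarations. */
struct example_dma_ops {
	void (*dma_start)(int drive);
	int  (*dma_end)(int drive);
};

static void my_dma_start(int drive) { /* kick the DBDMA engine */ }
static int  my_dma_end(int drive)   { return 0; /* engine idle */ }

/* One shared read-only table replaces per-port hook assignments: */
static const struct example_dma_ops my_dma_ops = {
	.dma_start = my_dma_start,
	.dma_end   = my_dma_end,
};
```
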
diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
index f7ede0e42881..5171601fb255 100644
--- a/drivers/ide/setup-pci.c
+++ b/drivers/ide/setup-pci.c
@@ -20,73 +20,6 @@
20#include <asm/io.h> 20#include <asm/io.h>
21#include <asm/irq.h> 21#include <asm/irq.h>
22 22
23
24/**
25 * ide_match_hwif - match a PCI IDE against an ide_hwif
26 * @io_base: I/O base of device
27 * @bootable: set if its bootable
28 * @name: name of device
29 *
30 * Match a PCI IDE port against an entry in ide_hwifs[],
31 * based on io_base port if possible. Return the matching hwif,
32 * or a new hwif. If we find an error (clashing, out of devices, etc)
33 * return NULL
34 *
35 * FIXME: we need to handle mmio matches here too
36 */
37
38static ide_hwif_t *ide_match_hwif(unsigned long io_base, u8 bootable, const char *name)
39{
40 int h;
41 ide_hwif_t *hwif;
42
43 /*
44 * Look for a hwif with matching io_base default value.
45 * If chipset is "ide_unknown", then claim that hwif slot.
46 * Otherwise, some other chipset has already claimed it.. :(
47 */
48 for (h = 0; h < MAX_HWIFS; ++h) {
49 hwif = &ide_hwifs[h];
50 if (hwif->io_ports[IDE_DATA_OFFSET] == io_base) {
51 if (hwif->chipset == ide_unknown)
52 return hwif; /* match */
53 printk(KERN_ERR "%s: port 0x%04lx already claimed by %s\n",
54 name, io_base, hwif->name);
55 return NULL; /* already claimed */
56 }
57 }
58 /*
59 * Okay, there is no hwif matching our io_base,
60 * so we'll just claim an unassigned slot.
61 * Give preference to claiming other slots before claiming ide0/ide1,
62 * just in case there's another interface yet-to-be-scanned
63 * which uses ports 1f0/170 (the ide0/ide1 defaults).
64 *
65 * Unless there is a bootable card that does not use the standard
66 * ports 1f0/170 (the ide0/ide1 defaults). The (bootable) flag.
67 */
68 if (bootable) {
69 for (h = 0; h < MAX_HWIFS; ++h) {
70 hwif = &ide_hwifs[h];
71 if (hwif->chipset == ide_unknown)
72 return hwif; /* pick an unused entry */
73 }
74 } else {
75 for (h = 2; h < MAX_HWIFS; ++h) {
76 hwif = ide_hwifs + h;
77 if (hwif->chipset == ide_unknown)
78 return hwif; /* pick an unused entry */
79 }
80 }
81 for (h = 0; h < 2 && h < MAX_HWIFS; ++h) {
82 hwif = ide_hwifs + h;
83 if (hwif->chipset == ide_unknown)
84 return hwif; /* pick an unused entry */
85 }
86 printk(KERN_ERR "%s: too many IDE interfaces, no room in table\n", name);
87 return NULL;
88}
89
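
[Editor's note] The deleted helper implemented the hwif slot-claiming policy inline: match on io_base first, otherwise prefer slots 2 and up for non-bootable cards so the legacy ide0/ide1 entries (ports 0x1f0/0x170) stay free for a later scan. That policy now lives behind the core's ide_find_port_slot(), called from ide_hwif_configure() further down. A compact, self-contained sketch of the preference ordering the helper encoded:

```c
/* Self-contained sketch of the deleted preference ordering; `used[]`
 * stands in for "chipset != ide_unknown".
 */
static int find_free_slot(int bootable, const int used[], int n)
{
	int h;

	if (!bootable)			/* keep ide0/ide1 free for now */
		for (h = 2; h < n; h++)
			if (!used[h])
				return h;

	for (h = 0; h < n; h++)		/* bootable card, or only 0/1 left */
		if (!used[h])
			return h;

	return -1;			/* table full */
}
```
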
90/** 23/**
91 * ide_setup_pci_baseregs - place a PCI IDE controller native 24 * ide_setup_pci_baseregs - place a PCI IDE controller native
92 * @dev: PCI device of interface to switch native 25 * @dev: PCI device of interface to switch native
@@ -94,13 +27,13 @@ static ide_hwif_t *ide_match_hwif(unsigned long io_base, u8 bootable, const char
94 * 27 *
95 * We attempt to place the PCI interface into PCI native mode. If 28 * We attempt to place the PCI interface into PCI native mode. If
96 * we succeed the BARs are ok and the controller is in PCI mode. 29 * we succeed the BARs are ok and the controller is in PCI mode.
97 * Returns 0 on success or an errno code. 30 * Returns 0 on success or an errno code.
98 * 31 *
99 * FIXME: if we program the interface and then fail to set the BARS 32 * FIXME: if we program the interface and then fail to set the BARS
100 * we don't switch it back to legacy mode. Do we actually care ?? 33 * we don't switch it back to legacy mode. Do we actually care ??
101 */ 34 */
102 35
103static int ide_setup_pci_baseregs (struct pci_dev *dev, const char *name) 36static int ide_setup_pci_baseregs(struct pci_dev *dev, const char *name)
104{ 37{
105 u8 progif = 0; 38 u8 progif = 0;
106 39
@@ -139,16 +72,16 @@ static void ide_pci_clear_simplex(unsigned long dma_base, const char *name)
139} 72}
140 73
141/** 74/**
142 * ide_get_or_set_dma_base - setup BMIBA 75 * ide_pci_dma_base - setup BMIBA
143 * @d: IDE port info
144 * @hwif: IDE interface 76 * @hwif: IDE interface
77 * @d: IDE port info
145 * 78 *
146 * Fetch the DMA Bus-Master-I/O-Base-Address (BMIBA) from PCI space. 79 * Fetch the DMA Bus-Master-I/O-Base-Address (BMIBA) from PCI space.
147 * Where a device has a partner that is already in DMA mode we check 80 * Where a device has a partner that is already in DMA mode we check
148 * and enforce IDE simplex rules. 81 * and enforce IDE simplex rules.
149 */ 82 */
150 83
151static unsigned long ide_get_or_set_dma_base(const struct ide_port_info *d, ide_hwif_t *hwif) 84unsigned long ide_pci_dma_base(ide_hwif_t *hwif, const struct ide_port_info *d)
152{ 85{
153 struct pci_dev *dev = to_pci_dev(hwif->dev); 86 struct pci_dev *dev = to_pci_dev(hwif->dev);
154 unsigned long dma_base = 0; 87 unsigned long dma_base = 0;
@@ -199,6 +132,31 @@ static unsigned long ide_get_or_set_dma_base(const struct ide_port_info *d, ide_
199out: 132out:
200 return dma_base; 133 return dma_base;
201} 134}
135EXPORT_SYMBOL_GPL(ide_pci_dma_base);
136
137/*
138 * Set up BM-DMA capability (PnP BIOS should have done this)
139 */
140int ide_pci_set_master(struct pci_dev *dev, const char *name)
141{
142 u16 pcicmd;
143
144 pci_read_config_word(dev, PCI_COMMAND, &pcicmd);
145
146 if ((pcicmd & PCI_COMMAND_MASTER) == 0) {
147 pci_set_master(dev);
148
149 if (pci_read_config_word(dev, PCI_COMMAND, &pcicmd) ||
150 (pcicmd & PCI_COMMAND_MASTER) == 0) {
151 printk(KERN_ERR "%s: error updating PCICMD on %s\n",
152 name, pci_name(dev));
153 return -EIO;
154 }
155 }
156
157 return 0;
158}
159EXPORT_SYMBOL_GPL(ide_pci_set_master);
202#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */ 160#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
203 161
204void ide_setup_pci_noise(struct pci_dev *dev, const struct ide_port_info *d) 162void ide_setup_pci_noise(struct pci_dev *dev, const struct ide_port_info *d)
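
[Editor's note] ide_pci_dma_base() and the new ide_pci_set_master() are exported so host drivers can do the BMIBA fetch and bus-master enable themselves; note that the helper re-reads PCI_COMMAND after pci_set_master() because the write can silently fail on broken chipsets. A hedged usage sketch, mirroring the ide_hwif_setup_dma() rework later in this patch:

```c
/* Hedged usage sketch: fetch the BMIBA, then make sure bus mastering
 * really is on before touching the DMA registers.
 */
unsigned long base = ide_pci_dma_base(hwif, d);

if (base == 0 || ide_pci_set_master(to_pci_dev(hwif->dev), d->name) < 0)
	return -1;			/* leave the port in PIO mode */
```
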
@@ -207,7 +165,6 @@ void ide_setup_pci_noise(struct pci_dev *dev, const struct ide_port_info *d)
207 " PCI slot %s\n", d->name, dev->vendor, dev->device, 165 " PCI slot %s\n", d->name, dev->vendor, dev->device,
208 dev->revision, pci_name(dev)); 166 dev->revision, pci_name(dev));
209} 167}
210
211EXPORT_SYMBOL_GPL(ide_setup_pci_noise); 168EXPORT_SYMBOL_GPL(ide_setup_pci_noise);
212 169
213 170
@@ -220,13 +177,13 @@ EXPORT_SYMBOL_GPL(ide_setup_pci_noise);
220 * but if that fails then we only need IO space. The PCI code should 177 * but if that fails then we only need IO space. The PCI code should
221 * have setup the proper resources for us already for controllers in 178 * have setup the proper resources for us already for controllers in
222 * legacy mode. 179 * legacy mode.
223 * 180 *
224 * Returns zero on success or an error code 181 * Returns zero on success or an error code
225 */ 182 */
226 183
227static int ide_pci_enable(struct pci_dev *dev, const struct ide_port_info *d) 184static int ide_pci_enable(struct pci_dev *dev, const struct ide_port_info *d)
228{ 185{
229 int ret; 186 int ret, bars;
230 187
231 if (pci_enable_device(dev)) { 188 if (pci_enable_device(dev)) {
232 ret = pci_enable_device_io(dev); 189 ret = pci_enable_device_io(dev);
@@ -249,13 +206,21 @@ static int ide_pci_enable(struct pci_dev *dev, const struct ide_port_info *d)
249 goto out; 206 goto out;
250 } 207 }
251 208
252 /* FIXME: Temporary - until we put in the hotplug interface logic 209 if (d->host_flags & IDE_HFLAG_SINGLE)
253 Check that the bits we want are not in use by someone else. */ 210 bars = (1 << 2) - 1;
254 ret = pci_request_region(dev, 4, "ide_tmp"); 211 else
255 if (ret < 0) 212 bars = (1 << 4) - 1;
256 goto out; 213
214 if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) {
215 if (d->host_flags & IDE_HFLAG_CS5520)
216 bars |= (1 << 2);
217 else
218 bars |= (1 << 4);
219 }
257 220
258 pci_release_region(dev, 4); 221 ret = pci_request_selected_regions(dev, bars, d->name);
222 if (ret < 0)
223 printk(KERN_ERR "%s: can't reserve resources\n", d->name);
259out: 224out:
260 return ret; 225 return ret;
261} 226}
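
[Editor's note] pci_request_selected_regions() takes a bitmask with bit n standing for BAR n, which is what the new `bars` computation builds: BARs 0-1 ((1 << 2) - 1) for a single-channel controller, BARs 0-3 ((1 << 4) - 1) for dual-channel, plus the bus-master BAR 4 when DMA is possible, except on the CS5520, whose DMA registers live in BAR 2. A restatement of the computation with the host_flags tests replaced by plain booleans (`single`, `no_dma`, `cs5520` are stand-ins):

```c
/* Stand-alone restatement of the BAR-mask computation. */
int bars = single ? (1 << 2) - 1	/* BARs 0-1: one channel  */
		  : (1 << 4) - 1;	/* BARs 0-3: two channels */

if (!no_dma)
	bars |= cs5520 ? (1 << 2)	/* CS5520 keeps DMA regs in BAR 2 */
		       : (1 << 4);	/* everyone else: BM-DMA in BAR 4 */

if (pci_request_selected_regions(dev, bars, name) < 0)
	return -EBUSY;			/* some BAR already claimed */
```
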
@@ -279,8 +244,8 @@ static int ide_pci_configure(struct pci_dev *dev, const struct ide_port_info *d)
279 * Maybe the user deliberately *disabled* the device, 244 * Maybe the user deliberately *disabled* the device,
280 * but we'll eventually ignore it again if no drives respond. 245 * but we'll eventually ignore it again if no drives respond.
281 */ 246 */
282 if (ide_setup_pci_baseregs(dev, d->name) || pci_write_config_word(dev, PCI_COMMAND, pcicmd|PCI_COMMAND_IO)) 247 if (ide_setup_pci_baseregs(dev, d->name) ||
283 { 248 pci_write_config_word(dev, PCI_COMMAND, pcicmd | PCI_COMMAND_IO)) {
284 printk(KERN_INFO "%s: device disabled (BIOS)\n", d->name); 249 printk(KERN_INFO "%s: device disabled (BIOS)\n", d->name);
285 return -ENODEV; 250 return -ENODEV;
286 } 251 }
@@ -301,26 +266,24 @@ static int ide_pci_configure(struct pci_dev *dev, const struct ide_port_info *d)
301 * @d: IDE port info 266 * @d: IDE port info
302 * @bar: BAR number 267 * @bar: BAR number
303 * 268 *
304 * Checks if a BAR is configured and points to MMIO space. If so 269 * Checks if a BAR is configured and points to MMIO space. If so,
305 * print an error and return an error code. Otherwise return 0 270 * return an error code. Otherwise return 0
306 */ 271 */
307 272
308static int ide_pci_check_iomem(struct pci_dev *dev, const struct ide_port_info *d, int bar) 273static int ide_pci_check_iomem(struct pci_dev *dev, const struct ide_port_info *d,
274 int bar)
309{ 275{
310 ulong flags = pci_resource_flags(dev, bar); 276 ulong flags = pci_resource_flags(dev, bar);
311 277
312 /* Unconfigured ? */ 278 /* Unconfigured ? */
313 if (!flags || pci_resource_len(dev, bar) == 0) 279 if (!flags || pci_resource_len(dev, bar) == 0)
314 return 0; 280 return 0;
315 281
316 /* I/O space */ 282 /* I/O space */
317 if(flags & PCI_BASE_ADDRESS_IO_MASK) 283 if (flags & IORESOURCE_IO)
318 return 0; 284 return 0;
319 285
320 /* Bad */ 286 /* Bad */
321 printk(KERN_ERR "%s: IO baseregs (BIOS) are reported "
322 "as MEM, report to "
323 "<andre@linux-ide.org>.\n", d->name);
324 return -EINVAL; 287 return -EINVAL;
325} 288}
326 289
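
[Editor's note] The rewritten check also fixes a real predicate bug: pci_resource_flags() returns IORESOURCE_* bits, not raw BAR register contents, and PCI_BASE_ADDRESS_IO_MASK is ~0x3UL, so the old `flags & PCI_BASE_ADDRESS_IO_MASK` test was true for MMIO resources as well. Testing IORESOURCE_IO is the correct form:

```c
/* Sketch: classify a configured BAR by its resource flags.  The old
 * code ANDed with PCI_BASE_ADDRESS_IO_MASK (~0x3UL), a BAR-register
 * mask, which is nonzero for MMIO flag values too.
 */
static int bar_is_portio(struct pci_dev *dev, int bar)
{
	return (pci_resource_flags(dev, bar) & IORESOURCE_IO) != 0;
}
```
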
@@ -344,14 +307,16 @@ static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev,
344{ 307{
345 unsigned long ctl = 0, base = 0; 308 unsigned long ctl = 0, base = 0;
346 ide_hwif_t *hwif; 309 ide_hwif_t *hwif;
347 u8 bootable = (d->host_flags & IDE_HFLAG_BOOTABLE) ? 1 : 0;
348 struct hw_regs_s hw; 310 struct hw_regs_s hw;
349 311
350 if ((d->host_flags & IDE_HFLAG_ISA_PORTS) == 0) { 312 if ((d->host_flags & IDE_HFLAG_ISA_PORTS) == 0) {
351 /* Possibly we should fail if these checks report true */ 313 if (ide_pci_check_iomem(dev, d, 2 * port) ||
352 ide_pci_check_iomem(dev, d, 2*port); 314 ide_pci_check_iomem(dev, d, 2 * port + 1)) {
353 ide_pci_check_iomem(dev, d, 2*port+1); 315 printk(KERN_ERR "%s: I/O baseregs (BIOS) are reported "
354 316 "as MEM for port %d!\n", d->name, port);
317 return NULL;
318 }
319
355 ctl = pci_resource_start(dev, 2*port+1); 320 ctl = pci_resource_start(dev, 2*port+1);
356 base = pci_resource_start(dev, 2*port); 321 base = pci_resource_start(dev, 2*port);
357 if ((ctl && !base) || (base && !ctl)) { 322 if ((ctl && !base) || (base && !ctl)) {
@@ -360,14 +325,18 @@ static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev,
360 return NULL; 325 return NULL;
361 } 326 }
362 } 327 }
363 if (!ctl) 328 if (!ctl) {
364 {
365 /* Use default values */ 329 /* Use default values */
366 ctl = port ? 0x374 : 0x3f4; 330 ctl = port ? 0x374 : 0x3f4;
367 base = port ? 0x170 : 0x1f0; 331 base = port ? 0x170 : 0x1f0;
368 } 332 }
369 if ((hwif = ide_match_hwif(base, bootable, d->name)) == NULL) 333
370 return NULL; /* no room in ide_hwifs[] */ 334 hwif = ide_find_port_slot(d);
335 if (hwif == NULL) {
336 printk(KERN_ERR "%s: too many IDE interfaces, no room in "
337 "table\n", d->name);
338 return NULL;
339 }
371 340
372 memset(&hw, 0, sizeof(hw)); 341 memset(&hw, 0, sizeof(hw));
373 hw.irq = irq; 342 hw.irq = irq;
@@ -378,7 +347,6 @@ static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev,
378 ide_init_port_hw(hwif, &hw); 347 ide_init_port_hw(hwif, &hw);
379 348
380 hwif->dev = &dev->dev; 349 hwif->dev = &dev->dev;
381 hwif->cds = d;
382 350
383 return hwif; 351 return hwif;
384} 352}
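
[Editor's note] Two behavioral changes hide in these hunks: an MMIO-mapped BAR is now a hard failure instead of a logged warning, and slot allocation goes through ide_find_port_slot() with the IDE_HFLAG_BOOTABLE plumbing removed from this file. The legacy fallback is unchanged: a controller in compatibility mode reports zero BARs, so the code substitutes the fixed ISA decodes. A sketch, with the hardware detail that the device-control register decodes two bytes into the control block:

```c
/* Legacy-mode fallback for channel `port` (0 = primary, 1 = secondary).
 * ctl names the 0x3f4/0x374 block; the control register itself sits
 * at offset 2 within it (0x3f6/0x376), hence the conventional
 * `ctl | 2` when the hw_regs structure is filled in.
 */
if (!ctl) {
	ctl  = port ? 0x374 : 0x3f4;
	base = port ? 0x170 : 0x1f0;
}
```
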
@@ -394,40 +362,33 @@ static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev,
394 * state 362 * state
395 */ 363 */
396 364
397void ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d) 365int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
398{ 366{
399 struct pci_dev *dev = to_pci_dev(hwif->dev); 367 struct pci_dev *dev = to_pci_dev(hwif->dev);
400 u16 pcicmd;
401
402 pci_read_config_word(dev, PCI_COMMAND, &pcicmd);
403 368
404 if ((d->host_flags & IDE_HFLAG_NO_AUTODMA) == 0 || 369 if ((d->host_flags & IDE_HFLAG_NO_AUTODMA) == 0 ||
405 ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE && 370 ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE &&
406 (dev->class & 0x80))) { 371 (dev->class & 0x80))) {
407 unsigned long dma_base = ide_get_or_set_dma_base(d, hwif); 372 unsigned long base = ide_pci_dma_base(hwif, d);
408 if (dma_base && !(pcicmd & PCI_COMMAND_MASTER)) { 373
409 /* 374 if (base == 0 || ide_pci_set_master(dev, d->name) < 0)
410 * Set up BM-DMA capability 375 return -1;
411 * (PnP BIOS should have done this) 376
412 */ 377 if (hwif->mmio)
413 pci_set_master(dev); 378 printk(KERN_INFO " %s: MMIO-DMA\n", hwif->name);
414 if (pci_read_config_word(dev, PCI_COMMAND, &pcicmd) || !(pcicmd & PCI_COMMAND_MASTER)) { 379 else
415 printk(KERN_ERR "%s: %s error updating PCICMD\n", 380 printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n",
416 hwif->name, d->name); 381 hwif->name, base, base + 7);
417 dma_base = 0; 382
418 } 383 hwif->extra_base = base + (hwif->channel ? 8 : 16);
419 } 384
420 if (dma_base) { 385 if (ide_allocate_dma_engine(hwif))
421 if (d->init_dma) { 386 return -1;
422 d->init_dma(hwif, dma_base); 387
423 } else { 388 ide_setup_dma(hwif, base);
424 ide_setup_dma(hwif, dma_base);
425 }
426 } else {
427 printk(KERN_INFO "%s: %s Bus-Master DMA disabled "
428 "(BIOS)\n", hwif->name, d->name);
429 }
430 } 389 }
390
391 return 0;
431} 392}
432#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */ 393#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
433 394
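
[Editor's note] ide_hwif_setup_dma() changing from void to int matters more than it looks: a missing BMIBA, a failed bus-master enable, or a failed engine allocation can now propagate to the probe path instead of being logged and forgotten. A hypothetical caller (illustrative only; the real call sites are elsewhere in this series):

```c
/* Hypothetical caller: the int return lets probe code degrade to PIO
 * rather than continuing with a half-initialized DMA engine.
 */
if (ide_hwif_setup_dma(hwif, d) < 0)
	printk(KERN_INFO "%s: %s: DMA setup failed, port falls back to PIO\n",
	       hwif->name, d->name);
```
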
@@ -514,7 +475,6 @@ void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d, int
514 *(idx + port) = hwif->index; 475 *(idx + port) = hwif->index;
515 } 476 }
516} 477}
517
518EXPORT_SYMBOL_GPL(ide_pci_setup_ports); 478EXPORT_SYMBOL_GPL(ide_pci_setup_ports);
519 479
520/* 480/*
@@ -597,7 +557,6 @@ int ide_setup_pci_device(struct pci_dev *dev, const struct ide_port_info *d)
597 557
598 return ret; 558 return ret;
599} 559}
600
601EXPORT_SYMBOL_GPL(ide_setup_pci_device); 560EXPORT_SYMBOL_GPL(ide_setup_pci_device);
602 561
603int ide_setup_pci_devices(struct pci_dev *dev1, struct pci_dev *dev2, 562int ide_setup_pci_devices(struct pci_dev *dev1, struct pci_dev *dev2,
@@ -621,5 +580,4 @@ int ide_setup_pci_devices(struct pci_dev *dev1, struct pci_dev *dev2,
621out: 580out:
622 return ret; 581 return ret;
623} 582}
624
625EXPORT_SYMBOL_GPL(ide_setup_pci_devices); 583EXPORT_SYMBOL_GPL(ide_setup_pci_devices);
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 0d13fe0a260b..3d6d9461c31d 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -160,6 +160,7 @@ struct ehca_qp {
160 }; 160 };
161 u32 qp_type; 161 u32 qp_type;
162 enum ehca_ext_qp_type ext_type; 162 enum ehca_ext_qp_type ext_type;
163 enum ib_qp_state state;
163 struct ipz_queue ipz_squeue; 164 struct ipz_queue ipz_squeue;
164 struct ipz_queue ipz_rqueue; 165 struct ipz_queue ipz_rqueue;
165 struct h_galpas galpas; 166 struct h_galpas galpas;
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index b5ca94c6b8d9..ca5eb0cb628c 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -633,7 +633,7 @@ static inline int find_next_online_cpu(struct ehca_comp_pool *pool)
633 unsigned long flags; 633 unsigned long flags;
634 634
635 WARN_ON_ONCE(!in_interrupt()); 635 WARN_ON_ONCE(!in_interrupt());
636 if (ehca_debug_level) 636 if (ehca_debug_level >= 3)
637 ehca_dmp(&cpu_online_map, sizeof(cpumask_t), ""); 637 ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");
638 638
639 spin_lock_irqsave(&pool->last_cpu_lock, flags); 639 spin_lock_irqsave(&pool->last_cpu_lock, flags);
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 65b3362cdb9b..65048976198c 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -50,7 +50,7 @@
50#include "ehca_tools.h" 50#include "ehca_tools.h"
51#include "hcp_if.h" 51#include "hcp_if.h"
52 52
53#define HCAD_VERSION "0025" 53#define HCAD_VERSION "0026"
54 54
55MODULE_LICENSE("Dual BSD/GPL"); 55MODULE_LICENSE("Dual BSD/GPL");
56MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>"); 56MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
@@ -60,7 +60,6 @@ MODULE_VERSION(HCAD_VERSION);
60static int ehca_open_aqp1 = 0; 60static int ehca_open_aqp1 = 0;
61static int ehca_hw_level = 0; 61static int ehca_hw_level = 0;
62static int ehca_poll_all_eqs = 1; 62static int ehca_poll_all_eqs = 1;
63static int ehca_mr_largepage = 1;
64 63
65int ehca_debug_level = 0; 64int ehca_debug_level = 0;
66int ehca_nr_ports = 2; 65int ehca_nr_ports = 2;
@@ -70,45 +69,40 @@ int ehca_static_rate = -1;
70int ehca_scaling_code = 0; 69int ehca_scaling_code = 0;
71int ehca_lock_hcalls = -1; 70int ehca_lock_hcalls = -1;
72 71
73module_param_named(open_aqp1, ehca_open_aqp1, int, S_IRUGO); 72module_param_named(open_aqp1, ehca_open_aqp1, bool, S_IRUGO);
74module_param_named(debug_level, ehca_debug_level, int, S_IRUGO); 73module_param_named(debug_level, ehca_debug_level, int, S_IRUGO);
75module_param_named(hw_level, ehca_hw_level, int, S_IRUGO); 74module_param_named(hw_level, ehca_hw_level, int, S_IRUGO);
76module_param_named(nr_ports, ehca_nr_ports, int, S_IRUGO); 75module_param_named(nr_ports, ehca_nr_ports, int, S_IRUGO);
77module_param_named(use_hp_mr, ehca_use_hp_mr, int, S_IRUGO); 76module_param_named(use_hp_mr, ehca_use_hp_mr, bool, S_IRUGO);
78module_param_named(port_act_time, ehca_port_act_time, int, S_IRUGO); 77module_param_named(port_act_time, ehca_port_act_time, int, S_IRUGO);
79module_param_named(poll_all_eqs, ehca_poll_all_eqs, int, S_IRUGO); 78module_param_named(poll_all_eqs, ehca_poll_all_eqs, bool, S_IRUGO);
80module_param_named(static_rate, ehca_static_rate, int, S_IRUGO); 79module_param_named(static_rate, ehca_static_rate, int, S_IRUGO);
81module_param_named(scaling_code, ehca_scaling_code, int, S_IRUGO); 80module_param_named(scaling_code, ehca_scaling_code, bool, S_IRUGO);
82module_param_named(mr_largepage, ehca_mr_largepage, int, S_IRUGO);
83module_param_named(lock_hcalls, ehca_lock_hcalls, bool, S_IRUGO); 81module_param_named(lock_hcalls, ehca_lock_hcalls, bool, S_IRUGO);
84 82
85MODULE_PARM_DESC(open_aqp1, 83MODULE_PARM_DESC(open_aqp1,
86 "AQP1 on startup (0: no (default), 1: yes)"); 84 "Open AQP1 on startup (default: no)");
87MODULE_PARM_DESC(debug_level, 85MODULE_PARM_DESC(debug_level,
88 "debug level" 86 "Amount of debug output (0: none (default), 1: traces, "
89 " (0: no debug traces (default), 1: with debug traces)"); 87 "2: some dumps, 3: lots)");
90MODULE_PARM_DESC(hw_level, 88MODULE_PARM_DESC(hw_level,
91 "hardware level" 89 "Hardware level (0: autosensing (default), "
92 " (0: autosensing (default), 1: v. 0.20, 2: v. 0.21)"); 90 "0x10..0x14: eHCA, 0x20..0x23: eHCA2)");
93MODULE_PARM_DESC(nr_ports, 91MODULE_PARM_DESC(nr_ports,
94 "number of connected ports (-1: autodetect, 1: port one only, " 92 "number of connected ports (-1: autodetect, 1: port one only, "
95 "2: two ports (default)"); 93 "2: two ports (default)");
96MODULE_PARM_DESC(use_hp_mr, 94MODULE_PARM_DESC(use_hp_mr,
97 "high performance MRs (0: no (default), 1: yes)"); 95 "Use high performance MRs (default: no)");
98MODULE_PARM_DESC(port_act_time, 96MODULE_PARM_DESC(port_act_time,
99 "time to wait for port activation (default: 30 sec)"); 97 "Time to wait for port activation (default: 30 sec)");
100MODULE_PARM_DESC(poll_all_eqs, 98MODULE_PARM_DESC(poll_all_eqs,
101 "polls all event queues periodically" 99 "Poll all event queues periodically (default: yes)");
102 " (0: no, 1: yes (default))");
103MODULE_PARM_DESC(static_rate, 100MODULE_PARM_DESC(static_rate,
104 "set permanent static rate (default: disabled)"); 101 "Set permanent static rate (default: no static rate)");
105MODULE_PARM_DESC(scaling_code, 102MODULE_PARM_DESC(scaling_code,
106 "set scaling code (0: disabled/default, 1: enabled)"); 103 "Enable scaling code (default: no)");
107MODULE_PARM_DESC(mr_largepage,
108 "use large page for MR (0: use PAGE_SIZE (default), "
109 "1: use large page depending on MR size");
110MODULE_PARM_DESC(lock_hcalls, 104MODULE_PARM_DESC(lock_hcalls,
111 "serialize all hCalls made by the driver " 105 "Serialize all hCalls made by the driver "
112 "(default: autodetect)"); 106 "(default: autodetect)");
113 107
114DEFINE_RWLOCK(ehca_qp_idr_lock); 108DEFINE_RWLOCK(ehca_qp_idr_lock);
@@ -275,6 +269,7 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
275 u64 h_ret; 269 u64 h_ret;
276 struct hipz_query_hca *rblock; 270 struct hipz_query_hca *rblock;
277 struct hipz_query_port *port; 271 struct hipz_query_port *port;
272 const char *loc_code;
278 273
279 static const u32 pgsize_map[] = { 274 static const u32 pgsize_map[] = {
280 HCA_CAP_MR_PGSIZE_4K, 0x1000, 275 HCA_CAP_MR_PGSIZE_4K, 0x1000,
@@ -283,6 +278,12 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
283 HCA_CAP_MR_PGSIZE_16M, 0x1000000, 278 HCA_CAP_MR_PGSIZE_16M, 0x1000000,
284 }; 279 };
285 280
281 ehca_gen_dbg("Probing adapter %s...",
282 shca->ofdev->node->full_name);
283 loc_code = of_get_property(shca->ofdev->node, "ibm,loc-code", NULL);
284 if (loc_code)
 285 ehca_gen_dbg(" ... location code=%s", loc_code);
286
286 rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL); 287 rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
287 if (!rblock) { 288 if (!rblock) {
288 ehca_gen_err("Cannot allocate rblock memory."); 289 ehca_gen_err("Cannot allocate rblock memory.");
@@ -350,11 +351,9 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
350 351
351 /* translate supported MR page sizes; always support 4K */ 352 /* translate supported MR page sizes; always support 4K */
352 shca->hca_cap_mr_pgsize = EHCA_PAGESIZE; 353 shca->hca_cap_mr_pgsize = EHCA_PAGESIZE;
353 if (ehca_mr_largepage) { /* support extra sizes only if enabled */ 354 for (i = 0; i < ARRAY_SIZE(pgsize_map); i += 2)
354 for (i = 0; i < ARRAY_SIZE(pgsize_map); i += 2) 355 if (rblock->memory_page_size_supported & pgsize_map[i])
355 if (rblock->memory_page_size_supported & pgsize_map[i]) 356 shca->hca_cap_mr_pgsize |= pgsize_map[i + 1];
356 shca->hca_cap_mr_pgsize |= pgsize_map[i + 1];
357 }
358 357
359 /* query max MTU from first port -- it's the same for all ports */ 358 /* query max MTU from first port -- it's the same for all ports */
360 port = (struct hipz_query_port *)rblock; 359 port = (struct hipz_query_port *)rblock;
@@ -567,8 +566,7 @@ static int ehca_destroy_aqp1(struct ehca_sport *sport)
567 566
568static ssize_t ehca_show_debug_level(struct device_driver *ddp, char *buf) 567static ssize_t ehca_show_debug_level(struct device_driver *ddp, char *buf)
569{ 568{
570 return snprintf(buf, PAGE_SIZE, "%d\n", 569 return snprintf(buf, PAGE_SIZE, "%d\n", ehca_debug_level);
571 ehca_debug_level);
572} 570}
573 571
574static ssize_t ehca_store_debug_level(struct device_driver *ddp, 572static ssize_t ehca_store_debug_level(struct device_driver *ddp,
@@ -657,14 +655,6 @@ static ssize_t ehca_show_adapter_handle(struct device *dev,
657} 655}
658static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL); 656static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL);
659 657
660static ssize_t ehca_show_mr_largepage(struct device *dev,
661 struct device_attribute *attr,
662 char *buf)
663{
664 return sprintf(buf, "%d\n", ehca_mr_largepage);
665}
666static DEVICE_ATTR(mr_largepage, S_IRUGO, ehca_show_mr_largepage, NULL);
667
668static struct attribute *ehca_dev_attrs[] = { 658static struct attribute *ehca_dev_attrs[] = {
669 &dev_attr_adapter_handle.attr, 659 &dev_attr_adapter_handle.attr,
670 &dev_attr_num_ports.attr, 660 &dev_attr_num_ports.attr,
@@ -681,7 +671,6 @@ static struct attribute *ehca_dev_attrs[] = {
681 &dev_attr_cur_mw.attr, 671 &dev_attr_cur_mw.attr,
682 &dev_attr_max_pd.attr, 672 &dev_attr_max_pd.attr,
683 &dev_attr_max_ah.attr, 673 &dev_attr_max_ah.attr,
684 &dev_attr_mr_largepage.attr,
685 NULL 674 NULL
686}; 675};
687 676
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index f26997fc00f8..46ae4eb2c4e1 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -1794,8 +1794,9 @@ static int ehca_check_kpages_per_ate(struct scatterlist *page_list,
1794 int t; 1794 int t;
1795 for (t = start_idx; t <= end_idx; t++) { 1795 for (t = start_idx; t <= end_idx; t++) {
1796 u64 pgaddr = page_to_pfn(sg_page(&page_list[t])) << PAGE_SHIFT; 1796 u64 pgaddr = page_to_pfn(sg_page(&page_list[t])) << PAGE_SHIFT;
1797 ehca_gen_dbg("chunk_page=%lx value=%016lx", pgaddr, 1797 if (ehca_debug_level >= 3)
1798 *(u64 *)abs_to_virt(phys_to_abs(pgaddr))); 1798 ehca_gen_dbg("chunk_page=%lx value=%016lx", pgaddr,
1799 *(u64 *)abs_to_virt(phys_to_abs(pgaddr)));
1799 if (pgaddr - PAGE_SIZE != *prev_pgaddr) { 1800 if (pgaddr - PAGE_SIZE != *prev_pgaddr) {
1800 ehca_gen_err("uncontiguous page found pgaddr=%lx " 1801 ehca_gen_err("uncontiguous page found pgaddr=%lx "
1801 "prev_pgaddr=%lx page_list_i=%x", 1802 "prev_pgaddr=%lx page_list_i=%x",
@@ -1862,10 +1863,13 @@ static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
1862 pgaddr & 1863 pgaddr &
1863 ~(pginfo->hwpage_size - 1)); 1864 ~(pginfo->hwpage_size - 1));
1864 } 1865 }
1865 ehca_gen_dbg("kpage=%lx chunk_page=%lx " 1866 if (ehca_debug_level >= 3) {
1866 "value=%016lx", *kpage, pgaddr, 1867 u64 val = *(u64 *)abs_to_virt(
1867 *(u64 *)abs_to_virt( 1868 phys_to_abs(pgaddr));
1868 phys_to_abs(pgaddr))); 1869 ehca_gen_dbg("kpage=%lx chunk_page=%lx "
1870 "value=%016lx",
1871 *kpage, pgaddr, val);
1872 }
1869 prev_pgaddr = pgaddr; 1873 prev_pgaddr = pgaddr;
1870 i++; 1874 i++;
1871 pginfo->kpage_cnt++; 1875 pginfo->kpage_cnt++;
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 3eb14a52cbf2..57bef1152cc2 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -550,6 +550,7 @@ static struct ehca_qp *internal_create_qp(
550 spin_lock_init(&my_qp->spinlock_r); 550 spin_lock_init(&my_qp->spinlock_r);
551 my_qp->qp_type = qp_type; 551 my_qp->qp_type = qp_type;
552 my_qp->ext_type = parms.ext_type; 552 my_qp->ext_type = parms.ext_type;
553 my_qp->state = IB_QPS_RESET;
553 554
554 if (init_attr->recv_cq) 555 if (init_attr->recv_cq)
555 my_qp->recv_cq = 556 my_qp->recv_cq =
@@ -965,7 +966,7 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
965 qp_num, bad_send_wqe_p); 966 qp_num, bad_send_wqe_p);
966 /* convert wqe pointer to vadr */ 967 /* convert wqe pointer to vadr */
967 bad_send_wqe_v = abs_to_virt((u64)bad_send_wqe_p); 968 bad_send_wqe_v = abs_to_virt((u64)bad_send_wqe_p);
968 if (ehca_debug_level) 969 if (ehca_debug_level >= 2)
969 ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num); 970 ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num);
970 squeue = &my_qp->ipz_squeue; 971 squeue = &my_qp->ipz_squeue;
971 if (ipz_queue_abs_to_offset(squeue, (u64)bad_send_wqe_p, &q_ofs)) { 972 if (ipz_queue_abs_to_offset(squeue, (u64)bad_send_wqe_p, &q_ofs)) {
@@ -978,7 +979,7 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
978 wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs); 979 wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
979 *bad_wqe_cnt = 0; 980 *bad_wqe_cnt = 0;
980 while (wqe->optype != 0xff && wqe->wqef != 0xff) { 981 while (wqe->optype != 0xff && wqe->wqef != 0xff) {
981 if (ehca_debug_level) 982 if (ehca_debug_level >= 2)
982 ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num); 983 ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num);
983 wqe->nr_of_data_seg = 0; /* suppress data access */ 984 wqe->nr_of_data_seg = 0; /* suppress data access */
984 wqe->wqef = WQEF_PURGE; /* WQE to be purged */ 985 wqe->wqef = WQEF_PURGE; /* WQE to be purged */
@@ -1450,7 +1451,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
1450 /* no support for max_send/recv_sge yet */ 1451 /* no support for max_send/recv_sge yet */
1451 } 1452 }
1452 1453
1453 if (ehca_debug_level) 1454 if (ehca_debug_level >= 2)
1454 ehca_dmp(mqpcb, 4*70, "qp_num=%x", ibqp->qp_num); 1455 ehca_dmp(mqpcb, 4*70, "qp_num=%x", ibqp->qp_num);
1455 1456
1456 h_ret = hipz_h_modify_qp(shca->ipz_hca_handle, 1457 h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
@@ -1508,6 +1509,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
1508 if (attr_mask & IB_QP_QKEY) 1509 if (attr_mask & IB_QP_QKEY)
1509 my_qp->qkey = attr->qkey; 1510 my_qp->qkey = attr->qkey;
1510 1511
1512 my_qp->state = qp_new_state;
1513
1511modify_qp_exit2: 1514modify_qp_exit2:
1512 if (squeue_locked) { /* this means: sqe -> rts */ 1515 if (squeue_locked) { /* this means: sqe -> rts */
1513 spin_unlock_irqrestore(&my_qp->spinlock_s, flags); 1516 spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
@@ -1763,7 +1766,7 @@ int ehca_query_qp(struct ib_qp *qp,
1763 if (qp_init_attr) 1766 if (qp_init_attr)
1764 *qp_init_attr = my_qp->init_attr; 1767 *qp_init_attr = my_qp->init_attr;
1765 1768
1766 if (ehca_debug_level) 1769 if (ehca_debug_level >= 2)
1767 ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num); 1770 ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num);
1768 1771
1769query_qp_exit1: 1772query_qp_exit1:
@@ -1811,7 +1814,7 @@ int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
1811 goto modify_srq_exit0; 1814 goto modify_srq_exit0;
1812 } 1815 }
1813 1816
1814 if (ehca_debug_level) 1817 if (ehca_debug_level >= 2)
1815 ehca_dmp(mqpcb, 4*70, "qp_num=%x", my_qp->real_qp_num); 1818 ehca_dmp(mqpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);
1816 1819
1817 h_ret = hipz_h_modify_qp(shca->ipz_hca_handle, my_qp->ipz_qp_handle, 1820 h_ret = hipz_h_modify_qp(shca->ipz_hca_handle, my_qp->ipz_qp_handle,
@@ -1864,7 +1867,7 @@ int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
1864 srq_attr->srq_limit = EHCA_BMASK_GET( 1867 srq_attr->srq_limit = EHCA_BMASK_GET(
1865 MQPCB_CURR_SRQ_LIMIT, qpcb->curr_srq_limit); 1868 MQPCB_CURR_SRQ_LIMIT, qpcb->curr_srq_limit);
1866 1869
1867 if (ehca_debug_level) 1870 if (ehca_debug_level >= 2)
1868 ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num); 1871 ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);
1869 1872
1870query_srq_exit1: 1873query_srq_exit1:
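
[Editor's note] All the `if (ehca_debug_level)` tests in this file move to graded thresholds: level 1 keeps error-path traces, level 2 adds control-block and CQE dumps, level 3 turns on per-WQE hot-path dumps. The point is that ehca_dmp() is expensive, so the hottest paths also wrap the test in unlikely(). A sketch of the idiom using the calls visible above:

```c
/* Graded verbosity: keep the dump cost off the fast path unless the
 * admin asked for it.  Identifiers as used in the hunks above.
 */
if (unlikely(ehca_debug_level >= 2))	/* control-block dumps */
	ehca_dmp(qpcb, 4 * 70, "qp_num=%x", qp->qp_num);

if (ehca_debug_level >= 3)		/* hottest paths: per-WQE dumps */
	ehca_dmp(wqe_p, 16 * (6 + wqe_p->nr_of_data_seg), "send wqe");
```
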
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index a20bbf466188..bbe0436f4f75 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -81,7 +81,7 @@ static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
81 recv_wr->sg_list[cnt_ds].length; 81 recv_wr->sg_list[cnt_ds].length;
82 } 82 }
83 83
84 if (ehca_debug_level) { 84 if (ehca_debug_level >= 3) {
85 ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p", 85 ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p",
86 ipz_rqueue); 86 ipz_rqueue);
87 ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe"); 87 ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
@@ -281,7 +281,7 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
281 return -EINVAL; 281 return -EINVAL;
282 } 282 }
283 283
284 if (ehca_debug_level) { 284 if (ehca_debug_level >= 3) {
285 ehca_gen_dbg("SEND WQE written into queue qp=%p ", qp); 285 ehca_gen_dbg("SEND WQE written into queue qp=%p ", qp);
286 ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "send wqe"); 286 ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "send wqe");
287 } 287 }
@@ -421,6 +421,11 @@ int ehca_post_send(struct ib_qp *qp,
421 int ret = 0; 421 int ret = 0;
422 unsigned long flags; 422 unsigned long flags;
423 423
424 if (unlikely(my_qp->state != IB_QPS_RTS)) {
425 ehca_err(qp->device, "QP not in RTS state qpn=%x", qp->qp_num);
426 return -EINVAL;
427 }
428
424 /* LOCK the QUEUE */ 429 /* LOCK the QUEUE */
425 spin_lock_irqsave(&my_qp->spinlock_s, flags); 430 spin_lock_irqsave(&my_qp->spinlock_s, flags);
426 431
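
[Editor's note] This check works because the driver now mirrors the verbs state machine itself: my_qp->state starts at IB_QPS_RESET in internal_create_qp() and is updated after each successful internal_modify_qp(), so ehca_post_send() can reject a QP that is not in RTS without a hypervisor call. A standalone sketch of the cached-state pattern (types are illustrative, not the ehca ones):

```c
/* Standalone sketch of caching the QP state in the driver. */
enum example_qp_state { EX_QPS_RESET, EX_QPS_RTS };

struct example_qp {
	enum example_qp_state state;	/* mirrors the firmware state machine */
};

static int example_post_send(struct example_qp *qp)
{
	if (qp->state != EX_QPS_RTS)
		return -EINVAL;		/* fail fast: no hCall round trip */
	/* ... build the WQE and ring the doorbell ... */
	return 0;
}
```
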
@@ -454,13 +459,14 @@ int ehca_post_send(struct ib_qp *qp,
454 goto post_send_exit0; 459 goto post_send_exit0;
455 } 460 }
456 wqe_cnt++; 461 wqe_cnt++;
457 ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d",
458 my_qp, qp->qp_num, wqe_cnt);
459 } /* eof for cur_send_wr */ 462 } /* eof for cur_send_wr */
460 463
461post_send_exit0: 464post_send_exit0:
462 iosync(); /* serialize GAL register access */ 465 iosync(); /* serialize GAL register access */
463 hipz_update_sqa(my_qp, wqe_cnt); 466 hipz_update_sqa(my_qp, wqe_cnt);
467 if (unlikely(ret || ehca_debug_level >= 2))
468 ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i",
469 my_qp, qp->qp_num, wqe_cnt, ret);
464 my_qp->message_count += wqe_cnt; 470 my_qp->message_count += wqe_cnt;
465 spin_unlock_irqrestore(&my_qp->spinlock_s, flags); 471 spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
466 return ret; 472 return ret;
@@ -520,13 +526,14 @@ static int internal_post_recv(struct ehca_qp *my_qp,
520 goto post_recv_exit0; 526 goto post_recv_exit0;
521 } 527 }
522 wqe_cnt++; 528 wqe_cnt++;
523 ehca_dbg(dev, "ehca_qp=%p qp_num=%x wqe_cnt=%d",
524 my_qp, my_qp->real_qp_num, wqe_cnt);
525 } /* eof for cur_recv_wr */ 529 } /* eof for cur_recv_wr */
526 530
527post_recv_exit0: 531post_recv_exit0:
528 iosync(); /* serialize GAL register access */ 532 iosync(); /* serialize GAL register access */
529 hipz_update_rqa(my_qp, wqe_cnt); 533 hipz_update_rqa(my_qp, wqe_cnt);
534 if (unlikely(ret || ehca_debug_level >= 2))
535 ehca_dbg(dev, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i",
536 my_qp, my_qp->real_qp_num, wqe_cnt, ret);
530 spin_unlock_irqrestore(&my_qp->spinlock_r, flags); 537 spin_unlock_irqrestore(&my_qp->spinlock_r, flags);
531 return ret; 538 return ret;
532} 539}
@@ -570,16 +577,17 @@ static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
570 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq); 577 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
571 struct ehca_cqe *cqe; 578 struct ehca_cqe *cqe;
572 struct ehca_qp *my_qp; 579 struct ehca_qp *my_qp;
573 int cqe_count = 0; 580 int cqe_count = 0, is_error;
574 581
575poll_cq_one_read_cqe: 582poll_cq_one_read_cqe:
576 cqe = (struct ehca_cqe *) 583 cqe = (struct ehca_cqe *)
577 ipz_qeit_get_inc_valid(&my_cq->ipz_queue); 584 ipz_qeit_get_inc_valid(&my_cq->ipz_queue);
578 if (!cqe) { 585 if (!cqe) {
579 ret = -EAGAIN; 586 ret = -EAGAIN;
580 ehca_dbg(cq->device, "Completion queue is empty ehca_cq=%p " 587 if (ehca_debug_level >= 3)
581 "cq_num=%x ret=%i", my_cq, my_cq->cq_number, ret); 588 ehca_dbg(cq->device, "Completion queue is empty "
582 goto poll_cq_one_exit0; 589 "my_cq=%p cq_num=%x", my_cq, my_cq->cq_number);
590 goto poll_cq_one_exit0;
583 } 591 }
584 592
585 /* prevents loads being reordered across this point */ 593 /* prevents loads being reordered across this point */
@@ -609,7 +617,7 @@ poll_cq_one_read_cqe:
609 ehca_dbg(cq->device, 617 ehca_dbg(cq->device,
610 "Got CQE with purged bit qp_num=%x src_qp=%x", 618 "Got CQE with purged bit qp_num=%x src_qp=%x",
611 cqe->local_qp_number, cqe->remote_qp_number); 619 cqe->local_qp_number, cqe->remote_qp_number);
612 if (ehca_debug_level) 620 if (ehca_debug_level >= 2)
613 ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x", 621 ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x",
614 cqe->local_qp_number, 622 cqe->local_qp_number,
615 cqe->remote_qp_number); 623 cqe->remote_qp_number);
@@ -622,11 +630,13 @@ poll_cq_one_read_cqe:
622 } 630 }
623 } 631 }
624 632
625 /* tracing cqe */ 633 is_error = cqe->status & WC_STATUS_ERROR_BIT;
626 if (unlikely(ehca_debug_level)) { 634
635 /* trace error CQEs if debug_level >= 1, trace all CQEs if >= 3 */
636 if (unlikely(ehca_debug_level >= 3 || (ehca_debug_level && is_error))) {
627 ehca_dbg(cq->device, 637 ehca_dbg(cq->device,
628 "Received COMPLETION ehca_cq=%p cq_num=%x -----", 638 "Received %sCOMPLETION ehca_cq=%p cq_num=%x -----",
629 my_cq, my_cq->cq_number); 639 is_error ? "ERROR " : "", my_cq, my_cq->cq_number);
630 ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x", 640 ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
631 my_cq, my_cq->cq_number); 641 my_cq, my_cq->cq_number);
632 ehca_dbg(cq->device, 642 ehca_dbg(cq->device,
@@ -649,8 +659,9 @@ poll_cq_one_read_cqe:
649 /* update also queue adder to throw away this entry!!! */ 659 /* update also queue adder to throw away this entry!!! */
650 goto poll_cq_one_exit0; 660 goto poll_cq_one_exit0;
651 } 661 }
662
652 /* eval ib_wc_status */ 663 /* eval ib_wc_status */
653 if (unlikely(cqe->status & WC_STATUS_ERROR_BIT)) { 664 if (unlikely(is_error)) {
654 /* complete with errors */ 665 /* complete with errors */
655 map_ib_wc_status(cqe->status, &wc->status); 666 map_ib_wc_status(cqe->status, &wc->status);
656 wc->vendor_err = wc->status; 667 wc->vendor_err = wc->status;
@@ -671,14 +682,6 @@ poll_cq_one_read_cqe:
671 wc->imm_data = cpu_to_be32(cqe->immediate_data); 682 wc->imm_data = cpu_to_be32(cqe->immediate_data);
672 wc->sl = cqe->service_level; 683 wc->sl = cqe->service_level;
673 684
674 if (unlikely(wc->status != IB_WC_SUCCESS))
675 ehca_dbg(cq->device,
676 "ehca_cq=%p cq_num=%x WARNING unsuccessful cqe "
677 "OPType=%x status=%x qp_num=%x src_qp=%x wr_id=%lx "
678 "cqe=%p", my_cq, my_cq->cq_number, cqe->optype,
679 cqe->status, cqe->local_qp_number,
680 cqe->remote_qp_number, cqe->work_request_id, cqe);
681
682poll_cq_one_exit0: 685poll_cq_one_exit0:
683 if (cqe_count > 0) 686 if (cqe_count > 0)
684 hipz_update_feca(my_cq, cqe_count); 687 hipz_update_feca(my_cq, cqe_count);
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/infiniband/hw/ehca/ehca_uverbs.c
index 1b07f2beafaf..e43ed8f8a0c8 100644
--- a/drivers/infiniband/hw/ehca/ehca_uverbs.c
+++ b/drivers/infiniband/hw/ehca/ehca_uverbs.c
@@ -211,8 +211,7 @@ static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
211 break; 211 break;
212 212
213 case 1: /* qp rqueue_addr */ 213 case 1: /* qp rqueue_addr */
214 ehca_dbg(qp->ib_qp.device, "qp_num=%x rqueue", 214 ehca_dbg(qp->ib_qp.device, "qp_num=%x rq", qp->ib_qp.qp_num);
215 qp->ib_qp.qp_num);
216 ret = ehca_mmap_queue(vma, &qp->ipz_rqueue, 215 ret = ehca_mmap_queue(vma, &qp->ipz_rqueue,
217 &qp->mm_count_rqueue); 216 &qp->mm_count_rqueue);
218 if (unlikely(ret)) { 217 if (unlikely(ret)) {
@@ -224,8 +223,7 @@ static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
224 break; 223 break;
225 224
226 case 2: /* qp squeue_addr */ 225 case 2: /* qp squeue_addr */
227 ehca_dbg(qp->ib_qp.device, "qp_num=%x squeue", 226 ehca_dbg(qp->ib_qp.device, "qp_num=%x sq", qp->ib_qp.qp_num);
228 qp->ib_qp.qp_num);
229 ret = ehca_mmap_queue(vma, &qp->ipz_squeue, 227 ret = ehca_mmap_queue(vma, &qp->ipz_squeue,
230 &qp->mm_count_squeue); 228 &qp->mm_count_squeue);
231 if (unlikely(ret)) { 229 if (unlikely(ret)) {
diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/infiniband/hw/ehca/hcp_if.c
index 7029aa653751..5245e13c3a30 100644
--- a/drivers/infiniband/hw/ehca/hcp_if.c
+++ b/drivers/infiniband/hw/ehca/hcp_if.c
@@ -123,8 +123,9 @@ static long ehca_plpar_hcall_norets(unsigned long opcode,
123 int i, sleep_msecs; 123 int i, sleep_msecs;
124 unsigned long flags = 0; 124 unsigned long flags = 0;
125 125
126 ehca_gen_dbg("opcode=%lx " HCALL7_REGS_FORMAT, 126 if (unlikely(ehca_debug_level >= 2))
127 opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7); 127 ehca_gen_dbg("opcode=%lx " HCALL7_REGS_FORMAT,
128 opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
128 129
129 for (i = 0; i < 5; i++) { 130 for (i = 0; i < 5; i++) {
130 /* serialize hCalls to work around firmware issue */ 131 /* serialize hCalls to work around firmware issue */
@@ -148,7 +149,8 @@ static long ehca_plpar_hcall_norets(unsigned long opcode,
148 opcode, ret, arg1, arg2, arg3, 149 opcode, ret, arg1, arg2, arg3,
149 arg4, arg5, arg6, arg7); 150 arg4, arg5, arg6, arg7);
150 else 151 else
151 ehca_gen_dbg("opcode=%lx ret=%li", opcode, ret); 152 if (unlikely(ehca_debug_level >= 2))
153 ehca_gen_dbg("opcode=%lx ret=%li", opcode, ret);
152 154
153 return ret; 155 return ret;
154 } 156 }
@@ -172,8 +174,10 @@ static long ehca_plpar_hcall9(unsigned long opcode,
172 int i, sleep_msecs; 174 int i, sleep_msecs;
173 unsigned long flags = 0; 175 unsigned long flags = 0;
174 176
175 ehca_gen_dbg("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT, opcode, 177 if (unlikely(ehca_debug_level >= 2))
176 arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9); 178 ehca_gen_dbg("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT, opcode,
179 arg1, arg2, arg3, arg4, arg5,
180 arg6, arg7, arg8, arg9);
177 181
178 for (i = 0; i < 5; i++) { 182 for (i = 0; i < 5; i++) {
179 /* serialize hCalls to work around firmware issue */ 183 /* serialize hCalls to work around firmware issue */
@@ -201,7 +205,7 @@ static long ehca_plpar_hcall9(unsigned long opcode,
201 ret, outs[0], outs[1], outs[2], outs[3], 205 ret, outs[0], outs[1], outs[2], outs[3],
202 outs[4], outs[5], outs[6], outs[7], 206 outs[4], outs[5], outs[6], outs[7],
203 outs[8]); 207 outs[8]);
204 } else 208 } else if (unlikely(ehca_debug_level >= 2))
205 ehca_gen_dbg("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT, 209 ehca_gen_dbg("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
206 ret, outs[0], outs[1], outs[2], outs[3], 210 ret, outs[0], outs[1], outs[2], outs[3],
207 outs[4], outs[5], outs[6], outs[7], 211 outs[4], outs[5], outs[6], outs[7],
@@ -381,7 +385,7 @@ u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
381 r_cb, /* r6 */ 385 r_cb, /* r6 */
382 0, 0, 0, 0); 386 0, 0, 0, 0);
383 387
384 if (ehca_debug_level) 388 if (ehca_debug_level >= 2)
385 ehca_dmp(query_port_response_block, 64, "response_block"); 389 ehca_dmp(query_port_response_block, 64, "response_block");
386 390
387 return ret; 391 return ret;
@@ -731,9 +735,6 @@ u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
731 u64 ret; 735 u64 ret;
732 u64 outs[PLPAR_HCALL9_BUFSIZE]; 736 u64 outs[PLPAR_HCALL9_BUFSIZE];
733 737
734 ehca_gen_dbg("kernel PAGE_SIZE=%x access_ctrl=%016x "
735 "vaddr=%lx length=%lx",
736 (u32)PAGE_SIZE, access_ctrl, vaddr, length);
737 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs, 738 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
738 adapter_handle.handle, /* r4 */ 739 adapter_handle.handle, /* r4 */
739 5, /* r5 */ 740 5, /* r5 */
@@ -758,7 +759,7 @@ u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
758{ 759{
759 u64 ret; 760 u64 ret;
760 761
761 if (unlikely(ehca_debug_level >= 2)) { 762 if (unlikely(ehca_debug_level >= 3)) {
762 if (count > 1) { 763 if (count > 1) {
763 u64 *kpage; 764 u64 *kpage;
764 int i; 765 int i;
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 3557e7edc9b6..5e570bb0bb6f 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -204,7 +204,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
204 204
205 uar = &to_mucontext(context)->uar; 205 uar = &to_mucontext(context)->uar;
206 } else { 206 } else {
207 err = mlx4_ib_db_alloc(dev, &cq->db, 1); 207 err = mlx4_db_alloc(dev->dev, &cq->db, 1);
208 if (err) 208 if (err)
209 goto err_cq; 209 goto err_cq;
210 210
@@ -250,7 +250,7 @@ err_mtt:
250 250
251err_db: 251err_db:
252 if (!context) 252 if (!context)
253 mlx4_ib_db_free(dev, &cq->db); 253 mlx4_db_free(dev->dev, &cq->db);
254 254
255err_cq: 255err_cq:
256 kfree(cq); 256 kfree(cq);
@@ -435,7 +435,7 @@ int mlx4_ib_destroy_cq(struct ib_cq *cq)
435 ib_umem_release(mcq->umem); 435 ib_umem_release(mcq->umem);
436 } else { 436 } else {
437 mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe + 1); 437 mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe + 1);
438 mlx4_ib_db_free(dev, &mcq->db); 438 mlx4_db_free(dev->dev, &mcq->db);
439 } 439 }
440 440
441 kfree(mcq); 441 kfree(mcq);
diff --git a/drivers/infiniband/hw/mlx4/doorbell.c b/drivers/infiniband/hw/mlx4/doorbell.c
index 1c36087aef14..8e342cc9baec 100644
--- a/drivers/infiniband/hw/mlx4/doorbell.c
+++ b/drivers/infiniband/hw/mlx4/doorbell.c
@@ -34,124 +34,6 @@
34 34
35#include "mlx4_ib.h" 35#include "mlx4_ib.h"
36 36
37struct mlx4_ib_db_pgdir {
38 struct list_head list;
39 DECLARE_BITMAP(order0, MLX4_IB_DB_PER_PAGE);
40 DECLARE_BITMAP(order1, MLX4_IB_DB_PER_PAGE / 2);
41 unsigned long *bits[2];
42 __be32 *db_page;
43 dma_addr_t db_dma;
44};
45
46static struct mlx4_ib_db_pgdir *mlx4_ib_alloc_db_pgdir(struct mlx4_ib_dev *dev)
47{
48 struct mlx4_ib_db_pgdir *pgdir;
49
50 pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL);
51 if (!pgdir)
52 return NULL;
53
54 bitmap_fill(pgdir->order1, MLX4_IB_DB_PER_PAGE / 2);
55 pgdir->bits[0] = pgdir->order0;
56 pgdir->bits[1] = pgdir->order1;
57 pgdir->db_page = dma_alloc_coherent(dev->ib_dev.dma_device,
58 PAGE_SIZE, &pgdir->db_dma,
59 GFP_KERNEL);
60 if (!pgdir->db_page) {
61 kfree(pgdir);
62 return NULL;
63 }
64
65 return pgdir;
66}
67
68static int mlx4_ib_alloc_db_from_pgdir(struct mlx4_ib_db_pgdir *pgdir,
69 struct mlx4_ib_db *db, int order)
70{
71 int o;
72 int i;
73
74 for (o = order; o <= 1; ++o) {
75 i = find_first_bit(pgdir->bits[o], MLX4_IB_DB_PER_PAGE >> o);
76 if (i < MLX4_IB_DB_PER_PAGE >> o)
77 goto found;
78 }
79
80 return -ENOMEM;
81
82found:
83 clear_bit(i, pgdir->bits[o]);
84
85 i <<= o;
86
87 if (o > order)
88 set_bit(i ^ 1, pgdir->bits[order]);
89
90 db->u.pgdir = pgdir;
91 db->index = i;
92 db->db = pgdir->db_page + db->index;
93 db->dma = pgdir->db_dma + db->index * 4;
94 db->order = order;
95
96 return 0;
97}
98
99int mlx4_ib_db_alloc(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db, int order)
100{
101 struct mlx4_ib_db_pgdir *pgdir;
102 int ret = 0;
103
104 mutex_lock(&dev->pgdir_mutex);
105
106 list_for_each_entry(pgdir, &dev->pgdir_list, list)
107 if (!mlx4_ib_alloc_db_from_pgdir(pgdir, db, order))
108 goto out;
109
110 pgdir = mlx4_ib_alloc_db_pgdir(dev);
111 if (!pgdir) {
112 ret = -ENOMEM;
113 goto out;
114 }
115
116 list_add(&pgdir->list, &dev->pgdir_list);
117
118 /* This should never fail -- we just allocated an empty page: */
119 WARN_ON(mlx4_ib_alloc_db_from_pgdir(pgdir, db, order));
120
121out:
122 mutex_unlock(&dev->pgdir_mutex);
123
124 return ret;
125}
126
127void mlx4_ib_db_free(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db)
128{
129 int o;
130 int i;
131
132 mutex_lock(&dev->pgdir_mutex);
133
134 o = db->order;
135 i = db->index;
136
137 if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
138 clear_bit(i ^ 1, db->u.pgdir->order0);
139 ++o;
140 }
141
142 i >>= o;
143 set_bit(i, db->u.pgdir->bits[o]);
144
145 if (bitmap_full(db->u.pgdir->order1, MLX4_IB_DB_PER_PAGE / 2)) {
146 dma_free_coherent(dev->ib_dev.dma_device, PAGE_SIZE,
147 db->u.pgdir->db_page, db->u.pgdir->db_dma);
148 list_del(&db->u.pgdir->list);
149 kfree(db->u.pgdir);
150 }
151
152 mutex_unlock(&dev->pgdir_mutex);
153}
154
155struct mlx4_ib_user_db_page { 37struct mlx4_ib_user_db_page {
156 struct list_head list; 38 struct list_head list;
157 struct ib_umem *umem; 39 struct ib_umem *umem;
@@ -160,7 +42,7 @@ struct mlx4_ib_user_db_page {
160}; 42};
161 43
162int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt, 44int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
163 struct mlx4_ib_db *db) 45 struct mlx4_db *db)
164{ 46{
165 struct mlx4_ib_user_db_page *page; 47 struct mlx4_ib_user_db_page *page;
166 struct ib_umem_chunk *chunk; 48 struct ib_umem_chunk *chunk;
@@ -202,7 +84,7 @@ out:
202 return err; 84 return err;
203} 85}
204 86
205void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_ib_db *db) 87void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db)
206{ 88{
207 mutex_lock(&context->db_page_mutex); 89 mutex_lock(&context->db_page_mutex);
208 90
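
[Editor's note] The whole per-driver doorbell allocator deleted above, a two-order buddy bitmap over one dma_alloc_coherent() page, moves into mlx4_core as mlx4_db_alloc()/mlx4_db_free(), so the IB driver and future mlx4 consumers share one copy; only the user-space doorbell mapping helpers remain here. A hedged usage sketch of the core API, with signatures inferred from the call sites in the following hunks (`mdev` is assumed to be the core struct mlx4_dev pointer):

```c
/* Hedged sketch: allocating and releasing a kernel doorbell record.
 * Order 0 = a single 32-bit doorbell; CQs use order 1 for the
 * set_ci/arm pair, as in mlx4_db_alloc(dev->dev, &cq->db, 1) below.
 */
struct mlx4_db db;
int err;

err = mlx4_db_alloc(mdev, &db, 0);
if (err)
	return err;

*db.db = 0;			/* db.db points into the coherent page */
/* ... program db.dma into the hardware context ... */

mlx4_db_free(mdev, &db);
```
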
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 4d9b5ac42202..4d61e32866c6 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -557,9 +557,6 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
557 goto err_uar; 557 goto err_uar;
558 MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock); 558 MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
559 559
560 INIT_LIST_HEAD(&ibdev->pgdir_list);
561 mutex_init(&ibdev->pgdir_mutex);
562
563 ibdev->dev = dev; 560 ibdev->dev = dev;
564 561
565 strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX); 562 strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 9e637323c155..5cf994794d25 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -43,24 +43,6 @@
43#include <linux/mlx4/device.h> 43#include <linux/mlx4/device.h>
44#include <linux/mlx4/doorbell.h> 44#include <linux/mlx4/doorbell.h>
45 45
46enum {
47 MLX4_IB_DB_PER_PAGE = PAGE_SIZE / 4
48};
49
50struct mlx4_ib_db_pgdir;
51struct mlx4_ib_user_db_page;
52
53struct mlx4_ib_db {
54 __be32 *db;
55 union {
56 struct mlx4_ib_db_pgdir *pgdir;
57 struct mlx4_ib_user_db_page *user_page;
58 } u;
59 dma_addr_t dma;
60 int index;
61 int order;
62};
63
64struct mlx4_ib_ucontext { 46struct mlx4_ib_ucontext {
65 struct ib_ucontext ibucontext; 47 struct ib_ucontext ibucontext;
66 struct mlx4_uar uar; 48 struct mlx4_uar uar;
@@ -88,7 +70,7 @@ struct mlx4_ib_cq {
88 struct mlx4_cq mcq; 70 struct mlx4_cq mcq;
89 struct mlx4_ib_cq_buf buf; 71 struct mlx4_ib_cq_buf buf;
90 struct mlx4_ib_cq_resize *resize_buf; 72 struct mlx4_ib_cq_resize *resize_buf;
91 struct mlx4_ib_db db; 73 struct mlx4_db db;
92 spinlock_t lock; 74 spinlock_t lock;
93 struct mutex resize_mutex; 75 struct mutex resize_mutex;
94 struct ib_umem *umem; 76 struct ib_umem *umem;
@@ -127,7 +109,7 @@ struct mlx4_ib_qp {
127 struct mlx4_qp mqp; 109 struct mlx4_qp mqp;
128 struct mlx4_buf buf; 110 struct mlx4_buf buf;
129 111
130 struct mlx4_ib_db db; 112 struct mlx4_db db;
131 struct mlx4_ib_wq rq; 113 struct mlx4_ib_wq rq;
132 114
133 u32 doorbell_qpn; 115 u32 doorbell_qpn;
@@ -154,7 +136,7 @@ struct mlx4_ib_srq {
154 struct ib_srq ibsrq; 136 struct ib_srq ibsrq;
155 struct mlx4_srq msrq; 137 struct mlx4_srq msrq;
156 struct mlx4_buf buf; 138 struct mlx4_buf buf;
157 struct mlx4_ib_db db; 139 struct mlx4_db db;
158 u64 *wrid; 140 u64 *wrid;
159 spinlock_t lock; 141 spinlock_t lock;
160 int head; 142 int head;
@@ -175,9 +157,6 @@ struct mlx4_ib_dev {
175 struct mlx4_dev *dev; 157 struct mlx4_dev *dev;
176 void __iomem *uar_map; 158 void __iomem *uar_map;
177 159
178 struct list_head pgdir_list;
179 struct mutex pgdir_mutex;
180
181 struct mlx4_uar priv_uar; 160 struct mlx4_uar priv_uar;
182 u32 priv_pdn; 161 u32 priv_pdn;
183 MLX4_DECLARE_DOORBELL_LOCK(uar_lock); 162 MLX4_DECLARE_DOORBELL_LOCK(uar_lock);
@@ -248,11 +227,9 @@ static inline struct mlx4_ib_ah *to_mah(struct ib_ah *ibah)
248 return container_of(ibah, struct mlx4_ib_ah, ibah); 227 return container_of(ibah, struct mlx4_ib_ah, ibah);
249} 228}
250 229
251int mlx4_ib_db_alloc(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db, int order);
252void mlx4_ib_db_free(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db);
253int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt, 230int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
254 struct mlx4_ib_db *db); 231 struct mlx4_db *db);
255void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_ib_db *db); 232void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db);
256 233
257struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc); 234struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc);
258int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt, 235int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index b75efae7e449..80ea8b9e7761 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -514,7 +514,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
514 goto err; 514 goto err;
515 515
516 if (!init_attr->srq) { 516 if (!init_attr->srq) {
517 err = mlx4_ib_db_alloc(dev, &qp->db, 0); 517 err = mlx4_db_alloc(dev->dev, &qp->db, 0);
518 if (err) 518 if (err)
519 goto err; 519 goto err;
520 520
@@ -580,7 +580,7 @@ err_buf:
580 580
581err_db: 581err_db:
582 if (!pd->uobject && !init_attr->srq) 582 if (!pd->uobject && !init_attr->srq)
583 mlx4_ib_db_free(dev, &qp->db); 583 mlx4_db_free(dev->dev, &qp->db);
584 584
585err: 585err:
586 return err; 586 return err;
@@ -666,7 +666,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
666 kfree(qp->rq.wrid); 666 kfree(qp->rq.wrid);
667 mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); 667 mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
668 if (!qp->ibqp.srq) 668 if (!qp->ibqp.srq)
669 mlx4_ib_db_free(dev, &qp->db); 669 mlx4_db_free(dev->dev, &qp->db);
670 } 670 }
671} 671}
672 672
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index beaa3b06cf58..204619702f9d 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -129,7 +129,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
129 if (err) 129 if (err)
130 goto err_mtt; 130 goto err_mtt;
131 } else { 131 } else {
132 err = mlx4_ib_db_alloc(dev, &srq->db, 0); 132 err = mlx4_db_alloc(dev->dev, &srq->db, 0);
133 if (err) 133 if (err)
134 goto err_srq; 134 goto err_srq;
135 135
@@ -200,7 +200,7 @@ err_buf:
200 200
201err_db: 201err_db:
202 if (!pd->uobject) 202 if (!pd->uobject)
203 mlx4_ib_db_free(dev, &srq->db); 203 mlx4_db_free(dev->dev, &srq->db);
204 204
205err_srq: 205err_srq:
206 kfree(srq); 206 kfree(srq);
@@ -267,7 +267,7 @@ int mlx4_ib_destroy_srq(struct ib_srq *srq)
267 kfree(msrq->wrid); 267 kfree(msrq->wrid);
268 mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift, 268 mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift,
269 &msrq->buf); 269 &msrq->buf);
270 mlx4_ib_db_free(dev, &msrq->db); 270 mlx4_db_free(dev->dev, &msrq->db);
271 } 271 }
272 272
273 kfree(msrq); 273 kfree(msrq);
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index b046262ed638..a4e9269a29bd 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -139,8 +139,9 @@ static int nes_inetaddr_event(struct notifier_block *notifier,
139 139
140 addr = ntohl(ifa->ifa_address); 140 addr = ntohl(ifa->ifa_address);
141 mask = ntohl(ifa->ifa_mask); 141 mask = ntohl(ifa->ifa_mask);
142 nes_debug(NES_DBG_NETDEV, "nes_inetaddr_event: ip address %08X, netmask %08X.\n", 142 nes_debug(NES_DBG_NETDEV, "nes_inetaddr_event: ip address " NIPQUAD_FMT
143 addr, mask); 143 ", netmask " NIPQUAD_FMT ".\n",
144 HIPQUAD(addr), HIPQUAD(mask));
144 list_for_each_entry(nesdev, &nes_dev_list, list) { 145 list_for_each_entry(nesdev, &nes_dev_list, list) {
145 nes_debug(NES_DBG_NETDEV, "Nesdev list entry = 0x%p. (%s)\n", 146 nes_debug(NES_DBG_NETDEV, "Nesdev list entry = 0x%p. (%s)\n",
146 nesdev, nesdev->netdev[0]->name); 147 nesdev, nesdev->netdev[0]->name);
@@ -353,13 +354,11 @@ struct ib_qp *nes_get_qp(struct ib_device *device, int qpn)
353 */ 354 */
354static void nes_print_macaddr(struct net_device *netdev) 355static void nes_print_macaddr(struct net_device *netdev)
355{ 356{
356 nes_debug(NES_DBG_INIT, "%s: MAC %02X:%02X:%02X:%02X:%02X:%02X, IRQ %u\n", 357 DECLARE_MAC_BUF(mac);
357 netdev->name,
358 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
359 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5],
360 netdev->irq);
361}
362 358
359 nes_debug(NES_DBG_INIT, "%s: %s, IRQ %u\n",
360 netdev->name, print_mac(mac, netdev->dev_addr), netdev->irq);
361}
363 362
364/** 363/**
365 * nes_interrupt - handle interrupts 364 * nes_interrupt - handle interrupts
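The nes.c hunk swaps two open-coded formats for helpers from this kernel generation: DECLARE_MAC_BUF()/print_mac() for MAC addresses, and NIPQUAD_FMT with HIPQUAD() for dotted-quad IPv4 addresses held in host byte order. A minimal sketch of the MAC half (these helpers were later superseded by the %pM printk extension):

    static void show_mac(struct net_device *netdev)
    {
            DECLARE_MAC_BUF(mac);   /* char buffer sized for "xx:xx:xx:xx:xx:xx" */

            /* print_mac() formats the address into 'mac' and returns it */
            printk(KERN_INFO "%s: MAC %s, IRQ %u\n",
                   netdev->name, print_mac(mac, netdev->dev_addr), netdev->irq);
    }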
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index d0738623bcf3..d940fc27129a 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -852,8 +852,8 @@ static struct nes_cm_node *find_node(struct nes_cm_core *cm_core,
852 /* get a handle on the hte */ 852 /* get a handle on the hte */
853 hte = &cm_core->connected_nodes; 853 hte = &cm_core->connected_nodes;
854 854
855 nes_debug(NES_DBG_CM, "Searching for an owner node:%x:%x from core %p->%p\n", 855 nes_debug(NES_DBG_CM, "Searching for an owner node: " NIPQUAD_FMT ":%x from core %p->%p\n",
856 loc_addr, loc_port, cm_core, hte); 856 HIPQUAD(loc_addr), loc_port, cm_core, hte);
857 857
858 /* walk list and find cm_node associated with this session ID */ 858 /* walk list and find cm_node associated with this session ID */
859 spin_lock_irqsave(&cm_core->ht_lock, flags); 859 spin_lock_irqsave(&cm_core->ht_lock, flags);
@@ -902,8 +902,8 @@ static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core,
902 } 902 }
903 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); 903 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
904 904
905 nes_debug(NES_DBG_CM, "Unable to find listener- %x:%x\n", 905 nes_debug(NES_DBG_CM, "Unable to find listener for " NIPQUAD_FMT ":%x\n",
906 dst_addr, dst_port); 906 HIPQUAD(dst_addr), dst_port);
907 907
908 /* no listener */ 908 /* no listener */
909 return NULL; 909 return NULL;
@@ -1054,6 +1054,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
1054 int arpindex = 0; 1054 int arpindex = 0;
1055 struct nes_device *nesdev; 1055 struct nes_device *nesdev;
1056 struct nes_adapter *nesadapter; 1056 struct nes_adapter *nesadapter;
1057 DECLARE_MAC_BUF(mac);
1057 1058
1058 /* create an hte and cm_node for this instance */ 1059 /* create an hte and cm_node for this instance */
1059 cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC); 1060 cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
@@ -1066,8 +1067,9 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
1066 cm_node->loc_port = cm_info->loc_port; 1067 cm_node->loc_port = cm_info->loc_port;
1067 cm_node->rem_port = cm_info->rem_port; 1068 cm_node->rem_port = cm_info->rem_port;
1068 cm_node->send_write0 = send_first; 1069 cm_node->send_write0 = send_first;
1069 nes_debug(NES_DBG_CM, "Make node addresses : loc = %x:%x, rem = %x:%x\n", 1070 nes_debug(NES_DBG_CM, "Make node addresses : loc = " NIPQUAD_FMT ":%x, rem = " NIPQUAD_FMT ":%x\n",
1070 cm_node->loc_addr, cm_node->loc_port, cm_node->rem_addr, cm_node->rem_port); 1071 HIPQUAD(cm_node->loc_addr), cm_node->loc_port,
1072 HIPQUAD(cm_node->rem_addr), cm_node->rem_port);
1071 cm_node->listener = listener; 1073 cm_node->listener = listener;
1072 cm_node->netdev = nesvnic->netdev; 1074 cm_node->netdev = nesvnic->netdev;
1073 cm_node->cm_id = cm_info->cm_id; 1075 cm_node->cm_id = cm_info->cm_id;
@@ -1116,11 +1118,8 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
1116 1118
1117 /* copy the mac addr to node context */ 1119 /* copy the mac addr to node context */
1118 memcpy(cm_node->rem_mac, nesadapter->arp_table[arpindex].mac_addr, ETH_ALEN); 1120 memcpy(cm_node->rem_mac, nesadapter->arp_table[arpindex].mac_addr, ETH_ALEN);
1119 nes_debug(NES_DBG_CM, "Remote mac addr from arp table:%02x," 1121 nes_debug(NES_DBG_CM, "Remote mac addr from arp table: %s\n",
1120 " %02x, %02x, %02x, %02x, %02x\n", 1122 print_mac(mac, cm_node->rem_mac));
1121 cm_node->rem_mac[0], cm_node->rem_mac[1],
1122 cm_node->rem_mac[2], cm_node->rem_mac[3],
1123 cm_node->rem_mac[4], cm_node->rem_mac[5]);
1124 1123
1125 add_hte_node(cm_core, cm_node); 1124 add_hte_node(cm_core, cm_node);
1126 atomic_inc(&cm_nodes_created); 1125 atomic_inc(&cm_nodes_created);
@@ -1850,8 +1849,10 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, struct nes_vnic *nesvni
1850 nfo.rem_addr = ntohl(iph->saddr); 1849 nfo.rem_addr = ntohl(iph->saddr);
1851 nfo.rem_port = ntohs(tcph->source); 1850 nfo.rem_port = ntohs(tcph->source);
1852 1851
1853 nes_debug(NES_DBG_CM, "Received packet: dest=0x%08X:0x%04X src=0x%08X:0x%04X\n", 1852 nes_debug(NES_DBG_CM, "Received packet: dest=" NIPQUAD_FMT
1854 iph->daddr, tcph->dest, iph->saddr, tcph->source); 1853 ":0x%04X src=" NIPQUAD_FMT ":0x%04X\n",
1854 NIPQUAD(iph->daddr), tcph->dest,
1855 NIPQUAD(iph->saddr), tcph->source);
1855 1856
1856 /* note: this call is going to increment cm_node ref count */ 1857 /* note: this call is going to increment cm_node ref count */
1857 cm_node = find_node(cm_core, 1858 cm_node = find_node(cm_core,
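The nes_cm.c changes apply the same treatment to IPv4 addresses: NIPQUAD_FMT is the dotted-quad format string, NIPQUAD() splits a network-order __be32 into four byte-sized arguments, and HIPQUAD() does the same for a value already converted to host order. A minimal sketch, assuming one address in each byte order:

    static void show_peer(__be32 net_ip)
    {
            u32 host_ip = ntohl(net_ip);

            /* both lines print the same dotted quad; the macro choice
             * follows the byte order the value is already stored in */
            printk(KERN_DEBUG "net order:  " NIPQUAD_FMT "\n", NIPQUAD(net_ip));
            printk(KERN_DEBUG "host order: " NIPQUAD_FMT "\n", HIPQUAD(host_ip));
    }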
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index aa53aab91bf8..08964cc7e98a 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -636,6 +636,15 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
636 nes_debug(NES_DBG_INIT, "Did not see full soft reset done.\n"); 636 nes_debug(NES_DBG_INIT, "Did not see full soft reset done.\n");
637 return 0; 637 return 0;
638 } 638 }
639
640 i = 0;
641 while ((nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS) != 0x80) && i++ < 10000)
642 mdelay(1);
643 if (i >= 10000) {
644 printk(KERN_ERR PFX "Internal CPU not ready, status = %02X\n",
645 nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS));
646 return 0;
647 }
639 } 648 }
640 649
641 /* port reset */ 650 /* port reset */
@@ -684,17 +693,6 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
684 } 693 }
685 } 694 }
686 695
687
688
689 i = 0;
690 while ((nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS) != 0x80) && i++ < 10000)
691 mdelay(1);
692 if (i >= 10000) {
693 printk(KERN_ERR PFX "Internal CPU not ready, status = %02X\n",
694 nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS));
695 return 0;
696 }
697
698 return port_count; 696 return port_count;
699} 697}
700 698
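Note that the nes_hw.c change only relocates the internal-CPU readiness wait so it runs immediately after a full soft reset rather than after the port resets; the loop itself is unchanged and is the usual bounded poll. Its generic shape (identifier names here are illustrative, not from the driver):

    int i = 0;

    /* poll a status register once per millisecond, give up after ~10 s */
    while (read_status(dev) != READY_VALUE && i++ < 10000)
            mdelay(1);
    if (i >= 10000)
            return 0;       /* nes_reset_adapter_ne020() reports zero ports on timeout */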
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index b7e2844f096b..8f36e231bdf5 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -905,7 +905,7 @@ struct nes_hw_qp {
905}; 905};
906 906
907struct nes_hw_cq { 907struct nes_hw_cq {
908 struct nes_hw_cqe volatile *cq_vbase; /* PCI memory for host rings */ 908 struct nes_hw_cqe *cq_vbase; /* PCI memory for host rings */
909 void (*ce_handler)(struct nes_device *nesdev, struct nes_hw_cq *cq); 909 void (*ce_handler)(struct nes_device *nesdev, struct nes_hw_cq *cq);
910 dma_addr_t cq_pbase; /* PCI memory for host rings */ 910 dma_addr_t cq_pbase; /* PCI memory for host rings */
911 u16 cq_head; 911 u16 cq_head;
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 01cd0effc492..e5366b013c1a 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -787,16 +787,14 @@ static int nes_netdev_set_mac_address(struct net_device *netdev, void *p)
787 int i; 787 int i;
788 u32 macaddr_low; 788 u32 macaddr_low;
789 u16 macaddr_high; 789 u16 macaddr_high;
790 DECLARE_MAC_BUF(mac);
790 791
791 if (!is_valid_ether_addr(mac_addr->sa_data)) 792 if (!is_valid_ether_addr(mac_addr->sa_data))
792 return -EADDRNOTAVAIL; 793 return -EADDRNOTAVAIL;
793 794
794 memcpy(netdev->dev_addr, mac_addr->sa_data, netdev->addr_len); 795 memcpy(netdev->dev_addr, mac_addr->sa_data, netdev->addr_len);
795 printk(PFX "%s: Address length = %d, Address = %02X%02X%02X%02X%02X%02X..\n", 796 printk(PFX "%s: Address length = %d, Address = %s\n",
796 __func__, netdev->addr_len, 797 __func__, netdev->addr_len, print_mac(mac, mac_addr->sa_data));
797 mac_addr->sa_data[0], mac_addr->sa_data[1],
798 mac_addr->sa_data[2], mac_addr->sa_data[3],
799 mac_addr->sa_data[4], mac_addr->sa_data[5]);
800 macaddr_high = ((u16)netdev->dev_addr[0]) << 8; 798 macaddr_high = ((u16)netdev->dev_addr[0]) << 8;
801 macaddr_high += (u16)netdev->dev_addr[1]; 799 macaddr_high += (u16)netdev->dev_addr[1];
802 macaddr_low = ((u32)netdev->dev_addr[2]) << 24; 800 macaddr_low = ((u32)netdev->dev_addr[2]) << 24;
@@ -878,11 +876,11 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
878 if (mc_nic_index < 0) 876 if (mc_nic_index < 0)
879 mc_nic_index = nesvnic->nic_index; 877 mc_nic_index = nesvnic->nic_index;
880 if (multicast_addr) { 878 if (multicast_addr) {
881 nes_debug(NES_DBG_NIC_RX, "Assigning MC Address = %02X%02X%02X%02X%02X%02X to register 0x%04X nic_idx=%d\n", 879 DECLARE_MAC_BUF(mac);
882 multicast_addr->dmi_addr[0], multicast_addr->dmi_addr[1], 880 nes_debug(NES_DBG_NIC_RX, "Assigning MC Address %s to register 0x%04X nic_idx=%d\n",
883 multicast_addr->dmi_addr[2], multicast_addr->dmi_addr[3], 881 print_mac(mac, multicast_addr->dmi_addr),
884 multicast_addr->dmi_addr[4], multicast_addr->dmi_addr[5], 882 perfect_filter_register_address+(mc_index * 8),
885 perfect_filter_register_address+(mc_index * 8), mc_nic_index); 883 mc_nic_index);
886 macaddr_high = ((u16)multicast_addr->dmi_addr[0]) << 8; 884 macaddr_high = ((u16)multicast_addr->dmi_addr[0]) << 8;
887 macaddr_high += (u16)multicast_addr->dmi_addr[1]; 885 macaddr_high += (u16)multicast_addr->dmi_addr[1];
888 macaddr_low = ((u32)multicast_addr->dmi_addr[2]) << 24; 886 macaddr_low = ((u32)multicast_addr->dmi_addr[2]) << 24;
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
index f9db07c2717d..c6d5631a6995 100644
--- a/drivers/infiniband/hw/nes/nes_utils.c
+++ b/drivers/infiniband/hw/nes/nes_utils.c
@@ -660,7 +660,9 @@ int nes_arp_table(struct nes_device *nesdev, u32 ip_addr, u8 *mac_addr, u32 acti
660 660
661 /* DELETE or RESOLVE */ 661 /* DELETE or RESOLVE */
662 if (arp_index == nesadapter->arp_table_size) { 662 if (arp_index == nesadapter->arp_table_size) {
663 nes_debug(NES_DBG_NETDEV, "mac address not in ARP table - cannot delete or resolve\n"); 663 nes_debug(NES_DBG_NETDEV, "MAC for " NIPQUAD_FMT " not in ARP table - cannot %s\n",
664 HIPQUAD(ip_addr),
665 action == NES_ARP_RESOLVE ? "resolve" : "delete");
664 return -1; 666 return -1;
665 } 667 }
666 668
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index f9a5d4390892..ee74f7c7a6da 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -1976,7 +1976,7 @@ static int nes_destroy_cq(struct ib_cq *ib_cq)
1976 1976
1977 if (nescq->cq_mem_size) 1977 if (nescq->cq_mem_size)
1978 pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, 1978 pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size,
1979 (void *)nescq->hw_cq.cq_vbase, nescq->hw_cq.cq_pbase); 1979 nescq->hw_cq.cq_vbase, nescq->hw_cq.cq_pbase);
1980 kfree(nescq); 1980 kfree(nescq);
1981 1981
1982 return ret; 1982 return ret;
@@ -3610,6 +3610,12 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
3610 while (cqe_count < num_entries) { 3610 while (cqe_count < num_entries) {
3611 if (le32_to_cpu(nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) & 3611 if (le32_to_cpu(nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) &
3612 NES_CQE_VALID) { 3612 NES_CQE_VALID) {
3613 /*
3614 * Make sure we read CQ entry contents *after*
3615 * we've checked the valid bit.
3616 */
3617 rmb();
3618
3613 cqe = nescq->hw_cq.cq_vbase[head]; 3619 cqe = nescq->hw_cq.cq_vbase[head];
3614 nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0; 3620 nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0;
3615 u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]); 3621 u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
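The rmb() added here is the other half of the nes_hw.h change above: dropping volatile from cq_vbase is safe only because the one ordering requirement — check the CQE valid bit, then read the CQE payload — is now enforced by an explicit read barrier at the point that needs it. The pattern, sketched with a hypothetical consumer:

    struct nes_hw_cqe *cqe = &nescq->hw_cq.cq_vbase[head];

    if (le32_to_cpu(cqe->cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_VALID) {
            /* keep the CPU from reading (or having speculatively read)
             * the rest of the entry before the valid bit was seen */
            rmb();
            consume_cqe(cqe);       /* hypothetical consumer */
    }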
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 73b2b176ad0e..f1f142dc64b1 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -56,11 +56,11 @@
56/* constants */ 56/* constants */
57 57
58enum { 58enum {
59 IPOIB_PACKET_SIZE = 2048,
60 IPOIB_BUF_SIZE = IPOIB_PACKET_SIZE + IB_GRH_BYTES,
61
62 IPOIB_ENCAP_LEN = 4, 59 IPOIB_ENCAP_LEN = 4,
63 60
61 IPOIB_UD_HEAD_SIZE = IB_GRH_BYTES + IPOIB_ENCAP_LEN,
62 IPOIB_UD_RX_SG = 2, /* max buffer needed for 4K mtu */
63
64 IPOIB_CM_MTU = 0x10000 - 0x10, /* padding to align header to 16 */ 64 IPOIB_CM_MTU = 0x10000 - 0x10, /* padding to align header to 16 */
65 IPOIB_CM_BUF_SIZE = IPOIB_CM_MTU + IPOIB_ENCAP_LEN, 65 IPOIB_CM_BUF_SIZE = IPOIB_CM_MTU + IPOIB_ENCAP_LEN,
66 IPOIB_CM_HEAD_SIZE = IPOIB_CM_BUF_SIZE % PAGE_SIZE, 66 IPOIB_CM_HEAD_SIZE = IPOIB_CM_BUF_SIZE % PAGE_SIZE,
@@ -139,7 +139,7 @@ struct ipoib_mcast {
139 139
140struct ipoib_rx_buf { 140struct ipoib_rx_buf {
141 struct sk_buff *skb; 141 struct sk_buff *skb;
142 u64 mapping; 142 u64 mapping[IPOIB_UD_RX_SG];
143}; 143};
144 144
145struct ipoib_tx_buf { 145struct ipoib_tx_buf {
@@ -294,6 +294,7 @@ struct ipoib_dev_priv {
294 294
295 unsigned int admin_mtu; 295 unsigned int admin_mtu;
296 unsigned int mcast_mtu; 296 unsigned int mcast_mtu;
297 unsigned int max_ib_mtu;
297 298
298 struct ipoib_rx_buf *rx_ring; 299 struct ipoib_rx_buf *rx_ring;
299 300
@@ -305,6 +306,9 @@ struct ipoib_dev_priv {
305 struct ib_send_wr tx_wr; 306 struct ib_send_wr tx_wr;
306 unsigned tx_outstanding; 307 unsigned tx_outstanding;
307 308
309 struct ib_recv_wr rx_wr;
310 struct ib_sge rx_sge[IPOIB_UD_RX_SG];
311
308 struct ib_wc ibwc[IPOIB_NUM_WC]; 312 struct ib_wc ibwc[IPOIB_NUM_WC];
309 313
310 struct list_head dead_ahs; 314 struct list_head dead_ahs;
@@ -366,6 +370,14 @@ struct ipoib_neigh {
366 struct list_head list; 370 struct list_head list;
367}; 371};
368 372
373#define IPOIB_UD_MTU(ib_mtu) (ib_mtu - IPOIB_ENCAP_LEN)
374#define IPOIB_UD_BUF_SIZE(ib_mtu) (ib_mtu + IB_GRH_BYTES)
375
376static inline int ipoib_ud_need_sg(unsigned int ib_mtu)
377{
378 return IPOIB_UD_BUF_SIZE(ib_mtu) > PAGE_SIZE;
379}
380
369/* 381/*
370 * We stash a pointer to our private neighbour information after our 382 * We stash a pointer to our private neighbour information after our
371 * hardware address in neigh->ha. The ALIGN() expression here makes 383 * hardware address in neigh->ha. The ALIGN() expression here makes
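The new macros make the scatter/gather decision explicit. Worked through with 4 KB pages (IB_GRH_BYTES = 40, IPOIB_ENCAP_LEN = 4):

    /*
     * 2K IB MTU: IPOIB_UD_BUF_SIZE(2048) = 2048 + 40 = 2088 <= PAGE_SIZE
     *            -> ipoib_ud_need_sg() is false: one linear buffer, one SGE
     *
     * 4K IB MTU: IPOIB_UD_BUF_SIZE(4096) = 4096 + 40 = 4136 >  PAGE_SIZE
     *            -> ipoib_ud_need_sg() is true: a 44-byte head
     *               (IPOIB_UD_HEAD_SIZE = 40 + 4) for GRH plus encap header,
     *               and one full-page fragment -- hence IPOIB_UD_RX_SG = 2
     */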
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 0205eb7c1bd3..7cf1fa7074ab 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -89,28 +89,59 @@ void ipoib_free_ah(struct kref *kref)
89 spin_unlock_irqrestore(&priv->lock, flags); 89 spin_unlock_irqrestore(&priv->lock, flags);
90} 90}
91 91
92static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
93 u64 mapping[IPOIB_UD_RX_SG])
94{
95 if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
96 ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_UD_HEAD_SIZE,
97 DMA_FROM_DEVICE);
98 ib_dma_unmap_page(priv->ca, mapping[1], PAGE_SIZE,
99 DMA_FROM_DEVICE);
100 } else
101 ib_dma_unmap_single(priv->ca, mapping[0],
102 IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
103 DMA_FROM_DEVICE);
104}
105
106static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv,
107 struct sk_buff *skb,
108 unsigned int length)
109{
110 if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
111 skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
112 unsigned int size;
113 /*
 114 * Only two buffers are needed for a 4K max payload:
 115 * the first buffer's size is IPOIB_UD_HEAD_SIZE
116 */
117 skb->tail += IPOIB_UD_HEAD_SIZE;
118 skb->len += length;
119
120 size = length - IPOIB_UD_HEAD_SIZE;
121
122 frag->size = size;
123 skb->data_len += size;
124 skb->truesize += size;
125 } else
126 skb_put(skb, length);
127
128}
129
92static int ipoib_ib_post_receive(struct net_device *dev, int id) 130static int ipoib_ib_post_receive(struct net_device *dev, int id)
93{ 131{
94 struct ipoib_dev_priv *priv = netdev_priv(dev); 132 struct ipoib_dev_priv *priv = netdev_priv(dev);
95 struct ib_sge list;
96 struct ib_recv_wr param;
97 struct ib_recv_wr *bad_wr; 133 struct ib_recv_wr *bad_wr;
98 int ret; 134 int ret;
99 135
100 list.addr = priv->rx_ring[id].mapping; 136 priv->rx_wr.wr_id = id | IPOIB_OP_RECV;
101 list.length = IPOIB_BUF_SIZE; 137 priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
102 list.lkey = priv->mr->lkey; 138 priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];
103 139
104 param.next = NULL;
105 param.wr_id = id | IPOIB_OP_RECV;
106 param.sg_list = &list;
107 param.num_sge = 1;
108 140
109 ret = ib_post_recv(priv->qp, &param, &bad_wr); 141 ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
110 if (unlikely(ret)) { 142 if (unlikely(ret)) {
111 ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret); 143 ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
112 ib_dma_unmap_single(priv->ca, priv->rx_ring[id].mapping, 144 ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
113 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
114 dev_kfree_skb_any(priv->rx_ring[id].skb); 145 dev_kfree_skb_any(priv->rx_ring[id].skb);
115 priv->rx_ring[id].skb = NULL; 146 priv->rx_ring[id].skb = NULL;
116 } 147 }
@@ -118,15 +149,21 @@ static int ipoib_ib_post_receive(struct net_device *dev, int id)
118 return ret; 149 return ret;
119} 150}
120 151
121static int ipoib_alloc_rx_skb(struct net_device *dev, int id) 152static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
122{ 153{
123 struct ipoib_dev_priv *priv = netdev_priv(dev); 154 struct ipoib_dev_priv *priv = netdev_priv(dev);
124 struct sk_buff *skb; 155 struct sk_buff *skb;
125 u64 addr; 156 int buf_size;
157 u64 *mapping;
126 158
127 skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4); 159 if (ipoib_ud_need_sg(priv->max_ib_mtu))
128 if (!skb) 160 buf_size = IPOIB_UD_HEAD_SIZE;
129 return -ENOMEM; 161 else
162 buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
163
164 skb = dev_alloc_skb(buf_size + 4);
165 if (unlikely(!skb))
166 return NULL;
130 167
131 /* 168 /*
132 * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte 169 * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
@@ -135,17 +172,32 @@ static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
135 */ 172 */
136 skb_reserve(skb, 4); 173 skb_reserve(skb, 4);
137 174
138 addr = ib_dma_map_single(priv->ca, skb->data, IPOIB_BUF_SIZE, 175 mapping = priv->rx_ring[id].mapping;
139 DMA_FROM_DEVICE); 176 mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
140 if (unlikely(ib_dma_mapping_error(priv->ca, addr))) { 177 DMA_FROM_DEVICE);
141 dev_kfree_skb_any(skb); 178 if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
142 return -EIO; 179 goto error;
180
181 if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
182 struct page *page = alloc_page(GFP_ATOMIC);
183 if (!page)
184 goto partial_error;
185 skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE);
186 mapping[1] =
187 ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[0].page,
188 0, PAGE_SIZE, DMA_FROM_DEVICE);
189 if (unlikely(ib_dma_mapping_error(priv->ca, mapping[1])))
190 goto partial_error;
143 } 191 }
144 192
145 priv->rx_ring[id].skb = skb; 193 priv->rx_ring[id].skb = skb;
146 priv->rx_ring[id].mapping = addr; 194 return skb;
147 195
148 return 0; 196partial_error:
197 ib_dma_unmap_single(priv->ca, mapping[0], buf_size, DMA_FROM_DEVICE);
198error:
199 dev_kfree_skb_any(skb);
200 return NULL;
149} 201}
150 202
151static int ipoib_ib_post_receives(struct net_device *dev) 203static int ipoib_ib_post_receives(struct net_device *dev)
@@ -154,7 +206,7 @@ static int ipoib_ib_post_receives(struct net_device *dev)
154 int i; 206 int i;
155 207
156 for (i = 0; i < ipoib_recvq_size; ++i) { 208 for (i = 0; i < ipoib_recvq_size; ++i) {
157 if (ipoib_alloc_rx_skb(dev, i)) { 209 if (!ipoib_alloc_rx_skb(dev, i)) {
158 ipoib_warn(priv, "failed to allocate receive buffer %d\n", i); 210 ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
159 return -ENOMEM; 211 return -ENOMEM;
160 } 212 }
@@ -172,7 +224,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
172 struct ipoib_dev_priv *priv = netdev_priv(dev); 224 struct ipoib_dev_priv *priv = netdev_priv(dev);
173 unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV; 225 unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
174 struct sk_buff *skb; 226 struct sk_buff *skb;
175 u64 addr; 227 u64 mapping[IPOIB_UD_RX_SG];
176 228
177 ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n", 229 ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
178 wr_id, wc->status); 230 wr_id, wc->status);
@@ -184,15 +236,13 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
184 } 236 }
185 237
186 skb = priv->rx_ring[wr_id].skb; 238 skb = priv->rx_ring[wr_id].skb;
187 addr = priv->rx_ring[wr_id].mapping;
188 239
189 if (unlikely(wc->status != IB_WC_SUCCESS)) { 240 if (unlikely(wc->status != IB_WC_SUCCESS)) {
190 if (wc->status != IB_WC_WR_FLUSH_ERR) 241 if (wc->status != IB_WC_WR_FLUSH_ERR)
191 ipoib_warn(priv, "failed recv event " 242 ipoib_warn(priv, "failed recv event "
192 "(status=%d, wrid=%d vend_err %x)\n", 243 "(status=%d, wrid=%d vend_err %x)\n",
193 wc->status, wr_id, wc->vendor_err); 244 wc->status, wr_id, wc->vendor_err);
194 ib_dma_unmap_single(priv->ca, addr, 245 ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
195 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
196 dev_kfree_skb_any(skb); 246 dev_kfree_skb_any(skb);
197 priv->rx_ring[wr_id].skb = NULL; 247 priv->rx_ring[wr_id].skb = NULL;
198 return; 248 return;
@@ -205,11 +255,14 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
205 if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num) 255 if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
206 goto repost; 256 goto repost;
207 257
258 memcpy(mapping, priv->rx_ring[wr_id].mapping,
259 IPOIB_UD_RX_SG * sizeof *mapping);
260
208 /* 261 /*
209 * If we can't allocate a new RX buffer, dump 262 * If we can't allocate a new RX buffer, dump
210 * this packet and reuse the old buffer. 263 * this packet and reuse the old buffer.
211 */ 264 */
212 if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) { 265 if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) {
213 ++dev->stats.rx_dropped; 266 ++dev->stats.rx_dropped;
214 goto repost; 267 goto repost;
215 } 268 }
@@ -217,9 +270,9 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
217 ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n", 270 ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
218 wc->byte_len, wc->slid); 271 wc->byte_len, wc->slid);
219 272
220 ib_dma_unmap_single(priv->ca, addr, IPOIB_BUF_SIZE, DMA_FROM_DEVICE); 273 ipoib_ud_dma_unmap_rx(priv, mapping);
274 ipoib_ud_skb_put_frags(priv, skb, wc->byte_len);
221 275
222 skb_put(skb, wc->byte_len);
223 skb_pull(skb, IB_GRH_BYTES); 276 skb_pull(skb, IB_GRH_BYTES);
224 277
225 skb->protocol = ((struct ipoib_header *) skb->data)->proto; 278 skb->protocol = ((struct ipoib_header *) skb->data)->proto;
@@ -733,10 +786,8 @@ int ipoib_ib_dev_stop(struct net_device *dev, int flush)
733 rx_req = &priv->rx_ring[i]; 786 rx_req = &priv->rx_ring[i];
734 if (!rx_req->skb) 787 if (!rx_req->skb)
735 continue; 788 continue;
736 ib_dma_unmap_single(priv->ca, 789 ipoib_ud_dma_unmap_rx(priv,
737 rx_req->mapping, 790 priv->rx_ring[i].mapping);
738 IPOIB_BUF_SIZE,
739 DMA_FROM_DEVICE);
740 dev_kfree_skb_any(rx_req->skb); 791 dev_kfree_skb_any(rx_req->skb);
741 rx_req->skb = NULL; 792 rx_req->skb = NULL;
742 } 793 }
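Two details of the reworked receive path are worth calling out: ipoib_alloc_rx_skb() now returns the new skb (NULL on failure) instead of an errno, and the completion handler must copy the old DMA addresses aside before reallocating, because the allocation overwrites rx_ring[wr_id].mapping in place. The resulting dump-and-reuse flow, condensed from the hunks above:

    struct sk_buff *skb = priv->rx_ring[wr_id].skb;
    u64 mapping[IPOIB_UD_RX_SG];

    /* save the old addresses before ipoib_alloc_rx_skb() clobbers them */
    memcpy(mapping, priv->rx_ring[wr_id].mapping,
           IPOIB_UD_RX_SG * sizeof *mapping);

    if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) {
            ++dev->stats.rx_dropped;        /* no replacement: repost old buffer */
            goto repost;
    }

    ipoib_ud_dma_unmap_rx(priv, mapping);   /* unmap the *old* buffer */
    ipoib_ud_skb_put_frags(priv, skb, wc->byte_len);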
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index bd07f02cf02b..7a4ed9d3d844 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -195,7 +195,7 @@ static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
195 return 0; 195 return 0;
196 } 196 }
197 197
198 if (new_mtu > IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN) 198 if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
199 return -EINVAL; 199 return -EINVAL;
200 200
201 priv->admin_mtu = new_mtu; 201 priv->admin_mtu = new_mtu;
@@ -971,10 +971,6 @@ static void ipoib_setup(struct net_device *dev)
971 NETIF_F_LLTX | 971 NETIF_F_LLTX |
972 NETIF_F_HIGHDMA); 972 NETIF_F_HIGHDMA);
973 973
974 /* MTU will be reset when mcast join happens */
975 dev->mtu = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN;
976 priv->mcast_mtu = priv->admin_mtu = dev->mtu;
977
978 memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN); 974 memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
979 975
980 netif_carrier_off(dev); 976 netif_carrier_off(dev);
@@ -1107,6 +1103,7 @@ static struct net_device *ipoib_add_port(const char *format,
1107{ 1103{
1108 struct ipoib_dev_priv *priv; 1104 struct ipoib_dev_priv *priv;
1109 struct ib_device_attr *device_attr; 1105 struct ib_device_attr *device_attr;
1106 struct ib_port_attr attr;
1110 int result = -ENOMEM; 1107 int result = -ENOMEM;
1111 1108
1112 priv = ipoib_intf_alloc(format); 1109 priv = ipoib_intf_alloc(format);
@@ -1115,6 +1112,18 @@ static struct net_device *ipoib_add_port(const char *format,
1115 1112
1116 SET_NETDEV_DEV(priv->dev, hca->dma_device); 1113 SET_NETDEV_DEV(priv->dev, hca->dma_device);
1117 1114
1115 if (!ib_query_port(hca, port, &attr))
1116 priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
1117 else {
1118 printk(KERN_WARNING "%s: ib_query_port %d failed\n",
1119 hca->name, port);
1120 goto device_init_failed;
1121 }
1122
1123 /* MTU will be reset when mcast join happens */
1124 priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
1125 priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
1126
1118 result = ib_query_pkey(hca, port, 0, &priv->pkey); 1127 result = ib_query_pkey(hca, port, 0, &priv->pkey);
1119 if (result) { 1128 if (result) {
1120 printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n", 1129 printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 31a53c5bcb13..d00a2c174aee 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -567,8 +567,7 @@ void ipoib_mcast_join_task(struct work_struct *work)
567 return; 567 return;
568 } 568 }
569 569
570 priv->mcast_mtu = ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu) - 570 priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
571 IPOIB_ENCAP_LEN;
572 571
573 if (!ipoib_cm_admin_enabled(dev)) 572 if (!ipoib_cm_admin_enabled(dev))
574 dev->mtu = min(priv->mcast_mtu, priv->admin_mtu); 573 dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 8a20e3742c43..07c03f178a49 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -150,7 +150,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
150 .max_send_wr = ipoib_sendq_size, 150 .max_send_wr = ipoib_sendq_size,
151 .max_recv_wr = ipoib_recvq_size, 151 .max_recv_wr = ipoib_recvq_size,
152 .max_send_sge = 1, 152 .max_send_sge = 1,
153 .max_recv_sge = 1 153 .max_recv_sge = IPOIB_UD_RX_SG
154 }, 154 },
155 .sq_sig_type = IB_SIGNAL_ALL_WR, 155 .sq_sig_type = IB_SIGNAL_ALL_WR,
156 .qp_type = IB_QPT_UD 156 .qp_type = IB_QPT_UD
@@ -215,6 +215,19 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
215 priv->tx_wr.sg_list = priv->tx_sge; 215 priv->tx_wr.sg_list = priv->tx_sge;
216 priv->tx_wr.send_flags = IB_SEND_SIGNALED; 216 priv->tx_wr.send_flags = IB_SEND_SIGNALED;
217 217
218 priv->rx_sge[0].lkey = priv->mr->lkey;
219 if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
220 priv->rx_sge[0].length = IPOIB_UD_HEAD_SIZE;
221 priv->rx_sge[1].length = PAGE_SIZE;
222 priv->rx_sge[1].lkey = priv->mr->lkey;
223 priv->rx_wr.num_sge = IPOIB_UD_RX_SG;
224 } else {
225 priv->rx_sge[0].length = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
226 priv->rx_wr.num_sge = 1;
227 }
228 priv->rx_wr.next = NULL;
229 priv->rx_wr.sg_list = priv->rx_sge;
230
218 return 0; 231 return 0;
219 232
220out_free_cq: 233out_free_cq:
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 293f5b892e3f..431fdeaa2dc4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -89,6 +89,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
89 goto err; 89 goto err;
90 } 90 }
91 91
92 priv->max_ib_mtu = ppriv->max_ib_mtu;
92 set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags); 93 set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
93 94
94 priv->pkey = pkey; 95 priv->pkey = pkey;
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 4b07bdadb81e..b29e3affb805 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -444,6 +444,23 @@ exit:
444 __FUNCTION__, retval); 444 __FUNCTION__, retval);
445} 445}
446 446
447static void xpad_bulk_out(struct urb *urb)
448{
449 switch (urb->status) {
450 case 0:
451 /* success */
452 break;
453 case -ECONNRESET:
454 case -ENOENT:
455 case -ESHUTDOWN:
456 /* this urb is terminated, clean up */
457 dbg("%s - urb shutting down with status: %d", __FUNCTION__, urb->status);
458 break;
459 default:
460 dbg("%s - nonzero urb status received: %d", __FUNCTION__, urb->status);
461 }
462}
463
447#if defined(CONFIG_JOYSTICK_XPAD_FF) || defined(CONFIG_JOYSTICK_XPAD_LEDS) 464#if defined(CONFIG_JOYSTICK_XPAD_FF) || defined(CONFIG_JOYSTICK_XPAD_LEDS)
448static void xpad_irq_out(struct urb *urb) 465static void xpad_irq_out(struct urb *urb)
449{ 466{
@@ -475,23 +492,6 @@ exit:
475 __FUNCTION__, retval); 492 __FUNCTION__, retval);
476} 493}
477 494
478static void xpad_bulk_out(struct urb *urb)
479{
480 switch (urb->status) {
481 case 0:
482 /* success */
483 break;
484 case -ECONNRESET:
485 case -ENOENT:
486 case -ESHUTDOWN:
487 /* this urb is terminated, clean up */
488 dbg("%s - urb shutting down with status: %d", __FUNCTION__, urb->status);
489 break;
490 default:
491 dbg("%s - nonzero urb status received: %d", __FUNCTION__, urb->status);
492 }
493}
494
495static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad) 495static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
496{ 496{
497 struct usb_endpoint_descriptor *ep_irq_out; 497 struct usb_endpoint_descriptor *ep_irq_out;
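The xpad.c change moves xpad_bulk_out() verbatim above the CONFIG_JOYSTICK_XPAD_FF/CONFIG_JOYSTICK_XPAD_LEDS block, presumably so the completion callback is defined before the code that references it regardless of which of those options is set. The body is the standard URB completion skeleton:

    static void my_bulk_complete(struct urb *urb)   /* hypothetical name */
    {
            switch (urb->status) {
            case 0:                 /* success */
                    break;
            case -ECONNRESET:       /* urb was unlinked */
            case -ENOENT:           /* urb was killed */
            case -ESHUTDOWN:        /* device is going away */
                    break;          /* clean teardown, nothing to resubmit */
            default:
                    dbg("%s - nonzero urb status: %d", __FUNCTION__, urb->status);
            }
    }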
diff --git a/drivers/macintosh/mac_hid.c b/drivers/macintosh/mac_hid.c
index f972ff377b63..cc9f27514aef 100644
--- a/drivers/macintosh/mac_hid.c
+++ b/drivers/macintosh/mac_hid.c
@@ -114,8 +114,8 @@ static int emumousebtn_input_register(void)
114 if (!emumousebtn) 114 if (!emumousebtn)
115 return -ENOMEM; 115 return -ENOMEM;
116 116
117 lockdep_set_class(emumousebtn->event_lock, &emumousebtn_event_class); 117 lockdep_set_class(&emumousebtn->event_lock, &emumousebtn_event_class);
118 lockdep_set_class(emumousebtn->mutex, &emumousebtn_mutex_class); 118 lockdep_set_class(&emumousebtn->mutex, &emumousebtn_mutex_class);
119 119
120 emumousebtn->name = "Macintosh mouse button emulation"; 120 emumousebtn->name = "Macintosh mouse button emulation";
121 emumousebtn->id.bustype = BUS_ADB; 121 emumousebtn->id.bustype = BUS_ADB;
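The mac_hid.c fix is a missing address-of: lockdep_set_class() dereferences its first argument to reach the lock's dep_map, so it must be handed a pointer to the lock, not the lock itself (with lockdep enabled, the original form does not compile). A minimal sketch with a hypothetical lock:

    static struct lock_class_key demo_key;
    static spinlock_t demo_lock;

    static void demo_init(void)
    {
            spin_lock_init(&demo_lock);
            lockdep_set_class(&demo_lock, &demo_key);   /* note the & */
    }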
diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
index 6477fc66cc23..346223856f59 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
@@ -299,7 +299,7 @@ static int stk7700d_tuner_attach(struct dvb_usb_adapter *adap)
299} 299}
300 300
301/* STK7700-PH: Digital/Analog Hybrid Tuner, e.g. Cinergy HT USB HE */ 301
302struct dibx000_agc_config xc3028_agc_config = { 302static struct dibx000_agc_config xc3028_agc_config = {
303 BAND_VHF | BAND_UHF, /* band_caps */ 303 BAND_VHF | BAND_UHF, /* band_caps */
304 304
305 /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=0, 305 /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=0,
@@ -342,7 +342,7 @@ struct dibx000_agc_config xc3028_agc_config = {
342}; 342};
343 343
344/* PLL Configuration for COFDM BW_MHz = 8.00 with external clock = 30.00 */ 344/* PLL Configuration for COFDM BW_MHz = 8.00 with external clock = 30.00 */
345struct dibx000_bandwidth_config xc3028_bw_config = { 345static struct dibx000_bandwidth_config xc3028_bw_config = {
346 60000, 30000, /* internal, sampling */ 346 60000, 30000, /* internal, sampling */
347 1, 8, 3, 1, 0, /* pll_cfg: prediv, ratio, range, reset, bypass */ 347 1, 8, 3, 1, 0, /* pll_cfg: prediv, ratio, range, reset, bypass */
348 0, 0, 1, 1, 0, /* misc: refdiv, bypclk_div, IO_CLK_en_core, ADClkSrc, 348 0, 0, 1, 1, 0, /* misc: refdiv, bypclk_div, IO_CLK_en_core, ADClkSrc,
diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig
index 68fab616f55d..f5fceb3cdb3c 100644
--- a/drivers/media/dvb/frontends/Kconfig
+++ b/drivers/media/dvb/frontends/Kconfig
@@ -307,6 +307,14 @@ config DVB_AU8522
307 An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want 307 An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
308 to support this frontend. 308 to support this frontend.
309 309
310config DVB_S5H1411
311 tristate "Samsung S5H1411 based"
312 depends on DVB_CORE && I2C
313 default m if DVB_FE_CUSTOMISE
314 help
 315 An ATSC 8VSB and QAM64/256 demodulator module. Say Y when you want
316 to support this frontend.
317
310comment "Tuners/PLL support" 318comment "Tuners/PLL support"
311 depends on DVB_CORE 319 depends on DVB_CORE
312 320
diff --git a/drivers/media/dvb/frontends/Makefile b/drivers/media/dvb/frontends/Makefile
index 2f873fc0f649..9747c73dc826 100644
--- a/drivers/media/dvb/frontends/Makefile
+++ b/drivers/media/dvb/frontends/Makefile
@@ -55,3 +55,4 @@ obj-$(CONFIG_DVB_TUNER_XC5000) += xc5000.o
55obj-$(CONFIG_DVB_TUNER_ITD1000) += itd1000.o 55obj-$(CONFIG_DVB_TUNER_ITD1000) += itd1000.o
56obj-$(CONFIG_DVB_AU8522) += au8522.o 56obj-$(CONFIG_DVB_AU8522) += au8522.o
57obj-$(CONFIG_DVB_TDA10048) += tda10048.o 57obj-$(CONFIG_DVB_TDA10048) += tda10048.o
58obj-$(CONFIG_DVB_S5H1411) += s5h1411.o
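With the Kconfig entry and Makefile rule in place, the new S5H1411 frontend (its source is added further below) is wired into the build like any other demod. A hedged sketch of board-side usage, built only from symbols visible in this diff — s5h1411_attach(), the config fields the driver reads, and the constants it tests; the field values themselves are illustrative:

    static const struct s5h1411_config demo_config = {  /* hypothetical board config */
            .output_mode = S5H1411_SERIAL_OUTPUT,
            .mpeg_timing = S5H1411_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK,
            .status_mode = S5H1411_DEMODLOCKING,
            .vsb_if      = 44000,   /* kHz, see s5h1411_set_if_freq() */
            .qam_if      = 44000,
            .inversion   = 0,
            .gpio        = 1,
    };

    fe = s5h1411_attach(&demo_config, i2c_adap);
    if (fe == NULL)
            return -ENODEV;         /* identity register check failed */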
diff --git a/drivers/media/dvb/frontends/mt312.h b/drivers/media/dvb/frontends/mt312.h
index 96338f0c4dd4..de796eab3911 100644
--- a/drivers/media/dvb/frontends/mt312.h
+++ b/drivers/media/dvb/frontends/mt312.h
@@ -33,7 +33,7 @@ struct mt312_config {
33 u8 demod_address; 33 u8 demod_address;
34 34
35 /* inverted voltage setting */ 35 /* inverted voltage setting */
36 int voltage_inverted:1; 36 unsigned int voltage_inverted:1;
37}; 37};
38 38
39#if defined(CONFIG_DVB_MT312) || (defined(CONFIG_DVB_MT312_MODULE) && defined(MODULE)) 39#if defined(CONFIG_DVB_MT312) || (defined(CONFIG_DVB_MT312_MODULE) && defined(MODULE))
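The mt312.h one-liner fixes a classic bitfield trap: with gcc a plain int bitfield is signed, so a 1-bit field can only hold 0 and -1, and voltage_inverted = 1 reads back as -1. A standalone (hosted C, not kernel) illustration:

    #include <stdio.h>

    struct demo {
            int          s : 1;     /* signed 1-bit field: values 0 and -1 */
            unsigned int u : 1;     /* unsigned 1-bit field: values 0 and 1 */
    };

    int main(void)
    {
            struct demo d = { .s = 1, .u = 1 };

            /* with gcc prints: "s=-1 u=1, (s == 1) is 0" */
            printf("s=%d u=%u, (s == 1) is %d\n", d.s, d.u, d.s == 1);
            return 0;
    }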
diff --git a/drivers/media/dvb/frontends/s5h1411.c b/drivers/media/dvb/frontends/s5h1411.c
new file mode 100644
index 000000000000..eb5bfc99d4e9
--- /dev/null
+++ b/drivers/media/dvb/frontends/s5h1411.c
@@ -0,0 +1,888 @@
1/*
2 Samsung S5H1411 VSB/QAM demodulator driver
3
4 Copyright (C) 2008 Steven Toth <stoth@hauppauge.com>
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19
20*/
21
22#include <linux/kernel.h>
23#include <linux/init.h>
24#include <linux/module.h>
25#include <linux/string.h>
26#include <linux/slab.h>
27#include <linux/delay.h>
28#include "dvb_frontend.h"
29#include "dvb-pll.h"
30#include "s5h1411.h"
31
32struct s5h1411_state {
33
34 struct i2c_adapter *i2c;
35
36 /* configuration settings */
37 const struct s5h1411_config *config;
38
39 struct dvb_frontend frontend;
40
41 fe_modulation_t current_modulation;
42
43 u32 current_frequency;
44 int if_freq;
45
46 u8 inversion;
47};
48
49static int debug;
50
51#define dprintk(arg...) do { \
52 if (debug) \
53 printk(arg); \
54 } while (0)
55
56/* Register values to initialise the demod, defaults to VSB */
57static struct init_tab {
58 u8 addr;
59 u8 reg;
60 u16 data;
61} init_tab[] = {
62 { S5H1411_I2C_TOP_ADDR, 0x00, 0x0071, },
63 { S5H1411_I2C_TOP_ADDR, 0x08, 0x0047, },
64 { S5H1411_I2C_TOP_ADDR, 0x1c, 0x0400, },
65 { S5H1411_I2C_TOP_ADDR, 0x1e, 0x0370, },
66 { S5H1411_I2C_TOP_ADDR, 0x1f, 0x342a, },
67 { S5H1411_I2C_TOP_ADDR, 0x24, 0x0231, },
68 { S5H1411_I2C_TOP_ADDR, 0x25, 0x1011, },
69 { S5H1411_I2C_TOP_ADDR, 0x26, 0x0f07, },
70 { S5H1411_I2C_TOP_ADDR, 0x27, 0x0f04, },
71 { S5H1411_I2C_TOP_ADDR, 0x28, 0x070f, },
72 { S5H1411_I2C_TOP_ADDR, 0x29, 0x2820, },
73 { S5H1411_I2C_TOP_ADDR, 0x2a, 0x102e, },
74 { S5H1411_I2C_TOP_ADDR, 0x2b, 0x0220, },
75 { S5H1411_I2C_TOP_ADDR, 0x2e, 0x0d0e, },
76 { S5H1411_I2C_TOP_ADDR, 0x2f, 0x1013, },
77 { S5H1411_I2C_TOP_ADDR, 0x31, 0x171b, },
78 { S5H1411_I2C_TOP_ADDR, 0x32, 0x0e0f, },
79 { S5H1411_I2C_TOP_ADDR, 0x33, 0x0f10, },
80 { S5H1411_I2C_TOP_ADDR, 0x34, 0x170e, },
81 { S5H1411_I2C_TOP_ADDR, 0x35, 0x4b10, },
82 { S5H1411_I2C_TOP_ADDR, 0x36, 0x0f17, },
83 { S5H1411_I2C_TOP_ADDR, 0x3c, 0x1577, },
84 { S5H1411_I2C_TOP_ADDR, 0x3d, 0x081a, },
85 { S5H1411_I2C_TOP_ADDR, 0x3e, 0x77ee, },
86 { S5H1411_I2C_TOP_ADDR, 0x40, 0x1e09, },
87 { S5H1411_I2C_TOP_ADDR, 0x41, 0x0f0c, },
88 { S5H1411_I2C_TOP_ADDR, 0x42, 0x1f10, },
89 { S5H1411_I2C_TOP_ADDR, 0x4d, 0x0509, },
90 { S5H1411_I2C_TOP_ADDR, 0x4e, 0x0a00, },
91 { S5H1411_I2C_TOP_ADDR, 0x50, 0x0000, },
92 { S5H1411_I2C_TOP_ADDR, 0x5b, 0x0000, },
93 { S5H1411_I2C_TOP_ADDR, 0x5c, 0x0008, },
94 { S5H1411_I2C_TOP_ADDR, 0x57, 0x1101, },
95 { S5H1411_I2C_TOP_ADDR, 0x65, 0x007c, },
96 { S5H1411_I2C_TOP_ADDR, 0x68, 0x0512, },
97 { S5H1411_I2C_TOP_ADDR, 0x69, 0x0258, },
98 { S5H1411_I2C_TOP_ADDR, 0x70, 0x0004, },
99 { S5H1411_I2C_TOP_ADDR, 0x71, 0x0007, },
100 { S5H1411_I2C_TOP_ADDR, 0x76, 0x00a9, },
101 { S5H1411_I2C_TOP_ADDR, 0x78, 0x3141, },
102 { S5H1411_I2C_TOP_ADDR, 0x7a, 0x3141, },
103 { S5H1411_I2C_TOP_ADDR, 0xb3, 0x8003, },
104 { S5H1411_I2C_TOP_ADDR, 0xb5, 0xafbb, },
105 { S5H1411_I2C_TOP_ADDR, 0xb5, 0xa6bb, },
106 { S5H1411_I2C_TOP_ADDR, 0xb6, 0x0609, },
107 { S5H1411_I2C_TOP_ADDR, 0xb7, 0x2f06, },
108 { S5H1411_I2C_TOP_ADDR, 0xb8, 0x003f, },
109 { S5H1411_I2C_TOP_ADDR, 0xb9, 0x2700, },
110 { S5H1411_I2C_TOP_ADDR, 0xba, 0xfac8, },
111 { S5H1411_I2C_TOP_ADDR, 0xbe, 0x1003, },
112 { S5H1411_I2C_TOP_ADDR, 0xbf, 0x103f, },
113 { S5H1411_I2C_TOP_ADDR, 0xce, 0x2000, },
114 { S5H1411_I2C_TOP_ADDR, 0xcf, 0x0800, },
115 { S5H1411_I2C_TOP_ADDR, 0xd0, 0x0800, },
116 { S5H1411_I2C_TOP_ADDR, 0xd1, 0x0400, },
117 { S5H1411_I2C_TOP_ADDR, 0xd2, 0x0800, },
118 { S5H1411_I2C_TOP_ADDR, 0xd3, 0x2000, },
119 { S5H1411_I2C_TOP_ADDR, 0xd4, 0x3000, },
120 { S5H1411_I2C_TOP_ADDR, 0xdb, 0x4a9b, },
121 { S5H1411_I2C_TOP_ADDR, 0xdc, 0x1000, },
122 { S5H1411_I2C_TOP_ADDR, 0xde, 0x0001, },
123 { S5H1411_I2C_TOP_ADDR, 0xdf, 0x0000, },
124 { S5H1411_I2C_TOP_ADDR, 0xe3, 0x0301, },
125 { S5H1411_I2C_QAM_ADDR, 0xf3, 0x0000, },
126 { S5H1411_I2C_QAM_ADDR, 0xf3, 0x0001, },
127 { S5H1411_I2C_QAM_ADDR, 0x08, 0x0600, },
128 { S5H1411_I2C_QAM_ADDR, 0x18, 0x4201, },
129 { S5H1411_I2C_QAM_ADDR, 0x1e, 0x6476, },
130 { S5H1411_I2C_QAM_ADDR, 0x21, 0x0830, },
131 { S5H1411_I2C_QAM_ADDR, 0x0c, 0x5679, },
132 { S5H1411_I2C_QAM_ADDR, 0x0d, 0x579b, },
133 { S5H1411_I2C_QAM_ADDR, 0x24, 0x0102, },
134 { S5H1411_I2C_QAM_ADDR, 0x31, 0x7488, },
135 { S5H1411_I2C_QAM_ADDR, 0x32, 0x0a08, },
136 { S5H1411_I2C_QAM_ADDR, 0x3d, 0x8689, },
137 { S5H1411_I2C_QAM_ADDR, 0x49, 0x0048, },
138 { S5H1411_I2C_QAM_ADDR, 0x57, 0x2012, },
139 { S5H1411_I2C_QAM_ADDR, 0x5d, 0x7676, },
140 { S5H1411_I2C_QAM_ADDR, 0x04, 0x0400, },
141 { S5H1411_I2C_QAM_ADDR, 0x58, 0x00c0, },
142 { S5H1411_I2C_QAM_ADDR, 0x5b, 0x0100, },
143};
144
145/* VSB SNR lookup table */
146static struct vsb_snr_tab {
147 u16 val;
148 u16 data;
149} vsb_snr_tab[] = {
150 { 0x39f, 300, },
151 { 0x39b, 295, },
152 { 0x397, 290, },
153 { 0x394, 285, },
154 { 0x38f, 280, },
155 { 0x38b, 275, },
156 { 0x387, 270, },
157 { 0x382, 265, },
158 { 0x37d, 260, },
159 { 0x377, 255, },
160 { 0x370, 250, },
161 { 0x36a, 245, },
162 { 0x364, 240, },
163 { 0x35b, 235, },
164 { 0x353, 230, },
165 { 0x349, 225, },
 166 { 0x340, 220, },
167 { 0x337, 215, },
168 { 0x327, 210, },
169 { 0x31b, 205, },
170 { 0x310, 200, },
171 { 0x302, 195, },
172 { 0x2f3, 190, },
173 { 0x2e4, 185, },
174 { 0x2d7, 180, },
175 { 0x2cd, 175, },
176 { 0x2bb, 170, },
177 { 0x2a9, 165, },
178 { 0x29e, 160, },
179 { 0x284, 155, },
180 { 0x27a, 150, },
181 { 0x260, 145, },
182 { 0x23a, 140, },
183 { 0x224, 135, },
184 { 0x213, 130, },
185 { 0x204, 125, },
186 { 0x1fe, 120, },
187 { 0, 0, },
188};
189
190/* QAM64 SNR lookup table */
191static struct qam64_snr_tab {
192 u16 val;
193 u16 data;
194} qam64_snr_tab[] = {
195 { 0x0001, 0, },
196 { 0x0af0, 300, },
197 { 0x0d80, 290, },
198 { 0x10a0, 280, },
199 { 0x14b5, 270, },
200 { 0x1590, 268, },
201 { 0x1680, 266, },
202 { 0x17b0, 264, },
203 { 0x18c0, 262, },
204 { 0x19b0, 260, },
205 { 0x1ad0, 258, },
206 { 0x1d00, 256, },
207 { 0x1da0, 254, },
208 { 0x1ef0, 252, },
209 { 0x2050, 250, },
210 { 0x20f0, 249, },
211 { 0x21d0, 248, },
212 { 0x22b0, 247, },
213 { 0x23a0, 246, },
214 { 0x2470, 245, },
215 { 0x24f0, 244, },
216 { 0x25a0, 243, },
217 { 0x26c0, 242, },
218 { 0x27b0, 241, },
219 { 0x28d0, 240, },
220 { 0x29b0, 239, },
221 { 0x2ad0, 238, },
222 { 0x2ba0, 237, },
223 { 0x2c80, 236, },
224 { 0x2d20, 235, },
225 { 0x2e00, 234, },
226 { 0x2f10, 233, },
227 { 0x3050, 232, },
228 { 0x3190, 231, },
229 { 0x3300, 230, },
230 { 0x3340, 229, },
231 { 0x3200, 228, },
232 { 0x3550, 227, },
233 { 0x3610, 226, },
234 { 0x3600, 225, },
235 { 0x3700, 224, },
236 { 0x3800, 223, },
237 { 0x3920, 222, },
238 { 0x3a20, 221, },
239 { 0x3b30, 220, },
240 { 0x3d00, 219, },
241 { 0x3e00, 218, },
242 { 0x4000, 217, },
243 { 0x4100, 216, },
244 { 0x4300, 215, },
245 { 0x4400, 214, },
246 { 0x4600, 213, },
247 { 0x4700, 212, },
248 { 0x4800, 211, },
249 { 0x4a00, 210, },
250 { 0x4b00, 209, },
251 { 0x4d00, 208, },
252 { 0x4f00, 207, },
253 { 0x5050, 206, },
254 { 0x5200, 205, },
255 { 0x53c0, 204, },
256 { 0x5450, 203, },
257 { 0x5650, 202, },
258 { 0x5820, 201, },
259 { 0x6000, 200, },
260 { 0xffff, 0, },
261};
262
263/* QAM256 SNR lookup table */
264static struct qam256_snr_tab {
265 u16 val;
266 u16 data;
267} qam256_snr_tab[] = {
268 { 0x0001, 0, },
269 { 0x0970, 400, },
270 { 0x0a90, 390, },
271 { 0x0b90, 380, },
272 { 0x0d90, 370, },
273 { 0x0ff0, 360, },
274 { 0x1240, 350, },
275 { 0x1345, 348, },
276 { 0x13c0, 346, },
277 { 0x14c0, 344, },
278 { 0x1500, 342, },
279 { 0x1610, 340, },
280 { 0x1700, 338, },
281 { 0x1800, 336, },
282 { 0x18b0, 334, },
283 { 0x1900, 332, },
284 { 0x1ab0, 330, },
285 { 0x1bc0, 328, },
286 { 0x1cb0, 326, },
287 { 0x1db0, 324, },
288 { 0x1eb0, 322, },
289 { 0x2030, 320, },
290 { 0x2200, 318, },
291 { 0x2280, 316, },
292 { 0x2410, 314, },
293 { 0x25b0, 312, },
294 { 0x27a0, 310, },
295 { 0x2840, 308, },
296 { 0x29d0, 306, },
297 { 0x2b10, 304, },
298 { 0x2d30, 302, },
299 { 0x2f20, 300, },
300 { 0x30c0, 298, },
301 { 0x3260, 297, },
302 { 0x32c0, 296, },
303 { 0x3300, 295, },
304 { 0x33b0, 294, },
305 { 0x34b0, 293, },
306 { 0x35a0, 292, },
307 { 0x3650, 291, },
308 { 0x3800, 290, },
309 { 0x3900, 289, },
310 { 0x3a50, 288, },
311 { 0x3b30, 287, },
312 { 0x3cb0, 286, },
313 { 0x3e20, 285, },
314 { 0x3fa0, 284, },
315 { 0x40a0, 283, },
316 { 0x41c0, 282, },
317 { 0x42f0, 281, },
318 { 0x44a0, 280, },
319 { 0x4600, 279, },
320 { 0x47b0, 278, },
321 { 0x4900, 277, },
322 { 0x4a00, 276, },
323 { 0x4ba0, 275, },
324 { 0x4d00, 274, },
325 { 0x4f00, 273, },
326 { 0x5000, 272, },
 327 { 0x51f0, 271, },
328 { 0x53a0, 270, },
329 { 0x5520, 269, },
330 { 0x5700, 268, },
331 { 0x5800, 267, },
332 { 0x5a00, 266, },
333 { 0x5c00, 265, },
334 { 0x5d00, 264, },
335 { 0x5f00, 263, },
336 { 0x6000, 262, },
337 { 0x6200, 261, },
338 { 0x6400, 260, },
339 { 0xffff, 0, },
340};
341
342/* 8 bit registers, 16 bit values */
343static int s5h1411_writereg(struct s5h1411_state *state,
344 u8 addr, u8 reg, u16 data)
345{
346 int ret;
347 u8 buf [] = { reg, data >> 8, data & 0xff };
348
349 struct i2c_msg msg = { .addr = addr, .flags = 0, .buf = buf, .len = 3 };
350
351 ret = i2c_transfer(state->i2c, &msg, 1);
352
353 if (ret != 1)
354 printk(KERN_ERR "%s: writereg error 0x%02x 0x%02x 0x%04x, "
355 "ret == %i)\n", __func__, addr, reg, data, ret);
356
357 return (ret != 1) ? -1 : 0;
358}
359
360static u16 s5h1411_readreg(struct s5h1411_state *state, u8 addr, u8 reg)
361{
362 int ret;
363 u8 b0 [] = { reg };
364 u8 b1 [] = { 0, 0 };
365
366 struct i2c_msg msg [] = {
367 { .addr = addr, .flags = 0, .buf = b0, .len = 1 },
368 { .addr = addr, .flags = I2C_M_RD, .buf = b1, .len = 2 } };
369
370 ret = i2c_transfer(state->i2c, msg, 2);
371
372 if (ret != 2)
373 printk(KERN_ERR "%s: readreg error (ret == %i)\n",
374 __func__, ret);
375 return (b1[0] << 8) | b1[1];
376}
377
378static int s5h1411_softreset(struct dvb_frontend *fe)
379{
380 struct s5h1411_state *state = fe->demodulator_priv;
381
382 dprintk("%s()\n", __func__);
383
384 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xf7, 0);
385 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xf7, 1);
386 return 0;
387}
388
389static int s5h1411_set_if_freq(struct dvb_frontend *fe, int KHz)
390{
391 struct s5h1411_state *state = fe->demodulator_priv;
392
393 dprintk("%s(%d KHz)\n", __func__, KHz);
394
395 switch (KHz) {
396 case 3250:
397 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x38, 0x10d9);
398 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x39, 0x5342);
399 s5h1411_writereg(state, S5H1411_I2C_QAM_ADDR, 0x2c, 0x10d9);
400 break;
401 case 3500:
402 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x38, 0x1225);
403 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x39, 0x1e96);
404 s5h1411_writereg(state, S5H1411_I2C_QAM_ADDR, 0x2c, 0x1225);
405 break;
406 case 4000:
407 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x38, 0x14bc);
408 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x39, 0xb53e);
409 s5h1411_writereg(state, S5H1411_I2C_QAM_ADDR, 0x2c, 0x14bd);
410 break;
411 default:
412 dprintk("%s(%d KHz) Invalid, defaulting to 5380\n",
413 __func__, KHz);
414 /* no break, need to continue */
415 case 5380:
416 case 44000:
417 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x38, 0x1be4);
418 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x39, 0x3655);
419 s5h1411_writereg(state, S5H1411_I2C_QAM_ADDR, 0x2c, 0x1be4);
420 break;
421 }
422
423 state->if_freq = KHz;
424
425 return 0;
426}
427
428static int s5h1411_set_mpeg_timing(struct dvb_frontend *fe, int mode)
429{
430 struct s5h1411_state *state = fe->demodulator_priv;
431 u16 val;
432
433 dprintk("%s(%d)\n", __func__, mode);
434
435 val = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xbe) & 0xcfff;
436 switch (mode) {
437 case S5H1411_MPEGTIMING_CONTINOUS_INVERTING_CLOCK:
438 val |= 0x0000;
439 break;
440 case S5H1411_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK:
441 dprintk("%s(%d) Mode1 or Defaulting\n", __func__, mode);
442 val |= 0x1000;
443 break;
444 case S5H1411_MPEGTIMING_NONCONTINOUS_INVERTING_CLOCK:
445 val |= 0x2000;
446 break;
447 case S5H1411_MPEGTIMING_NONCONTINOUS_NONINVERTING_CLOCK:
448 val |= 0x3000;
449 break;
450 default:
451 return -EINVAL;
452 }
453
 454 /* Configure MPEG Signal Timing characteristics */
455 return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xbe, val);
456}
457
458static int s5h1411_set_spectralinversion(struct dvb_frontend *fe, int inversion)
459{
460 struct s5h1411_state *state = fe->demodulator_priv;
461 u16 val;
462
463 dprintk("%s(%d)\n", __func__, inversion);
464 val = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0x24) & ~0x1000;
465
466 if (inversion == 1)
467 val |= 0x1000; /* Inverted */
468 else
469 val |= 0x0000;
470
471 state->inversion = inversion;
472 return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x24, val);
473}
474
475static int s5h1411_enable_modulation(struct dvb_frontend *fe,
476 fe_modulation_t m)
477{
478 struct s5h1411_state *state = fe->demodulator_priv;
479
480 dprintk("%s(0x%08x)\n", __func__, m);
481
482 switch (m) {
483 case VSB_8:
484 dprintk("%s() VSB_8\n", __func__);
485 s5h1411_set_if_freq(fe, state->config->vsb_if);
486 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x00, 0x71);
487 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xf6, 0x00);
488 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xcd, 0xf1);
489 break;
490 case QAM_64:
491 case QAM_256:
492 dprintk("%s() QAM_AUTO (64/256)\n", __func__);
493 s5h1411_set_if_freq(fe, state->config->qam_if);
494 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x00, 0x0171);
495 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xf6, 0x0001);
496 s5h1411_writereg(state, S5H1411_I2C_QAM_ADDR, 0x16, 0x1101);
497 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xcd, 0x00f0);
498 break;
499 default:
500 dprintk("%s() Invalid modulation\n", __func__);
501 return -EINVAL;
502 }
503
504 state->current_modulation = m;
505 s5h1411_softreset(fe);
506
507 return 0;
508}
509
510static int s5h1411_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
511{
512 struct s5h1411_state *state = fe->demodulator_priv;
513
514 dprintk("%s(%d)\n", __func__, enable);
515
516 if (enable)
517 return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xf5, 1);
518 else
519 return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xf5, 0);
520}
521
522static int s5h1411_set_gpio(struct dvb_frontend *fe, int enable)
523{
524 struct s5h1411_state *state = fe->demodulator_priv;
525 u16 val;
526
527 dprintk("%s(%d)\n", __func__, enable);
528
529 val = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xe0) & ~0x02;
530
531 if (enable)
532 return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xe0,
533 val | 0x02);
534 else
535 return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xe0, val);
536}
537
538static int s5h1411_sleep(struct dvb_frontend *fe, int enable)
539{
540 struct s5h1411_state *state = fe->demodulator_priv;
541
542 dprintk("%s(%d)\n", __func__, enable);
543
544 if (enable)
545 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xf4, 1);
546 else {
547 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xf4, 0);
548 s5h1411_softreset(fe);
549 }
550
551 return 0;
552}
553
554static int s5h1411_register_reset(struct dvb_frontend *fe)
555{
556 struct s5h1411_state *state = fe->demodulator_priv;
557
558 dprintk("%s()\n", __func__);
559
560 return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xf3, 0);
561}
562
563/* Talk to the demod, set the FEC, GUARD, QAM settings etc */
564static int s5h1411_set_frontend(struct dvb_frontend *fe,
565 struct dvb_frontend_parameters *p)
566{
567 struct s5h1411_state *state = fe->demodulator_priv;
568
569 dprintk("%s(frequency=%d)\n", __func__, p->frequency);
570
571 s5h1411_softreset(fe);
572
573 state->current_frequency = p->frequency;
574
575 s5h1411_enable_modulation(fe, p->u.vsb.modulation);
576
577 /* Allow the demod to settle */
578 msleep(100);
579
580 if (fe->ops.tuner_ops.set_params) {
581 if (fe->ops.i2c_gate_ctrl)
582 fe->ops.i2c_gate_ctrl(fe, 1);
583
584 fe->ops.tuner_ops.set_params(fe, p);
585
586 if (fe->ops.i2c_gate_ctrl)
587 fe->ops.i2c_gate_ctrl(fe, 0);
588 }
589
590 return 0;
591}
592
593/* Reset the demod hardware and reset all of the configuration registers
594 to a default state. */
595static int s5h1411_init(struct dvb_frontend *fe)
596{
597 struct s5h1411_state *state = fe->demodulator_priv;
598 int i;
599
600 dprintk("%s()\n", __func__);
601
602 s5h1411_sleep(fe, 0);
603 s5h1411_register_reset(fe);
604
605 for (i = 0; i < ARRAY_SIZE(init_tab); i++)
606 s5h1411_writereg(state, init_tab[i].addr,
607 init_tab[i].reg,
608 init_tab[i].data);
609
610 /* The datasheet says that after initialisation, VSB is default */
611 state->current_modulation = VSB_8;
612
613 if (state->config->output_mode == S5H1411_SERIAL_OUTPUT)
614 /* Serial */
615 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xbd, 0x1101);
616 else
617 /* Parallel */
618 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xbd, 0x1001);
619
620 s5h1411_set_spectralinversion(fe, state->config->inversion);
621 s5h1411_set_if_freq(fe, state->config->vsb_if);
622 s5h1411_set_gpio(fe, state->config->gpio);
623 s5h1411_set_mpeg_timing(fe, state->config->mpeg_timing);
624 s5h1411_softreset(fe);
625
626 /* Note: Leaving the I2C gate closed. */
627 s5h1411_i2c_gate_ctrl(fe, 0);
628
629 return 0;
630}
631
632static int s5h1411_read_status(struct dvb_frontend *fe, fe_status_t *status)
633{
634 struct s5h1411_state *state = fe->demodulator_priv;
635 u16 reg;
636 u32 tuner_status = 0;
637
638 *status = 0;
639
640 /* Get the demodulator status */
641 reg = (s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xf2) >> 15)
642 & 0x0001;
643 if (reg)
644 *status |= FE_HAS_LOCK | FE_HAS_CARRIER | FE_HAS_SIGNAL;
645
646 switch (state->current_modulation) {
647 case QAM_64:
648 case QAM_256:
649 reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xf0);
650 if (reg & 0x100)
651 *status |= FE_HAS_VITERBI;
652 if (reg & 0x10)
653 *status |= FE_HAS_SYNC;
654 break;
655 case VSB_8:
656 reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0x5e);
657 if (reg & 0x0001)
658 *status |= FE_HAS_SYNC;
659 reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xf2);
660 if (reg & 0x1000)
661 *status |= FE_HAS_VITERBI;
662 break;
663 default:
664 return -EINVAL;
665 }
666
667 switch (state->config->status_mode) {
668 case S5H1411_DEMODLOCKING:
669 if (*status & FE_HAS_VITERBI)
670 *status |= FE_HAS_CARRIER | FE_HAS_SIGNAL;
671 break;
672 case S5H1411_TUNERLOCKING:
673 /* Get the tuner status */
674 if (fe->ops.tuner_ops.get_status) {
675 if (fe->ops.i2c_gate_ctrl)
676 fe->ops.i2c_gate_ctrl(fe, 1);
677
678 fe->ops.tuner_ops.get_status(fe, &tuner_status);
679
680 if (fe->ops.i2c_gate_ctrl)
681 fe->ops.i2c_gate_ctrl(fe, 0);
682 }
683 if (tuner_status)
684 *status |= FE_HAS_CARRIER | FE_HAS_SIGNAL;
685 break;
686 }
687
688 dprintk("%s() status 0x%08x\n", __func__, *status);
689
690 return 0;
691}
692
693static int s5h1411_qam256_lookup_snr(struct dvb_frontend *fe, u16 *snr, u16 v)
694{
695 int i, ret = -EINVAL;
696 dprintk("%s()\n", __func__);
697
698 for (i = 0; i < ARRAY_SIZE(qam256_snr_tab); i++) {
699 if (v < qam256_snr_tab[i].val) {
700 *snr = qam256_snr_tab[i].data;
701 ret = 0;
702 break;
703 }
704 }
705 return ret;
706}
707
708static int s5h1411_qam64_lookup_snr(struct dvb_frontend *fe, u16 *snr, u16 v)
709{
710 int i, ret = -EINVAL;
711 dprintk("%s()\n", __func__);
712
713 for (i = 0; i < ARRAY_SIZE(qam64_snr_tab); i++) {
714 if (v < qam64_snr_tab[i].val) {
715 *snr = qam64_snr_tab[i].data;
716 ret = 0;
717 break;
718 }
719 }
720 return ret;
721}
722
723static int s5h1411_vsb_lookup_snr(struct dvb_frontend *fe, u16 *snr, u16 v)
724{
725 int i, ret = -EINVAL;
726 dprintk("%s()\n", __func__);
727
728 for (i = 0; i < ARRAY_SIZE(vsb_snr_tab); i++) {
729 if (v > vsb_snr_tab[i].val) {
730 *snr = vsb_snr_tab[i].data;
731 ret = 0;
732 break;
733 }
734 }
735 dprintk("%s() snr=%d\n", __func__, *snr);
736 return ret;
737}
738
739static int s5h1411_read_snr(struct dvb_frontend *fe, u16 *snr)
740{
741 struct s5h1411_state *state = fe->demodulator_priv;
742 u16 reg;
743 dprintk("%s()\n", __func__);
744
745 switch (state->current_modulation) {
746 case QAM_64:
747 reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xf1);
748 return s5h1411_qam64_lookup_snr(fe, snr, reg);
749 case QAM_256:
750 reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xf1);
751 return s5h1411_qam256_lookup_snr(fe, snr, reg);
752 case VSB_8:
753 reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR,
754 0xf2) & 0x3ff;
755 return s5h1411_vsb_lookup_snr(fe, snr, reg);
756 default:
757 break;
758 }
759
760 return -EINVAL;
761}
762
763static int s5h1411_read_signal_strength(struct dvb_frontend *fe,
764 u16 *signal_strength)
765{
766 return s5h1411_read_snr(fe, signal_strength);
767}
768
769static int s5h1411_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
770{
771 struct s5h1411_state *state = fe->demodulator_priv;
772
773 *ucblocks = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xc9);
774
775 return 0;
776}
777
778static int s5h1411_read_ber(struct dvb_frontend *fe, u32 *ber)
779{
780 return s5h1411_read_ucblocks(fe, ber);
781}
782
783static int s5h1411_get_frontend(struct dvb_frontend *fe,
784 struct dvb_frontend_parameters *p)
785{
786 struct s5h1411_state *state = fe->demodulator_priv;
787
788 p->frequency = state->current_frequency;
789 p->u.vsb.modulation = state->current_modulation;
790
791 return 0;
792}
793
794static int s5h1411_get_tune_settings(struct dvb_frontend *fe,
795 struct dvb_frontend_tune_settings *tune)
796{
797 tune->min_delay_ms = 1000;
798 return 0;
799}
800
801static void s5h1411_release(struct dvb_frontend *fe)
802{
803 struct s5h1411_state *state = fe->demodulator_priv;
804 kfree(state);
805}
806
807static struct dvb_frontend_ops s5h1411_ops;
808
809struct dvb_frontend *s5h1411_attach(const struct s5h1411_config *config,
810 struct i2c_adapter *i2c)
811{
812 struct s5h1411_state *state = NULL;
813 u16 reg;
814
815 /* allocate memory for the internal state */
816 state = kmalloc(sizeof(struct s5h1411_state), GFP_KERNEL);
817 if (state == NULL)
818 goto error;
819
820 /* setup the state */
821 state->config = config;
822 state->i2c = i2c;
823 state->current_modulation = VSB_8;
824 state->inversion = state->config->inversion;
825
826 /* check if the demod exists */
827 reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0x05);
828 if (reg != 0x0066)
829 goto error;
830
831 /* create dvb_frontend */
832 memcpy(&state->frontend.ops, &s5h1411_ops,
833 sizeof(struct dvb_frontend_ops));
834
835 state->frontend.demodulator_priv = state;
836
837 if (s5h1411_init(&state->frontend) != 0) {
838 printk(KERN_ERR "%s: Failed to initialize correctly\n",
839 __func__);
840 goto error;
841 }
842
843 /* Note: Leaving the I2C gate open here. */
844 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xf5, 1);
845
846 return &state->frontend;
847
848error:
849 kfree(state);
850 return NULL;
851}
852EXPORT_SYMBOL(s5h1411_attach);
853
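A bridge driver normally reaches s5h1411_attach() through the dvb_attach() macro, which also takes a module reference; the cx88 hunk later in this patch does exactly that. A hedged sketch of a call site (demo_cfg and i2c_adap are illustrative; the real values depend on how the demod is wired on the board):

/* Illustrative board config, not from this patch. */
static struct s5h1411_config demo_cfg = {
	.output_mode = S5H1411_SERIAL_OUTPUT,
	.gpio        = S5H1411_GPIO_ON,
	.qam_if      = S5H1411_QAM_IF_DEFAULT,
	.vsb_if      = S5H1411_VSB_IF_DEFAULT,
	.inversion   = S5H1411_INVERSION_OFF,
	.status_mode = S5H1411_DEMODLOCKING,
};

struct dvb_frontend *fe;

fe = dvb_attach(s5h1411_attach, &demo_cfg, i2c_adap);
if (fe == NULL)
	return -ENODEV;	/* no chip at the address, or ID != 0x0066 */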
854static struct dvb_frontend_ops s5h1411_ops = {
855
856 .info = {
857 .name = "Samsung S5H1411 QAM/8VSB Frontend",
858 .type = FE_ATSC,
859 .frequency_min = 54000000,
860 .frequency_max = 858000000,
861 .frequency_stepsize = 62500,
862 .caps = FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB
863 },
864
865 .init = s5h1411_init,
866 .i2c_gate_ctrl = s5h1411_i2c_gate_ctrl,
867 .set_frontend = s5h1411_set_frontend,
868 .get_frontend = s5h1411_get_frontend,
869 .get_tune_settings = s5h1411_get_tune_settings,
870 .read_status = s5h1411_read_status,
871 .read_ber = s5h1411_read_ber,
872 .read_signal_strength = s5h1411_read_signal_strength,
873 .read_snr = s5h1411_read_snr,
874 .read_ucblocks = s5h1411_read_ucblocks,
875 .release = s5h1411_release,
876};
877
878module_param(debug, int, 0644);
879MODULE_PARM_DESC(debug, "Enable verbose debug messages");
880
881MODULE_DESCRIPTION("Samsung S5H1411 QAM-B/ATSC Demodulator driver");
882MODULE_AUTHOR("Steven Toth");
883MODULE_LICENSE("GPL");
884
885/*
886 * Local variables:
887 * c-basic-offset: 8
888 */
diff --git a/drivers/media/dvb/frontends/s5h1411.h b/drivers/media/dvb/frontends/s5h1411.h
new file mode 100644
index 000000000000..1855f64ed4d8
--- /dev/null
+++ b/drivers/media/dvb/frontends/s5h1411.h
@@ -0,0 +1,90 @@
1/*
2 Samsung S5H1411 VSB/QAM demodulator driver
3
4 Copyright (C) 2008 Steven Toth <stoth@hauppauge.com>
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19
20*/
21
22#ifndef __S5H1411_H__
23#define __S5H1411_H__
24
25#include <linux/dvb/frontend.h>
26
27#define S5H1411_I2C_TOP_ADDR (0x32 >> 1)
28#define S5H1411_I2C_QAM_ADDR (0x34 >> 1)
29
30struct s5h1411_config {
31
32 /* serial/parallel output */
33#define S5H1411_PARALLEL_OUTPUT 0
34#define S5H1411_SERIAL_OUTPUT 1
35 u8 output_mode;
36
37 /* GPIO Setting */
38#define S5H1411_GPIO_OFF 0
39#define S5H1411_GPIO_ON 1
40 u8 gpio;
41
42 /* MPEG signal timing */
43#define S5H1411_MPEGTIMING_CONTINOUS_INVERTING_CLOCK 0
44#define S5H1411_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK 1
45#define S5H1411_MPEGTIMING_NONCONTINOUS_INVERTING_CLOCK 2
46#define S5H1411_MPEGTIMING_NONCONTINOUS_NONINVERTING_CLOCK 3
47 u16 mpeg_timing;
48
 49 /* IF frequency for QAM and VSB in kHz */
50#define S5H1411_IF_2500 2500
51#define S5H1411_IF_3500 3500
52#define S5H1411_IF_4000 4000
53#define S5H1411_IF_5380 5380
54#define S5H1411_IF_44000 44000
55#define S5H1411_VSB_IF_DEFAULT S5H1411_IF_44000
56#define S5H1411_QAM_IF_DEFAULT S5H1411_IF_44000
57 u16 qam_if;
58 u16 vsb_if;
59
60 /* Spectral Inversion */
61#define S5H1411_INVERSION_OFF 0
62#define S5H1411_INVERSION_ON 1
63 u8 inversion;
64
65 /* Return lock status based on tuner lock, or demod lock */
66#define S5H1411_TUNERLOCKING 0
67#define S5H1411_DEMODLOCKING 1
68 u8 status_mode;
69};
70
71#if defined(CONFIG_DVB_S5H1411) || \
72 (defined(CONFIG_DVB_S5H1411_MODULE) && defined(MODULE))
73extern struct dvb_frontend *s5h1411_attach(const struct s5h1411_config *config,
74 struct i2c_adapter *i2c);
75#else
76static inline struct dvb_frontend *s5h1411_attach(
77 const struct s5h1411_config *config,
78 struct i2c_adapter *i2c)
79{
80 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
81 return NULL;
82}
83#endif /* CONFIG_DVB_S5H1411 */
84
85#endif /* __S5H1411_H__ */
86
87/*
88 * Local variables:
89 * c-basic-offset: 8
90 */
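One detail worth noting in this header: the datasheet gives 8-bit bus addresses (write address with the R/W bit in bit 0), while the Linux I2C core works with 7-bit addresses, hence the >> 1 in the address macros above. A throwaway userspace check of the arithmetic:

#include <stdio.h>

int main(void)
{
	/* 8-bit datasheet address -> 7-bit Linux I2C address */
	printf("top: 0x%02x\n", 0x32 >> 1);	/* 0x19 */
	printf("qam: 0x%02x\n", 0x34 >> 1);	/* 0x1a */
	return 0;
}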
diff --git a/drivers/media/video/au0828/Kconfig b/drivers/media/video/au0828/Kconfig
index c97c4bd24841..41708267e7a4 100644
--- a/drivers/media/video/au0828/Kconfig
+++ b/drivers/media/video/au0828/Kconfig
@@ -1,7 +1,7 @@
1 1
2config VIDEO_AU0828 2config VIDEO_AU0828
3 tristate "Auvitek AU0828 support" 3 tristate "Auvitek AU0828 support"
4 depends on VIDEO_DEV && I2C && INPUT 4 depends on VIDEO_DEV && I2C && INPUT && DVB_CORE
5 select I2C_ALGOBIT 5 select I2C_ALGOBIT
6 select DVB_AU8522 if !DVB_FE_CUSTOMIZE 6 select DVB_AU8522 if !DVB_FE_CUSTOMIZE
7 select DVB_TUNER_XC5000 if !DVB_FE_CUSTOMIZE 7 select DVB_TUNER_XC5000 if !DVB_FE_CUSTOMIZE
diff --git a/drivers/media/video/au0828/au0828-cards.c b/drivers/media/video/au0828/au0828-cards.c
index 8ca91f814277..a2a6983444fa 100644
--- a/drivers/media/video/au0828/au0828-cards.c
+++ b/drivers/media/video/au0828/au0828-cards.c
@@ -36,7 +36,6 @@ struct au0828_board au0828_boards[] = {
36 .name = "DViCO FusionHDTV USB", 36 .name = "DViCO FusionHDTV USB",
37 }, 37 },
38}; 38};
39const unsigned int au0828_bcount = ARRAY_SIZE(au0828_boards);
40 39
41/* Tuner callback function for au0828 boards. Currently only needed 40/* Tuner callback function for au0828 boards. Currently only needed
42 * for HVR1500Q, which has an xc5000 tuner. 41 * for HVR1500Q, which has an xc5000 tuner.
diff --git a/drivers/media/video/au0828/au0828-core.c b/drivers/media/video/au0828/au0828-core.c
index e65d5642cb1d..54bfc0f05295 100644
--- a/drivers/media/video/au0828/au0828-core.c
+++ b/drivers/media/video/au0828/au0828-core.c
@@ -32,18 +32,10 @@
32 * 4 = I2C related 32 * 4 = I2C related
33 * 8 = Bridge related 33 * 8 = Bridge related
34 */ 34 */
35unsigned int debug; 35int au0828_debug;
36module_param(debug, int, 0644); 36module_param_named(debug, au0828_debug, int, 0644);
37MODULE_PARM_DESC(debug, "enable debug messages"); 37MODULE_PARM_DESC(debug, "enable debug messages");
38 38
39unsigned int usb_debug;
40module_param(usb_debug, int, 0644);
41MODULE_PARM_DESC(usb_debug, "enable usb debug messages");
42
43unsigned int bridge_debug;
44module_param(bridge_debug, int, 0644);
45MODULE_PARM_DESC(bridge_debug, "enable bridge debug messages");
46
47#define _AU0828_BULKPIPE 0x03 39#define _AU0828_BULKPIPE 0x03
48#define _BULKPIPESIZE 0xffff 40#define _BULKPIPESIZE 0xffff
49 41
@@ -229,24 +221,18 @@ static int __init au0828_init(void)
229{ 221{
230 int ret; 222 int ret;
231 223
232 if (debug) 224 if (au0828_debug & 1)
233 printk(KERN_INFO "%s() Debugging is enabled\n", __func__); 225 printk(KERN_INFO "%s() Debugging is enabled\n", __func__);
234 226
235 if (usb_debug) { 227 if (au0828_debug & 2)
236 printk(KERN_INFO "%s() USB Debugging is enabled\n", __func__); 228 printk(KERN_INFO "%s() USB Debugging is enabled\n", __func__);
237 debug |= 2;
238 }
239 229
240 if (i2c_debug) { 230 if (au0828_debug & 4)
241 printk(KERN_INFO "%s() I2C Debugging is enabled\n", __func__); 231 printk(KERN_INFO "%s() I2C Debugging is enabled\n", __func__);
242 debug |= 4;
243 }
244 232
245 if (bridge_debug) { 233 if (au0828_debug & 8)
246 printk(KERN_INFO "%s() Bridge Debugging is enabled\n", 234 printk(KERN_INFO "%s() Bridge Debugging is enabled\n",
247 __func__); 235 __func__);
248 debug |= 8;
249 }
250 236
251 printk(KERN_INFO "au0828 driver loaded\n"); 237 printk(KERN_INFO "au0828 driver loaded\n");
252 238
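The au0828 hunks above fold several per-subsystem debug parameters into a single bitmask, and module_param_named() keeps the user-visible parameter name ("debug") while giving the variable a driver-prefixed name that no longer pollutes the global namespace. A minimal sketch of the same pattern (all names illustrative):

#include <linux/module.h>

/* bit 0 = core, bit 1 = usb, bit 2 = i2c, bit 3 = bridge */
static int demo_debug;
module_param_named(debug, demo_debug, int, 0644);
MODULE_PARM_DESC(debug, "debug bitmask (1=core 2=usb 4=i2c 8=bridge)");

#define demo_dprintk(level, fmt, arg...)			\
	do {							\
		if (demo_debug & (level))			\
			printk(KERN_DEBUG "demo: " fmt, ##arg);	\
	} while (0)

/* e.g. "modprobe demo debug=6" enables the usb and i2c bits */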
diff --git a/drivers/media/video/au0828/au0828-dvb.c b/drivers/media/video/au0828/au0828-dvb.c
index 85d0ae9a322f..5040d7fc4af5 100644
--- a/drivers/media/video/au0828/au0828-dvb.c
+++ b/drivers/media/video/au0828/au0828-dvb.c
@@ -204,7 +204,7 @@ static int au0828_dvb_stop_feed(struct dvb_demux_feed *feed)
204 return ret; 204 return ret;
205} 205}
206 206
207int dvb_register(struct au0828_dev *dev) 207static int dvb_register(struct au0828_dev *dev)
208{ 208{
209 struct au0828_dvb *dvb = &dev->dvb; 209 struct au0828_dvb *dvb = &dev->dvb;
210 int result; 210 int result;
diff --git a/drivers/media/video/au0828/au0828-i2c.c b/drivers/media/video/au0828/au0828-i2c.c
index 94c8b74a6651..741a4937b050 100644
--- a/drivers/media/video/au0828/au0828-i2c.c
+++ b/drivers/media/video/au0828/au0828-i2c.c
@@ -29,11 +29,7 @@
29 29
30#include <media/v4l2-common.h> 30#include <media/v4l2-common.h>
31 31
32unsigned int i2c_debug; 32static int i2c_scan;
33module_param(i2c_debug, int, 0444);
34MODULE_PARM_DESC(i2c_debug, "enable debug messages [i2c]");
35
36unsigned int i2c_scan;
37module_param(i2c_scan, int, 0444); 33module_param(i2c_scan, int, 0444);
38MODULE_PARM_DESC(i2c_scan, "scan i2c bus at insmod time"); 34MODULE_PARM_DESC(i2c_scan, "scan i2c bus at insmod time");
39 35
diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
index 0200b9fc5dc4..7beb571798e5 100644
--- a/drivers/media/video/au0828/au0828.h
+++ b/drivers/media/video/au0828/au0828.h
@@ -96,15 +96,12 @@ struct au0828_buff {
96/* au0828-core.c */ 96/* au0828-core.c */
97extern u32 au0828_read(struct au0828_dev *dev, u16 reg); 97extern u32 au0828_read(struct au0828_dev *dev, u16 reg);
98extern u32 au0828_write(struct au0828_dev *dev, u16 reg, u32 val); 98extern u32 au0828_write(struct au0828_dev *dev, u16 reg, u32 val);
99extern unsigned int debug; 99extern int au0828_debug;
100extern unsigned int usb_debug;
101extern unsigned int bridge_debug;
102 100
103/* ----------------------------------------------------------- */ 101/* ----------------------------------------------------------- */
104/* au0828-cards.c */ 102/* au0828-cards.c */
105extern struct au0828_board au0828_boards[]; 103extern struct au0828_board au0828_boards[];
106extern struct usb_device_id au0828_usb_id_table[]; 104extern struct usb_device_id au0828_usb_id_table[];
107extern const unsigned int au0828_bcount;
108extern void au0828_gpio_setup(struct au0828_dev *dev); 105extern void au0828_gpio_setup(struct au0828_dev *dev);
109extern int au0828_tuner_callback(void *priv, int command, int arg); 106extern int au0828_tuner_callback(void *priv, int command, int arg);
110extern void au0828_card_setup(struct au0828_dev *dev); 107extern void au0828_card_setup(struct au0828_dev *dev);
@@ -115,7 +112,6 @@ extern int au0828_i2c_register(struct au0828_dev *dev);
115extern int au0828_i2c_unregister(struct au0828_dev *dev); 112extern int au0828_i2c_unregister(struct au0828_dev *dev);
116extern void au0828_call_i2c_clients(struct au0828_dev *dev, 113extern void au0828_call_i2c_clients(struct au0828_dev *dev,
117 unsigned int cmd, void *arg); 114 unsigned int cmd, void *arg);
118extern unsigned int i2c_debug;
119 115
120/* ----------------------------------------------------------- */ 116/* ----------------------------------------------------------- */
121/* au0828-dvb.c */ 117/* au0828-dvb.c */
@@ -123,6 +119,6 @@ extern int au0828_dvb_register(struct au0828_dev *dev);
123extern void au0828_dvb_unregister(struct au0828_dev *dev); 119extern void au0828_dvb_unregister(struct au0828_dev *dev);
124 120
125#define dprintk(level, fmt, arg...)\ 121#define dprintk(level, fmt, arg...)\
126 do { if (debug & level)\ 122 do { if (au0828_debug & level)\
127 printk(KERN_DEBUG DRIVER_NAME "/0: " fmt, ## arg);\ 123 printk(KERN_DEBUG DRIVER_NAME "/0: " fmt, ## arg);\
128 } while (0) 124 } while (0)
diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c
index 870d6e197d65..f05649727b60 100644
--- a/drivers/media/video/cx23885/cx23885-dvb.c
+++ b/drivers/media/video/cx23885/cx23885-dvb.c
@@ -191,7 +191,7 @@ static struct tda18271_config hauppauge_hvr1200_tuner_config = {
191 .gate = TDA18271_GATE_ANALOG, 191 .gate = TDA18271_GATE_ANALOG,
192}; 192};
193 193
194struct dibx000_agc_config xc3028_agc_config = { 194static struct dibx000_agc_config xc3028_agc_config = {
195 BAND_VHF | BAND_UHF, /* band_caps */ 195 BAND_VHF | BAND_UHF, /* band_caps */
196 196
197 /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=0, 197 /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=0,
@@ -237,7 +237,7 @@ struct dibx000_agc_config xc3028_agc_config = {
237 237
238/* PLL Configuration for COFDM BW_MHz = 8.000000 238/* PLL Configuration for COFDM BW_MHz = 8.000000
239 * With external clock = 30.000000 */ 239 * With external clock = 30.000000 */
240struct dibx000_bandwidth_config xc3028_bw_config = { 240static struct dibx000_bandwidth_config xc3028_bw_config = {
241 60000, /* internal */ 241 60000, /* internal */
242 30000, /* sampling */ 242 30000, /* sampling */
243 1, /* pll_cfg: prediv */ 243 1, /* pll_cfg: prediv */
diff --git a/drivers/media/video/cx88/Kconfig b/drivers/media/video/cx88/Kconfig
index bcf6d9ba063d..27635cdcbaf2 100644
--- a/drivers/media/video/cx88/Kconfig
+++ b/drivers/media/video/cx88/Kconfig
@@ -58,6 +58,7 @@ config VIDEO_CX88_DVB
58 select DVB_CX24123 if !DVB_FE_CUSTOMISE 58 select DVB_CX24123 if !DVB_FE_CUSTOMISE
59 select DVB_ISL6421 if !DVB_FE_CUSTOMISE 59 select DVB_ISL6421 if !DVB_FE_CUSTOMISE
60 select TUNER_SIMPLE if !DVB_FE_CUSTOMISE 60 select TUNER_SIMPLE if !DVB_FE_CUSTOMISE
61 select DVB_S5H1411 if !DVB_FE_CUSTOMISE
61 ---help--- 62 ---help---
62 This adds support for DVB/ATSC cards based on the 63 This adds support for DVB/ATSC cards based on the
63 Conexant 2388x chip. 64 Conexant 2388x chip.
diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c
index 61c4f72644b8..6c0c94c5ef91 100644
--- a/drivers/media/video/cx88/cx88-blackbird.c
+++ b/drivers/media/video/cx88/cx88-blackbird.c
@@ -546,10 +546,12 @@ static int blackbird_initialize_codec(struct cx8802_dev *dev)
546 if (retval < 0) 546 if (retval < 0)
547 return retval; 547 return retval;
548 548
549 dev->mailbox = blackbird_find_mailbox(dev); 549 retval = blackbird_find_mailbox(dev);
550 if (dev->mailbox < 0) 550 if (retval < 0)
551 return -1; 551 return -1;
552 552
553 dev->mailbox = retval;
554
553 retval = blackbird_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0); /* ping */ 555 retval = blackbird_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0); /* ping */
554 if (retval < 0) { 556 if (retval < 0) {
555 dprintk(0, "ERROR: Firmware ping failed!\n"); 557 dprintk(0, "ERROR: Firmware ping failed!\n");
diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
index 620159d05506..2b6b283cda15 100644
--- a/drivers/media/video/cx88/cx88-cards.c
+++ b/drivers/media/video/cx88/cx88-cards.c
@@ -1591,6 +1591,7 @@ static const struct cx88_board cx88_boards[] = {
1591 .vmux = 2, 1591 .vmux = 2,
1592 .gpio0 = 0x16d9, 1592 .gpio0 = 0x16d9,
1593 }}, 1593 }},
1594 .mpeg = CX88_MPEG_DVB,
1594 }, 1595 },
1595 [CX88_BOARD_PROLINK_PV_8000GT] = { 1596 [CX88_BOARD_PROLINK_PV_8000GT] = {
1596 .name = "Prolink Pixelview MPEG 8000GT", 1597 .name = "Prolink Pixelview MPEG 8000GT",
diff --git a/drivers/media/video/cx88/cx88-dvb.c b/drivers/media/video/cx88/cx88-dvb.c
index f1251b844e08..1c7fe6862a60 100644
--- a/drivers/media/video/cx88/cx88-dvb.c
+++ b/drivers/media/video/cx88/cx88-dvb.c
@@ -47,6 +47,7 @@
47#include "isl6421.h" 47#include "isl6421.h"
48#include "tuner-simple.h" 48#include "tuner-simple.h"
49#include "tda9887.h" 49#include "tda9887.h"
50#include "s5h1411.h"
50 51
51MODULE_DESCRIPTION("driver for cx2388x based DVB cards"); 52MODULE_DESCRIPTION("driver for cx2388x based DVB cards");
52MODULE_AUTHOR("Chris Pascoe <c.pascoe@itee.uq.edu.au>"); 53MODULE_AUTHOR("Chris Pascoe <c.pascoe@itee.uq.edu.au>");
@@ -463,6 +464,22 @@ static struct zl10353_config cx88_geniatech_x8000_mt = {
463 .no_tuner = 1, 464 .no_tuner = 1,
464}; 465};
465 466
467static struct s5h1411_config dvico_fusionhdtv7_config = {
468 .output_mode = S5H1411_SERIAL_OUTPUT,
469 .gpio = S5H1411_GPIO_ON,
470 .mpeg_timing = S5H1411_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK,
471 .qam_if = S5H1411_IF_44000,
472 .vsb_if = S5H1411_IF_44000,
473 .inversion = S5H1411_INVERSION_OFF,
474 .status_mode = S5H1411_DEMODLOCKING
475};
476
477static struct xc5000_config dvico_fusionhdtv7_tuner_config = {
478 .i2c_address = 0xc2 >> 1,
479 .if_khz = 5380,
480 .tuner_callback = cx88_tuner_callback,
481};
482
466static int attach_xc3028(u8 addr, struct cx8802_dev *dev) 483static int attach_xc3028(u8 addr, struct cx8802_dev *dev)
467{ 484{
468 struct dvb_frontend *fe; 485 struct dvb_frontend *fe;
@@ -844,6 +861,21 @@ static int dvb_register(struct cx8802_dev *dev)
844 if (attach_xc3028(0x61, dev) < 0) 861 if (attach_xc3028(0x61, dev) < 0)
845 return -EINVAL; 862 return -EINVAL;
846 break; 863 break;
864 case CX88_BOARD_DVICO_FUSIONHDTV_7_GOLD:
865 dev->dvb.frontend = dvb_attach(s5h1411_attach,
866 &dvico_fusionhdtv7_config,
867 &dev->core->i2c_adap);
868 if (dev->dvb.frontend != NULL) {
869 /* tuner_config.video_dev must point to
870 * i2c_adap.algo_data
871 */
872 dvico_fusionhdtv7_tuner_config.priv =
873 dev->core->i2c_adap.algo_data;
874 dvb_attach(xc5000_attach, dev->dvb.frontend,
875 &dev->core->i2c_adap,
876 &dvico_fusionhdtv7_tuner_config);
877 }
878 break;
847 default: 879 default:
848 printk(KERN_ERR "%s/2: The frontend of your DVB/ATSC card isn't supported yet\n", 880 printk(KERN_ERR "%s/2: The frontend of your DVB/ATSC card isn't supported yet\n",
849 dev->core->name); 881 dev->core->name);
diff --git a/drivers/media/video/em28xx/em28xx-core.c b/drivers/media/video/em28xx/em28xx-core.c
index f8c41d8c74c4..5d837c16ee22 100644
--- a/drivers/media/video/em28xx/em28xx-core.c
+++ b/drivers/media/video/em28xx/em28xx-core.c
@@ -650,7 +650,7 @@ int em28xx_init_isoc(struct em28xx *dev, int max_packets,
650 650
651 dev->isoc_ctl.transfer_buffer = kzalloc(sizeof(void *)*num_bufs, 651 dev->isoc_ctl.transfer_buffer = kzalloc(sizeof(void *)*num_bufs,
652 GFP_KERNEL); 652 GFP_KERNEL);
653 if (!dev->isoc_ctl.urb) { 653 if (!dev->isoc_ctl.transfer_buffer) {
654 em28xx_errdev("cannot allocate memory for usbtransfer\n"); 654 em28xx_errdev("cannot allocate memory for usbtransfer\n");
655 kfree(dev->isoc_ctl.urb); 655 kfree(dev->isoc_ctl.urb);
656 return -ENOMEM; 656 return -ENOMEM;
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c
index 11c5fdedc23b..7b65f5e537f8 100644
--- a/drivers/media/video/ir-kbd-i2c.c
+++ b/drivers/media/video/ir-kbd-i2c.c
@@ -509,8 +509,11 @@ static int ir_probe(struct i2c_adapter *adap)
509 static const int probe_cx88[] = { 0x18, 0x6b, 0x71, -1 }; 509 static const int probe_cx88[] = { 0x18, 0x6b, 0x71, -1 };
510 static const int probe_cx23885[] = { 0x6b, -1 }; 510 static const int probe_cx23885[] = { 0x6b, -1 };
511 const int *probe; 511 const int *probe;
512 struct i2c_client *c; 512 struct i2c_msg msg = {
513 unsigned char buf; 513 .flags = I2C_M_RD,
514 .len = 0,
515 .buf = NULL,
516 };
514 int i, rc; 517 int i, rc;
515 518
516 switch (adap->id) { 519 switch (adap->id) {
@@ -536,23 +539,17 @@ static int ir_probe(struct i2c_adapter *adap)
536 return 0; 539 return 0;
537 } 540 }
538 541
539 c = kzalloc(sizeof(*c), GFP_KERNEL);
540 if (!c)
541 return -ENOMEM;
542
543 c->adapter = adap;
544 for (i = 0; -1 != probe[i]; i++) { 542 for (i = 0; -1 != probe[i]; i++) {
545 c->addr = probe[i]; 543 msg.addr = probe[i];
546 rc = i2c_master_recv(c, &buf, 0); 544 rc = i2c_transfer(adap, &msg, 1);
547 dprintk(1,"probe 0x%02x @ %s: %s\n", 545 dprintk(1,"probe 0x%02x @ %s: %s\n",
548 probe[i], adap->name, 546 probe[i], adap->name,
549 (0 == rc) ? "yes" : "no"); 547 (1 == rc) ? "yes" : "no");
550 if (0 == rc) { 548 if (1 == rc) {
551 ir_attach(adap, probe[i], 0, 0); 549 ir_attach(adap, probe[i], 0, 0);
552 break; 550 break;
553 } 551 }
554 } 552 }
555 kfree(c);
556 return 0; 553 return 0;
557} 554}
558 555
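The ir_probe() rewrite above drops the heap-allocated dummy i2c_client plus i2c_master_recv() in favour of a stack i2c_msg passed to i2c_transfer(): a zero-length read is a cheap way to ask whether anything ACKs at an address, and i2c_transfer() returns the number of messages completed (1 here) on success. The same idea as a standalone helper, assuming only the core I2C API used above:

#include <linux/i2c.h>

/* Returns true if some device ACKs a zero-length read at addr. */
static bool demo_i2c_addr_present(struct i2c_adapter *adap, u16 addr)
{
	struct i2c_msg msg = {
		.addr  = addr,
		.flags = I2C_M_RD,
		.len   = 0,
		.buf   = NULL,
	};

	return i2c_transfer(adap, &msg, 1) == 1;
}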
diff --git a/drivers/media/video/pvrusb2/Kconfig b/drivers/media/video/pvrusb2/Kconfig
index a8da90f69dd9..158b3d0c6532 100644
--- a/drivers/media/video/pvrusb2/Kconfig
+++ b/drivers/media/video/pvrusb2/Kconfig
@@ -64,6 +64,7 @@ config VIDEO_PVRUSB2_DVB
64 depends on VIDEO_PVRUSB2 && DVB_CORE && EXPERIMENTAL 64 depends on VIDEO_PVRUSB2 && DVB_CORE && EXPERIMENTAL
65 select DVB_LGDT330X if !DVB_FE_CUSTOMISE 65 select DVB_LGDT330X if !DVB_FE_CUSTOMISE
66 select DVB_S5H1409 if !DVB_FE_CUSTOMISE 66 select DVB_S5H1409 if !DVB_FE_CUSTOMISE
67 select DVB_S5H1411 if !DVB_FE_CUSTOMISE
67 select DVB_TDA10048 if !DVB_FE_CUSTOMIZE 68 select DVB_TDA10048 if !DVB_FE_CUSTOMIZE
68 select DVB_TDA18271 if !DVB_FE_CUSTOMIZE 69 select DVB_TDA18271 if !DVB_FE_CUSTOMIZE
69 select TUNER_SIMPLE if !DVB_FE_CUSTOMISE 70 select TUNER_SIMPLE if !DVB_FE_CUSTOMISE
diff --git a/drivers/media/video/pvrusb2/pvrusb2-devattr.c b/drivers/media/video/pvrusb2/pvrusb2-devattr.c
index 2dd06a90adce..3a141d93e1a9 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-devattr.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-devattr.c
@@ -36,6 +36,7 @@ pvr2_device_desc structures.
36#include "pvrusb2-hdw-internal.h" 36#include "pvrusb2-hdw-internal.h"
37#include "lgdt330x.h" 37#include "lgdt330x.h"
38#include "s5h1409.h" 38#include "s5h1409.h"
39#include "s5h1411.h"
39#include "tda10048.h" 40#include "tda10048.h"
40#include "tda18271.h" 41#include "tda18271.h"
41#include "tda8290.h" 42#include "tda8290.h"
@@ -368,6 +369,15 @@ static struct s5h1409_config pvr2_s5h1409_config = {
368 .status_mode = S5H1409_DEMODLOCKING, 369 .status_mode = S5H1409_DEMODLOCKING,
369}; 370};
370 371
372static struct s5h1411_config pvr2_s5h1411_config = {
373 .output_mode = S5H1411_PARALLEL_OUTPUT,
374 .gpio = S5H1411_GPIO_OFF,
375 .vsb_if = S5H1411_IF_44000,
376 .qam_if = S5H1411_IF_4000,
377 .inversion = S5H1411_INVERSION_ON,
378 .status_mode = S5H1411_DEMODLOCKING,
379};
380
371static struct tda18271_std_map hauppauge_tda18271_std_map = { 381static struct tda18271_std_map hauppauge_tda18271_std_map = {
372 .atsc_6 = { .if_freq = 5380, .agc_mode = 3, .std = 3, 382 .atsc_6 = { .if_freq = 5380, .agc_mode = 3, .std = 3,
373 .if_lvl = 6, .rfagc_top = 0x37, }, 383 .if_lvl = 6, .rfagc_top = 0x37, },
@@ -390,6 +400,16 @@ static int pvr2_s5h1409_attach(struct pvr2_dvb_adapter *adap)
390 return -EIO; 400 return -EIO;
391} 401}
392 402
403static int pvr2_s5h1411_attach(struct pvr2_dvb_adapter *adap)
404{
405 adap->fe = dvb_attach(s5h1411_attach, &pvr2_s5h1411_config,
406 &adap->channel.hdw->i2c_adap);
407 if (adap->fe)
408 return 0;
409
410 return -EIO;
411}
412
393static int pvr2_tda18271_8295_attach(struct pvr2_dvb_adapter *adap) 413static int pvr2_tda18271_8295_attach(struct pvr2_dvb_adapter *adap)
394{ 414{
395 dvb_attach(tda829x_attach, adap->fe, 415 dvb_attach(tda829x_attach, adap->fe,
@@ -406,6 +426,11 @@ struct pvr2_dvb_props pvr2_750xx_dvb_props = {
406 .frontend_attach = pvr2_s5h1409_attach, 426 .frontend_attach = pvr2_s5h1409_attach,
407 .tuner_attach = pvr2_tda18271_8295_attach, 427 .tuner_attach = pvr2_tda18271_8295_attach,
408}; 428};
429
430struct pvr2_dvb_props pvr2_751xx_dvb_props = {
431 .frontend_attach = pvr2_s5h1411_attach,
432 .tuner_attach = pvr2_tda18271_8295_attach,
433};
409#endif 434#endif
410 435
411static const char *pvr2_client_75xxx[] = { 436static const char *pvr2_client_75xxx[] = {
@@ -454,6 +479,9 @@ static const struct pvr2_device_desc pvr2_device_751xx = {
454 .digital_control_scheme = PVR2_DIGITAL_SCHEME_HAUPPAUGE, 479 .digital_control_scheme = PVR2_DIGITAL_SCHEME_HAUPPAUGE,
455 .default_std_mask = V4L2_STD_NTSC_M, 480 .default_std_mask = V4L2_STD_NTSC_M,
456 .led_scheme = PVR2_LED_SCHEME_HAUPPAUGE, 481 .led_scheme = PVR2_LED_SCHEME_HAUPPAUGE,
482#ifdef CONFIG_VIDEO_PVRUSB2_DVB
483 .dvb_props = &pvr2_751xx_dvb_props,
484#endif
457}; 485};
458 486
459 487
diff --git a/drivers/media/video/pvrusb2/pvrusb2-devattr.h b/drivers/media/video/pvrusb2/pvrusb2-devattr.h
index c2e2b06fe2e0..d016f8b6c70b 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-devattr.h
+++ b/drivers/media/video/pvrusb2/pvrusb2-devattr.h
@@ -104,28 +104,28 @@ struct pvr2_device_desc {
104 unsigned char digital_control_scheme; 104 unsigned char digital_control_scheme;
105 105
106 /* If set, we don't bother trying to load cx23416 firmware. */ 106 /* If set, we don't bother trying to load cx23416 firmware. */
107 int flag_skip_cx23416_firmware:1; 107 unsigned int flag_skip_cx23416_firmware:1;
108 108
109 /* If set, the encoder must be healthy in order for digital mode to 109 /* If set, the encoder must be healthy in order for digital mode to
110 work (otherwise we assume that digital streaming will work even 110 work (otherwise we assume that digital streaming will work even
111 if we fail to locate firmware for the encoder). If the device 111 if we fail to locate firmware for the encoder). If the device
112 doesn't support digital streaming then this flag has no 112 doesn't support digital streaming then this flag has no
113 effect. */ 113 effect. */
114 int flag_digital_requires_cx23416:1; 114 unsigned int flag_digital_requires_cx23416:1;
115 115
116 /* Device has a hauppauge eeprom which we can interrogate. */ 116 /* Device has a hauppauge eeprom which we can interrogate. */
117 int flag_has_hauppauge_rom:1; 117 unsigned int flag_has_hauppauge_rom:1;
118 118
119 /* Device does not require a powerup command to be issued. */ 119 /* Device does not require a powerup command to be issued. */
120 int flag_no_powerup:1; 120 unsigned int flag_no_powerup:1;
121 121
122 /* Device has a cx25840 - this enables special additional logic to 122 /* Device has a cx25840 - this enables special additional logic to
123 handle it. */ 123 handle it. */
124 int flag_has_cx25840:1; 124 unsigned int flag_has_cx25840:1;
125 125
126 /* Device has a wm8775 - this enables special additional logic to 126 /* Device has a wm8775 - this enables special additional logic to
127 ensure that it is found. */ 127 ensure that it is found. */
128 int flag_has_wm8775:1; 128 unsigned int flag_has_wm8775:1;
129 129
130 /* Device has IR hardware that can be faked into looking like a 130 /* Device has IR hardware that can be faked into looking like a
131 normal Hauppauge i2c IR receiver. This is currently very 131 normal Hauppauge i2c IR receiver. This is currently very
@@ -135,15 +135,15 @@ struct pvr2_device_desc {
 135 to virtualize the presence of the non-existent IR receiver chip and 135
136 implement the virtual receiver in terms of appropriate FX2 136 implement the virtual receiver in terms of appropriate FX2
137 commands. */ 137 commands. */
138 int flag_has_hauppauge_custom_ir:1; 138 unsigned int flag_has_hauppauge_custom_ir:1;
139 139
140 /* These bits define which kinds of sources the device can handle. 140 /* These bits define which kinds of sources the device can handle.
141 Note: Digital tuner presence is inferred by the 141 Note: Digital tuner presence is inferred by the
142 digital_control_scheme enumeration. */ 142 digital_control_scheme enumeration. */
143 int flag_has_fmradio:1; /* Has FM radio receiver */ 143 unsigned int flag_has_fmradio:1; /* Has FM radio receiver */
144 int flag_has_analogtuner:1; /* Has analog tuner */ 144 unsigned int flag_has_analogtuner:1; /* Has analog tuner */
145 int flag_has_composite:1; /* Has composite input */ 145 unsigned int flag_has_composite:1; /* Has composite input */
146 int flag_has_svideo:1; /* Has s-video input */ 146 unsigned int flag_has_svideo:1; /* Has s-video input */
147}; 147};
148 148
149extern struct usb_device_id pvr2_device_table[]; 149extern struct usb_device_id pvr2_device_table[];
diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c
index 529e00952a8d..2b72e10e6b9f 100644
--- a/drivers/media/video/tuner-core.c
+++ b/drivers/media/video/tuner-core.c
@@ -369,19 +369,13 @@ static void set_type(struct i2c_client *c, unsigned int type,
369 break; 369 break;
370 } 370 }
371 case TUNER_TEA5767: 371 case TUNER_TEA5767:
372 if (tea5767_attach(&t->fe, t->i2c->adapter, t->i2c->addr) == NULL) { 372 if (!tea5767_attach(&t->fe, t->i2c->adapter, t->i2c->addr))
373 t->type = TUNER_ABSENT; 373 goto attach_failed;
374 t->mode_mask = T_UNINITIALIZED;
375 return;
376 }
377 t->mode_mask = T_RADIO; 374 t->mode_mask = T_RADIO;
378 break; 375 break;
379 case TUNER_TEA5761: 376 case TUNER_TEA5761:
380 if (tea5761_attach(&t->fe, t->i2c->adapter, t->i2c->addr) == NULL) { 377 if (!tea5761_attach(&t->fe, t->i2c->adapter, t->i2c->addr))
381 t->type = TUNER_ABSENT; 378 goto attach_failed;
382 t->mode_mask = T_UNINITIALIZED;
383 return;
384 }
385 t->mode_mask = T_RADIO; 379 t->mode_mask = T_RADIO;
386 break; 380 break;
387 case TUNER_PHILIPS_FMD1216ME_MK3: 381 case TUNER_PHILIPS_FMD1216ME_MK3:
@@ -394,12 +388,9 @@ static void set_type(struct i2c_client *c, unsigned int type,
394 buffer[2] = 0x86; 388 buffer[2] = 0x86;
395 buffer[3] = 0x54; 389 buffer[3] = 0x54;
396 i2c_master_send(c, buffer, 4); 390 i2c_master_send(c, buffer, 4);
397 if (simple_tuner_attach(&t->fe, t->i2c->adapter, t->i2c->addr, 391 if (!simple_tuner_attach(&t->fe, t->i2c->adapter, t->i2c->addr,
398 t->type) == NULL) { 392 t->type))
399 t->type = TUNER_ABSENT; 393 goto attach_failed;
400 t->mode_mask = T_UNINITIALIZED;
401 return;
402 }
403 break; 394 break;
404 case TUNER_PHILIPS_TD1316: 395 case TUNER_PHILIPS_TD1316:
405 buffer[0] = 0x0b; 396 buffer[0] = 0x0b;
@@ -407,12 +398,9 @@ static void set_type(struct i2c_client *c, unsigned int type,
407 buffer[2] = 0x86; 398 buffer[2] = 0x86;
408 buffer[3] = 0xa4; 399 buffer[3] = 0xa4;
409 i2c_master_send(c,buffer,4); 400 i2c_master_send(c,buffer,4);
410 if (simple_tuner_attach(&t->fe, t->i2c->adapter, 401 if (!simple_tuner_attach(&t->fe, t->i2c->adapter,
411 t->i2c->addr, t->type) == NULL) { 402 t->i2c->addr, t->type))
412 t->type = TUNER_ABSENT; 403 goto attach_failed;
413 t->mode_mask = T_UNINITIALIZED;
414 return;
415 }
416 break; 404 break;
417 case TUNER_XC2028: 405 case TUNER_XC2028:
418 { 406 {
@@ -421,40 +409,34 @@ static void set_type(struct i2c_client *c, unsigned int type,
421 .i2c_addr = t->i2c->addr, 409 .i2c_addr = t->i2c->addr,
422 .callback = t->tuner_callback, 410 .callback = t->tuner_callback,
423 }; 411 };
424 if (!xc2028_attach(&t->fe, &cfg)) { 412 if (!xc2028_attach(&t->fe, &cfg))
425 t->type = TUNER_ABSENT; 413 goto attach_failed;
426 t->mode_mask = T_UNINITIALIZED;
427 return;
428 }
429 break; 414 break;
430 } 415 }
431 case TUNER_TDA9887: 416 case TUNER_TDA9887:
432 tda9887_attach(&t->fe, t->i2c->adapter, t->i2c->addr); 417 tda9887_attach(&t->fe, t->i2c->adapter, t->i2c->addr);
433 break; 418 break;
434 case TUNER_XC5000: 419 case TUNER_XC5000:
420 {
421 struct dvb_tuner_ops *xc_tuner_ops;
422
435 xc5000_cfg.i2c_address = t->i2c->addr; 423 xc5000_cfg.i2c_address = t->i2c->addr;
436 xc5000_cfg.if_khz = 5380; 424 xc5000_cfg.if_khz = 5380;
437 xc5000_cfg.priv = c->adapter->algo_data; 425 xc5000_cfg.priv = c->adapter->algo_data;
438 xc5000_cfg.tuner_callback = t->tuner_callback; 426 xc5000_cfg.tuner_callback = t->tuner_callback;
439 if (!xc5000_attach(&t->fe, t->i2c->adapter, &xc5000_cfg)) { 427 if (!xc5000_attach(&t->fe, t->i2c->adapter, &xc5000_cfg))
440 t->type = TUNER_ABSENT; 428 goto attach_failed;
441 t->mode_mask = T_UNINITIALIZED; 429
442 return;
443 }
444 {
445 struct dvb_tuner_ops *xc_tuner_ops;
446 xc_tuner_ops = &t->fe.ops.tuner_ops; 430 xc_tuner_ops = &t->fe.ops.tuner_ops;
447 if(xc_tuner_ops->init != NULL) 431 if (xc_tuner_ops->init)
448 xc_tuner_ops->init(&t->fe); 432 xc_tuner_ops->init(&t->fe);
449 }
450 break; 433 break;
434 }
451 default: 435 default:
452 if (simple_tuner_attach(&t->fe, t->i2c->adapter, 436 if (!simple_tuner_attach(&t->fe, t->i2c->adapter,
453 t->i2c->addr, t->type) == NULL) { 437 t->i2c->addr, t->type))
454 t->type = TUNER_ABSENT; 438 goto attach_failed;
455 t->mode_mask = T_UNINITIALIZED; 439
456 return;
457 }
458 break; 440 break;
459 } 441 }
460 442
@@ -476,11 +458,27 @@ static void set_type(struct i2c_client *c, unsigned int type,
476 if (t->mode_mask == T_UNINITIALIZED) 458 if (t->mode_mask == T_UNINITIALIZED)
477 t->mode_mask = new_mode_mask; 459 t->mode_mask = new_mode_mask;
478 460
479 set_freq(c, (V4L2_TUNER_RADIO == t->mode) ? t->radio_freq : t->tv_freq); 461 /* xc2028/3028 and xc5000 requires a firmware to be set-up later
462 trying to set a frequency here will just fail
463 FIXME: better to move set_freq to the tuner code. This is needed
464 on analog tuners for PLL to properly work
465 */
466 if (t->type != TUNER_XC2028 && t->type != TUNER_XC5000)
467 set_freq(c, (V4L2_TUNER_RADIO == t->mode) ?
468 t->radio_freq : t->tv_freq);
469
480 tuner_dbg("%s %s I2C addr 0x%02x with type %d used for 0x%02x\n", 470 tuner_dbg("%s %s I2C addr 0x%02x with type %d used for 0x%02x\n",
481 c->adapter->name, c->driver->driver.name, c->addr << 1, type, 471 c->adapter->name, c->driver->driver.name, c->addr << 1, type,
482 t->mode_mask); 472 t->mode_mask);
483 tuner_i2c_address_check(t); 473 tuner_i2c_address_check(t);
474 return;
475
476attach_failed:
477 tuner_dbg("Tuner attach for type = %d failed.\n", t->type);
478 t->type = TUNER_ABSENT;
479 t->mode_mask = T_UNINITIALIZED;
480
481 return;
484} 482}
485 483
486/* 484/*
@@ -495,14 +493,16 @@ static void set_addr(struct i2c_client *c, struct tuner_setup *tun_setup)
495{ 493{
496 struct tuner *t = i2c_get_clientdata(c); 494 struct tuner *t = i2c_get_clientdata(c);
497 495
498 tuner_dbg("set addr for type %i\n", t->type);
499
500 if ( (t->type == UNSET && ((tun_setup->addr == ADDR_UNSET) && 496 if ( (t->type == UNSET && ((tun_setup->addr == ADDR_UNSET) &&
501 (t->mode_mask & tun_setup->mode_mask))) || 497 (t->mode_mask & tun_setup->mode_mask))) ||
502 (tun_setup->addr == c->addr)) { 498 (tun_setup->addr == c->addr)) {
503 set_type(c, tun_setup->type, tun_setup->mode_mask, 499 set_type(c, tun_setup->type, tun_setup->mode_mask,
504 tun_setup->config, tun_setup->tuner_callback); 500 tun_setup->config, tun_setup->tuner_callback);
505 } 501 } else
502 tuner_dbg("set addr discarded for type %i, mask %x. "
503 "Asked to change tuner at addr 0x%02x, with mask %x\n",
504 t->type, t->mode_mask,
505 tun_setup->addr, tun_setup->mode_mask);
506} 506}
507 507
508static inline int check_mode(struct tuner *t, char *cmd) 508static inline int check_mode(struct tuner *t, char *cmd)
diff --git a/drivers/media/video/tuner-xc2028.c b/drivers/media/video/tuner-xc2028.c
index cc3db7d79a0d..9e9003cffc7f 100644
--- a/drivers/media/video/tuner-xc2028.c
+++ b/drivers/media/video/tuner-xc2028.c
@@ -432,7 +432,7 @@ static int seek_firmware(struct dvb_frontend *fe, unsigned int type,
432 432
433 type &= type_mask; 433 type &= type_mask;
434 434
435 if (!type & SCODE) 435 if (!(type & SCODE))
436 type_mask = ~0; 436 type_mask = ~0;
437 437
438 /* Seek for exact match */ 438 /* Seek for exact match */
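The one-character tuner-xc2028 fix above is a classic precedence bug: unary ! binds tighter than &, so "!type & SCODE" parses as "(!type) & SCODE". Because !x is always 0 or 1 and SCODE is not bit 0, the old expression was 0 for every value of type, so the branch never fired. A throwaway demonstration (the SCODE value here is illustrative; the real flag lives in tuner-xc2028.h):

#include <stdio.h>

#define SCODE (1u << 4)	/* illustrative bit position */

int main(void)
{
	unsigned type = 0;	/* SCODE bit clear: branch *should* fire */

	printf("%u\n", !type & SCODE);	/* 0: (!type) & SCODE, dead code */
	printf("%u\n", !(type & SCODE));	/* 1: what the code meant */
	return 0;
}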
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index b1e9592acb90..845be1864f68 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -888,7 +888,7 @@ static int vivi_open(struct inode *inode, struct file *file)
888{ 888{
889 int minor = iminor(inode); 889 int minor = iminor(inode);
890 struct vivi_dev *dev; 890 struct vivi_dev *dev;
891 struct vivi_fh *fh; 891 struct vivi_fh *fh = NULL;
892 int i; 892 int i;
893 int retval = 0; 893 int retval = 0;
894 894
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index fafb57fed761..0736cff9d97a 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -31,7 +31,6 @@
31static LIST_HEAD(container_list); 31static LIST_HEAD(container_list);
32static DEFINE_MUTEX(container_list_lock); 32static DEFINE_MUTEX(container_list_lock);
33static struct class enclosure_class; 33static struct class enclosure_class;
34static struct class enclosure_component_class;
35 34
36/** 35/**
37 * enclosure_find - find an enclosure given a device 36 * enclosure_find - find an enclosure given a device
@@ -166,6 +165,40 @@ void enclosure_unregister(struct enclosure_device *edev)
166} 165}
167EXPORT_SYMBOL_GPL(enclosure_unregister); 166EXPORT_SYMBOL_GPL(enclosure_unregister);
168 167
168#define ENCLOSURE_NAME_SIZE 64
169
170static void enclosure_link_name(struct enclosure_component *cdev, char *name)
171{
172 strcpy(name, "enclosure_device:");
173 strcat(name, cdev->cdev.bus_id);
174}
175
176static void enclosure_remove_links(struct enclosure_component *cdev)
177{
178 char name[ENCLOSURE_NAME_SIZE];
179
180 enclosure_link_name(cdev, name);
181 sysfs_remove_link(&cdev->dev->kobj, name);
182 sysfs_remove_link(&cdev->cdev.kobj, "device");
183}
184
185static int enclosure_add_links(struct enclosure_component *cdev)
186{
187 int error;
188 char name[ENCLOSURE_NAME_SIZE];
189
190 error = sysfs_create_link(&cdev->cdev.kobj, &cdev->dev->kobj, "device");
191 if (error)
192 return error;
193
194 enclosure_link_name(cdev, name);
195 error = sysfs_create_link(&cdev->dev->kobj, &cdev->cdev.kobj, name);
196 if (error)
197 sysfs_remove_link(&cdev->cdev.kobj, "device");
198
199 return error;
200}
201
169static void enclosure_release(struct device *cdev) 202static void enclosure_release(struct device *cdev)
170{ 203{
171 struct enclosure_device *edev = to_enclosure_device(cdev); 204 struct enclosure_device *edev = to_enclosure_device(cdev);
@@ -178,10 +211,15 @@ static void enclosure_component_release(struct device *dev)
178{ 211{
179 struct enclosure_component *cdev = to_enclosure_component(dev); 212 struct enclosure_component *cdev = to_enclosure_component(dev);
180 213
181 put_device(cdev->dev); 214 if (cdev->dev) {
215 enclosure_remove_links(cdev);
216 put_device(cdev->dev);
217 }
182 put_device(dev->parent); 218 put_device(dev->parent);
183} 219}
184 220
221static struct attribute_group *enclosure_groups[];
222
185/** 223/**
186 * enclosure_component_register - add a particular component to an enclosure 224 * enclosure_component_register - add a particular component to an enclosure
187 * @edev: the enclosure to add the component 225 * @edev: the enclosure to add the component
@@ -217,12 +255,14 @@ enclosure_component_register(struct enclosure_device *edev,
217 ecomp->number = number; 255 ecomp->number = number;
218 cdev = &ecomp->cdev; 256 cdev = &ecomp->cdev;
219 cdev->parent = get_device(&edev->edev); 257 cdev->parent = get_device(&edev->edev);
220 cdev->class = &enclosure_component_class;
221 if (name) 258 if (name)
222 snprintf(cdev->bus_id, BUS_ID_SIZE, "%s", name); 259 snprintf(cdev->bus_id, BUS_ID_SIZE, "%s", name);
223 else 260 else
224 snprintf(cdev->bus_id, BUS_ID_SIZE, "%u", number); 261 snprintf(cdev->bus_id, BUS_ID_SIZE, "%u", number);
225 262
263 cdev->release = enclosure_component_release;
264 cdev->groups = enclosure_groups;
265
226 err = device_register(cdev); 266 err = device_register(cdev);
227 if (err) 267 if (err)
228 ERR_PTR(err); 268 ERR_PTR(err);
@@ -255,10 +295,12 @@ int enclosure_add_device(struct enclosure_device *edev, int component,
255 295
256 cdev = &edev->component[component]; 296 cdev = &edev->component[component];
257 297
258 device_del(&cdev->cdev); 298 if (cdev->dev)
299 enclosure_remove_links(cdev);
300
259 put_device(cdev->dev); 301 put_device(cdev->dev);
260 cdev->dev = get_device(dev); 302 cdev->dev = get_device(dev);
261 return device_add(&cdev->cdev); 303 return enclosure_add_links(cdev);
262} 304}
263EXPORT_SYMBOL_GPL(enclosure_add_device); 305EXPORT_SYMBOL_GPL(enclosure_add_device);
264 306
@@ -442,24 +484,32 @@ static ssize_t get_component_type(struct device *cdev,
442} 484}
443 485
444 486
445static struct device_attribute enclosure_component_attrs[] = { 487static DEVICE_ATTR(fault, S_IRUGO | S_IWUSR, get_component_fault,
446 __ATTR(fault, S_IRUGO | S_IWUSR, get_component_fault, 488 set_component_fault);
447 set_component_fault), 489static DEVICE_ATTR(status, S_IRUGO | S_IWUSR, get_component_status,
448 __ATTR(status, S_IRUGO | S_IWUSR, get_component_status, 490 set_component_status);
449 set_component_status), 491static DEVICE_ATTR(active, S_IRUGO | S_IWUSR, get_component_active,
450 __ATTR(active, S_IRUGO | S_IWUSR, get_component_active, 492 set_component_active);
451 set_component_active), 493static DEVICE_ATTR(locate, S_IRUGO | S_IWUSR, get_component_locate,
452 __ATTR(locate, S_IRUGO | S_IWUSR, get_component_locate, 494 set_component_locate);
453 set_component_locate), 495static DEVICE_ATTR(type, S_IRUGO, get_component_type, NULL);
454 __ATTR(type, S_IRUGO, get_component_type, NULL), 496
455 __ATTR_NULL 497static struct attribute *enclosure_component_attrs[] = {
498 &dev_attr_fault.attr,
499 &dev_attr_status.attr,
500 &dev_attr_active.attr,
501 &dev_attr_locate.attr,
502 &dev_attr_type.attr,
503 NULL
456}; 504};
457 505
458static struct class enclosure_component_class = { 506static struct attribute_group enclosure_group = {
459 .name = "enclosure_component", 507 .attrs = enclosure_component_attrs,
460 .owner = THIS_MODULE, 508};
461 .dev_attrs = enclosure_component_attrs, 509
462 .dev_release = enclosure_component_release, 510static struct attribute_group *enclosure_groups[] = {
511 &enclosure_group,
512 NULL
463}; 513};
464 514
465static int __init enclosure_init(void) 515static int __init enclosure_init(void)
@@ -469,20 +519,12 @@ static int __init enclosure_init(void)
469 err = class_register(&enclosure_class); 519 err = class_register(&enclosure_class);
470 if (err) 520 if (err)
471 return err; 521 return err;
472 err = class_register(&enclosure_component_class);
473 if (err)
474 goto err_out;
475 522
476 return 0; 523 return 0;
477 err_out:
478 class_unregister(&enclosure_class);
479
480 return err;
481} 524}
482 525
483static void __exit enclosure_exit(void) 526static void __exit enclosure_exit(void)
484{ 527{
485 class_unregister(&enclosure_component_class);
486 class_unregister(&enclosure_class); 528 class_unregister(&enclosure_class);
487} 529}
488 530
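The enclosure.c conversion above retires the dedicated enclosure_component class (with its dev_attrs/dev_release hooks) in favour of DEVICE_ATTR() definitions gathered into an attribute_group and hung off the device's ->groups pointer, so the driver core creates and removes the sysfs files automatically around device_register(). A minimal sketch of the pattern (names illustrative):

#include <linux/device.h>

static ssize_t demo_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "ok\n");
}
static DEVICE_ATTR(demo, S_IRUGO, demo_show, NULL);

static struct attribute *demo_attrs[] = {
	&dev_attr_demo.attr,
	NULL,				/* must be NULL-terminated */
};

static struct attribute_group demo_group = {
	.attrs = demo_attrs,
};

static struct attribute_group *demo_groups[] = {
	&demo_group,
	NULL,
};

/* before device_register(): dev->groups = demo_groups; */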
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 0697aa8ea774..8082c1d142df 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2011,7 +2011,7 @@ config E1000_DISABLE_PACKET_SPLIT
2011 2011
2012config E1000E 2012config E1000E
2013 tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support" 2013 tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support"
2014 depends on PCI 2014 depends on PCI && (!SPARC32 || BROKEN)
2015 ---help--- 2015 ---help---
2016 This driver supports the PCI-Express Intel(R) PRO/1000 gigabit 2016 This driver supports the PCI-Express Intel(R) PRO/1000 gigabit
2017 ethernet family of adapters. For PCI or PCI-X e1000 adapters, 2017 ethernet family of adapters. For PCI or PCI-X e1000 adapters,
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index 75ef9d0d974d..f9d6b4dca180 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -196,3 +196,160 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
196 } 196 }
197} 197}
198EXPORT_SYMBOL_GPL(mlx4_buf_free); 198EXPORT_SYMBOL_GPL(mlx4_buf_free);
199
200static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
201{
202 struct mlx4_db_pgdir *pgdir;
203
204 pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL);
205 if (!pgdir)
206 return NULL;
207
208 bitmap_fill(pgdir->order1, MLX4_DB_PER_PAGE / 2);
209 pgdir->bits[0] = pgdir->order0;
210 pgdir->bits[1] = pgdir->order1;
211 pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
212 &pgdir->db_dma, GFP_KERNEL);
213 if (!pgdir->db_page) {
214 kfree(pgdir);
215 return NULL;
216 }
217
218 return pgdir;
219}
220
221static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
222 struct mlx4_db *db, int order)
223{
224 int o;
225 int i;
226
227 for (o = order; o <= 1; ++o) {
228 i = find_first_bit(pgdir->bits[o], MLX4_DB_PER_PAGE >> o);
229 if (i < MLX4_DB_PER_PAGE >> o)
230 goto found;
231 }
232
233 return -ENOMEM;
234
235found:
236 clear_bit(i, pgdir->bits[o]);
237
238 i <<= o;
239
240 if (o > order)
241 set_bit(i ^ 1, pgdir->bits[order]);
242
243 db->u.pgdir = pgdir;
244 db->index = i;
245 db->db = pgdir->db_page + db->index;
246 db->dma = pgdir->db_dma + db->index * 4;
247 db->order = order;
248
249 return 0;
250}
251
252int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
253{
254 struct mlx4_priv *priv = mlx4_priv(dev);
255 struct mlx4_db_pgdir *pgdir;
256 int ret = 0;
257
258 mutex_lock(&priv->pgdir_mutex);
259
260 list_for_each_entry(pgdir, &priv->pgdir_list, list)
261 if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
262 goto out;
263
264 pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev));
265 if (!pgdir) {
266 ret = -ENOMEM;
267 goto out;
268 }
269
270 list_add(&pgdir->list, &priv->pgdir_list);
271
272 /* This should never fail -- we just allocated an empty page: */
273 WARN_ON(mlx4_alloc_db_from_pgdir(pgdir, db, order));
274
275out:
276 mutex_unlock(&priv->pgdir_mutex);
277
278 return ret;
279}
280EXPORT_SYMBOL_GPL(mlx4_db_alloc);
281
282void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
283{
284 struct mlx4_priv *priv = mlx4_priv(dev);
285 int o;
286 int i;
287
288 mutex_lock(&priv->pgdir_mutex);
289
290 o = db->order;
291 i = db->index;
292
293 if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
294 clear_bit(i ^ 1, db->u.pgdir->order0);
295 ++o;
296 }
297 i >>= o;
298 set_bit(i, db->u.pgdir->bits[o]);
299
300 if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) {
301 dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
302 db->u.pgdir->db_page, db->u.pgdir->db_dma);
303 list_del(&db->u.pgdir->list);
304 kfree(db->u.pgdir);
305 }
306
307 mutex_unlock(&priv->pgdir_mutex);
308}
309EXPORT_SYMBOL_GPL(mlx4_db_free);
310
311int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
312 int size, int max_direct)
313{
314 int err;
315
316 err = mlx4_db_alloc(dev, &wqres->db, 1);
317 if (err)
318 return err;
319
320 *wqres->db.db = 0;
321
322 err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf);
323 if (err)
324 goto err_db;
325
326 err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift,
327 &wqres->mtt);
328 if (err)
329 goto err_buf;
330
331 err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf);
332 if (err)
333 goto err_mtt;
334
335 return 0;
336
337err_mtt:
338 mlx4_mtt_cleanup(dev, &wqres->mtt);
339err_buf:
340 mlx4_buf_free(dev, size, &wqres->buf);
341err_db:
342 mlx4_db_free(dev, &wqres->db);
343
344 return err;
345}
346EXPORT_SYMBOL_GPL(mlx4_alloc_hwq_res);
347
348void mlx4_free_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
349 int size)
350{
351 mlx4_mtt_cleanup(dev, &wqres->mtt);
352 mlx4_buf_free(dev, size, &wqres->buf);
353 mlx4_db_free(dev, &wqres->db);
354}
355EXPORT_SYMBOL_GPL(mlx4_free_hwq_res);
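mlx4_db_alloc()/mlx4_db_free() above form a tiny two-order buddy allocator over one DMA page of 4-byte doorbell records: an order-1 request takes an aligned pair, an order-0 request splits a pair and parks the sibling slot (i ^ 1) in the order-0 bitmap, and free coalesces with that sibling before returning the slot to its bitmap. The index arithmetic in isolation, as a userspace sketch with no DMA:

#include <stdio.h>

int main(void)
{
	int pair = 3;		/* a free index in the order-1 bitmap */
	int i = pair << 1;	/* i <<= o: first order-0 slot, 6 */

	/* splitting: slot 6 is handed out, slot 7 (i ^ 1) is parked */
	printf("slots %d and %d\n", i, i ^ 1);

	/* freeing: if the sibling is free too, coalesce back to order 1 */
	printf("pair %d\n", (i ^ 1) >> 1);
	return 0;
}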
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c
index caa5bcf54e35..6fda0af9d0a6 100644
--- a/drivers/net/mlx4/cq.c
+++ b/drivers/net/mlx4/cq.c
@@ -180,7 +180,7 @@ int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
180 cq_context->mtt_base_addr_h = mtt_addr >> 32; 180 cq_context->mtt_base_addr_h = mtt_addr >> 32;
181 cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff); 181 cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
182 182
183 err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1); 183 err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 0);
184 184
185 mlx4_free_cmd_mailbox(dev, mailbox); 185 mlx4_free_cmd_mailbox(dev, mailbox);
186 return err; 186 return err;
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 49a4acab5e82..a6aa49fc1d68 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -798,6 +798,9 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
798 INIT_LIST_HEAD(&priv->ctx_list); 798 INIT_LIST_HEAD(&priv->ctx_list);
799 spin_lock_init(&priv->ctx_lock); 799 spin_lock_init(&priv->ctx_lock);
800 800
801 INIT_LIST_HEAD(&priv->pgdir_list);
802 mutex_init(&priv->pgdir_mutex);
803
801 /* 804 /*
802 * Now reset the HCA before we touch the PCI capabilities or 805 * Now reset the HCA before we touch the PCI capabilities or
803 * attempt a firmware command, since a boot ROM may have left 806 * attempt a firmware command, since a boot ROM may have left
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 73336810e652..a4023c2dd050 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -257,6 +257,9 @@ struct mlx4_priv {
257 struct list_head ctx_list; 257 struct list_head ctx_list;
258 spinlock_t ctx_lock; 258 spinlock_t ctx_lock;
259 259
260 struct list_head pgdir_list;
261 struct mutex pgdir_mutex;
262
260 struct mlx4_fw fw; 263 struct mlx4_fw fw;
261 struct mlx4_cmd cmd; 264 struct mlx4_cmd cmd;
262 265
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c
index fa24e6597591..ee5484c44a18 100644
--- a/drivers/net/mlx4/qp.c
+++ b/drivers/net/mlx4/qp.c
@@ -299,3 +299,34 @@ int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
299} 299}
300EXPORT_SYMBOL_GPL(mlx4_qp_query); 300EXPORT_SYMBOL_GPL(mlx4_qp_query);
301 301
302int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
303 struct mlx4_qp_context *context,
304 struct mlx4_qp *qp, enum mlx4_qp_state *qp_state)
305{
306 int err;
307 int i;
308 enum mlx4_qp_state states[] = {
309 MLX4_QP_STATE_RST,
310 MLX4_QP_STATE_INIT,
311 MLX4_QP_STATE_RTR,
312 MLX4_QP_STATE_RTS
313 };
314
315 for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
316 context->flags &= cpu_to_be32(~(0xf << 28));
317 context->flags |= cpu_to_be32(states[i + 1] << 28);
318 err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
319 context, 0, 0, qp);
320 if (err) {
321 mlx4_err(dev, "Failed to bring QP to state: "
322 "%d with error: %d\n",
323 states[i + 1], err);
324 return err;
325 }
326
327 *qp_state = states[i + 1];
328 }
329
330 return 0;
331}
332EXPORT_SYMBOL_GPL(mlx4_qp_to_ready);
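mlx4_qp_to_ready() above simply replays the RST -> INIT -> RTR -> RTS ladder through mlx4_qp_modify(), rewriting bits 28-31 of context->flags with the target state at each step and reporting the last state reached. A hedged usage fragment; the surrounding QP, MTT and context setup is assumed:

	enum mlx4_qp_state qp_state;
	int err;

	/* context has already been filled in for this QP and its MTT */
	err = mlx4_qp_to_ready(dev, &wqres.mtt, &context, &qp, &qp_state);
	if (err)
		return err;
	/* on success, qp_state == MLX4_QP_STATE_RTS */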
diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile
index 5a888704a8d0..4f4e7cf105d4 100644
--- a/drivers/s390/Makefile
+++ b/drivers/s390/Makefile
@@ -5,7 +5,7 @@
5CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w 5CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
6 6
7obj-y += s390mach.o sysinfo.o s390_rdev.o 7obj-y += s390mach.o sysinfo.o s390_rdev.o
8obj-y += cio/ block/ char/ crypto/ net/ scsi/ 8obj-y += cio/ block/ char/ crypto/ net/ scsi/ kvm/
9 9
10drivers-y += drivers/s390/built-in.o 10drivers-y += drivers/s390/built-in.o
11 11
diff --git a/drivers/s390/kvm/Makefile b/drivers/s390/kvm/Makefile
new file mode 100644
index 000000000000..4a5ec39f9ca6
--- /dev/null
+++ b/drivers/s390/kvm/Makefile
@@ -0,0 +1,9 @@
1# Makefile for kvm guest drivers on s390
2#
3# Copyright IBM Corp. 2008
4#
5# This program is free software; you can redistribute it and/or modify
6# it under the terms of the GNU General Public License (version 2 only)
7# as published by the Free Software Foundation.
8
9obj-$(CONFIG_VIRTIO) += kvm_virtio.o
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
new file mode 100644
index 000000000000..bbef3764fbf8
--- /dev/null
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -0,0 +1,338 @@
1/*
2 * kvm_virtio.c - virtio for kvm on s390
3 *
4 * Copyright IBM Corp. 2008
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
11 */
12
13#include <linux/init.h>
14#include <linux/bootmem.h>
15#include <linux/err.h>
16#include <linux/virtio.h>
17#include <linux/virtio_config.h>
18#include <linux/interrupt.h>
19#include <linux/virtio_ring.h>
20#include <asm/io.h>
21#include <asm/kvm_para.h>
22#include <asm/kvm_virtio.h>
23#include <asm/setup.h>
24#include <asm/s390_ext.h>
25
26#define VIRTIO_SUBCODE_64 0x0D00
27
28/*
 29 * The pointer to our page of device descriptions.
30 */
31static void *kvm_devices;
32
33/*
34 * Unique numbering for kvm devices.
35 */
36static unsigned int dev_index;
37
38struct kvm_device {
39 struct virtio_device vdev;
40 struct kvm_device_desc *desc;
41};
42
43#define to_kvmdev(vd) container_of(vd, struct kvm_device, vdev)
44
45/*
46 * memory layout:
47 * - kvm_device_descriptor
48 * struct kvm_device_desc
49 * - configuration
50 * struct kvm_vqconfig
51 * - feature bits
52 * - config space
53 */
54static struct kvm_vqconfig *kvm_vq_config(const struct kvm_device_desc *desc)
55{
56 return (struct kvm_vqconfig *)(desc + 1);
57}
58
59static u8 *kvm_vq_features(const struct kvm_device_desc *desc)
60{
61 return (u8 *)(kvm_vq_config(desc) + desc->num_vq);
62}
63
64static u8 *kvm_vq_configspace(const struct kvm_device_desc *desc)
65{
66 return kvm_vq_features(desc) + desc->feature_len * 2;
67}
68
69/*
70 * The total size of the config page used by this device (incl. desc)
71 */
72static unsigned desc_size(const struct kvm_device_desc *desc)
73{
74 return sizeof(*desc)
75 + desc->num_vq * sizeof(struct kvm_vqconfig)
76 + desc->feature_len * 2
77 + desc->config_len;
78}
79
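desc_size() above is the walk step that scan_devices() (at the end of this file) uses to move from one descriptor to the next, and the two accessors encode the layout sketched in the comment: vqconfigs first, then feature_len bytes of host feature bits plus feature_len bytes of guest acknowledgement bits, then the config space. A worked example of the offsets, with purely illustrative struct sizes:

/* Assume sizeof(struct kvm_device_desc) == 8,
 * sizeof(struct kvm_vqconfig) == 24, num_vq == 2,
 * feature_len == 1, config_len == 8.  Then, from the descriptor:
 *
 *   kvm_vq_config()      ->  offset  8            (desc + 1)
 *   kvm_vq_features()    ->  offset  8 + 2*24 = 56
 *   kvm_vq_configspace() ->  offset 56 + 1*2  = 58
 *   desc_size()          ->          58 + 8   = 66
 */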
80/*
 81 * This tests (and acknowledges) a feature bit.
82 */
83static bool kvm_feature(struct virtio_device *vdev, unsigned fbit)
84{
85 struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
86 u8 *features;
87
88 if (fbit / 8 > desc->feature_len)
89 return false;
90
91 features = kvm_vq_features(desc);
92 if (!(features[fbit / 8] & (1 << (fbit % 8))))
93 return false;
94
95 /*
96 * We set the matching bit in the other half of the bitmap to tell the
97 * Host we want to use this feature.
98 */
99 features[desc->feature_len + fbit / 8] |= (1 << (fbit % 8));
100 return true;
101}
102
103/*
104 * Reading and writing elements in config space
105 */
106static void kvm_get(struct virtio_device *vdev, unsigned int offset,
107 void *buf, unsigned len)
108{
109 struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
110
111 BUG_ON(offset + len > desc->config_len);
112 memcpy(buf, kvm_vq_configspace(desc) + offset, len);
113}
114
115static void kvm_set(struct virtio_device *vdev, unsigned int offset,
116 const void *buf, unsigned len)
117{
118 struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
119
120 BUG_ON(offset + len > desc->config_len);
121 memcpy(kvm_vq_configspace(desc) + offset, buf, len);
122}
123
124/*
125 * The operations to get and set the status word just access
126 * the status field of the device descriptor. set_status will also
 127 * make a hypercall to the host to tell it about status changes
128 */
129static u8 kvm_get_status(struct virtio_device *vdev)
130{
131 return to_kvmdev(vdev)->desc->status;
132}
133
134static void kvm_set_status(struct virtio_device *vdev, u8 status)
135{
136 BUG_ON(!status);
137 to_kvmdev(vdev)->desc->status = status;
138 kvm_hypercall1(KVM_S390_VIRTIO_SET_STATUS,
139 (unsigned long) to_kvmdev(vdev)->desc);
140}
141
142/*
143 * To reset the device, we use the KVM_VIRTIO_RESET hypercall, using the
144 * descriptor address. The Host will zero the status and all the
145 * features.
146 */
147static void kvm_reset(struct virtio_device *vdev)
148{
149 kvm_hypercall1(KVM_S390_VIRTIO_RESET,
150 (unsigned long) to_kvmdev(vdev)->desc);
151}
152
153/*
154 * When the virtio_ring code wants to notify the Host, it calls us here and we
155 * make a hypercall. We hand the address of the virtqueue so the Host
156 * knows which virtqueue we're talking about.
157 */
158static void kvm_notify(struct virtqueue *vq)
159{
160 struct kvm_vqconfig *config = vq->priv;
161
162 kvm_hypercall1(KVM_S390_VIRTIO_NOTIFY, config->address);
163}
164
165/*
166 * This routine finds the virtqueue at the given index in this device's
167 * configuration and sets it up.
168 */
169static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
170 unsigned index,
171 void (*callback)(struct virtqueue *vq))
172{
173 struct kvm_device *kdev = to_kvmdev(vdev);
174 struct kvm_vqconfig *config;
175 struct virtqueue *vq;
176 int err;
177
178 if (index >= kdev->desc->num_vq)
179 return ERR_PTR(-ENOENT);
180
181 config = kvm_vq_config(kdev->desc)+index;
182
183 if (add_shared_memory(config->address,
184 vring_size(config->num, PAGE_SIZE))) {
185 err = -ENOMEM;
186 goto out;
187 }
188
189 vq = vring_new_virtqueue(config->num, vdev, (void *) config->address,
190 kvm_notify, callback);
191 if (!vq) {
192 err = -ENOMEM;
193 goto unmap;
194 }
195
196 /*
197	 * Register a callback token; the host will send it back as the
198	 * external interrupt parameter.
199 */
200 config->token = (u64) vq;
201
202 vq->priv = config;
203 return vq;
204unmap:
205 remove_shared_memory(config->address, vring_size(config->num,
206 PAGE_SIZE));
207out:
208 return ERR_PTR(err);
209}
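/*
 * Note: the token registered above is the virtqueue pointer itself. The
 * host hands it back as the external interrupt parameter, and
 * kvm_extint_handler() below passes it straight to vring_interrupt().
 */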
210
211static void kvm_del_vq(struct virtqueue *vq)
212{
213 struct kvm_vqconfig *config = vq->priv;
214
215 vring_del_virtqueue(vq);
216 remove_shared_memory(config->address,
217 vring_size(config->num, PAGE_SIZE));
218}
219
220/*
221 * The config ops structure as defined by virtio config
222 */
223static struct virtio_config_ops kvm_vq_configspace_ops = {
224 .feature = kvm_feature,
225 .get = kvm_get,
226 .set = kvm_set,
227 .get_status = kvm_get_status,
228 .set_status = kvm_set_status,
229 .reset = kvm_reset,
230 .find_vq = kvm_find_vq,
231 .del_vq = kvm_del_vq,
232};
233
234/*
235 * The root device for the kvm virtio devices.
236 * This makes them appear as /sys/devices/kvm_s390/0,1,2 not /sys/devices/0,1,2.
237 */
238static struct device kvm_root = {
239 .parent = NULL,
240 .bus_id = "kvm_s390",
241};
242
243/*
244 * Adds a new device and registers it with virtio;
245 * appropriate drivers are loaded by the device model.
246 */
247static void add_kvm_device(struct kvm_device_desc *d)
248{
249 struct kvm_device *kdev;
250
251 kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
252 if (!kdev) {
253 printk(KERN_EMERG "Cannot allocate kvm dev %u\n",
254 dev_index++);
255 return;
256 }
257
258 kdev->vdev.dev.parent = &kvm_root;
259 kdev->vdev.index = dev_index++;
260 kdev->vdev.id.device = d->type;
261 kdev->vdev.config = &kvm_vq_configspace_ops;
262 kdev->desc = d;
263
264 if (register_virtio_device(&kdev->vdev) != 0) {
265 printk(KERN_ERR "Failed to register kvm device %u\n",
266 kdev->vdev.index);
267 kfree(kdev);
268 }
269}
270
271/*
272 * scan_devices() simply iterates through the device page.
273 * A type of 0 is reserved to mean "end of devices".
274 */
275static void scan_devices(void)
276{
277 unsigned int i;
278 struct kvm_device_desc *d;
279
280 for (i = 0; i < PAGE_SIZE; i += desc_size(d)) {
281 d = kvm_devices + i;
282
283 if (d->type == 0)
284 break;
285
286 add_kvm_device(d);
287 }
288}
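/*
 * Because descriptors are variable-length, the loop above advances by
 * desc_size(d) each iteration; e.g. a page holding one three-virtqueue
 * device followed by a type 0 descriptor produces exactly one
 * add_kvm_device() call before the walk terminates.
 */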
289
290/*
291 * We emulate the request_irq behaviour on top of s390 external interrupts.
292 */
293static void kvm_extint_handler(u16 code)
294{
295 void *data = (void *) *(long *) __LC_PFAULT_INTPARM;
296 u16 subcode = S390_lowcore.cpu_addr;
297
298 if ((subcode & 0xff00) != VIRTIO_SUBCODE_64)
299 return;
300
301 vring_interrupt(0, data);
302}
303
304/*
305 * Init function for virtio: the device descriptors live in a single
306 * page just above the top of "normal" memory.
307 */
308static int __init kvm_devices_init(void)
309{
310 int rc;
311
312 if (!MACHINE_IS_KVM)
313 return -ENODEV;
314
315 rc = device_register(&kvm_root);
316 if (rc) {
317		printk(KERN_ERR "Could not register kvm_s390 root device\n");
318 return rc;
319 }
320
321 if (add_shared_memory((max_pfn) << PAGE_SHIFT, PAGE_SIZE)) {
322 device_unregister(&kvm_root);
323 return -ENOMEM;
324 }
325
326 kvm_devices = (void *) (max_pfn << PAGE_SHIFT);
327
328 ctl_set_bit(0, 9);
329 register_external_interrupt(0x2603, kvm_extint_handler);
330
331 scan_devices();
332 return 0;
333}
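/*
 * Note on the setup above: the descriptor page sits in the first frame
 * past the guest's memory (max_pfn << PAGE_SHIFT), which is why it must
 * be mapped with add_shared_memory() before use. ctl_set_bit(0, 9)
 * unmasks the external-interrupt subclass used for these notifications,
 * and 0x2603 is the external interrupt code the host raises for them.
 */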
334
335/*
336 * We run this after core initialization, but before the drivers.
337 */
338postcore_initcall(kvm_devices_init);
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 7c3f02816e95..9af2330f07a2 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -1927,7 +1927,8 @@ zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter,
1927 1927
1928 /* setup new FSF request */ 1928 /* setup new FSF request */
1929 retval = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA, 1929 retval = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1930 0, NULL, &lock_flags, &fsf_req); 1930 ZFCP_WAIT_FOR_SBAL, NULL, &lock_flags,
1931 &fsf_req);
1931 if (retval) { 1932 if (retval) {
1932 ZFCP_LOG_INFO("error: Could not create exchange configuration " 1933 ZFCP_LOG_INFO("error: Could not create exchange configuration "
1933 "data request for adapter %s.\n", 1934 "data request for adapter %s.\n",
@@ -2035,21 +2036,21 @@ zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok)
2035 min(FC_SERIAL_NUMBER_SIZE, 17)); 2036 min(FC_SERIAL_NUMBER_SIZE, 17));
2036 } 2037 }
2037 2038
2038 ZFCP_LOG_NORMAL("The adapter %s reported the following " 2039 if (fsf_req->erp_action)
2039 "characteristics:\n" 2040 ZFCP_LOG_NORMAL("The adapter %s reported the following "
2040 "WWNN 0x%016Lx, " 2041 "characteristics:\n"
2041 "WWPN 0x%016Lx, " 2042 "WWNN 0x%016Lx, WWPN 0x%016Lx, "
2042 "S_ID 0x%06x,\n" 2043 "S_ID 0x%06x,\n"
2043 "adapter version 0x%x, " 2044 "adapter version 0x%x, "
2044 "LIC version 0x%x, " 2045 "LIC version 0x%x, "
2045 "FC link speed %d Gb/s\n", 2046 "FC link speed %d Gb/s\n",
2046 zfcp_get_busid_by_adapter(adapter), 2047 zfcp_get_busid_by_adapter(adapter),
2047 (wwn_t) fc_host_node_name(shost), 2048 (wwn_t) fc_host_node_name(shost),
2048 (wwn_t) fc_host_port_name(shost), 2049 (wwn_t) fc_host_port_name(shost),
2049 fc_host_port_id(shost), 2050 fc_host_port_id(shost),
2050 adapter->hydra_version, 2051 adapter->hydra_version,
2051 adapter->fsf_lic_version, 2052 adapter->fsf_lic_version,
2052 fc_host_speed(shost)); 2053 fc_host_speed(shost));
2053 if (ZFCP_QTCB_VERSION < bottom->low_qtcb_version) { 2054 if (ZFCP_QTCB_VERSION < bottom->low_qtcb_version) {
2054 ZFCP_LOG_NORMAL("error: the adapter %s " 2055 ZFCP_LOG_NORMAL("error: the adapter %s "
2055 "only supports newer control block " 2056 "only supports newer control block "
@@ -2114,8 +2115,10 @@ zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *fsf_req)
2114 zfcp_erp_adapter_shutdown(adapter, 0, 127, fsf_req); 2115 zfcp_erp_adapter_shutdown(adapter, 0, 127, fsf_req);
2115 return -EIO; 2116 return -EIO;
2116 case FC_PORTTYPE_NPORT: 2117 case FC_PORTTYPE_NPORT:
2117 ZFCP_LOG_NORMAL("Switched fabric fibrechannel " 2118 if (fsf_req->erp_action)
2118 "network detected at adapter %s.\n", 2119 ZFCP_LOG_NORMAL("Switched fabric fibrechannel "
2120 "network detected at adapter "
2121 "%s.\n",
2119 zfcp_get_busid_by_adapter(adapter)); 2122 zfcp_get_busid_by_adapter(adapter));
2120 break; 2123 break;
2121 default: 2124 default:
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index 8cce5cc11d50..099970b27001 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -213,6 +213,7 @@
213#define FSF_FEATURE_HBAAPI_MANAGEMENT 0x00000010 213#define FSF_FEATURE_HBAAPI_MANAGEMENT 0x00000010
214#define FSF_FEATURE_ELS_CT_CHAINED_SBALS 0x00000020 214#define FSF_FEATURE_ELS_CT_CHAINED_SBALS 0x00000020
215#define FSF_FEATURE_UPDATE_ALERT 0x00000100 215#define FSF_FEATURE_UPDATE_ALERT 0x00000100
216#define FSF_FEATURE_MEASUREMENT_DATA 0x00000200
216 217
217/* host connection features */ 218/* host connection features */
218#define FSF_FEATURE_NPIV_MODE 0x00000001 219#define FSF_FEATURE_NPIV_MODE 0x00000001
@@ -340,6 +341,15 @@ struct fsf_qtcb_prefix {
340 u8 res1[20]; 341 u8 res1[20];
341} __attribute__ ((packed)); 342} __attribute__ ((packed));
342 343
344struct fsf_statistics_info {
345 u64 input_req;
346 u64 output_req;
347 u64 control_req;
348 u64 input_mb;
349 u64 output_mb;
350 u64 seconds_act;
351} __attribute__ ((packed));
352
343union fsf_status_qual { 353union fsf_status_qual {
344 u8 byte[FSF_STATUS_QUALIFIER_SIZE]; 354 u8 byte[FSF_STATUS_QUALIFIER_SIZE];
345 u16 halfword[FSF_STATUS_QUALIFIER_SIZE / sizeof (u16)]; 355 u16 halfword[FSF_STATUS_QUALIFIER_SIZE / sizeof (u16)];
@@ -436,7 +446,8 @@ struct fsf_qtcb_bottom_config {
436 u32 hardware_version; 446 u32 hardware_version;
437 u8 serial_number[32]; 447 u8 serial_number[32];
438 struct fsf_nport_serv_param plogi_payload; 448 struct fsf_nport_serv_param plogi_payload;
439 u8 res4[160]; 449 struct fsf_statistics_info stat_info;
450 u8 res4[112];
440} __attribute__ ((packed)); 451} __attribute__ ((packed));
441 452
442struct fsf_qtcb_bottom_port { 453struct fsf_qtcb_bottom_port {
@@ -469,7 +480,10 @@ struct fsf_qtcb_bottom_port {
469 u64 control_requests; 480 u64 control_requests;
470 u64 input_mb; /* where 1 MByte == 1.000.000 Bytes */ 481 u64 input_mb; /* where 1 MByte == 1.000.000 Bytes */
471 u64 output_mb; /* where 1 MByte == 1.000.000 Bytes */ 482 u64 output_mb; /* where 1 MByte == 1.000.000 Bytes */
472 u8 res2[256]; 483 u8 cp_util;
484 u8 cb_util;
485 u8 a_util;
486 u8 res2[253];
473} __attribute__ ((packed)); 487} __attribute__ ((packed));
474 488
475union fsf_qtcb_bottom { 489union fsf_qtcb_bottom {
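The new statistics block is carved out of the reserved tail of fsf_qtcb_bottom_config: six u64 counters (48 bytes) replace 48 bytes of res4 (160 down to 112), and the three utilization bytes added to fsf_qtcb_bottom_port likewise shrink res2 from 256 to 253, so neither structure changes size. A minimal stand-alone check of that arithmetic (the struct is copied from the hunk above; the assert is illustrative, not part of the patch):

#include <assert.h>
#include <stdint.h>

typedef uint64_t u64;

struct fsf_statistics_info {
	u64 input_req;
	u64 output_req;
	u64 control_req;
	u64 input_mb;
	u64 output_mb;
	u64 seconds_act;
} __attribute__((packed));

int main(void)
{
	/* 6 * 8 = 48 bytes, exactly the shrink of res4 from 160 to 112 */
	assert(sizeof(struct fsf_statistics_info) == 160 - 112);
	return 0;
}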
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index f81850624eed..01687559dc06 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -40,6 +40,7 @@ static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *, int,
40 unsigned int, unsigned int); 40 unsigned int, unsigned int);
41 41
42static struct device_attribute *zfcp_sysfs_sdev_attrs[]; 42static struct device_attribute *zfcp_sysfs_sdev_attrs[];
43static struct device_attribute *zfcp_a_stats_attrs[];
43 44
44struct zfcp_data zfcp_data = { 45struct zfcp_data zfcp_data = {
45 .scsi_host_template = { 46 .scsi_host_template = {
@@ -61,6 +62,7 @@ struct zfcp_data zfcp_data = {
61 .use_clustering = 1, 62 .use_clustering = 1,
62 .sdev_attrs = zfcp_sysfs_sdev_attrs, 63 .sdev_attrs = zfcp_sysfs_sdev_attrs,
63 .max_sectors = ZFCP_MAX_SECTORS, 64 .max_sectors = ZFCP_MAX_SECTORS,
65 .shost_attrs = zfcp_a_stats_attrs,
64 }, 66 },
65 .driver_version = ZFCP_VERSION, 67 .driver_version = ZFCP_VERSION,
66}; 68};
@@ -809,4 +811,116 @@ static struct device_attribute *zfcp_sysfs_sdev_attrs[] = {
809 NULL 811 NULL
810}; 812};
811 813
814static ssize_t zfcp_sysfs_adapter_util_show(struct device *dev,
815 struct device_attribute *attr,
816 char *buf)
817{
818 struct Scsi_Host *scsi_host = dev_to_shost(dev);
819 struct fsf_qtcb_bottom_port *qtcb_port;
820 int retval;
821 struct zfcp_adapter *adapter;
822
823 adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
824 if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA))
825 return -EOPNOTSUPP;
826
827 qtcb_port = kzalloc(sizeof(struct fsf_qtcb_bottom_port), GFP_KERNEL);
828 if (!qtcb_port)
829 return -ENOMEM;
830
831 retval = zfcp_fsf_exchange_port_data_sync(adapter, qtcb_port);
832 if (!retval)
833 retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util,
834 qtcb_port->cb_util, qtcb_port->a_util);
835 kfree(qtcb_port);
836 return retval;
837}
838
839static int zfcp_sysfs_adapter_ex_config(struct device *dev,
840 struct fsf_statistics_info *stat_inf)
841{
842 int retval;
843 struct fsf_qtcb_bottom_config *qtcb_config;
844 struct Scsi_Host *scsi_host = dev_to_shost(dev);
845 struct zfcp_adapter *adapter;
846
847 adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
848 if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA))
849 return -EOPNOTSUPP;
850
851 qtcb_config = kzalloc(sizeof(struct fsf_qtcb_bottom_config),
852 GFP_KERNEL);
853 if (!qtcb_config)
854 return -ENOMEM;
855
856 retval = zfcp_fsf_exchange_config_data_sync(adapter, qtcb_config);
857 if (!retval)
858 *stat_inf = qtcb_config->stat_info;
859
860 kfree(qtcb_config);
861 return retval;
862}
863
864static ssize_t zfcp_sysfs_adapter_request_show(struct device *dev,
865 struct device_attribute *attr,
866 char *buf)
867{
868 struct fsf_statistics_info stat_info;
869 int retval;
870
871 retval = zfcp_sysfs_adapter_ex_config(dev, &stat_info);
872 if (retval)
873 return retval;
874
875 return sprintf(buf, "%llu %llu %llu\n",
876 (unsigned long long) stat_info.input_req,
877 (unsigned long long) stat_info.output_req,
878 (unsigned long long) stat_info.control_req);
879}
880
881static ssize_t zfcp_sysfs_adapter_mb_show(struct device *dev,
882 struct device_attribute *attr,
883 char *buf)
884{
885 struct fsf_statistics_info stat_info;
886 int retval;
887
888 retval = zfcp_sysfs_adapter_ex_config(dev, &stat_info);
889 if (retval)
890 return retval;
891
892 return sprintf(buf, "%llu %llu\n",
893 (unsigned long long) stat_info.input_mb,
894 (unsigned long long) stat_info.output_mb);
895}
896
897static ssize_t zfcp_sysfs_adapter_sec_active_show(struct device *dev,
898 struct device_attribute *attr,
899 char *buf)
900{
901 struct fsf_statistics_info stat_info;
902 int retval;
903
904 retval = zfcp_sysfs_adapter_ex_config(dev, &stat_info);
905 if (retval)
906 return retval;
907
908 return sprintf(buf, "%llu\n",
909 (unsigned long long) stat_info.seconds_act);
910}
911
912static DEVICE_ATTR(utilization, S_IRUGO, zfcp_sysfs_adapter_util_show, NULL);
913static DEVICE_ATTR(requests, S_IRUGO, zfcp_sysfs_adapter_request_show, NULL);
914static DEVICE_ATTR(megabytes, S_IRUGO, zfcp_sysfs_adapter_mb_show, NULL);
915static DEVICE_ATTR(seconds_active, S_IRUGO,
916 zfcp_sysfs_adapter_sec_active_show, NULL);
917
918static struct device_attribute *zfcp_a_stats_attrs[] = {
919 &dev_attr_utilization,
920 &dev_attr_requests,
921 &dev_attr_megabytes,
922 &dev_attr_seconds_active,
923 NULL
924};
925
812#undef ZFCP_LOG_AREA 926#undef ZFCP_LOG_AREA
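For reference, the four new attributes appear in the Scsi_Host sysfs directory of the adapter. A user-space sketch that reads one of them (the host0 path is an assumption; substitute the adapter's actual host instance):

#include <stdio.h>

int main(void)
{
	char buf[64];
	FILE *f = fopen("/sys/class/scsi_host/host0/utilization", "r");

	if (!f)		/* adapter absent, or no FSF_FEATURE_MEASUREMENT_DATA */
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("cp_util cb_util a_util: %s", buf);
	fclose(f);
	return 0;
}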
diff --git a/drivers/scsi/FlashPoint.c b/drivers/scsi/FlashPoint.c
index b374e457e5e2..b898d382b7b0 100644
--- a/drivers/scsi/FlashPoint.c
+++ b/drivers/scsi/FlashPoint.c
@@ -1499,7 +1499,7 @@ static void FlashPoint_StartCCB(unsigned long pCurrCard, struct sccb *p_Sccb)
1499 thisCard = ((struct sccb_card *)pCurrCard)->cardIndex; 1499 thisCard = ((struct sccb_card *)pCurrCard)->cardIndex;
1500 ioport = ((struct sccb_card *)pCurrCard)->ioPort; 1500 ioport = ((struct sccb_card *)pCurrCard)->ioPort;
1501 1501
1502 if ((p_Sccb->TargID > MAX_SCSI_TAR) || (p_Sccb->Lun > MAX_LUN)) { 1502 if ((p_Sccb->TargID >= MAX_SCSI_TAR) || (p_Sccb->Lun >= MAX_LUN)) {
1503 1503
1504 p_Sccb->HostStatus = SCCB_COMPLETE; 1504 p_Sccb->HostStatus = SCCB_COMPLETE;
1505 p_Sccb->SccbStatus = SCCB_ERROR; 1505 p_Sccb->SccbStatus = SCCB_ERROR;
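The FlashPoint change fixes a classic off-by-one: for tables with MAX_SCSI_TAR and MAX_LUN entries the valid indices run 0 .. size - 1, so the reject test must use >=. A stand-alone illustration (the array size here is hypothetical):

#include <stdio.h>

#define MAX_SCSI_TAR 16

static int targets[MAX_SCSI_TAR];

static int lookup(unsigned int id)
{
	/* with 'id > MAX_SCSI_TAR', id == 16 would slip through and
	 * index one element past the end of targets[] */
	if (id >= MAX_SCSI_TAR)
		return -1;
	return targets[id];
}

int main(void)
{
	printf("%d\n", lookup(MAX_SCSI_TAR));	/* correctly rejected: -1 */
	return 0;
}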
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 7f78e3ea517d..99c57b0c1d54 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1677,6 +1677,16 @@ config MAC_SCSI
1677 SCSI-HOWTO, available from 1677 SCSI-HOWTO, available from
1678 <http://www.tldp.org/docs.html#howto>. 1678 <http://www.tldp.org/docs.html#howto>.
1679 1679
1680config SCSI_MAC_ESP
1681 tristate "Macintosh NCR53c9[46] SCSI"
1682 depends on MAC && SCSI
1683 help
1684 This is the NCR 53c9x SCSI controller found on most of the 68040
1685 based Macintoshes.
1686
1687 To compile this driver as a module, choose M here: the module
1688 will be called mac_esp.
1689
1680config MVME147_SCSI 1690config MVME147_SCSI
1681 bool "WD33C93 SCSI driver for MVME147" 1691 bool "WD33C93 SCSI driver for MVME147"
1682 depends on MVME147 && SCSI=y 1692 depends on MVME147 && SCSI=y
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 23e6ecbd4778..6c775e350c98 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_MVME147_SCSI) += mvme147.o wd33c93.o
46obj-$(CONFIG_SGIWD93_SCSI) += sgiwd93.o wd33c93.o 46obj-$(CONFIG_SGIWD93_SCSI) += sgiwd93.o wd33c93.o
47obj-$(CONFIG_ATARI_SCSI) += atari_scsi.o 47obj-$(CONFIG_ATARI_SCSI) += atari_scsi.o
48obj-$(CONFIG_MAC_SCSI) += mac_scsi.o 48obj-$(CONFIG_MAC_SCSI) += mac_scsi.o
49obj-$(CONFIG_SCSI_MAC_ESP) += esp_scsi.o mac_esp.o
49obj-$(CONFIG_SUN3_SCSI) += sun3_scsi.o sun3_scsi_vme.o 50obj-$(CONFIG_SUN3_SCSI) += sun3_scsi.o sun3_scsi_vme.o
50obj-$(CONFIG_MVME16x_SCSI) += 53c700.o mvme16x_scsi.o 51obj-$(CONFIG_MVME16x_SCSI) += 53c700.o mvme16x_scsi.o
51obj-$(CONFIG_BVME6000_SCSI) += 53c700.o bvme6000_scsi.o 52obj-$(CONFIG_BVME6000_SCSI) += 53c700.o bvme6000_scsi.o
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 6ccdc96cc480..a09b2d3fdf5a 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -1432,15 +1432,10 @@ static void run(struct work_struct *work)
1432 */ 1432 */
1433static irqreturn_t intr(int irqno, void *dev_id) 1433static irqreturn_t intr(int irqno, void *dev_id)
1434{ 1434{
1435 struct Scsi_Host *shpnt = (struct Scsi_Host *)dev_id; 1435 struct Scsi_Host *shpnt = dev_id;
1436 unsigned long flags; 1436 unsigned long flags;
1437 unsigned char rev, dmacntrl0; 1437 unsigned char rev, dmacntrl0;
1438 1438
1439 if (!shpnt) {
1440 printk(KERN_ERR "aha152x: catched interrupt %d for unknown controller.\n", irqno);
1441 return IRQ_NONE;
1442 }
1443
1444 /* 1439 /*
1445 * Read a couple of registers that are known to not be all 1's. If 1440 * Read a couple of registers that are known to not be all 1's. If
1446 * we read all 1's (-1), that means that either: 1441 * we read all 1's (-1), that means that either:
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index 5a1471c370fa..80594947c6f6 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -153,8 +153,6 @@ struct aha1542_hostdata {
153 153
154#define HOSTDATA(host) ((struct aha1542_hostdata *) &host->hostdata) 154#define HOSTDATA(host) ((struct aha1542_hostdata *) &host->hostdata)
155 155
156static struct Scsi_Host *aha_host[7]; /* One for each IRQ level (9-15) */
157
158static DEFINE_SPINLOCK(aha1542_lock); 156static DEFINE_SPINLOCK(aha1542_lock);
159 157
160 158
@@ -163,8 +161,7 @@ static DEFINE_SPINLOCK(aha1542_lock);
163 161
164static void setup_mailboxes(int base_io, struct Scsi_Host *shpnt); 162static void setup_mailboxes(int base_io, struct Scsi_Host *shpnt);
165static int aha1542_restart(struct Scsi_Host *shost); 163static int aha1542_restart(struct Scsi_Host *shost);
166static void aha1542_intr_handle(struct Scsi_Host *shost, void *dev_id); 164static void aha1542_intr_handle(struct Scsi_Host *shost);
167static irqreturn_t do_aha1542_intr_handle(int irq, void *dev_id);
168 165
169#define aha1542_intr_reset(base) outb(IRST, CONTROL(base)) 166#define aha1542_intr_reset(base) outb(IRST, CONTROL(base))
170 167
@@ -404,23 +401,19 @@ fail:
404} 401}
405 402
406/* A quick wrapper for do_aha1542_intr_handle to grab the spin lock */ 403/* A quick wrapper for do_aha1542_intr_handle to grab the spin lock */
407static irqreturn_t do_aha1542_intr_handle(int irq, void *dev_id) 404static irqreturn_t do_aha1542_intr_handle(int dummy, void *dev_id)
408{ 405{
409 unsigned long flags; 406 unsigned long flags;
410 struct Scsi_Host *shost; 407 struct Scsi_Host *shost = dev_id;
411
412 shost = aha_host[irq - 9];
413 if (!shost)
414 panic("Splunge!");
415 408
416 spin_lock_irqsave(shost->host_lock, flags); 409 spin_lock_irqsave(shost->host_lock, flags);
417 aha1542_intr_handle(shost, dev_id); 410 aha1542_intr_handle(shost);
418 spin_unlock_irqrestore(shost->host_lock, flags); 411 spin_unlock_irqrestore(shost->host_lock, flags);
419 return IRQ_HANDLED; 412 return IRQ_HANDLED;
420} 413}
421 414
422/* A "high" level interrupt handler */ 415/* A "high" level interrupt handler */
423static void aha1542_intr_handle(struct Scsi_Host *shost, void *dev_id) 416static void aha1542_intr_handle(struct Scsi_Host *shost)
424{ 417{
425 void (*my_done) (Scsi_Cmnd *) = NULL; 418 void (*my_done) (Scsi_Cmnd *) = NULL;
426 int errstatus, mbi, mbo, mbistatus; 419 int errstatus, mbi, mbo, mbistatus;
@@ -1197,7 +1190,8 @@ fail:
1197 1190
1198 DEB(printk("aha1542_detect: enable interrupt channel %d\n", irq_level)); 1191 DEB(printk("aha1542_detect: enable interrupt channel %d\n", irq_level));
1199 spin_lock_irqsave(&aha1542_lock, flags); 1192 spin_lock_irqsave(&aha1542_lock, flags);
1200 if (request_irq(irq_level, do_aha1542_intr_handle, 0, "aha1542", NULL)) { 1193 if (request_irq(irq_level, do_aha1542_intr_handle, 0,
1194 "aha1542", shpnt)) {
1201 printk(KERN_ERR "Unable to allocate IRQ for adaptec controller.\n"); 1195 printk(KERN_ERR "Unable to allocate IRQ for adaptec controller.\n");
1202 spin_unlock_irqrestore(&aha1542_lock, flags); 1196 spin_unlock_irqrestore(&aha1542_lock, flags);
1203 goto unregister; 1197 goto unregister;
@@ -1205,7 +1199,7 @@ fail:
1205 if (dma_chan != 0xFF) { 1199 if (dma_chan != 0xFF) {
1206 if (request_dma(dma_chan, "aha1542")) { 1200 if (request_dma(dma_chan, "aha1542")) {
1207 printk(KERN_ERR "Unable to allocate DMA channel for Adaptec.\n"); 1201 printk(KERN_ERR "Unable to allocate DMA channel for Adaptec.\n");
1208 free_irq(irq_level, NULL); 1202 free_irq(irq_level, shpnt);
1209 spin_unlock_irqrestore(&aha1542_lock, flags); 1203 spin_unlock_irqrestore(&aha1542_lock, flags);
1210 goto unregister; 1204 goto unregister;
1211 } 1205 }
@@ -1214,7 +1208,7 @@ fail:
1214 enable_dma(dma_chan); 1208 enable_dma(dma_chan);
1215 } 1209 }
1216 } 1210 }
1217 aha_host[irq_level - 9] = shpnt; 1211
1218 shpnt->this_id = scsi_id; 1212 shpnt->this_id = scsi_id;
1219 shpnt->unique_id = base_io; 1213 shpnt->unique_id = base_io;
1220 shpnt->io_port = base_io; 1214 shpnt->io_port = base_io;
@@ -1276,7 +1270,7 @@ unregister:
1276static int aha1542_release(struct Scsi_Host *shost) 1270static int aha1542_release(struct Scsi_Host *shost)
1277{ 1271{
1278 if (shost->irq) 1272 if (shost->irq)
1279 free_irq(shost->irq, NULL); 1273 free_irq(shost->irq, shost);
1280 if (shost->dma_channel != 0xff) 1274 if (shost->dma_channel != 0xff)
1281 free_dma(shost->dma_channel); 1275 free_dma(shost->dma_channel);
1282 if (shost->io_port && shost->n_io_port) 1276 if (shost->io_port && shost->n_io_port)
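The aha152x and aha1542 hunks converge on the same idiom: register the per-host structure as the dev_id cookie and trust the IRQ core to pass it back, instead of keeping a global IRQ-to-host table (or a defensive NULL check). A minimal sketch of the pattern, with illustrative names (mydrv, my_intr):

#include <linux/interrupt.h>
#include <scsi/scsi_host.h>

static irqreturn_t my_intr(int irq, void *dev_id)
{
	struct Scsi_Host *shost = dev_id;	/* no global lookup needed */

	/* ... service the controller under shost->host_lock ... */
	return IRQ_HANDLED;
}

static int my_attach(struct Scsi_Host *shpnt, int irq_level)
{
	/* the cookie registered here must match the one given to free_irq() */
	return request_irq(irq_level, my_intr, 0, "mydrv", shpnt);
}

static void my_detach(struct Scsi_Host *shpnt, int irq_level)
{
	free_irq(irq_level, shpnt);
}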
diff --git a/drivers/scsi/aic7xxx/aic79xx.h b/drivers/scsi/aic7xxx/aic79xx.h
index 2f00467b6b8c..be5558ab84ea 100644
--- a/drivers/scsi/aic7xxx/aic79xx.h
+++ b/drivers/scsi/aic7xxx/aic79xx.h
@@ -815,7 +815,7 @@ struct ahd_tmode_tstate {
815struct ahd_phase_table_entry { 815struct ahd_phase_table_entry {
816 uint8_t phase; 816 uint8_t phase;
817 uint8_t mesg_out; /* Message response to parity errors */ 817 uint8_t mesg_out; /* Message response to parity errors */
818 char *phasemsg; 818 const char *phasemsg;
819}; 819};
820 820
821/************************** Serial EEPROM Format ******************************/ 821/************************** Serial EEPROM Format ******************************/
@@ -1314,7 +1314,7 @@ typedef int (ahd_device_setup_t)(struct ahd_softc *);
1314struct ahd_pci_identity { 1314struct ahd_pci_identity {
1315 uint64_t full_id; 1315 uint64_t full_id;
1316 uint64_t id_mask; 1316 uint64_t id_mask;
1317 char *name; 1317 const char *name;
1318 ahd_device_setup_t *setup; 1318 ahd_device_setup_t *setup;
1319}; 1319};
1320 1320
@@ -1322,7 +1322,7 @@ struct ahd_pci_identity {
1322struct aic7770_identity { 1322struct aic7770_identity {
1323 uint32_t full_id; 1323 uint32_t full_id;
1324 uint32_t id_mask; 1324 uint32_t id_mask;
1325 char *name; 1325 const char *name;
1326 ahd_device_setup_t *setup; 1326 ahd_device_setup_t *setup;
1327}; 1327};
1328extern struct aic7770_identity aic7770_ident_table []; 1328extern struct aic7770_identity aic7770_ident_table [];
@@ -1333,12 +1333,11 @@ extern const int ahd_num_aic7770_devs;
1333 1333
1334/*************************** Function Declarations ****************************/ 1334/*************************** Function Declarations ****************************/
1335/******************************************************************************/ 1335/******************************************************************************/
1336void ahd_reset_cmds_pending(struct ahd_softc *ahd);
1337 1336
1338/***************************** PCI Front End *********************************/ 1337/***************************** PCI Front End *********************************/
1339struct ahd_pci_identity *ahd_find_pci_device(ahd_dev_softc_t); 1338const struct ahd_pci_identity *ahd_find_pci_device(ahd_dev_softc_t);
1340int ahd_pci_config(struct ahd_softc *, 1339int ahd_pci_config(struct ahd_softc *,
1341 struct ahd_pci_identity *); 1340 const struct ahd_pci_identity *);
1342int ahd_pci_test_register_access(struct ahd_softc *); 1341int ahd_pci_test_register_access(struct ahd_softc *);
1343#ifdef CONFIG_PM 1342#ifdef CONFIG_PM
1344void ahd_pci_suspend(struct ahd_softc *); 1343void ahd_pci_suspend(struct ahd_softc *);
@@ -1376,16 +1375,6 @@ int ahd_write_flexport(struct ahd_softc *ahd,
1376int ahd_read_flexport(struct ahd_softc *ahd, u_int addr, 1375int ahd_read_flexport(struct ahd_softc *ahd, u_int addr,
1377 uint8_t *value); 1376 uint8_t *value);
1378 1377
1379/*************************** Interrupt Services *******************************/
1380void ahd_run_qoutfifo(struct ahd_softc *ahd);
1381#ifdef AHD_TARGET_MODE
1382void ahd_run_tqinfifo(struct ahd_softc *ahd, int paused);
1383#endif
1384void ahd_handle_hwerrint(struct ahd_softc *ahd);
1385void ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat);
1386void ahd_handle_scsiint(struct ahd_softc *ahd,
1387 u_int intstat);
1388
1389/***************************** Error Recovery *********************************/ 1378/***************************** Error Recovery *********************************/
1390typedef enum { 1379typedef enum {
1391 SEARCH_COMPLETE, 1380 SEARCH_COMPLETE,
@@ -1479,7 +1468,7 @@ extern uint32_t ahd_debug;
1479void ahd_print_devinfo(struct ahd_softc *ahd, 1468void ahd_print_devinfo(struct ahd_softc *ahd,
1480 struct ahd_devinfo *devinfo); 1469 struct ahd_devinfo *devinfo);
1481void ahd_dump_card_state(struct ahd_softc *ahd); 1470void ahd_dump_card_state(struct ahd_softc *ahd);
1482int ahd_print_register(ahd_reg_parse_entry_t *table, 1471int ahd_print_register(const ahd_reg_parse_entry_t *table,
1483 u_int num_entries, 1472 u_int num_entries,
1484 const char *name, 1473 const char *name,
1485 u_int address, 1474 u_int address,
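A recurring theme in these aic79xx.h hunks is const-correctness for lookup tables: string members become const char *, and table lookups return const pointers. Beyond letting the tables live in read-only data, this turns accidental writes into compile errors. A small illustration (names are made up):

struct phase_entry {
	unsigned char	phase;
	const char	*phasemsg;
};

static const struct phase_entry phase_table[] = {
	{ 0x00, "in Data-out phase" },
	{ 0x01, "in Data-in phase" },
};

/* phase_table[0].phasemsg = "oops"; -- would now fail to compile */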
diff --git a/drivers/scsi/aic7xxx/aic79xx.reg b/drivers/scsi/aic7xxx/aic79xx.reg
index be14e2ecb8f7..cca16fc5b4ad 100644
--- a/drivers/scsi/aic7xxx/aic79xx.reg
+++ b/drivers/scsi/aic7xxx/aic79xx.reg
@@ -198,6 +198,7 @@ register SEQINTCODE {
198register CLRINT { 198register CLRINT {
199 address 0x003 199 address 0x003
200 access_mode WO 200 access_mode WO
201 count 19
201 field CLRHWERRINT 0x80 /* Rev B or greater */ 202 field CLRHWERRINT 0x80 /* Rev B or greater */
202 field CLRBRKADRINT 0x40 203 field CLRBRKADRINT 0x40
203 field CLRSWTMINT 0x20 204 field CLRSWTMINT 0x20
@@ -245,6 +246,7 @@ register CLRERR {
245register HCNTRL { 246register HCNTRL {
246 address 0x005 247 address 0x005
247 access_mode RW 248 access_mode RW
249 count 12
248 field SEQ_RESET 0x80 /* Rev B or greater */ 250 field SEQ_RESET 0x80 /* Rev B or greater */
249 field POWRDN 0x40 251 field POWRDN 0x40
250 field SWINT 0x10 252 field SWINT 0x10
@@ -262,6 +264,7 @@ register HNSCB_QOFF {
262 address 0x006 264 address 0x006
263 access_mode RW 265 access_mode RW
264 size 2 266 size 2
267 count 2
265} 268}
266 269
267/* 270/*
@@ -270,6 +273,7 @@ register HNSCB_QOFF {
270register HESCB_QOFF { 273register HESCB_QOFF {
271 address 0x008 274 address 0x008
272 access_mode RW 275 access_mode RW
276 count 2
273} 277}
274 278
275/* 279/*
@@ -287,6 +291,7 @@ register HS_MAILBOX {
287 */ 291 */
288register SEQINTSTAT { 292register SEQINTSTAT {
289 address 0x00C 293 address 0x00C
294 count 1
290 access_mode RO 295 access_mode RO
291 field SEQ_SWTMRTO 0x10 296 field SEQ_SWTMRTO 0x10
292 field SEQ_SEQINT 0x08 297 field SEQ_SEQINT 0x08
@@ -332,6 +337,7 @@ register SNSCB_QOFF {
332 */ 337 */
333register SESCB_QOFF { 338register SESCB_QOFF {
334 address 0x012 339 address 0x012
340 count 2
335 access_mode RW 341 access_mode RW
336 modes M_CCHAN 342 modes M_CCHAN
337} 343}
@@ -397,6 +403,7 @@ register DFCNTRL {
397 address 0x019 403 address 0x019
398 access_mode RW 404 access_mode RW
399 modes M_DFF0, M_DFF1 405 modes M_DFF0, M_DFF1
406 count 11
400 field PRELOADEN 0x80 407 field PRELOADEN 0x80
401 field SCSIENWRDIS 0x40 /* Rev B only. */ 408 field SCSIENWRDIS 0x40 /* Rev B only. */
402 field SCSIEN 0x20 409 field SCSIEN 0x20
@@ -415,6 +422,7 @@ register DFCNTRL {
415 */ 422 */
416register DSCOMMAND0 { 423register DSCOMMAND0 {
417 address 0x019 424 address 0x019
425 count 1
418 access_mode RW 426 access_mode RW
419 modes M_CFG 427 modes M_CFG
420 field CACHETHEN 0x80 /* Cache Threshold enable */ 428 field CACHETHEN 0x80 /* Cache Threshold enable */
@@ -580,6 +588,7 @@ register DFF_THRSH {
580 address 0x088 588 address 0x088
581 access_mode RW 589 access_mode RW
582 modes M_CFG 590 modes M_CFG
591 count 1
583 field WR_DFTHRSH 0x70 { 592 field WR_DFTHRSH 0x70 {
584 WR_DFTHRSH_MIN, 593 WR_DFTHRSH_MIN,
585 WR_DFTHRSH_25, 594 WR_DFTHRSH_25,
@@ -800,6 +809,7 @@ register PCIXCTL {
800 address 0x093 809 address 0x093
801 access_mode RW 810 access_mode RW
802 modes M_CFG 811 modes M_CFG
812 count 1
803 field SERRPULSE 0x80 813 field SERRPULSE 0x80
804 field UNEXPSCIEN 0x20 814 field UNEXPSCIEN 0x20
805 field SPLTSMADIS 0x10 815 field SPLTSMADIS 0x10
@@ -844,6 +854,7 @@ register DCHSPLTSTAT0 {
844 address 0x096 854 address 0x096
845 access_mode RW 855 access_mode RW
846 modes M_DFF0, M_DFF1 856 modes M_DFF0, M_DFF1
857 count 2
847 field STAETERM 0x80 858 field STAETERM 0x80
848 field SCBCERR 0x40 859 field SCBCERR 0x40
849 field SCADERR 0x20 860 field SCADERR 0x20
@@ -895,6 +906,7 @@ register DCHSPLTSTAT1 {
895 address 0x097 906 address 0x097
896 access_mode RW 907 access_mode RW
897 modes M_DFF0, M_DFF1 908 modes M_DFF0, M_DFF1
909 count 2
898 field RXDATABUCKET 0x01 910 field RXDATABUCKET 0x01
899} 911}
900 912
@@ -1048,6 +1060,7 @@ register SGSPLTSTAT0 {
1048 address 0x09E 1060 address 0x09E
1049 access_mode RW 1061 access_mode RW
1050 modes M_DFF0, M_DFF1 1062 modes M_DFF0, M_DFF1
1063 count 2
1051 field STAETERM 0x80 1064 field STAETERM 0x80
1052 field SCBCERR 0x40 1065 field SCBCERR 0x40
1053 field SCADERR 0x20 1066 field SCADERR 0x20
@@ -1065,6 +1078,7 @@ register SGSPLTSTAT1 {
1065 address 0x09F 1078 address 0x09F
1066 access_mode RW 1079 access_mode RW
1067 modes M_DFF0, M_DFF1 1080 modes M_DFF0, M_DFF1
1081 count 2
1068 field RXDATABUCKET 0x01 1082 field RXDATABUCKET 0x01
1069} 1083}
1070 1084
@@ -1086,6 +1100,7 @@ register DF0PCISTAT {
1086 address 0x0A0 1100 address 0x0A0
1087 access_mode RW 1101 access_mode RW
1088 modes M_CFG 1102 modes M_CFG
1103 count 1
1089 field DPE 0x80 1104 field DPE 0x80
1090 field SSE 0x40 1105 field SSE 0x40
1091 field RMA 0x20 1106 field RMA 0x20
@@ -1184,6 +1199,7 @@ register TARGPCISTAT {
1184 address 0x0A7 1199 address 0x0A7
1185 access_mode RW 1200 access_mode RW
1186 modes M_CFG 1201 modes M_CFG
1202 count 5
1187 field DPE 0x80 1203 field DPE 0x80
1188 field SSE 0x40 1204 field SSE 0x40
1189 field STA 0x08 1205 field STA 0x08
@@ -1198,6 +1214,7 @@ register LQIN {
1198 address 0x020 1214 address 0x020
1199 access_mode RW 1215 access_mode RW
1200 size 20 1216 size 20
1217 count 2
1201 modes M_DFF0, M_DFF1, M_SCSI 1218 modes M_DFF0, M_DFF1, M_SCSI
1202} 1219}
1203 1220
@@ -1229,6 +1246,7 @@ register LUNPTR {
1229 address 0x022 1246 address 0x022
1230 access_mode RW 1247 access_mode RW
1231 modes M_CFG 1248 modes M_CFG
1249 count 2
1232} 1250}
1233 1251
1234/* 1252/*
@@ -1259,6 +1277,7 @@ register CMDLENPTR {
1259 address 0x025 1277 address 0x025
1260 access_mode RW 1278 access_mode RW
1261 modes M_CFG 1279 modes M_CFG
1280 count 1
1262} 1281}
1263 1282
1264/* 1283/*
@@ -1270,6 +1289,7 @@ register ATTRPTR {
1270 address 0x026 1289 address 0x026
1271 access_mode RW 1290 access_mode RW
1272 modes M_CFG 1291 modes M_CFG
1292 count 1
1273} 1293}
1274 1294
1275/* 1295/*
@@ -1281,6 +1301,7 @@ register FLAGPTR {
1281 address 0x027 1301 address 0x027
1282 access_mode RW 1302 access_mode RW
1283 modes M_CFG 1303 modes M_CFG
1304 count 1
1284} 1305}
1285 1306
1286/* 1307/*
@@ -1291,6 +1312,7 @@ register CMDPTR {
1291 address 0x028 1312 address 0x028
1292 access_mode RW 1313 access_mode RW
1293 modes M_CFG 1314 modes M_CFG
1315 count 1
1294} 1316}
1295 1317
1296/* 1318/*
@@ -1301,6 +1323,7 @@ register QNEXTPTR {
1301 address 0x029 1323 address 0x029
1302 access_mode RW 1324 access_mode RW
1303 modes M_CFG 1325 modes M_CFG
1326 count 1
1304} 1327}
1305 1328
1306/* 1329/*
@@ -1323,6 +1346,7 @@ register ABRTBYTEPTR {
1323 address 0x02B 1346 address 0x02B
1324 access_mode RW 1347 access_mode RW
1325 modes M_CFG 1348 modes M_CFG
1349 count 1
1326} 1350}
1327 1351
1328/* 1352/*
@@ -1333,6 +1357,7 @@ register ABRTBITPTR {
1333 address 0x02C 1357 address 0x02C
1334 access_mode RW 1358 access_mode RW
1335 modes M_CFG 1359 modes M_CFG
1360 count 1
1336} 1361}
1337 1362
1338/* 1363/*
@@ -1370,6 +1395,7 @@ register LUNLEN {
1370 address 0x030 1395 address 0x030
1371 access_mode RW 1396 access_mode RW
1372 modes M_CFG 1397 modes M_CFG
1398 count 2
1373 mask ILUNLEN 0x0F 1399 mask ILUNLEN 0x0F
1374 mask TLUNLEN 0xF0 1400 mask TLUNLEN 0xF0
1375} 1401}
@@ -1383,6 +1409,7 @@ register CDBLIMIT {
1383 address 0x031 1409 address 0x031
1384 access_mode RW 1410 access_mode RW
1385 modes M_CFG 1411 modes M_CFG
1412 count 1
1386} 1413}
1387 1414
1388/* 1415/*
@@ -1394,6 +1421,7 @@ register MAXCMD {
1394 address 0x032 1421 address 0x032
1395 access_mode RW 1422 access_mode RW
1396 modes M_CFG 1423 modes M_CFG
1424 count 9
1397} 1425}
1398 1426
1399/* 1427/*
@@ -1458,6 +1486,7 @@ register LQCTL1 {
1458 address 0x038 1486 address 0x038
1459 access_mode RW 1487 access_mode RW
1460 modes M_DFF0, M_DFF1, M_SCSI 1488 modes M_DFF0, M_DFF1, M_SCSI
1489 count 2
1461 field PCI2PCI 0x04 1490 field PCI2PCI 0x04
1462 field SINGLECMD 0x02 1491 field SINGLECMD 0x02
1463 field ABORTPENDING 0x01 1492 field ABORTPENDING 0x01
@@ -1470,6 +1499,7 @@ register LQCTL2 {
1470 address 0x039 1499 address 0x039
1471 access_mode RW 1500 access_mode RW
1472 modes M_DFF0, M_DFF1, M_SCSI 1501 modes M_DFF0, M_DFF1, M_SCSI
1502 count 5
1473 field LQIRETRY 0x80 1503 field LQIRETRY 0x80
1474 field LQICONTINUE 0x40 1504 field LQICONTINUE 0x40
1475 field LQITOIDLE 0x20 1505 field LQITOIDLE 0x20
@@ -1528,6 +1558,7 @@ register SCSISEQ1 {
1528 address 0x03B 1558 address 0x03B
1529 access_mode RW 1559 access_mode RW
1530 modes M_DFF0, M_DFF1, M_SCSI 1560 modes M_DFF0, M_DFF1, M_SCSI
1561 count 8
1531 field MANUALCTL 0x40 1562 field MANUALCTL 0x40
1532 field ENSELI 0x20 1563 field ENSELI 0x20
1533 field ENRSELI 0x10 1564 field ENRSELI 0x10
@@ -1667,6 +1698,9 @@ register SCSISIGO {
1667 } 1698 }
1668} 1699}
1669 1700
1701/*
1702 * SCSI Control Signal In
1703 */
1670register SCSISIGI { 1704register SCSISIGI {
1671 address 0x041 1705 address 0x041
1672 access_mode RO 1706 access_mode RO
@@ -1703,6 +1737,7 @@ register MULTARGID {
1703 access_mode RW 1737 access_mode RW
1704 modes M_CFG 1738 modes M_CFG
1705 size 2 1739 size 2
1740 count 2
1706} 1741}
1707 1742
1708/* 1743/*
@@ -1758,6 +1793,7 @@ register TARGIDIN {
1758 address 0x048 1793 address 0x048
1759 access_mode RO 1794 access_mode RO
1760 modes M_DFF0, M_DFF1, M_SCSI 1795 modes M_DFF0, M_DFF1, M_SCSI
1796 count 2
1761 field CLKOUT 0x80 1797 field CLKOUT 0x80
1762 field TARGID 0x0F 1798 field TARGID 0x0F
1763} 1799}
@@ -1798,6 +1834,7 @@ register OPTIONMODE {
1798 address 0x04A 1834 address 0x04A
1799 access_mode RW 1835 access_mode RW
1800 modes M_CFG 1836 modes M_CFG
1837 count 4
1801 field BIOSCANCTL 0x80 1838 field BIOSCANCTL 0x80
1802 field AUTOACKEN 0x40 1839 field AUTOACKEN 0x40
1803 field BIASCANCTL 0x20 1840 field BIASCANCTL 0x20
@@ -1850,6 +1887,7 @@ register SIMODE0 {
1850 address 0x04B 1887 address 0x04B
1851 access_mode RW 1888 access_mode RW
1852 modes M_CFG 1889 modes M_CFG
1890 count 8
1853 field ENSELDO 0x40 1891 field ENSELDO 0x40
1854 field ENSELDI 0x20 1892 field ENSELDI 0x20
1855 field ENSELINGO 0x10 1893 field ENSELINGO 0x10
@@ -1945,6 +1983,7 @@ register PERRDIAG {
1945 address 0x04E 1983 address 0x04E
1946 access_mode RO 1984 access_mode RO
1947 modes M_DFF0, M_DFF1, M_SCSI 1985 modes M_DFF0, M_DFF1, M_SCSI
1986 count 3
1948 field HIZERO 0x80 1987 field HIZERO 0x80
1949 field HIPERR 0x40 1988 field HIPERR 0x40
1950 field PREVPHASE 0x20 1989 field PREVPHASE 0x20
@@ -1962,6 +2001,7 @@ register LQISTATE {
1962 address 0x04E 2001 address 0x04E
1963 access_mode RO 2002 access_mode RO
1964 modes M_CFG 2003 modes M_CFG
2004 count 6
1965} 2005}
1966 2006
1967/* 2007/*
@@ -1971,6 +2011,7 @@ register SOFFCNT {
1971 address 0x04F 2011 address 0x04F
1972 access_mode RO 2012 access_mode RO
1973 modes M_DFF0, M_DFF1, M_SCSI 2013 modes M_DFF0, M_DFF1, M_SCSI
2014 count 1
1974} 2015}
1975 2016
1976/* 2017/*
@@ -1980,6 +2021,7 @@ register LQOSTATE {
1980 address 0x04F 2021 address 0x04F
1981 access_mode RO 2022 access_mode RO
1982 modes M_CFG 2023 modes M_CFG
2024 count 2
1983} 2025}
1984 2026
1985/* 2027/*
@@ -1989,6 +2031,7 @@ register LQISTAT0 {
1989 address 0x050 2031 address 0x050
1990 access_mode RO 2032 access_mode RO
1991 modes M_DFF0, M_DFF1, M_SCSI 2033 modes M_DFF0, M_DFF1, M_SCSI
2034 count 2
1992 field LQIATNQAS 0x20 2035 field LQIATNQAS 0x20
1993 field LQICRCT1 0x10 2036 field LQICRCT1 0x10
1994 field LQICRCT2 0x08 2037 field LQICRCT2 0x08
@@ -2004,6 +2047,7 @@ register CLRLQIINT0 {
2004 address 0x050 2047 address 0x050
2005 access_mode WO 2048 access_mode WO
2006 modes M_DFF0, M_DFF1, M_SCSI 2049 modes M_DFF0, M_DFF1, M_SCSI
2050 count 1
2007 field CLRLQIATNQAS 0x20 2051 field CLRLQIATNQAS 0x20
2008 field CLRLQICRCT1 0x10 2052 field CLRLQICRCT1 0x10
2009 field CLRLQICRCT2 0x08 2053 field CLRLQICRCT2 0x08
@@ -2019,6 +2063,7 @@ register LQIMODE0 {
2019 address 0x050 2063 address 0x050
2020 access_mode RW 2064 access_mode RW
2021 modes M_CFG 2065 modes M_CFG
2066 count 3
2022 field ENLQIATNQASK 0x20 2067 field ENLQIATNQASK 0x20
2023 field ENLQICRCT1 0x10 2068 field ENLQICRCT1 0x10
2024 field ENLQICRCT2 0x08 2069 field ENLQICRCT2 0x08
@@ -2034,6 +2079,7 @@ register LQISTAT1 {
2034 address 0x051 2079 address 0x051
2035 access_mode RO 2080 access_mode RO
2036 modes M_DFF0, M_DFF1, M_SCSI 2081 modes M_DFF0, M_DFF1, M_SCSI
2082 count 3
2037 field LQIPHASE_LQ 0x80 2083 field LQIPHASE_LQ 0x80
2038 field LQIPHASE_NLQ 0x40 2084 field LQIPHASE_NLQ 0x40
2039 field LQIABORT 0x20 2085 field LQIABORT 0x20
@@ -2051,6 +2097,7 @@ register CLRLQIINT1 {
2051 address 0x051 2097 address 0x051
2052 access_mode WO 2098 access_mode WO
2053 modes M_DFF0, M_DFF1, M_SCSI 2099 modes M_DFF0, M_DFF1, M_SCSI
2100 count 4
2054 field CLRLQIPHASE_LQ 0x80 2101 field CLRLQIPHASE_LQ 0x80
2055 field CLRLQIPHASE_NLQ 0x40 2102 field CLRLQIPHASE_NLQ 0x40
2056 field CLRLIQABORT 0x20 2103 field CLRLIQABORT 0x20
@@ -2068,6 +2115,7 @@ register LQIMODE1 {
2068 address 0x051 2115 address 0x051
2069 access_mode RW 2116 access_mode RW
2070 modes M_CFG 2117 modes M_CFG
2118 count 4
2071 field ENLQIPHASE_LQ 0x80 /* LQIPHASE1 */ 2119 field ENLQIPHASE_LQ 0x80 /* LQIPHASE1 */
2072 field ENLQIPHASE_NLQ 0x40 /* LQIPHASE2 */ 2120 field ENLQIPHASE_NLQ 0x40 /* LQIPHASE2 */
2073 field ENLIQABORT 0x20 2121 field ENLIQABORT 0x20
@@ -2102,6 +2150,7 @@ register SSTAT3 {
2102 address 0x053 2150 address 0x053
2103 access_mode RO 2151 access_mode RO
2104 modes M_DFF0, M_DFF1, M_SCSI 2152 modes M_DFF0, M_DFF1, M_SCSI
2153 count 3
2105 field NTRAMPERR 0x02 2154 field NTRAMPERR 0x02
2106 field OSRAMPERR 0x01 2155 field OSRAMPERR 0x01
2107} 2156}
@@ -2113,6 +2162,7 @@ register CLRSINT3 {
2113 address 0x053 2162 address 0x053
2114 access_mode WO 2163 access_mode WO
2115 modes M_DFF0, M_DFF1, M_SCSI 2164 modes M_DFF0, M_DFF1, M_SCSI
2165 count 3
2116 field CLRNTRAMPERR 0x02 2166 field CLRNTRAMPERR 0x02
2117 field CLROSRAMPERR 0x01 2167 field CLROSRAMPERR 0x01
2118} 2168}
@@ -2124,6 +2174,7 @@ register SIMODE3 {
2124 address 0x053 2174 address 0x053
2125 access_mode RW 2175 access_mode RW
2126 modes M_CFG 2176 modes M_CFG
2177 count 4
2127 field ENNTRAMPERR 0x02 2178 field ENNTRAMPERR 0x02
2128 field ENOSRAMPERR 0x01 2179 field ENOSRAMPERR 0x01
2129} 2180}
@@ -2135,6 +2186,7 @@ register LQOSTAT0 {
2135 address 0x054 2186 address 0x054
2136 access_mode RO 2187 access_mode RO
2137 modes M_DFF0, M_DFF1, M_SCSI 2188 modes M_DFF0, M_DFF1, M_SCSI
2189 count 2
2138 field LQOTARGSCBPERR 0x10 2190 field LQOTARGSCBPERR 0x10
2139 field LQOSTOPT2 0x08 2191 field LQOSTOPT2 0x08
2140 field LQOATNLQ 0x04 2192 field LQOATNLQ 0x04
@@ -2149,6 +2201,7 @@ register CLRLQOINT0 {
2149 address 0x054 2201 address 0x054
2150 access_mode WO 2202 access_mode WO
2151 modes M_DFF0, M_DFF1, M_SCSI 2203 modes M_DFF0, M_DFF1, M_SCSI
2204 count 3
2152 field CLRLQOTARGSCBPERR 0x10 2205 field CLRLQOTARGSCBPERR 0x10
2153 field CLRLQOSTOPT2 0x08 2206 field CLRLQOSTOPT2 0x08
2154 field CLRLQOATNLQ 0x04 2207 field CLRLQOATNLQ 0x04
@@ -2163,6 +2216,7 @@ register LQOMODE0 {
2163 address 0x054 2216 address 0x054
2164 access_mode RW 2217 access_mode RW
2165 modes M_CFG 2218 modes M_CFG
2219 count 4
2166 field ENLQOTARGSCBPERR 0x10 2220 field ENLQOTARGSCBPERR 0x10
2167 field ENLQOSTOPT2 0x08 2221 field ENLQOSTOPT2 0x08
2168 field ENLQOATNLQ 0x04 2222 field ENLQOATNLQ 0x04
@@ -2191,6 +2245,7 @@ register CLRLQOINT1 {
2191 address 0x055 2245 address 0x055
2192 access_mode WO 2246 access_mode WO
2193 modes M_DFF0, M_DFF1, M_SCSI 2247 modes M_DFF0, M_DFF1, M_SCSI
2248 count 7
2194 field CLRLQOINITSCBPERR 0x10 2249 field CLRLQOINITSCBPERR 0x10
2195 field CLRLQOSTOPI2 0x08 2250 field CLRLQOSTOPI2 0x08
2196 field CLRLQOBADQAS 0x04 2251 field CLRLQOBADQAS 0x04
@@ -2205,6 +2260,7 @@ register LQOMODE1 {
2205 address 0x055 2260 address 0x055
2206 access_mode RW 2261 access_mode RW
2207 modes M_CFG 2262 modes M_CFG
2263 count 4
2208 field ENLQOINITSCBPERR 0x10 2264 field ENLQOINITSCBPERR 0x10
2209 field ENLQOSTOPI2 0x08 2265 field ENLQOSTOPI2 0x08
2210 field ENLQOBADQAS 0x04 2266 field ENLQOBADQAS 0x04
@@ -2232,6 +2288,7 @@ register OS_SPACE_CNT {
2232 address 0x056 2288 address 0x056
2233 access_mode RO 2289 access_mode RO
2234 modes M_CFG 2290 modes M_CFG
2291 count 2
2235} 2292}
2236 2293
2237/* 2294/*
@@ -2286,13 +2343,19 @@ register NEXTSCB {
2286 modes M_SCSI 2343 modes M_SCSI
2287} 2344}
2288 2345
2289/* Rev B only. */ 2346/*
2347 * LQO SCSI Control
2348 * (Rev B only.)
2349 */
2290register LQOSCSCTL { 2350register LQOSCSCTL {
2291 address 0x05A 2351 address 0x05A
2292 access_mode RW 2352 access_mode RW
2293 size 1 2353 size 1
2294 modes M_CFG 2354 modes M_CFG
2355 count 1
2295 field LQOH2A_VERSION 0x80 2356 field LQOH2A_VERSION 0x80
2357 field LQOBUSETDLY 0x40
2358 field LQONOHOLDLACK 0x02
2296 field LQONOCHKOVER 0x01 2359 field LQONOCHKOVER 0x01
2297} 2360}
2298 2361
@@ -2459,6 +2522,7 @@ register NEGPERIOD {
2459 address 0x061 2522 address 0x061
2460 access_mode RW 2523 access_mode RW
2461 modes M_SCSI 2524 modes M_SCSI
2525 count 1
2462} 2526}
2463 2527
2464/* 2528/*
@@ -2478,6 +2542,7 @@ register NEGOFFSET {
2478 address 0x062 2542 address 0x062
2479 access_mode RW 2543 access_mode RW
2480 modes M_SCSI 2544 modes M_SCSI
2545 count 1
2481} 2546}
2482 2547
2483/* 2548/*
@@ -2487,6 +2552,7 @@ register NEGPPROPTS {
2487 address 0x063 2552 address 0x063
2488 access_mode RW 2553 access_mode RW
2489 modes M_SCSI 2554 modes M_SCSI
2555 count 1
2490 field PPROPT_PACE 0x08 2556 field PPROPT_PACE 0x08
2491 field PPROPT_QAS 0x04 2557 field PPROPT_QAS 0x04
2492 field PPROPT_DT 0x02 2558 field PPROPT_DT 0x02
@@ -2516,12 +2582,19 @@ register ANNEXCOL {
2516 address 0x065 2582 address 0x065
2517 access_mode RW 2583 access_mode RW
2518 modes M_SCSI 2584 modes M_SCSI
2585 count 7
2519} 2586}
2520 2587
2588/*
2589 * SCSI Check
2590 * (Rev. B only)
2591 */
2521register SCSCHKN { 2592register SCSCHKN {
2522 address 0x066 2593 address 0x066
2523 access_mode RW 2594 access_mode RW
2524 modes M_CFG 2595 modes M_CFG
2596 count 1
2597 field BIDICHKDIS 0x80
2525 field STSELSKIDDIS 0x40 2598 field STSELSKIDDIS 0x40
2526 field CURRFIFODEF 0x20 2599 field CURRFIFODEF 0x20
2527 field WIDERESEN 0x10 2600 field WIDERESEN 0x10
@@ -2561,6 +2634,7 @@ register ANNEXDAT {
2561 address 0x066 2634 address 0x066
2562 access_mode RW 2635 access_mode RW
2563 modes M_SCSI 2636 modes M_SCSI
2637 count 3
2564} 2638}
2565 2639
2566/* 2640/*
@@ -2596,6 +2670,7 @@ register TOWNID {
2596 address 0x069 2670 address 0x069
2597 access_mode RW 2671 access_mode RW
2598 modes M_SCSI 2672 modes M_SCSI
2673 count 2
2599} 2674}
2600 2675
2601/* 2676/*
@@ -2737,6 +2812,7 @@ register SCBAUTOPTR {
2737 address 0x0AB 2812 address 0x0AB
2738 access_mode RW 2813 access_mode RW
2739 modes M_CFG 2814 modes M_CFG
2815 count 1
2740 field AUSCBPTR_EN 0x80 2816 field AUSCBPTR_EN 0x80
2741 field SCBPTR_ADDR 0x38 2817 field SCBPTR_ADDR 0x38
2742 field SCBPTR_OFF 0x07 2818 field SCBPTR_OFF 0x07
@@ -2881,6 +2957,7 @@ register BRDDAT {
2881 address 0x0B8 2957 address 0x0B8
2882 access_mode RW 2958 access_mode RW
2883 modes M_SCSI 2959 modes M_SCSI
2960 count 2
2884} 2961}
2885 2962
2886/* 2963/*
@@ -2890,6 +2967,7 @@ register BRDCTL {
2890 address 0x0B9 2967 address 0x0B9
2891 access_mode RW 2968 access_mode RW
2892 modes M_SCSI 2969 modes M_SCSI
2970 count 7
2893 field FLXARBACK 0x80 2971 field FLXARBACK 0x80
2894 field FLXARBREQ 0x40 2972 field FLXARBREQ 0x40
2895 field BRDADDR 0x38 2973 field BRDADDR 0x38
@@ -2905,6 +2983,7 @@ register SEEADR {
2905 address 0x0BA 2983 address 0x0BA
2906 access_mode RW 2984 access_mode RW
2907 modes M_SCSI 2985 modes M_SCSI
2986 count 4
2908} 2987}
2909 2988
2910/* 2989/*
@@ -2915,6 +2994,7 @@ register SEEDAT {
2915 access_mode RW 2994 access_mode RW
2916 size 2 2995 size 2
2917 modes M_SCSI 2996 modes M_SCSI
2997 count 4
2918} 2998}
2919 2999
2920/* 3000/*
@@ -2924,6 +3004,7 @@ register SEESTAT {
2924 address 0x0BE 3004 address 0x0BE
2925 access_mode RO 3005 access_mode RO
2926 modes M_SCSI 3006 modes M_SCSI
3007 count 1
2927 field INIT_DONE 0x80 3008 field INIT_DONE 0x80
2928 field SEEOPCODE 0x70 3009 field SEEOPCODE 0x70
2929 field LDALTID_L 0x08 3010 field LDALTID_L 0x08
@@ -2939,6 +3020,7 @@ register SEECTL {
2939 address 0x0BE 3020 address 0x0BE
2940 access_mode RW 3021 access_mode RW
2941 modes M_SCSI 3022 modes M_SCSI
3023 count 4
2942 field SEEOPCODE 0x70 { 3024 field SEEOPCODE 0x70 {
2943 SEEOP_ERASE 0x70, 3025 SEEOP_ERASE 0x70,
2944 SEEOP_READ 0x60, 3026 SEEOP_READ 0x60,
@@ -3000,6 +3082,7 @@ register DSPDATACTL {
3000 address 0x0C1 3082 address 0x0C1
3001 access_mode RW 3083 access_mode RW
3002 modes M_CFG 3084 modes M_CFG
3085 count 3
3003 field BYPASSENAB 0x80 3086 field BYPASSENAB 0x80
3004 field DESQDIS 0x10 3087 field DESQDIS 0x10
3005 field RCVROFFSTDIS 0x04 3088 field RCVROFFSTDIS 0x04
@@ -3058,6 +3141,7 @@ register DSPSELECT {
3058 address 0x0C4 3141 address 0x0C4
3059 access_mode RW 3142 access_mode RW
3060 modes M_CFG 3143 modes M_CFG
3144 count 1
3061 field AUTOINCEN 0x80 3145 field AUTOINCEN 0x80
3062 field DSPSEL 0x1F 3146 field DSPSEL 0x1F
3063} 3147}
@@ -3071,6 +3155,7 @@ register WRTBIASCTL {
3071 address 0x0C5 3155 address 0x0C5
3072 access_mode WO 3156 access_mode WO
3073 modes M_CFG 3157 modes M_CFG
3158 count 3
3074 field AUTOXBCDIS 0x80 3159 field AUTOXBCDIS 0x80
3075 field XMITMANVAL 0x3F 3160 field XMITMANVAL 0x3F
3076} 3161}
@@ -3196,7 +3281,8 @@ register OVLYADDR {
3196 */ 3281 */
3197register SEQCTL0 { 3282register SEQCTL0 {
3198 address 0x0D6 3283 address 0x0D6
3199 access_mode RW 3284 access_mode RW
3285 count 11
3200 field PERRORDIS 0x80 3286 field PERRORDIS 0x80
3201 field PAUSEDIS 0x40 3287 field PAUSEDIS 0x40
3202 field FAILDIS 0x20 3288 field FAILDIS 0x20
@@ -3226,7 +3312,8 @@ register SEQCTL1 {
3226 */ 3312 */
3227register FLAGS { 3313register FLAGS {
3228 address 0x0D8 3314 address 0x0D8
3229 access_mode RO 3315 access_mode RO
3316 count 23
3230 field ZERO 0x02 3317 field ZERO 0x02
3231 field CARRY 0x01 3318 field CARRY 0x01
3232} 3319}
@@ -3255,7 +3342,8 @@ register SEQINTCTL {
3255 */ 3342 */
3256register SEQRAM { 3343register SEQRAM {
3257 address 0x0DA 3344 address 0x0DA
3258 access_mode RW 3345 access_mode RW
3346 count 2
3259} 3347}
3260 3348
3261/* 3349/*
@@ -3266,6 +3354,7 @@ register PRGMCNT {
3266 address 0x0DE 3354 address 0x0DE
3267 access_mode RW 3355 access_mode RW
3268 size 2 3356 size 2
3357 count 5
3269} 3358}
3270 3359
3271/* 3360/*
@@ -3273,7 +3362,7 @@ register PRGMCNT {
3273 */ 3362 */
3274register ACCUM { 3363register ACCUM {
3275 address 0x0E0 3364 address 0x0E0
3276 access_mode RW 3365 access_mode RW
3277 accumulator 3366 accumulator
3278} 3367}
3279 3368
@@ -3401,6 +3490,7 @@ register INTVEC1_ADDR {
3401 access_mode RW 3490 access_mode RW
3402 size 2 3491 size 2
3403 modes M_CFG 3492 modes M_CFG
3493 count 1
3404} 3494}
3405 3495
3406/* 3496/*
@@ -3412,6 +3502,7 @@ register CURADDR {
3412 access_mode RW 3502 access_mode RW
3413 size 2 3503 size 2
3414 modes M_SCSI 3504 modes M_SCSI
3505 count 2
3415} 3506}
3416 3507
3417/* 3508/*
@@ -3423,6 +3514,7 @@ register INTVEC2_ADDR {
3423 access_mode RW 3514 access_mode RW
3424 size 2 3515 size 2
3425 modes M_CFG 3516 modes M_CFG
3517 count 1
3426} 3518}
3427 3519
3428/* 3520/*
@@ -3579,6 +3671,7 @@ scratch_ram {
3579 /* Parameters for DMA Logic */ 3671 /* Parameters for DMA Logic */
3580 DMAPARAMS { 3672 DMAPARAMS {
3581 size 1 3673 size 1
3674 count 8
3582 field PRELOADEN 0x80 3675 field PRELOADEN 0x80
3583 field WIDEODD 0x40 3676 field WIDEODD 0x40
3584 field SCSIEN 0x20 3677 field SCSIEN 0x20
@@ -3648,9 +3741,11 @@ scratch_ram {
3648 */ 3741 */
3649 KERNEL_TQINPOS { 3742 KERNEL_TQINPOS {
3650 size 1 3743 size 1
3744 count 1
3651 } 3745 }
3652 TQINPOS { 3746 TQINPOS {
3653 size 1 3747 size 1
3748 count 8
3654 } 3749 }
3655 /* 3750 /*
3656 * Base address of our shared data with the kernel driver in host 3751 * Base address of our shared data with the kernel driver in host
@@ -3681,6 +3776,7 @@ scratch_ram {
3681 } 3776 }
3682 ARG_2 { 3777 ARG_2 {
3683 size 1 3778 size 1
3779 count 1
3684 alias RETURN_2 3780 alias RETURN_2
3685 } 3781 }
3686 3782
@@ -3698,6 +3794,7 @@ scratch_ram {
3698 */ 3794 */
3699 SCSISEQ_TEMPLATE { 3795 SCSISEQ_TEMPLATE {
3700 size 1 3796 size 1
3797 count 7
3701 field MANUALCTL 0x40 3798 field MANUALCTL 0x40
3702 field ENSELI 0x20 3799 field ENSELI 0x20
3703 field ENRSELI 0x10 3800 field ENRSELI 0x10
@@ -3711,6 +3808,7 @@ scratch_ram {
3711 */ 3808 */
3712 INITIATOR_TAG { 3809 INITIATOR_TAG {
3713 size 1 3810 size 1
3811 count 1
3714 } 3812 }
3715 3813
3716 SEQ_FLAGS2 { 3814 SEQ_FLAGS2 {
@@ -3777,6 +3875,7 @@ scratch_ram {
3777 */ 3875 */
3778 CMDSIZE_TABLE { 3876 CMDSIZE_TABLE {
3779 size 8 3877 size 8
3878 count 8
3780 } 3879 }
3781 /* 3880 /*
3782 * When an SCB with the MK_MESSAGE flag is 3881 * When an SCB with the MK_MESSAGE flag is
@@ -3803,8 +3902,8 @@ scratch_ram {
3803/************************* Hardware SCB Definition ****************************/ 3902/************************* Hardware SCB Definition ****************************/
3804scb { 3903scb {
3805 address 0x180 3904 address 0x180
3806 size 64 3905 size 64
3807 modes 0, 1, 2, 3 3906 modes 0, 1, 2, 3
3808 SCB_RESIDUAL_DATACNT { 3907 SCB_RESIDUAL_DATACNT {
3809 size 4 3908 size 4
3810 alias SCB_CDB_STORE 3909 alias SCB_CDB_STORE
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index ade0fb8fbdb2..55508b0fcec4 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -52,7 +52,7 @@
52 52
53 53
54/***************************** Lookup Tables **********************************/ 54/***************************** Lookup Tables **********************************/
55static char *ahd_chip_names[] = 55static const char *const ahd_chip_names[] =
56{ 56{
57 "NONE", 57 "NONE",
58 "aic7901", 58 "aic7901",
@@ -66,10 +66,10 @@ static const u_int num_chip_names = ARRAY_SIZE(ahd_chip_names);
66 */ 66 */
67struct ahd_hard_error_entry { 67struct ahd_hard_error_entry {
68 uint8_t errno; 68 uint8_t errno;
69 char *errmesg; 69 const char *errmesg;
70}; 70};
71 71
72static struct ahd_hard_error_entry ahd_hard_errors[] = { 72static const struct ahd_hard_error_entry ahd_hard_errors[] = {
73 { DSCTMOUT, "Discard Timer has timed out" }, 73 { DSCTMOUT, "Discard Timer has timed out" },
74 { ILLOPCODE, "Illegal Opcode in sequencer program" }, 74 { ILLOPCODE, "Illegal Opcode in sequencer program" },
75 { SQPARERR, "Sequencer Parity Error" }, 75 { SQPARERR, "Sequencer Parity Error" },
@@ -79,7 +79,7 @@ static struct ahd_hard_error_entry ahd_hard_errors[] = {
79}; 79};
80static const u_int num_errors = ARRAY_SIZE(ahd_hard_errors); 80static const u_int num_errors = ARRAY_SIZE(ahd_hard_errors);
81 81
82static struct ahd_phase_table_entry ahd_phase_table[] = 82static const struct ahd_phase_table_entry ahd_phase_table[] =
83{ 83{
84 { P_DATAOUT, MSG_NOOP, "in Data-out phase" }, 84 { P_DATAOUT, MSG_NOOP, "in Data-out phase" },
85 { P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" }, 85 { P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" },
@@ -213,7 +213,7 @@ static void ahd_dumpseq(struct ahd_softc *ahd);
213#endif 213#endif
214static void ahd_loadseq(struct ahd_softc *ahd); 214static void ahd_loadseq(struct ahd_softc *ahd);
215static int ahd_check_patch(struct ahd_softc *ahd, 215static int ahd_check_patch(struct ahd_softc *ahd,
216 struct patch **start_patch, 216 const struct patch **start_patch,
217 u_int start_instr, u_int *skip_addr); 217 u_int start_instr, u_int *skip_addr);
218static u_int ahd_resolve_seqaddr(struct ahd_softc *ahd, 218static u_int ahd_resolve_seqaddr(struct ahd_softc *ahd,
219 u_int address); 219 u_int address);
@@ -254,7 +254,7 @@ static void ahd_freeze_devq(struct ahd_softc *ahd,
254 struct scb *scb); 254 struct scb *scb);
255static void ahd_handle_scb_status(struct ahd_softc *ahd, 255static void ahd_handle_scb_status(struct ahd_softc *ahd,
256 struct scb *scb); 256 struct scb *scb);
257static struct ahd_phase_table_entry* ahd_lookup_phase_entry(int phase); 257static const struct ahd_phase_table_entry* ahd_lookup_phase_entry(int phase);
258static void ahd_shutdown(void *arg); 258static void ahd_shutdown(void *arg);
259static void ahd_update_coalescing_values(struct ahd_softc *ahd, 259static void ahd_update_coalescing_values(struct ahd_softc *ahd,
260 u_int timer, 260 u_int timer,
@@ -266,8 +266,774 @@ static int ahd_match_scb(struct ahd_softc *ahd, struct scb *scb,
266 int target, char channel, int lun, 266 int target, char channel, int lun,
267 u_int tag, role_t role); 267 u_int tag, role_t role);
268 268
269/******************************** Private Inlines *****************************/ 269static void ahd_reset_cmds_pending(struct ahd_softc *ahd);
270
271/*************************** Interrupt Services *******************************/
272static void ahd_run_qoutfifo(struct ahd_softc *ahd);
273#ifdef AHD_TARGET_MODE
274static void ahd_run_tqinfifo(struct ahd_softc *ahd, int paused);
275#endif
276static void ahd_handle_hwerrint(struct ahd_softc *ahd);
277static void ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat);
278static void ahd_handle_scsiint(struct ahd_softc *ahd,
279 u_int intstat);
280
281/************************ Sequencer Execution Control *************************/
282void
283ahd_set_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
284{
285 if (ahd->src_mode == src && ahd->dst_mode == dst)
286 return;
287#ifdef AHD_DEBUG
288 if (ahd->src_mode == AHD_MODE_UNKNOWN
289 || ahd->dst_mode == AHD_MODE_UNKNOWN)
290 panic("Setting mode prior to saving it.\n");
291 if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
292 printf("%s: Setting mode 0x%x\n", ahd_name(ahd),
293 ahd_build_mode_state(ahd, src, dst));
294#endif
295 ahd_outb(ahd, MODE_PTR, ahd_build_mode_state(ahd, src, dst));
296 ahd->src_mode = src;
297 ahd->dst_mode = dst;
298}
299
300static void
301ahd_update_modes(struct ahd_softc *ahd)
302{
303 ahd_mode_state mode_ptr;
304 ahd_mode src;
305 ahd_mode dst;
306
307 mode_ptr = ahd_inb(ahd, MODE_PTR);
308#ifdef AHD_DEBUG
309 if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
310 printf("Reading mode 0x%x\n", mode_ptr);
311#endif
312 ahd_extract_mode_state(ahd, mode_ptr, &src, &dst);
313 ahd_known_modes(ahd, src, dst);
314}
315
316static void
317ahd_assert_modes(struct ahd_softc *ahd, ahd_mode srcmode,
318 ahd_mode dstmode, const char *file, int line)
319{
320#ifdef AHD_DEBUG
321 if ((srcmode & AHD_MK_MSK(ahd->src_mode)) == 0
322 || (dstmode & AHD_MK_MSK(ahd->dst_mode)) == 0) {
323 panic("%s:%s:%d: Mode assertion failed.\n",
324 ahd_name(ahd), file, line);
325 }
326#endif
327}
328
329#define AHD_ASSERT_MODES(ahd, source, dest) \
330 ahd_assert_modes(ahd, source, dest, __FILE__, __LINE__);
331
332ahd_mode_state
333ahd_save_modes(struct ahd_softc *ahd)
334{
335 if (ahd->src_mode == AHD_MODE_UNKNOWN
336 || ahd->dst_mode == AHD_MODE_UNKNOWN)
337 ahd_update_modes(ahd);
338
339 return (ahd_build_mode_state(ahd, ahd->src_mode, ahd->dst_mode));
340}
341
342void
343ahd_restore_modes(struct ahd_softc *ahd, ahd_mode_state state)
344{
345 ahd_mode src;
346 ahd_mode dst;
347
348 ahd_extract_mode_state(ahd, state, &src, &dst);
349 ahd_set_modes(ahd, src, dst);
350}
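A note on usage: the accessors above are normally used as a save/set/restore bracket around any register access that needs a particular mode pair. The following is a minimal sketch under that assumption (the function and its CCHAN-mode body are illustrative, not part of this patch):

	static void
	example_touch_cchan_regs(struct ahd_softc *ahd)
	{
		ahd_mode_state saved_modes;

		/* Remember whatever mode pair the caller left in effect. */
		saved_modes = ahd_save_modes(ahd);
		ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
		/* ... access CCHAN-mode registers here ... */
		ahd_restore_modes(ahd, saved_modes);
	}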
351
352/*
353 * Determine whether the sequencer has halted code execution.
354 * Returns non-zero status if the sequencer is stopped.
355 */
356int
357ahd_is_paused(struct ahd_softc *ahd)
358{
359 return ((ahd_inb(ahd, HCNTRL) & PAUSE) != 0);
360}
361
362/*
363 * Request that the sequencer stop and wait, indefinitely, for it
364 * to stop. The sequencer will only acknowledge that it is paused
365 * once it has reached an instruction boundary and PAUSEDIS is
366 * cleared in the SEQCTL register. The sequencer may use PAUSEDIS
367 * for critical sections.
368 */
369void
370ahd_pause(struct ahd_softc *ahd)
371{
372 ahd_outb(ahd, HCNTRL, ahd->pause);
373
374 /*
375 * Since the sequencer can disable pausing in a critical section, we
376 * must loop until it actually stops.
377 */
378 while (ahd_is_paused(ahd) == 0)
379 ;
380}
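A hedged sketch of the intended calling pattern (the bracketed body is assumed): callers that need stable sequencer state pause first, do their work, then unpause:

	ahd_pause(ahd);
	/* ... inspect SCBPTR, SEQINTCODE, etc. while halted ... */
	ahd_unpause(ahd);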
381
382/*
383 * Allow the sequencer to continue program execution.
384 * We check here to ensure that no additional interrupt
385 * sources that would cause the sequencer to halt have been
386 * asserted. If, for example, a SCSI bus reset is detected
387 * while we are fielding a different, pausing, interrupt type,
388 * we don't want to release the sequencer before going back
389 * into our interrupt handler and dealing with this new
390 * condition.
391 */
392void
393ahd_unpause(struct ahd_softc *ahd)
394{
395 /*
396 * Automatically restore our modes to those saved
397 * prior to the first change of the mode.
398 */
399 if (ahd->saved_src_mode != AHD_MODE_UNKNOWN
400 && ahd->saved_dst_mode != AHD_MODE_UNKNOWN) {
401 if ((ahd->flags & AHD_UPDATE_PEND_CMDS) != 0)
402 ahd_reset_cmds_pending(ahd);
403 ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode);
404 }
405
406 if ((ahd_inb(ahd, INTSTAT) & ~CMDCMPLT) == 0)
407 ahd_outb(ahd, HCNTRL, ahd->unpause);
408
409 ahd_known_modes(ahd, AHD_MODE_UNKNOWN, AHD_MODE_UNKNOWN);
410}
411
412/*********************** Scatter Gather List Handling *************************/
413void *
414ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb,
415 void *sgptr, dma_addr_t addr, bus_size_t len, int last)
416{
417 scb->sg_count++;
418 if (sizeof(dma_addr_t) > 4
419 && (ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
420 struct ahd_dma64_seg *sg;
421
422 sg = (struct ahd_dma64_seg *)sgptr;
423 sg->addr = ahd_htole64(addr);
424 sg->len = ahd_htole32(len | (last ? AHD_DMA_LAST_SEG : 0));
425 return (sg + 1);
426 } else {
427 struct ahd_dma_seg *sg;
428
429 sg = (struct ahd_dma_seg *)sgptr;
430 sg->addr = ahd_htole32(addr & 0xFFFFFFFF);
431 sg->len = ahd_htole32(len | ((addr >> 8) & 0x7F000000)
432 | (last ? AHD_DMA_LAST_SEG : 0));
433 return (sg + 1);
434 }
435}
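To make the return-value chaining concrete, here is a sketch of a caller emitting a two-segment list (the helper, addresses, and lengths are made up; real callers derive them from a DMA mapping):

	static void
	example_fill_sg(struct ahd_softc *ahd, struct scb *scb,
			dma_addr_t buf0, dma_addr_t buf1)
	{
		void *sg = scb->sg_list;

		/* Each call fills one segment and returns the next slot. */
		sg = ahd_sg_setup(ahd, scb, sg, buf0, 4096, /*last*/0);
		sg = ahd_sg_setup(ahd, scb, sg, buf1, 2048, /*last*/1);
	}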
436
437static void
438ahd_setup_scb_common(struct ahd_softc *ahd, struct scb *scb)
439{
440 /* XXX Handle target mode SCBs. */
441 scb->crc_retry_count = 0;
442 if ((scb->flags & SCB_PACKETIZED) != 0) {
443 /* XXX what about ACA?? It is type 4, but TAG_TYPE == 0x3. */
444 scb->hscb->task_attribute = scb->hscb->control & SCB_TAG_TYPE;
445 } else {
446 if (ahd_get_transfer_length(scb) & 0x01)
447 scb->hscb->task_attribute = SCB_XFERLEN_ODD;
448 else
449 scb->hscb->task_attribute = 0;
450 }
451
452 if (scb->hscb->cdb_len <= MAX_CDB_LEN_WITH_SENSE_ADDR
453 || (scb->hscb->cdb_len & SCB_CDB_LEN_PTR) != 0)
454 scb->hscb->shared_data.idata.cdb_plus_saddr.sense_addr =
455 ahd_htole32(scb->sense_busaddr);
456}
457
458static void
459ahd_setup_data_scb(struct ahd_softc *ahd, struct scb *scb)
460{
461 /*
462	 * Copy the first SG into the "current" data pointer area.
463 */
464 if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
465 struct ahd_dma64_seg *sg;
466
467 sg = (struct ahd_dma64_seg *)scb->sg_list;
468 scb->hscb->dataptr = sg->addr;
469 scb->hscb->datacnt = sg->len;
470 } else {
471 struct ahd_dma_seg *sg;
472 uint32_t *dataptr_words;
473
474 sg = (struct ahd_dma_seg *)scb->sg_list;
475 dataptr_words = (uint32_t*)&scb->hscb->dataptr;
476 dataptr_words[0] = sg->addr;
477 dataptr_words[1] = 0;
478 if ((ahd->flags & AHD_39BIT_ADDRESSING) != 0) {
479 uint64_t high_addr;
480
481 high_addr = ahd_le32toh(sg->len) & 0x7F000000;
482 scb->hscb->dataptr |= ahd_htole64(high_addr << 8);
483 }
484 scb->hscb->datacnt = sg->len;
485 }
486 /*
487 * Note where to find the SG entries in bus space.
488 * We also set the full residual flag which the
489 * sequencer will clear as soon as a data transfer
490 * occurs.
491 */
492 scb->hscb->sgptr = ahd_htole32(scb->sg_list_busaddr|SG_FULL_RESID);
493}
494
495static void
496ahd_setup_noxfer_scb(struct ahd_softc *ahd, struct scb *scb)
497{
498 scb->hscb->sgptr = ahd_htole32(SG_LIST_NULL);
499 scb->hscb->dataptr = 0;
500 scb->hscb->datacnt = 0;
501}
502
503/************************** Memory mapping routines ***************************/
504static void *
505ahd_sg_bus_to_virt(struct ahd_softc *ahd, struct scb *scb, uint32_t sg_busaddr)
506{
507 dma_addr_t sg_offset;
508
509 /* sg_list_phys points to entry 1, not 0 */
510 sg_offset = sg_busaddr - (scb->sg_list_busaddr - ahd_sg_size(ahd));
511 return ((uint8_t *)scb->sg_list + sg_offset);
512}
513
514static uint32_t
515ahd_sg_virt_to_bus(struct ahd_softc *ahd, struct scb *scb, void *sg)
516{
517 dma_addr_t sg_offset;
518
519 /* sg_list_phys points to entry 1, not 0 */
520 sg_offset = ((uint8_t *)sg - (uint8_t *)scb->sg_list)
521 - ahd_sg_size(ahd);
522
523 return (scb->sg_list_busaddr + sg_offset);
524}
525
526static void
527ahd_sync_scb(struct ahd_softc *ahd, struct scb *scb, int op)
528{
529 ahd_dmamap_sync(ahd, ahd->scb_data.hscb_dmat,
530 scb->hscb_map->dmamap,
531 /*offset*/(uint8_t*)scb->hscb - scb->hscb_map->vaddr,
532 /*len*/sizeof(*scb->hscb), op);
533}
534
535void
536ahd_sync_sglist(struct ahd_softc *ahd, struct scb *scb, int op)
537{
538 if (scb->sg_count == 0)
539 return;
540
541 ahd_dmamap_sync(ahd, ahd->scb_data.sg_dmat,
542 scb->sg_map->dmamap,
543 /*offset*/scb->sg_list_busaddr - ahd_sg_size(ahd),
544 /*len*/ahd_sg_size(ahd) * scb->sg_count, op);
545}
546
547static void
548ahd_sync_sense(struct ahd_softc *ahd, struct scb *scb, int op)
549{
550 ahd_dmamap_sync(ahd, ahd->scb_data.sense_dmat,
551 scb->sense_map->dmamap,
552 /*offset*/scb->sense_busaddr,
553 /*len*/AHD_SENSE_BUFSIZE, op);
554}
555
556#ifdef AHD_TARGET_MODE
557static uint32_t
558ahd_targetcmd_offset(struct ahd_softc *ahd, u_int index)
559{
560 return (((uint8_t *)&ahd->targetcmds[index])
561 - (uint8_t *)ahd->qoutfifo);
562}
563#endif
564
565/*********************** Miscellaneous Support Functions **********************/
566/*
567 * Return pointers to the transfer negotiation information
568 * for the specified our_id/remote_id pair.
569 */
570struct ahd_initiator_tinfo *
571ahd_fetch_transinfo(struct ahd_softc *ahd, char channel, u_int our_id,
572 u_int remote_id, struct ahd_tmode_tstate **tstate)
573{
574 /*
575 * Transfer data structures are stored from the perspective
576 * of the target role. Since the parameters for a connection
577 * in the initiator role to a given target are the same as
578 * when the roles are reversed, we pretend we are the target.
579 */
580 if (channel == 'B')
581 our_id += 8;
582 *tstate = ahd->enabled_targets[our_id];
583 return (&(*tstate)->transinfo[remote_id]);
584}
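An illustrative call (the helper and the IDs are assumed, not from this patch): fetching the negotiation state for our initiator ID 7 talking to target 3 on channel A:

	static void
	example_lookup_tinfo(struct ahd_softc *ahd)
	{
		struct ahd_tmode_tstate *tstate;
		struct ahd_initiator_tinfo *tinfo;

		tinfo = ahd_fetch_transinfo(ahd, 'A', /*our_id*/7,
					    /*remote_id*/3, &tstate);
		/* tinfo/tstate now describe the 7 <-> 3 connection. */
	}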
585
586uint16_t
587ahd_inw(struct ahd_softc *ahd, u_int port)
588{
589 /*
590 * Read high byte first as some registers increment
591 * or have other side effects when the low byte is
592 * read.
593 */
594 uint16_t r = ahd_inb(ahd, port+1) << 8;
595 return r | ahd_inb(ahd, port);
596}
597
598void
599ahd_outw(struct ahd_softc *ahd, u_int port, u_int value)
600{
601 /*
602	 * Write low byte first to accommodate registers
603	 * such as PRGMCNT where the order matters.
604 */
605 ahd_outb(ahd, port, value & 0xFF);
606 ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
607}
608
609uint32_t
610ahd_inl(struct ahd_softc *ahd, u_int port)
611{
612 return ((ahd_inb(ahd, port))
613 | (ahd_inb(ahd, port+1) << 8)
614 | (ahd_inb(ahd, port+2) << 16)
615 | (ahd_inb(ahd, port+3) << 24));
616}
617
618void
619ahd_outl(struct ahd_softc *ahd, u_int port, uint32_t value)
620{
621 ahd_outb(ahd, port, (value) & 0xFF);
622 ahd_outb(ahd, port+1, ((value) >> 8) & 0xFF);
623 ahd_outb(ahd, port+2, ((value) >> 16) & 0xFF);
624 ahd_outb(ahd, port+3, ((value) >> 24) & 0xFF);
625}
626
627uint64_t
628ahd_inq(struct ahd_softc *ahd, u_int port)
629{
630 return ((ahd_inb(ahd, port))
631 | (ahd_inb(ahd, port+1) << 8)
632 | (ahd_inb(ahd, port+2) << 16)
633 | (ahd_inb(ahd, port+3) << 24)
634 | (((uint64_t)ahd_inb(ahd, port+4)) << 32)
635 | (((uint64_t)ahd_inb(ahd, port+5)) << 40)
636 | (((uint64_t)ahd_inb(ahd, port+6)) << 48)
637 | (((uint64_t)ahd_inb(ahd, port+7)) << 56));
638}
639
640void
641ahd_outq(struct ahd_softc *ahd, u_int port, uint64_t value)
642{
643 ahd_outb(ahd, port, value & 0xFF);
644 ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
645 ahd_outb(ahd, port+2, (value >> 16) & 0xFF);
646 ahd_outb(ahd, port+3, (value >> 24) & 0xFF);
647 ahd_outb(ahd, port+4, (value >> 32) & 0xFF);
648 ahd_outb(ahd, port+5, (value >> 40) & 0xFF);
649 ahd_outb(ahd, port+6, (value >> 48) & 0xFF);
650 ahd_outb(ahd, port+7, (value >> 56) & 0xFF);
651}
652
653u_int
654ahd_get_scbptr(struct ahd_softc *ahd)
655{
656 AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
657 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
658 return (ahd_inb(ahd, SCBPTR) | (ahd_inb(ahd, SCBPTR + 1) << 8));
659}
660
661void
662ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr)
663{
664 AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
665 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
666 ahd_outb(ahd, SCBPTR, scbptr & 0xFF);
667 ahd_outb(ahd, SCBPTR+1, (scbptr >> 8) & 0xFF);
668}
669
670#if 0 /* unused */
671static u_int
672ahd_get_hnscb_qoff(struct ahd_softc *ahd)
673{
674 return (ahd_inw_atomic(ahd, HNSCB_QOFF));
675}
676#endif
677
678static void
679ahd_set_hnscb_qoff(struct ahd_softc *ahd, u_int value)
680{
681 ahd_outw_atomic(ahd, HNSCB_QOFF, value);
682}
683
684#if 0 /* unused */
685static u_int
686ahd_get_hescb_qoff(struct ahd_softc *ahd)
687{
688 return (ahd_inb(ahd, HESCB_QOFF));
689}
690#endif
691
692static void
693ahd_set_hescb_qoff(struct ahd_softc *ahd, u_int value)
694{
695 ahd_outb(ahd, HESCB_QOFF, value);
696}
697
698static u_int
699ahd_get_snscb_qoff(struct ahd_softc *ahd)
700{
701 u_int oldvalue;
702
703 AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
704 oldvalue = ahd_inw(ahd, SNSCB_QOFF);
705 ahd_outw(ahd, SNSCB_QOFF, oldvalue);
706 return (oldvalue);
707}
708
709static void
710ahd_set_snscb_qoff(struct ahd_softc *ahd, u_int value)
711{
712 AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
713 ahd_outw(ahd, SNSCB_QOFF, value);
714}
715
716#if 0 /* unused */
717static u_int
718ahd_get_sescb_qoff(struct ahd_softc *ahd)
719{
720 AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
721 return (ahd_inb(ahd, SESCB_QOFF));
722}
723#endif
724
725static void
726ahd_set_sescb_qoff(struct ahd_softc *ahd, u_int value)
727{
728 AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
729 ahd_outb(ahd, SESCB_QOFF, value);
730}
731
732#if 0 /* unused */
733static u_int
734ahd_get_sdscb_qoff(struct ahd_softc *ahd)
735{
736 AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
737 return (ahd_inb(ahd, SDSCB_QOFF) | (ahd_inb(ahd, SDSCB_QOFF + 1) << 8));
738}
739#endif
740
741static void
742ahd_set_sdscb_qoff(struct ahd_softc *ahd, u_int value)
743{
744 AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
745 ahd_outb(ahd, SDSCB_QOFF, value & 0xFF);
746 ahd_outb(ahd, SDSCB_QOFF+1, (value >> 8) & 0xFF);
747}
748
749u_int
750ahd_inb_scbram(struct ahd_softc *ahd, u_int offset)
751{
752 u_int value;
753
754 /*
755 * Workaround PCI-X Rev A. hardware bug.
756 * After a host read of SCB memory, the chip
757 * may become confused into thinking prefetch
758 * was required. This starts the discard timer
759 * running and can cause an unexpected discard
760	 * timer interrupt.  The workaround is to read
761 * a normal register prior to the exhaustion of
762 * the discard timer. The mode pointer register
763 * has no side effects and so serves well for
764 * this purpose.
765 *
766 * Razor #528
767 */
768 value = ahd_inb(ahd, offset);
769 if ((ahd->bugs & AHD_PCIX_SCBRAM_RD_BUG) != 0)
770 ahd_inb(ahd, MODE_PTR);
771 return (value);
772}
773
774u_int
775ahd_inw_scbram(struct ahd_softc *ahd, u_int offset)
776{
777 return (ahd_inb_scbram(ahd, offset)
778 | (ahd_inb_scbram(ahd, offset+1) << 8));
779}
780
781static uint32_t
782ahd_inl_scbram(struct ahd_softc *ahd, u_int offset)
783{
784 return (ahd_inw_scbram(ahd, offset)
785 | (ahd_inw_scbram(ahd, offset+2) << 16));
786}
787
788static uint64_t
789ahd_inq_scbram(struct ahd_softc *ahd, u_int offset)
790{
791 return (ahd_inl_scbram(ahd, offset)
792 | ((uint64_t)ahd_inl_scbram(ahd, offset+4)) << 32);
793}
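A sketch of how the SCBPTR window combines with these SCB RAM accessors (illustrative helper; it assumes the sequencer is already paused and that a mode allowing SCBPTR access is in effect; SCB_SCSIID is a real register offset):

	static u_int
	example_peek_scb_scsiid(struct ahd_softc *ahd, u_int tag)
	{
		u_int saved_scbptr;
		u_int scsiid;

		saved_scbptr = ahd_get_scbptr(ahd);
		ahd_set_scbptr(ahd, tag);
		scsiid = ahd_inb_scbram(ahd, SCB_SCSIID);
		ahd_set_scbptr(ahd, saved_scbptr);
		return (scsiid);
	}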
794
795struct scb *
796ahd_lookup_scb(struct ahd_softc *ahd, u_int tag)
797{
798 struct scb* scb;
799
800 if (tag >= AHD_SCB_MAX)
801 return (NULL);
802 scb = ahd->scb_data.scbindex[tag];
803 if (scb != NULL)
804 ahd_sync_scb(ahd, scb,
805 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
806 return (scb);
807}
808
809static void
810ahd_swap_with_next_hscb(struct ahd_softc *ahd, struct scb *scb)
811{
812 struct hardware_scb *q_hscb;
813 struct map_node *q_hscb_map;
814 uint32_t saved_hscb_busaddr;
815
816 /*
817 * Our queuing method is a bit tricky. The card
818 * knows in advance which HSCB (by address) to download,
819 * and we can't disappoint it. To achieve this, the next
820 * HSCB to download is saved off in ahd->next_queued_hscb.
821 * When we are called to queue "an arbitrary scb",
822 * we copy the contents of the incoming HSCB to the one
823 * the sequencer knows about, swap HSCB pointers and
824 * finally assign the SCB to the tag indexed location
825 * in the scb_array. This makes sure that we can still
826 * locate the correct SCB by SCB_TAG.
827 */
828 q_hscb = ahd->next_queued_hscb;
829 q_hscb_map = ahd->next_queued_hscb_map;
830 saved_hscb_busaddr = q_hscb->hscb_busaddr;
831 memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
832 q_hscb->hscb_busaddr = saved_hscb_busaddr;
833 q_hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr;
834
835 /* Now swap HSCB pointers. */
836 ahd->next_queued_hscb = scb->hscb;
837 ahd->next_queued_hscb_map = scb->hscb_map;
838 scb->hscb = q_hscb;
839 scb->hscb_map = q_hscb_map;
840
841 /* Now define the mapping from tag to SCB in the scbindex */
842 ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
843}
844
845/*
846 * Tell the sequencer about a new transaction to execute.
847 */
848void
849ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb)
850{
851 ahd_swap_with_next_hscb(ahd, scb);
852
853 if (SCBID_IS_NULL(SCB_GET_TAG(scb)))
854 panic("Attempt to queue invalid SCB tag %x\n",
855 SCB_GET_TAG(scb));
856
857 /*
858 * Keep a history of SCBs we've downloaded in the qinfifo.
859 */
860 ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb);
861 ahd->qinfifonext++;
862
863 if (scb->sg_count != 0)
864 ahd_setup_data_scb(ahd, scb);
865 else
866 ahd_setup_noxfer_scb(ahd, scb);
867 ahd_setup_scb_common(ahd, scb);
868
869 /*
870 * Make sure our data is consistent from the
871 * perspective of the adapter.
872 */
873 ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
874
875#ifdef AHD_DEBUG
876 if ((ahd_debug & AHD_SHOW_QUEUE) != 0) {
877 uint64_t host_dataptr;
878
879 host_dataptr = ahd_le64toh(scb->hscb->dataptr);
880 printf("%s: Queueing SCB %d:0x%x bus addr 0x%x - 0x%x%x/0x%x\n",
881 ahd_name(ahd),
882 SCB_GET_TAG(scb), scb->hscb->scsiid,
883 ahd_le32toh(scb->hscb->hscb_busaddr),
884 (u_int)((host_dataptr >> 32) & 0xFFFFFFFF),
885 (u_int)(host_dataptr & 0xFFFFFFFF),
886 ahd_le32toh(scb->hscb->datacnt));
887 }
888#endif
889 /* Tell the adapter about the newly queued SCB */
890 ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
891}
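One property worth spelling out: because ahd_swap_with_next_hscb() re-indexes the SCB under its tag, a queued SCB can always be recovered from that tag. A hypothetical sanity check (not in the driver):

	ahd_queue_scb(ahd, scb);
	/* The tag-to-SCB mapping must survive the HSCB swap. */
	BUG_ON(ahd_lookup_scb(ahd, SCB_GET_TAG(scb)) != scb);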
892
893/************************** Interrupt Processing ******************************/
894static void
895ahd_sync_qoutfifo(struct ahd_softc *ahd, int op)
896{
897 ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap,
898 /*offset*/0,
899 /*len*/AHD_SCB_MAX * sizeof(struct ahd_completion), op);
900}
901
902static void
903ahd_sync_tqinfifo(struct ahd_softc *ahd, int op)
904{
905#ifdef AHD_TARGET_MODE
906 if ((ahd->flags & AHD_TARGETROLE) != 0) {
907 ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
908 ahd->shared_data_map.dmamap,
909 ahd_targetcmd_offset(ahd, 0),
910 sizeof(struct target_cmd) * AHD_TMODE_CMDS,
911 op);
912 }
913#endif
914}
915
916/*
917 * See if the firmware has posted any completed commands
918 * into our in-core command complete fifos.
919 */
920#define AHD_RUN_QOUTFIFO 0x1
921#define AHD_RUN_TQINFIFO 0x2
922static u_int
923ahd_check_cmdcmpltqueues(struct ahd_softc *ahd)
924{
925 u_int retval;
926
927 retval = 0;
928 ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap,
929 /*offset*/ahd->qoutfifonext * sizeof(*ahd->qoutfifo),
930 /*len*/sizeof(*ahd->qoutfifo), BUS_DMASYNC_POSTREAD);
931 if (ahd->qoutfifo[ahd->qoutfifonext].valid_tag
932 == ahd->qoutfifonext_valid_tag)
933 retval |= AHD_RUN_QOUTFIFO;
934#ifdef AHD_TARGET_MODE
935 if ((ahd->flags & AHD_TARGETROLE) != 0
936 && (ahd->flags & AHD_TQINFIFO_BLOCKED) == 0) {
937 ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
938 ahd->shared_data_map.dmamap,
939				ahd_targetcmd_offset(ahd, ahd->tqinfifonext),
940 /*len*/sizeof(struct target_cmd),
941 BUS_DMASYNC_POSTREAD);
942 if (ahd->targetcmds[ahd->tqinfifonext].cmd_valid != 0)
943 retval |= AHD_RUN_TQINFIFO;
944 }
945#endif
946 return (retval);
947}
948
949/*
950 * Catch an interrupt from the adapter
951 */
952int
953ahd_intr(struct ahd_softc *ahd)
954{
955 u_int intstat;
956
957 if ((ahd->pause & INTEN) == 0) {
958 /*
959 * Our interrupt is not enabled on the chip
960 * and may be disabled for re-entrancy reasons,
961 * so just return. This is likely just a shared
962 * interrupt.
963 */
964 return (0);
965 }
966
967 /*
968 * Instead of directly reading the interrupt status register,
969 * infer the cause of the interrupt by checking our in-core
970 * completion queues. This avoids a costly PCI bus read in
971 * most cases.
972 */
973 if ((ahd->flags & AHD_ALL_INTERRUPTS) == 0
974 && (ahd_check_cmdcmpltqueues(ahd) != 0))
975 intstat = CMDCMPLT;
976 else
977 intstat = ahd_inb(ahd, INTSTAT);
978
979 if ((intstat & INT_PEND) == 0)
980 return (0);
981
982 if (intstat & CMDCMPLT) {
983 ahd_outb(ahd, CLRINT, CLRCMDINT);
984
985 /*
986 * Ensure that the chip sees that we've cleared
987 * this interrupt before we walk the output fifo.
988 * Otherwise, we may, due to posted bus writes,
989 * clear the interrupt after we finish the scan,
990 * and after the sequencer has added new entries
991 * and asserted the interrupt again.
992 */
993 if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
994 if (ahd_is_paused(ahd)) {
995 /*
996 * Potentially lost SEQINT.
997 * If SEQINTCODE is non-zero,
998 * simulate the SEQINT.
999 */
1000 if (ahd_inb(ahd, SEQINTCODE) != NO_SEQINT)
1001 intstat |= SEQINT;
1002 }
1003 } else {
1004 ahd_flush_device_writes(ahd);
1005 }
1006 ahd_run_qoutfifo(ahd);
1007 ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket]++;
1008 ahd->cmdcmplt_total++;
1009#ifdef AHD_TARGET_MODE
1010 if ((ahd->flags & AHD_TARGETROLE) != 0)
1011 ahd_run_tqinfifo(ahd, /*paused*/FALSE);
1012#endif
1013 }
1014
1015 /*
1016 * Handle statuses that may invalidate our cached
1017 * copy of INTSTAT separately.
1018 */
1019 if (intstat == 0xFF && (ahd->features & AHD_REMOVABLE) != 0) {
1020 /* Hot eject. Do nothing */
1021 } else if (intstat & HWERRINT) {
1022 ahd_handle_hwerrint(ahd);
1023 } else if ((intstat & (PCIINT|SPLTINT)) != 0) {
1024 ahd->bus_intr(ahd);
1025 } else {
1026
1027 if ((intstat & SEQINT) != 0)
1028 ahd_handle_seqint(ahd, intstat);
1029
1030 if ((intstat & SCSIINT) != 0)
1031 ahd_handle_scsiint(ahd, intstat);
1032 }
1033 return (1);
1034}
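For orientation, a sketch of the OS-side entry point that would call ahd_intr(); this is modeled loosely on the Linux wrapper, and the exact function shown is illustrative:

	static irqreturn_t
	example_linux_isr(int irq, void *dev_id)
	{
		struct ahd_softc *ahd = dev_id;
		u_long flags;
		int ours;

		/* Serialize against the rest of the driver. */
		ahd_lock(ahd, &flags);
		ours = ahd_intr(ahd);
		ahd_unlock(ahd, &flags);
		return IRQ_RETVAL(ours);
	}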
1035
1036/******************************** Private Inlines *****************************/
 static __inline void
 ahd_assert_atn(struct ahd_softc *ahd)
 {
@@ -280,7 +1046,7 @@ ahd_assert_atn(struct ahd_softc *ahd)
  * are currently in a packetized transfer.  We could
  * just as easily be sending or receiving a message.
  */
-static __inline int
+static int
 ahd_currently_packetized(struct ahd_softc *ahd)
 {
 	ahd_mode_state saved_modes;
@@ -896,7 +1662,7 @@ clrchn:
  * a copy of the first byte (little endian) of the sgptr
  * hscb field.
  */
-void
+static void
 ahd_run_qoutfifo(struct ahd_softc *ahd)
 {
 	struct ahd_completion *completion;
@@ -935,7 +1701,7 @@ ahd_run_qoutfifo(struct ahd_softc *ahd)
 }
 
 /************************* Interrupt Handling *********************************/
-void
+static void
 ahd_handle_hwerrint(struct ahd_softc *ahd)
 {
 	/*
@@ -1009,7 +1775,7 @@ ahd_dump_sglist(struct scb *scb)
 }
 #endif  /* AHD_DEBUG */
 
-void
+static void
 ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
 {
 	u_int seqintcode;
@@ -1621,7 +2387,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
 	ahd_unpause(ahd);
 }
 
-void
+static void
 ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat)
 {
 	struct scb *scb;
@@ -3571,11 +4337,11 @@ ahd_print_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
 		       devinfo->target, devinfo->lun);
 }
 
-static struct ahd_phase_table_entry*
+static const struct ahd_phase_table_entry*
 ahd_lookup_phase_entry(int phase)
 {
-	struct ahd_phase_table_entry *entry;
-	struct ahd_phase_table_entry *last_entry;
+	const struct ahd_phase_table_entry *entry;
+	const struct ahd_phase_table_entry *last_entry;
 
 	/*
 	 * num_phases doesn't include the default entry which
@@ -3941,7 +4707,7 @@ ahd_clear_msg_state(struct ahd_softc *ahd)
  */
 static void
 ahd_handle_message_phase(struct ahd_softc *ahd)
 {
 	struct ahd_devinfo devinfo;
 	u_int bus_phase;
 	int end_session;
@@ -5983,8 +6749,7 @@ found:
  */
 void
 ahd_free_scb(struct ahd_softc *ahd, struct scb *scb)
 {
-
 	/* Clean up for the next user */
 	scb->flags = SCB_FLAG_NONE;
 	scb->hscb->control = 0;
@@ -6272,6 +7037,24 @@ static const char *termstat_strings[] = {
 	"Not Configured"
 };
 
+/***************************** Timer Facilities *******************************/
+#define ahd_timer_init init_timer
+#define ahd_timer_stop del_timer_sync
+typedef void ahd_linux_callback_t (u_long);
+
+static void
+ahd_timer_reset(ahd_timer_t *timer, int usec, ahd_callback_t *func, void *arg)
+{
+	struct ahd_softc *ahd;
+
+	ahd = (struct ahd_softc *)arg;
+	del_timer(timer);
+	timer->data = (u_long)arg;
+	timer->expires = jiffies + (usec * HZ)/1000000;
+	timer->function = (ahd_linux_callback_t*)func;
+	add_timer(timer);
+}
+
 /*
  * Start the board, ready for normal operation
  */
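For context, a hedged example of how ahd_timer_reset() above would be armed; it is modeled on the driver's periodic statistics timer, and the field, constant, and callback names are assumptions:

	/* Re-arm the statistics timer to fire AHD_STAT_UPDATE_US from now. */
	ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US,
			ahd_stat_timer, ahd);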
@@ -7370,7 +8153,7 @@ ahd_qinfifo_count(struct ahd_softc *ahd)
 		+ ARRAY_SIZE(ahd->qinfifo) - wrap_qinpos);
 }
 
-void
+static void
 ahd_reset_cmds_pending(struct ahd_softc *ahd)
 {
 	struct scb *scb;
@@ -8571,7 +9354,7 @@ ahd_loadseq(struct ahd_softc *ahd)
 	struct cs cs_table[num_critical_sections];
 	u_int begin_set[num_critical_sections];
 	u_int end_set[num_critical_sections];
-	struct patch *cur_patch;
+	const struct patch *cur_patch;
 	u_int cs_count;
 	u_int cur_cs;
 	u_int i;
@@ -8726,11 +9509,11 @@ ahd_loadseq(struct ahd_softc *ahd)
 }
 
 static int
-ahd_check_patch(struct ahd_softc *ahd, struct patch **start_patch,
+ahd_check_patch(struct ahd_softc *ahd, const struct patch **start_patch,
 		u_int start_instr, u_int *skip_addr)
 {
-	struct patch *cur_patch;
-	struct patch *last_patch;
+	const struct patch *cur_patch;
+	const struct patch *last_patch;
 	u_int num_patches;
 
 	num_patches = ARRAY_SIZE(patches);
@@ -8764,7 +9547,7 @@ ahd_check_patch(struct ahd_softc *ahd, const struct patch **start_patch,
 static u_int
 ahd_resolve_seqaddr(struct ahd_softc *ahd, u_int address)
 {
-	struct patch *cur_patch;
+	const struct patch *cur_patch;
 	int address_offset;
 	u_int skip_addr;
 	u_int i;
@@ -8895,7 +9678,7 @@ sized:
 }
 
 int
-ahd_print_register(ahd_reg_parse_entry_t *table, u_int num_entries,
+ahd_print_register(const ahd_reg_parse_entry_t *table, u_int num_entries,
 		   const char *name, u_int address, u_int value,
 		   u_int *cur_column, u_int wrap_point)
 {
@@ -9886,7 +10669,7 @@ ahd_update_scsiid(struct ahd_softc *ahd, u_int targid_mask)
 #endif
 }
 
-void
+static void
 ahd_run_tqinfifo(struct ahd_softc *ahd, int paused)
 {
 	struct target_cmd *cmd;
diff --git a/drivers/scsi/aic7xxx/aic79xx_inline.h b/drivers/scsi/aic7xxx/aic79xx_inline.h
index 45e55575a0fa..5f12cf9d99d0 100644
--- a/drivers/scsi/aic7xxx/aic79xx_inline.h
+++ b/drivers/scsi/aic7xxx/aic79xx_inline.h
@@ -63,18 +63,15 @@ static __inline ahd_mode_state ahd_build_mode_state(struct ahd_softc *ahd,
 static __inline void ahd_extract_mode_state(struct ahd_softc *ahd,
 					    ahd_mode_state state,
 					    ahd_mode *src, ahd_mode *dst);
-static __inline void ahd_set_modes(struct ahd_softc *ahd, ahd_mode src,
-				   ahd_mode dst);
-static __inline void ahd_update_modes(struct ahd_softc *ahd);
-static __inline void ahd_assert_modes(struct ahd_softc *ahd, ahd_mode srcmode,
-				      ahd_mode dstmode, const char *file,
-				      int line);
-static __inline ahd_mode_state ahd_save_modes(struct ahd_softc *ahd);
-static __inline void ahd_restore_modes(struct ahd_softc *ahd,
-				       ahd_mode_state state);
-static __inline int ahd_is_paused(struct ahd_softc *ahd);
-static __inline void ahd_pause(struct ahd_softc *ahd);
-static __inline void ahd_unpause(struct ahd_softc *ahd);
+
+void ahd_set_modes(struct ahd_softc *ahd, ahd_mode src,
+		   ahd_mode dst);
+ahd_mode_state ahd_save_modes(struct ahd_softc *ahd);
+void ahd_restore_modes(struct ahd_softc *ahd,
+		       ahd_mode_state state);
+int ahd_is_paused(struct ahd_softc *ahd);
+void ahd_pause(struct ahd_softc *ahd);
+void ahd_unpause(struct ahd_softc *ahd);
 
 static __inline void
 ahd_known_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
@@ -99,256 +96,16 @@ ahd_extract_mode_state(struct ahd_softc *ahd, ahd_mode_state state,
 	*dst = (state & DST_MODE) >> DST_MODE_SHIFT;
 }
 
102static __inline void
103ahd_set_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
104{
105 if (ahd->src_mode == src && ahd->dst_mode == dst)
106 return;
107#ifdef AHD_DEBUG
108 if (ahd->src_mode == AHD_MODE_UNKNOWN
109 || ahd->dst_mode == AHD_MODE_UNKNOWN)
110 panic("Setting mode prior to saving it.\n");
111 if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
112 printf("%s: Setting mode 0x%x\n", ahd_name(ahd),
113 ahd_build_mode_state(ahd, src, dst));
114#endif
115 ahd_outb(ahd, MODE_PTR, ahd_build_mode_state(ahd, src, dst));
116 ahd->src_mode = src;
117 ahd->dst_mode = dst;
118}
119
120static __inline void
121ahd_update_modes(struct ahd_softc *ahd)
122{
123 ahd_mode_state mode_ptr;
124 ahd_mode src;
125 ahd_mode dst;
126
127 mode_ptr = ahd_inb(ahd, MODE_PTR);
128#ifdef AHD_DEBUG
129 if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
130 printf("Reading mode 0x%x\n", mode_ptr);
131#endif
132 ahd_extract_mode_state(ahd, mode_ptr, &src, &dst);
133 ahd_known_modes(ahd, src, dst);
134}
135
136static __inline void
137ahd_assert_modes(struct ahd_softc *ahd, ahd_mode srcmode,
138 ahd_mode dstmode, const char *file, int line)
139{
140#ifdef AHD_DEBUG
141 if ((srcmode & AHD_MK_MSK(ahd->src_mode)) == 0
142 || (dstmode & AHD_MK_MSK(ahd->dst_mode)) == 0) {
143 panic("%s:%s:%d: Mode assertion failed.\n",
144 ahd_name(ahd), file, line);
145 }
146#endif
147}
148
149static __inline ahd_mode_state
150ahd_save_modes(struct ahd_softc *ahd)
151{
152 if (ahd->src_mode == AHD_MODE_UNKNOWN
153 || ahd->dst_mode == AHD_MODE_UNKNOWN)
154 ahd_update_modes(ahd);
155
156 return (ahd_build_mode_state(ahd, ahd->src_mode, ahd->dst_mode));
157}
158
159static __inline void
160ahd_restore_modes(struct ahd_softc *ahd, ahd_mode_state state)
161{
162 ahd_mode src;
163 ahd_mode dst;
164
165 ahd_extract_mode_state(ahd, state, &src, &dst);
166 ahd_set_modes(ahd, src, dst);
167}
168
169#define AHD_ASSERT_MODES(ahd, source, dest) \
170 ahd_assert_modes(ahd, source, dest, __FILE__, __LINE__);
171
172/*
173 * Determine whether the sequencer has halted code execution.
174 * Returns non-zero status if the sequencer is stopped.
175 */
176static __inline int
177ahd_is_paused(struct ahd_softc *ahd)
178{
179 return ((ahd_inb(ahd, HCNTRL) & PAUSE) != 0);
180}
181
182/*
183 * Request that the sequencer stop and wait, indefinitely, for it
184 * to stop. The sequencer will only acknowledge that it is paused
185 * once it has reached an instruction boundary and PAUSEDIS is
186 * cleared in the SEQCTL register. The sequencer may use PAUSEDIS
187 * for critical sections.
188 */
189static __inline void
190ahd_pause(struct ahd_softc *ahd)
191{
192 ahd_outb(ahd, HCNTRL, ahd->pause);
193
194 /*
195 * Since the sequencer can disable pausing in a critical section, we
196 * must loop until it actually stops.
197 */
198 while (ahd_is_paused(ahd) == 0)
199 ;
200}
201
202/*
203 * Allow the sequencer to continue program execution.
204 * We check here to ensure that no additional interrupt
205 * sources that would cause the sequencer to halt have been
206 * asserted. If, for example, a SCSI bus reset is detected
207 * while we are fielding a different, pausing, interrupt type,
208 * we don't want to release the sequencer before going back
209 * into our interrupt handler and dealing with this new
210 * condition.
211 */
212static __inline void
213ahd_unpause(struct ahd_softc *ahd)
214{
215 /*
216 * Automatically restore our modes to those saved
217 * prior to the first change of the mode.
218 */
219 if (ahd->saved_src_mode != AHD_MODE_UNKNOWN
220 && ahd->saved_dst_mode != AHD_MODE_UNKNOWN) {
221 if ((ahd->flags & AHD_UPDATE_PEND_CMDS) != 0)
222 ahd_reset_cmds_pending(ahd);
223 ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode);
224 }
225
226 if ((ahd_inb(ahd, INTSTAT) & ~CMDCMPLT) == 0)
227 ahd_outb(ahd, HCNTRL, ahd->unpause);
228
229 ahd_known_modes(ahd, AHD_MODE_UNKNOWN, AHD_MODE_UNKNOWN);
230}
231
 /*********************** Scatter Gather List Handling *************************/
-static __inline void *ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb,
-				   void *sgptr, dma_addr_t addr,
-				   bus_size_t len, int last);
+void *ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb,
+		   void *sgptr, dma_addr_t addr,
+		   bus_size_t len, int last);
236static __inline void ahd_setup_scb_common(struct ahd_softc *ahd,
237 struct scb *scb);
238static __inline void ahd_setup_data_scb(struct ahd_softc *ahd,
239 struct scb *scb);
240static __inline void ahd_setup_noxfer_scb(struct ahd_softc *ahd,
241 struct scb *scb);
242
243static __inline void *
244ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb,
245 void *sgptr, dma_addr_t addr, bus_size_t len, int last)
246{
247 scb->sg_count++;
248 if (sizeof(dma_addr_t) > 4
249 && (ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
250 struct ahd_dma64_seg *sg;
251
252 sg = (struct ahd_dma64_seg *)sgptr;
253 sg->addr = ahd_htole64(addr);
254 sg->len = ahd_htole32(len | (last ? AHD_DMA_LAST_SEG : 0));
255 return (sg + 1);
256 } else {
257 struct ahd_dma_seg *sg;
258
259 sg = (struct ahd_dma_seg *)sgptr;
260 sg->addr = ahd_htole32(addr & 0xFFFFFFFF);
261 sg->len = ahd_htole32(len | ((addr >> 8) & 0x7F000000)
262 | (last ? AHD_DMA_LAST_SEG : 0));
263 return (sg + 1);
264 }
265}
266
267static __inline void
268ahd_setup_scb_common(struct ahd_softc *ahd, struct scb *scb)
269{
270 /* XXX Handle target mode SCBs. */
271 scb->crc_retry_count = 0;
272 if ((scb->flags & SCB_PACKETIZED) != 0) {
273 /* XXX what about ACA?? It is type 4, but TAG_TYPE == 0x3. */
274 scb->hscb->task_attribute = scb->hscb->control & SCB_TAG_TYPE;
275 } else {
276 if (ahd_get_transfer_length(scb) & 0x01)
277 scb->hscb->task_attribute = SCB_XFERLEN_ODD;
278 else
279 scb->hscb->task_attribute = 0;
280 }
281
282 if (scb->hscb->cdb_len <= MAX_CDB_LEN_WITH_SENSE_ADDR
283 || (scb->hscb->cdb_len & SCB_CDB_LEN_PTR) != 0)
284 scb->hscb->shared_data.idata.cdb_plus_saddr.sense_addr =
285 ahd_htole32(scb->sense_busaddr);
286}
287
288static __inline void
289ahd_setup_data_scb(struct ahd_softc *ahd, struct scb *scb)
290{
291 /*
292 * Copy the first SG into the "current" data ponter area.
293 */
294 if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
295 struct ahd_dma64_seg *sg;
296
297 sg = (struct ahd_dma64_seg *)scb->sg_list;
298 scb->hscb->dataptr = sg->addr;
299 scb->hscb->datacnt = sg->len;
300 } else {
301 struct ahd_dma_seg *sg;
302 uint32_t *dataptr_words;
303
304 sg = (struct ahd_dma_seg *)scb->sg_list;
305 dataptr_words = (uint32_t*)&scb->hscb->dataptr;
306 dataptr_words[0] = sg->addr;
307 dataptr_words[1] = 0;
308 if ((ahd->flags & AHD_39BIT_ADDRESSING) != 0) {
309 uint64_t high_addr;
310
311 high_addr = ahd_le32toh(sg->len) & 0x7F000000;
312 scb->hscb->dataptr |= ahd_htole64(high_addr << 8);
313 }
314 scb->hscb->datacnt = sg->len;
315 }
316 /*
317 * Note where to find the SG entries in bus space.
318 * We also set the full residual flag which the
319 * sequencer will clear as soon as a data transfer
320 * occurs.
321 */
322 scb->hscb->sgptr = ahd_htole32(scb->sg_list_busaddr|SG_FULL_RESID);
323}
324
325static __inline void
326ahd_setup_noxfer_scb(struct ahd_softc *ahd, struct scb *scb)
327{
328 scb->hscb->sgptr = ahd_htole32(SG_LIST_NULL);
329 scb->hscb->dataptr = 0;
330 scb->hscb->datacnt = 0;
331}
 
 /************************** Memory mapping routines ***************************/
 static __inline size_t	ahd_sg_size(struct ahd_softc *ahd);
-static __inline void *
-			ahd_sg_bus_to_virt(struct ahd_softc *ahd,
-					   struct scb *scb,
+
+void	ahd_sync_sglist(struct ahd_softc *ahd,
+			struct scb *scb, int op);
338 uint32_t sg_busaddr);
339static __inline uint32_t
340 ahd_sg_virt_to_bus(struct ahd_softc *ahd,
341 struct scb *scb,
342 void *sg);
343static __inline void ahd_sync_scb(struct ahd_softc *ahd,
344 struct scb *scb, int op);
345static __inline void ahd_sync_sglist(struct ahd_softc *ahd,
346 struct scb *scb, int op);
347static __inline void ahd_sync_sense(struct ahd_softc *ahd,
348 struct scb *scb, int op);
349static __inline uint32_t
350 ahd_targetcmd_offset(struct ahd_softc *ahd,
351 u_int index);
 
 static __inline size_t
 ahd_sg_size(struct ahd_softc *ahd)
@@ -358,104 +115,32 @@ ahd_sg_size(struct ahd_softc *ahd)
 	return (sizeof(struct ahd_dma_seg));
 }
 
361static __inline void *
362ahd_sg_bus_to_virt(struct ahd_softc *ahd, struct scb *scb, uint32_t sg_busaddr)
363{
364 dma_addr_t sg_offset;
365
366 /* sg_list_phys points to entry 1, not 0 */
367 sg_offset = sg_busaddr - (scb->sg_list_busaddr - ahd_sg_size(ahd));
368 return ((uint8_t *)scb->sg_list + sg_offset);
369}
370
371static __inline uint32_t
372ahd_sg_virt_to_bus(struct ahd_softc *ahd, struct scb *scb, void *sg)
373{
374 dma_addr_t sg_offset;
375
376 /* sg_list_phys points to entry 1, not 0 */
377 sg_offset = ((uint8_t *)sg - (uint8_t *)scb->sg_list)
378 - ahd_sg_size(ahd);
379
380 return (scb->sg_list_busaddr + sg_offset);
381}
382
383static __inline void
384ahd_sync_scb(struct ahd_softc *ahd, struct scb *scb, int op)
385{
386 ahd_dmamap_sync(ahd, ahd->scb_data.hscb_dmat,
387 scb->hscb_map->dmamap,
388 /*offset*/(uint8_t*)scb->hscb - scb->hscb_map->vaddr,
389 /*len*/sizeof(*scb->hscb), op);
390}
391
392static __inline void
393ahd_sync_sglist(struct ahd_softc *ahd, struct scb *scb, int op)
394{
395 if (scb->sg_count == 0)
396 return;
397
398 ahd_dmamap_sync(ahd, ahd->scb_data.sg_dmat,
399 scb->sg_map->dmamap,
400 /*offset*/scb->sg_list_busaddr - ahd_sg_size(ahd),
401 /*len*/ahd_sg_size(ahd) * scb->sg_count, op);
402}
403
404static __inline void
405ahd_sync_sense(struct ahd_softc *ahd, struct scb *scb, int op)
406{
407 ahd_dmamap_sync(ahd, ahd->scb_data.sense_dmat,
408 scb->sense_map->dmamap,
409 /*offset*/scb->sense_busaddr,
410 /*len*/AHD_SENSE_BUFSIZE, op);
411}
412
413static __inline uint32_t
414ahd_targetcmd_offset(struct ahd_softc *ahd, u_int index)
415{
416 return (((uint8_t *)&ahd->targetcmds[index])
417 - (uint8_t *)ahd->qoutfifo);
418}
419
 /*********************** Miscellaneous Support Functions ***********************/
-static __inline struct ahd_initiator_tinfo *
+struct ahd_initiator_tinfo *
 			ahd_fetch_transinfo(struct ahd_softc *ahd,
 					    char channel, u_int our_id,
 					    u_int remote_id,
 					    struct ahd_tmode_tstate **tstate);
-static __inline uint16_t
+uint16_t
 			ahd_inw(struct ahd_softc *ahd, u_int port);
-static __inline void	ahd_outw(struct ahd_softc *ahd, u_int port,
+void			ahd_outw(struct ahd_softc *ahd, u_int port,
 				 u_int value);
-static __inline uint32_t
+uint32_t
 			ahd_inl(struct ahd_softc *ahd, u_int port);
-static __inline void	ahd_outl(struct ahd_softc *ahd, u_int port,
+void			ahd_outl(struct ahd_softc *ahd, u_int port,
 				 uint32_t value);
-static __inline uint64_t
+uint64_t
 			ahd_inq(struct ahd_softc *ahd, u_int port);
-static __inline void	ahd_outq(struct ahd_softc *ahd, u_int port,
+void			ahd_outq(struct ahd_softc *ahd, u_int port,
 				 uint64_t value);
-static __inline u_int	ahd_get_scbptr(struct ahd_softc *ahd);
-static __inline void	ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr);
-static __inline u_int	ahd_get_hnscb_qoff(struct ahd_softc *ahd);
-static __inline void	ahd_set_hnscb_qoff(struct ahd_softc *ahd, u_int value);
-static __inline u_int	ahd_get_hescb_qoff(struct ahd_softc *ahd);
-static __inline void	ahd_set_hescb_qoff(struct ahd_softc *ahd, u_int value);
-static __inline u_int	ahd_get_snscb_qoff(struct ahd_softc *ahd);
-static __inline void	ahd_set_snscb_qoff(struct ahd_softc *ahd, u_int value);
+u_int			ahd_get_scbptr(struct ahd_softc *ahd);
+void			ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr);
+u_int			ahd_inb_scbram(struct ahd_softc *ahd, u_int offset);
+u_int			ahd_inw_scbram(struct ahd_softc *ahd, u_int offset);
+struct scb *
+			ahd_lookup_scb(struct ahd_softc *ahd, u_int tag);
+void			ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb);
+
446static __inline u_int ahd_get_sescb_qoff(struct ahd_softc *ahd);
447static __inline void ahd_set_sescb_qoff(struct ahd_softc *ahd, u_int value);
448static __inline u_int ahd_get_sdscb_qoff(struct ahd_softc *ahd);
449static __inline void ahd_set_sdscb_qoff(struct ahd_softc *ahd, u_int value);
450static __inline u_int ahd_inb_scbram(struct ahd_softc *ahd, u_int offset);
451static __inline u_int ahd_inw_scbram(struct ahd_softc *ahd, u_int offset);
452static __inline uint32_t
453 ahd_inl_scbram(struct ahd_softc *ahd, u_int offset);
454static __inline uint64_t
455 ahd_inq_scbram(struct ahd_softc *ahd, u_int offset);
456static __inline void ahd_swap_with_next_hscb(struct ahd_softc *ahd,
457 struct scb *scb);
458static __inline void ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb);
 static __inline uint8_t *
 			ahd_get_sense_buf(struct ahd_softc *ahd,
 					  struct scb *scb);
@@ -463,25 +148,7 @@ static __inline uint32_t
 			ahd_get_sense_bufaddr(struct ahd_softc *ahd,
 					      struct scb *scb);
 
-/*
+#if 0 /* unused */
467 * Return pointers to the transfer negotiation information
468 * for the specified our_id/remote_id pair.
469 */
470static __inline struct ahd_initiator_tinfo *
471ahd_fetch_transinfo(struct ahd_softc *ahd, char channel, u_int our_id,
472 u_int remote_id, struct ahd_tmode_tstate **tstate)
473{
474 /*
475 * Transfer data structures are stored from the perspective
476 * of the target role. Since the parameters for a connection
477 * in the initiator role to a given target are the same as
478 * when the roles are reversed, we pretend we are the target.
479 */
480 if (channel == 'B')
481 our_id += 8;
482 *tstate = ahd->enabled_targets[our_id];
483 return (&(*tstate)->transinfo[remote_id]);
484}
 
 #define AHD_COPY_COL_IDX(dst, src)				\
 do {								\
@@ -489,304 +156,7 @@ do {				\
 	dst->hscb->lun = src->hscb->lun;			\
 } while (0)
 
492static __inline uint16_t
493ahd_inw(struct ahd_softc *ahd, u_int port)
494{
495 /*
496 * Read high byte first as some registers increment
497 * or have other side effects when the low byte is
498 * read.
499 */
500 uint16_t r = ahd_inb(ahd, port+1) << 8;
501 return r | ahd_inb(ahd, port);
502}
503
504static __inline void
505ahd_outw(struct ahd_softc *ahd, u_int port, u_int value)
506{
507 /*
508 * Write low byte first to accomodate registers
509 * such as PRGMCNT where the order maters.
510 */
511 ahd_outb(ahd, port, value & 0xFF);
512 ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
513}
514
515static __inline uint32_t
516ahd_inl(struct ahd_softc *ahd, u_int port)
517{
518 return ((ahd_inb(ahd, port))
519 | (ahd_inb(ahd, port+1) << 8)
520 | (ahd_inb(ahd, port+2) << 16)
521 | (ahd_inb(ahd, port+3) << 24));
522}
523
524static __inline void
525ahd_outl(struct ahd_softc *ahd, u_int port, uint32_t value)
526{
527 ahd_outb(ahd, port, (value) & 0xFF);
528 ahd_outb(ahd, port+1, ((value) >> 8) & 0xFF);
529 ahd_outb(ahd, port+2, ((value) >> 16) & 0xFF);
530 ahd_outb(ahd, port+3, ((value) >> 24) & 0xFF);
531}
532
533static __inline uint64_t
534ahd_inq(struct ahd_softc *ahd, u_int port)
535{
536 return ((ahd_inb(ahd, port))
537 | (ahd_inb(ahd, port+1) << 8)
538 | (ahd_inb(ahd, port+2) << 16)
539 | (ahd_inb(ahd, port+3) << 24)
540 | (((uint64_t)ahd_inb(ahd, port+4)) << 32)
541 | (((uint64_t)ahd_inb(ahd, port+5)) << 40)
542 | (((uint64_t)ahd_inb(ahd, port+6)) << 48)
543 | (((uint64_t)ahd_inb(ahd, port+7)) << 56));
544}
545
546static __inline void
547ahd_outq(struct ahd_softc *ahd, u_int port, uint64_t value)
548{
549 ahd_outb(ahd, port, value & 0xFF);
550 ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
551 ahd_outb(ahd, port+2, (value >> 16) & 0xFF);
552 ahd_outb(ahd, port+3, (value >> 24) & 0xFF);
553 ahd_outb(ahd, port+4, (value >> 32) & 0xFF);
554 ahd_outb(ahd, port+5, (value >> 40) & 0xFF);
555 ahd_outb(ahd, port+6, (value >> 48) & 0xFF);
556 ahd_outb(ahd, port+7, (value >> 56) & 0xFF);
557}
558
559static __inline u_int
560ahd_get_scbptr(struct ahd_softc *ahd)
561{
562 AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
563 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
564 return (ahd_inb(ahd, SCBPTR) | (ahd_inb(ahd, SCBPTR + 1) << 8));
565}
566
567static __inline void
568ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr)
569{
570 AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
571 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
572 ahd_outb(ahd, SCBPTR, scbptr & 0xFF);
573 ahd_outb(ahd, SCBPTR+1, (scbptr >> 8) & 0xFF);
574}
575
576static __inline u_int
577ahd_get_hnscb_qoff(struct ahd_softc *ahd)
578{
579 return (ahd_inw_atomic(ahd, HNSCB_QOFF));
580}
581
582static __inline void
583ahd_set_hnscb_qoff(struct ahd_softc *ahd, u_int value)
584{
585 ahd_outw_atomic(ahd, HNSCB_QOFF, value);
586}
587
588static __inline u_int
589ahd_get_hescb_qoff(struct ahd_softc *ahd)
590{
591 return (ahd_inb(ahd, HESCB_QOFF));
592}
593
594static __inline void
595ahd_set_hescb_qoff(struct ahd_softc *ahd, u_int value)
596{
597 ahd_outb(ahd, HESCB_QOFF, value);
598}
599
600static __inline u_int
601ahd_get_snscb_qoff(struct ahd_softc *ahd)
602{
603 u_int oldvalue;
604
605 AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
606 oldvalue = ahd_inw(ahd, SNSCB_QOFF);
607 ahd_outw(ahd, SNSCB_QOFF, oldvalue);
608 return (oldvalue);
609}
610
611static __inline void
612ahd_set_snscb_qoff(struct ahd_softc *ahd, u_int value)
613{
614 AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
615 ahd_outw(ahd, SNSCB_QOFF, value);
616}
617
618static __inline u_int
619ahd_get_sescb_qoff(struct ahd_softc *ahd)
620{
621 AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
622 return (ahd_inb(ahd, SESCB_QOFF));
623}
624
625static __inline void
626ahd_set_sescb_qoff(struct ahd_softc *ahd, u_int value)
627{
628 AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
629 ahd_outb(ahd, SESCB_QOFF, value);
630}
631
632static __inline u_int
633ahd_get_sdscb_qoff(struct ahd_softc *ahd)
634{
635 AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
636 return (ahd_inb(ahd, SDSCB_QOFF) | (ahd_inb(ahd, SDSCB_QOFF + 1) << 8));
637}
638
639static __inline void
640ahd_set_sdscb_qoff(struct ahd_softc *ahd, u_int value)
641{
642 AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
643 ahd_outb(ahd, SDSCB_QOFF, value & 0xFF);
644 ahd_outb(ahd, SDSCB_QOFF+1, (value >> 8) & 0xFF);
645}
646
647static __inline u_int
648ahd_inb_scbram(struct ahd_softc *ahd, u_int offset)
649{
650 u_int value;
651
652 /*
653 * Workaround PCI-X Rev A. hardware bug.
654 * After a host read of SCB memory, the chip
655 * may become confused into thinking prefetch
656 * was required. This starts the discard timer
657 * running and can cause an unexpected discard
658 * timer interrupt. The work around is to read
659 * a normal register prior to the exhaustion of
660 * the discard timer. The mode pointer register
661 * has no side effects and so serves well for
662 * this purpose.
663 *
664 * Razor #528
665 */
666 value = ahd_inb(ahd, offset);
667 if ((ahd->bugs & AHD_PCIX_SCBRAM_RD_BUG) != 0)
668 ahd_inb(ahd, MODE_PTR);
669 return (value);
670}
671
672static __inline u_int
673ahd_inw_scbram(struct ahd_softc *ahd, u_int offset)
674{
675 return (ahd_inb_scbram(ahd, offset)
676 | (ahd_inb_scbram(ahd, offset+1) << 8));
677}
678
679static __inline uint32_t
680ahd_inl_scbram(struct ahd_softc *ahd, u_int offset)
681{
682 return (ahd_inw_scbram(ahd, offset)
683 | (ahd_inw_scbram(ahd, offset+2) << 16));
684}
685
686static __inline uint64_t
687ahd_inq_scbram(struct ahd_softc *ahd, u_int offset)
688{
689 return (ahd_inl_scbram(ahd, offset)
690 | ((uint64_t)ahd_inl_scbram(ahd, offset+4)) << 32);
691}
692
693static __inline struct scb *
694ahd_lookup_scb(struct ahd_softc *ahd, u_int tag)
695{
696 struct scb* scb;
697
698 if (tag >= AHD_SCB_MAX)
699 return (NULL);
700 scb = ahd->scb_data.scbindex[tag];
701 if (scb != NULL)
702 ahd_sync_scb(ahd, scb,
703 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
704 return (scb);
705}
706
707static __inline void
708ahd_swap_with_next_hscb(struct ahd_softc *ahd, struct scb *scb)
709{
710 struct hardware_scb *q_hscb;
711 struct map_node *q_hscb_map;
712 uint32_t saved_hscb_busaddr;
713
714 /*
715 * Our queuing method is a bit tricky. The card
716 * knows in advance which HSCB (by address) to download,
717 * and we can't disappoint it. To achieve this, the next
718 * HSCB to download is saved off in ahd->next_queued_hscb.
719 * When we are called to queue "an arbitrary scb",
720 * we copy the contents of the incoming HSCB to the one
721 * the sequencer knows about, swap HSCB pointers and
722 * finally assign the SCB to the tag indexed location
723 * in the scb_array. This makes sure that we can still
724 * locate the correct SCB by SCB_TAG.
725 */
726 q_hscb = ahd->next_queued_hscb;
727 q_hscb_map = ahd->next_queued_hscb_map;
728 saved_hscb_busaddr = q_hscb->hscb_busaddr;
729 memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
730 q_hscb->hscb_busaddr = saved_hscb_busaddr;
731 q_hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr;
732
733 /* Now swap HSCB pointers. */
734 ahd->next_queued_hscb = scb->hscb;
735 ahd->next_queued_hscb_map = scb->hscb_map;
736 scb->hscb = q_hscb;
737 scb->hscb_map = q_hscb_map;
738
739 /* Now define the mapping from tag to SCB in the scbindex */
740 ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
741}
742
743/*
744 * Tell the sequencer about a new transaction to execute.
745 */
746static __inline void
747ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb)
748{
749 ahd_swap_with_next_hscb(ahd, scb);
750
751 if (SCBID_IS_NULL(SCB_GET_TAG(scb)))
752 panic("Attempt to queue invalid SCB tag %x\n",
753 SCB_GET_TAG(scb));
754
755 /*
756 * Keep a history of SCBs we've downloaded in the qinfifo.
757 */
758 ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb);
759 ahd->qinfifonext++;
760
761 if (scb->sg_count != 0)
762 ahd_setup_data_scb(ahd, scb);
763 else
764 ahd_setup_noxfer_scb(ahd, scb);
765 ahd_setup_scb_common(ahd, scb);
766
767 /*
768 * Make sure our data is consistent from the
769 * perspective of the adapter.
770 */
771 ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
772
773#ifdef AHD_DEBUG
774 if ((ahd_debug & AHD_SHOW_QUEUE) != 0) {
775 uint64_t host_dataptr;
776
777 host_dataptr = ahd_le64toh(scb->hscb->dataptr);
778 printf("%s: Queueing SCB %d:0x%x bus addr 0x%x - 0x%x%x/0x%x\n",
779 ahd_name(ahd),
780 SCB_GET_TAG(scb), scb->hscb->scsiid,
781 ahd_le32toh(scb->hscb->hscb_busaddr),
782 (u_int)((host_dataptr >> 32) & 0xFFFFFFFF),
783 (u_int)(host_dataptr & 0xFFFFFFFF),
784 ahd_le32toh(scb->hscb->datacnt));
785 }
 #endif
787 /* Tell the adapter about the newly queued SCB */
788 ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
789}
 
 static __inline uint8_t *
 ahd_get_sense_buf(struct ahd_softc *ahd, struct scb *scb)
@@ -801,151 +171,6 @@ ahd_get_sense_bufaddr(struct ahd_softc *ahd, struct scb *scb)
 }
 
 /************************** Interrupt Processing ******************************/
-static __inline void	ahd_sync_qoutfifo(struct ahd_softc *ahd, int op);
+int			ahd_intr(struct ahd_softc *ahd);
805static __inline void ahd_sync_tqinfifo(struct ahd_softc *ahd, int op);
806static __inline u_int ahd_check_cmdcmpltqueues(struct ahd_softc *ahd);
807static __inline int ahd_intr(struct ahd_softc *ahd);
808
809static __inline void
810ahd_sync_qoutfifo(struct ahd_softc *ahd, int op)
811{
812 ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap,
813 /*offset*/0,
814 /*len*/AHD_SCB_MAX * sizeof(struct ahd_completion), op);
815}
816
817static __inline void
818ahd_sync_tqinfifo(struct ahd_softc *ahd, int op)
819{
820#ifdef AHD_TARGET_MODE
821 if ((ahd->flags & AHD_TARGETROLE) != 0) {
822 ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
823 ahd->shared_data_map.dmamap,
824 ahd_targetcmd_offset(ahd, 0),
825 sizeof(struct target_cmd) * AHD_TMODE_CMDS,
826 op);
827 }
828#endif
829}
830
831/*
832 * See if the firmware has posted any completed commands
833 * into our in-core command complete fifos.
834 */
835#define AHD_RUN_QOUTFIFO 0x1
836#define AHD_RUN_TQINFIFO 0x2
837static __inline u_int
838ahd_check_cmdcmpltqueues(struct ahd_softc *ahd)
839{
840 u_int retval;
841
842 retval = 0;
843 ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap,
844 /*offset*/ahd->qoutfifonext * sizeof(*ahd->qoutfifo),
845 /*len*/sizeof(*ahd->qoutfifo), BUS_DMASYNC_POSTREAD);
846 if (ahd->qoutfifo[ahd->qoutfifonext].valid_tag
847 == ahd->qoutfifonext_valid_tag)
848 retval |= AHD_RUN_QOUTFIFO;
849#ifdef AHD_TARGET_MODE
850 if ((ahd->flags & AHD_TARGETROLE) != 0
851 && (ahd->flags & AHD_TQINFIFO_BLOCKED) == 0) {
852 ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
853 ahd->shared_data_map.dmamap,
854 ahd_targetcmd_offset(ahd, ahd->tqinfifofnext),
855 /*len*/sizeof(struct target_cmd),
856 BUS_DMASYNC_POSTREAD);
857 if (ahd->targetcmds[ahd->tqinfifonext].cmd_valid != 0)
858 retval |= AHD_RUN_TQINFIFO;
859 }
860#endif
861 return (retval);
862}
863
864/*
865 * Catch an interrupt from the adapter
866 */
867static __inline int
868ahd_intr(struct ahd_softc *ahd)
869{
870 u_int intstat;
871
872 if ((ahd->pause & INTEN) == 0) {
873 /*
874 * Our interrupt is not enabled on the chip
875 * and may be disabled for re-entrancy reasons,
876 * so just return. This is likely just a shared
877 * interrupt.
878 */
879 return (0);
880 }
881
882 /*
883 * Instead of directly reading the interrupt status register,
884 * infer the cause of the interrupt by checking our in-core
885 * completion queues. This avoids a costly PCI bus read in
886 * most cases.
887 */
888 if ((ahd->flags & AHD_ALL_INTERRUPTS) == 0
889 && (ahd_check_cmdcmpltqueues(ahd) != 0))
890 intstat = CMDCMPLT;
891 else
892 intstat = ahd_inb(ahd, INTSTAT);
893
894 if ((intstat & INT_PEND) == 0)
895 return (0);
896
897 if (intstat & CMDCMPLT) {
898 ahd_outb(ahd, CLRINT, CLRCMDINT);
899
900 /*
901 * Ensure that the chip sees that we've cleared
902 * this interrupt before we walk the output fifo.
903 * Otherwise, we may, due to posted bus writes,
904 * clear the interrupt after we finish the scan,
905 * and after the sequencer has added new entries
906 * and asserted the interrupt again.
907 */
908 if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
909 if (ahd_is_paused(ahd)) {
910 /*
911 * Potentially lost SEQINT.
912 * If SEQINTCODE is non-zero,
913 * simulate the SEQINT.
914 */
915 if (ahd_inb(ahd, SEQINTCODE) != NO_SEQINT)
916 intstat |= SEQINT;
917 }
918 } else {
919 ahd_flush_device_writes(ahd);
920 }
921 ahd_run_qoutfifo(ahd);
922 ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket]++;
923 ahd->cmdcmplt_total++;
924#ifdef AHD_TARGET_MODE
925 if ((ahd->flags & AHD_TARGETROLE) != 0)
926 ahd_run_tqinfifo(ahd, /*paused*/FALSE);
927#endif
928 }
929
930 /*
931 * Handle statuses that may invalidate our cached
932 * copy of INTSTAT separately.
933 */
934 if (intstat == 0xFF && (ahd->features & AHD_REMOVABLE) != 0) {
935 /* Hot eject. Do nothing */
936 } else if (intstat & HWERRINT) {
937 ahd_handle_hwerrint(ahd);
938 } else if ((intstat & (PCIINT|SPLTINT)) != 0) {
939 ahd->bus_intr(ahd);
940 } else {
941
942 if ((intstat & SEQINT) != 0)
943 ahd_handle_seqint(ahd, intstat);
944
945 if ((intstat & SCSIINT) != 0)
946 ahd_handle_scsiint(ahd, intstat);
947 }
948 return (1);
949}
950 175
951#endif /* _AIC79XX_INLINE_H_ */ 176#endif /* _AIC79XX_INLINE_H_ */
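
The interrupt handler moved out of this header shows two patterns worth keeping in mind when reading the relocated code: bail out early when INTEN is clear, because on a shared IRQ line a quiesced chip means the interrupt belongs to another device, and infer CMDCMPLT from the in-core completion queues so the common case skips a costly PCI read of INTSTAT. A condensed sketch under hypothetical names (ctrl, queues_have_work, read_intstat are illustrative):

	static int
	ctrl_intr(struct ctrl *c)
	{
		unsigned int intstat;

		if ((c->pause & INTEN) == 0)
			return 0;		/* shared IRQ, not ours */

		if (queues_have_work(c))	/* cheap in-memory check */
			intstat = CMDCMPLT;
		else
			intstat = read_intstat(c); /* costly PCI register read */

		if ((intstat & INT_PEND) == 0)
			return 0;
		/* ... dispatch on the individual intstat bits ... */
		return 1;
	}
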
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 0081aa357c8b..0f829b3b8ab7 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -193,7 +193,7 @@ struct ahd_linux_iocell_opts
193#define AIC79XX_PRECOMP_INDEX 0 193#define AIC79XX_PRECOMP_INDEX 0
194#define AIC79XX_SLEWRATE_INDEX 1 194#define AIC79XX_SLEWRATE_INDEX 1
195#define AIC79XX_AMPLITUDE_INDEX 2 195#define AIC79XX_AMPLITUDE_INDEX 2
196static struct ahd_linux_iocell_opts aic79xx_iocell_info[] = 196static const struct ahd_linux_iocell_opts aic79xx_iocell_info[] =
197{ 197{
198 AIC79XX_DEFAULT_IOOPTS, 198 AIC79XX_DEFAULT_IOOPTS,
199 AIC79XX_DEFAULT_IOOPTS, 199 AIC79XX_DEFAULT_IOOPTS,
@@ -369,10 +369,167 @@ static void ahd_release_simq(struct ahd_softc *ahd);
369static int ahd_linux_unit; 369static int ahd_linux_unit;
370 370
371 371
372/************************** OS Utility Wrappers *******************************/
373void ahd_delay(long);
374void
375ahd_delay(long usec)
376{
377 /*
378 * udelay on Linux can have problems for
379 * multi-millisecond waits. Wait at most
380 * 1024us per call.
381 */
382 while (usec > 0) {
383 udelay(usec % 1024);
384 usec -= 1024;
385 }
386}
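
Note that the loop above delays usec % 1024 microseconds on every pass, so a request that is an exact multiple of 1024 (say 2048us) performs no delay at all, and other multi-millisecond requests come up slightly short. A capped, chunked delay is presumably what was intended; a hedged sketch:

	/* Sketch of the apparent intent: bound each udelay() call at 1024us. */
	static void
	bounded_udelay(long usec)
	{
		while (usec > 0) {
			udelay(usec > 1024 ? 1024 : usec);
			usec -= 1024;
		}
	}
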
387
388
389/***************************** Low Level I/O **********************************/
390uint8_t ahd_inb(struct ahd_softc * ahd, long port);
391void ahd_outb(struct ahd_softc * ahd, long port, uint8_t val);
392void ahd_outw_atomic(struct ahd_softc * ahd,
393 long port, uint16_t val);
394void ahd_outsb(struct ahd_softc * ahd, long port,
395 uint8_t *, int count);
396void ahd_insb(struct ahd_softc * ahd, long port,
397 uint8_t *, int count);
398
399uint8_t
400ahd_inb(struct ahd_softc * ahd, long port)
401{
402 uint8_t x;
403
404 if (ahd->tags[0] == BUS_SPACE_MEMIO) {
405 x = readb(ahd->bshs[0].maddr + port);
406 } else {
407 x = inb(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
408 }
409 mb();
410 return (x);
411}
412
413#if 0 /* unused */
414static uint16_t
415ahd_inw_atomic(struct ahd_softc * ahd, long port)
416{
417 uint16_t x;
418
419 if (ahd->tags[0] == BUS_SPACE_MEMIO) {
420 x = readw(ahd->bshs[0].maddr + port);
421 } else {
422 x = inw(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
423 }
424 mb();
425 return (x);
426}
427#endif
428
429void
430ahd_outb(struct ahd_softc * ahd, long port, uint8_t val)
431{
432 if (ahd->tags[0] == BUS_SPACE_MEMIO) {
433 writeb(val, ahd->bshs[0].maddr + port);
434 } else {
435 outb(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
436 }
437 mb();
438}
439
440void
441ahd_outw_atomic(struct ahd_softc * ahd, long port, uint16_t val)
442{
443 if (ahd->tags[0] == BUS_SPACE_MEMIO) {
444 writew(val, ahd->bshs[0].maddr + port);
445 } else {
446 outw(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
447 }
448 mb();
449}
450
451void
452ahd_outsb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
453{
454 int i;
455
456 /*
457 * There is probably a more efficient way to do this on Linux
458 * but we don't use this for anything speed critical and this
459 * should work.
460 */
461 for (i = 0; i < count; i++)
462 ahd_outb(ahd, port, *array++);
463}
464
465void
466ahd_insb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
467{
468 int i;
469
470 /*
471 * There is probably a more efficient way to do this on Linux
472 * but we don't use this for anything speed critical and this
473 * should work.
474 */
475 for (i = 0; i < count; i++)
476 *array++ = ahd_inb(ahd, port);
477}
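
For the pure port-I/O case Linux already has single-call string primitives; the loops above go byte-at-a-time through ahd_outb()/ahd_inb() because the region may be memory-mapped rather than an I/O port, and neither path is performance-critical. Illustrative use of the port-only variants:

	outsb(ioport, array, count);	/* write count bytes to one I/O port */
	insb(ioport, array, count);	/* read count bytes from one I/O port */
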
478
479/******************************* PCI Routines *********************************/
480uint32_t
481ahd_pci_read_config(ahd_dev_softc_t pci, int reg, int width)
482{
483 switch (width) {
484 case 1:
485 {
486 uint8_t retval;
487
488 pci_read_config_byte(pci, reg, &retval);
489 return (retval);
490 }
491 case 2:
492 {
493 uint16_t retval;
494 pci_read_config_word(pci, reg, &retval);
495 return (retval);
496 }
497 case 4:
498 {
499 uint32_t retval;
500 pci_read_config_dword(pci, reg, &retval);
501 return (retval);
502 }
503 default:
504 panic("ahd_pci_read_config: Read size too big");
505 /* NOTREACHED */
506 return (0);
507 }
508}
509
510void
511ahd_pci_write_config(ahd_dev_softc_t pci, int reg, uint32_t value, int width)
512{
513 switch (width) {
514 case 1:
515 pci_write_config_byte(pci, reg, value);
516 break;
517 case 2:
518 pci_write_config_word(pci, reg, value);
519 break;
520 case 4:
521 pci_write_config_dword(pci, reg, value);
522 break;
523 default:
524 panic("ahd_pci_write_config: Write size too big");
525 /* NOTREACHED */
526 }
527}
528
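
The two wrappers above dispatch on an explicit byte width instead of exposing three differently typed calls. A usage sketch (offset 0x04 is the standard PCI command register and 0x0004 the standard bus-master enable bit; both are PCI facts, not driver-specific):

	uint32_t cmd;

	cmd = ahd_pci_read_config(pci, /*reg*/0x04, /*width*/2);
	ahd_pci_write_config(pci, /*reg*/0x04, cmd | 0x0004, /*width*/2);
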
372/****************************** Inlines ***************************************/ 529/****************************** Inlines ***************************************/
373static __inline void ahd_linux_unmap_scb(struct ahd_softc*, struct scb*); 530static void ahd_linux_unmap_scb(struct ahd_softc*, struct scb*);
374 531
375static __inline void 532static void
376ahd_linux_unmap_scb(struct ahd_softc *ahd, struct scb *scb) 533ahd_linux_unmap_scb(struct ahd_softc *ahd, struct scb *scb)
377{ 534{
378 struct scsi_cmnd *cmd; 535 struct scsi_cmnd *cmd;
@@ -400,13 +557,11 @@ ahd_linux_info(struct Scsi_Host *host)
400 bp = &buffer[0]; 557 bp = &buffer[0];
401 ahd = *(struct ahd_softc **)host->hostdata; 558 ahd = *(struct ahd_softc **)host->hostdata;
402 memset(bp, 0, sizeof(buffer)); 559 memset(bp, 0, sizeof(buffer));
403 strcpy(bp, "Adaptec AIC79XX PCI-X SCSI HBA DRIVER, Rev "); 560 strcpy(bp, "Adaptec AIC79XX PCI-X SCSI HBA DRIVER, Rev " AIC79XX_DRIVER_VERSION "\n"
404 strcat(bp, AIC79XX_DRIVER_VERSION); 561 " <");
405 strcat(bp, "\n");
406 strcat(bp, " <");
407 strcat(bp, ahd->description); 562 strcat(bp, ahd->description);
408 strcat(bp, ">\n"); 563 strcat(bp, ">\n"
409 strcat(bp, " "); 564 " ");
410 ahd_controller_info(ahd, ahd_info); 565 ahd_controller_info(ahd, ahd_info);
411 strcat(bp, ahd_info); 566 strcat(bp, ahd_info);
412 567
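
The rewrite in this hunk works because C concatenates adjacent string literals at translation time, so a macro that expands to a string literal can be spliced straight into the surrounding literal and the intermediate strcat() calls disappear. A minimal illustration (assuming, as here, that the version macro expands to a string literal; DRIVER_VERSION is a stand-in name):

	#define DRIVER_VERSION "3.0"
	const char banner[] = "Rev " DRIVER_VERSION "\n" " <";
	/* equivalent to: const char banner[] = "Rev 3.0\n <"; */
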
@@ -432,7 +587,7 @@ ahd_linux_queue(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *))
432 return rtn; 587 return rtn;
433} 588}
434 589
435static inline struct scsi_target ** 590static struct scsi_target **
436ahd_linux_target_in_softc(struct scsi_target *starget) 591ahd_linux_target_in_softc(struct scsi_target *starget)
437{ 592{
438 struct ahd_softc *ahd = 593 struct ahd_softc *ahd =
@@ -991,7 +1146,7 @@ aic79xx_setup(char *s)
991 char *p; 1146 char *p;
992 char *end; 1147 char *end;
993 1148
994 static struct { 1149 static const struct {
995 const char *name; 1150 const char *name;
996 uint32_t *flag; 1151 uint32_t *flag;
997 } options[] = { 1152 } options[] = {
@@ -1223,7 +1378,7 @@ ahd_platform_init(struct ahd_softc *ahd)
1223 * Lookup and commit any modified IO Cell options. 1378 * Lookup and commit any modified IO Cell options.
1224 */ 1379 */
1225 if (ahd->unit < ARRAY_SIZE(aic79xx_iocell_info)) { 1380 if (ahd->unit < ARRAY_SIZE(aic79xx_iocell_info)) {
1226 struct ahd_linux_iocell_opts *iocell_opts; 1381 const struct ahd_linux_iocell_opts *iocell_opts;
1227 1382
1228 iocell_opts = &aic79xx_iocell_info[ahd->unit]; 1383 iocell_opts = &aic79xx_iocell_info[ahd->unit];
1229 if (iocell_opts->precomp != AIC79XX_DEFAULT_PRECOMP) 1384 if (iocell_opts->precomp != AIC79XX_DEFAULT_PRECOMP)
@@ -2613,7 +2768,7 @@ static void ahd_linux_set_pcomp_en(struct scsi_target *starget, int pcomp)
2613 uint8_t precomp; 2768 uint8_t precomp;
2614 2769
2615 if (ahd->unit < ARRAY_SIZE(aic79xx_iocell_info)) { 2770 if (ahd->unit < ARRAY_SIZE(aic79xx_iocell_info)) {
2616 struct ahd_linux_iocell_opts *iocell_opts; 2771 const struct ahd_linux_iocell_opts *iocell_opts;
2617 2772
2618 iocell_opts = &aic79xx_iocell_info[ahd->unit]; 2773 iocell_opts = &aic79xx_iocell_info[ahd->unit];
2619 precomp = iocell_opts->precomp; 2774 precomp = iocell_opts->precomp;
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.h b/drivers/scsi/aic7xxx/aic79xx_osm.h
index 853998be1474..8d6612c19922 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.h
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.h
@@ -222,22 +222,6 @@ typedef struct timer_list ahd_timer_t;
222/***************************** Timer Facilities *******************************/ 222/***************************** Timer Facilities *******************************/
223#define ahd_timer_init init_timer 223#define ahd_timer_init init_timer
224#define ahd_timer_stop del_timer_sync 224#define ahd_timer_stop del_timer_sync
225typedef void ahd_linux_callback_t (u_long);
226static __inline void ahd_timer_reset(ahd_timer_t *timer, int usec,
227 ahd_callback_t *func, void *arg);
228
229static __inline void
230ahd_timer_reset(ahd_timer_t *timer, int usec, ahd_callback_t *func, void *arg)
231{
232 struct ahd_softc *ahd;
233
234 ahd = (struct ahd_softc *)arg;
235 del_timer(timer);
236 timer->data = (u_long)arg;
237 timer->expires = jiffies + (usec * HZ)/1000000;
238 timer->function = (ahd_linux_callback_t*)func;
239 add_timer(timer);
240}
241 225
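
The removed helper computed the expiry by hand as jiffies + (usec * HZ)/1000000, which can overflow a 32-bit long for multi-second waits. A hedged one-line equivalent using the dedicated conversion helper from <linux/jiffies.h> (shown for reference only; this hunk simply removes the open-coded helper):

	mod_timer(timer, jiffies + usecs_to_jiffies(usec));
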
242/***************************** SMP support ************************************/ 226/***************************** SMP support ************************************/
243#include <linux/spinlock.h> 227#include <linux/spinlock.h>
@@ -376,7 +360,7 @@ struct ahd_platform_data {
376#define AHD_LINUX_NOIRQ ((uint32_t)~0) 360#define AHD_LINUX_NOIRQ ((uint32_t)~0)
377 uint32_t irq; /* IRQ for this adapter */ 361 uint32_t irq; /* IRQ for this adapter */
378 uint32_t bios_address; 362 uint32_t bios_address;
379 uint32_t mem_busaddr; /* Mem Base Addr */ 363 resource_size_t mem_busaddr; /* Mem Base Addr */
380}; 364};
381 365
382/************************** OS Utility Wrappers *******************************/ 366/************************** OS Utility Wrappers *******************************/
@@ -386,111 +370,18 @@ struct ahd_platform_data {
386#define malloc(size, type, flags) kmalloc(size, flags) 370#define malloc(size, type, flags) kmalloc(size, flags)
387#define free(ptr, type) kfree(ptr) 371#define free(ptr, type) kfree(ptr)
388 372
389static __inline void ahd_delay(long); 373void ahd_delay(long);
390static __inline void
391ahd_delay(long usec)
392{
393 /*
394 * udelay on Linux can have problems for
395 * multi-millisecond waits. Wait at most
396 * 1024us per call.
397 */
398 while (usec > 0) {
399 udelay(usec % 1024);
400 usec -= 1024;
401 }
402}
403
404 374
405/***************************** Low Level I/O **********************************/ 375/***************************** Low Level I/O **********************************/
406static __inline uint8_t ahd_inb(struct ahd_softc * ahd, long port); 376uint8_t ahd_inb(struct ahd_softc * ahd, long port);
407static __inline uint16_t ahd_inw_atomic(struct ahd_softc * ahd, long port); 377void ahd_outb(struct ahd_softc * ahd, long port, uint8_t val);
408static __inline void ahd_outb(struct ahd_softc * ahd, long port, uint8_t val); 378void ahd_outw_atomic(struct ahd_softc * ahd,
409static __inline void ahd_outw_atomic(struct ahd_softc * ahd,
410 long port, uint16_t val); 379 long port, uint16_t val);
411static __inline void ahd_outsb(struct ahd_softc * ahd, long port, 380void ahd_outsb(struct ahd_softc * ahd, long port,
412 uint8_t *, int count); 381 uint8_t *, int count);
413static __inline void ahd_insb(struct ahd_softc * ahd, long port, 382void ahd_insb(struct ahd_softc * ahd, long port,
414 uint8_t *, int count); 383 uint8_t *, int count);
415 384
416static __inline uint8_t
417ahd_inb(struct ahd_softc * ahd, long port)
418{
419 uint8_t x;
420
421 if (ahd->tags[0] == BUS_SPACE_MEMIO) {
422 x = readb(ahd->bshs[0].maddr + port);
423 } else {
424 x = inb(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
425 }
426 mb();
427 return (x);
428}
429
430static __inline uint16_t
431ahd_inw_atomic(struct ahd_softc * ahd, long port)
432{
433 uint8_t x;
434
435 if (ahd->tags[0] == BUS_SPACE_MEMIO) {
436 x = readw(ahd->bshs[0].maddr + port);
437 } else {
438 x = inw(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
439 }
440 mb();
441 return (x);
442}
443
444static __inline void
445ahd_outb(struct ahd_softc * ahd, long port, uint8_t val)
446{
447 if (ahd->tags[0] == BUS_SPACE_MEMIO) {
448 writeb(val, ahd->bshs[0].maddr + port);
449 } else {
450 outb(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
451 }
452 mb();
453}
454
455static __inline void
456ahd_outw_atomic(struct ahd_softc * ahd, long port, uint16_t val)
457{
458 if (ahd->tags[0] == BUS_SPACE_MEMIO) {
459 writew(val, ahd->bshs[0].maddr + port);
460 } else {
461 outw(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
462 }
463 mb();
464}
465
466static __inline void
467ahd_outsb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
468{
469 int i;
470
471 /*
472 * There is probably a more efficient way to do this on Linux
473 * but we don't use this for anything speed critical and this
474 * should work.
475 */
476 for (i = 0; i < count; i++)
477 ahd_outb(ahd, port, *array++);
478}
479
480static __inline void
481ahd_insb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
482{
483 int i;
484
485 /*
486 * There is probably a more efficient way to do this on Linux
487 * but we don't use this for anything speed critical and this
488 * should work.
489 */
490 for (i = 0; i < count; i++)
491 *array++ = ahd_inb(ahd, port);
492}
493
494/**************************** Initialization **********************************/ 385/**************************** Initialization **********************************/
495int ahd_linux_register_host(struct ahd_softc *, 386int ahd_linux_register_host(struct ahd_softc *,
496 struct scsi_host_template *); 387 struct scsi_host_template *);
@@ -593,62 +484,12 @@ void ahd_linux_pci_exit(void);
593int ahd_pci_map_registers(struct ahd_softc *ahd); 484int ahd_pci_map_registers(struct ahd_softc *ahd);
594int ahd_pci_map_int(struct ahd_softc *ahd); 485int ahd_pci_map_int(struct ahd_softc *ahd);
595 486
596static __inline uint32_t ahd_pci_read_config(ahd_dev_softc_t pci, 487uint32_t ahd_pci_read_config(ahd_dev_softc_t pci,
597 int reg, int width); 488 int reg, int width);
598 489void ahd_pci_write_config(ahd_dev_softc_t pci,
599static __inline uint32_t
600ahd_pci_read_config(ahd_dev_softc_t pci, int reg, int width)
601{
602 switch (width) {
603 case 1:
604 {
605 uint8_t retval;
606
607 pci_read_config_byte(pci, reg, &retval);
608 return (retval);
609 }
610 case 2:
611 {
612 uint16_t retval;
613 pci_read_config_word(pci, reg, &retval);
614 return (retval);
615 }
616 case 4:
617 {
618 uint32_t retval;
619 pci_read_config_dword(pci, reg, &retval);
620 return (retval);
621 }
622 default:
623 panic("ahd_pci_read_config: Read size too big");
624 /* NOTREACHED */
625 return (0);
626 }
627}
628
629static __inline void ahd_pci_write_config(ahd_dev_softc_t pci,
630 int reg, uint32_t value, 490 int reg, uint32_t value,
631 int width); 491 int width);
632 492
633static __inline void
634ahd_pci_write_config(ahd_dev_softc_t pci, int reg, uint32_t value, int width)
635{
636 switch (width) {
637 case 1:
638 pci_write_config_byte(pci, reg, value);
639 break;
640 case 2:
641 pci_write_config_word(pci, reg, value);
642 break;
643 case 4:
644 pci_write_config_dword(pci, reg, value);
645 break;
646 default:
647 panic("ahd_pci_write_config: Write size too big");
648 /* NOTREACHED */
649 }
650}
651
652static __inline int ahd_get_pci_function(ahd_dev_softc_t); 493static __inline int ahd_get_pci_function(ahd_dev_softc_t);
653static __inline int 494static __inline int
654ahd_get_pci_function(ahd_dev_softc_t pci) 495ahd_get_pci_function(ahd_dev_softc_t pci)
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
index dfaaae5e73ae..6593056867f6 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
@@ -49,7 +49,7 @@
49 ID2C(x), \ 49 ID2C(x), \
50 ID2C(IDIROC(x)) 50 ID2C(IDIROC(x))
51 51
52static struct pci_device_id ahd_linux_pci_id_table[] = { 52static const struct pci_device_id ahd_linux_pci_id_table[] = {
53 /* aic7901 based controllers */ 53 /* aic7901 based controllers */
54 ID(ID_AHA_29320A), 54 ID(ID_AHA_29320A),
55 ID(ID_AHA_29320ALP), 55 ID(ID_AHA_29320ALP),
@@ -159,7 +159,7 @@ ahd_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
159 char buf[80]; 159 char buf[80];
160 struct ahd_softc *ahd; 160 struct ahd_softc *ahd;
161 ahd_dev_softc_t pci; 161 ahd_dev_softc_t pci;
162 struct ahd_pci_identity *entry; 162 const struct ahd_pci_identity *entry;
163 char *name; 163 char *name;
164 int error; 164 int error;
165 struct device *dev = &pdev->dev; 165 struct device *dev = &pdev->dev;
@@ -249,8 +249,8 @@ ahd_linux_pci_exit(void)
249} 249}
250 250
251static int 251static int
252ahd_linux_pci_reserve_io_regions(struct ahd_softc *ahd, u_long *base, 252ahd_linux_pci_reserve_io_regions(struct ahd_softc *ahd, resource_size_t *base,
253 u_long *base2) 253 resource_size_t *base2)
254{ 254{
255 *base = pci_resource_start(ahd->dev_softc, 0); 255 *base = pci_resource_start(ahd->dev_softc, 0);
256 /* 256 /*
@@ -272,11 +272,11 @@ ahd_linux_pci_reserve_io_regions(struct ahd_softc *ahd, u_long *base,
272 272
273static int 273static int
274ahd_linux_pci_reserve_mem_region(struct ahd_softc *ahd, 274ahd_linux_pci_reserve_mem_region(struct ahd_softc *ahd,
275 u_long *bus_addr, 275 resource_size_t *bus_addr,
276 uint8_t __iomem **maddr) 276 uint8_t __iomem **maddr)
277{ 277{
278 u_long start; 278 resource_size_t start;
279 u_long base_page; 279 resource_size_t base_page;
280 u_long base_offset; 280 u_long base_offset;
281 int error = 0; 281 int error = 0;
282 282
@@ -310,7 +310,7 @@ int
310ahd_pci_map_registers(struct ahd_softc *ahd) 310ahd_pci_map_registers(struct ahd_softc *ahd)
311{ 311{
312 uint32_t command; 312 uint32_t command;
313 u_long base; 313 resource_size_t base;
314 uint8_t __iomem *maddr; 314 uint8_t __iomem *maddr;
315 int error; 315 int error;
316 316
@@ -346,31 +346,32 @@ ahd_pci_map_registers(struct ahd_softc *ahd)
346 } else 346 } else
347 command |= PCIM_CMD_MEMEN; 347 command |= PCIM_CMD_MEMEN;
348 } else if (bootverbose) { 348 } else if (bootverbose) {
349 printf("aic79xx: PCI%d:%d:%d MEM region 0x%lx " 349 printf("aic79xx: PCI%d:%d:%d MEM region 0x%llx "
350 "unavailable. Cannot memory map device.\n", 350 "unavailable. Cannot memory map device.\n",
351 ahd_get_pci_bus(ahd->dev_softc), 351 ahd_get_pci_bus(ahd->dev_softc),
352 ahd_get_pci_slot(ahd->dev_softc), 352 ahd_get_pci_slot(ahd->dev_softc),
353 ahd_get_pci_function(ahd->dev_softc), 353 ahd_get_pci_function(ahd->dev_softc),
354 base); 354 (unsigned long long)base);
355 } 355 }
356 356
357 if (maddr == NULL) { 357 if (maddr == NULL) {
358 u_long base2; 358 resource_size_t base2;
359 359
360 error = ahd_linux_pci_reserve_io_regions(ahd, &base, &base2); 360 error = ahd_linux_pci_reserve_io_regions(ahd, &base, &base2);
361 if (error == 0) { 361 if (error == 0) {
362 ahd->tags[0] = BUS_SPACE_PIO; 362 ahd->tags[0] = BUS_SPACE_PIO;
363 ahd->tags[1] = BUS_SPACE_PIO; 363 ahd->tags[1] = BUS_SPACE_PIO;
364 ahd->bshs[0].ioport = base; 364 ahd->bshs[0].ioport = (u_long)base;
365 ahd->bshs[1].ioport = base2; 365 ahd->bshs[1].ioport = (u_long)base2;
366 command |= PCIM_CMD_PORTEN; 366 command |= PCIM_CMD_PORTEN;
367 } else { 367 } else {
368 printf("aic79xx: PCI%d:%d:%d IO regions 0x%lx and 0x%lx" 368 printf("aic79xx: PCI%d:%d:%d IO regions 0x%llx and "
369 "unavailable. Cannot map device.\n", 369 "0x%llx unavailable. Cannot map device.\n",
370 ahd_get_pci_bus(ahd->dev_softc), 370 ahd_get_pci_bus(ahd->dev_softc),
371 ahd_get_pci_slot(ahd->dev_softc), 371 ahd_get_pci_slot(ahd->dev_softc),
372 ahd_get_pci_function(ahd->dev_softc), 372 ahd_get_pci_function(ahd->dev_softc),
373 base, base2); 373 (unsigned long long)base,
374 (unsigned long long)base2);
374 } 375 }
375 } 376 }
376 ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, command, 4); 377 ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, command, 4);
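
The type change running through this file is why the printf casts appear: resource_size_t is 32 or 64 bits depending on the platform's physical address width, so portable printing fixes the type with a cast to unsigned long long and a %llx conversion. A minimal sketch:

	resource_size_t base = pci_resource_start(pdev, 0);	/* BAR 0 */

	printk("BAR0 at 0x%llx\n", (unsigned long long)base);
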
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
index c9f79fdf9131..c25b6adffbf9 100644
--- a/drivers/scsi/aic7xxx/aic79xx_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
@@ -97,7 +97,7 @@ static ahd_device_setup_t ahd_aic7901A_setup;
97static ahd_device_setup_t ahd_aic7902_setup; 97static ahd_device_setup_t ahd_aic7902_setup;
98static ahd_device_setup_t ahd_aic790X_setup; 98static ahd_device_setup_t ahd_aic790X_setup;
99 99
100static struct ahd_pci_identity ahd_pci_ident_table [] = 100static const struct ahd_pci_identity ahd_pci_ident_table[] =
101{ 101{
102 /* aic7901 based controllers */ 102 /* aic7901 based controllers */
103 { 103 {
@@ -253,7 +253,7 @@ static void ahd_configure_termination(struct ahd_softc *ahd,
253static void ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat); 253static void ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat);
254static void ahd_pci_intr(struct ahd_softc *ahd); 254static void ahd_pci_intr(struct ahd_softc *ahd);
255 255
256struct ahd_pci_identity * 256const struct ahd_pci_identity *
257ahd_find_pci_device(ahd_dev_softc_t pci) 257ahd_find_pci_device(ahd_dev_softc_t pci)
258{ 258{
259 uint64_t full_id; 259 uint64_t full_id;
@@ -261,7 +261,7 @@ ahd_find_pci_device(ahd_dev_softc_t pci)
261 uint16_t vendor; 261 uint16_t vendor;
262 uint16_t subdevice; 262 uint16_t subdevice;
263 uint16_t subvendor; 263 uint16_t subvendor;
264 struct ahd_pci_identity *entry; 264 const struct ahd_pci_identity *entry;
265 u_int i; 265 u_int i;
266 266
267 vendor = ahd_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2); 267 vendor = ahd_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2);
@@ -292,7 +292,7 @@ ahd_find_pci_device(ahd_dev_softc_t pci)
292} 292}
293 293
294int 294int
295ahd_pci_config(struct ahd_softc *ahd, struct ahd_pci_identity *entry) 295ahd_pci_config(struct ahd_softc *ahd, const struct ahd_pci_identity *entry)
296{ 296{
297 struct scb_data *shared_scb_data; 297 struct scb_data *shared_scb_data;
298 u_int command; 298 u_int command;
diff --git a/drivers/scsi/aic7xxx/aic79xx_proc.c b/drivers/scsi/aic7xxx/aic79xx_proc.c
index 6b28bebcbca0..014bed716e7c 100644
--- a/drivers/scsi/aic7xxx/aic79xx_proc.c
+++ b/drivers/scsi/aic7xxx/aic79xx_proc.c
@@ -57,7 +57,7 @@ static int ahd_proc_write_seeprom(struct ahd_softc *ahd,
57 * Table of syncrates that don't follow the "divisible by 4" 57 * Table of syncrates that don't follow the "divisible by 4"
58 * rule. This table will be expanded in future SCSI specs. 58 * rule. This table will be expanded in future SCSI specs.
59 */ 59 */
60static struct { 60static const struct {
61 u_int period_factor; 61 u_int period_factor;
62 u_int period; /* in 100ths of ns */ 62 u_int period; /* in 100ths of ns */
63} scsi_syncrates[] = { 63} scsi_syncrates[] = {
diff --git a/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped b/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped
index 2068e00d2c75..c21ceab8e913 100644
--- a/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped
+++ b/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped
@@ -48,13 +48,6 @@ ahd_reg_print_t ahd_error_print;
48#endif 48#endif
49 49
50#if AIC_DEBUG_REGISTERS 50#if AIC_DEBUG_REGISTERS
51ahd_reg_print_t ahd_clrerr_print;
52#else
53#define ahd_clrerr_print(regvalue, cur_col, wrap) \
54 ahd_print_register(NULL, 0, "CLRERR", 0x04, regvalue, cur_col, wrap)
55#endif
56
57#if AIC_DEBUG_REGISTERS
58ahd_reg_print_t ahd_hcntrl_print; 51ahd_reg_print_t ahd_hcntrl_print;
59#else 52#else
60#define ahd_hcntrl_print(regvalue, cur_col, wrap) \ 53#define ahd_hcntrl_print(regvalue, cur_col, wrap) \
@@ -167,13 +160,6 @@ ahd_reg_print_t ahd_sg_cache_shadow_print;
167#endif 160#endif
168 161
169#if AIC_DEBUG_REGISTERS 162#if AIC_DEBUG_REGISTERS
170ahd_reg_print_t ahd_arbctl_print;
171#else
172#define ahd_arbctl_print(regvalue, cur_col, wrap) \
173 ahd_print_register(NULL, 0, "ARBCTL", 0x1b, regvalue, cur_col, wrap)
174#endif
175
176#if AIC_DEBUG_REGISTERS
177ahd_reg_print_t ahd_sg_cache_pre_print; 163ahd_reg_print_t ahd_sg_cache_pre_print;
178#else 164#else
179#define ahd_sg_cache_pre_print(regvalue, cur_col, wrap) \ 165#define ahd_sg_cache_pre_print(regvalue, cur_col, wrap) \
@@ -188,20 +174,6 @@ ahd_reg_print_t ahd_lqin_print;
188#endif 174#endif
189 175
190#if AIC_DEBUG_REGISTERS 176#if AIC_DEBUG_REGISTERS
191ahd_reg_print_t ahd_typeptr_print;
192#else
193#define ahd_typeptr_print(regvalue, cur_col, wrap) \
194 ahd_print_register(NULL, 0, "TYPEPTR", 0x20, regvalue, cur_col, wrap)
195#endif
196
197#if AIC_DEBUG_REGISTERS
198ahd_reg_print_t ahd_tagptr_print;
199#else
200#define ahd_tagptr_print(regvalue, cur_col, wrap) \
201 ahd_print_register(NULL, 0, "TAGPTR", 0x21, regvalue, cur_col, wrap)
202#endif
203
204#if AIC_DEBUG_REGISTERS
205ahd_reg_print_t ahd_lunptr_print; 177ahd_reg_print_t ahd_lunptr_print;
206#else 178#else
207#define ahd_lunptr_print(regvalue, cur_col, wrap) \ 179#define ahd_lunptr_print(regvalue, cur_col, wrap) \
@@ -209,20 +181,6 @@ ahd_reg_print_t ahd_lunptr_print;
209#endif 181#endif
210 182
211#if AIC_DEBUG_REGISTERS 183#if AIC_DEBUG_REGISTERS
212ahd_reg_print_t ahd_datalenptr_print;
213#else
214#define ahd_datalenptr_print(regvalue, cur_col, wrap) \
215 ahd_print_register(NULL, 0, "DATALENPTR", 0x23, regvalue, cur_col, wrap)
216#endif
217
218#if AIC_DEBUG_REGISTERS
219ahd_reg_print_t ahd_statlenptr_print;
220#else
221#define ahd_statlenptr_print(regvalue, cur_col, wrap) \
222 ahd_print_register(NULL, 0, "STATLENPTR", 0x24, regvalue, cur_col, wrap)
223#endif
224
225#if AIC_DEBUG_REGISTERS
226ahd_reg_print_t ahd_cmdlenptr_print; 184ahd_reg_print_t ahd_cmdlenptr_print;
227#else 185#else
228#define ahd_cmdlenptr_print(regvalue, cur_col, wrap) \ 186#define ahd_cmdlenptr_print(regvalue, cur_col, wrap) \
@@ -258,13 +216,6 @@ ahd_reg_print_t ahd_qnextptr_print;
258#endif 216#endif
259 217
260#if AIC_DEBUG_REGISTERS 218#if AIC_DEBUG_REGISTERS
261ahd_reg_print_t ahd_idptr_print;
262#else
263#define ahd_idptr_print(regvalue, cur_col, wrap) \
264 ahd_print_register(NULL, 0, "IDPTR", 0x2a, regvalue, cur_col, wrap)
265#endif
266
267#if AIC_DEBUG_REGISTERS
268ahd_reg_print_t ahd_abrtbyteptr_print; 219ahd_reg_print_t ahd_abrtbyteptr_print;
269#else 220#else
270#define ahd_abrtbyteptr_print(regvalue, cur_col, wrap) \ 221#define ahd_abrtbyteptr_print(regvalue, cur_col, wrap) \
@@ -279,27 +230,6 @@ ahd_reg_print_t ahd_abrtbitptr_print;
279#endif 230#endif
280 231
281#if AIC_DEBUG_REGISTERS 232#if AIC_DEBUG_REGISTERS
282ahd_reg_print_t ahd_maxcmdbytes_print;
283#else
284#define ahd_maxcmdbytes_print(regvalue, cur_col, wrap) \
285 ahd_print_register(NULL, 0, "MAXCMDBYTES", 0x2d, regvalue, cur_col, wrap)
286#endif
287
288#if AIC_DEBUG_REGISTERS
289ahd_reg_print_t ahd_maxcmd2rcv_print;
290#else
291#define ahd_maxcmd2rcv_print(regvalue, cur_col, wrap) \
292 ahd_print_register(NULL, 0, "MAXCMD2RCV", 0x2e, regvalue, cur_col, wrap)
293#endif
294
295#if AIC_DEBUG_REGISTERS
296ahd_reg_print_t ahd_shortthresh_print;
297#else
298#define ahd_shortthresh_print(regvalue, cur_col, wrap) \
299 ahd_print_register(NULL, 0, "SHORTTHRESH", 0x2f, regvalue, cur_col, wrap)
300#endif
301
302#if AIC_DEBUG_REGISTERS
303ahd_reg_print_t ahd_lunlen_print; 233ahd_reg_print_t ahd_lunlen_print;
304#else 234#else
305#define ahd_lunlen_print(regvalue, cur_col, wrap) \ 235#define ahd_lunlen_print(regvalue, cur_col, wrap) \
@@ -328,41 +258,6 @@ ahd_reg_print_t ahd_maxcmdcnt_print;
328#endif 258#endif
329 259
330#if AIC_DEBUG_REGISTERS 260#if AIC_DEBUG_REGISTERS
331ahd_reg_print_t ahd_lqrsvd01_print;
332#else
333#define ahd_lqrsvd01_print(regvalue, cur_col, wrap) \
334 ahd_print_register(NULL, 0, "LQRSVD01", 0x34, regvalue, cur_col, wrap)
335#endif
336
337#if AIC_DEBUG_REGISTERS
338ahd_reg_print_t ahd_lqrsvd16_print;
339#else
340#define ahd_lqrsvd16_print(regvalue, cur_col, wrap) \
341 ahd_print_register(NULL, 0, "LQRSVD16", 0x35, regvalue, cur_col, wrap)
342#endif
343
344#if AIC_DEBUG_REGISTERS
345ahd_reg_print_t ahd_lqrsvd17_print;
346#else
347#define ahd_lqrsvd17_print(regvalue, cur_col, wrap) \
348 ahd_print_register(NULL, 0, "LQRSVD17", 0x36, regvalue, cur_col, wrap)
349#endif
350
351#if AIC_DEBUG_REGISTERS
352ahd_reg_print_t ahd_cmdrsvd0_print;
353#else
354#define ahd_cmdrsvd0_print(regvalue, cur_col, wrap) \
355 ahd_print_register(NULL, 0, "CMDRSVD0", 0x37, regvalue, cur_col, wrap)
356#endif
357
358#if AIC_DEBUG_REGISTERS
359ahd_reg_print_t ahd_lqctl0_print;
360#else
361#define ahd_lqctl0_print(regvalue, cur_col, wrap) \
362 ahd_print_register(NULL, 0, "LQCTL0", 0x38, regvalue, cur_col, wrap)
363#endif
364
365#if AIC_DEBUG_REGISTERS
366ahd_reg_print_t ahd_lqctl1_print; 261ahd_reg_print_t ahd_lqctl1_print;
367#else 262#else
368#define ahd_lqctl1_print(regvalue, cur_col, wrap) \ 263#define ahd_lqctl1_print(regvalue, cur_col, wrap) \
@@ -370,13 +265,6 @@ ahd_reg_print_t ahd_lqctl1_print;
370#endif 265#endif
371 266
372#if AIC_DEBUG_REGISTERS 267#if AIC_DEBUG_REGISTERS
373ahd_reg_print_t ahd_scsbist0_print;
374#else
375#define ahd_scsbist0_print(regvalue, cur_col, wrap) \
376 ahd_print_register(NULL, 0, "SCSBIST0", 0x39, regvalue, cur_col, wrap)
377#endif
378
379#if AIC_DEBUG_REGISTERS
380ahd_reg_print_t ahd_lqctl2_print; 268ahd_reg_print_t ahd_lqctl2_print;
381#else 269#else
382#define ahd_lqctl2_print(regvalue, cur_col, wrap) \ 270#define ahd_lqctl2_print(regvalue, cur_col, wrap) \
@@ -384,13 +272,6 @@ ahd_reg_print_t ahd_lqctl2_print;
384#endif 272#endif
385 273
386#if AIC_DEBUG_REGISTERS 274#if AIC_DEBUG_REGISTERS
387ahd_reg_print_t ahd_scsbist1_print;
388#else
389#define ahd_scsbist1_print(regvalue, cur_col, wrap) \
390 ahd_print_register(NULL, 0, "SCSBIST1", 0x3a, regvalue, cur_col, wrap)
391#endif
392
393#if AIC_DEBUG_REGISTERS
394ahd_reg_print_t ahd_scsiseq0_print; 275ahd_reg_print_t ahd_scsiseq0_print;
395#else 276#else
396#define ahd_scsiseq0_print(regvalue, cur_col, wrap) \ 277#define ahd_scsiseq0_print(regvalue, cur_col, wrap) \
@@ -412,20 +293,6 @@ ahd_reg_print_t ahd_sxfrctl0_print;
412#endif 293#endif
413 294
414#if AIC_DEBUG_REGISTERS 295#if AIC_DEBUG_REGISTERS
415ahd_reg_print_t ahd_dlcount_print;
416#else
417#define ahd_dlcount_print(regvalue, cur_col, wrap) \
418 ahd_print_register(NULL, 0, "DLCOUNT", 0x3c, regvalue, cur_col, wrap)
419#endif
420
421#if AIC_DEBUG_REGISTERS
422ahd_reg_print_t ahd_businitid_print;
423#else
424#define ahd_businitid_print(regvalue, cur_col, wrap) \
425 ahd_print_register(NULL, 0, "BUSINITID", 0x3c, regvalue, cur_col, wrap)
426#endif
427
428#if AIC_DEBUG_REGISTERS
429ahd_reg_print_t ahd_sxfrctl1_print; 296ahd_reg_print_t ahd_sxfrctl1_print;
430#else 297#else
431#define ahd_sxfrctl1_print(regvalue, cur_col, wrap) \ 298#define ahd_sxfrctl1_print(regvalue, cur_col, wrap) \
@@ -433,20 +300,6 @@ ahd_reg_print_t ahd_sxfrctl1_print;
433#endif 300#endif
434 301
435#if AIC_DEBUG_REGISTERS 302#if AIC_DEBUG_REGISTERS
436ahd_reg_print_t ahd_bustargid_print;
437#else
438#define ahd_bustargid_print(regvalue, cur_col, wrap) \
439 ahd_print_register(NULL, 0, "BUSTARGID", 0x3e, regvalue, cur_col, wrap)
440#endif
441
442#if AIC_DEBUG_REGISTERS
443ahd_reg_print_t ahd_sxfrctl2_print;
444#else
445#define ahd_sxfrctl2_print(regvalue, cur_col, wrap) \
446 ahd_print_register(NULL, 0, "SXFRCTL2", 0x3e, regvalue, cur_col, wrap)
447#endif
448
449#if AIC_DEBUG_REGISTERS
450ahd_reg_print_t ahd_dffstat_print; 303ahd_reg_print_t ahd_dffstat_print;
451#else 304#else
452#define ahd_dffstat_print(regvalue, cur_col, wrap) \ 305#define ahd_dffstat_print(regvalue, cur_col, wrap) \
@@ -454,17 +307,17 @@ ahd_reg_print_t ahd_dffstat_print;
454#endif 307#endif
455 308
456#if AIC_DEBUG_REGISTERS 309#if AIC_DEBUG_REGISTERS
457ahd_reg_print_t ahd_scsisigo_print; 310ahd_reg_print_t ahd_multargid_print;
458#else 311#else
459#define ahd_scsisigo_print(regvalue, cur_col, wrap) \ 312#define ahd_multargid_print(regvalue, cur_col, wrap) \
460 ahd_print_register(NULL, 0, "SCSISIGO", 0x40, regvalue, cur_col, wrap) 313 ahd_print_register(NULL, 0, "MULTARGID", 0x40, regvalue, cur_col, wrap)
461#endif 314#endif
462 315
463#if AIC_DEBUG_REGISTERS 316#if AIC_DEBUG_REGISTERS
464ahd_reg_print_t ahd_multargid_print; 317ahd_reg_print_t ahd_scsisigo_print;
465#else 318#else
466#define ahd_multargid_print(regvalue, cur_col, wrap) \ 319#define ahd_scsisigo_print(regvalue, cur_col, wrap) \
467 ahd_print_register(NULL, 0, "MULTARGID", 0x40, regvalue, cur_col, wrap) 320 ahd_print_register(NULL, 0, "SCSISIGO", 0x40, regvalue, cur_col, wrap)
468#endif 321#endif
469 322
470#if AIC_DEBUG_REGISTERS 323#if AIC_DEBUG_REGISTERS
@@ -482,13 +335,6 @@ ahd_reg_print_t ahd_scsiphase_print;
482#endif 335#endif
483 336
484#if AIC_DEBUG_REGISTERS 337#if AIC_DEBUG_REGISTERS
485ahd_reg_print_t ahd_scsidat0_img_print;
486#else
487#define ahd_scsidat0_img_print(regvalue, cur_col, wrap) \
488 ahd_print_register(NULL, 0, "SCSIDAT0_IMG", 0x43, regvalue, cur_col, wrap)
489#endif
490
491#if AIC_DEBUG_REGISTERS
492ahd_reg_print_t ahd_scsidat_print; 338ahd_reg_print_t ahd_scsidat_print;
493#else 339#else
494#define ahd_scsidat_print(regvalue, cur_col, wrap) \ 340#define ahd_scsidat_print(regvalue, cur_col, wrap) \
@@ -531,13 +377,6 @@ ahd_reg_print_t ahd_sblkctl_print;
531#endif 377#endif
532 378
533#if AIC_DEBUG_REGISTERS 379#if AIC_DEBUG_REGISTERS
534ahd_reg_print_t ahd_clrsint0_print;
535#else
536#define ahd_clrsint0_print(regvalue, cur_col, wrap) \
537 ahd_print_register(NULL, 0, "CLRSINT0", 0x4b, regvalue, cur_col, wrap)
538#endif
539
540#if AIC_DEBUG_REGISTERS
541ahd_reg_print_t ahd_sstat0_print; 380ahd_reg_print_t ahd_sstat0_print;
542#else 381#else
543#define ahd_sstat0_print(regvalue, cur_col, wrap) \ 382#define ahd_sstat0_print(regvalue, cur_col, wrap) \
@@ -552,10 +391,10 @@ ahd_reg_print_t ahd_simode0_print;
552#endif 391#endif
553 392
554#if AIC_DEBUG_REGISTERS 393#if AIC_DEBUG_REGISTERS
555ahd_reg_print_t ahd_clrsint1_print; 394ahd_reg_print_t ahd_clrsint0_print;
556#else 395#else
557#define ahd_clrsint1_print(regvalue, cur_col, wrap) \ 396#define ahd_clrsint0_print(regvalue, cur_col, wrap) \
558 ahd_print_register(NULL, 0, "CLRSINT1", 0x4c, regvalue, cur_col, wrap) 397 ahd_print_register(NULL, 0, "CLRSINT0", 0x4b, regvalue, cur_col, wrap)
559#endif 398#endif
560 399
561#if AIC_DEBUG_REGISTERS 400#if AIC_DEBUG_REGISTERS
@@ -566,17 +405,17 @@ ahd_reg_print_t ahd_sstat1_print;
566#endif 405#endif
567 406
568#if AIC_DEBUG_REGISTERS 407#if AIC_DEBUG_REGISTERS
569ahd_reg_print_t ahd_sstat2_print; 408ahd_reg_print_t ahd_clrsint1_print;
570#else 409#else
571#define ahd_sstat2_print(regvalue, cur_col, wrap) \ 410#define ahd_clrsint1_print(regvalue, cur_col, wrap) \
572 ahd_print_register(NULL, 0, "SSTAT2", 0x4d, regvalue, cur_col, wrap) 411 ahd_print_register(NULL, 0, "CLRSINT1", 0x4c, regvalue, cur_col, wrap)
573#endif 412#endif
574 413
575#if AIC_DEBUG_REGISTERS 414#if AIC_DEBUG_REGISTERS
576ahd_reg_print_t ahd_simode2_print; 415ahd_reg_print_t ahd_sstat2_print;
577#else 416#else
578#define ahd_simode2_print(regvalue, cur_col, wrap) \ 417#define ahd_sstat2_print(regvalue, cur_col, wrap) \
579 ahd_print_register(NULL, 0, "SIMODE2", 0x4d, regvalue, cur_col, wrap) 418 ahd_print_register(NULL, 0, "SSTAT2", 0x4d, regvalue, cur_col, wrap)
580#endif 419#endif
581 420
582#if AIC_DEBUG_REGISTERS 421#if AIC_DEBUG_REGISTERS
@@ -622,17 +461,17 @@ ahd_reg_print_t ahd_lqistat0_print;
622#endif 461#endif
623 462
624#if AIC_DEBUG_REGISTERS 463#if AIC_DEBUG_REGISTERS
625ahd_reg_print_t ahd_clrlqiint0_print; 464ahd_reg_print_t ahd_lqimode0_print;
626#else 465#else
627#define ahd_clrlqiint0_print(regvalue, cur_col, wrap) \ 466#define ahd_lqimode0_print(regvalue, cur_col, wrap) \
628 ahd_print_register(NULL, 0, "CLRLQIINT0", 0x50, regvalue, cur_col, wrap) 467 ahd_print_register(NULL, 0, "LQIMODE0", 0x50, regvalue, cur_col, wrap)
629#endif 468#endif
630 469
631#if AIC_DEBUG_REGISTERS 470#if AIC_DEBUG_REGISTERS
632ahd_reg_print_t ahd_lqimode0_print; 471ahd_reg_print_t ahd_clrlqiint0_print;
633#else 472#else
634#define ahd_lqimode0_print(regvalue, cur_col, wrap) \ 473#define ahd_clrlqiint0_print(regvalue, cur_col, wrap) \
635 ahd_print_register(NULL, 0, "LQIMODE0", 0x50, regvalue, cur_col, wrap) 474 ahd_print_register(NULL, 0, "CLRLQIINT0", 0x50, regvalue, cur_col, wrap)
636#endif 475#endif
637 476
638#if AIC_DEBUG_REGISTERS 477#if AIC_DEBUG_REGISTERS
@@ -790,13 +629,6 @@ ahd_reg_print_t ahd_seqintsrc_print;
790#endif 629#endif
791 630
792#if AIC_DEBUG_REGISTERS 631#if AIC_DEBUG_REGISTERS
793ahd_reg_print_t ahd_currscb_print;
794#else
795#define ahd_currscb_print(regvalue, cur_col, wrap) \
796 ahd_print_register(NULL, 0, "CURRSCB", 0x5c, regvalue, cur_col, wrap)
797#endif
798
799#if AIC_DEBUG_REGISTERS
800ahd_reg_print_t ahd_seqimode_print; 632ahd_reg_print_t ahd_seqimode_print;
801#else 633#else
802#define ahd_seqimode_print(regvalue, cur_col, wrap) \ 634#define ahd_seqimode_print(regvalue, cur_col, wrap) \
@@ -804,24 +636,17 @@ ahd_reg_print_t ahd_seqimode_print;
804#endif 636#endif
805 637
806#if AIC_DEBUG_REGISTERS 638#if AIC_DEBUG_REGISTERS
807ahd_reg_print_t ahd_mdffstat_print; 639ahd_reg_print_t ahd_currscb_print;
808#else
809#define ahd_mdffstat_print(regvalue, cur_col, wrap) \
810 ahd_print_register(NULL, 0, "MDFFSTAT", 0x5d, regvalue, cur_col, wrap)
811#endif
812
813#if AIC_DEBUG_REGISTERS
814ahd_reg_print_t ahd_crccontrol_print;
815#else 640#else
816#define ahd_crccontrol_print(regvalue, cur_col, wrap) \ 641#define ahd_currscb_print(regvalue, cur_col, wrap) \
817 ahd_print_register(NULL, 0, "CRCCONTROL", 0x5d, regvalue, cur_col, wrap) 642 ahd_print_register(NULL, 0, "CURRSCB", 0x5c, regvalue, cur_col, wrap)
818#endif 643#endif
819 644
820#if AIC_DEBUG_REGISTERS 645#if AIC_DEBUG_REGISTERS
821ahd_reg_print_t ahd_dfftag_print; 646ahd_reg_print_t ahd_mdffstat_print;
822#else 647#else
823#define ahd_dfftag_print(regvalue, cur_col, wrap) \ 648#define ahd_mdffstat_print(regvalue, cur_col, wrap) \
824 ahd_print_register(NULL, 0, "DFFTAG", 0x5e, regvalue, cur_col, wrap) 649 ahd_print_register(NULL, 0, "MDFFSTAT", 0x5d, regvalue, cur_col, wrap)
825#endif 650#endif
826 651
827#if AIC_DEBUG_REGISTERS 652#if AIC_DEBUG_REGISTERS
@@ -832,20 +657,6 @@ ahd_reg_print_t ahd_lastscb_print;
832#endif 657#endif
833 658
834#if AIC_DEBUG_REGISTERS 659#if AIC_DEBUG_REGISTERS
835ahd_reg_print_t ahd_scsitest_print;
836#else
837#define ahd_scsitest_print(regvalue, cur_col, wrap) \
838 ahd_print_register(NULL, 0, "SCSITEST", 0x5e, regvalue, cur_col, wrap)
839#endif
840
841#if AIC_DEBUG_REGISTERS
842ahd_reg_print_t ahd_iopdnctl_print;
843#else
844#define ahd_iopdnctl_print(regvalue, cur_col, wrap) \
845 ahd_print_register(NULL, 0, "IOPDNCTL", 0x5f, regvalue, cur_col, wrap)
846#endif
847
848#if AIC_DEBUG_REGISTERS
849ahd_reg_print_t ahd_shaddr_print; 660ahd_reg_print_t ahd_shaddr_print;
850#else 661#else
851#define ahd_shaddr_print(regvalue, cur_col, wrap) \ 662#define ahd_shaddr_print(regvalue, cur_col, wrap) \
@@ -860,13 +671,6 @@ ahd_reg_print_t ahd_negoaddr_print;
860#endif 671#endif
861 672
862#if AIC_DEBUG_REGISTERS 673#if AIC_DEBUG_REGISTERS
863ahd_reg_print_t ahd_dgrpcrci_print;
864#else
865#define ahd_dgrpcrci_print(regvalue, cur_col, wrap) \
866 ahd_print_register(NULL, 0, "DGRPCRCI", 0x60, regvalue, cur_col, wrap)
867#endif
868
869#if AIC_DEBUG_REGISTERS
870ahd_reg_print_t ahd_negperiod_print; 674ahd_reg_print_t ahd_negperiod_print;
871#else 675#else
872#define ahd_negperiod_print(regvalue, cur_col, wrap) \ 676#define ahd_negperiod_print(regvalue, cur_col, wrap) \
@@ -874,13 +678,6 @@ ahd_reg_print_t ahd_negperiod_print;
874#endif 678#endif
875 679
876#if AIC_DEBUG_REGISTERS 680#if AIC_DEBUG_REGISTERS
877ahd_reg_print_t ahd_packcrci_print;
878#else
879#define ahd_packcrci_print(regvalue, cur_col, wrap) \
880 ahd_print_register(NULL, 0, "PACKCRCI", 0x62, regvalue, cur_col, wrap)
881#endif
882
883#if AIC_DEBUG_REGISTERS
884ahd_reg_print_t ahd_negoffset_print; 681ahd_reg_print_t ahd_negoffset_print;
885#else 682#else
886#define ahd_negoffset_print(regvalue, cur_col, wrap) \ 683#define ahd_negoffset_print(regvalue, cur_col, wrap) \
@@ -930,13 +727,6 @@ ahd_reg_print_t ahd_iownid_print;
930#endif 727#endif
931 728
932#if AIC_DEBUG_REGISTERS 729#if AIC_DEBUG_REGISTERS
933ahd_reg_print_t ahd_pll960ctl0_print;
934#else
935#define ahd_pll960ctl0_print(regvalue, cur_col, wrap) \
936 ahd_print_register(NULL, 0, "PLL960CTL0", 0x68, regvalue, cur_col, wrap)
937#endif
938
939#if AIC_DEBUG_REGISTERS
940ahd_reg_print_t ahd_shcnt_print; 730ahd_reg_print_t ahd_shcnt_print;
941#else 731#else
942#define ahd_shcnt_print(regvalue, cur_col, wrap) \ 732#define ahd_shcnt_print(regvalue, cur_col, wrap) \
@@ -951,27 +741,6 @@ ahd_reg_print_t ahd_townid_print;
951#endif 741#endif
952 742
953#if AIC_DEBUG_REGISTERS 743#if AIC_DEBUG_REGISTERS
954ahd_reg_print_t ahd_pll960ctl1_print;
955#else
956#define ahd_pll960ctl1_print(regvalue, cur_col, wrap) \
957 ahd_print_register(NULL, 0, "PLL960CTL1", 0x69, regvalue, cur_col, wrap)
958#endif
959
960#if AIC_DEBUG_REGISTERS
961ahd_reg_print_t ahd_pll960cnt0_print;
962#else
963#define ahd_pll960cnt0_print(regvalue, cur_col, wrap) \
964 ahd_print_register(NULL, 0, "PLL960CNT0", 0x6a, regvalue, cur_col, wrap)
965#endif
966
967#if AIC_DEBUG_REGISTERS
968ahd_reg_print_t ahd_xsig_print;
969#else
970#define ahd_xsig_print(regvalue, cur_col, wrap) \
971 ahd_print_register(NULL, 0, "XSIG", 0x6a, regvalue, cur_col, wrap)
972#endif
973
974#if AIC_DEBUG_REGISTERS
975ahd_reg_print_t ahd_seloid_print; 744ahd_reg_print_t ahd_seloid_print;
976#else 745#else
977#define ahd_seloid_print(regvalue, cur_col, wrap) \ 746#define ahd_seloid_print(regvalue, cur_col, wrap) \
@@ -979,41 +748,6 @@ ahd_reg_print_t ahd_seloid_print;
979#endif 748#endif
980 749
981#if AIC_DEBUG_REGISTERS 750#if AIC_DEBUG_REGISTERS
982ahd_reg_print_t ahd_pll400ctl0_print;
983#else
984#define ahd_pll400ctl0_print(regvalue, cur_col, wrap) \
985 ahd_print_register(NULL, 0, "PLL400CTL0", 0x6c, regvalue, cur_col, wrap)
986#endif
987
988#if AIC_DEBUG_REGISTERS
989ahd_reg_print_t ahd_fairness_print;
990#else
991#define ahd_fairness_print(regvalue, cur_col, wrap) \
992 ahd_print_register(NULL, 0, "FAIRNESS", 0x6c, regvalue, cur_col, wrap)
993#endif
994
995#if AIC_DEBUG_REGISTERS
996ahd_reg_print_t ahd_pll400ctl1_print;
997#else
998#define ahd_pll400ctl1_print(regvalue, cur_col, wrap) \
999 ahd_print_register(NULL, 0, "PLL400CTL1", 0x6d, regvalue, cur_col, wrap)
1000#endif
1001
1002#if AIC_DEBUG_REGISTERS
1003ahd_reg_print_t ahd_unfairness_print;
1004#else
1005#define ahd_unfairness_print(regvalue, cur_col, wrap) \
1006 ahd_print_register(NULL, 0, "UNFAIRNESS", 0x6e, regvalue, cur_col, wrap)
1007#endif
1008
1009#if AIC_DEBUG_REGISTERS
1010ahd_reg_print_t ahd_pll400cnt0_print;
1011#else
1012#define ahd_pll400cnt0_print(regvalue, cur_col, wrap) \
1013 ahd_print_register(NULL, 0, "PLL400CNT0", 0x6e, regvalue, cur_col, wrap)
1014#endif
1015
1016#if AIC_DEBUG_REGISTERS
1017ahd_reg_print_t ahd_haddr_print; 751ahd_reg_print_t ahd_haddr_print;
1018#else 752#else
1019#define ahd_haddr_print(regvalue, cur_col, wrap) \ 753#define ahd_haddr_print(regvalue, cur_col, wrap) \
@@ -1021,27 +755,6 @@ ahd_reg_print_t ahd_haddr_print;
1021#endif 755#endif
1022 756
1023#if AIC_DEBUG_REGISTERS 757#if AIC_DEBUG_REGISTERS
1024ahd_reg_print_t ahd_plldelay_print;
1025#else
1026#define ahd_plldelay_print(regvalue, cur_col, wrap) \
1027 ahd_print_register(NULL, 0, "PLLDELAY", 0x70, regvalue, cur_col, wrap)
1028#endif
1029
1030#if AIC_DEBUG_REGISTERS
1031ahd_reg_print_t ahd_hodmaadr_print;
1032#else
1033#define ahd_hodmaadr_print(regvalue, cur_col, wrap) \
1034 ahd_print_register(NULL, 0, "HODMAADR", 0x70, regvalue, cur_col, wrap)
1035#endif
1036
1037#if AIC_DEBUG_REGISTERS
1038ahd_reg_print_t ahd_hodmacnt_print;
1039#else
1040#define ahd_hodmacnt_print(regvalue, cur_col, wrap) \
1041 ahd_print_register(NULL, 0, "HODMACNT", 0x78, regvalue, cur_col, wrap)
1042#endif
1043
1044#if AIC_DEBUG_REGISTERS
1045ahd_reg_print_t ahd_hcnt_print; 758ahd_reg_print_t ahd_hcnt_print;
1046#else 759#else
1047#define ahd_hcnt_print(regvalue, cur_col, wrap) \ 760#define ahd_hcnt_print(regvalue, cur_col, wrap) \
@@ -1049,10 +762,10 @@ ahd_reg_print_t ahd_hcnt_print;
1049#endif 762#endif
1050 763
1051#if AIC_DEBUG_REGISTERS 764#if AIC_DEBUG_REGISTERS
1052ahd_reg_print_t ahd_hodmaen_print; 765ahd_reg_print_t ahd_sghaddr_print;
1053#else 766#else
1054#define ahd_hodmaen_print(regvalue, cur_col, wrap) \ 767#define ahd_sghaddr_print(regvalue, cur_col, wrap) \
1055 ahd_print_register(NULL, 0, "HODMAEN", 0x7a, regvalue, cur_col, wrap) 768 ahd_print_register(NULL, 0, "SGHADDR", 0x7c, regvalue, cur_col, wrap)
1056#endif 769#endif
1057 770
1058#if AIC_DEBUG_REGISTERS 771#if AIC_DEBUG_REGISTERS
@@ -1063,10 +776,10 @@ ahd_reg_print_t ahd_scbhaddr_print;
1063#endif 776#endif
1064 777
1065#if AIC_DEBUG_REGISTERS 778#if AIC_DEBUG_REGISTERS
1066ahd_reg_print_t ahd_sghaddr_print; 779ahd_reg_print_t ahd_sghcnt_print;
1067#else 780#else
1068#define ahd_sghaddr_print(regvalue, cur_col, wrap) \ 781#define ahd_sghcnt_print(regvalue, cur_col, wrap) \
1069 ahd_print_register(NULL, 0, "SGHADDR", 0x7c, regvalue, cur_col, wrap) 782 ahd_print_register(NULL, 0, "SGHCNT", 0x84, regvalue, cur_col, wrap)
1070#endif 783#endif
1071 784
1072#if AIC_DEBUG_REGISTERS 785#if AIC_DEBUG_REGISTERS
@@ -1077,13 +790,6 @@ ahd_reg_print_t ahd_scbhcnt_print;
1077#endif 790#endif
1078 791
1079#if AIC_DEBUG_REGISTERS 792#if AIC_DEBUG_REGISTERS
1080ahd_reg_print_t ahd_sghcnt_print;
1081#else
1082#define ahd_sghcnt_print(regvalue, cur_col, wrap) \
1083 ahd_print_register(NULL, 0, "SGHCNT", 0x84, regvalue, cur_col, wrap)
1084#endif
1085
1086#if AIC_DEBUG_REGISTERS
1087ahd_reg_print_t ahd_dff_thrsh_print; 793ahd_reg_print_t ahd_dff_thrsh_print;
1088#else 794#else
1089#define ahd_dff_thrsh_print(regvalue, cur_col, wrap) \ 795#define ahd_dff_thrsh_print(regvalue, cur_col, wrap) \
@@ -1091,132 +797,6 @@ ahd_reg_print_t ahd_dff_thrsh_print;
1091#endif 797#endif
1092 798
1093#if AIC_DEBUG_REGISTERS 799#if AIC_DEBUG_REGISTERS
1094ahd_reg_print_t ahd_romaddr_print;
1095#else
1096#define ahd_romaddr_print(regvalue, cur_col, wrap) \
1097 ahd_print_register(NULL, 0, "ROMADDR", 0x8a, regvalue, cur_col, wrap)
1098#endif
1099
1100#if AIC_DEBUG_REGISTERS
1101ahd_reg_print_t ahd_romcntrl_print;
1102#else
1103#define ahd_romcntrl_print(regvalue, cur_col, wrap) \
1104 ahd_print_register(NULL, 0, "ROMCNTRL", 0x8d, regvalue, cur_col, wrap)
1105#endif
1106
1107#if AIC_DEBUG_REGISTERS
1108ahd_reg_print_t ahd_romdata_print;
1109#else
1110#define ahd_romdata_print(regvalue, cur_col, wrap) \
1111 ahd_print_register(NULL, 0, "ROMDATA", 0x8e, regvalue, cur_col, wrap)
1112#endif
1113
1114#if AIC_DEBUG_REGISTERS
1115ahd_reg_print_t ahd_cmcrxmsg0_print;
1116#else
1117#define ahd_cmcrxmsg0_print(regvalue, cur_col, wrap) \
1118 ahd_print_register(NULL, 0, "CMCRXMSG0", 0x90, regvalue, cur_col, wrap)
1119#endif
1120
1121#if AIC_DEBUG_REGISTERS
1122ahd_reg_print_t ahd_roenable_print;
1123#else
1124#define ahd_roenable_print(regvalue, cur_col, wrap) \
1125 ahd_print_register(NULL, 0, "ROENABLE", 0x90, regvalue, cur_col, wrap)
1126#endif
1127
1128#if AIC_DEBUG_REGISTERS
1129ahd_reg_print_t ahd_ovlyrxmsg0_print;
1130#else
1131#define ahd_ovlyrxmsg0_print(regvalue, cur_col, wrap) \
1132 ahd_print_register(NULL, 0, "OVLYRXMSG0", 0x90, regvalue, cur_col, wrap)
1133#endif
1134
1135#if AIC_DEBUG_REGISTERS
1136ahd_reg_print_t ahd_dchrxmsg0_print;
1137#else
1138#define ahd_dchrxmsg0_print(regvalue, cur_col, wrap) \
1139 ahd_print_register(NULL, 0, "DCHRXMSG0", 0x90, regvalue, cur_col, wrap)
1140#endif
1141
1142#if AIC_DEBUG_REGISTERS
1143ahd_reg_print_t ahd_ovlyrxmsg1_print;
1144#else
1145#define ahd_ovlyrxmsg1_print(regvalue, cur_col, wrap) \
1146 ahd_print_register(NULL, 0, "OVLYRXMSG1", 0x91, regvalue, cur_col, wrap)
1147#endif
1148
1149#if AIC_DEBUG_REGISTERS
1150ahd_reg_print_t ahd_nsenable_print;
1151#else
1152#define ahd_nsenable_print(regvalue, cur_col, wrap) \
1153 ahd_print_register(NULL, 0, "NSENABLE", 0x91, regvalue, cur_col, wrap)
1154#endif
1155
1156#if AIC_DEBUG_REGISTERS
1157ahd_reg_print_t ahd_cmcrxmsg1_print;
1158#else
1159#define ahd_cmcrxmsg1_print(regvalue, cur_col, wrap) \
1160 ahd_print_register(NULL, 0, "CMCRXMSG1", 0x91, regvalue, cur_col, wrap)
1161#endif
1162
1163#if AIC_DEBUG_REGISTERS
1164ahd_reg_print_t ahd_dchrxmsg1_print;
1165#else
1166#define ahd_dchrxmsg1_print(regvalue, cur_col, wrap) \
1167 ahd_print_register(NULL, 0, "DCHRXMSG1", 0x91, regvalue, cur_col, wrap)
1168#endif
1169
1170#if AIC_DEBUG_REGISTERS
1171ahd_reg_print_t ahd_dchrxmsg2_print;
1172#else
1173#define ahd_dchrxmsg2_print(regvalue, cur_col, wrap) \
1174 ahd_print_register(NULL, 0, "DCHRXMSG2", 0x92, regvalue, cur_col, wrap)
1175#endif
1176
1177#if AIC_DEBUG_REGISTERS
1178ahd_reg_print_t ahd_cmcrxmsg2_print;
1179#else
1180#define ahd_cmcrxmsg2_print(regvalue, cur_col, wrap) \
1181 ahd_print_register(NULL, 0, "CMCRXMSG2", 0x92, regvalue, cur_col, wrap)
1182#endif
1183
1184#if AIC_DEBUG_REGISTERS
1185ahd_reg_print_t ahd_ost_print;
1186#else
1187#define ahd_ost_print(regvalue, cur_col, wrap) \
1188 ahd_print_register(NULL, 0, "OST", 0x92, regvalue, cur_col, wrap)
1189#endif
1190
1191#if AIC_DEBUG_REGISTERS
1192ahd_reg_print_t ahd_ovlyrxmsg2_print;
1193#else
1194#define ahd_ovlyrxmsg2_print(regvalue, cur_col, wrap) \
1195 ahd_print_register(NULL, 0, "OVLYRXMSG2", 0x92, regvalue, cur_col, wrap)
1196#endif
1197
1198#if AIC_DEBUG_REGISTERS
1199ahd_reg_print_t ahd_dchrxmsg3_print;
1200#else
1201#define ahd_dchrxmsg3_print(regvalue, cur_col, wrap) \
1202 ahd_print_register(NULL, 0, "DCHRXMSG3", 0x93, regvalue, cur_col, wrap)
1203#endif
1204
1205#if AIC_DEBUG_REGISTERS
1206ahd_reg_print_t ahd_ovlyrxmsg3_print;
1207#else
1208#define ahd_ovlyrxmsg3_print(regvalue, cur_col, wrap) \
1209 ahd_print_register(NULL, 0, "OVLYRXMSG3", 0x93, regvalue, cur_col, wrap)
1210#endif
1211
1212#if AIC_DEBUG_REGISTERS
1213ahd_reg_print_t ahd_cmcrxmsg3_print;
1214#else
1215#define ahd_cmcrxmsg3_print(regvalue, cur_col, wrap) \
1216 ahd_print_register(NULL, 0, "CMCRXMSG3", 0x93, regvalue, cur_col, wrap)
1217#endif
1218
1219#if AIC_DEBUG_REGISTERS
1220ahd_reg_print_t ahd_pcixctl_print; 800ahd_reg_print_t ahd_pcixctl_print;
1221#else 801#else
1222#define ahd_pcixctl_print(regvalue, cur_col, wrap) \ 802#define ahd_pcixctl_print(regvalue, cur_col, wrap) \
@@ -1224,34 +804,6 @@ ahd_reg_print_t ahd_pcixctl_print;
1224#endif 804#endif
1225 805
1226#if AIC_DEBUG_REGISTERS 806#if AIC_DEBUG_REGISTERS
1227ahd_reg_print_t ahd_ovlyseqbcnt_print;
1228#else
1229#define ahd_ovlyseqbcnt_print(regvalue, cur_col, wrap) \
1230 ahd_print_register(NULL, 0, "OVLYSEQBCNT", 0x94, regvalue, cur_col, wrap)
1231#endif
1232
1233#if AIC_DEBUG_REGISTERS
1234ahd_reg_print_t ahd_dchseqbcnt_print;
1235#else
1236#define ahd_dchseqbcnt_print(regvalue, cur_col, wrap) \
1237 ahd_print_register(NULL, 0, "DCHSEQBCNT", 0x94, regvalue, cur_col, wrap)
1238#endif
1239
1240#if AIC_DEBUG_REGISTERS
1241ahd_reg_print_t ahd_cmcseqbcnt_print;
1242#else
1243#define ahd_cmcseqbcnt_print(regvalue, cur_col, wrap) \
1244 ahd_print_register(NULL, 0, "CMCSEQBCNT", 0x94, regvalue, cur_col, wrap)
1245#endif
1246
1247#if AIC_DEBUG_REGISTERS
1248ahd_reg_print_t ahd_cmcspltstat0_print;
1249#else
1250#define ahd_cmcspltstat0_print(regvalue, cur_col, wrap) \
1251 ahd_print_register(NULL, 0, "CMCSPLTSTAT0", 0x96, regvalue, cur_col, wrap)
1252#endif
1253
1254#if AIC_DEBUG_REGISTERS
1255ahd_reg_print_t ahd_dchspltstat0_print; 807ahd_reg_print_t ahd_dchspltstat0_print;
1256#else 808#else
1257#define ahd_dchspltstat0_print(regvalue, cur_col, wrap) \ 809#define ahd_dchspltstat0_print(regvalue, cur_col, wrap) \
@@ -1259,27 +811,6 @@ ahd_reg_print_t ahd_dchspltstat0_print;
1259#endif 811#endif
1260 812
1261#if AIC_DEBUG_REGISTERS 813#if AIC_DEBUG_REGISTERS
1262ahd_reg_print_t ahd_ovlyspltstat0_print;
1263#else
1264#define ahd_ovlyspltstat0_print(regvalue, cur_col, wrap) \
1265 ahd_print_register(NULL, 0, "OVLYSPLTSTAT0", 0x96, regvalue, cur_col, wrap)
1266#endif
1267
1268#if AIC_DEBUG_REGISTERS
1269ahd_reg_print_t ahd_cmcspltstat1_print;
1270#else
1271#define ahd_cmcspltstat1_print(regvalue, cur_col, wrap) \
1272 ahd_print_register(NULL, 0, "CMCSPLTSTAT1", 0x97, regvalue, cur_col, wrap)
1273#endif
1274
1275#if AIC_DEBUG_REGISTERS
1276ahd_reg_print_t ahd_ovlyspltstat1_print;
1277#else
1278#define ahd_ovlyspltstat1_print(regvalue, cur_col, wrap) \
1279 ahd_print_register(NULL, 0, "OVLYSPLTSTAT1", 0x97, regvalue, cur_col, wrap)
1280#endif
1281
1282#if AIC_DEBUG_REGISTERS
1283ahd_reg_print_t ahd_dchspltstat1_print; 814ahd_reg_print_t ahd_dchspltstat1_print;
1284#else 815#else
1285#define ahd_dchspltstat1_print(regvalue, cur_col, wrap) \ 816#define ahd_dchspltstat1_print(regvalue, cur_col, wrap) \
@@ -1287,90 +818,6 @@ ahd_reg_print_t ahd_dchspltstat1_print;
1287#endif 818#endif
1288 819
1289#if AIC_DEBUG_REGISTERS 820#if AIC_DEBUG_REGISTERS
1290ahd_reg_print_t ahd_sgrxmsg0_print;
1291#else
1292#define ahd_sgrxmsg0_print(regvalue, cur_col, wrap) \
1293 ahd_print_register(NULL, 0, "SGRXMSG0", 0x98, regvalue, cur_col, wrap)
1294#endif
1295
1296#if AIC_DEBUG_REGISTERS
1297ahd_reg_print_t ahd_slvspltoutadr0_print;
1298#else
1299#define ahd_slvspltoutadr0_print(regvalue, cur_col, wrap) \
1300 ahd_print_register(NULL, 0, "SLVSPLTOUTADR0", 0x98, regvalue, cur_col, wrap)
1301#endif
1302
1303#if AIC_DEBUG_REGISTERS
1304ahd_reg_print_t ahd_sgrxmsg1_print;
1305#else
1306#define ahd_sgrxmsg1_print(regvalue, cur_col, wrap) \
1307 ahd_print_register(NULL, 0, "SGRXMSG1", 0x99, regvalue, cur_col, wrap)
1308#endif
1309
1310#if AIC_DEBUG_REGISTERS
1311ahd_reg_print_t ahd_slvspltoutadr1_print;
1312#else
1313#define ahd_slvspltoutadr1_print(regvalue, cur_col, wrap) \
1314 ahd_print_register(NULL, 0, "SLVSPLTOUTADR1", 0x99, regvalue, cur_col, wrap)
1315#endif
1316
1317#if AIC_DEBUG_REGISTERS
1318ahd_reg_print_t ahd_sgrxmsg2_print;
1319#else
1320#define ahd_sgrxmsg2_print(regvalue, cur_col, wrap) \
1321 ahd_print_register(NULL, 0, "SGRXMSG2", 0x9a, regvalue, cur_col, wrap)
1322#endif
1323
1324#if AIC_DEBUG_REGISTERS
1325ahd_reg_print_t ahd_slvspltoutadr2_print;
1326#else
1327#define ahd_slvspltoutadr2_print(regvalue, cur_col, wrap) \
1328 ahd_print_register(NULL, 0, "SLVSPLTOUTADR2", 0x9a, regvalue, cur_col, wrap)
1329#endif
1330
1331#if AIC_DEBUG_REGISTERS
1332ahd_reg_print_t ahd_sgrxmsg3_print;
1333#else
1334#define ahd_sgrxmsg3_print(regvalue, cur_col, wrap) \
1335 ahd_print_register(NULL, 0, "SGRXMSG3", 0x9b, regvalue, cur_col, wrap)
1336#endif
1337
1338#if AIC_DEBUG_REGISTERS
1339ahd_reg_print_t ahd_slvspltoutadr3_print;
1340#else
1341#define ahd_slvspltoutadr3_print(regvalue, cur_col, wrap) \
1342 ahd_print_register(NULL, 0, "SLVSPLTOUTADR3", 0x9b, regvalue, cur_col, wrap)
1343#endif
1344
1345#if AIC_DEBUG_REGISTERS
1346ahd_reg_print_t ahd_sgseqbcnt_print;
1347#else
1348#define ahd_sgseqbcnt_print(regvalue, cur_col, wrap) \
1349 ahd_print_register(NULL, 0, "SGSEQBCNT", 0x9c, regvalue, cur_col, wrap)
1350#endif
1351
1352#if AIC_DEBUG_REGISTERS
1353ahd_reg_print_t ahd_slvspltoutattr0_print;
1354#else
1355#define ahd_slvspltoutattr0_print(regvalue, cur_col, wrap) \
1356 ahd_print_register(NULL, 0, "SLVSPLTOUTATTR0", 0x9c, regvalue, cur_col, wrap)
1357#endif
1358
1359#if AIC_DEBUG_REGISTERS
1360ahd_reg_print_t ahd_slvspltoutattr1_print;
1361#else
1362#define ahd_slvspltoutattr1_print(regvalue, cur_col, wrap) \
1363 ahd_print_register(NULL, 0, "SLVSPLTOUTATTR1", 0x9d, regvalue, cur_col, wrap)
1364#endif
1365
1366#if AIC_DEBUG_REGISTERS
1367ahd_reg_print_t ahd_slvspltoutattr2_print;
1368#else
1369#define ahd_slvspltoutattr2_print(regvalue, cur_col, wrap) \
1370 ahd_print_register(NULL, 0, "SLVSPLTOUTATTR2", 0x9e, regvalue, cur_col, wrap)
1371#endif
1372
1373#if AIC_DEBUG_REGISTERS
1374ahd_reg_print_t ahd_sgspltstat0_print; 821ahd_reg_print_t ahd_sgspltstat0_print;
1375#else 822#else
1376#define ahd_sgspltstat0_print(regvalue, cur_col, wrap) \ 823#define ahd_sgspltstat0_print(regvalue, cur_col, wrap) \
@@ -1385,13 +832,6 @@ ahd_reg_print_t ahd_sgspltstat1_print;
1385#endif 832#endif
1386 833
1387#if AIC_DEBUG_REGISTERS 834#if AIC_DEBUG_REGISTERS
1388ahd_reg_print_t ahd_sfunct_print;
1389#else
1390#define ahd_sfunct_print(regvalue, cur_col, wrap) \
1391 ahd_print_register(NULL, 0, "SFUNCT", 0x9f, regvalue, cur_col, wrap)
1392#endif
1393
1394#if AIC_DEBUG_REGISTERS
1395ahd_reg_print_t ahd_df0pcistat_print; 835ahd_reg_print_t ahd_df0pcistat_print;
1396#else 836#else
1397#define ahd_df0pcistat_print(regvalue, cur_col, wrap) \ 837#define ahd_df0pcistat_print(regvalue, cur_col, wrap) \
@@ -1406,41 +846,6 @@ ahd_reg_print_t ahd_reg0_print;
1406#endif 846#endif
1407 847
1408#if AIC_DEBUG_REGISTERS 848#if AIC_DEBUG_REGISTERS
1409ahd_reg_print_t ahd_df1pcistat_print;
1410#else
1411#define ahd_df1pcistat_print(regvalue, cur_col, wrap) \
1412 ahd_print_register(NULL, 0, "DF1PCISTAT", 0xa1, regvalue, cur_col, wrap)
1413#endif
1414
1415#if AIC_DEBUG_REGISTERS
1416ahd_reg_print_t ahd_sgpcistat_print;
1417#else
1418#define ahd_sgpcistat_print(regvalue, cur_col, wrap) \
1419 ahd_print_register(NULL, 0, "SGPCISTAT", 0xa2, regvalue, cur_col, wrap)
1420#endif
1421
1422#if AIC_DEBUG_REGISTERS
1423ahd_reg_print_t ahd_reg1_print;
1424#else
1425#define ahd_reg1_print(regvalue, cur_col, wrap) \
1426 ahd_print_register(NULL, 0, "REG1", 0xa2, regvalue, cur_col, wrap)
1427#endif
1428
1429#if AIC_DEBUG_REGISTERS
1430ahd_reg_print_t ahd_cmcpcistat_print;
1431#else
1432#define ahd_cmcpcistat_print(regvalue, cur_col, wrap) \
1433 ahd_print_register(NULL, 0, "CMCPCISTAT", 0xa3, regvalue, cur_col, wrap)
1434#endif
1435
1436#if AIC_DEBUG_REGISTERS
1437ahd_reg_print_t ahd_ovlypcistat_print;
1438#else
1439#define ahd_ovlypcistat_print(regvalue, cur_col, wrap) \
1440 ahd_print_register(NULL, 0, "OVLYPCISTAT", 0xa4, regvalue, cur_col, wrap)
1441#endif
1442
1443#if AIC_DEBUG_REGISTERS
1444ahd_reg_print_t ahd_reg_isr_print; 849ahd_reg_print_t ahd_reg_isr_print;
1445#else 850#else
1446#define ahd_reg_isr_print(regvalue, cur_col, wrap) \ 851#define ahd_reg_isr_print(regvalue, cur_col, wrap) \
@@ -1455,13 +860,6 @@ ahd_reg_print_t ahd_sg_state_print;
1455#endif 860#endif
1456 861
1457#if AIC_DEBUG_REGISTERS 862#if AIC_DEBUG_REGISTERS
1458ahd_reg_print_t ahd_msipcistat_print;
1459#else
1460#define ahd_msipcistat_print(regvalue, cur_col, wrap) \
1461 ahd_print_register(NULL, 0, "MSIPCISTAT", 0xa6, regvalue, cur_col, wrap)
1462#endif
1463
1464#if AIC_DEBUG_REGISTERS
1465ahd_reg_print_t ahd_targpcistat_print; 863ahd_reg_print_t ahd_targpcistat_print;
1466#else 864#else
1467#define ahd_targpcistat_print(regvalue, cur_col, wrap) \ 865#define ahd_targpcistat_print(regvalue, cur_col, wrap) \
@@ -1469,13 +867,6 @@ ahd_reg_print_t ahd_targpcistat_print;
1469#endif 867#endif
1470 868
1471#if AIC_DEBUG_REGISTERS 869#if AIC_DEBUG_REGISTERS
1472ahd_reg_print_t ahd_data_count_odd_print;
1473#else
1474#define ahd_data_count_odd_print(regvalue, cur_col, wrap) \
1475 ahd_print_register(NULL, 0, "DATA_COUNT_ODD", 0xa7, regvalue, cur_col, wrap)
1476#endif
1477
1478#if AIC_DEBUG_REGISTERS
1479ahd_reg_print_t ahd_scbptr_print; 870ahd_reg_print_t ahd_scbptr_print;
1480#else 871#else
1481#define ahd_scbptr_print(regvalue, cur_col, wrap) \ 872#define ahd_scbptr_print(regvalue, cur_col, wrap) \
@@ -1483,13 +874,6 @@ ahd_reg_print_t ahd_scbptr_print;
1483#endif 874#endif
1484 875
1485#if AIC_DEBUG_REGISTERS 876#if AIC_DEBUG_REGISTERS
1486ahd_reg_print_t ahd_ccscbacnt_print;
1487#else
1488#define ahd_ccscbacnt_print(regvalue, cur_col, wrap) \
1489 ahd_print_register(NULL, 0, "CCSCBACNT", 0xab, regvalue, cur_col, wrap)
1490#endif
1491
1492#if AIC_DEBUG_REGISTERS
1493ahd_reg_print_t ahd_scbautoptr_print; 877ahd_reg_print_t ahd_scbautoptr_print;
1494#else 878#else
1495#define ahd_scbautoptr_print(regvalue, cur_col, wrap) \ 879#define ahd_scbautoptr_print(regvalue, cur_col, wrap) \
@@ -1504,13 +888,6 @@ ahd_reg_print_t ahd_ccsgaddr_print;
1504#endif 888#endif
1505 889
1506#if AIC_DEBUG_REGISTERS 890#if AIC_DEBUG_REGISTERS
1507ahd_reg_print_t ahd_ccscbadr_bk_print;
1508#else
1509#define ahd_ccscbadr_bk_print(regvalue, cur_col, wrap) \
1510 ahd_print_register(NULL, 0, "CCSCBADR_BK", 0xac, regvalue, cur_col, wrap)
1511#endif
1512
1513#if AIC_DEBUG_REGISTERS
1514ahd_reg_print_t ahd_ccscbaddr_print; 891ahd_reg_print_t ahd_ccscbaddr_print;
1515#else 892#else
1516#define ahd_ccscbaddr_print(regvalue, cur_col, wrap) \ 893#define ahd_ccscbaddr_print(regvalue, cur_col, wrap) \
@@ -1518,13 +895,6 @@ ahd_reg_print_t ahd_ccscbaddr_print;
1518#endif 895#endif
1519 896
1520#if AIC_DEBUG_REGISTERS 897#if AIC_DEBUG_REGISTERS
1521ahd_reg_print_t ahd_cmc_rambist_print;
1522#else
1523#define ahd_cmc_rambist_print(regvalue, cur_col, wrap) \
1524 ahd_print_register(NULL, 0, "CMC_RAMBIST", 0xad, regvalue, cur_col, wrap)
1525#endif
1526
1527#if AIC_DEBUG_REGISTERS
1528ahd_reg_print_t ahd_ccscbctl_print; 898ahd_reg_print_t ahd_ccscbctl_print;
1529#else 899#else
1530#define ahd_ccscbctl_print(regvalue, cur_col, wrap) \ 900#define ahd_ccscbctl_print(regvalue, cur_col, wrap) \
@@ -1546,13 +916,6 @@ ahd_reg_print_t ahd_ccsgram_print;
1546#endif 916#endif
1547 917
1548#if AIC_DEBUG_REGISTERS 918#if AIC_DEBUG_REGISTERS
1549ahd_reg_print_t ahd_flexadr_print;
1550#else
1551#define ahd_flexadr_print(regvalue, cur_col, wrap) \
1552 ahd_print_register(NULL, 0, "FLEXADR", 0xb0, regvalue, cur_col, wrap)
1553#endif
1554
1555#if AIC_DEBUG_REGISTERS
1556ahd_reg_print_t ahd_ccscbram_print; 919ahd_reg_print_t ahd_ccscbram_print;
1557#else 920#else
1558#define ahd_ccscbram_print(regvalue, cur_col, wrap) \ 921#define ahd_ccscbram_print(regvalue, cur_col, wrap) \
@@ -1560,27 +923,6 @@ ahd_reg_print_t ahd_ccscbram_print;
1560#endif 923#endif
1561 924
1562#if AIC_DEBUG_REGISTERS 925#if AIC_DEBUG_REGISTERS
1563ahd_reg_print_t ahd_flexcnt_print;
1564#else
1565#define ahd_flexcnt_print(regvalue, cur_col, wrap) \
1566 ahd_print_register(NULL, 0, "FLEXCNT", 0xb3, regvalue, cur_col, wrap)
1567#endif
1568
1569#if AIC_DEBUG_REGISTERS
1570ahd_reg_print_t ahd_flexdmastat_print;
1571#else
1572#define ahd_flexdmastat_print(regvalue, cur_col, wrap) \
1573 ahd_print_register(NULL, 0, "FLEXDMASTAT", 0xb5, regvalue, cur_col, wrap)
1574#endif
1575
1576#if AIC_DEBUG_REGISTERS
1577ahd_reg_print_t ahd_flexdata_print;
1578#else
1579#define ahd_flexdata_print(regvalue, cur_col, wrap) \
1580 ahd_print_register(NULL, 0, "FLEXDATA", 0xb6, regvalue, cur_col, wrap)
1581#endif
1582
1583#if AIC_DEBUG_REGISTERS
1584ahd_reg_print_t ahd_brddat_print; 926ahd_reg_print_t ahd_brddat_print;
1585#else 927#else
1586#define ahd_brddat_print(regvalue, cur_col, wrap) \ 928#define ahd_brddat_print(regvalue, cur_col, wrap) \
@@ -1623,27 +965,6 @@ ahd_reg_print_t ahd_seestat_print;
1623#endif 965#endif
1624 966
1625#if AIC_DEBUG_REGISTERS 967#if AIC_DEBUG_REGISTERS
1626ahd_reg_print_t ahd_scbcnt_print;
1627#else
1628#define ahd_scbcnt_print(regvalue, cur_col, wrap) \
1629 ahd_print_register(NULL, 0, "SCBCNT", 0xbf, regvalue, cur_col, wrap)
1630#endif
1631
1632#if AIC_DEBUG_REGISTERS
1633ahd_reg_print_t ahd_dfwaddr_print;
1634#else
1635#define ahd_dfwaddr_print(regvalue, cur_col, wrap) \
1636 ahd_print_register(NULL, 0, "DFWADDR", 0xc0, regvalue, cur_col, wrap)
1637#endif
1638
1639#if AIC_DEBUG_REGISTERS
1640ahd_reg_print_t ahd_dspfltrctl_print;
1641#else
1642#define ahd_dspfltrctl_print(regvalue, cur_col, wrap) \
1643 ahd_print_register(NULL, 0, "DSPFLTRCTL", 0xc0, regvalue, cur_col, wrap)
1644#endif
1645
1646#if AIC_DEBUG_REGISTERS
1647ahd_reg_print_t ahd_dspdatactl_print; 968ahd_reg_print_t ahd_dspdatactl_print;
1648#else 969#else
1649#define ahd_dspdatactl_print(regvalue, cur_col, wrap) \ 970#define ahd_dspdatactl_print(regvalue, cur_col, wrap) \
@@ -1651,27 +972,6 @@ ahd_reg_print_t ahd_dspdatactl_print;
1651#endif 972#endif
1652 973
1653#if AIC_DEBUG_REGISTERS 974#if AIC_DEBUG_REGISTERS
1654ahd_reg_print_t ahd_dfraddr_print;
1655#else
1656#define ahd_dfraddr_print(regvalue, cur_col, wrap) \
1657 ahd_print_register(NULL, 0, "DFRADDR", 0xc2, regvalue, cur_col, wrap)
1658#endif
1659
1660#if AIC_DEBUG_REGISTERS
1661ahd_reg_print_t ahd_dspreqctl_print;
1662#else
1663#define ahd_dspreqctl_print(regvalue, cur_col, wrap) \
1664 ahd_print_register(NULL, 0, "DSPREQCTL", 0xc2, regvalue, cur_col, wrap)
1665#endif
1666
1667#if AIC_DEBUG_REGISTERS
1668ahd_reg_print_t ahd_dspackctl_print;
1669#else
1670#define ahd_dspackctl_print(regvalue, cur_col, wrap) \
1671 ahd_print_register(NULL, 0, "DSPACKCTL", 0xc3, regvalue, cur_col, wrap)
1672#endif
1673
1674#if AIC_DEBUG_REGISTERS
1675ahd_reg_print_t ahd_dfdat_print; 975ahd_reg_print_t ahd_dfdat_print;
1676#else 976#else
1677#define ahd_dfdat_print(regvalue, cur_col, wrap) \ 977#define ahd_dfdat_print(regvalue, cur_col, wrap) \
@@ -1693,76 +993,6 @@ ahd_reg_print_t ahd_wrtbiasctl_print;
1693#endif 993#endif
1694 994
1695#if AIC_DEBUG_REGISTERS 995#if AIC_DEBUG_REGISTERS
1696ahd_reg_print_t ahd_rcvrbiosctl_print;
1697#else
1698#define ahd_rcvrbiosctl_print(regvalue, cur_col, wrap) \
1699 ahd_print_register(NULL, 0, "RCVRBIOSCTL", 0xc6, regvalue, cur_col, wrap)
1700#endif
1701
1702#if AIC_DEBUG_REGISTERS
1703ahd_reg_print_t ahd_wrtbiascalc_print;
1704#else
1705#define ahd_wrtbiascalc_print(regvalue, cur_col, wrap) \
1706 ahd_print_register(NULL, 0, "WRTBIASCALC", 0xc7, regvalue, cur_col, wrap)
1707#endif
1708
1709#if AIC_DEBUG_REGISTERS
1710ahd_reg_print_t ahd_rcvrbiascalc_print;
1711#else
1712#define ahd_rcvrbiascalc_print(regvalue, cur_col, wrap) \
1713 ahd_print_register(NULL, 0, "RCVRBIASCALC", 0xc8, regvalue, cur_col, wrap)
1714#endif
1715
1716#if AIC_DEBUG_REGISTERS
1717ahd_reg_print_t ahd_dfptrs_print;
1718#else
1719#define ahd_dfptrs_print(regvalue, cur_col, wrap) \
1720 ahd_print_register(NULL, 0, "DFPTRS", 0xc8, regvalue, cur_col, wrap)
1721#endif
1722
1723#if AIC_DEBUG_REGISTERS
1724ahd_reg_print_t ahd_skewcalc_print;
1725#else
1726#define ahd_skewcalc_print(regvalue, cur_col, wrap) \
1727 ahd_print_register(NULL, 0, "SKEWCALC", 0xc9, regvalue, cur_col, wrap)
1728#endif
1729
1730#if AIC_DEBUG_REGISTERS
1731ahd_reg_print_t ahd_dfbkptr_print;
1732#else
1733#define ahd_dfbkptr_print(regvalue, cur_col, wrap) \
1734 ahd_print_register(NULL, 0, "DFBKPTR", 0xc9, regvalue, cur_col, wrap)
1735#endif
1736
1737#if AIC_DEBUG_REGISTERS
1738ahd_reg_print_t ahd_dfdbctl_print;
1739#else
1740#define ahd_dfdbctl_print(regvalue, cur_col, wrap) \
1741 ahd_print_register(NULL, 0, "DFDBCTL", 0xcb, regvalue, cur_col, wrap)
1742#endif
1743
1744#if AIC_DEBUG_REGISTERS
1745ahd_reg_print_t ahd_dfscnt_print;
1746#else
1747#define ahd_dfscnt_print(regvalue, cur_col, wrap) \
1748 ahd_print_register(NULL, 0, "DFSCNT", 0xcc, regvalue, cur_col, wrap)
1749#endif
1750
1751#if AIC_DEBUG_REGISTERS
1752ahd_reg_print_t ahd_dfbcnt_print;
1753#else
1754#define ahd_dfbcnt_print(regvalue, cur_col, wrap) \
1755 ahd_print_register(NULL, 0, "DFBCNT", 0xce, regvalue, cur_col, wrap)
1756#endif
1757
1758#if AIC_DEBUG_REGISTERS
1759ahd_reg_print_t ahd_ovlyaddr_print;
1760#else
1761#define ahd_ovlyaddr_print(regvalue, cur_col, wrap) \
1762 ahd_print_register(NULL, 0, "OVLYADDR", 0xd4, regvalue, cur_col, wrap)
1763#endif
1764
1765#if AIC_DEBUG_REGISTERS
1766ahd_reg_print_t ahd_seqctl0_print; 996ahd_reg_print_t ahd_seqctl0_print;
1767#else 997#else
1768#define ahd_seqctl0_print(regvalue, cur_col, wrap) \ 998#define ahd_seqctl0_print(regvalue, cur_col, wrap) \
@@ -1770,13 +1000,6 @@ ahd_reg_print_t ahd_seqctl0_print;
1770#endif 1000#endif
1771 1001
1772#if AIC_DEBUG_REGISTERS 1002#if AIC_DEBUG_REGISTERS
1773ahd_reg_print_t ahd_seqctl1_print;
1774#else
1775#define ahd_seqctl1_print(regvalue, cur_col, wrap) \
1776 ahd_print_register(NULL, 0, "SEQCTL1", 0xd7, regvalue, cur_col, wrap)
1777#endif
1778
1779#if AIC_DEBUG_REGISTERS
1780ahd_reg_print_t ahd_flags_print; 1003ahd_reg_print_t ahd_flags_print;
1781#else 1004#else
1782#define ahd_flags_print(regvalue, cur_col, wrap) \ 1005#define ahd_flags_print(regvalue, cur_col, wrap) \
@@ -1826,20 +1049,6 @@ ahd_reg_print_t ahd_dindex_print;
1826#endif 1049#endif
1827 1050
1828#if AIC_DEBUG_REGISTERS 1051#if AIC_DEBUG_REGISTERS
1829ahd_reg_print_t ahd_brkaddr0_print;
1830#else
1831#define ahd_brkaddr0_print(regvalue, cur_col, wrap) \
1832 ahd_print_register(NULL, 0, "BRKADDR0", 0xe6, regvalue, cur_col, wrap)
1833#endif
1834
1835#if AIC_DEBUG_REGISTERS
1836ahd_reg_print_t ahd_brkaddr1_print;
1837#else
1838#define ahd_brkaddr1_print(regvalue, cur_col, wrap) \
1839 ahd_print_register(NULL, 0, "BRKADDR1", 0xe6, regvalue, cur_col, wrap)
1840#endif
1841
1842#if AIC_DEBUG_REGISTERS
1843ahd_reg_print_t ahd_allones_print; 1052ahd_reg_print_t ahd_allones_print;
1844#else 1053#else
1845#define ahd_allones_print(regvalue, cur_col, wrap) \ 1054#define ahd_allones_print(regvalue, cur_col, wrap) \
@@ -1875,13 +1084,6 @@ ahd_reg_print_t ahd_dindir_print;
1875#endif 1084#endif
1876 1085
1877#if AIC_DEBUG_REGISTERS 1086#if AIC_DEBUG_REGISTERS
1878ahd_reg_print_t ahd_function1_print;
1879#else
1880#define ahd_function1_print(regvalue, cur_col, wrap) \
1881 ahd_print_register(NULL, 0, "FUNCTION1", 0xf0, regvalue, cur_col, wrap)
1882#endif
1883
1884#if AIC_DEBUG_REGISTERS
1885ahd_reg_print_t ahd_stack_print; 1087ahd_reg_print_t ahd_stack_print;
1886#else 1088#else
1887#define ahd_stack_print(regvalue, cur_col, wrap) \ 1089#define ahd_stack_print(regvalue, cur_col, wrap) \
@@ -1903,13 +1105,6 @@ ahd_reg_print_t ahd_curaddr_print;
1903#endif 1105#endif
1904 1106
1905#if AIC_DEBUG_REGISTERS 1107#if AIC_DEBUG_REGISTERS
1906ahd_reg_print_t ahd_lastaddr_print;
1907#else
1908#define ahd_lastaddr_print(regvalue, cur_col, wrap) \
1909 ahd_print_register(NULL, 0, "LASTADDR", 0xf6, regvalue, cur_col, wrap)
1910#endif
1911
1912#if AIC_DEBUG_REGISTERS
1913ahd_reg_print_t ahd_intvec2_addr_print; 1108ahd_reg_print_t ahd_intvec2_addr_print;
1914#else 1109#else
1915#define ahd_intvec2_addr_print(regvalue, cur_col, wrap) \ 1110#define ahd_intvec2_addr_print(regvalue, cur_col, wrap) \
@@ -1931,24 +1126,17 @@ ahd_reg_print_t ahd_accum_save_print;
1931#endif 1126#endif
1932 1127
1933#if AIC_DEBUG_REGISTERS 1128#if AIC_DEBUG_REGISTERS
1934ahd_reg_print_t ahd_waiting_scb_tails_print; 1129ahd_reg_print_t ahd_sram_base_print;
1935#else
1936#define ahd_waiting_scb_tails_print(regvalue, cur_col, wrap) \
1937 ahd_print_register(NULL, 0, "WAITING_SCB_TAILS", 0x100, regvalue, cur_col, wrap)
1938#endif
1939
1940#if AIC_DEBUG_REGISTERS
1941ahd_reg_print_t ahd_ahd_pci_config_base_print;
1942#else 1130#else
1943#define ahd_ahd_pci_config_base_print(regvalue, cur_col, wrap) \ 1131#define ahd_sram_base_print(regvalue, cur_col, wrap) \
1944 ahd_print_register(NULL, 0, "AHD_PCI_CONFIG_BASE", 0x100, regvalue, cur_col, wrap) 1132 ahd_print_register(NULL, 0, "SRAM_BASE", 0x100, regvalue, cur_col, wrap)
1945#endif 1133#endif
1946 1134
1947#if AIC_DEBUG_REGISTERS 1135#if AIC_DEBUG_REGISTERS
1948ahd_reg_print_t ahd_sram_base_print; 1136ahd_reg_print_t ahd_waiting_scb_tails_print;
1949#else 1137#else
1950#define ahd_sram_base_print(regvalue, cur_col, wrap) \ 1138#define ahd_waiting_scb_tails_print(regvalue, cur_col, wrap) \
1951 ahd_print_register(NULL, 0, "SRAM_BASE", 0x100, regvalue, cur_col, wrap) 1139 ahd_print_register(NULL, 0, "WAITING_SCB_TAILS", 0x100, regvalue, cur_col, wrap)
1952#endif 1140#endif
1953 1141
1954#if AIC_DEBUG_REGISTERS 1142#if AIC_DEBUG_REGISTERS
@@ -2218,17 +1406,17 @@ ahd_reg_print_t ahd_mk_message_scsiid_print;
2218#endif 1406#endif
2219 1407
2220#if AIC_DEBUG_REGISTERS 1408#if AIC_DEBUG_REGISTERS
2221ahd_reg_print_t ahd_scb_base_print; 1409ahd_reg_print_t ahd_scb_residual_datacnt_print;
2222#else 1410#else
2223#define ahd_scb_base_print(regvalue, cur_col, wrap) \ 1411#define ahd_scb_residual_datacnt_print(regvalue, cur_col, wrap) \
2224 ahd_print_register(NULL, 0, "SCB_BASE", 0x180, regvalue, cur_col, wrap) 1412 ahd_print_register(NULL, 0, "SCB_RESIDUAL_DATACNT", 0x180, regvalue, cur_col, wrap)
2225#endif 1413#endif
2226 1414
2227#if AIC_DEBUG_REGISTERS 1415#if AIC_DEBUG_REGISTERS
2228ahd_reg_print_t ahd_scb_residual_datacnt_print; 1416ahd_reg_print_t ahd_scb_base_print;
2229#else 1417#else
2230#define ahd_scb_residual_datacnt_print(regvalue, cur_col, wrap) \ 1418#define ahd_scb_base_print(regvalue, cur_col, wrap) \
2231 ahd_print_register(NULL, 0, "SCB_RESIDUAL_DATACNT", 0x180, regvalue, cur_col, wrap) 1419 ahd_print_register(NULL, 0, "SCB_BASE", 0x180, regvalue, cur_col, wrap)
2232#endif 1420#endif
2233 1421
2234#if AIC_DEBUG_REGISTERS 1422#if AIC_DEBUG_REGISTERS
@@ -2246,27 +1434,6 @@ ahd_reg_print_t ahd_scb_scsi_status_print;
2246#endif 1434#endif
2247 1435
2248#if AIC_DEBUG_REGISTERS 1436#if AIC_DEBUG_REGISTERS
2249ahd_reg_print_t ahd_scb_target_phases_print;
2250#else
2251#define ahd_scb_target_phases_print(regvalue, cur_col, wrap) \
2252 ahd_print_register(NULL, 0, "SCB_TARGET_PHASES", 0x189, regvalue, cur_col, wrap)
2253#endif
2254
2255#if AIC_DEBUG_REGISTERS
2256ahd_reg_print_t ahd_scb_target_data_dir_print;
2257#else
2258#define ahd_scb_target_data_dir_print(regvalue, cur_col, wrap) \
2259 ahd_print_register(NULL, 0, "SCB_TARGET_DATA_DIR", 0x18a, regvalue, cur_col, wrap)
2260#endif
2261
2262#if AIC_DEBUG_REGISTERS
2263ahd_reg_print_t ahd_scb_target_itag_print;
2264#else
2265#define ahd_scb_target_itag_print(regvalue, cur_col, wrap) \
2266 ahd_print_register(NULL, 0, "SCB_TARGET_ITAG", 0x18b, regvalue, cur_col, wrap)
2267#endif
2268
2269#if AIC_DEBUG_REGISTERS
2270ahd_reg_print_t ahd_scb_sense_busaddr_print; 1437ahd_reg_print_t ahd_scb_sense_busaddr_print;
2271#else 1438#else
2272#define ahd_scb_sense_busaddr_print(regvalue, cur_col, wrap) \ 1439#define ahd_scb_sense_busaddr_print(regvalue, cur_col, wrap) \
@@ -2365,13 +1532,6 @@ ahd_reg_print_t ahd_scb_next2_print;
2365#endif 1532#endif
2366 1533
2367#if AIC_DEBUG_REGISTERS 1534#if AIC_DEBUG_REGISTERS
2368ahd_reg_print_t ahd_scb_spare_print;
2369#else
2370#define ahd_scb_spare_print(regvalue, cur_col, wrap) \
2371 ahd_print_register(NULL, 0, "SCB_SPARE", 0x1b0, regvalue, cur_col, wrap)
2372#endif
2373
2374#if AIC_DEBUG_REGISTERS
2375ahd_reg_print_t ahd_scb_disconnected_lists_print; 1535ahd_reg_print_t ahd_scb_disconnected_lists_print;
2376#else 1536#else
2377#define ahd_scb_disconnected_lists_print(regvalue, cur_col, wrap) \ 1537#define ahd_scb_disconnected_lists_print(regvalue, cur_col, wrap) \
@@ -2557,10 +1717,10 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2557 1717
2558#define SG_CACHE_PRE 0x1b 1718#define SG_CACHE_PRE 0x1b
2559 1719
2560#define LQIN 0x20
2561
2562#define TYPEPTR 0x20 1720#define TYPEPTR 0x20
2563 1721
1722#define LQIN 0x20
1723
2564#define TAGPTR 0x21 1724#define TAGPTR 0x21
2565 1725
2566#define LUNPTR 0x22 1726#define LUNPTR 0x22
@@ -2620,14 +1780,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2620#define SINGLECMD 0x02 1780#define SINGLECMD 0x02
2621#define ABORTPENDING 0x01 1781#define ABORTPENDING 0x01
2622 1782
2623#define SCSBIST0 0x39
2624#define GSBISTERR 0x40
2625#define GSBISTDONE 0x20
2626#define GSBISTRUN 0x10
2627#define OSBISTERR 0x04
2628#define OSBISTDONE 0x02
2629#define OSBISTRUN 0x01
2630
2631#define LQCTL2 0x39 1783#define LQCTL2 0x39
2632#define LQIRETRY 0x80 1784#define LQIRETRY 0x80
2633#define LQICONTINUE 0x40 1785#define LQICONTINUE 0x40
@@ -2638,10 +1790,13 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2638#define LQOTOIDLE 0x02 1790#define LQOTOIDLE 0x02
2639#define LQOPAUSE 0x01 1791#define LQOPAUSE 0x01
2640 1792
2641#define SCSBIST1 0x3a 1793#define SCSBIST0 0x39
2642#define NTBISTERR 0x04 1794#define GSBISTERR 0x40
2643#define NTBISTDONE 0x02 1795#define GSBISTDONE 0x20
2644#define NTBISTRUN 0x01 1796#define GSBISTRUN 0x10
1797#define OSBISTERR 0x04
1798#define OSBISTDONE 0x02
1799#define OSBISTRUN 0x01
2645 1800
2646#define SCSISEQ0 0x3a 1801#define SCSISEQ0 0x3a
2647#define TEMODEO 0x80 1802#define TEMODEO 0x80
@@ -2650,8 +1805,15 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2650#define FORCEBUSFREE 0x10 1805#define FORCEBUSFREE 0x10
2651#define SCSIRSTO 0x01 1806#define SCSIRSTO 0x01
2652 1807
1808#define SCSBIST1 0x3a
1809#define NTBISTERR 0x04
1810#define NTBISTDONE 0x02
1811#define NTBISTRUN 0x01
1812
2653#define SCSISEQ1 0x3b 1813#define SCSISEQ1 0x3b
2654 1814
1815#define BUSINITID 0x3c
1816
2655#define SXFRCTL0 0x3c 1817#define SXFRCTL0 0x3c
2656#define DFON 0x80 1818#define DFON 0x80
2657#define DFPEXP 0x40 1819#define DFPEXP 0x40
@@ -2660,8 +1822,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2660 1822
2661#define DLCOUNT 0x3c 1823#define DLCOUNT 0x3c
2662 1824
2663#define BUSINITID 0x3c
2664
2665#define SXFRCTL1 0x3d 1825#define SXFRCTL1 0x3d
2666#define BITBUCKET 0x80 1826#define BITBUCKET 0x80
2667#define ENSACHK 0x40 1827#define ENSACHK 0x40
@@ -2686,6 +1846,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2686#define CURRFIFO_1 0x01 1846#define CURRFIFO_1 0x01
2687#define CURRFIFO_0 0x00 1847#define CURRFIFO_0 0x00
2688 1848
1849#define MULTARGID 0x40
1850
2689#define SCSISIGO 0x40 1851#define SCSISIGO 0x40
2690#define CDO 0x80 1852#define CDO 0x80
2691#define IOO 0x40 1853#define IOO 0x40
@@ -2696,8 +1858,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2696#define REQO 0x02 1858#define REQO 0x02
2697#define ACKO 0x01 1859#define ACKO 0x01
2698 1860
2699#define MULTARGID 0x40
2700
2701#define SCSISIGI 0x41 1861#define SCSISIGI 0x41
2702#define ATNI 0x10 1862#define ATNI 0x10
2703#define SELI 0x08 1863#define SELI 0x08
@@ -2744,15 +1904,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2744#define ENAB20 0x04 1904#define ENAB20 0x04
2745#define SELWIDE 0x02 1905#define SELWIDE 0x02
2746 1906
2747#define CLRSINT0 0x4b
2748#define CLRSELDO 0x40
2749#define CLRSELDI 0x20
2750#define CLRSELINGO 0x10
2751#define CLRIOERR 0x08
2752#define CLROVERRUN 0x04
2753#define CLRSPIORDY 0x02
2754#define CLRARBDO 0x01
2755
2756#define SSTAT0 0x4b 1907#define SSTAT0 0x4b
2757#define TARGET 0x80 1908#define TARGET 0x80
2758#define SELDO 0x40 1909#define SELDO 0x40
@@ -2772,14 +1923,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2772#define ENSPIORDY 0x02 1923#define ENSPIORDY 0x02
2773#define ENARBDO 0x01 1924#define ENARBDO 0x01
2774 1925
2775#define CLRSINT1 0x4c 1926#define CLRSINT0 0x4b
2776#define CLRSELTIMEO 0x80 1927#define CLRSELDO 0x40
2777#define CLRATNO 0x40 1928#define CLRSELDI 0x20
2778#define CLRSCSIRSTI 0x20 1929#define CLRSELINGO 0x10
2779#define CLRBUSFREE 0x08 1930#define CLRIOERR 0x08
2780#define CLRSCSIPERR 0x04 1931#define CLROVERRUN 0x04
2781#define CLRSTRB2FAST 0x02 1932#define CLRSPIORDY 0x02
2782#define CLRREQINIT 0x01 1933#define CLRARBDO 0x01
2783 1934
2784#define SSTAT1 0x4c 1935#define SSTAT1 0x4c
2785#define SELTO 0x80 1936#define SELTO 0x80
@@ -2791,6 +1942,15 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2791#define STRB2FAST 0x02 1942#define STRB2FAST 0x02
2792#define REQINIT 0x01 1943#define REQINIT 0x01
2793 1944
1945#define CLRSINT1 0x4c
1946#define CLRSELTIMEO 0x80
1947#define CLRATNO 0x40
1948#define CLRSCSIRSTI 0x20
1949#define CLRBUSFREE 0x08
1950#define CLRSCSIPERR 0x04
1951#define CLRSTRB2FAST 0x02
1952#define CLRREQINIT 0x01
1953
2794#define SSTAT2 0x4d 1954#define SSTAT2 0x4d
2795#define BUSFREETIME 0xc0 1955#define BUSFREETIME 0xc0
2796#define NONPACKREQ 0x20 1956#define NONPACKREQ 0x20
@@ -2838,14 +1998,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2838#define LQIATNLQ 0x02 1998#define LQIATNLQ 0x02
2839#define LQIATNCMD 0x01 1999#define LQIATNCMD 0x01
2840 2000
2841#define CLRLQIINT0 0x50
2842#define CLRLQIATNQAS 0x20
2843#define CLRLQICRCT1 0x10
2844#define CLRLQICRCT2 0x08
2845#define CLRLQIBADLQT 0x04
2846#define CLRLQIATNLQ 0x02
2847#define CLRLQIATNCMD 0x01
2848
2849#define LQIMODE0 0x50 2001#define LQIMODE0 0x50
2850#define ENLQIATNQASK 0x20 2002#define ENLQIATNQASK 0x20
2851#define ENLQICRCT1 0x10 2003#define ENLQICRCT1 0x10
@@ -2854,6 +2006,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2854#define ENLQIATNLQ 0x02 2006#define ENLQIATNLQ 0x02
2855#define ENLQIATNCMD 0x01 2007#define ENLQIATNCMD 0x01
2856 2008
2009#define CLRLQIINT0 0x50
2010#define CLRLQIATNQAS 0x20
2011#define CLRLQICRCT1 0x10
2012#define CLRLQICRCT2 0x08
2013#define CLRLQIBADLQT 0x04
2014#define CLRLQIATNLQ 0x02
2015#define CLRLQIATNCMD 0x01
2016
2857#define LQIMODE1 0x51 2017#define LQIMODE1 0x51
2858#define ENLQIPHASE_LQ 0x80 2018#define ENLQIPHASE_LQ 0x80
2859#define ENLQIPHASE_NLQ 0x40 2019#define ENLQIPHASE_NLQ 0x40
@@ -2976,6 +2136,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2976 2136
2977#define LQOSCSCTL 0x5a 2137#define LQOSCSCTL 0x5a
2978#define LQOH2A_VERSION 0x80 2138#define LQOH2A_VERSION 0x80
2139#define LQOBUSETDLY 0x40
2140#define LQONOHOLDLACK 0x02
2979#define LQONOCHKOVER 0x01 2141#define LQONOCHKOVER 0x01
2980 2142
2981#define NEXTSCB 0x5a 2143#define NEXTSCB 0x5a
@@ -2998,8 +2160,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2998#define CFG4ICMD 0x02 2160#define CFG4ICMD 0x02
2999#define CFG4TCMD 0x01 2161#define CFG4TCMD 0x01
3000 2162
3001#define CURRSCB 0x5c
3002
3003#define SEQIMODE 0x5c 2163#define SEQIMODE 0x5c
3004#define ENCTXTDONE 0x40 2164#define ENCTXTDONE 0x40
3005#define ENSAVEPTRS 0x20 2165#define ENSAVEPTRS 0x20
@@ -3009,6 +2169,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3009#define ENCFG4ICMD 0x02 2169#define ENCFG4ICMD 0x02
3010#define ENCFG4TCMD 0x01 2170#define ENCFG4TCMD 0x01
3011 2171
2172#define CURRSCB 0x5c
2173
3012#define MDFFSTAT 0x5d 2174#define MDFFSTAT 0x5d
3013#define SHCNTNEGATIVE 0x40 2175#define SHCNTNEGATIVE 0x40
3014#define SHCNTMINUS1 0x20 2176#define SHCNTMINUS1 0x20
@@ -3023,29 +2185,29 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3023 2185
3024#define DFFTAG 0x5e 2186#define DFFTAG 0x5e
3025 2187
3026#define LASTSCB 0x5e
3027
3028#define SCSITEST 0x5e 2188#define SCSITEST 0x5e
3029#define CNTRTEST 0x08 2189#define CNTRTEST 0x08
3030#define SEL_TXPLL_DEBUG 0x04 2190#define SEL_TXPLL_DEBUG 0x04
3031 2191
2192#define LASTSCB 0x5e
2193
3032#define IOPDNCTL 0x5f 2194#define IOPDNCTL 0x5f
3033#define DISABLE_OE 0x80 2195#define DISABLE_OE 0x80
3034#define PDN_IDIST 0x04 2196#define PDN_IDIST 0x04
3035#define PDN_DIFFSENSE 0x01 2197#define PDN_DIFFSENSE 0x01
3036 2198
2199#define DGRPCRCI 0x60
2200
3037#define SHADDR 0x60 2201#define SHADDR 0x60
3038 2202
3039#define NEGOADDR 0x60 2203#define NEGOADDR 0x60
3040 2204
3041#define DGRPCRCI 0x60
3042
3043#define NEGPERIOD 0x61 2205#define NEGPERIOD 0x61
3044 2206
3045#define PACKCRCI 0x62
3046
3047#define NEGOFFSET 0x62 2207#define NEGOFFSET 0x62
3048 2208
2209#define PACKCRCI 0x62
2210
3049#define NEGPPROPTS 0x63 2211#define NEGPPROPTS 0x63
3050#define PPROPT_PACE 0x08 2212#define PPROPT_PACE 0x08
3051#define PPROPT_QAS 0x04 2213#define PPROPT_QAS 0x04
@@ -3066,6 +2228,7 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3066#define ANNEXDAT 0x66 2228#define ANNEXDAT 0x66
3067 2229
3068#define SCSCHKN 0x66 2230#define SCSCHKN 0x66
2231#define BIDICHKDIS 0x80
3069#define STSELSKIDDIS 0x40 2232#define STSELSKIDDIS 0x40
3070#define CURRFIFODEF 0x20 2233#define CURRFIFODEF 0x20
3071#define WIDERESEN 0x10 2234#define WIDERESEN 0x10
@@ -3090,6 +2253,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3090 2253
3091#define SELOID 0x6b 2254#define SELOID 0x6b
3092 2255
2256#define FAIRNESS 0x6c
2257
3093#define PLL400CTL0 0x6c 2258#define PLL400CTL0 0x6c
3094#define PLL_VCOSEL 0x80 2259#define PLL_VCOSEL 0x80
3095#define PLL_PWDN 0x40 2260#define PLL_PWDN 0x40
@@ -3099,8 +2264,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3099#define PLL_DLPF 0x02 2264#define PLL_DLPF 0x02
3100#define PLL_ENFBM 0x01 2265#define PLL_ENFBM 0x01
3101 2266
3102#define FAIRNESS 0x6c
3103
3104#define PLL400CTL1 0x6d 2267#define PLL400CTL1 0x6d
3105#define PLL_CNTEN 0x80 2268#define PLL_CNTEN 0x80
3106#define PLL_CNTCLR 0x40 2269#define PLL_CNTCLR 0x40
@@ -3112,25 +2275,25 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3112 2275
3113#define HADDR 0x70 2276#define HADDR 0x70
3114 2277
2278#define HODMAADR 0x70
2279
3115#define PLLDELAY 0x70 2280#define PLLDELAY 0x70
3116#define SPLIT_DROP_REQ 0x80 2281#define SPLIT_DROP_REQ 0x80
3117 2282
3118#define HODMAADR 0x70 2283#define HCNT 0x78
3119 2284
3120#define HODMACNT 0x78 2285#define HODMACNT 0x78
3121 2286
3122#define HCNT 0x78
3123
3124#define HODMAEN 0x7a 2287#define HODMAEN 0x7a
3125 2288
3126#define SCBHADDR 0x7c
3127
3128#define SGHADDR 0x7c 2289#define SGHADDR 0x7c
3129 2290
3130#define SCBHCNT 0x84 2291#define SCBHADDR 0x7c
3131 2292
3132#define SGHCNT 0x84 2293#define SGHCNT 0x84
3133 2294
2295#define SCBHCNT 0x84
2296
3134#define DFF_THRSH 0x88 2297#define DFF_THRSH 0x88
3135#define WR_DFTHRSH 0x70 2298#define WR_DFTHRSH 0x70
3136#define RD_DFTHRSH 0x07 2299#define RD_DFTHRSH 0x07
@@ -3163,6 +2326,10 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3163 2326
3164#define CMCRXMSG0 0x90 2327#define CMCRXMSG0 0x90
3165 2328
2329#define OVLYRXMSG0 0x90
2330
2331#define DCHRXMSG0 0x90
2332
3166#define ROENABLE 0x90 2333#define ROENABLE 0x90
3167#define MSIROEN 0x20 2334#define MSIROEN 0x20
3168#define OVLYROEN 0x10 2335#define OVLYROEN 0x10
@@ -3171,11 +2338,11 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3171#define DCH1ROEN 0x02 2338#define DCH1ROEN 0x02
3172#define DCH0ROEN 0x01 2339#define DCH0ROEN 0x01
3173 2340
3174#define OVLYRXMSG0 0x90 2341#define OVLYRXMSG1 0x91
3175 2342
3176#define DCHRXMSG0 0x90 2343#define CMCRXMSG1 0x91
3177 2344
3178#define OVLYRXMSG1 0x91 2345#define DCHRXMSG1 0x91
3179 2346
3180#define NSENABLE 0x91 2347#define NSENABLE 0x91
3181#define MSINSEN 0x20 2348#define MSINSEN 0x20
@@ -3185,10 +2352,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3185#define DCH1NSEN 0x02 2352#define DCH1NSEN 0x02
3186#define DCH0NSEN 0x01 2353#define DCH0NSEN 0x01
3187 2354
3188#define CMCRXMSG1 0x91
3189
3190#define DCHRXMSG1 0x91
3191
3192#define DCHRXMSG2 0x92 2355#define DCHRXMSG2 0x92
3193 2356
3194#define CMCRXMSG2 0x92 2357#define CMCRXMSG2 0x92
@@ -3212,24 +2375,24 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3212#define TSCSERREN 0x02 2375#define TSCSERREN 0x02
3213#define CMPABCDIS 0x01 2376#define CMPABCDIS 0x01
3214 2377
2378#define CMCSEQBCNT 0x94
2379
3215#define OVLYSEQBCNT 0x94 2380#define OVLYSEQBCNT 0x94
3216 2381
3217#define DCHSEQBCNT 0x94 2382#define DCHSEQBCNT 0x94
3218 2383
3219#define CMCSEQBCNT 0x94
3220
3221#define CMCSPLTSTAT0 0x96
3222
3223#define DCHSPLTSTAT0 0x96 2384#define DCHSPLTSTAT0 0x96
3224 2385
3225#define OVLYSPLTSTAT0 0x96 2386#define OVLYSPLTSTAT0 0x96
3226 2387
3227#define CMCSPLTSTAT1 0x97 2388#define CMCSPLTSTAT0 0x96
3228 2389
3229#define OVLYSPLTSTAT1 0x97 2390#define OVLYSPLTSTAT1 0x97
3230 2391
3231#define DCHSPLTSTAT1 0x97 2392#define DCHSPLTSTAT1 0x97
3232 2393
2394#define CMCSPLTSTAT1 0x97
2395
3233#define SGRXMSG0 0x98 2396#define SGRXMSG0 0x98
3234#define CDNUM 0xf8 2397#define CDNUM 0xf8
3235#define CFNUM 0x07 2398#define CFNUM 0x07
@@ -3257,18 +2420,15 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3257#define TAG_NUM 0x1f 2420#define TAG_NUM 0x1f
3258#define RLXORD 0x10 2421#define RLXORD 0x10
3259 2422
3260#define SGSEQBCNT 0x9c
3261
3262#define SLVSPLTOUTATTR0 0x9c 2423#define SLVSPLTOUTATTR0 0x9c
3263#define LOWER_BCNT 0xff 2424#define LOWER_BCNT 0xff
3264 2425
2426#define SGSEQBCNT 0x9c
2427
3265#define SLVSPLTOUTATTR1 0x9d 2428#define SLVSPLTOUTATTR1 0x9d
3266#define CMPLT_DNUM 0xf8 2429#define CMPLT_DNUM 0xf8
3267#define CMPLT_FNUM 0x07 2430#define CMPLT_FNUM 0x07
3268 2431
3269#define SLVSPLTOUTATTR2 0x9e
3270#define CMPLT_BNUM 0xff
3271
3272#define SGSPLTSTAT0 0x9e 2432#define SGSPLTSTAT0 0x9e
3273#define STAETERM 0x80 2433#define STAETERM 0x80
3274#define SCBCERR 0x40 2434#define SCBCERR 0x40
@@ -3279,6 +2439,9 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3279#define RXSCEMSG 0x02 2439#define RXSCEMSG 0x02
3280#define RXSPLTRSP 0x01 2440#define RXSPLTRSP 0x01
3281 2441
2442#define SLVSPLTOUTATTR2 0x9e
2443#define CMPLT_BNUM 0xff
2444
3282#define SGSPLTSTAT1 0x9f 2445#define SGSPLTSTAT1 0x9f
3283#define RXDATABUCKET 0x01 2446#define RXDATABUCKET 0x01
3284 2447
@@ -3334,10 +2497,10 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3334 2497
3335#define CCSGADDR 0xac 2498#define CCSGADDR 0xac
3336 2499
3337#define CCSCBADR_BK 0xac
3338
3339#define CCSCBADDR 0xac 2500#define CCSCBADDR 0xac
3340 2501
2502#define CCSCBADR_BK 0xac
2503
3341#define CMC_RAMBIST 0xad 2504#define CMC_RAMBIST 0xad
3342#define SG_ELEMENT_SIZE 0x80 2505#define SG_ELEMENT_SIZE 0x80
3343#define SCBRAMBIST_FAIL 0x40 2506#define SCBRAMBIST_FAIL 0x40
@@ -3391,9 +2554,9 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3391#define SEEDAT 0xbc 2554#define SEEDAT 0xbc
3392 2555
3393#define SEECTL 0xbe 2556#define SEECTL 0xbe
2557#define SEEOP_EWDS 0x40
3394#define SEEOP_WALL 0x40 2558#define SEEOP_WALL 0x40
3395#define SEEOP_EWEN 0x40 2559#define SEEOP_EWEN 0x40
3396#define SEEOP_EWDS 0x40
3397#define SEEOPCODE 0x70 2560#define SEEOPCODE 0x70
3398#define SEERST 0x02 2561#define SEERST 0x02
3399#define SEESTART 0x01 2562#define SEESTART 0x01
@@ -3410,25 +2573,25 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3410 2573
3411#define SCBCNT 0xbf 2574#define SCBCNT 0xbf
3412 2575
3413#define DFWADDR 0xc0
3414
3415#define DSPFLTRCTL 0xc0 2576#define DSPFLTRCTL 0xc0
3416#define FLTRDISABLE 0x20 2577#define FLTRDISABLE 0x20
3417#define EDGESENSE 0x10 2578#define EDGESENSE 0x10
3418#define DSPFCNTSEL 0x0f 2579#define DSPFCNTSEL 0x0f
3419 2580
2581#define DFWADDR 0xc0
2582
3420#define DSPDATACTL 0xc1 2583#define DSPDATACTL 0xc1
3421#define BYPASSENAB 0x80 2584#define BYPASSENAB 0x80
3422#define DESQDIS 0x10 2585#define DESQDIS 0x10
3423#define RCVROFFSTDIS 0x04 2586#define RCVROFFSTDIS 0x04
3424#define XMITOFFSTDIS 0x02 2587#define XMITOFFSTDIS 0x02
3425 2588
3426#define DFRADDR 0xc2
3427
3428#define DSPREQCTL 0xc2 2589#define DSPREQCTL 0xc2
3429#define MANREQCTL 0xc0 2590#define MANREQCTL 0xc0
3430#define MANREQDLY 0x3f 2591#define MANREQDLY 0x3f
3431 2592
2593#define DFRADDR 0xc2
2594
3432#define DSPACKCTL 0xc3 2595#define DSPACKCTL 0xc3
3433#define MANACKCTL 0xc0 2596#define MANACKCTL 0xc0
3434#define MANACKDLY 0x3f 2597#define MANACKDLY 0x3f
@@ -3449,14 +2612,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3449 2612
3450#define WRTBIASCALC 0xc7 2613#define WRTBIASCALC 0xc7
3451 2614
3452#define RCVRBIASCALC 0xc8
3453
3454#define DFPTRS 0xc8 2615#define DFPTRS 0xc8
3455 2616
3456#define SKEWCALC 0xc9 2617#define RCVRBIASCALC 0xc8
3457 2618
3458#define DFBKPTR 0xc9 2619#define DFBKPTR 0xc9
3459 2620
2621#define SKEWCALC 0xc9
2622
3460#define DFDBCTL 0xcb 2623#define DFDBCTL 0xcb
3461#define DFF_CIO_WR_RDY 0x20 2624#define DFF_CIO_WR_RDY 0x20
3462#define DFF_CIO_RD_RDY 0x10 2625#define DFF_CIO_RD_RDY 0x10
@@ -3541,12 +2704,12 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3541 2704
3542#define ACCUM_SAVE 0xfa 2705#define ACCUM_SAVE 0xfa
3543 2706
3544#define WAITING_SCB_TAILS 0x100
3545
3546#define AHD_PCI_CONFIG_BASE 0x100 2707#define AHD_PCI_CONFIG_BASE 0x100
3547 2708
3548#define SRAM_BASE 0x100 2709#define SRAM_BASE 0x100
3549 2710
2711#define WAITING_SCB_TAILS 0x100
2712
3550#define WAITING_TID_HEAD 0x120 2713#define WAITING_TID_HEAD 0x120
3551 2714
3552#define WAITING_TID_TAIL 0x122 2715#define WAITING_TID_TAIL 0x122
@@ -3575,8 +2738,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3575#define PRELOADEN 0x80 2738#define PRELOADEN 0x80
3576#define WIDEODD 0x40 2739#define WIDEODD 0x40
3577#define SCSIEN 0x20 2740#define SCSIEN 0x20
3578#define SDMAEN 0x10
3579#define SDMAENACK 0x10 2741#define SDMAENACK 0x10
2742#define SDMAEN 0x10
3580#define HDMAEN 0x08 2743#define HDMAEN 0x08
3581#define HDMAENACK 0x08 2744#define HDMAENACK 0x08
3582#define DIRECTION 0x04 2745#define DIRECTION 0x04
@@ -3674,12 +2837,12 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3674 2837
3675#define MK_MESSAGE_SCSIID 0x162 2838#define MK_MESSAGE_SCSIID 0x162
3676 2839
3677#define SCB_BASE 0x180
3678
3679#define SCB_RESIDUAL_DATACNT 0x180 2840#define SCB_RESIDUAL_DATACNT 0x180
3680#define SCB_CDB_STORE 0x180 2841#define SCB_CDB_STORE 0x180
3681#define SCB_HOST_CDB_PTR 0x180 2842#define SCB_HOST_CDB_PTR 0x180
3682 2843
2844#define SCB_BASE 0x180
2845
3683#define SCB_RESIDUAL_SGPTR 0x184 2846#define SCB_RESIDUAL_SGPTR 0x184
3684#define SG_ADDR_MASK 0xf8 2847#define SG_ADDR_MASK 0xf8
3685#define SG_OVERRUN_RESID 0x02 2848#define SG_OVERRUN_RESID 0x02
@@ -3747,6 +2910,17 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3747#define SCB_DISCONNECTED_LISTS 0x1b8 2910#define SCB_DISCONNECTED_LISTS 0x1b8
3748 2911
3749 2912
2913#define CMD_GROUP_CODE_SHIFT 0x05
2914#define STIMESEL_MIN 0x18
2915#define STIMESEL_SHIFT 0x03
2916#define INVALID_ADDR 0x80
2917#define AHD_PRECOMP_MASK 0x07
2918#define TARGET_DATA_IN 0x01
2919#define CCSCBADDR_MAX 0x80
2920#define NUMDSPS 0x14
2921#define SEEOP_EWEN_ADDR 0xc0
2922#define AHD_ANNEXCOL_PER_DEV0 0x04
2923#define DST_MODE_SHIFT 0x04
3750#define AHD_TIMER_MAX_US 0x18ffe7 2924#define AHD_TIMER_MAX_US 0x18ffe7
3751#define AHD_TIMER_MAX_TICKS 0xffff 2925#define AHD_TIMER_MAX_TICKS 0xffff
3752#define AHD_SENSE_BUFSIZE 0x100 2926#define AHD_SENSE_BUFSIZE 0x100
@@ -3781,43 +2955,32 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3781#define LUNLEN_SINGLE_LEVEL_LUN 0x0f 2955#define LUNLEN_SINGLE_LEVEL_LUN 0x0f
3782#define NVRAM_SCB_OFFSET 0x2c 2956#define NVRAM_SCB_OFFSET 0x2c
3783#define STATUS_PKT_SENSE 0xff 2957#define STATUS_PKT_SENSE 0xff
3784#define CMD_GROUP_CODE_SHIFT 0x05
3785#define MAX_OFFSET_PACED_BUG 0x7f 2958#define MAX_OFFSET_PACED_BUG 0x7f
3786#define STIMESEL_BUG_ADJ 0x08 2959#define STIMESEL_BUG_ADJ 0x08
3787#define STIMESEL_MIN 0x18
3788#define STIMESEL_SHIFT 0x03
3789#define CCSGRAM_MAXSEGS 0x10 2960#define CCSGRAM_MAXSEGS 0x10
3790#define INVALID_ADDR 0x80
3791#define SEEOP_ERAL_ADDR 0x80 2961#define SEEOP_ERAL_ADDR 0x80
3792#define AHD_SLEWRATE_DEF_REVB 0x08 2962#define AHD_SLEWRATE_DEF_REVB 0x08
3793#define AHD_PRECOMP_CUTBACK_17 0x04 2963#define AHD_PRECOMP_CUTBACK_17 0x04
3794#define AHD_PRECOMP_MASK 0x07
3795#define SRC_MODE_SHIFT 0x00 2964#define SRC_MODE_SHIFT 0x00
3796#define PKT_OVERRUN_BUFSIZE 0x200 2965#define PKT_OVERRUN_BUFSIZE 0x200
3797#define SCB_TRANSFER_SIZE_1BYTE_LUN 0x30 2966#define SCB_TRANSFER_SIZE_1BYTE_LUN 0x30
3798#define TARGET_DATA_IN 0x01
3799#define HOST_MSG 0xff 2967#define HOST_MSG 0xff
3800#define MAX_OFFSET 0xfe 2968#define MAX_OFFSET 0xfe
3801#define BUS_16_BIT 0x01 2969#define BUS_16_BIT 0x01
3802#define CCSCBADDR_MAX 0x80
3803#define NUMDSPS 0x14
3804#define SEEOP_EWEN_ADDR 0xc0
3805#define AHD_ANNEXCOL_PER_DEV0 0x04
3806#define DST_MODE_SHIFT 0x04
3807 2970
3808 2971
3809/* Downloaded Constant Definitions */ 2972/* Downloaded Constant Definitions */
2973#define SG_SIZEOF 0x04
2974#define SG_PREFETCH_ALIGN_MASK 0x02
2975#define SG_PREFETCH_CNT_LIMIT 0x01
3810#define CACHELINE_MASK 0x07 2976#define CACHELINE_MASK 0x07
3811#define SCB_TRANSFER_SIZE 0x06 2977#define SCB_TRANSFER_SIZE 0x06
3812#define PKT_OVERRUN_BUFOFFSET 0x05 2978#define PKT_OVERRUN_BUFOFFSET 0x05
3813#define SG_SIZEOF 0x04
3814#define SG_PREFETCH_ADDR_MASK 0x03 2979#define SG_PREFETCH_ADDR_MASK 0x03
3815#define SG_PREFETCH_ALIGN_MASK 0x02
3816#define SG_PREFETCH_CNT_LIMIT 0x01
3817#define SG_PREFETCH_CNT 0x00 2980#define SG_PREFETCH_CNT 0x00
3818#define DOWNLOAD_CONST_COUNT 0x08 2981#define DOWNLOAD_CONST_COUNT 0x08
3819 2982
3820 2983
3821/* Exported Labels */ 2984/* Exported Labels */
3822#define LABEL_seq_isr 0x28f
3823#define LABEL_timer_isr 0x28b 2985#define LABEL_timer_isr 0x28b
2986#define LABEL_seq_isr 0x28f
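
[Note: the header hunks above all follow one pattern: under AIC_DEBUG_REGISTERS each register gets a declared ahd_X_print function backed by a parse table, otherwise the symbol collapses to a macro forwarding to ahd_print_register() with a NULL table, and the companion .c_shipped diff below const-ifies those tables. The following is a minimal standalone sketch of that pattern, not the shipped driver code: the struct layout, the printf-based printer body, and the DEBUG_REGISTERS switch are simplified assumptions; only the SSTAT0 address and bit values (0x4b, SELDO 0x40, SELDI 0x20, IOERR 0x08) are taken from the definitions above.]

    /* Sketch of the AIC_DEBUG_REGISTERS dual-definition pattern. */
    #include <stdio.h>

    typedef struct {
            const char *name;   /* field name             */
            unsigned    value;  /* field value            */
            unsigned    mask;   /* bits covered by field  */
    } reg_parse_entry_t;

    /* Generic printer: dumps the raw value and decodes named bit
     * fields only when a parse table is supplied (table may be NULL
     * with entries == 0, as in the non-debug macros above). */
    static int print_register(const reg_parse_entry_t *table, int entries,
                              const char *name, unsigned addr, unsigned regvalue)
    {
            int i;

            printf("%s[0x%02x] = 0x%02x", name, addr, regvalue);
            for (i = 0; i < entries; i++)
                    if ((regvalue & table[i].mask) == table[i].value)
                            printf(" %s", table[i].name);
            printf("\n");
            return 0;
    }

    #define DEBUG_REGISTERS 1

    #if DEBUG_REGISTERS
    /* Debug build: decode via a const table, mirroring the
     * const-ification applied in aic79xx_reg_print.c_shipped below. */
    static const reg_parse_entry_t SSTAT0_parse_table[] = {
            { "SELDO", 0x40, 0x40 },
            { "SELDI", 0x20, 0x20 },
            { "IOERR", 0x08, 0x08 },
    };

    static int sstat0_print(unsigned regvalue)
    {
            return print_register(SSTAT0_parse_table, 3, "SSTAT0",
                                  0x4b, regvalue);
    }
    #else
    /* Non-debug build: no table, just the raw hex dump. */
    #define sstat0_print(regvalue) \
            print_register(NULL, 0, "SSTAT0", 0x4b, regvalue)
    #endif

    int main(void)
    {
            /* Prints: SSTAT0[0x4b] = 0x48 SELDO IOERR */
            return sstat0_print(0x48);
    }

[Either branch compiles against the same call sites, which is why the patch can delete unused per-register printers from the .c file while the header keeps both definitions of each name.]
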
diff --git a/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped b/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped
index db38a61a8cb4..c4c8a96bf5a3 100644
--- a/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped
+++ b/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped
@@ -8,7 +8,7 @@
8 8
9#include "aic79xx_osm.h" 9#include "aic79xx_osm.h"
10 10
11static ahd_reg_parse_entry_t MODE_PTR_parse_table[] = { 11static const ahd_reg_parse_entry_t MODE_PTR_parse_table[] = {
12 { "SRC_MODE", 0x07, 0x07 }, 12 { "SRC_MODE", 0x07, 0x07 },
13 { "DST_MODE", 0x70, 0x70 } 13 { "DST_MODE", 0x70, 0x70 }
14}; 14};
@@ -20,7 +20,7 @@ ahd_mode_ptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
20 0x00, regvalue, cur_col, wrap)); 20 0x00, regvalue, cur_col, wrap));
21} 21}
22 22
23static ahd_reg_parse_entry_t INTSTAT_parse_table[] = { 23static const ahd_reg_parse_entry_t INTSTAT_parse_table[] = {
24 { "SPLTINT", 0x01, 0x01 }, 24 { "SPLTINT", 0x01, 0x01 },
25 { "CMDCMPLT", 0x02, 0x02 }, 25 { "CMDCMPLT", 0x02, 0x02 },
26 { "SEQINT", 0x04, 0x04 }, 26 { "SEQINT", 0x04, 0x04 },
@@ -39,7 +39,7 @@ ahd_intstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
39 0x01, regvalue, cur_col, wrap)); 39 0x01, regvalue, cur_col, wrap));
40} 40}
41 41
42static ahd_reg_parse_entry_t SEQINTCODE_parse_table[] = { 42static const ahd_reg_parse_entry_t SEQINTCODE_parse_table[] = {
43 { "NO_SEQINT", 0x00, 0xff }, 43 { "NO_SEQINT", 0x00, 0xff },
44 { "BAD_PHASE", 0x01, 0xff }, 44 { "BAD_PHASE", 0x01, 0xff },
45 { "SEND_REJECT", 0x02, 0xff }, 45 { "SEND_REJECT", 0x02, 0xff },
@@ -76,7 +76,7 @@ ahd_seqintcode_print(u_int regvalue, u_int *cur_col, u_int wrap)
76 0x02, regvalue, cur_col, wrap)); 76 0x02, regvalue, cur_col, wrap));
77} 77}
78 78
79static ahd_reg_parse_entry_t CLRINT_parse_table[] = { 79static const ahd_reg_parse_entry_t CLRINT_parse_table[] = {
80 { "CLRSPLTINT", 0x01, 0x01 }, 80 { "CLRSPLTINT", 0x01, 0x01 },
81 { "CLRCMDINT", 0x02, 0x02 }, 81 { "CLRCMDINT", 0x02, 0x02 },
82 { "CLRSEQINT", 0x04, 0x04 }, 82 { "CLRSEQINT", 0x04, 0x04 },
@@ -94,7 +94,7 @@ ahd_clrint_print(u_int regvalue, u_int *cur_col, u_int wrap)
94 0x03, regvalue, cur_col, wrap)); 94 0x03, regvalue, cur_col, wrap));
95} 95}
96 96
97static ahd_reg_parse_entry_t ERROR_parse_table[] = { 97static const ahd_reg_parse_entry_t ERROR_parse_table[] = {
98 { "DSCTMOUT", 0x02, 0x02 }, 98 { "DSCTMOUT", 0x02, 0x02 },
99 { "ILLOPCODE", 0x04, 0x04 }, 99 { "ILLOPCODE", 0x04, 0x04 },
100 { "SQPARERR", 0x08, 0x08 }, 100 { "SQPARERR", 0x08, 0x08 },
@@ -111,24 +111,7 @@ ahd_error_print(u_int regvalue, u_int *cur_col, u_int wrap)
111 0x04, regvalue, cur_col, wrap)); 111 0x04, regvalue, cur_col, wrap));
112} 112}
113 113
114static ahd_reg_parse_entry_t CLRERR_parse_table[] = { 114static const ahd_reg_parse_entry_t HCNTRL_parse_table[] = {
115 { "CLRDSCTMOUT", 0x02, 0x02 },
116 { "CLRILLOPCODE", 0x04, 0x04 },
117 { "CLRSQPARERR", 0x08, 0x08 },
118 { "CLRDPARERR", 0x10, 0x10 },
119 { "CLRMPARERR", 0x20, 0x20 },
120 { "CLRCIOACCESFAIL", 0x40, 0x40 },
121 { "CLRCIOPARERR", 0x80, 0x80 }
122};
123
124int
125ahd_clrerr_print(u_int regvalue, u_int *cur_col, u_int wrap)
126{
127 return (ahd_print_register(CLRERR_parse_table, 7, "CLRERR",
128 0x04, regvalue, cur_col, wrap));
129}
130
131static ahd_reg_parse_entry_t HCNTRL_parse_table[] = {
132 { "CHIPRST", 0x01, 0x01 }, 115 { "CHIPRST", 0x01, 0x01 },
133 { "CHIPRSTACK", 0x01, 0x01 }, 116 { "CHIPRSTACK", 0x01, 0x01 },
134 { "INTEN", 0x02, 0x02 }, 117 { "INTEN", 0x02, 0x02 },
@@ -160,7 +143,7 @@ ahd_hescb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
160 0x08, regvalue, cur_col, wrap)); 143 0x08, regvalue, cur_col, wrap));
161} 144}
162 145
163static ahd_reg_parse_entry_t HS_MAILBOX_parse_table[] = { 146static const ahd_reg_parse_entry_t HS_MAILBOX_parse_table[] = {
164 { "ENINT_COALESCE", 0x40, 0x40 }, 147 { "ENINT_COALESCE", 0x40, 0x40 },
165 { "HOST_TQINPOS", 0x80, 0x80 } 148 { "HOST_TQINPOS", 0x80, 0x80 }
166}; 149};
@@ -172,7 +155,7 @@ ahd_hs_mailbox_print(u_int regvalue, u_int *cur_col, u_int wrap)
172 0x0b, regvalue, cur_col, wrap)); 155 0x0b, regvalue, cur_col, wrap));
173} 156}
174 157
175static ahd_reg_parse_entry_t SEQINTSTAT_parse_table[] = { 158static const ahd_reg_parse_entry_t SEQINTSTAT_parse_table[] = {
176 { "SEQ_SPLTINT", 0x01, 0x01 }, 159 { "SEQ_SPLTINT", 0x01, 0x01 },
177 { "SEQ_PCIINT", 0x02, 0x02 }, 160 { "SEQ_PCIINT", 0x02, 0x02 },
178 { "SEQ_SCSIINT", 0x04, 0x04 }, 161 { "SEQ_SCSIINT", 0x04, 0x04 },
@@ -187,7 +170,7 @@ ahd_seqintstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
187 0x0c, regvalue, cur_col, wrap)); 170 0x0c, regvalue, cur_col, wrap));
188} 171}
189 172
190static ahd_reg_parse_entry_t CLRSEQINTSTAT_parse_table[] = { 173static const ahd_reg_parse_entry_t CLRSEQINTSTAT_parse_table[] = {
191 { "CLRSEQ_SPLTINT", 0x01, 0x01 }, 174 { "CLRSEQ_SPLTINT", 0x01, 0x01 },
192 { "CLRSEQ_PCIINT", 0x02, 0x02 }, 175 { "CLRSEQ_PCIINT", 0x02, 0x02 },
193 { "CLRSEQ_SCSIINT", 0x04, 0x04 }, 176 { "CLRSEQ_SCSIINT", 0x04, 0x04 },
@@ -230,7 +213,7 @@ ahd_sdscb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
230 0x14, regvalue, cur_col, wrap)); 213 0x14, regvalue, cur_col, wrap));
231} 214}
232 215
233static ahd_reg_parse_entry_t QOFF_CTLSTA_parse_table[] = { 216static const ahd_reg_parse_entry_t QOFF_CTLSTA_parse_table[] = {
234 { "SCB_QSIZE_4", 0x00, 0x0f }, 217 { "SCB_QSIZE_4", 0x00, 0x0f },
235 { "SCB_QSIZE_8", 0x01, 0x0f }, 218 { "SCB_QSIZE_8", 0x01, 0x0f },
236 { "SCB_QSIZE_16", 0x02, 0x0f }, 219 { "SCB_QSIZE_16", 0x02, 0x0f },
@@ -258,7 +241,7 @@ ahd_qoff_ctlsta_print(u_int regvalue, u_int *cur_col, u_int wrap)
258 0x16, regvalue, cur_col, wrap)); 241 0x16, regvalue, cur_col, wrap));
259} 242}
260 243
261static ahd_reg_parse_entry_t INTCTL_parse_table[] = { 244static const ahd_reg_parse_entry_t INTCTL_parse_table[] = {
262 { "SPLTINTEN", 0x01, 0x01 }, 245 { "SPLTINTEN", 0x01, 0x01 },
263 { "SEQINTEN", 0x02, 0x02 }, 246 { "SEQINTEN", 0x02, 0x02 },
264 { "SCSIINTEN", 0x04, 0x04 }, 247 { "SCSIINTEN", 0x04, 0x04 },
@@ -276,7 +259,7 @@ ahd_intctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
276 0x18, regvalue, cur_col, wrap)); 259 0x18, regvalue, cur_col, wrap));
277} 260}
278 261
279static ahd_reg_parse_entry_t DFCNTRL_parse_table[] = { 262static const ahd_reg_parse_entry_t DFCNTRL_parse_table[] = {
280 { "DIRECTIONEN", 0x01, 0x01 }, 263 { "DIRECTIONEN", 0x01, 0x01 },
281 { "FIFOFLUSH", 0x02, 0x02 }, 264 { "FIFOFLUSH", 0x02, 0x02 },
282 { "FIFOFLUSHACK", 0x02, 0x02 }, 265 { "FIFOFLUSHACK", 0x02, 0x02 },
@@ -297,7 +280,7 @@ ahd_dfcntrl_print(u_int regvalue, u_int *cur_col, u_int wrap)
297 0x19, regvalue, cur_col, wrap)); 280 0x19, regvalue, cur_col, wrap));
298} 281}
299 282
300static ahd_reg_parse_entry_t DSCOMMAND0_parse_table[] = { 283static const ahd_reg_parse_entry_t DSCOMMAND0_parse_table[] = {
301 { "CIOPARCKEN", 0x01, 0x01 }, 284 { "CIOPARCKEN", 0x01, 0x01 },
302 { "DISABLE_TWATE", 0x02, 0x02 }, 285 { "DISABLE_TWATE", 0x02, 0x02 },
303 { "EXTREQLCK", 0x10, 0x10 }, 286 { "EXTREQLCK", 0x10, 0x10 },
@@ -313,7 +296,7 @@ ahd_dscommand0_print(u_int regvalue, u_int *cur_col, u_int wrap)
313 0x19, regvalue, cur_col, wrap)); 296 0x19, regvalue, cur_col, wrap));
314} 297}
315 298
316static ahd_reg_parse_entry_t DFSTATUS_parse_table[] = { 299static const ahd_reg_parse_entry_t DFSTATUS_parse_table[] = {
317 { "FIFOEMP", 0x01, 0x01 }, 300 { "FIFOEMP", 0x01, 0x01 },
318 { "FIFOFULL", 0x02, 0x02 }, 301 { "FIFOFULL", 0x02, 0x02 },
319 { "DFTHRESH", 0x04, 0x04 }, 302 { "DFTHRESH", 0x04, 0x04 },
@@ -330,7 +313,7 @@ ahd_dfstatus_print(u_int regvalue, u_int *cur_col, u_int wrap)
330 0x1a, regvalue, cur_col, wrap)); 313 0x1a, regvalue, cur_col, wrap));
331} 314}
332 315
333static ahd_reg_parse_entry_t SG_CACHE_SHADOW_parse_table[] = { 316static const ahd_reg_parse_entry_t SG_CACHE_SHADOW_parse_table[] = {
334 { "LAST_SEG_DONE", 0x01, 0x01 }, 317 { "LAST_SEG_DONE", 0x01, 0x01 },
335 { "LAST_SEG", 0x02, 0x02 }, 318 { "LAST_SEG", 0x02, 0x02 },
336 { "ODD_SEG", 0x04, 0x04 }, 319 { "ODD_SEG", 0x04, 0x04 },
@@ -344,20 +327,7 @@ ahd_sg_cache_shadow_print(u_int regvalue, u_int *cur_col, u_int wrap)
344 0x1b, regvalue, cur_col, wrap)); 327 0x1b, regvalue, cur_col, wrap));
345} 328}
346 329
347static ahd_reg_parse_entry_t ARBCTL_parse_table[] = { 330static const ahd_reg_parse_entry_t SG_CACHE_PRE_parse_table[] = {
348 { "USE_TIME", 0x07, 0x07 },
349 { "RETRY_SWEN", 0x08, 0x08 },
350 { "RESET_HARB", 0x80, 0x80 }
351};
352
353int
354ahd_arbctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
355{
356 return (ahd_print_register(ARBCTL_parse_table, 3, "ARBCTL",
357 0x1b, regvalue, cur_col, wrap));
358}
359
360static ahd_reg_parse_entry_t SG_CACHE_PRE_parse_table[] = {
361 { "LAST_SEG", 0x02, 0x02 }, 331 { "LAST_SEG", 0x02, 0x02 },
362 { "ODD_SEG", 0x04, 0x04 }, 332 { "ODD_SEG", 0x04, 0x04 },
363 { "SG_ADDR_MASK", 0xf8, 0xf8 } 333 { "SG_ADDR_MASK", 0xf8, 0xf8 }
@@ -378,20 +348,6 @@ ahd_lqin_print(u_int regvalue, u_int *cur_col, u_int wrap)
378} 348}
379 349
380int 350int
381ahd_typeptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
382{
383 return (ahd_print_register(NULL, 0, "TYPEPTR",
384 0x20, regvalue, cur_col, wrap));
385}
386
387int
388ahd_tagptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
389{
390 return (ahd_print_register(NULL, 0, "TAGPTR",
391 0x21, regvalue, cur_col, wrap));
392}
393
394int
395ahd_lunptr_print(u_int regvalue, u_int *cur_col, u_int wrap) 351ahd_lunptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
396{ 352{
397 return (ahd_print_register(NULL, 0, "LUNPTR", 353 return (ahd_print_register(NULL, 0, "LUNPTR",
@@ -399,20 +355,6 @@ ahd_lunptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
399} 355}
400 356
401int 357int
402ahd_datalenptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
403{
404 return (ahd_print_register(NULL, 0, "DATALENPTR",
405 0x23, regvalue, cur_col, wrap));
406}
407
408int
409ahd_statlenptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
410{
411 return (ahd_print_register(NULL, 0, "STATLENPTR",
412 0x24, regvalue, cur_col, wrap));
413}
414
415int
416ahd_cmdlenptr_print(u_int regvalue, u_int *cur_col, u_int wrap) 358ahd_cmdlenptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
417{ 359{
418 return (ahd_print_register(NULL, 0, "CMDLENPTR", 360 return (ahd_print_register(NULL, 0, "CMDLENPTR",
@@ -448,13 +390,6 @@ ahd_qnextptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
448} 390}
449 391
450int 392int
451ahd_idptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
452{
453 return (ahd_print_register(NULL, 0, "IDPTR",
454 0x2a, regvalue, cur_col, wrap));
455}
456
457int
458ahd_abrtbyteptr_print(u_int regvalue, u_int *cur_col, u_int wrap) 393ahd_abrtbyteptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
459{ 394{
460 return (ahd_print_register(NULL, 0, "ABRTBYTEPTR", 395 return (ahd_print_register(NULL, 0, "ABRTBYTEPTR",
@@ -468,28 +403,7 @@ ahd_abrtbitptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
468 0x2c, regvalue, cur_col, wrap)); 403 0x2c, regvalue, cur_col, wrap));
469} 404}
470 405
471int 406static const ahd_reg_parse_entry_t LUNLEN_parse_table[] = {
472ahd_maxcmdbytes_print(u_int regvalue, u_int *cur_col, u_int wrap)
473{
474 return (ahd_print_register(NULL, 0, "MAXCMDBYTES",
475 0x2d, regvalue, cur_col, wrap));
476}
477
478int
479ahd_maxcmd2rcv_print(u_int regvalue, u_int *cur_col, u_int wrap)
480{
481 return (ahd_print_register(NULL, 0, "MAXCMD2RCV",
482 0x2e, regvalue, cur_col, wrap));
483}
484
485int
486ahd_shortthresh_print(u_int regvalue, u_int *cur_col, u_int wrap)
487{
488 return (ahd_print_register(NULL, 0, "SHORTTHRESH",
489 0x2f, regvalue, cur_col, wrap));
490}
491
492static ahd_reg_parse_entry_t LUNLEN_parse_table[] = {
493 { "ILUNLEN", 0x0f, 0x0f }, 407 { "ILUNLEN", 0x0f, 0x0f },
494 { "TLUNLEN", 0xf0, 0xf0 } 408 { "TLUNLEN", 0xf0, 0xf0 }
495}; 409};
@@ -522,49 +436,7 @@ ahd_maxcmdcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
522 0x33, regvalue, cur_col, wrap)); 436 0x33, regvalue, cur_col, wrap));
523} 437}
524 438
525int 439static const ahd_reg_parse_entry_t LQCTL1_parse_table[] = {
526ahd_lqrsvd01_print(u_int regvalue, u_int *cur_col, u_int wrap)
527{
528 return (ahd_print_register(NULL, 0, "LQRSVD01",
529 0x34, regvalue, cur_col, wrap));
530}
531
532int
533ahd_lqrsvd16_print(u_int regvalue, u_int *cur_col, u_int wrap)
534{
535 return (ahd_print_register(NULL, 0, "LQRSVD16",
536 0x35, regvalue, cur_col, wrap));
537}
538
539int
540ahd_lqrsvd17_print(u_int regvalue, u_int *cur_col, u_int wrap)
541{
542 return (ahd_print_register(NULL, 0, "LQRSVD17",
543 0x36, regvalue, cur_col, wrap));
544}
545
546int
547ahd_cmdrsvd0_print(u_int regvalue, u_int *cur_col, u_int wrap)
548{
549 return (ahd_print_register(NULL, 0, "CMDRSVD0",
550 0x37, regvalue, cur_col, wrap));
551}
552
553static ahd_reg_parse_entry_t LQCTL0_parse_table[] = {
554 { "LQ0INITGCLT", 0x03, 0x03 },
555 { "LQ0TARGCLT", 0x0c, 0x0c },
556 { "LQIINITGCLT", 0x30, 0x30 },
557 { "LQITARGCLT", 0xc0, 0xc0 }
558};
559
560int
561ahd_lqctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
562{
563 return (ahd_print_register(LQCTL0_parse_table, 4, "LQCTL0",
564 0x38, regvalue, cur_col, wrap));
565}
566
567static ahd_reg_parse_entry_t LQCTL1_parse_table[] = {
568 { "ABORTPENDING", 0x01, 0x01 }, 440 { "ABORTPENDING", 0x01, 0x01 },
569 { "SINGLECMD", 0x02, 0x02 }, 441 { "SINGLECMD", 0x02, 0x02 },
570 { "PCI2PCI", 0x04, 0x04 } 442 { "PCI2PCI", 0x04, 0x04 }
@@ -577,23 +449,7 @@ ahd_lqctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x38, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SCSBIST0_parse_table[] = {
- { "OSBISTRUN", 0x01, 0x01 },
- { "OSBISTDONE", 0x02, 0x02 },
- { "OSBISTERR", 0x04, 0x04 },
- { "GSBISTRUN", 0x10, 0x10 },
- { "GSBISTDONE", 0x20, 0x20 },
- { "GSBISTERR", 0x40, 0x40 }
-};
-
-int
-ahd_scsbist0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SCSBIST0_parse_table, 6, "SCSBIST0",
- 0x39, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t LQCTL2_parse_table[] = {
+static const ahd_reg_parse_entry_t LQCTL2_parse_table[] = {
  { "LQOPAUSE", 0x01, 0x01 },
  { "LQOTOIDLE", 0x02, 0x02 },
  { "LQOCONTINUE", 0x04, 0x04 },
@@ -611,20 +467,7 @@ ahd_lqctl2_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x39, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SCSBIST1_parse_table[] = {
- { "NTBISTRUN", 0x01, 0x01 },
- { "NTBISTDONE", 0x02, 0x02 },
- { "NTBISTERR", 0x04, 0x04 }
-};
-
-int
-ahd_scsbist1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SCSBIST1_parse_table, 3, "SCSBIST1",
- 0x3a, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SCSISEQ0_parse_table[] = {
+static const ahd_reg_parse_entry_t SCSISEQ0_parse_table[] = {
  { "SCSIRSTO", 0x01, 0x01 },
  { "FORCEBUSFREE", 0x10, 0x10 },
  { "ENARBO", 0x20, 0x20 },
@@ -639,7 +482,7 @@ ahd_scsiseq0_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x3a, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SCSISEQ1_parse_table[] = {
+static const ahd_reg_parse_entry_t SCSISEQ1_parse_table[] = {
  { "ALTSTIM", 0x01, 0x01 },
  { "ENAUTOATNP", 0x02, 0x02 },
  { "MANUALP", 0x0c, 0x0c },
@@ -655,7 +498,7 @@ ahd_scsiseq1_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x3b, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SXFRCTL0_parse_table[] = {
+static const ahd_reg_parse_entry_t SXFRCTL0_parse_table[] = {
  { "SPIOEN", 0x08, 0x08 },
  { "BIOSCANCELEN", 0x10, 0x10 },
  { "DFPEXP", 0x40, 0x40 },
@@ -669,21 +512,7 @@ ahd_sxfrctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x3c, regvalue, cur_col, wrap));
 }
 
-int
-ahd_dlcount_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "DLCOUNT",
- 0x3c, regvalue, cur_col, wrap));
-}
-
-int
-ahd_businitid_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "BUSINITID",
- 0x3c, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SXFRCTL1_parse_table[] = {
+static const ahd_reg_parse_entry_t SXFRCTL1_parse_table[] = {
  { "STPWEN", 0x01, 0x01 },
  { "ACTNEGEN", 0x02, 0x02 },
  { "ENSTIMER", 0x04, 0x04 },
@@ -700,27 +529,7 @@ ahd_sxfrctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x3d, regvalue, cur_col, wrap));
 }
 
-int
-ahd_bustargid_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "BUSTARGID",
- 0x3e, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SXFRCTL2_parse_table[] = {
- { "ASU", 0x07, 0x07 },
- { "CMDDMAEN", 0x08, 0x08 },
- { "AUTORSTDIS", 0x10, 0x10 }
-};
-
-int
-ahd_sxfrctl2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SXFRCTL2_parse_table, 3, "SXFRCTL2",
- 0x3e, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DFFSTAT_parse_table[] = {
+static const ahd_reg_parse_entry_t DFFSTAT_parse_table[] = {
  { "CURRFIFO_0", 0x00, 0x03 },
  { "CURRFIFO_1", 0x01, 0x03 },
  { "CURRFIFO_NONE", 0x03, 0x03 },
@@ -736,7 +545,14 @@ ahd_dffstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x3f, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SCSISIGO_parse_table[] = {
+int
+ahd_multargid_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(NULL, 0, "MULTARGID",
+ 0x40, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t SCSISIGO_parse_table[] = {
  { "P_DATAOUT", 0x00, 0xe0 },
  { "P_DATAOUT_DT", 0x20, 0xe0 },
  { "P_DATAIN", 0x40, 0xe0 },
@@ -763,14 +579,7 @@ ahd_scsisigo_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x40, regvalue, cur_col, wrap));
 }
 
-int
-ahd_multargid_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "MULTARGID",
- 0x40, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SCSISIGI_parse_table[] = {
+static const ahd_reg_parse_entry_t SCSISIGI_parse_table[] = {
  { "P_DATAOUT", 0x00, 0xe0 },
  { "P_DATAOUT_DT", 0x20, 0xe0 },
  { "P_DATAIN", 0x40, 0xe0 },
@@ -797,7 +606,7 @@ ahd_scsisigi_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x41, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SCSIPHASE_parse_table[] = {
+static const ahd_reg_parse_entry_t SCSIPHASE_parse_table[] = {
  { "DATA_OUT_PHASE", 0x01, 0x03 },
  { "DATA_IN_PHASE", 0x02, 0x03 },
  { "DATA_PHASE_MASK", 0x03, 0x03 },
@@ -815,13 +624,6 @@ ahd_scsiphase_print(u_int regvalue, u_int *cur_col, u_int wrap)
 }
 
 int
-ahd_scsidat0_img_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SCSIDAT0_IMG",
- 0x43, regvalue, cur_col, wrap));
-}
-
-int
 ahd_scsidat_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
  return (ahd_print_register(NULL, 0, "SCSIDAT",
@@ -835,7 +637,7 @@ ahd_scsibus_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x46, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t TARGIDIN_parse_table[] = {
+static const ahd_reg_parse_entry_t TARGIDIN_parse_table[] = {
  { "TARGID", 0x0f, 0x0f },
  { "CLKOUT", 0x80, 0x80 }
 };
@@ -847,7 +649,7 @@ ahd_targidin_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x48, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SELID_parse_table[] = {
+static const ahd_reg_parse_entry_t SELID_parse_table[] = {
  { "ONEBIT", 0x08, 0x08 },
  { "SELID_MASK", 0xf0, 0xf0 }
 };
@@ -859,7 +661,7 @@ ahd_selid_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x49, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t OPTIONMODE_parse_table[] = {
+static const ahd_reg_parse_entry_t OPTIONMODE_parse_table[] = {
  { "AUTO_MSGOUT_DE", 0x02, 0x02 },
  { "ENDGFORMCHK", 0x04, 0x04 },
  { "BUSFREEREV", 0x10, 0x10 },
@@ -876,7 +678,7 @@ ahd_optionmode_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x4a, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SBLKCTL_parse_table[] = {
+static const ahd_reg_parse_entry_t SBLKCTL_parse_table[] = {
  { "SELWIDE", 0x02, 0x02 },
  { "ENAB20", 0x04, 0x04 },
  { "ENAB40", 0x08, 0x08 },
@@ -891,24 +693,7 @@ ahd_sblkctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x4a, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t CLRSINT0_parse_table[] = {
- { "CLRARBDO", 0x01, 0x01 },
- { "CLRSPIORDY", 0x02, 0x02 },
- { "CLROVERRUN", 0x04, 0x04 },
- { "CLRIOERR", 0x08, 0x08 },
- { "CLRSELINGO", 0x10, 0x10 },
- { "CLRSELDI", 0x20, 0x20 },
- { "CLRSELDO", 0x40, 0x40 }
-};
-
-int
-ahd_clrsint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CLRSINT0_parse_table, 7, "CLRSINT0",
- 0x4b, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SSTAT0_parse_table[] = {
+static const ahd_reg_parse_entry_t SSTAT0_parse_table[] = {
  { "ARBDO", 0x01, 0x01 },
  { "SPIORDY", 0x02, 0x02 },
  { "OVERRUN", 0x04, 0x04 },
@@ -926,7 +711,7 @@ ahd_sstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x4b, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SIMODE0_parse_table[] = {
+static const ahd_reg_parse_entry_t SIMODE0_parse_table[] = {
  { "ENARBDO", 0x01, 0x01 },
  { "ENSPIORDY", 0x02, 0x02 },
  { "ENOVERRUN", 0x04, 0x04 },
@@ -943,24 +728,24 @@ ahd_simode0_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x4b, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t CLRSINT1_parse_table[] = {
- { "CLRREQINIT", 0x01, 0x01 },
- { "CLRSTRB2FAST", 0x02, 0x02 },
- { "CLRSCSIPERR", 0x04, 0x04 },
- { "CLRBUSFREE", 0x08, 0x08 },
- { "CLRSCSIRSTI", 0x20, 0x20 },
- { "CLRATNO", 0x40, 0x40 },
- { "CLRSELTIMEO", 0x80, 0x80 }
+static const ahd_reg_parse_entry_t CLRSINT0_parse_table[] = {
+ { "CLRARBDO", 0x01, 0x01 },
+ { "CLRSPIORDY", 0x02, 0x02 },
+ { "CLROVERRUN", 0x04, 0x04 },
+ { "CLRIOERR", 0x08, 0x08 },
+ { "CLRSELINGO", 0x10, 0x10 },
+ { "CLRSELDI", 0x20, 0x20 },
+ { "CLRSELDO", 0x40, 0x40 }
 };
 
 int
-ahd_clrsint1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+ahd_clrsint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
- return (ahd_print_register(CLRSINT1_parse_table, 7, "CLRSINT1",
- 0x4c, regvalue, cur_col, wrap));
+ return (ahd_print_register(CLRSINT0_parse_table, 7, "CLRSINT0",
+ 0x4b, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SSTAT1_parse_table[] = {
+static const ahd_reg_parse_entry_t SSTAT1_parse_table[] = {
  { "REQINIT", 0x01, 0x01 },
  { "STRB2FAST", 0x02, 0x02 },
  { "SCSIPERR", 0x04, 0x04 },
@@ -978,7 +763,24 @@ ahd_sstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x4c, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SSTAT2_parse_table[] = {
+static const ahd_reg_parse_entry_t CLRSINT1_parse_table[] = {
+ { "CLRREQINIT", 0x01, 0x01 },
+ { "CLRSTRB2FAST", 0x02, 0x02 },
+ { "CLRSCSIPERR", 0x04, 0x04 },
+ { "CLRBUSFREE", 0x08, 0x08 },
+ { "CLRSCSIRSTI", 0x20, 0x20 },
+ { "CLRATNO", 0x40, 0x40 },
+ { "CLRSELTIMEO", 0x80, 0x80 }
+};
+
+int
+ahd_clrsint1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(CLRSINT1_parse_table, 7, "CLRSINT1",
+ 0x4c, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t SSTAT2_parse_table[] = {
  { "BUSFREE_LQO", 0x40, 0xc0 },
  { "BUSFREE_DFF0", 0x80, 0xc0 },
  { "BUSFREE_DFF1", 0xc0, 0xc0 },
@@ -998,20 +800,7 @@ ahd_sstat2_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x4d, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SIMODE2_parse_table[] = {
- { "ENDMADONE", 0x01, 0x01 },
- { "ENSDONE", 0x02, 0x02 },
- { "ENWIDE_RES", 0x04, 0x04 }
-};
-
-int
-ahd_simode2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SIMODE2_parse_table, 3, "SIMODE2",
- 0x4d, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t CLRSINT2_parse_table[] = {
+static const ahd_reg_parse_entry_t CLRSINT2_parse_table[] = {
  { "CLRDMADONE", 0x01, 0x01 },
  { "CLRSDONE", 0x02, 0x02 },
  { "CLRWIDE_RES", 0x04, 0x04 },
@@ -1025,7 +814,7 @@ ahd_clrsint2_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x4d, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t PERRDIAG_parse_table[] = {
+static const ahd_reg_parse_entry_t PERRDIAG_parse_table[] = {
  { "DTERR", 0x01, 0x01 },
  { "DGFORMERR", 0x02, 0x02 },
  { "CRCERR", 0x04, 0x04 },
@@ -1064,7 +853,7 @@ ahd_lqostate_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x4f, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t LQISTAT0_parse_table[] = {
+static const ahd_reg_parse_entry_t LQISTAT0_parse_table[] = {
  { "LQIATNCMD", 0x01, 0x01 },
  { "LQIATNLQ", 0x02, 0x02 },
  { "LQIBADLQT", 0x04, 0x04 },
@@ -1080,23 +869,7 @@ ahd_lqistat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x50, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t CLRLQIINT0_parse_table[] = {
- { "CLRLQIATNCMD", 0x01, 0x01 },
- { "CLRLQIATNLQ", 0x02, 0x02 },
- { "CLRLQIBADLQT", 0x04, 0x04 },
- { "CLRLQICRCT2", 0x08, 0x08 },
- { "CLRLQICRCT1", 0x10, 0x10 },
- { "CLRLQIATNQAS", 0x20, 0x20 }
-};
-
-int
-ahd_clrlqiint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CLRLQIINT0_parse_table, 6, "CLRLQIINT0",
- 0x50, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t LQIMODE0_parse_table[] = {
+static const ahd_reg_parse_entry_t LQIMODE0_parse_table[] = {
  { "ENLQIATNCMD", 0x01, 0x01 },
  { "ENLQIATNLQ", 0x02, 0x02 },
  { "ENLQIBADLQT", 0x04, 0x04 },
@@ -1112,7 +885,23 @@ ahd_lqimode0_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x50, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t LQIMODE1_parse_table[] = {
+static const ahd_reg_parse_entry_t CLRLQIINT0_parse_table[] = {
+ { "CLRLQIATNCMD", 0x01, 0x01 },
+ { "CLRLQIATNLQ", 0x02, 0x02 },
+ { "CLRLQIBADLQT", 0x04, 0x04 },
+ { "CLRLQICRCT2", 0x08, 0x08 },
+ { "CLRLQICRCT1", 0x10, 0x10 },
+ { "CLRLQIATNQAS", 0x20, 0x20 }
+};
+
+int
+ahd_clrlqiint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(CLRLQIINT0_parse_table, 6, "CLRLQIINT0",
+ 0x50, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t LQIMODE1_parse_table[] = {
  { "ENLQIOVERI_NLQ", 0x01, 0x01 },
  { "ENLQIOVERI_LQ", 0x02, 0x02 },
  { "ENLQIBADLQI", 0x04, 0x04 },
@@ -1130,7 +919,7 @@ ahd_lqimode1_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x51, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t LQISTAT1_parse_table[] = {
+static const ahd_reg_parse_entry_t LQISTAT1_parse_table[] = {
  { "LQIOVERI_NLQ", 0x01, 0x01 },
  { "LQIOVERI_LQ", 0x02, 0x02 },
  { "LQIBADLQI", 0x04, 0x04 },
@@ -1148,7 +937,7 @@ ahd_lqistat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x51, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t CLRLQIINT1_parse_table[] = {
+static const ahd_reg_parse_entry_t CLRLQIINT1_parse_table[] = {
  { "CLRLQIOVERI_NLQ", 0x01, 0x01 },
  { "CLRLQIOVERI_LQ", 0x02, 0x02 },
  { "CLRLQIBADLQI", 0x04, 0x04 },
@@ -1166,7 +955,7 @@ ahd_clrlqiint1_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x51, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t LQISTAT2_parse_table[] = {
+static const ahd_reg_parse_entry_t LQISTAT2_parse_table[] = {
  { "LQIGSAVAIL", 0x01, 0x01 },
  { "LQISTOPCMD", 0x02, 0x02 },
  { "LQISTOPLQ", 0x04, 0x04 },
@@ -1184,7 +973,7 @@ ahd_lqistat2_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x52, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SSTAT3_parse_table[] = {
+static const ahd_reg_parse_entry_t SSTAT3_parse_table[] = {
  { "OSRAMPERR", 0x01, 0x01 },
  { "NTRAMPERR", 0x02, 0x02 }
 };
@@ -1196,7 +985,7 @@ ahd_sstat3_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x53, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SIMODE3_parse_table[] = {
+static const ahd_reg_parse_entry_t SIMODE3_parse_table[] = {
  { "ENOSRAMPERR", 0x01, 0x01 },
  { "ENNTRAMPERR", 0x02, 0x02 }
 };
@@ -1208,7 +997,7 @@ ahd_simode3_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x53, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t CLRSINT3_parse_table[] = {
+static const ahd_reg_parse_entry_t CLRSINT3_parse_table[] = {
  { "CLROSRAMPERR", 0x01, 0x01 },
  { "CLRNTRAMPERR", 0x02, 0x02 }
 };
@@ -1220,7 +1009,7 @@ ahd_clrsint3_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x53, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t LQOSTAT0_parse_table[] = {
+static const ahd_reg_parse_entry_t LQOSTAT0_parse_table[] = {
  { "LQOTCRC", 0x01, 0x01 },
  { "LQOATNPKT", 0x02, 0x02 },
  { "LQOATNLQ", 0x04, 0x04 },
@@ -1235,7 +1024,7 @@ ahd_lqostat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x54, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t CLRLQOINT0_parse_table[] = {
+static const ahd_reg_parse_entry_t CLRLQOINT0_parse_table[] = {
  { "CLRLQOTCRC", 0x01, 0x01 },
  { "CLRLQOATNPKT", 0x02, 0x02 },
  { "CLRLQOATNLQ", 0x04, 0x04 },
@@ -1250,7 +1039,7 @@ ahd_clrlqoint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x54, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t LQOMODE0_parse_table[] = {
+static const ahd_reg_parse_entry_t LQOMODE0_parse_table[] = {
  { "ENLQOTCRC", 0x01, 0x01 },
  { "ENLQOATNPKT", 0x02, 0x02 },
  { "ENLQOATNLQ", 0x04, 0x04 },
@@ -1265,7 +1054,7 @@ ahd_lqomode0_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x54, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t LQOMODE1_parse_table[] = {
+static const ahd_reg_parse_entry_t LQOMODE1_parse_table[] = {
  { "ENLQOPHACHGINPKT", 0x01, 0x01 },
  { "ENLQOBUSFREE", 0x02, 0x02 },
  { "ENLQOBADQAS", 0x04, 0x04 },
@@ -1280,7 +1069,7 @@ ahd_lqomode1_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x55, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t LQOSTAT1_parse_table[] = {
+static const ahd_reg_parse_entry_t LQOSTAT1_parse_table[] = {
  { "LQOPHACHGINPKT", 0x01, 0x01 },
  { "LQOBUSFREE", 0x02, 0x02 },
  { "LQOBADQAS", 0x04, 0x04 },
@@ -1295,7 +1084,7 @@ ahd_lqostat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x55, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t CLRLQOINT1_parse_table[] = {
+static const ahd_reg_parse_entry_t CLRLQOINT1_parse_table[] = {
  { "CLRLQOPHACHGINPKT", 0x01, 0x01 },
  { "CLRLQOBUSFREE", 0x02, 0x02 },
  { "CLRLQOBADQAS", 0x04, 0x04 },
@@ -1310,7 +1099,7 @@ ahd_clrlqoint1_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x55, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t LQOSTAT2_parse_table[] = {
+static const ahd_reg_parse_entry_t LQOSTAT2_parse_table[] = {
  { "LQOSTOP0", 0x01, 0x01 },
  { "LQOPHACHGOUTPKT", 0x02, 0x02 },
  { "LQOWAITFIFO", 0x10, 0x10 },
@@ -1331,7 +1120,7 @@ ahd_os_space_cnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x56, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SIMODE1_parse_table[] = {
+static const ahd_reg_parse_entry_t SIMODE1_parse_table[] = {
  { "ENREQINIT", 0x01, 0x01 },
  { "ENSTRB2FAST", 0x02, 0x02 },
  { "ENSCSIPERR", 0x04, 0x04 },
@@ -1356,7 +1145,7 @@ ahd_gsfifo_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x58, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t DFFSXFRCTL_parse_table[] = {
+static const ahd_reg_parse_entry_t DFFSXFRCTL_parse_table[] = {
  { "RSTCHN", 0x01, 0x01 },
  { "CLRCHN", 0x02, 0x02 },
  { "CLRSHCNT", 0x04, 0x04 },
@@ -1370,15 +1159,17 @@ ahd_dffsxfrctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x5a, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t LQOSCSCTL_parse_table[] = {
+static const ahd_reg_parse_entry_t LQOSCSCTL_parse_table[] = {
  { "LQONOCHKOVER", 0x01, 0x01 },
+ { "LQONOHOLDLACK", 0x02, 0x02 },
+ { "LQOBUSETDLY", 0x40, 0x40 },
  { "LQOH2A_VERSION", 0x80, 0x80 }
 };
 
 int
 ahd_lqoscsctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
- return (ahd_print_register(LQOSCSCTL_parse_table, 2, "LQOSCSCTL",
+ return (ahd_print_register(LQOSCSCTL_parse_table, 4, "LQOSCSCTL",
  0x5a, regvalue, cur_col, wrap));
 }
 
@@ -1389,7 +1180,7 @@ ahd_nextscb_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x5a, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t CLRSEQINTSRC_parse_table[] = {
+static const ahd_reg_parse_entry_t CLRSEQINTSRC_parse_table[] = {
  { "CLRCFG4TCMD", 0x01, 0x01 },
  { "CLRCFG4ICMD", 0x02, 0x02 },
  { "CLRCFG4TSTAT", 0x04, 0x04 },
@@ -1406,7 +1197,7 @@ ahd_clrseqintsrc_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x5b, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SEQINTSRC_parse_table[] = {
+static const ahd_reg_parse_entry_t SEQINTSRC_parse_table[] = {
  { "CFG4TCMD", 0x01, 0x01 },
  { "CFG4ICMD", 0x02, 0x02 },
  { "CFG4TSTAT", 0x04, 0x04 },
@@ -1423,14 +1214,7 @@ ahd_seqintsrc_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x5b, regvalue, cur_col, wrap));
 }
 
-int
-ahd_currscb_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "CURRSCB",
- 0x5c, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SEQIMODE_parse_table[] = {
+static const ahd_reg_parse_entry_t SEQIMODE_parse_table[] = {
  { "ENCFG4TCMD", 0x01, 0x01 },
  { "ENCFG4ICMD", 0x02, 0x02 },
  { "ENCFG4TSTAT", 0x04, 0x04 },
@@ -1447,7 +1231,14 @@ ahd_seqimode_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x5c, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t MDFFSTAT_parse_table[] = {
+int
+ahd_currscb_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(NULL, 0, "CURRSCB",
+ 0x5c, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t MDFFSTAT_parse_table[] = {
  { "FIFOFREE", 0x01, 0x01 },
  { "DATAINFIFO", 0x02, 0x02 },
  { "DLZERO", 0x04, 0x04 },
@@ -1464,24 +1255,6 @@ ahd_mdffstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x5d, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t CRCCONTROL_parse_table[] = {
- { "CRCVALCHKEN", 0x40, 0x40 }
-};
-
-int
-ahd_crccontrol_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CRCCONTROL_parse_table, 1, "CRCCONTROL",
- 0x5d, regvalue, cur_col, wrap));
-}
-
-int
-ahd_dfftag_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "DFFTAG",
- 0x5e, regvalue, cur_col, wrap));
-}
-
 int
 ahd_lastscb_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
@@ -1489,31 +1262,6 @@ ahd_lastscb_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x5e, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SCSITEST_parse_table[] = {
- { "SEL_TXPLL_DEBUG", 0x04, 0x04 },
- { "CNTRTEST", 0x08, 0x08 }
-};
-
-int
-ahd_scsitest_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SCSITEST_parse_table, 2, "SCSITEST",
- 0x5e, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t IOPDNCTL_parse_table[] = {
- { "PDN_DIFFSENSE", 0x01, 0x01 },
- { "PDN_IDIST", 0x04, 0x04 },
- { "DISABLE_OE", 0x80, 0x80 }
-};
-
-int
-ahd_iopdnctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(IOPDNCTL_parse_table, 3, "IOPDNCTL",
- 0x5f, regvalue, cur_col, wrap));
-}
-
 int
 ahd_shaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
@@ -1529,13 +1277,6 @@ ahd_negoaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
 }
 
 int
-ahd_dgrpcrci_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "DGRPCRCI",
- 0x60, regvalue, cur_col, wrap));
-}
-
-int
 ahd_negperiod_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
  return (ahd_print_register(NULL, 0, "NEGPERIOD",
@@ -1543,20 +1284,13 @@ ahd_negperiod_print(u_int regvalue, u_int *cur_col, u_int wrap)
 }
 
 int
-ahd_packcrci_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "PACKCRCI",
- 0x62, regvalue, cur_col, wrap));
-}
-
-int
 ahd_negoffset_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
  return (ahd_print_register(NULL, 0, "NEGOFFSET",
  0x62, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t NEGPPROPTS_parse_table[] = {
+static const ahd_reg_parse_entry_t NEGPPROPTS_parse_table[] = {
  { "PPROPT_IUT", 0x01, 0x01 },
  { "PPROPT_DT", 0x02, 0x02 },
  { "PPROPT_QAS", 0x04, 0x04 },
@@ -1570,7 +1304,7 @@ ahd_negppropts_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x63, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t NEGCONOPTS_parse_table[] = {
+static const ahd_reg_parse_entry_t NEGCONOPTS_parse_table[] = {
  { "WIDEXFER", 0x01, 0x01 },
  { "ENAUTOATNO", 0x02, 0x02 },
  { "ENAUTOATNI", 0x04, 0x04 },
@@ -1601,20 +1335,21 @@ ahd_annexdat_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x66, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SCSCHKN_parse_table[] = {
+static const ahd_reg_parse_entry_t SCSCHKN_parse_table[] = {
  { "LSTSGCLRDIS", 0x01, 0x01 },
  { "SHVALIDSTDIS", 0x02, 0x02 },
  { "DFFACTCLR", 0x04, 0x04 },
  { "SDONEMSKDIS", 0x08, 0x08 },
  { "WIDERESEN", 0x10, 0x10 },
  { "CURRFIFODEF", 0x20, 0x20 },
- { "STSELSKIDDIS", 0x40, 0x40 }
+ { "STSELSKIDDIS", 0x40, 0x40 },
+ { "BIDICHKDIS", 0x80, 0x80 }
 };
 
 int
 ahd_scschkn_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
- return (ahd_print_register(SCSCHKN_parse_table, 7, "SCSCHKN",
+ return (ahd_print_register(SCSCHKN_parse_table, 8, "SCSCHKN",
  0x66, regvalue, cur_col, wrap));
 }
 
@@ -1625,23 +1360,6 @@ ahd_iownid_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x67, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t PLL960CTL0_parse_table[] = {
- { "PLL_ENFBM", 0x01, 0x01 },
- { "PLL_DLPF", 0x02, 0x02 },
- { "PLL_ENLPF", 0x04, 0x04 },
- { "PLL_ENLUD", 0x08, 0x08 },
- { "PLL_NS", 0x30, 0x30 },
- { "PLL_PWDN", 0x40, 0x40 },
- { "PLL_VCOSEL", 0x80, 0x80 }
-};
-
-int
-ahd_pll960ctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(PLL960CTL0_parse_table, 7, "PLL960CTL0",
- 0x68, regvalue, cur_col, wrap));
-}
-
 int
 ahd_shcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
@@ -1656,33 +1374,6 @@ ahd_townid_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x69, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t PLL960CTL1_parse_table[] = {
- { "PLL_RST", 0x01, 0x01 },
- { "PLL_CNTCLR", 0x40, 0x40 },
- { "PLL_CNTEN", 0x80, 0x80 }
-};
-
-int
-ahd_pll960ctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(PLL960CTL1_parse_table, 3, "PLL960CTL1",
- 0x69, regvalue, cur_col, wrap));
-}
-
-int
-ahd_pll960cnt0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "PLL960CNT0",
- 0x6a, regvalue, cur_col, wrap));
-}
-
-int
-ahd_xsig_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "XSIG",
- 0x6a, regvalue, cur_col, wrap));
-}
-
 int
 ahd_seloid_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
@@ -1690,57 +1381,6 @@ ahd_seloid_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x6b, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t PLL400CTL0_parse_table[] = {
- { "PLL_ENFBM", 0x01, 0x01 },
- { "PLL_DLPF", 0x02, 0x02 },
- { "PLL_ENLPF", 0x04, 0x04 },
- { "PLL_ENLUD", 0x08, 0x08 },
- { "PLL_NS", 0x30, 0x30 },
- { "PLL_PWDN", 0x40, 0x40 },
- { "PLL_VCOSEL", 0x80, 0x80 }
-};
-
-int
-ahd_pll400ctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(PLL400CTL0_parse_table, 7, "PLL400CTL0",
- 0x6c, regvalue, cur_col, wrap));
-}
-
-int
-ahd_fairness_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "FAIRNESS",
- 0x6c, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t PLL400CTL1_parse_table[] = {
- { "PLL_RST", 0x01, 0x01 },
- { "PLL_CNTCLR", 0x40, 0x40 },
- { "PLL_CNTEN", 0x80, 0x80 }
-};
-
-int
-ahd_pll400ctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(PLL400CTL1_parse_table, 3, "PLL400CTL1",
- 0x6d, regvalue, cur_col, wrap));
-}
-
-int
-ahd_unfairness_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "UNFAIRNESS",
- 0x6e, regvalue, cur_col, wrap));
-}
-
-int
-ahd_pll400cnt0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "PLL400CNT0",
- 0x6e, regvalue, cur_col, wrap));
-}
-
 int
 ahd_haddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
@@ -1748,31 +1388,6 @@ ahd_haddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x70, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t PLLDELAY_parse_table[] = {
- { "SPLIT_DROP_REQ", 0x80, 0x80 }
-};
-
-int
-ahd_plldelay_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(PLLDELAY_parse_table, 1, "PLLDELAY",
- 0x70, regvalue, cur_col, wrap));
-}
-
-int
-ahd_hodmaadr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "HODMAADR",
- 0x70, regvalue, cur_col, wrap));
-}
-
-int
-ahd_hodmacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "HODMACNT",
- 0x78, regvalue, cur_col, wrap));
-}
-
 int
 ahd_hcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
@@ -1781,10 +1396,10 @@ ahd_hcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
 }
 
 int
-ahd_hodmaen_print(u_int regvalue, u_int *cur_col, u_int wrap)
+ahd_sghaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
- return (ahd_print_register(NULL, 0, "HODMAEN",
- 0x7a, regvalue, cur_col, wrap));
+ return (ahd_print_register(NULL, 0, "SGHADDR",
+ 0x7c, regvalue, cur_col, wrap));
 }
 
 int
@@ -1795,10 +1410,10 @@ ahd_scbhaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
 }
 
 int
-ahd_sghaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+ahd_sghcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
- return (ahd_print_register(NULL, 0, "SGHADDR",
- 0x7c, regvalue, cur_col, wrap));
+ return (ahd_print_register(NULL, 0, "SGHCNT",
+ 0x84, regvalue, cur_col, wrap));
 }
 
 int
@@ -1808,14 +1423,7 @@ ahd_scbhcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x84, regvalue, cur_col, wrap));
 }
 
-int
-ahd_sghcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SGHCNT",
- 0x84, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DFF_THRSH_parse_table[] = {
+static const ahd_reg_parse_entry_t DFF_THRSH_parse_table[] = {
  { "WR_DFTHRSH_MIN", 0x00, 0x70 },
  { "RD_DFTHRSH_MIN", 0x00, 0x07 },
  { "RD_DFTHRSH_25", 0x01, 0x07 },
@@ -1843,209 +1451,7 @@ ahd_dff_thrsh_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x88, regvalue, cur_col, wrap));
 }
 
-int
-ahd_romaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "ROMADDR",
- 0x8a, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t ROMCNTRL_parse_table[] = {
- { "RDY", 0x01, 0x01 },
- { "REPEAT", 0x02, 0x02 },
- { "ROMSPD", 0x18, 0x18 },
- { "ROMOP", 0xe0, 0xe0 }
-};
-
-int
-ahd_romcntrl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(ROMCNTRL_parse_table, 4, "ROMCNTRL",
- 0x8d, regvalue, cur_col, wrap));
-}
-
-int
-ahd_romdata_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "ROMDATA",
- 0x8e, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t CMCRXMSG0_parse_table[] = {
- { "CFNUM", 0x07, 0x07 },
- { "CDNUM", 0xf8, 0xf8 }
-};
-
-int
-ahd_cmcrxmsg0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CMCRXMSG0_parse_table, 2, "CMCRXMSG0",
- 0x90, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t ROENABLE_parse_table[] = {
- { "DCH0ROEN", 0x01, 0x01 },
- { "DCH1ROEN", 0x02, 0x02 },
- { "SGROEN", 0x04, 0x04 },
- { "CMCROEN", 0x08, 0x08 },
- { "OVLYROEN", 0x10, 0x10 },
- { "MSIROEN", 0x20, 0x20 }
-};
-
-int
-ahd_roenable_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(ROENABLE_parse_table, 6, "ROENABLE",
- 0x90, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t OVLYRXMSG0_parse_table[] = {
- { "CFNUM", 0x07, 0x07 },
- { "CDNUM", 0xf8, 0xf8 }
-};
-
-int
-ahd_ovlyrxmsg0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(OVLYRXMSG0_parse_table, 2, "OVLYRXMSG0",
- 0x90, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DCHRXMSG0_parse_table[] = {
- { "CFNUM", 0x07, 0x07 },
- { "CDNUM", 0xf8, 0xf8 }
-};
-
-int
-ahd_dchrxmsg0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(DCHRXMSG0_parse_table, 2, "DCHRXMSG0",
- 0x90, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t OVLYRXMSG1_parse_table[] = {
- { "CBNUM", 0xff, 0xff }
-};
-
-int
-ahd_ovlyrxmsg1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(OVLYRXMSG1_parse_table, 1, "OVLYRXMSG1",
- 0x91, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t NSENABLE_parse_table[] = {
- { "DCH0NSEN", 0x01, 0x01 },
- { "DCH1NSEN", 0x02, 0x02 },
- { "SGNSEN", 0x04, 0x04 },
- { "CMCNSEN", 0x08, 0x08 },
- { "OVLYNSEN", 0x10, 0x10 },
- { "MSINSEN", 0x20, 0x20 }
-};
-
-int
-ahd_nsenable_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NSENABLE_parse_table, 6, "NSENABLE",
- 0x91, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t CMCRXMSG1_parse_table[] = {
- { "CBNUM", 0xff, 0xff }
-};
-
-int
-ahd_cmcrxmsg1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CMCRXMSG1_parse_table, 1, "CMCRXMSG1",
- 0x91, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DCHRXMSG1_parse_table[] = {
- { "CBNUM", 0xff, 0xff }
-};
-
-int
-ahd_dchrxmsg1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(DCHRXMSG1_parse_table, 1, "DCHRXMSG1",
- 0x91, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DCHRXMSG2_parse_table[] = {
- { "MINDEX", 0xff, 0xff }
-};
-
-int
-ahd_dchrxmsg2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(DCHRXMSG2_parse_table, 1, "DCHRXMSG2",
- 0x92, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t CMCRXMSG2_parse_table[] = {
- { "MINDEX", 0xff, 0xff }
-};
-
-int
-ahd_cmcrxmsg2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CMCRXMSG2_parse_table, 1, "CMCRXMSG2",
- 0x92, regvalue, cur_col, wrap));
-}
-
-int
-ahd_ost_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "OST",
- 0x92, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t OVLYRXMSG2_parse_table[] = {
- { "MINDEX", 0xff, 0xff }
-};
-
-int
-ahd_ovlyrxmsg2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(OVLYRXMSG2_parse_table, 1, "OVLYRXMSG2",
- 0x92, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DCHRXMSG3_parse_table[] = {
- { "MCLASS", 0x0f, 0x0f }
-};
-
-int
-ahd_dchrxmsg3_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(DCHRXMSG3_parse_table, 1, "DCHRXMSG3",
- 0x93, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t OVLYRXMSG3_parse_table[] = {
- { "MCLASS", 0x0f, 0x0f }
-};
-
-int
-ahd_ovlyrxmsg3_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(OVLYRXMSG3_parse_table, 1, "OVLYRXMSG3",
- 0x93, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t CMCRXMSG3_parse_table[] = {
- { "MCLASS", 0x0f, 0x0f }
-};
-
-int
-ahd_cmcrxmsg3_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CMCRXMSG3_parse_table, 1, "CMCRXMSG3",
- 0x93, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t PCIXCTL_parse_table[] = {
+static const ahd_reg_parse_entry_t PCIXCTL_parse_table[] = {
  { "CMPABCDIS", 0x01, 0x01 },
  { "TSCSERREN", 0x02, 0x02 },
  { "SRSPDPEEN", 0x04, 0x04 },
@@ -2062,46 +1468,7 @@ ahd_pcixctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x93, regvalue, cur_col, wrap));
 }
 
-int
-ahd_ovlyseqbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "OVLYSEQBCNT",
- 0x94, regvalue, cur_col, wrap));
-}
-
-int
-ahd_dchseqbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "DCHSEQBCNT",
- 0x94, regvalue, cur_col, wrap));
-}
-
-int
-ahd_cmcseqbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "CMCSEQBCNT",
- 0x94, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t CMCSPLTSTAT0_parse_table[] = {
- { "RXSPLTRSP", 0x01, 0x01 },
- { "RXSCEMSG", 0x02, 0x02 },
- { "RXOVRUN", 0x04, 0x04 },
- { "CNTNOTCMPLT", 0x08, 0x08 },
- { "SCDATBUCKET", 0x10, 0x10 },
- { "SCADERR", 0x20, 0x20 },
- { "SCBCERR", 0x40, 0x40 },
- { "STAETERM", 0x80, 0x80 }
-};
-
-int
-ahd_cmcspltstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CMCSPLTSTAT0_parse_table, 8, "CMCSPLTSTAT0",
- 0x96, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DCHSPLTSTAT0_parse_table[] = {
+static const ahd_reg_parse_entry_t DCHSPLTSTAT0_parse_table[] = {
  { "RXSPLTRSP", 0x01, 0x01 },
  { "RXSCEMSG", 0x02, 0x02 },
  { "RXOVRUN", 0x04, 0x04 },
@@ -2119,47 +1486,7 @@ ahd_dchspltstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x96, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t OVLYSPLTSTAT0_parse_table[] = {
- { "RXSPLTRSP", 0x01, 0x01 },
- { "RXSCEMSG", 0x02, 0x02 },
- { "RXOVRUN", 0x04, 0x04 },
- { "CNTNOTCMPLT", 0x08, 0x08 },
- { "SCDATBUCKET", 0x10, 0x10 },
- { "SCADERR", 0x20, 0x20 },
- { "SCBCERR", 0x40, 0x40 },
- { "STAETERM", 0x80, 0x80 }
-};
-
-int
-ahd_ovlyspltstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(OVLYSPLTSTAT0_parse_table, 8, "OVLYSPLTSTAT0",
- 0x96, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t CMCSPLTSTAT1_parse_table[] = {
- { "RXDATABUCKET", 0x01, 0x01 }
-};
-
-int
-ahd_cmcspltstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CMCSPLTSTAT1_parse_table, 1, "CMCSPLTSTAT1",
- 0x97, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t OVLYSPLTSTAT1_parse_table[] = {
- { "RXDATABUCKET", 0x01, 0x01 }
-};
-
-int
-ahd_ovlyspltstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(OVLYSPLTSTAT1_parse_table, 1, "OVLYSPLTSTAT1",
- 0x97, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DCHSPLTSTAT1_parse_table[] = {
+static const ahd_reg_parse_entry_t DCHSPLTSTAT1_parse_table[] = {
  { "RXDATABUCKET", 0x01, 0x01 }
 };
 
@@ -2170,139 +1497,7 @@ ahd_dchspltstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x97, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SGRXMSG0_parse_table[] = {
- { "CFNUM", 0x07, 0x07 },
- { "CDNUM", 0xf8, 0xf8 }
-};
-
-int
-ahd_sgrxmsg0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SGRXMSG0_parse_table, 2, "SGRXMSG0",
- 0x98, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SLVSPLTOUTADR0_parse_table[] = {
- { "LOWER_ADDR", 0x7f, 0x7f }
-};
-
-int
-ahd_slvspltoutadr0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SLVSPLTOUTADR0_parse_table, 1, "SLVSPLTOUTADR0",
- 0x98, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SGRXMSG1_parse_table[] = {
- { "CBNUM", 0xff, 0xff }
-};
-
-int
-ahd_sgrxmsg1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SGRXMSG1_parse_table, 1, "SGRXMSG1",
- 0x99, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SLVSPLTOUTADR1_parse_table[] = {
- { "REQ_FNUM", 0x07, 0x07 },
- { "REQ_DNUM", 0xf8, 0xf8 }
-};
-
-int
-ahd_slvspltoutadr1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SLVSPLTOUTADR1_parse_table, 2, "SLVSPLTOUTADR1",
- 0x99, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SGRXMSG2_parse_table[] = {
- { "MINDEX", 0xff, 0xff }
-};
-
-int
-ahd_sgrxmsg2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SGRXMSG2_parse_table, 1, "SGRXMSG2",
- 0x9a, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SLVSPLTOUTADR2_parse_table[] = {
- { "REQ_BNUM", 0xff, 0xff }
-};
-
-int
-ahd_slvspltoutadr2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SLVSPLTOUTADR2_parse_table, 1, "SLVSPLTOUTADR2",
- 0x9a, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SGRXMSG3_parse_table[] = {
- { "MCLASS", 0x0f, 0x0f }
-};
-
-int
-ahd_sgrxmsg3_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SGRXMSG3_parse_table, 1, "SGRXMSG3",
- 0x9b, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SLVSPLTOUTADR3_parse_table[] = {
- { "RLXORD", 0x10, 0x10 },
- { "TAG_NUM", 0x1f, 0x1f }
-};
-
-int
-ahd_slvspltoutadr3_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SLVSPLTOUTADR3_parse_table, 2, "SLVSPLTOUTADR3",
- 0x9b, regvalue, cur_col, wrap));
-}
-
-int
-ahd_sgseqbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SGSEQBCNT",
- 0x9c, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SLVSPLTOUTATTR0_parse_table[] = {
- { "LOWER_BCNT", 0xff, 0xff }
-};
-
-int
-ahd_slvspltoutattr0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SLVSPLTOUTATTR0_parse_table, 1, "SLVSPLTOUTATTR0",
- 0x9c, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SLVSPLTOUTATTR1_parse_table[] = {
- { "CMPLT_FNUM", 0x07, 0x07 },
- { "CMPLT_DNUM", 0xf8, 0xf8 }
-};
-
-int
-ahd_slvspltoutattr1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SLVSPLTOUTATTR1_parse_table, 2, "SLVSPLTOUTATTR1",
- 0x9d, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SLVSPLTOUTATTR2_parse_table[] = {
- { "CMPLT_BNUM", 0xff, 0xff }
-};
-
-int
-ahd_slvspltoutattr2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SLVSPLTOUTATTR2_parse_table, 1, "SLVSPLTOUTATTR2",
- 0x9e, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SGSPLTSTAT0_parse_table[] = {
+static const ahd_reg_parse_entry_t SGSPLTSTAT0_parse_table[] = {
  { "RXSPLTRSP", 0x01, 0x01 },
  { "RXSCEMSG", 0x02, 0x02 },
  { "RXOVRUN", 0x04, 0x04 },
@@ -2320,7 +1515,7 @@ ahd_sgspltstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x9e, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SGSPLTSTAT1_parse_table[] = {
+static const ahd_reg_parse_entry_t SGSPLTSTAT1_parse_table[] = {
  { "RXDATABUCKET", 0x01, 0x01 }
 };
 
@@ -2331,19 +1526,7 @@ ahd_sgspltstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0x9f, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SFUNCT_parse_table[] = {
- { "TEST_NUM", 0x0f, 0x0f },
- { "TEST_GROUP", 0xf0, 0xf0 }
-};
-
-int
-ahd_sfunct_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SFUNCT_parse_table, 2, "SFUNCT",
- 0x9f, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DF0PCISTAT_parse_table[] = {
+static const ahd_reg_parse_entry_t DF0PCISTAT_parse_table[] = {
  { "DPR", 0x01, 0x01 },
  { "TWATERR", 0x02, 0x02 },
  { "RDPERR", 0x04, 0x04 },
@@ -2368,83 +1551,6 @@ ahd_reg0_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0xa0, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t DF1PCISTAT_parse_table[] = {
- { "DPR", 0x01, 0x01 },
- { "TWATERR", 0x02, 0x02 },
- { "RDPERR", 0x04, 0x04 },
- { "SCAAPERR", 0x08, 0x08 },
- { "RTA", 0x10, 0x10 },
- { "RMA", 0x20, 0x20 },
- { "SSE", 0x40, 0x40 },
- { "DPE", 0x80, 0x80 }
-};
-
-int
-ahd_df1pcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(DF1PCISTAT_parse_table, 8, "DF1PCISTAT",
- 0xa1, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SGPCISTAT_parse_table[] = {
- { "DPR", 0x01, 0x01 },
- { "RDPERR", 0x04, 0x04 },
- { "SCAAPERR", 0x08, 0x08 },
- { "RTA", 0x10, 0x10 },
- { "RMA", 0x20, 0x20 },
- { "SSE", 0x40, 0x40 },
- { "DPE", 0x80, 0x80 }
-};
-
-int
-ahd_sgpcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SGPCISTAT_parse_table, 7, "SGPCISTAT",
- 0xa2, regvalue, cur_col, wrap));
-}
-
-int
-ahd_reg1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "REG1",
- 0xa2, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t CMCPCISTAT_parse_table[] = {
- { "DPR", 0x01, 0x01 },
- { "TWATERR", 0x02, 0x02 },
- { "RDPERR", 0x04, 0x04 },
- { "SCAAPERR", 0x08, 0x08 },
- { "RTA", 0x10, 0x10 },
- { "RMA", 0x20, 0x20 },
- { "SSE", 0x40, 0x40 },
- { "DPE", 0x80, 0x80 }
-};
-
-int
-ahd_cmcpcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CMCPCISTAT_parse_table, 8, "CMCPCISTAT",
- 0xa3, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t OVLYPCISTAT_parse_table[] = {
- { "DPR", 0x01, 0x01 },
- { "RDPERR", 0x04, 0x04 },
- { "SCAAPERR", 0x08, 0x08 },
- { "RTA", 0x10, 0x10 },
- { "RMA", 0x20, 0x20 },
- { "SSE", 0x40, 0x40 },
- { "DPE", 0x80, 0x80 }
-};
-
-int
-ahd_ovlypcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(OVLYPCISTAT_parse_table, 7, "OVLYPCISTAT",
- 0xa4, regvalue, cur_col, wrap));
-}
-
 int
 ahd_reg_isr_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
@@ -2452,7 +1558,7 @@ ahd_reg_isr_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0xa4, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SG_STATE_parse_table[] = {
+static const ahd_reg_parse_entry_t SG_STATE_parse_table[] = {
  { "SEGS_AVAIL", 0x01, 0x01 },
  { "LOADING_NEEDED", 0x02, 0x02 },
  { "FETCH_INPROG", 0x04, 0x04 }
@@ -2465,23 +1571,7 @@ ahd_sg_state_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0xa6, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t MSIPCISTAT_parse_table[] = {
- { "DPR", 0x01, 0x01 },
- { "TWATERR", 0x02, 0x02 },
- { "CLRPENDMSI", 0x08, 0x08 },
- { "RTA", 0x10, 0x10 },
- { "RMA", 0x20, 0x20 },
- { "SSE", 0x40, 0x40 }
-};
-
-int
-ahd_msipcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(MSIPCISTAT_parse_table, 6, "MSIPCISTAT",
- 0xa6, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t TARGPCISTAT_parse_table[] = {
+static const ahd_reg_parse_entry_t TARGPCISTAT_parse_table[] = {
  { "TWATERR", 0x02, 0x02 },
  { "STA", 0x08, 0x08 },
  { "SSE", 0x40, 0x40 },
@@ -2496,27 +1586,13 @@ ahd_targpcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
 }
 
 int
-ahd_data_count_odd_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "DATA_COUNT_ODD",
- 0xa7, regvalue, cur_col, wrap));
-}
-
-int
 ahd_scbptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
  return (ahd_print_register(NULL, 0, "SCBPTR",
  0xa8, regvalue, cur_col, wrap));
 }
 
-int
-ahd_ccscbacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "CCSCBACNT",
- 0xab, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SCBAUTOPTR_parse_table[] = {
+static const ahd_reg_parse_entry_t SCBAUTOPTR_parse_table[] = {
  { "SCBPTR_OFF", 0x07, 0x07 },
  { "SCBPTR_ADDR", 0x38, 0x38 },
  { "AUSCBPTR_EN", 0x80, 0x80 }
@@ -2537,36 +1613,13 @@ ahd_ccsgaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
 }
 
 int
-ahd_ccscbadr_bk_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "CCSCBADR_BK",
- 0xac, regvalue, cur_col, wrap));
-}
-
-int
 ahd_ccscbaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
  return (ahd_print_register(NULL, 0, "CCSCBADDR",
  0xac, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t CMC_RAMBIST_parse_table[] = {
- { "CMC_BUFFER_BIST_EN", 0x01, 0x01 },
- { "CMC_BUFFER_BIST_FAIL",0x02, 0x02 },
- { "SG_BIST_EN", 0x10, 0x10 },
- { "SG_BIST_FAIL", 0x20, 0x20 },
- { "SCBRAMBIST_FAIL", 0x40, 0x40 },
- { "SG_ELEMENT_SIZE", 0x80, 0x80 }
-};
-
-int
-ahd_cmc_rambist_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CMC_RAMBIST_parse_table, 6, "CMC_RAMBIST",
- 0xad, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t CCSCBCTL_parse_table[] = {
+static const ahd_reg_parse_entry_t CCSCBCTL_parse_table[] = {
  { "CCSCBRESET", 0x01, 0x01 },
  { "CCSCBDIR", 0x04, 0x04 },
  { "CCSCBEN", 0x08, 0x08 },
@@ -2582,7 +1635,7 @@ ahd_ccscbctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
  0xad, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t CCSGCTL_parse_table[] = {
+static const ahd_reg_parse_entry_t CCSGCTL_parse_table[] = {
  { "CCSGRESET", 0x01, 0x01 },
  { "SG_FETCH_REQ", 0x02, 0x02 },
  { "CCSGENACK", 0x08, 0x08 },
@@ -2606,13 +1659,6 @@ ahd_ccsgram_print(u_int regvalue, u_int *cur_col, u_int wrap)
 }
 
 int
-ahd_flexadr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "FLEXADR",
- 0xb0, regvalue, cur_col, wrap));
-}
-
-int
 ahd_ccscbram_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
  return (ahd_print_register(NULL, 0, "CCSCBRAM",
@@ -2620,39 +1666,13 @@ ahd_ccscbram_print(u_int regvalue, u_int *cur_col, u_int wrap)
 }
 
 int
-ahd_flexcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "FLEXCNT",
- 0xb3, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t FLEXDMASTAT_parse_table[] = {
- { "FLEXDMADONE", 0x01, 0x01 },
- { "FLEXDMAERR", 0x02, 0x02 }
-};
-
-int
-ahd_flexdmastat_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(FLEXDMASTAT_parse_table, 2, "FLEXDMASTAT",
- 0xb5, regvalue, cur_col, wrap));
-}
-
-int
-ahd_flexdata_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "FLEXDATA",
- 0xb6, regvalue, cur_col, wrap));
-}
-
-int
 ahd_brddat_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
  return (ahd_print_register(NULL, 0, "BRDDAT",
  0xb8, regvalue, cur_col, wrap));
 }
2654 1674
2655static ahd_reg_parse_entry_t BRDCTL_parse_table[] = { 1675static const ahd_reg_parse_entry_t BRDCTL_parse_table[] = {
2656 { "BRDSTB", 0x01, 0x01 }, 1676 { "BRDSTB", 0x01, 0x01 },
2657 { "BRDRW", 0x02, 0x02 }, 1677 { "BRDRW", 0x02, 0x02 },
2658 { "BRDEN", 0x04, 0x04 }, 1678 { "BRDEN", 0x04, 0x04 },
@@ -2682,7 +1702,7 @@ ahd_seedat_print(u_int regvalue, u_int *cur_col, u_int wrap)
2682 0xbc, regvalue, cur_col, wrap)); 1702 0xbc, regvalue, cur_col, wrap));
2683} 1703}
2684 1704
2685static ahd_reg_parse_entry_t SEECTL_parse_table[] = { 1705static const ahd_reg_parse_entry_t SEECTL_parse_table[] = {
2686 { "SEEOP_ERAL", 0x40, 0x70 }, 1706 { "SEEOP_ERAL", 0x40, 0x70 },
2687 { "SEEOP_WRITE", 0x50, 0x70 }, 1707 { "SEEOP_WRITE", 0x50, 0x70 },
2688 { "SEEOP_READ", 0x60, 0x70 }, 1708 { "SEEOP_READ", 0x60, 0x70 },
@@ -2702,7 +1722,7 @@ ahd_seectl_print(u_int regvalue, u_int *cur_col, u_int wrap)
2702 0xbe, regvalue, cur_col, wrap)); 1722 0xbe, regvalue, cur_col, wrap));
2703} 1723}
2704 1724
2705static ahd_reg_parse_entry_t SEESTAT_parse_table[] = { 1725static const ahd_reg_parse_entry_t SEESTAT_parse_table[] = {
2706 { "SEESTART", 0x01, 0x01 }, 1726 { "SEESTART", 0x01, 0x01 },
2707 { "SEEBUSY", 0x02, 0x02 }, 1727 { "SEEBUSY", 0x02, 0x02 },
2708 { "SEEARBACK", 0x04, 0x04 }, 1728 { "SEEARBACK", 0x04, 0x04 },
@@ -2718,34 +1738,7 @@ ahd_seestat_print(u_int regvalue, u_int *cur_col, u_int wrap)
2718 0xbe, regvalue, cur_col, wrap)); 1738 0xbe, regvalue, cur_col, wrap));
2719} 1739}
2720 1740
2721int 1741static const ahd_reg_parse_entry_t DSPDATACTL_parse_table[] = {
2722ahd_scbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
2723{
2724 return (ahd_print_register(NULL, 0, "SCBCNT",
2725 0xbf, regvalue, cur_col, wrap));
2726}
2727
2728int
2729ahd_dfwaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
2730{
2731 return (ahd_print_register(NULL, 0, "DFWADDR",
2732 0xc0, regvalue, cur_col, wrap));
2733}
2734
2735static ahd_reg_parse_entry_t DSPFLTRCTL_parse_table[] = {
2736 { "DSPFCNTSEL", 0x0f, 0x0f },
2737 { "EDGESENSE", 0x10, 0x10 },
2738 { "FLTRDISABLE", 0x20, 0x20 }
2739};
2740
2741int
2742ahd_dspfltrctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
2743{
2744 return (ahd_print_register(DSPFLTRCTL_parse_table, 3, "DSPFLTRCTL",
2745 0xc0, regvalue, cur_col, wrap));
2746}
2747
2748static ahd_reg_parse_entry_t DSPDATACTL_parse_table[] = {
2749 { "XMITOFFSTDIS", 0x02, 0x02 }, 1742 { "XMITOFFSTDIS", 0x02, 0x02 },
2750 { "RCVROFFSTDIS", 0x04, 0x04 }, 1743 { "RCVROFFSTDIS", 0x04, 0x04 },
2751 { "DESQDIS", 0x10, 0x10 }, 1744 { "DESQDIS", 0x10, 0x10 },
@@ -2760,44 +1753,13 @@ ahd_dspdatactl_print(u_int regvalue, u_int *cur_col, u_int wrap)
2760} 1753}
2761 1754
2762int 1755int
2763ahd_dfraddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
2764{
2765 return (ahd_print_register(NULL, 0, "DFRADDR",
2766 0xc2, regvalue, cur_col, wrap));
2767}
2768
2769static ahd_reg_parse_entry_t DSPREQCTL_parse_table[] = {
2770 { "MANREQDLY", 0x3f, 0x3f },
2771 { "MANREQCTL", 0xc0, 0xc0 }
2772};
2773
2774int
2775ahd_dspreqctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
2776{
2777 return (ahd_print_register(DSPREQCTL_parse_table, 2, "DSPREQCTL",
2778 0xc2, regvalue, cur_col, wrap));
2779}
2780
2781static ahd_reg_parse_entry_t DSPACKCTL_parse_table[] = {
2782 { "MANACKDLY", 0x3f, 0x3f },
2783 { "MANACKCTL", 0xc0, 0xc0 }
2784};
2785
2786int
2787ahd_dspackctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
2788{
2789 return (ahd_print_register(DSPACKCTL_parse_table, 2, "DSPACKCTL",
2790 0xc3, regvalue, cur_col, wrap));
2791}
2792
2793int
2794ahd_dfdat_print(u_int regvalue, u_int *cur_col, u_int wrap) 1756ahd_dfdat_print(u_int regvalue, u_int *cur_col, u_int wrap)
2795{ 1757{
2796 return (ahd_print_register(NULL, 0, "DFDAT", 1758 return (ahd_print_register(NULL, 0, "DFDAT",
2797 0xc4, regvalue, cur_col, wrap)); 1759 0xc4, regvalue, cur_col, wrap));
2798} 1760}
2799 1761
2800static ahd_reg_parse_entry_t DSPSELECT_parse_table[] = { 1762static const ahd_reg_parse_entry_t DSPSELECT_parse_table[] = {
2801 { "DSPSEL", 0x1f, 0x1f }, 1763 { "DSPSEL", 0x1f, 0x1f },
2802 { "AUTOINCEN", 0x80, 0x80 } 1764 { "AUTOINCEN", 0x80, 0x80 }
2803}; 1765};
@@ -2809,7 +1771,7 @@ ahd_dspselect_print(u_int regvalue, u_int *cur_col, u_int wrap)
2809 0xc4, regvalue, cur_col, wrap)); 1771 0xc4, regvalue, cur_col, wrap));
2810} 1772}
2811 1773
2812static ahd_reg_parse_entry_t WRTBIASCTL_parse_table[] = { 1774static const ahd_reg_parse_entry_t WRTBIASCTL_parse_table[] = {
2813 { "XMITMANVAL", 0x3f, 0x3f }, 1775 { "XMITMANVAL", 0x3f, 0x3f },
2814 { "AUTOXBCDIS", 0x80, 0x80 } 1776 { "AUTOXBCDIS", 0x80, 0x80 }
2815}; 1777};
@@ -2821,91 +1783,7 @@ ahd_wrtbiasctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
2821 0xc5, regvalue, cur_col, wrap)); 1783 0xc5, regvalue, cur_col, wrap));
2822} 1784}
2823 1785
2824static ahd_reg_parse_entry_t RCVRBIOSCTL_parse_table[] = { 1786static const ahd_reg_parse_entry_t SEQCTL0_parse_table[] = {
2825 { "RCVRMANVAL", 0x3f, 0x3f },
2826 { "AUTORBCDIS", 0x80, 0x80 }
2827};
2828
2829int
2830ahd_rcvrbiosctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
2831{
2832 return (ahd_print_register(RCVRBIOSCTL_parse_table, 2, "RCVRBIOSCTL",
2833 0xc6, regvalue, cur_col, wrap));
2834}
2835
2836int
2837ahd_wrtbiascalc_print(u_int regvalue, u_int *cur_col, u_int wrap)
2838{
2839 return (ahd_print_register(NULL, 0, "WRTBIASCALC",
2840 0xc7, regvalue, cur_col, wrap));
2841}
2842
2843int
2844ahd_rcvrbiascalc_print(u_int regvalue, u_int *cur_col, u_int wrap)
2845{
2846 return (ahd_print_register(NULL, 0, "RCVRBIASCALC",
2847 0xc8, regvalue, cur_col, wrap));
2848}
2849
2850int
2851ahd_dfptrs_print(u_int regvalue, u_int *cur_col, u_int wrap)
2852{
2853 return (ahd_print_register(NULL, 0, "DFPTRS",
2854 0xc8, regvalue, cur_col, wrap));
2855}
2856
2857int
2858ahd_skewcalc_print(u_int regvalue, u_int *cur_col, u_int wrap)
2859{
2860 return (ahd_print_register(NULL, 0, "SKEWCALC",
2861 0xc9, regvalue, cur_col, wrap));
2862}
2863
2864int
2865ahd_dfbkptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
2866{
2867 return (ahd_print_register(NULL, 0, "DFBKPTR",
2868 0xc9, regvalue, cur_col, wrap));
2869}
2870
2871static ahd_reg_parse_entry_t DFDBCTL_parse_table[] = {
2872 { "DFF_RAMBIST_EN", 0x01, 0x01 },
2873 { "DFF_RAMBIST_DONE", 0x02, 0x02 },
2874 { "DFF_RAMBIST_FAIL", 0x04, 0x04 },
2875 { "DFF_DIR_ERR", 0x08, 0x08 },
2876 { "DFF_CIO_RD_RDY", 0x10, 0x10 },
2877 { "DFF_CIO_WR_RDY", 0x20, 0x20 }
2878};
2879
2880int
2881ahd_dfdbctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
2882{
2883 return (ahd_print_register(DFDBCTL_parse_table, 6, "DFDBCTL",
2884 0xcb, regvalue, cur_col, wrap));
2885}
2886
2887int
2888ahd_dfscnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
2889{
2890 return (ahd_print_register(NULL, 0, "DFSCNT",
2891 0xcc, regvalue, cur_col, wrap));
2892}
2893
2894int
2895ahd_dfbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
2896{
2897 return (ahd_print_register(NULL, 0, "DFBCNT",
2898 0xce, regvalue, cur_col, wrap));
2899}
2900
2901int
2902ahd_ovlyaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
2903{
2904 return (ahd_print_register(NULL, 0, "OVLYADDR",
2905 0xd4, regvalue, cur_col, wrap));
2906}
2907
2908static ahd_reg_parse_entry_t SEQCTL0_parse_table[] = {
2909 { "LOADRAM", 0x01, 0x01 }, 1787 { "LOADRAM", 0x01, 0x01 },
2910 { "SEQRESET", 0x02, 0x02 }, 1788 { "SEQRESET", 0x02, 0x02 },
2911 { "STEP", 0x04, 0x04 }, 1789 { "STEP", 0x04, 0x04 },
@@ -2923,21 +1801,7 @@ ahd_seqctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
2923 0xd6, regvalue, cur_col, wrap)); 1801 0xd6, regvalue, cur_col, wrap));
2924} 1802}
2925 1803
2926static ahd_reg_parse_entry_t SEQCTL1_parse_table[] = { 1804static const ahd_reg_parse_entry_t FLAGS_parse_table[] = {
2927 { "RAMBIST_EN", 0x01, 0x01 },
2928 { "RAMBIST_FAIL", 0x02, 0x02 },
2929 { "RAMBIST_DONE", 0x04, 0x04 },
2930 { "OVRLAY_DATA_CHK", 0x08, 0x08 }
2931};
2932
2933int
2934ahd_seqctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
2935{
2936 return (ahd_print_register(SEQCTL1_parse_table, 4, "SEQCTL1",
2937 0xd7, regvalue, cur_col, wrap));
2938}
2939
2940static ahd_reg_parse_entry_t FLAGS_parse_table[] = {
2941 { "CARRY", 0x01, 0x01 }, 1805 { "CARRY", 0x01, 0x01 },
2942 { "ZERO", 0x02, 0x02 } 1806 { "ZERO", 0x02, 0x02 }
2943}; 1807};
@@ -2949,7 +1813,7 @@ ahd_flags_print(u_int regvalue, u_int *cur_col, u_int wrap)
2949 0xd8, regvalue, cur_col, wrap)); 1813 0xd8, regvalue, cur_col, wrap));
2950} 1814}
2951 1815
2952static ahd_reg_parse_entry_t SEQINTCTL_parse_table[] = { 1816static const ahd_reg_parse_entry_t SEQINTCTL_parse_table[] = {
2953 { "IRET", 0x01, 0x01 }, 1817 { "IRET", 0x01, 0x01 },
2954 { "INTMASK1", 0x02, 0x02 }, 1818 { "INTMASK1", 0x02, 0x02 },
2955 { "INTMASK2", 0x04, 0x04 }, 1819 { "INTMASK2", 0x04, 0x04 },
@@ -3002,24 +1866,6 @@ ahd_dindex_print(u_int regvalue, u_int *cur_col, u_int wrap)
3002} 1866}
3003 1867
3004int 1868int
3005ahd_brkaddr0_print(u_int regvalue, u_int *cur_col, u_int wrap)
3006{
3007 return (ahd_print_register(NULL, 0, "BRKADDR0",
3008 0xe6, regvalue, cur_col, wrap));
3009}
3010
3011static ahd_reg_parse_entry_t BRKADDR1_parse_table[] = {
3012 { "BRKDIS", 0x80, 0x80 }
3013};
3014
3015int
3016ahd_brkaddr1_print(u_int regvalue, u_int *cur_col, u_int wrap)
3017{
3018 return (ahd_print_register(BRKADDR1_parse_table, 1, "BRKADDR1",
3019 0xe6, regvalue, cur_col, wrap));
3020}
3021
3022int
3023ahd_allones_print(u_int regvalue, u_int *cur_col, u_int wrap) 1869ahd_allones_print(u_int regvalue, u_int *cur_col, u_int wrap)
3024{ 1870{
3025 return (ahd_print_register(NULL, 0, "ALLONES", 1871 return (ahd_print_register(NULL, 0, "ALLONES",
@@ -3055,13 +1901,6 @@ ahd_dindir_print(u_int regvalue, u_int *cur_col, u_int wrap)
3055} 1901}
3056 1902
3057int 1903int
3058ahd_function1_print(u_int regvalue, u_int *cur_col, u_int wrap)
3059{
3060 return (ahd_print_register(NULL, 0, "FUNCTION1",
3061 0xf0, regvalue, cur_col, wrap));
3062}
3063
3064int
3065ahd_stack_print(u_int regvalue, u_int *cur_col, u_int wrap) 1904ahd_stack_print(u_int regvalue, u_int *cur_col, u_int wrap)
3066{ 1905{
3067 return (ahd_print_register(NULL, 0, "STACK", 1906 return (ahd_print_register(NULL, 0, "STACK",
@@ -3083,13 +1922,6 @@ ahd_curaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
3083} 1922}
3084 1923
3085int 1924int
3086ahd_lastaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
3087{
3088 return (ahd_print_register(NULL, 0, "LASTADDR",
3089 0xf6, regvalue, cur_col, wrap));
3090}
3091
3092int
3093ahd_intvec2_addr_print(u_int regvalue, u_int *cur_col, u_int wrap) 1925ahd_intvec2_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
3094{ 1926{
3095 return (ahd_print_register(NULL, 0, "INTVEC2_ADDR", 1927 return (ahd_print_register(NULL, 0, "INTVEC2_ADDR",
@@ -3111,23 +1943,16 @@ ahd_accum_save_print(u_int regvalue, u_int *cur_col, u_int wrap)
3111} 1943}
3112 1944
3113int 1945int
3114ahd_waiting_scb_tails_print(u_int regvalue, u_int *cur_col, u_int wrap) 1946ahd_sram_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
3115{
3116 return (ahd_print_register(NULL, 0, "WAITING_SCB_TAILS",
3117 0x100, regvalue, cur_col, wrap));
3118}
3119
3120int
3121ahd_ahd_pci_config_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
3122{ 1947{
3123 return (ahd_print_register(NULL, 0, "AHD_PCI_CONFIG_BASE", 1948 return (ahd_print_register(NULL, 0, "SRAM_BASE",
3124 0x100, regvalue, cur_col, wrap)); 1949 0x100, regvalue, cur_col, wrap));
3125} 1950}
3126 1951
3127int 1952int
3128ahd_sram_base_print(u_int regvalue, u_int *cur_col, u_int wrap) 1953ahd_waiting_scb_tails_print(u_int regvalue, u_int *cur_col, u_int wrap)
3129{ 1954{
3130 return (ahd_print_register(NULL, 0, "SRAM_BASE", 1955 return (ahd_print_register(NULL, 0, "WAITING_SCB_TAILS",
3131 0x100, regvalue, cur_col, wrap)); 1956 0x100, regvalue, cur_col, wrap));
3132} 1957}
3133 1958
@@ -3215,7 +2040,7 @@ ahd_msg_out_print(u_int regvalue, u_int *cur_col, u_int wrap)
3215 0x137, regvalue, cur_col, wrap)); 2040 0x137, regvalue, cur_col, wrap));
3216} 2041}
3217 2042
3218static ahd_reg_parse_entry_t DMAPARAMS_parse_table[] = { 2043static const ahd_reg_parse_entry_t DMAPARAMS_parse_table[] = {
3219 { "FIFORESET", 0x01, 0x01 }, 2044 { "FIFORESET", 0x01, 0x01 },
3220 { "FIFOFLUSH", 0x02, 0x02 }, 2045 { "FIFOFLUSH", 0x02, 0x02 },
3221 { "DIRECTION", 0x04, 0x04 }, 2046 { "DIRECTION", 0x04, 0x04 },
@@ -3235,7 +2060,7 @@ ahd_dmaparams_print(u_int regvalue, u_int *cur_col, u_int wrap)
3235 0x138, regvalue, cur_col, wrap)); 2060 0x138, regvalue, cur_col, wrap));
3236} 2061}
3237 2062
3238static ahd_reg_parse_entry_t SEQ_FLAGS_parse_table[] = { 2063static const ahd_reg_parse_entry_t SEQ_FLAGS_parse_table[] = {
3239 { "NO_DISCONNECT", 0x01, 0x01 }, 2064 { "NO_DISCONNECT", 0x01, 0x01 },
3240 { "SPHASE_PENDING", 0x02, 0x02 }, 2065 { "SPHASE_PENDING", 0x02, 0x02 },
3241 { "DPHASE_PENDING", 0x04, 0x04 }, 2066 { "DPHASE_PENDING", 0x04, 0x04 },
@@ -3268,7 +2093,7 @@ ahd_saved_lun_print(u_int regvalue, u_int *cur_col, u_int wrap)
3268 0x13b, regvalue, cur_col, wrap)); 2093 0x13b, regvalue, cur_col, wrap));
3269} 2094}
3270 2095
3271static ahd_reg_parse_entry_t LASTPHASE_parse_table[] = { 2096static const ahd_reg_parse_entry_t LASTPHASE_parse_table[] = {
3272 { "P_DATAOUT", 0x00, 0xe0 }, 2097 { "P_DATAOUT", 0x00, 0xe0 },
3273 { "P_DATAOUT_DT", 0x20, 0xe0 }, 2098 { "P_DATAOUT_DT", 0x20, 0xe0 },
3274 { "P_DATAIN", 0x40, 0xe0 }, 2099 { "P_DATAIN", 0x40, 0xe0 },
@@ -3326,7 +2151,7 @@ ahd_qoutfifo_next_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
3326 0x144, regvalue, cur_col, wrap)); 2151 0x144, regvalue, cur_col, wrap));
3327} 2152}
3328 2153
3329static ahd_reg_parse_entry_t ARG_1_parse_table[] = { 2154static const ahd_reg_parse_entry_t ARG_1_parse_table[] = {
3330 { "CONT_MSG_LOOP_TARG", 0x02, 0x02 }, 2155 { "CONT_MSG_LOOP_TARG", 0x02, 0x02 },
3331 { "CONT_MSG_LOOP_READ", 0x03, 0x03 }, 2156 { "CONT_MSG_LOOP_READ", 0x03, 0x03 },
3332 { "CONT_MSG_LOOP_WRITE",0x04, 0x04 }, 2157 { "CONT_MSG_LOOP_WRITE",0x04, 0x04 },
@@ -3358,7 +2183,7 @@ ahd_last_msg_print(u_int regvalue, u_int *cur_col, u_int wrap)
3358 0x14a, regvalue, cur_col, wrap)); 2183 0x14a, regvalue, cur_col, wrap));
3359} 2184}
3360 2185
3361static ahd_reg_parse_entry_t SCSISEQ_TEMPLATE_parse_table[] = { 2186static const ahd_reg_parse_entry_t SCSISEQ_TEMPLATE_parse_table[] = {
3362 { "ALTSTIM", 0x01, 0x01 }, 2187 { "ALTSTIM", 0x01, 0x01 },
3363 { "ENAUTOATNP", 0x02, 0x02 }, 2188 { "ENAUTOATNP", 0x02, 0x02 },
3364 { "MANUALP", 0x0c, 0x0c }, 2189 { "MANUALP", 0x0c, 0x0c },
@@ -3381,7 +2206,7 @@ ahd_initiator_tag_print(u_int regvalue, u_int *cur_col, u_int wrap)
3381 0x14c, regvalue, cur_col, wrap)); 2206 0x14c, regvalue, cur_col, wrap));
3382} 2207}
3383 2208
3384static ahd_reg_parse_entry_t SEQ_FLAGS2_parse_table[] = { 2209static const ahd_reg_parse_entry_t SEQ_FLAGS2_parse_table[] = {
3385 { "PENDING_MK_MESSAGE", 0x01, 0x01 }, 2210 { "PENDING_MK_MESSAGE", 0x01, 0x01 },
3386 { "TARGET_MSG_PENDING", 0x02, 0x02 }, 2211 { "TARGET_MSG_PENDING", 0x02, 0x02 },
3387 { "SELECTOUT_QFROZEN", 0x04, 0x04 } 2212 { "SELECTOUT_QFROZEN", 0x04, 0x04 }
@@ -3465,20 +2290,20 @@ ahd_mk_message_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap)
3465} 2290}
3466 2291
3467int 2292int
3468ahd_scb_base_print(u_int regvalue, u_int *cur_col, u_int wrap) 2293ahd_scb_residual_datacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
3469{ 2294{
3470 return (ahd_print_register(NULL, 0, "SCB_BASE", 2295 return (ahd_print_register(NULL, 0, "SCB_RESIDUAL_DATACNT",
3471 0x180, regvalue, cur_col, wrap)); 2296 0x180, regvalue, cur_col, wrap));
3472} 2297}
3473 2298
3474int 2299int
3475ahd_scb_residual_datacnt_print(u_int regvalue, u_int *cur_col, u_int wrap) 2300ahd_scb_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
3476{ 2301{
3477 return (ahd_print_register(NULL, 0, "SCB_RESIDUAL_DATACNT", 2302 return (ahd_print_register(NULL, 0, "SCB_BASE",
3478 0x180, regvalue, cur_col, wrap)); 2303 0x180, regvalue, cur_col, wrap));
3479} 2304}
3480 2305
3481static ahd_reg_parse_entry_t SCB_RESIDUAL_SGPTR_parse_table[] = { 2306static const ahd_reg_parse_entry_t SCB_RESIDUAL_SGPTR_parse_table[] = {
3482 { "SG_LIST_NULL", 0x01, 0x01 }, 2307 { "SG_LIST_NULL", 0x01, 0x01 },
3483 { "SG_OVERRUN_RESID", 0x02, 0x02 }, 2308 { "SG_OVERRUN_RESID", 0x02, 0x02 },
3484 { "SG_ADDR_MASK", 0xf8, 0xf8 } 2309 { "SG_ADDR_MASK", 0xf8, 0xf8 }
@@ -3499,27 +2324,6 @@ ahd_scb_scsi_status_print(u_int regvalue, u_int *cur_col, u_int wrap)
3499} 2324}
3500 2325
3501int 2326int
3502ahd_scb_target_phases_print(u_int regvalue, u_int *cur_col, u_int wrap)
3503{
3504 return (ahd_print_register(NULL, 0, "SCB_TARGET_PHASES",
3505 0x189, regvalue, cur_col, wrap));
3506}
3507
3508int
3509ahd_scb_target_data_dir_print(u_int regvalue, u_int *cur_col, u_int wrap)
3510{
3511 return (ahd_print_register(NULL, 0, "SCB_TARGET_DATA_DIR",
3512 0x18a, regvalue, cur_col, wrap));
3513}
3514
3515int
3516ahd_scb_target_itag_print(u_int regvalue, u_int *cur_col, u_int wrap)
3517{
3518 return (ahd_print_register(NULL, 0, "SCB_TARGET_ITAG",
3519 0x18b, regvalue, cur_col, wrap));
3520}
3521
3522int
3523ahd_scb_sense_busaddr_print(u_int regvalue, u_int *cur_col, u_int wrap) 2327ahd_scb_sense_busaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
3524{ 2328{
3525 return (ahd_print_register(NULL, 0, "SCB_SENSE_BUSADDR", 2329 return (ahd_print_register(NULL, 0, "SCB_SENSE_BUSADDR",
@@ -3533,7 +2337,7 @@ ahd_scb_tag_print(u_int regvalue, u_int *cur_col, u_int wrap)
3533 0x190, regvalue, cur_col, wrap)); 2337 0x190, regvalue, cur_col, wrap));
3534} 2338}
3535 2339
3536static ahd_reg_parse_entry_t SCB_CONTROL_parse_table[] = { 2340static const ahd_reg_parse_entry_t SCB_CONTROL_parse_table[] = {
3537 { "SCB_TAG_TYPE", 0x03, 0x03 }, 2341 { "SCB_TAG_TYPE", 0x03, 0x03 },
3538 { "DISCONNECTED", 0x04, 0x04 }, 2342 { "DISCONNECTED", 0x04, 0x04 },
3539 { "STATUS_RCVD", 0x08, 0x08 }, 2343 { "STATUS_RCVD", 0x08, 0x08 },
@@ -3550,7 +2354,7 @@ ahd_scb_control_print(u_int regvalue, u_int *cur_col, u_int wrap)
3550 0x192, regvalue, cur_col, wrap)); 2354 0x192, regvalue, cur_col, wrap));
3551} 2355}
3552 2356
3553static ahd_reg_parse_entry_t SCB_SCSIID_parse_table[] = { 2357static const ahd_reg_parse_entry_t SCB_SCSIID_parse_table[] = {
3554 { "OID", 0x0f, 0x0f }, 2358 { "OID", 0x0f, 0x0f },
3555 { "TID", 0xf0, 0xf0 } 2359 { "TID", 0xf0, 0xf0 }
3556}; 2360};
@@ -3562,7 +2366,7 @@ ahd_scb_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap)
3562 0x193, regvalue, cur_col, wrap)); 2366 0x193, regvalue, cur_col, wrap));
3563} 2367}
3564 2368
3565static ahd_reg_parse_entry_t SCB_LUN_parse_table[] = { 2369static const ahd_reg_parse_entry_t SCB_LUN_parse_table[] = {
3566 { "LID", 0xff, 0xff } 2370 { "LID", 0xff, 0xff }
3567}; 2371};
3568 2372
@@ -3573,7 +2377,7 @@ ahd_scb_lun_print(u_int regvalue, u_int *cur_col, u_int wrap)
3573 0x194, regvalue, cur_col, wrap)); 2377 0x194, regvalue, cur_col, wrap));
3574} 2378}
3575 2379
3576static ahd_reg_parse_entry_t SCB_TASK_ATTRIBUTE_parse_table[] = { 2380static const ahd_reg_parse_entry_t SCB_TASK_ATTRIBUTE_parse_table[] = {
3577 { "SCB_XFERLEN_ODD", 0x01, 0x01 } 2381 { "SCB_XFERLEN_ODD", 0x01, 0x01 }
3578}; 2382};
3579 2383
@@ -3584,7 +2388,7 @@ ahd_scb_task_attribute_print(u_int regvalue, u_int *cur_col, u_int wrap)
3584 0x195, regvalue, cur_col, wrap)); 2388 0x195, regvalue, cur_col, wrap));
3585} 2389}
3586 2390
3587static ahd_reg_parse_entry_t SCB_CDB_LEN_parse_table[] = { 2391static const ahd_reg_parse_entry_t SCB_CDB_LEN_parse_table[] = {
3588 { "SCB_CDB_LEN_PTR", 0x80, 0x80 } 2392 { "SCB_CDB_LEN_PTR", 0x80, 0x80 }
3589}; 2393};
3590 2394
@@ -3609,7 +2413,7 @@ ahd_scb_dataptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
3609 0x198, regvalue, cur_col, wrap)); 2413 0x198, regvalue, cur_col, wrap));
3610} 2414}
3611 2415
3612static ahd_reg_parse_entry_t SCB_DATACNT_parse_table[] = { 2416static const ahd_reg_parse_entry_t SCB_DATACNT_parse_table[] = {
3613 { "SG_HIGH_ADDR_BITS", 0x7f, 0x7f }, 2417 { "SG_HIGH_ADDR_BITS", 0x7f, 0x7f },
3614 { "SG_LAST_SEG", 0x80, 0x80 } 2418 { "SG_LAST_SEG", 0x80, 0x80 }
3615}; 2419};
@@ -3621,7 +2425,7 @@ ahd_scb_datacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
3621 0x1a0, regvalue, cur_col, wrap)); 2425 0x1a0, regvalue, cur_col, wrap));
3622} 2426}
3623 2427
3624static ahd_reg_parse_entry_t SCB_SGPTR_parse_table[] = { 2428static const ahd_reg_parse_entry_t SCB_SGPTR_parse_table[] = {
3625 { "SG_LIST_NULL", 0x01, 0x01 }, 2429 { "SG_LIST_NULL", 0x01, 0x01 },
3626 { "SG_FULL_RESID", 0x02, 0x02 }, 2430 { "SG_FULL_RESID", 0x02, 0x02 },
3627 { "SG_STATUS_VALID", 0x04, 0x04 } 2431 { "SG_STATUS_VALID", 0x04, 0x04 }
@@ -3656,13 +2460,6 @@ ahd_scb_next2_print(u_int regvalue, u_int *cur_col, u_int wrap)
3656} 2460}
3657 2461
3658int 2462int
3659ahd_scb_spare_print(u_int regvalue, u_int *cur_col, u_int wrap)
3660{
3661 return (ahd_print_register(NULL, 0, "SCB_SPARE",
3662 0x1b0, regvalue, cur_col, wrap));
3663}
3664
3665int
3666ahd_scb_disconnected_lists_print(u_int regvalue, u_int *cur_col, u_int wrap) 2463ahd_scb_disconnected_lists_print(u_int regvalue, u_int *cur_col, u_int wrap)
3667{ 2464{
3668 return (ahd_print_register(NULL, 0, "SCB_DISCONNECTED_LISTS", 2465 return (ahd_print_register(NULL, 0, "SCB_DISCONNECTED_LISTS",
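
Editor's note: every print routine in this generated file follows one shape, a static const table of { name, value, mask } entries plus a call to ahd_print_register() with the table, its entry count, the register's name, and its address. A minimal sketch of one plausible table-driven decode (the decode_register() helper below is hypothetical; the shipped ahd_print_register() additionally tracks cur_col and wraps long output lines):

    #include <stdio.h>

    typedef struct {
            const char *name;
            unsigned char value;
            unsigned char mask;
    } reg_parse_entry_t;

    /*
     * Print every field whose masked bits match its table value.
     * Hypothetical helper, simplified from the generated code's usage.
     */
    static void
    decode_register(const reg_parse_entry_t *table, int num_entries,
                    const char *name, unsigned int regvalue)
    {
            int i;

            printf("%s[0x%02x]", name, regvalue);
            for (i = 0; i < num_entries; i++) {
                    if ((regvalue & table[i].mask) == table[i].value)
                            printf(" %s", table[i].name);
            }
            printf("\n");
    }

    int main(void)
    {
            static const reg_parse_entry_t sg_state[] = {
                    { "SEGS_AVAIL",     0x01, 0x01 },
                    { "LOADING_NEEDED", 0x02, 0x02 },
                    { "FETCH_INPROG",   0x04, 0x04 }
            };

            /* Prints: SG_STATE[0x05] SEGS_AVAIL FETCH_INPROG */
            decode_register(sg_state, 3, "SG_STATE", 0x05);
            return 0;
    }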
diff --git a/drivers/scsi/aic7xxx/aic79xx_seq.h_shipped b/drivers/scsi/aic7xxx/aic79xx_seq.h_shipped
index 11bed07e90b7..4b51e232392f 100644
--- a/drivers/scsi/aic7xxx/aic79xx_seq.h_shipped
+++ b/drivers/scsi/aic7xxx/aic79xx_seq.h_shipped
@@ -5,7 +5,7 @@
5 * $Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#120 $ 5 * $Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#120 $
6 * $Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#77 $ 6 * $Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#77 $
7 */ 7 */
8static uint8_t seqprog[] = { 8static const uint8_t seqprog[] = {
9 0xff, 0x02, 0x06, 0x78, 9 0xff, 0x02, 0x06, 0x78,
10 0x00, 0xea, 0x6e, 0x59, 10 0x00, 0xea, 0x6e, 0x59,
11 0x01, 0xea, 0x04, 0x30, 11 0x01, 0xea, 0x04, 0x30,
@@ -1027,7 +1027,7 @@ ahd_patch0_func(struct ahd_softc *ahd)
1027 return (0); 1027 return (0);
1028} 1028}
1029 1029
1030static struct patch { 1030static const struct patch {
1031 ahd_patch_func_t *patch_func; 1031 ahd_patch_func_t *patch_func;
1032 uint32_t begin :10, 1032 uint32_t begin :10,
1033 skip_instr :10, 1033 skip_instr :10,
@@ -1166,7 +1166,7 @@ static struct patch {
1166 { ahd_patch23_func, 815, 11, 1 } 1166 { ahd_patch23_func, 815, 11, 1 }
1167}; 1167};
1168 1168
1169static struct cs { 1169static const struct cs {
1170 uint16_t begin; 1170 uint16_t begin;
1171 uint16_t end; 1171 uint16_t end;
1172} critical_sections[] = { 1172} critical_sections[] = {
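
Editor's note: the seqprog bytes above are not downloaded verbatim. The patch table is consulted as the download walks the instruction stream; when a patch's predicate (patch_func) returns false, skip_instr instructions are dropped and skip_patch table entries are advanced past. A simplified, hedged sketch of that loop (the driver's real ahc_check_patch()/ahc_loadseq() pair also renumbers jump targets for the elided code and handles stacked patches at one address):

    #include <stdio.h>

    struct softc;                              /* opaque driver state */

    struct patch {
            int (*patch_func)(struct softc *); /* non-zero: patch applies */
            unsigned int begin;                /* first instruction covered */
            unsigned int skip_instr;           /* instructions dropped if not */
            unsigned int skip_patch;           /* table entries skipped too */
    };

    static void
    emit_instruction(unsigned int index)
    {
            /* Stand-in for writing one 4-byte opcode to SEQRAM. */
            printf("download instruction %u\n", index);
    }

    static void
    download_seqprog(struct softc *sc, unsigned int num_instr,
                     const struct patch *cur, const struct patch *end)
    {
            unsigned int i = 0;

            while (i < num_instr) {
                    unsigned int skip = 0;

                    while (cur < end && cur->begin == i) {
                            if (cur->patch_func(sc) == 0) {
                                    skip = cur->skip_instr;
                                    cur += cur->skip_patch;
                                    break;
                            }
                            cur++;          /* patch applies; keep the code */
                    }
                    if (skip != 0) {
                            i += skip;      /* conditional code elided */
                            continue;
                    }
                    emit_instruction(i);
                    i++;
            }
    }

    static int patch_never(struct softc *sc) { (void)sc; return 0; }

    int main(void)
    {
            static const struct patch patches[] = {
                    { patch_never, 2, 3, 1 },  /* drops instructions 2..4 */
            };

            /* Emits instructions 0, 1, 5, 6, 7. */
            download_seqprog(NULL, 8, patches, patches + 1);
            return 0;
    }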
diff --git a/drivers/scsi/aic7xxx/aic7xxx.h b/drivers/scsi/aic7xxx/aic7xxx.h
index c0344e617651..e4e651cca3e4 100644
--- a/drivers/scsi/aic7xxx/aic7xxx.h
+++ b/drivers/scsi/aic7xxx/aic7xxx.h
@@ -736,7 +736,7 @@ struct ahc_syncrate {
736#define ST_SXFR 0x010 /* Rate Single Transition Only */ 736#define ST_SXFR 0x010 /* Rate Single Transition Only */
737#define DT_SXFR 0x040 /* Rate Double Transition Only */ 737#define DT_SXFR 0x040 /* Rate Double Transition Only */
738 uint8_t period; /* Period to send to SCSI target */ 738 uint8_t period; /* Period to send to SCSI target */
739 char *rate; 739 const char *rate;
740}; 740};
741 741
742/* Safe and valid period for async negotiations. */ 742/* Safe and valid period for async negotiations. */
@@ -1114,7 +1114,7 @@ typedef int (ahc_device_setup_t)(struct ahc_softc *);
1114struct ahc_pci_identity { 1114struct ahc_pci_identity {
1115 uint64_t full_id; 1115 uint64_t full_id;
1116 uint64_t id_mask; 1116 uint64_t id_mask;
1117 char *name; 1117 const char *name;
1118 ahc_device_setup_t *setup; 1118 ahc_device_setup_t *setup;
1119}; 1119};
1120 1120
@@ -1133,15 +1133,11 @@ extern const int ahc_num_aic7770_devs;
1133 1133
1134/*************************** Function Declarations ****************************/ 1134/*************************** Function Declarations ****************************/
1135/******************************************************************************/ 1135/******************************************************************************/
1136u_int ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl);
1137void ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl);
1138void ahc_busy_tcl(struct ahc_softc *ahc,
1139 u_int tcl, u_int busyid);
1140 1136
1141/***************************** PCI Front End *********************************/ 1137/***************************** PCI Front End *********************************/
1142struct ahc_pci_identity *ahc_find_pci_device(ahc_dev_softc_t); 1138const struct ahc_pci_identity *ahc_find_pci_device(ahc_dev_softc_t);
1143int ahc_pci_config(struct ahc_softc *, 1139int ahc_pci_config(struct ahc_softc *,
1144 struct ahc_pci_identity *); 1140 const struct ahc_pci_identity *);
1145int ahc_pci_test_register_access(struct ahc_softc *); 1141int ahc_pci_test_register_access(struct ahc_softc *);
1146#ifdef CONFIG_PM 1142#ifdef CONFIG_PM
1147void ahc_pci_resume(struct ahc_softc *ahc); 1143void ahc_pci_resume(struct ahc_softc *ahc);
@@ -1155,9 +1151,6 @@ int aic7770_config(struct ahc_softc *ahc,
1155 1151
1156/************************** SCB and SCB queue management **********************/ 1152/************************** SCB and SCB queue management **********************/
1157int ahc_probe_scbs(struct ahc_softc *); 1153int ahc_probe_scbs(struct ahc_softc *);
1158void ahc_run_untagged_queues(struct ahc_softc *ahc);
1159void ahc_run_untagged_queue(struct ahc_softc *ahc,
1160 struct scb_tailq *queue);
1161void ahc_qinfifo_requeue_tail(struct ahc_softc *ahc, 1154void ahc_qinfifo_requeue_tail(struct ahc_softc *ahc,
1162 struct scb *scb); 1155 struct scb *scb);
1163int ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, 1156int ahc_match_scb(struct ahc_softc *ahc, struct scb *scb,
@@ -1178,22 +1171,8 @@ int ahc_resume(struct ahc_softc *ahc);
1178#endif 1171#endif
1179void ahc_set_unit(struct ahc_softc *, int); 1172void ahc_set_unit(struct ahc_softc *, int);
1180void ahc_set_name(struct ahc_softc *, char *); 1173void ahc_set_name(struct ahc_softc *, char *);
1181void ahc_alloc_scbs(struct ahc_softc *ahc);
1182void ahc_free(struct ahc_softc *ahc); 1174void ahc_free(struct ahc_softc *ahc);
1183int ahc_reset(struct ahc_softc *ahc, int reinit); 1175int ahc_reset(struct ahc_softc *ahc, int reinit);
1184void ahc_shutdown(void *arg);
1185
1186/*************************** Interrupt Services *******************************/
1187void ahc_clear_intstat(struct ahc_softc *ahc);
1188void ahc_run_qoutfifo(struct ahc_softc *ahc);
1189#ifdef AHC_TARGET_MODE
1190void ahc_run_tqinfifo(struct ahc_softc *ahc, int paused);
1191#endif
1192void ahc_handle_brkadrint(struct ahc_softc *ahc);
1193void ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat);
1194void ahc_handle_scsiint(struct ahc_softc *ahc,
1195 u_int intstat);
1196void ahc_clear_critical_section(struct ahc_softc *ahc);
1197 1176
1198/***************************** Error Recovery *********************************/ 1177/***************************** Error Recovery *********************************/
1199typedef enum { 1178typedef enum {
@@ -1214,36 +1193,19 @@ int ahc_search_disc_list(struct ahc_softc *ahc, int target,
1214 char channel, int lun, u_int tag, 1193 char channel, int lun, u_int tag,
1215 int stop_on_first, int remove, 1194 int stop_on_first, int remove,
1216 int save_state); 1195 int save_state);
1217void ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb);
1218int ahc_reset_channel(struct ahc_softc *ahc, char channel, 1196int ahc_reset_channel(struct ahc_softc *ahc, char channel,
1219 int initiate_reset); 1197 int initiate_reset);
1220int ahc_abort_scbs(struct ahc_softc *ahc, int target, 1198
1221 char channel, int lun, u_int tag,
1222 role_t role, uint32_t status);
1223void ahc_restart(struct ahc_softc *ahc);
1224void ahc_calc_residual(struct ahc_softc *ahc,
1225 struct scb *scb);
1226/*************************** Utility Functions ********************************/ 1199/*************************** Utility Functions ********************************/
1227struct ahc_phase_table_entry*
1228 ahc_lookup_phase_entry(int phase);
1229void ahc_compile_devinfo(struct ahc_devinfo *devinfo, 1200void ahc_compile_devinfo(struct ahc_devinfo *devinfo,
1230 u_int our_id, u_int target, 1201 u_int our_id, u_int target,
1231 u_int lun, char channel, 1202 u_int lun, char channel,
1232 role_t role); 1203 role_t role);
1233/************************** Transfer Negotiation ******************************/ 1204/************************** Transfer Negotiation ******************************/
1234struct ahc_syncrate* ahc_find_syncrate(struct ahc_softc *ahc, u_int *period, 1205const struct ahc_syncrate* ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
1235 u_int *ppr_options, u_int maxsync); 1206 u_int *ppr_options, u_int maxsync);
1236u_int ahc_find_period(struct ahc_softc *ahc, 1207u_int ahc_find_period(struct ahc_softc *ahc,
1237 u_int scsirate, u_int maxsync); 1208 u_int scsirate, u_int maxsync);
1238void ahc_validate_offset(struct ahc_softc *ahc,
1239 struct ahc_initiator_tinfo *tinfo,
1240 struct ahc_syncrate *syncrate,
1241 u_int *offset, int wide,
1242 role_t role);
1243void ahc_validate_width(struct ahc_softc *ahc,
1244 struct ahc_initiator_tinfo *tinfo,
1245 u_int *bus_width,
1246 role_t role);
1247/* 1209/*
1248 * Negotiation types. These are used to qualify if we should renegotiate 1210 * Negotiation types. These are used to qualify if we should renegotiate
1249 * even if our goal and current transport parameters are identical. 1211 * even if our goal and current transport parameters are identical.
@@ -1263,7 +1225,7 @@ void ahc_set_width(struct ahc_softc *ahc,
1263 u_int width, u_int type, int paused); 1225 u_int width, u_int type, int paused);
1264void ahc_set_syncrate(struct ahc_softc *ahc, 1226void ahc_set_syncrate(struct ahc_softc *ahc,
1265 struct ahc_devinfo *devinfo, 1227 struct ahc_devinfo *devinfo,
1266 struct ahc_syncrate *syncrate, 1228 const struct ahc_syncrate *syncrate,
1267 u_int period, u_int offset, 1229 u_int period, u_int offset,
1268 u_int ppr_options, 1230 u_int ppr_options,
1269 u_int type, int paused); 1231 u_int type, int paused);
@@ -1305,11 +1267,10 @@ extern uint32_t ahc_debug;
1305#define AHC_SHOW_MASKED_ERRORS 0x1000 1267#define AHC_SHOW_MASKED_ERRORS 0x1000
1306#define AHC_DEBUG_SEQUENCER 0x2000 1268#define AHC_DEBUG_SEQUENCER 0x2000
1307#endif 1269#endif
1308void ahc_print_scb(struct scb *scb);
1309void ahc_print_devinfo(struct ahc_softc *ahc, 1270void ahc_print_devinfo(struct ahc_softc *ahc,
1310 struct ahc_devinfo *dev); 1271 struct ahc_devinfo *dev);
1311void ahc_dump_card_state(struct ahc_softc *ahc); 1272void ahc_dump_card_state(struct ahc_softc *ahc);
1312int ahc_print_register(ahc_reg_parse_entry_t *table, 1273int ahc_print_register(const ahc_reg_parse_entry_t *table,
1313 u_int num_entries, 1274 u_int num_entries,
1314 const char *name, 1275 const char *name,
1315 u_int address, 1276 u_int address,
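
Editor's note: the header changes above all push one direction: lookup tables, the strings inside them, and the pointers the driver hands back become const. A small illustration of what that buys (hypothetical example struct, not the driver's own):

    struct pci_id_example {
            unsigned long long full_id;
            unsigned long long id_mask;
            const char *name;     /* points at a string literal: never written */
    };

    /*
     * With the table itself const, stray writes through a lookup result are
     * compile errors, and the data can be placed in a read-only section.
     */
    static const struct pci_id_example example_ids[] = {
            { 0x9005ULL, 0xffffULL, "example controller" },
    };

    static const struct pci_id_example *
    find_example_id(unsigned long long id)
    {
            return ((id & example_ids[0].id_mask) == example_ids[0].full_id)
                ? &example_ids[0] : (const struct pci_id_example *)0;
    }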
diff --git a/drivers/scsi/aic7xxx/aic7xxx.reg b/drivers/scsi/aic7xxx/aic7xxx.reg
index e196d83b93c7..0d2f763c3427 100644
--- a/drivers/scsi/aic7xxx/aic7xxx.reg
+++ b/drivers/scsi/aic7xxx/aic7xxx.reg
@@ -238,6 +238,7 @@ register SXFRCTL2 {
238register OPTIONMODE { 238register OPTIONMODE {
239 address 0x008 239 address 0x008
240 access_mode RW 240 access_mode RW
241 count 2
241 field AUTORATEEN 0x80 242 field AUTORATEEN 0x80
242 field AUTOACKEN 0x40 243 field AUTOACKEN 0x40
243 field ATNMGMNTEN 0x20 244 field ATNMGMNTEN 0x20
@@ -254,6 +255,7 @@ register TARGCRCCNT {
254 address 0x00a 255 address 0x00a
255 size 2 256 size 2
256 access_mode RW 257 access_mode RW
258 count 2
257} 259}
258 260
259/* 261/*
@@ -344,6 +346,7 @@ register SSTAT2 {
344register SSTAT3 { 346register SSTAT3 {
345 address 0x00e 347 address 0x00e
346 access_mode RO 348 access_mode RO
349 count 2
347 mask SCSICNT 0xf0 350 mask SCSICNT 0xf0
348 mask OFFCNT 0x0f 351 mask OFFCNT 0x0f
349 mask U2OFFCNT 0x7f 352 mask U2OFFCNT 0x7f
@@ -367,6 +370,7 @@ register SCSIID_ULTRA2 {
367register SIMODE0 { 370register SIMODE0 {
368 address 0x010 371 address 0x010
369 access_mode RW 372 access_mode RW
373 count 2
370 field ENSELDO 0x40 374 field ENSELDO 0x40
371 field ENSELDI 0x20 375 field ENSELDI 0x20
372 field ENSELINGO 0x10 376 field ENSELINGO 0x10
@@ -429,6 +433,7 @@ register SHADDR {
429register SELTIMER { 433register SELTIMER {
430 address 0x018 434 address 0x018
431 access_mode RW 435 access_mode RW
436 count 1
432 field STAGE6 0x20 437 field STAGE6 0x20
433 field STAGE5 0x10 438 field STAGE5 0x10
434 field STAGE4 0x08 439 field STAGE4 0x08
@@ -467,6 +472,7 @@ register TARGID {
467 address 0x01b 472 address 0x01b
468 size 2 473 size 2
469 access_mode RW 474 access_mode RW
475 count 14
470} 476}
471 477
472/* 478/*
@@ -480,6 +486,7 @@ register TARGID {
480register SPIOCAP { 486register SPIOCAP {
481 address 0x01b 487 address 0x01b
482 access_mode RW 488 access_mode RW
489 count 10
483 field SOFT1 0x80 490 field SOFT1 0x80
484 field SOFT0 0x40 491 field SOFT0 0x40
485 field SOFTCMDEN 0x20 492 field SOFTCMDEN 0x20
@@ -492,6 +499,7 @@ register SPIOCAP {
492 499
493register BRDCTL { 500register BRDCTL {
494 address 0x01d 501 address 0x01d
502 count 11
495 field BRDDAT7 0x80 503 field BRDDAT7 0x80
496 field BRDDAT6 0x40 504 field BRDDAT6 0x40
497 field BRDDAT5 0x20 505 field BRDDAT5 0x20
@@ -534,6 +542,7 @@ register BRDCTL {
534 */ 542 */
535register SEECTL { 543register SEECTL {
536 address 0x01e 544 address 0x01e
545 count 11
537 field EXTARBACK 0x80 546 field EXTARBACK 0x80
538 field EXTARBREQ 0x40 547 field EXTARBREQ 0x40
539 field SEEMS 0x20 548 field SEEMS 0x20
@@ -570,6 +579,7 @@ register SBLKCTL {
570register SEQCTL { 579register SEQCTL {
571 address 0x060 580 address 0x060
572 access_mode RW 581 access_mode RW
582 count 15
573 field PERRORDIS 0x80 583 field PERRORDIS 0x80
574 field PAUSEDIS 0x40 584 field PAUSEDIS 0x40
575 field FAILDIS 0x20 585 field FAILDIS 0x20
@@ -590,6 +600,7 @@ register SEQCTL {
590register SEQRAM { 600register SEQRAM {
591 address 0x061 601 address 0x061
592 access_mode RW 602 access_mode RW
603 count 2
593} 604}
594 605
595/* 606/*
@@ -604,6 +615,7 @@ register SEQADDR0 {
604register SEQADDR1 { 615register SEQADDR1 {
605 address 0x063 616 address 0x063
606 access_mode RW 617 access_mode RW
618 count 8
607 mask SEQADDR1_MASK 0x01 619 mask SEQADDR1_MASK 0x01
608} 620}
609 621
@@ -649,6 +661,7 @@ register NONE {
649register FLAGS { 661register FLAGS {
650 address 0x06b 662 address 0x06b
651 access_mode RO 663 access_mode RO
664 count 18
652 field ZERO 0x02 665 field ZERO 0x02
653 field CARRY 0x01 666 field CARRY 0x01
654} 667}
@@ -671,6 +684,7 @@ register FUNCTION1 {
671register STACK { 684register STACK {
672 address 0x06f 685 address 0x06f
673 access_mode RO 686 access_mode RO
687 count 5
674} 688}
675 689
676const STACK_SIZE 4 690const STACK_SIZE 4
@@ -692,6 +706,7 @@ register BCTL {
692register DSCOMMAND0 { 706register DSCOMMAND0 {
693 address 0x084 707 address 0x084
694 access_mode RW 708 access_mode RW
709 count 7
695 field CACHETHEN 0x80 /* Cache Threshold enable */ 710 field CACHETHEN 0x80 /* Cache Threshold enable */
696 field DPARCKEN 0x40 /* Data Parity Check Enable */ 711 field DPARCKEN 0x40 /* Data Parity Check Enable */
697 field MPARCKEN 0x20 /* Memory Parity Check Enable */ 712 field MPARCKEN 0x20 /* Memory Parity Check Enable */
@@ -717,6 +732,7 @@ register DSCOMMAND1 {
717register BUSTIME { 732register BUSTIME {
718 address 0x085 733 address 0x085
719 access_mode RW 734 access_mode RW
735 count 2
720 mask BOFF 0xf0 736 mask BOFF 0xf0
721 mask BON 0x0f 737 mask BON 0x0f
722} 738}
@@ -727,6 +743,7 @@ register BUSTIME {
727register BUSSPD { 743register BUSSPD {
728 address 0x086 744 address 0x086
729 access_mode RW 745 access_mode RW
746 count 2
730 mask DFTHRSH 0xc0 747 mask DFTHRSH 0xc0
731 mask STBOFF 0x38 748 mask STBOFF 0x38
732 mask STBON 0x07 749 mask STBON 0x07
@@ -737,6 +754,7 @@ register BUSSPD {
737/* aic7850/55/60/70/80/95 only */ 754/* aic7850/55/60/70/80/95 only */
738register DSPCISTATUS { 755register DSPCISTATUS {
739 address 0x086 756 address 0x086
757 count 4
740 mask DFTHRSH_100 0xc0 758 mask DFTHRSH_100 0xc0
741} 759}
742 760
@@ -758,6 +776,7 @@ const SEQ_MAILBOX_SHIFT 0
758register HCNTRL { 776register HCNTRL {
759 address 0x087 777 address 0x087
760 access_mode RW 778 access_mode RW
779 count 14
761 field POWRDN 0x40 780 field POWRDN 0x40
762 field SWINT 0x10 781 field SWINT 0x10
763 field IRQMS 0x08 782 field IRQMS 0x08
@@ -869,6 +888,7 @@ register INTSTAT {
869register ERROR { 888register ERROR {
870 address 0x092 889 address 0x092
871 access_mode RO 890 access_mode RO
891 count 26
872 field CIOPARERR 0x80 /* Ultra2 only */ 892 field CIOPARERR 0x80 /* Ultra2 only */
873 field PCIERRSTAT 0x40 /* PCI only */ 893 field PCIERRSTAT 0x40 /* PCI only */
874 field MPARERR 0x20 /* PCI only */ 894 field MPARERR 0x20 /* PCI only */
@@ -885,6 +905,7 @@ register ERROR {
885register CLRINT { 905register CLRINT {
886 address 0x092 906 address 0x092
887 access_mode WO 907 access_mode WO
908 count 24
888 field CLRPARERR 0x10 /* PCI only */ 909 field CLRPARERR 0x10 /* PCI only */
889 field CLRBRKADRINT 0x08 910 field CLRBRKADRINT 0x08
890 field CLRSCSIINT 0x04 911 field CLRSCSIINT 0x04
@@ -943,6 +964,7 @@ register DFDAT {
943register SCBCNT { 964register SCBCNT {
944 address 0x09a 965 address 0x09a
945 access_mode RW 966 access_mode RW
967 count 1
946 field SCBAUTO 0x80 968 field SCBAUTO 0x80
947 mask SCBCNT_MASK 0x1f 969 mask SCBCNT_MASK 0x1f
948} 970}
@@ -954,6 +976,7 @@ register SCBCNT {
954register QINFIFO { 976register QINFIFO {
955 address 0x09b 977 address 0x09b
956 access_mode RW 978 access_mode RW
979 count 12
957} 980}
958 981
959/* 982/*
@@ -972,11 +995,13 @@ register QINCNT {
972register QOUTFIFO { 995register QOUTFIFO {
973 address 0x09d 996 address 0x09d
974 access_mode WO 997 access_mode WO
998 count 7
975} 999}
976 1000
977register CRCCONTROL1 { 1001register CRCCONTROL1 {
978 address 0x09d 1002 address 0x09d
979 access_mode RW 1003 access_mode RW
1004 count 3
980 field CRCONSEEN 0x80 1005 field CRCONSEEN 0x80
981 field CRCVALCHKEN 0x40 1006 field CRCVALCHKEN 0x40
982 field CRCENDCHKEN 0x20 1007 field CRCENDCHKEN 0x20
@@ -1013,6 +1038,7 @@ register SCSIPHASE {
1013register SFUNCT { 1038register SFUNCT {
1014 address 0x09f 1039 address 0x09f
1015 access_mode RW 1040 access_mode RW
1041 count 4
1016 field ALT_MODE 0x80 1042 field ALT_MODE 0x80
1017} 1043}
1018 1044
@@ -1095,6 +1121,7 @@ scb {
1095 } 1121 }
1096 SCB_SCSIOFFSET { 1122 SCB_SCSIOFFSET {
1097 size 1 1123 size 1
1124 count 1
1098 } 1125 }
1099 SCB_NEXT { 1126 SCB_NEXT {
1100 size 1 1127 size 1
@@ -1118,6 +1145,7 @@ const SG_SIZEOF 0x08 /* sizeof(struct ahc_dma) */
1118register SEECTL_2840 { 1145register SEECTL_2840 {
1119 address 0x0c0 1146 address 0x0c0
1120 access_mode RW 1147 access_mode RW
1148 count 2
1121 field CS_2840 0x04 1149 field CS_2840 0x04
1122 field CK_2840 0x02 1150 field CK_2840 0x02
1123 field DO_2840 0x01 1151 field DO_2840 0x01
@@ -1126,6 +1154,7 @@ register SEECTL_2840 {
1126register STATUS_2840 { 1154register STATUS_2840 {
1127 address 0x0c1 1155 address 0x0c1
1128 access_mode RW 1156 access_mode RW
1157 count 4
1129 field EEPROM_TF 0x80 1158 field EEPROM_TF 0x80
1130 mask BIOS_SEL 0x60 1159 mask BIOS_SEL 0x60
1131 mask ADSEL 0x1e 1160 mask ADSEL 0x1e
@@ -1161,6 +1190,7 @@ register CCSGCTL {
1161 1190
1162register CCSCBCNT { 1191register CCSCBCNT {
1163 address 0xEF 1192 address 0xEF
1193 count 1
1164} 1194}
1165 1195
1166register CCSCBCTL { 1196register CCSCBCTL {
@@ -1187,6 +1217,7 @@ register CCSCBRAM {
1187register SCBBADDR { 1217register SCBBADDR {
1188 address 0x0F0 1218 address 0x0F0
1189 access_mode RW 1219 access_mode RW
1220 count 3
1190} 1221}
1191 1222
1192register CCSCBPTR { 1223register CCSCBPTR {
@@ -1195,6 +1226,7 @@ register CCSCBPTR {
1195 1226
1196register HNSCB_QOFF { 1227register HNSCB_QOFF {
1197 address 0x0F4 1228 address 0x0F4
1229 count 4
1198} 1230}
1199 1231
1200register SNSCB_QOFF { 1232register SNSCB_QOFF {
@@ -1234,6 +1266,7 @@ register DFF_THRSH {
1234 mask WR_DFTHRSH_85 0x50 1266 mask WR_DFTHRSH_85 0x50
1235 mask WR_DFTHRSH_90 0x60 1267 mask WR_DFTHRSH_90 0x60
1236 mask WR_DFTHRSH_MAX 0x70 1268 mask WR_DFTHRSH_MAX 0x70
1269 count 4
1237} 1270}
1238 1271
1239register SG_CACHE_PRE { 1272register SG_CACHE_PRE {
@@ -1287,6 +1320,7 @@ scratch_ram {
1287 ULTRA_ENB { 1320 ULTRA_ENB {
1288 alias CMDSIZE_TABLE 1321 alias CMDSIZE_TABLE
1289 size 2 1322 size 2
1323 count 2
1290 } 1324 }
1291 /* 1325 /*
1292 * Bit vector of targets that have disconnection disabled as set by 1326 * Bit vector of targets that have disconnection disabled as set by
@@ -1296,6 +1330,7 @@ scratch_ram {
1296 */ 1330 */
1297 DISC_DSB { 1331 DISC_DSB {
1298 size 2 1332 size 2
1333 count 6
1299 } 1334 }
1300 CMDSIZE_TABLE_TAIL { 1335 CMDSIZE_TABLE_TAIL {
1301 size 4 1336 size 4
@@ -1323,6 +1358,7 @@ scratch_ram {
1323 /* Parameters for DMA Logic */ 1358 /* Parameters for DMA Logic */
1324 DMAPARAMS { 1359 DMAPARAMS {
1325 size 1 1360 size 1
1361 count 12
1326 field PRELOADEN 0x80 1362 field PRELOADEN 0x80
1327 field WIDEODD 0x40 1363 field WIDEODD 0x40
1328 field SCSIEN 0x20 1364 field SCSIEN 0x20
@@ -1436,11 +1472,12 @@ scratch_ram {
1436 KERNEL_TQINPOS { 1472 KERNEL_TQINPOS {
1437 size 1 1473 size 1
1438 } 1474 }
1439 TQINPOS { 1475 TQINPOS {
1440 size 1 1476 size 1
1441 } 1477 }
1442 ARG_1 { 1478 ARG_1 {
1443 size 1 1479 size 1
1480 count 1
1444 mask SEND_MSG 0x80 1481 mask SEND_MSG 0x80
1445 mask SEND_SENSE 0x40 1482 mask SEND_SENSE 0x40
1446 mask SEND_REJ 0x20 1483 mask SEND_REJ 0x20
@@ -1495,6 +1532,7 @@ scratch_ram {
1495 size 1 1532 size 1
1496 field HA_274_EXTENDED_TRANS 0x01 1533 field HA_274_EXTENDED_TRANS 0x01
1497 alias INITIATOR_TAG 1534 alias INITIATOR_TAG
1535 count 1
1498 } 1536 }
1499 1537
1500 SEQ_FLAGS2 { 1538 SEQ_FLAGS2 {
@@ -1518,6 +1556,7 @@ scratch_ram {
1518 */ 1556 */
1519 SCSICONF { 1557 SCSICONF {
1520 size 1 1558 size 1
1559 count 12
1521 field TERM_ENB 0x80 1560 field TERM_ENB 0x80
1522 field RESET_SCSI 0x40 1561 field RESET_SCSI 0x40
1523 field ENSPCHK 0x20 1562 field ENSPCHK 0x20
@@ -1527,16 +1566,19 @@ scratch_ram {
1527 INTDEF { 1566 INTDEF {
1528 address 0x05c 1567 address 0x05c
1529 size 1 1568 size 1
1569 count 1
1530 field EDGE_TRIG 0x80 1570 field EDGE_TRIG 0x80
1531 mask VECTOR 0x0f 1571 mask VECTOR 0x0f
1532 } 1572 }
1533 HOSTCONF { 1573 HOSTCONF {
1534 address 0x05d 1574 address 0x05d
1535 size 1 1575 size 1
1576 count 1
1536 } 1577 }
1537 HA_274_BIOSCTRL { 1578 HA_274_BIOSCTRL {
1538 address 0x05f 1579 address 0x05f
1539 size 1 1580 size 1
1581 count 1
1540 mask BIOSMODE 0x30 1582 mask BIOSMODE 0x30
1541 mask BIOSDISABLED 0x30 1583 mask BIOSDISABLED 0x30
1542 field CHANNEL_B_PRIMARY 0x08 1584 field CHANNEL_B_PRIMARY 0x08
@@ -1552,6 +1594,7 @@ scratch_ram {
1552 */ 1594 */
1553 TARG_OFFSET { 1595 TARG_OFFSET {
1554 size 16 1596 size 16
1597 count 1
1555 } 1598 }
1556} 1599}
1557 1600
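
Editor's note: this .reg source is compiled by aicasm into the shipped C headers, one address constant per register and one mask constant per field; the count annotations added throughout appear to seed aicasm's reference accounting (used when deciding which debug tables are worth emitting) and do not change the generated values. Roughly, the OPTIONMODE block above corresponds to output like:

    /* Sketch of aicasm output; names come from the .reg source above, but
     * the exact shipped-header layout is not reproduced here. */
    #define OPTIONMODE      0x08
    #define AUTORATEEN      0x80
    #define AUTOACKEN       0x40
    #define ATNMGMNTEN      0x20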
diff --git a/drivers/scsi/aic7xxx/aic7xxx_93cx6.c b/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
index 3cb07e114e89..dd11999b77b6 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
@@ -84,16 +84,16 @@ struct seeprom_cmd {
84}; 84};
85 85
86/* Short opcodes for the c46 */ 86/* Short opcodes for the c46 */
87static struct seeprom_cmd seeprom_ewen = {9, {1, 0, 0, 1, 1, 0, 0, 0, 0}}; 87static const struct seeprom_cmd seeprom_ewen = {9, {1, 0, 0, 1, 1, 0, 0, 0, 0}};
88static struct seeprom_cmd seeprom_ewds = {9, {1, 0, 0, 0, 0, 0, 0, 0, 0}}; 88static const struct seeprom_cmd seeprom_ewds = {9, {1, 0, 0, 0, 0, 0, 0, 0, 0}};
89 89
90/* Long opcodes for the C56/C66 */ 90/* Long opcodes for the C56/C66 */
91static struct seeprom_cmd seeprom_long_ewen = {11, {1, 0, 0, 1, 1, 0, 0, 0, 0}}; 91static const struct seeprom_cmd seeprom_long_ewen = {11, {1, 0, 0, 1, 1, 0, 0, 0, 0}};
92static struct seeprom_cmd seeprom_long_ewds = {11, {1, 0, 0, 0, 0, 0, 0, 0, 0}}; 92static const struct seeprom_cmd seeprom_long_ewds = {11, {1, 0, 0, 0, 0, 0, 0, 0, 0}};
93 93
94/* Common opcodes */ 94/* Common opcodes */
95static struct seeprom_cmd seeprom_write = {3, {1, 0, 1}}; 95static const struct seeprom_cmd seeprom_write = {3, {1, 0, 1}};
96static struct seeprom_cmd seeprom_read = {3, {1, 1, 0}}; 96static const struct seeprom_cmd seeprom_read = {3, {1, 1, 0}};
97 97
98/* 98/*
99 * Wait for the SEERDY to go high; about 800 ns. 99 * Wait for the SEERDY to go high; about 800 ns.
@@ -108,7 +108,7 @@ static struct seeprom_cmd seeprom_read = {3, {1, 1, 0}};
108 * Send a START condition and the given command 108 * Send a START condition and the given command
109 */ 109 */
110static void 110static void
111send_seeprom_cmd(struct seeprom_descriptor *sd, struct seeprom_cmd *cmd) 111send_seeprom_cmd(struct seeprom_descriptor *sd, const struct seeprom_cmd *cmd)
112{ 112{
113 uint8_t temp; 113 uint8_t temp;
114 int i = 0; 114 int i = 0;
@@ -227,7 +227,7 @@ int
227ahc_write_seeprom(struct seeprom_descriptor *sd, uint16_t *buf, 227ahc_write_seeprom(struct seeprom_descriptor *sd, uint16_t *buf,
228 u_int start_addr, u_int count) 228 u_int start_addr, u_int count)
229{ 229{
230 struct seeprom_cmd *ewen, *ewds; 230 const struct seeprom_cmd *ewen, *ewds;
231 uint16_t v; 231 uint16_t v;
232 uint8_t temp; 232 uint8_t temp;
233 int i, k; 233 int i, k;
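
Editor's note: the seeprom_cmd tables spell each opcode out bit by bit ({length, {bit, ...}}) because the 93cx6 parts are programmed serially: chip select rises, then each bit is presented on the data-out line and latched with a clock pulse. A hedged sketch of that shape (printf stubs stand in for the SEECTL pin twiddling; timing delays and the SEERDY wait are omitted):

    #include <stdint.h>
    #include <stdio.h>

    struct cmd { uint8_t len; uint8_t bits[11]; };  /* 11 bits: long opcodes */

    static void pin_cs(int v)  { printf("CS=%d\n", v); }
    static void pin_clk(int v) { printf("CK=%d\n", v); }
    static void pin_do(int v)  { printf("DO=%d\n", v); }

    /* Raise chip select, then shift the opcode out one bit per clock pulse. */
    static void
    send_cmd(const struct cmd *cmd)
    {
            int i;

            pin_cs(1);                        /* START condition */
            for (i = 0; i < cmd->len; i++) {
                    pin_do(cmd->bits[i]);     /* present the bit... */
                    pin_clk(1);               /* ...and latch it */
                    pin_clk(0);
            }
    }

    int main(void)
    {
            static const struct cmd read_cmd = { 3, { 1, 1, 0 } }; /* seeprom_read */

            send_cmd(&read_cmd);
            return 0;
    }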
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 64e62ce59c15..0ae2b4605d09 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -51,8 +51,7 @@
51#endif 51#endif
52 52
53/***************************** Lookup Tables **********************************/ 53/***************************** Lookup Tables **********************************/
54char *ahc_chip_names[] = 54static const char *const ahc_chip_names[] = {
55{
56 "NONE", 55 "NONE",
57 "aic7770", 56 "aic7770",
58 "aic7850", 57 "aic7850",
@@ -75,10 +74,10 @@ static const u_int num_chip_names = ARRAY_SIZE(ahc_chip_names);
75 */ 74 */
76struct ahc_hard_error_entry { 75struct ahc_hard_error_entry {
77 uint8_t errno; 76 uint8_t errno;
78 char *errmesg; 77 const char *errmesg;
79}; 78};
80 79
81static struct ahc_hard_error_entry ahc_hard_errors[] = { 80static const struct ahc_hard_error_entry ahc_hard_errors[] = {
82 { ILLHADDR, "Illegal Host Access" }, 81 { ILLHADDR, "Illegal Host Access" },
83 { ILLSADDR, "Illegal Sequencer Address referenced" }, 82 { ILLSADDR, "Illegal Sequencer Address referenced" },
84 { ILLOPCODE, "Illegal Opcode in sequencer program" }, 83 { ILLOPCODE, "Illegal Opcode in sequencer program" },
@@ -90,7 +89,7 @@ static struct ahc_hard_error_entry ahc_hard_errors[] = {
90}; 89};
91static const u_int num_errors = ARRAY_SIZE(ahc_hard_errors); 90static const u_int num_errors = ARRAY_SIZE(ahc_hard_errors);
92 91
93static struct ahc_phase_table_entry ahc_phase_table[] = 92static const struct ahc_phase_table_entry ahc_phase_table[] =
94{ 93{
95 { P_DATAOUT, MSG_NOOP, "in Data-out phase" }, 94 { P_DATAOUT, MSG_NOOP, "in Data-out phase" },
96 { P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" }, 95 { P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" },
@@ -115,7 +114,7 @@ static const u_int num_phases = ARRAY_SIZE(ahc_phase_table) - 1;
115 * Provides a mapping of transfer periods in ns to the proper value to 114 * Provides a mapping of transfer periods in ns to the proper value to
116 * stick in the scsixfer reg. 115 * stick in the scsixfer reg.
117 */ 116 */
118static struct ahc_syncrate ahc_syncrates[] = 117static const struct ahc_syncrate ahc_syncrates[] =
119{ 118{
120 /* ultra2 fast/ultra period rate */ 119 /* ultra2 fast/ultra period rate */
121 { 0x42, 0x000, 9, "80.0" }, 120 { 0x42, 0x000, 9, "80.0" },
@@ -148,7 +147,7 @@ static struct ahc_tmode_tstate*
148static void ahc_free_tstate(struct ahc_softc *ahc, 147static void ahc_free_tstate(struct ahc_softc *ahc,
149 u_int scsi_id, char channel, int force); 148 u_int scsi_id, char channel, int force);
150#endif 149#endif
151static struct ahc_syncrate* 150static const struct ahc_syncrate*
152 ahc_devlimited_syncrate(struct ahc_softc *ahc, 151 ahc_devlimited_syncrate(struct ahc_softc *ahc,
153 struct ahc_initiator_tinfo *, 152 struct ahc_initiator_tinfo *,
154 u_int *period, 153 u_int *period,
@@ -204,9 +203,9 @@ static void ahc_setup_target_msgin(struct ahc_softc *ahc,
204#endif 203#endif
205 204
206static bus_dmamap_callback_t ahc_dmamap_cb; 205static bus_dmamap_callback_t ahc_dmamap_cb;
207static void ahc_build_free_scb_list(struct ahc_softc *ahc); 206static void ahc_build_free_scb_list(struct ahc_softc *ahc);
208static int ahc_init_scbdata(struct ahc_softc *ahc); 207static int ahc_init_scbdata(struct ahc_softc *ahc);
209static void ahc_fini_scbdata(struct ahc_softc *ahc); 208static void ahc_fini_scbdata(struct ahc_softc *ahc);
210static void ahc_qinfifo_requeue(struct ahc_softc *ahc, 209static void ahc_qinfifo_requeue(struct ahc_softc *ahc,
211 struct scb *prev_scb, 210 struct scb *prev_scb,
212 struct scb *scb); 211 struct scb *scb);
@@ -222,7 +221,7 @@ static void ahc_dumpseq(struct ahc_softc *ahc);
222#endif 221#endif
223static int ahc_loadseq(struct ahc_softc *ahc); 222static int ahc_loadseq(struct ahc_softc *ahc);
224static int ahc_check_patch(struct ahc_softc *ahc, 223static int ahc_check_patch(struct ahc_softc *ahc,
225 struct patch **start_patch, 224 const struct patch **start_patch,
226 u_int start_instr, u_int *skip_addr); 225 u_int start_instr, u_int *skip_addr);
227static void ahc_download_instr(struct ahc_softc *ahc, 226static void ahc_download_instr(struct ahc_softc *ahc,
228 u_int instrptr, uint8_t *dconsts); 227 u_int instrptr, uint8_t *dconsts);
@@ -237,11 +236,582 @@ static void ahc_update_scsiid(struct ahc_softc *ahc,
237static int ahc_handle_target_cmd(struct ahc_softc *ahc, 236static int ahc_handle_target_cmd(struct ahc_softc *ahc,
238 struct target_cmd *cmd); 237 struct target_cmd *cmd);
239#endif 238#endif
239
240static u_int ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl);
241static void ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl);
242static void ahc_busy_tcl(struct ahc_softc *ahc,
243 u_int tcl, u_int busyid);
244
245/************************** SCB and SCB queue management **********************/
246static void ahc_run_untagged_queues(struct ahc_softc *ahc);
247static void ahc_run_untagged_queue(struct ahc_softc *ahc,
248 struct scb_tailq *queue);
249
250/****************************** Initialization ********************************/
251static void ahc_alloc_scbs(struct ahc_softc *ahc);
252static void ahc_shutdown(void *arg);
253
254/*************************** Interrupt Services *******************************/
255static void ahc_clear_intstat(struct ahc_softc *ahc);
256static void ahc_run_qoutfifo(struct ahc_softc *ahc);
257#ifdef AHC_TARGET_MODE
258static void ahc_run_tqinfifo(struct ahc_softc *ahc, int paused);
259#endif
260static void ahc_handle_brkadrint(struct ahc_softc *ahc);
261static void ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat);
262static void ahc_handle_scsiint(struct ahc_softc *ahc,
263 u_int intstat);
264static void ahc_clear_critical_section(struct ahc_softc *ahc);
265
266/***************************** Error Recovery *********************************/
267static void ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb);
268static int ahc_abort_scbs(struct ahc_softc *ahc, int target,
269 char channel, int lun, u_int tag,
270 role_t role, uint32_t status);
271static void ahc_calc_residual(struct ahc_softc *ahc,
272 struct scb *scb);
273
274/*********************** Untagged Transaction Routines ************************/
275static inline void ahc_freeze_untagged_queues(struct ahc_softc *ahc);
276static inline void ahc_release_untagged_queues(struct ahc_softc *ahc);
277
278/*
279 * Block our completion routine from starting the next untagged
280 * transaction for this target or target lun.
281 */
282static inline void
283ahc_freeze_untagged_queues(struct ahc_softc *ahc)
284{
285 if ((ahc->flags & AHC_SCB_BTT) == 0)
286 ahc->untagged_queue_lock++;
287}
288
289/*
290 * Allow the next untagged transaction for this target or target lun
291 * to be executed. We use a counting semaphore to allow the lock
292 * to be acquired recursively. Once the count drops to zero, the
293 * transaction queues will be run.
294 */
295static inline void
296ahc_release_untagged_queues(struct ahc_softc *ahc)
297{
298 if ((ahc->flags & AHC_SCB_BTT) == 0) {
299 ahc->untagged_queue_lock--;
300 if (ahc->untagged_queue_lock == 0)
301 ahc_run_untagged_queues(ahc);
302 }
303}
304
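
Editor's note: the pairing rule for the two inlines above is the usual counting-semaphore one: every freeze is matched by exactly one release, nesting is safe, and only the release that returns the count to zero restarts the queues. A minimal standalone model of that behavior:

    #include <stdio.h>

    static int untagged_queue_lock;

    static void run_untagged_queues(void) { printf("queues running\n"); }

    static void freeze(void) { untagged_queue_lock++; }

    static void release(void)
    {
            if (--untagged_queue_lock == 0)
                    run_untagged_queues();
    }

    int main(void)
    {
            freeze();        /* outer caller */
            freeze();        /* nested path takes the lock again */
            release();       /* count 1: queues stay frozen */
            release();       /* count 0: queues run exactly once */
            return 0;
    }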
240/************************* Sequencer Execution Control ************************/ 305/************************* Sequencer Execution Control ************************/
241/* 306/*
242 * Restart the sequencer program from address zero 307 * Work around any chip bugs related to halting sequencer execution.
308 * On Ultra2 controllers, we must clear the CIOBUS stretch signal by
309 * reading a register that will set this signal and deassert it.
310 * Without this workaround, if the chip is paused by an interrupt or
311 * manual pause while accessing scb ram, accesses to certain registers
312 * will hang the system (infinite pci retries).
313 */
314static void
315ahc_pause_bug_fix(struct ahc_softc *ahc)
316{
317 if ((ahc->features & AHC_ULTRA2) != 0)
318 (void)ahc_inb(ahc, CCSCBCTL);
319}
320
321/*
322 * Determine whether the sequencer has halted code execution.
323 * Returns non-zero status if the sequencer is stopped.
324 */
325int
326ahc_is_paused(struct ahc_softc *ahc)
327{
328 return ((ahc_inb(ahc, HCNTRL) & PAUSE) != 0);
329}
330
331/*
332 * Request that the sequencer stop and wait, indefinitely, for it
333 * to stop. The sequencer will only acknowledge that it is paused
334 * once it has reached an instruction boundary and PAUSEDIS is
335 * cleared in the SEQCTL register. The sequencer may use PAUSEDIS
336 * for critical sections.
337 */
338void
339ahc_pause(struct ahc_softc *ahc)
340{
341 ahc_outb(ahc, HCNTRL, ahc->pause);
342
343 /*
344 * Since the sequencer can disable pausing in a critical section, we
345 * must loop until it actually stops.
346 */
347 while (ahc_is_paused(ahc) == 0)
348 ;
349
350 ahc_pause_bug_fix(ahc);
351}
352
353/*
354 * Allow the sequencer to continue program execution.
355 * We check here to ensure that no additional interrupt
356 * sources that would cause the sequencer to halt have been
357 * asserted. If, for example, a SCSI bus reset is detected
358 * while we are fielding a different, pausing, interrupt type,
359 * we don't want to release the sequencer before going back
360 * into our interrupt handler and dealing with this new
361 * condition.
362 */
363void
364ahc_unpause(struct ahc_softc *ahc)
365{
366 if ((ahc_inb(ahc, INTSTAT) & (SCSIINT | SEQINT | BRKADRINT)) == 0)
367 ahc_outb(ahc, HCNTRL, ahc->unpause);
368}
369
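
A hedged sketch of how these primitives pair up in practice, mirroring the pause-around-register-access pattern ahc_queue_scb uses below when AHC_AUTOPAUSE is absent. example_touch_seq_regs is a hypothetical helper, not driver code.

static void
example_touch_seq_regs(struct ahc_softc *ahc)	/* hypothetical */
{
	int was_paused = ahc_is_paused(ahc);

	if (!was_paused)
		ahc_pause(ahc);		/* spins until the sequencer acks */

	/* ... sequencer-owned registers may be accessed safely here ... */

	if (!was_paused)
		ahc_unpause(ahc);	/* released only if no halting intr pends */
}
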
370/************************** Memory mapping routines ***************************/
371static struct ahc_dma_seg *
372ahc_sg_bus_to_virt(struct scb *scb, uint32_t sg_busaddr)
373{
374 int sg_index;
375
376 sg_index = (sg_busaddr - scb->sg_list_phys)/sizeof(struct ahc_dma_seg);
377 /* sg_list_phys points to entry 1, not 0 */
378 sg_index++;
379
380 return (&scb->sg_list[sg_index]);
381}
382
383static uint32_t
384ahc_sg_virt_to_bus(struct scb *scb, struct ahc_dma_seg *sg)
385{
386 int sg_index;
387
388 /* sg_list_phys points to entry 1, not 0 */
389 sg_index = sg - &scb->sg_list[1];
390
391 return (scb->sg_list_phys + (sg_index * sizeof(*scb->sg_list)));
392}
393
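
Because sg_list_phys corresponds to sg_list[1] rather than sg_list[0], the two translations above must apply the same off-by-one in opposite directions. A standalone model of that arithmetic, with hypothetical names:

#include <assert.h>
#include <stdint.h>

struct seg { uint32_t addr; uint32_t len; };

static struct seg list[8];
static uint32_t   list_phys;	/* bus address of list[1], as in the driver */

static struct seg *
bus_to_virt(uint32_t busaddr)
{
	return (&list[(busaddr - list_phys) / sizeof(struct seg) + 1]);
}

static uint32_t
virt_to_bus(struct seg *sg)
{
	return (list_phys + (uint32_t)(sg - &list[1]) * sizeof(struct seg));
}

int main(void)
{
	list_phys = 0x1000 + sizeof(struct seg);

	/* the translations must be inverses for every usable entry */
	for (int i = 1; i < 8; i++)
		assert(bus_to_virt(virt_to_bus(&list[i])) == &list[i]);
	return 0;
}
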
394static uint32_t
395ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index)
396{
397 return (ahc->scb_data->hscb_busaddr
398 + (sizeof(struct hardware_scb) * index));
399}
400
401static void
402ahc_sync_scb(struct ahc_softc *ahc, struct scb *scb, int op)
403{
404 ahc_dmamap_sync(ahc, ahc->scb_data->hscb_dmat,
405 ahc->scb_data->hscb_dmamap,
406 /*offset*/(scb->hscb - ahc->hscbs) * sizeof(*scb->hscb),
407 /*len*/sizeof(*scb->hscb), op);
408}
409
410void
411ahc_sync_sglist(struct ahc_softc *ahc, struct scb *scb, int op)
412{
413 if (scb->sg_count == 0)
414 return;
415
416 ahc_dmamap_sync(ahc, ahc->scb_data->sg_dmat, scb->sg_map->sg_dmamap,
417 /*offset*/(scb->sg_list - scb->sg_map->sg_vaddr)
418 * sizeof(struct ahc_dma_seg),
419 /*len*/sizeof(struct ahc_dma_seg) * scb->sg_count, op);
420}
421
422#ifdef AHC_TARGET_MODE
423static uint32_t
424ahc_targetcmd_offset(struct ahc_softc *ahc, u_int index)
425{
426 return (((uint8_t *)&ahc->targetcmds[index]) - ahc->qoutfifo);
427}
428#endif
429
430/*********************** Miscellaneous Support Functions **********************/
431/*
432 * Determine whether the sequencer reported a residual
433 * for this SCB/transaction.
434 */
435static void
436ahc_update_residual(struct ahc_softc *ahc, struct scb *scb)
437{
438 uint32_t sgptr;
439
440 sgptr = ahc_le32toh(scb->hscb->sgptr);
441 if ((sgptr & SG_RESID_VALID) != 0)
442 ahc_calc_residual(ahc, scb);
443}
444
445/*
446 * Return pointers to the transfer negotiation information
447 * for the specified our_id/remote_id pair.
448 */
449struct ahc_initiator_tinfo *
450ahc_fetch_transinfo(struct ahc_softc *ahc, char channel, u_int our_id,
451 u_int remote_id, struct ahc_tmode_tstate **tstate)
452{
453 /*
454 * Transfer data structures are stored from the perspective
455 * of the target role. Since the parameters for a connection
456 * in the initiator role to a given target are the same as
457 * when the roles are reversed, we pretend we are the target.
458 */
459 if (channel == 'B')
460 our_id += 8;
461 *tstate = ahc->enabled_targets[our_id];
462 return (&(*tstate)->transinfo[remote_id]);
463}
464
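
The channel adjustment above is just an index split: channel 'A' targets occupy slots 0-7 of enabled_targets and channel 'B' targets slots 8-15. A tiny check of that mapping (tinfo_index is a hypothetical name):

#include <assert.h>

static unsigned int
tinfo_index(char channel, unsigned int our_id)
{
	return (channel == 'B') ? our_id + 8 : our_id;
}

int main(void)
{
	assert(tinfo_index('A', 3) == 3);
	assert(tinfo_index('B', 3) == 11);	/* second bank of eight */
	return 0;
}
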
465uint16_t
466ahc_inw(struct ahc_softc *ahc, u_int port)
467{
468 uint16_t r = ahc_inb(ahc, port+1) << 8;
469 return r | ahc_inb(ahc, port);
470}
471
472void
473ahc_outw(struct ahc_softc *ahc, u_int port, u_int value)
474{
475 ahc_outb(ahc, port, value & 0xFF);
476 ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
477}
478
479uint32_t
480ahc_inl(struct ahc_softc *ahc, u_int port)
481{
482 return ((ahc_inb(ahc, port))
483 | (ahc_inb(ahc, port+1) << 8)
484 | (ahc_inb(ahc, port+2) << 16)
485 | (ahc_inb(ahc, port+3) << 24));
486}
487
488void
489ahc_outl(struct ahc_softc *ahc, u_int port, uint32_t value)
490{
491 ahc_outb(ahc, port, (value) & 0xFF);
492 ahc_outb(ahc, port+1, ((value) >> 8) & 0xFF);
493 ahc_outb(ahc, port+2, ((value) >> 16) & 0xFF);
494 ahc_outb(ahc, port+3, ((value) >> 24) & 0xFF);
495}
496
497uint64_t
498ahc_inq(struct ahc_softc *ahc, u_int port)
499{
500 return ((ahc_inb(ahc, port))
501 | (ahc_inb(ahc, port+1) << 8)
502 | (ahc_inb(ahc, port+2) << 16)
503 | (ahc_inb(ahc, port+3) << 24)
504 | (((uint64_t)ahc_inb(ahc, port+4)) << 32)
505 | (((uint64_t)ahc_inb(ahc, port+5)) << 40)
506 | (((uint64_t)ahc_inb(ahc, port+6)) << 48)
507 | (((uint64_t)ahc_inb(ahc, port+7)) << 56));
508}
509
510void
511ahc_outq(struct ahc_softc *ahc, u_int port, uint64_t value)
512{
513 ahc_outb(ahc, port, value & 0xFF);
514 ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
515 ahc_outb(ahc, port+2, (value >> 16) & 0xFF);
516 ahc_outb(ahc, port+3, (value >> 24) & 0xFF);
517 ahc_outb(ahc, port+4, (value >> 32) & 0xFF);
518 ahc_outb(ahc, port+5, (value >> 40) & 0xFF);
519 ahc_outb(ahc, port+6, (value >> 48) & 0xFF);
520 ahc_outb(ahc, port+7, (value >> 56) & 0xFF);
521}
522
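
All of the wide accessors above compose values bytewise, least-significant byte at the lowest port (note that ahc_inw reads port+1 before port, while the others go low to high). A user-space model of the 64-bit pair against a fake register file:

#include <assert.h>
#include <stdint.h>

static uint8_t regs[8];			/* fake byte-wide register window */

static uint8_t rd(unsigned int p)            { return regs[p]; }
static void    wr(unsigned int p, uint8_t v) { regs[p] = v; }

static void
model_outq(unsigned int port, uint64_t value)
{
	for (int i = 0; i < 8; i++)
		wr(port + i, (value >> (8 * i)) & 0xFF);	/* LSB first */
}

static uint64_t
model_inq(unsigned int port)
{
	uint64_t value = 0;

	for (int i = 0; i < 8; i++)
		value |= (uint64_t)rd(port + i) << (8 * i);
	return value;
}

int main(void)
{
	model_outq(0, 0x1122334455667788ULL);
	assert(model_inq(0) == 0x1122334455667788ULL);
	assert(regs[0] == 0x88 && regs[7] == 0x11);	/* little-endian lanes */
	return 0;
}
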
523/*
524 * Get a free scb. If there are none, see if we can allocate a new SCB.
525 */
526struct scb *
527ahc_get_scb(struct ahc_softc *ahc)
528{
529 struct scb *scb;
530
531 if ((scb = SLIST_FIRST(&ahc->scb_data->free_scbs)) == NULL) {
532 ahc_alloc_scbs(ahc);
533 scb = SLIST_FIRST(&ahc->scb_data->free_scbs);
534 if (scb == NULL)
535 return (NULL);
536 }
537 SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links.sle);
538 return (scb);
539}
540
541/*
542 * Return an SCB resource to the free list.
543 */
544void
545ahc_free_scb(struct ahc_softc *ahc, struct scb *scb)
546{
547 struct hardware_scb *hscb;
548
549 hscb = scb->hscb;
550 /* Clean up for the next user */
551 ahc->scb_data->scbindex[hscb->tag] = NULL;
552 scb->flags = SCB_FREE;
553 hscb->control = 0;
554
555 SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, scb, links.sle);
556
557 /* Notify the OSM that a resource is now available. */
558 ahc_platform_scb_free(ahc, scb);
559}
560
561struct scb *
562ahc_lookup_scb(struct ahc_softc *ahc, u_int tag)
563{
564 struct scb* scb;
565
566 scb = ahc->scb_data->scbindex[tag];
567 if (scb != NULL)
568 ahc_sync_scb(ahc, scb,
569 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
570 return (scb);
571}
572
573static void
574ahc_swap_with_next_hscb(struct ahc_softc *ahc, struct scb *scb)
575{
576 struct hardware_scb *q_hscb;
577 u_int saved_tag;
578
579 /*
580 * Our queuing method is a bit tricky. The card
581 * knows in advance which HSCB to download, and we
582 * can't disappoint it. To achieve this, the next
583 * SCB to download is saved off in ahc->next_queued_scb.
584 * When we are called to queue "an arbitrary scb",
585 * we copy the contents of the incoming HSCB to the one
586 * the sequencer knows about, swap HSCB pointers and
587 * finally assign the SCB to the tag indexed location
588 * in the scb_array. This makes sure that we can still
589 * locate the correct SCB by SCB_TAG.
590 */
591 q_hscb = ahc->next_queued_scb->hscb;
592 saved_tag = q_hscb->tag;
593 memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
594 if ((scb->flags & SCB_CDB32_PTR) != 0) {
595 q_hscb->shared_data.cdb_ptr =
596 ahc_htole32(ahc_hscb_busaddr(ahc, q_hscb->tag)
597 + offsetof(struct hardware_scb, cdb32));
598 }
599 q_hscb->tag = saved_tag;
600 q_hscb->next = scb->hscb->tag;
601
602 /* Now swap HSCB pointers. */
603 ahc->next_queued_scb->hscb = scb->hscb;
604 scb->hscb = q_hscb;
605
606 /* Now define the mapping from tag to SCB in the scbindex */
607 ahc->scb_data->scbindex[scb->hscb->tag] = scb;
608}
609
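
The swap comment above compresses three invariants: the card's pre-selected slot receives the new command, that slot keeps its own tag, and the tag-to-SCB index still finds the queued SCB. A stripped-down model follows; driver details such as the 'next' link and the CDB pointer fixup are elided.

#include <assert.h>
#include <string.h>

struct hscb { unsigned int tag; char payload[16]; };
struct scb  { struct hscb *hscb; };

static struct scb *scbindex[4];

static void
swap_with_next(struct scb *next_queued, struct scb *scb)
{
	struct hscb *q_hscb = next_queued->hscb;
	unsigned int saved_tag = q_hscb->tag;

	memcpy(q_hscb, scb->hscb, sizeof(*q_hscb));	/* card fetches this slot */
	q_hscb->tag = saved_tag;			/* slot keeps its own tag */

	next_queued->hscb = scb->hscb;			/* swap backing storage */
	scb->hscb = q_hscb;
	scbindex[scb->hscb->tag] = scb;			/* lookup by tag still works */
}

int main(void)
{
	struct hscb h0 = { .tag = 0 }, h1 = { .tag = 1 };
	struct scb next_queued = { &h0 }, incoming = { &h1 };

	strcpy(h1.payload, "cmd");
	swap_with_next(&next_queued, &incoming);

	assert(incoming.hscb == &h0);			/* owns the card's slot   */
	assert(scbindex[0] == &incoming);		/* found under its new tag */
	assert(strcmp(h0.payload, "cmd") == 0);		/* card sees the command  */
	return 0;
}
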
610/*
611 * Tell the sequencer about a new transaction to execute.
243 */ 612 */
244void 613void
614ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb)
615{
616 ahc_swap_with_next_hscb(ahc, scb);
617
618 if (scb->hscb->tag == SCB_LIST_NULL
619 || scb->hscb->next == SCB_LIST_NULL)
620 panic("Attempt to queue invalid SCB tag %x:%x\n",
621 scb->hscb->tag, scb->hscb->next);
622
623 /*
 624	 * Set up data "oddness".
625 */
626 scb->hscb->lun &= LID;
627 if (ahc_get_transfer_length(scb) & 0x1)
628 scb->hscb->lun |= SCB_XFERLEN_ODD;
629
630 /*
631 * Keep a history of SCBs we've downloaded in the qinfifo.
632 */
633 ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
634
635 /*
636 * Make sure our data is consistent from the
637 * perspective of the adapter.
638 */
639 ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
640
641 /* Tell the adapter about the newly queued SCB */
642 if ((ahc->features & AHC_QUEUE_REGS) != 0) {
643 ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
644 } else {
645 if ((ahc->features & AHC_AUTOPAUSE) == 0)
646 ahc_pause(ahc);
647 ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
648 if ((ahc->features & AHC_AUTOPAUSE) == 0)
649 ahc_unpause(ahc);
650 }
651}
652
653struct scsi_sense_data *
654ahc_get_sense_buf(struct ahc_softc *ahc, struct scb *scb)
655{
656 int offset;
657
658 offset = scb - ahc->scb_data->scbarray;
659 return (&ahc->scb_data->sense[offset]);
660}
661
662static uint32_t
663ahc_get_sense_bufaddr(struct ahc_softc *ahc, struct scb *scb)
664{
665 int offset;
666
667 offset = scb - ahc->scb_data->scbarray;
668 return (ahc->scb_data->sense_busaddr
669 + (offset * sizeof(struct scsi_sense_data)));
670}
671
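
Both sense helpers above rely on the same trick: the SCB's position within scbarray indexes a parallel array of sense buffers, so no per-SCB pointer needs to be stored. In miniature (hypothetical names):

#include <assert.h>

struct model_scb   { int dummy; };
struct model_sense { char data[32]; };

static struct model_scb   scbarray[4];
static struct model_sense sense_pool[4];

static struct model_sense *
get_sense(struct model_scb *scb)
{
	return &sense_pool[scb - scbarray];	/* position, not pointer */
}

int main(void)
{
	assert(get_sense(&scbarray[2]) == &sense_pool[2]);
	return 0;
}
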
672/************************** Interrupt Processing ******************************/
673static void
674ahc_sync_qoutfifo(struct ahc_softc *ahc, int op)
675{
676 ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
677 /*offset*/0, /*len*/256, op);
678}
679
680static void
681ahc_sync_tqinfifo(struct ahc_softc *ahc, int op)
682{
683#ifdef AHC_TARGET_MODE
684 if ((ahc->flags & AHC_TARGETROLE) != 0) {
685 ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
686 ahc->shared_data_dmamap,
687 ahc_targetcmd_offset(ahc, 0),
688 sizeof(struct target_cmd) * AHC_TMODE_CMDS,
689 op);
690 }
691#endif
692}
693
694/*
695 * See if the firmware has posted any completed commands
696 * into our in-core command complete fifos.
697 */
698#define AHC_RUN_QOUTFIFO 0x1
699#define AHC_RUN_TQINFIFO 0x2
700static u_int
701ahc_check_cmdcmpltqueues(struct ahc_softc *ahc)
702{
703 u_int retval;
704
705 retval = 0;
706 ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
707 /*offset*/ahc->qoutfifonext, /*len*/1,
708 BUS_DMASYNC_POSTREAD);
709 if (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL)
710 retval |= AHC_RUN_QOUTFIFO;
711#ifdef AHC_TARGET_MODE
712 if ((ahc->flags & AHC_TARGETROLE) != 0
713 && (ahc->flags & AHC_TQINFIFO_BLOCKED) == 0) {
714 ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
715 ahc->shared_data_dmamap,
 716			       ahc_targetcmd_offset(ahc, ahc->tqinfifonext),
717 /*len*/sizeof(struct target_cmd),
718 BUS_DMASYNC_POSTREAD);
719 if (ahc->targetcmds[ahc->tqinfifonext].cmd_valid != 0)
720 retval |= AHC_RUN_TQINFIFO;
721 }
722#endif
723 return (retval);
724}
725
726/*
727 * Catch an interrupt from the adapter
728 */
729int
730ahc_intr(struct ahc_softc *ahc)
731{
732 u_int intstat;
733
734 if ((ahc->pause & INTEN) == 0) {
735 /*
736 * Our interrupt is not enabled on the chip
737 * and may be disabled for re-entrancy reasons,
738 * so just return. This is likely just a shared
739 * interrupt.
740 */
741 return (0);
742 }
743 /*
744 * Instead of directly reading the interrupt status register,
745 * infer the cause of the interrupt by checking our in-core
746 * completion queues. This avoids a costly PCI bus read in
747 * most cases.
748 */
749 if ((ahc->flags & (AHC_ALL_INTERRUPTS|AHC_EDGE_INTERRUPT)) == 0
750 && (ahc_check_cmdcmpltqueues(ahc) != 0))
751 intstat = CMDCMPLT;
752 else {
753 intstat = ahc_inb(ahc, INTSTAT);
754 }
755
756 if ((intstat & INT_PEND) == 0) {
757#if AHC_PCI_CONFIG > 0
758 if (ahc->unsolicited_ints > 500) {
759 ahc->unsolicited_ints = 0;
760 if ((ahc->chip & AHC_PCI) != 0
761 && (ahc_inb(ahc, ERROR) & PCIERRSTAT) != 0)
762 ahc->bus_intr(ahc);
763 }
764#endif
765 ahc->unsolicited_ints++;
766 return (0);
767 }
768 ahc->unsolicited_ints = 0;
769
770 if (intstat & CMDCMPLT) {
771 ahc_outb(ahc, CLRINT, CLRCMDINT);
772
773 /*
774 * Ensure that the chip sees that we've cleared
775 * this interrupt before we walk the output fifo.
776 * Otherwise, we may, due to posted bus writes,
777 * clear the interrupt after we finish the scan,
778 * and after the sequencer has added new entries
779 * and asserted the interrupt again.
780 */
781 ahc_flush_device_writes(ahc);
782 ahc_run_qoutfifo(ahc);
783#ifdef AHC_TARGET_MODE
784 if ((ahc->flags & AHC_TARGETROLE) != 0)
785 ahc_run_tqinfifo(ahc, /*paused*/FALSE);
786#endif
787 }
788
789 /*
790 * Handle statuses that may invalidate our cached
791 * copy of INTSTAT separately.
792 */
793 if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0) {
794 /* Hot eject. Do nothing */
795 } else if (intstat & BRKADRINT) {
796 ahc_handle_brkadrint(ahc);
797 } else if ((intstat & (SEQINT|SCSIINT)) != 0) {
798
799 ahc_pause_bug_fix(ahc);
800
801 if ((intstat & SEQINT) != 0)
802 ahc_handle_seqint(ahc, intstat);
803
804 if ((intstat & SCSIINT) != 0)
805 ahc_handle_scsiint(ahc, intstat);
806 }
807 return (1);
808}
809
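
ahc_intr's 0/1 return exists so a shared-interrupt handler can report whether this adapter was really the source. A hedged sketch of the platform wiring (locking elided; example_isr is a hypothetical wrapper, not the driver's actual ahc_linux_isr):

#include <linux/interrupt.h>

static irqreturn_t
example_isr(int irq, void *dev_id)		/* hypothetical wrapper */
{
	struct ahc_softc *ahc = dev_id;

	/* 1 -> we serviced it; 0 -> likely another device on a shared line */
	return ahc_intr(ahc) ? IRQ_HANDLED : IRQ_NONE;
}
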
810/************************* Sequencer Execution Control ************************/
811/*
812 * Restart the sequencer program from address zero
813 */
814static void
245ahc_restart(struct ahc_softc *ahc) 815ahc_restart(struct ahc_softc *ahc)
246{ 816{
247 817
@@ -302,7 +872,7 @@ ahc_restart(struct ahc_softc *ahc)
302} 872}
303 873
304/************************* Input/Output Queues ********************************/ 874/************************* Input/Output Queues ********************************/
305void 875static void
306ahc_run_qoutfifo(struct ahc_softc *ahc) 876ahc_run_qoutfifo(struct ahc_softc *ahc)
307{ 877{
308 struct scb *scb; 878 struct scb *scb;
@@ -349,7 +919,7 @@ ahc_run_qoutfifo(struct ahc_softc *ahc)
349 } 919 }
350} 920}
351 921
352void 922static void
353ahc_run_untagged_queues(struct ahc_softc *ahc) 923ahc_run_untagged_queues(struct ahc_softc *ahc)
354{ 924{
355 int i; 925 int i;
@@ -358,7 +928,7 @@ ahc_run_untagged_queues(struct ahc_softc *ahc)
358 ahc_run_untagged_queue(ahc, &ahc->untagged_queues[i]); 928 ahc_run_untagged_queue(ahc, &ahc->untagged_queues[i]);
359} 929}
360 930
361void 931static void
362ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue) 932ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue)
363{ 933{
364 struct scb *scb; 934 struct scb *scb;
@@ -374,7 +944,7 @@ ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue)
374} 944}
375 945
376/************************* Interrupt Handling *********************************/ 946/************************* Interrupt Handling *********************************/
377void 947static void
378ahc_handle_brkadrint(struct ahc_softc *ahc) 948ahc_handle_brkadrint(struct ahc_softc *ahc)
379{ 949{
380 /* 950 /*
@@ -403,7 +973,7 @@ ahc_handle_brkadrint(struct ahc_softc *ahc)
403 ahc_shutdown(ahc); 973 ahc_shutdown(ahc);
404} 974}
405 975
406void 976static void
407ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat) 977ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
408{ 978{
409 struct scb *scb; 979 struct scb *scb;
@@ -954,7 +1524,7 @@ unpause:
954 ahc_unpause(ahc); 1524 ahc_unpause(ahc);
955} 1525}
956 1526
957void 1527static void
958ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat) 1528ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
959{ 1529{
960 u_int scb_index; 1530 u_int scb_index;
@@ -1407,7 +1977,7 @@ ahc_force_renegotiation(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
1407} 1977}
1408 1978
1409#define AHC_MAX_STEPS 2000 1979#define AHC_MAX_STEPS 2000
1410void 1980static void
1411ahc_clear_critical_section(struct ahc_softc *ahc) 1981ahc_clear_critical_section(struct ahc_softc *ahc)
1412{ 1982{
1413 int stepping; 1983 int stepping;
@@ -1500,7 +2070,7 @@ ahc_clear_critical_section(struct ahc_softc *ahc)
1500/* 2070/*
1501 * Clear any pending interrupt status. 2071 * Clear any pending interrupt status.
1502 */ 2072 */
1503void 2073static void
1504ahc_clear_intstat(struct ahc_softc *ahc) 2074ahc_clear_intstat(struct ahc_softc *ahc)
1505{ 2075{
1506 /* Clear any interrupt conditions this may have caused */ 2076 /* Clear any interrupt conditions this may have caused */
@@ -1519,7 +2089,8 @@ ahc_clear_intstat(struct ahc_softc *ahc)
1519uint32_t ahc_debug = AHC_DEBUG_OPTS; 2089uint32_t ahc_debug = AHC_DEBUG_OPTS;
1520#endif 2090#endif
1521 2091
1522void 2092#if 0 /* unused */
2093static void
1523ahc_print_scb(struct scb *scb) 2094ahc_print_scb(struct scb *scb)
1524{ 2095{
1525 int i; 2096 int i;
@@ -1551,6 +2122,7 @@ ahc_print_scb(struct scb *scb)
1551 } 2122 }
1552 } 2123 }
1553} 2124}
2125#endif
1554 2126
1555/************************* Transfer Negotiation *******************************/ 2127/************************* Transfer Negotiation *******************************/
1556/* 2128/*
@@ -1634,7 +2206,7 @@ ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force)
1634 * by the capabilities of the bus connectivity of and sync settings for 2206 * by the capabilities of the bus connectivity of and sync settings for
1635 * the target. 2207 * the target.
1636 */ 2208 */
1637struct ahc_syncrate * 2209const struct ahc_syncrate *
1638ahc_devlimited_syncrate(struct ahc_softc *ahc, 2210ahc_devlimited_syncrate(struct ahc_softc *ahc,
1639 struct ahc_initiator_tinfo *tinfo, 2211 struct ahc_initiator_tinfo *tinfo,
1640 u_int *period, u_int *ppr_options, role_t role) 2212 u_int *period, u_int *ppr_options, role_t role)
@@ -1689,11 +2261,11 @@ ahc_devlimited_syncrate(struct ahc_softc *ahc,
1689 * Return the period and offset that should be sent to the target 2261 * Return the period and offset that should be sent to the target
1690 * if this was the beginning of an SDTR. 2262 * if this was the beginning of an SDTR.
1691 */ 2263 */
1692struct ahc_syncrate * 2264const struct ahc_syncrate *
1693ahc_find_syncrate(struct ahc_softc *ahc, u_int *period, 2265ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
1694 u_int *ppr_options, u_int maxsync) 2266 u_int *ppr_options, u_int maxsync)
1695{ 2267{
1696 struct ahc_syncrate *syncrate; 2268 const struct ahc_syncrate *syncrate;
1697 2269
1698 if ((ahc->features & AHC_DT) == 0) 2270 if ((ahc->features & AHC_DT) == 0)
1699 *ppr_options &= ~MSG_EXT_PPR_DT_REQ; 2271 *ppr_options &= ~MSG_EXT_PPR_DT_REQ;
@@ -1768,7 +2340,7 @@ ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
1768u_int 2340u_int
1769ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync) 2341ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync)
1770{ 2342{
1771 struct ahc_syncrate *syncrate; 2343 const struct ahc_syncrate *syncrate;
1772 2344
1773 if ((ahc->features & AHC_ULTRA2) != 0) 2345 if ((ahc->features & AHC_ULTRA2) != 0)
1774 scsirate &= SXFR_ULTRA2; 2346 scsirate &= SXFR_ULTRA2;
@@ -1806,10 +2378,10 @@ ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync)
1806 * Truncate the given synchronous offset to a value the 2378 * Truncate the given synchronous offset to a value the
1807 * current adapter type and syncrate are capable of. 2379 * current adapter type and syncrate are capable of.
1808 */ 2380 */
1809void 2381static void
1810ahc_validate_offset(struct ahc_softc *ahc, 2382ahc_validate_offset(struct ahc_softc *ahc,
1811 struct ahc_initiator_tinfo *tinfo, 2383 struct ahc_initiator_tinfo *tinfo,
1812 struct ahc_syncrate *syncrate, 2384 const struct ahc_syncrate *syncrate,
1813 u_int *offset, int wide, role_t role) 2385 u_int *offset, int wide, role_t role)
1814{ 2386{
1815 u_int maxoffset; 2387 u_int maxoffset;
@@ -1838,7 +2410,7 @@ ahc_validate_offset(struct ahc_softc *ahc,
1838 * Truncate the given transfer width parameter to a value the 2410 * Truncate the given transfer width parameter to a value the
1839 * current adapter type is capable of. 2411 * current adapter type is capable of.
1840 */ 2412 */
1841void 2413static void
1842ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo, 2414ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo,
1843 u_int *bus_width, role_t role) 2415 u_int *bus_width, role_t role)
1844{ 2416{
@@ -1913,7 +2485,7 @@ ahc_update_neg_request(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
1913 */ 2485 */
1914void 2486void
1915ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2487ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
1916 struct ahc_syncrate *syncrate, u_int period, 2488 const struct ahc_syncrate *syncrate, u_int period,
1917 u_int offset, u_int ppr_options, u_int type, int paused) 2489 u_int offset, u_int ppr_options, u_int type, int paused)
1918{ 2490{
1919 struct ahc_initiator_tinfo *tinfo; 2491 struct ahc_initiator_tinfo *tinfo;
@@ -2220,11 +2792,11 @@ ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
2220 role); 2792 role);
2221} 2793}
2222 2794
2223struct ahc_phase_table_entry* 2795static const struct ahc_phase_table_entry*
2224ahc_lookup_phase_entry(int phase) 2796ahc_lookup_phase_entry(int phase)
2225{ 2797{
2226 struct ahc_phase_table_entry *entry; 2798 const struct ahc_phase_table_entry *entry;
2227 struct ahc_phase_table_entry *last_entry; 2799 const struct ahc_phase_table_entry *last_entry;
2228 2800
2229 /* 2801 /*
2230 * num_phases doesn't include the default entry which 2802 * num_phases doesn't include the default entry which
@@ -2390,7 +2962,7 @@ ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
2390 */ 2962 */
2391 struct ahc_initiator_tinfo *tinfo; 2963 struct ahc_initiator_tinfo *tinfo;
2392 struct ahc_tmode_tstate *tstate; 2964 struct ahc_tmode_tstate *tstate;
2393 struct ahc_syncrate *rate; 2965 const struct ahc_syncrate *rate;
2394 int dowide; 2966 int dowide;
2395 int dosync; 2967 int dosync;
2396 int doppr; 2968 int doppr;
@@ -2655,7 +3227,7 @@ proto_violation_reset:
2655 */ 3227 */
2656static void 3228static void
2657ahc_handle_message_phase(struct ahc_softc *ahc) 3229ahc_handle_message_phase(struct ahc_softc *ahc)
2658{ 3230{
2659 struct ahc_devinfo devinfo; 3231 struct ahc_devinfo devinfo;
2660 u_int bus_phase; 3232 u_int bus_phase;
2661 int end_session; 3233 int end_session;
@@ -3056,7 +3628,7 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3056 switch (ahc->msgin_buf[2]) { 3628 switch (ahc->msgin_buf[2]) {
3057 case MSG_EXT_SDTR: 3629 case MSG_EXT_SDTR:
3058 { 3630 {
3059 struct ahc_syncrate *syncrate; 3631 const struct ahc_syncrate *syncrate;
3060 u_int period; 3632 u_int period;
3061 u_int ppr_options; 3633 u_int ppr_options;
3062 u_int offset; 3634 u_int offset;
@@ -3231,7 +3803,7 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3231 } 3803 }
3232 case MSG_EXT_PPR: 3804 case MSG_EXT_PPR:
3233 { 3805 {
3234 struct ahc_syncrate *syncrate; 3806 const struct ahc_syncrate *syncrate;
3235 u_int period; 3807 u_int period;
3236 u_int offset; 3808 u_int offset;
3237 u_int bus_width; 3809 u_int bus_width;
@@ -3984,7 +4556,7 @@ ahc_free(struct ahc_softc *ahc)
3984 return; 4556 return;
3985} 4557}
3986 4558
3987void 4559static void
3988ahc_shutdown(void *arg) 4560ahc_shutdown(void *arg)
3989{ 4561{
3990 struct ahc_softc *ahc; 4562 struct ahc_softc *ahc;
@@ -4388,7 +4960,7 @@ ahc_fini_scbdata(struct ahc_softc *ahc)
4388 free(scb_data->scbarray, M_DEVBUF); 4960 free(scb_data->scbarray, M_DEVBUF);
4389} 4961}
4390 4962
4391void 4963static void
4392ahc_alloc_scbs(struct ahc_softc *ahc) 4964ahc_alloc_scbs(struct ahc_softc *ahc)
4393{ 4965{
4394 struct scb_data *scb_data; 4966 struct scb_data *scb_data;
@@ -5121,7 +5693,7 @@ ahc_resume(struct ahc_softc *ahc)
5121 * Return the untagged transaction id for a given target/channel lun. 5693 * Return the untagged transaction id for a given target/channel lun.
5122 * Optionally, clear the entry. 5694 * Optionally, clear the entry.
5123 */ 5695 */
5124u_int 5696static u_int
5125ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl) 5697ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl)
5126{ 5698{
5127 u_int scbid; 5699 u_int scbid;
@@ -5142,7 +5714,7 @@ ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl)
5142 return (scbid); 5714 return (scbid);
5143} 5715}
5144 5716
5145void 5717static void
5146ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl) 5718ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl)
5147{ 5719{
5148 u_int target_offset; 5720 u_int target_offset;
@@ -5160,7 +5732,7 @@ ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl)
5160 } 5732 }
5161} 5733}
5162 5734
5163void 5735static void
5164ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int scbid) 5736ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int scbid)
5165{ 5737{
5166 u_int target_offset; 5738 u_int target_offset;
@@ -5215,7 +5787,7 @@ ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target,
5215 return match; 5787 return match;
5216} 5788}
5217 5789
5218void 5790static void
5219ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb) 5791ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb)
5220{ 5792{
5221 int target; 5793 int target;
@@ -5707,7 +6279,7 @@ ahc_add_curscb_to_free_list(struct ahc_softc *ahc)
5707 */ 6279 */
5708static u_int 6280static u_int
5709ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev) 6281ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev)
5710{ 6282{
5711 u_int curscb, next; 6283 u_int curscb, next;
5712 6284
5713 /* 6285 /*
@@ -5756,7 +6328,7 @@ ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev)
5756 * been modified from CAM_REQ_INPROG. This routine assumes that the sequencer 6328 * been modified from CAM_REQ_INPROG. This routine assumes that the sequencer
5757 * is paused before it is called. 6329 * is paused before it is called.
5758 */ 6330 */
5759int 6331static int
5760ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel, 6332ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel,
5761 int lun, u_int tag, role_t role, uint32_t status) 6333 int lun, u_int tag, role_t role, uint32_t status)
5762{ 6334{
@@ -6078,7 +6650,7 @@ ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset)
6078/* 6650/*
6079 * Calculate the residual for a just completed SCB. 6651 * Calculate the residual for a just completed SCB.
6080 */ 6652 */
6081void 6653static void
6082ahc_calc_residual(struct ahc_softc *ahc, struct scb *scb) 6654ahc_calc_residual(struct ahc_softc *ahc, struct scb *scb)
6083{ 6655{
6084 struct hardware_scb *hscb; 6656 struct hardware_scb *hscb;
@@ -6279,7 +6851,7 @@ ahc_loadseq(struct ahc_softc *ahc)
6279 struct cs cs_table[num_critical_sections]; 6851 struct cs cs_table[num_critical_sections];
6280 u_int begin_set[num_critical_sections]; 6852 u_int begin_set[num_critical_sections];
6281 u_int end_set[num_critical_sections]; 6853 u_int end_set[num_critical_sections];
6282 struct patch *cur_patch; 6854 const struct patch *cur_patch;
6283 u_int cs_count; 6855 u_int cs_count;
6284 u_int cur_cs; 6856 u_int cur_cs;
6285 u_int i; 6857 u_int i;
@@ -6384,11 +6956,11 @@ ahc_loadseq(struct ahc_softc *ahc)
6384} 6956}
6385 6957
6386static int 6958static int
6387ahc_check_patch(struct ahc_softc *ahc, struct patch **start_patch, 6959ahc_check_patch(struct ahc_softc *ahc, const struct patch **start_patch,
6388 u_int start_instr, u_int *skip_addr) 6960 u_int start_instr, u_int *skip_addr)
6389{ 6961{
6390 struct patch *cur_patch; 6962 const struct patch *cur_patch;
6391 struct patch *last_patch; 6963 const struct patch *last_patch;
6392 u_int num_patches; 6964 u_int num_patches;
6393 6965
6394 num_patches = ARRAY_SIZE(patches); 6966 num_patches = ARRAY_SIZE(patches);
@@ -6447,7 +7019,7 @@ ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts)
6447 case AIC_OP_JE: 7019 case AIC_OP_JE:
6448 case AIC_OP_JZ: 7020 case AIC_OP_JZ:
6449 { 7021 {
6450 struct patch *cur_patch; 7022 const struct patch *cur_patch;
6451 int address_offset; 7023 int address_offset;
6452 u_int address; 7024 u_int address;
6453 u_int skip_addr; 7025 u_int skip_addr;
@@ -6545,7 +7117,7 @@ ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts)
6545} 7117}
6546 7118
6547int 7119int
6548ahc_print_register(ahc_reg_parse_entry_t *table, u_int num_entries, 7120ahc_print_register(const ahc_reg_parse_entry_t *table, u_int num_entries,
6549 const char *name, u_int address, u_int value, 7121 const char *name, u_int address, u_int value,
6550 u_int *cur_column, u_int wrap_point) 7122 u_int *cur_column, u_int wrap_point)
6551{ 7123{
@@ -7229,7 +7801,7 @@ ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask)
7229 ahc_outb(ahc, SCSIID, scsiid); 7801 ahc_outb(ahc, SCSIID, scsiid);
7230} 7802}
7231 7803
7232void 7804static void
7233ahc_run_tqinfifo(struct ahc_softc *ahc, int paused) 7805ahc_run_tqinfifo(struct ahc_softc *ahc, int paused)
7234{ 7806{
7235 struct target_cmd *cmd; 7807 struct target_cmd *cmd;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_inline.h b/drivers/scsi/aic7xxx/aic7xxx_inline.h
index cba2f23bbe79..09bf2f4d78d5 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_inline.h
+++ b/drivers/scsi/aic7xxx/aic7xxx_inline.h
@@ -46,179 +46,13 @@
46#define _AIC7XXX_INLINE_H_ 46#define _AIC7XXX_INLINE_H_
47 47
48/************************* Sequencer Execution Control ************************/ 48/************************* Sequencer Execution Control ************************/
49static __inline void ahc_pause_bug_fix(struct ahc_softc *ahc); 49int ahc_is_paused(struct ahc_softc *ahc);
50static __inline int ahc_is_paused(struct ahc_softc *ahc); 50void ahc_pause(struct ahc_softc *ahc);
51static __inline void ahc_pause(struct ahc_softc *ahc); 51void ahc_unpause(struct ahc_softc *ahc);
52static __inline void ahc_unpause(struct ahc_softc *ahc);
53
54/*
55 * Work around any chip bugs related to halting sequencer execution.
56 * On Ultra2 controllers, we must clear the CIOBUS stretch signal by
57 * reading a register that will set this signal and deassert it.
58 * Without this workaround, if the chip is paused, by an interrupt or
59 * manual pause while accessing scb ram, accesses to certain registers
60 * will hang the system (infinite pci retries).
61 */
62static __inline void
63ahc_pause_bug_fix(struct ahc_softc *ahc)
64{
65 if ((ahc->features & AHC_ULTRA2) != 0)
66 (void)ahc_inb(ahc, CCSCBCTL);
67}
68
69/*
70 * Determine whether the sequencer has halted code execution.
71 * Returns non-zero status if the sequencer is stopped.
72 */
73static __inline int
74ahc_is_paused(struct ahc_softc *ahc)
75{
76 return ((ahc_inb(ahc, HCNTRL) & PAUSE) != 0);
77}
78
79/*
80 * Request that the sequencer stop and wait, indefinitely, for it
81 * to stop. The sequencer will only acknowledge that it is paused
82 * once it has reached an instruction boundary and PAUSEDIS is
83 * cleared in the SEQCTL register. The sequencer may use PAUSEDIS
84 * for critical sections.
85 */
86static __inline void
87ahc_pause(struct ahc_softc *ahc)
88{
89 ahc_outb(ahc, HCNTRL, ahc->pause);
90
91 /*
92 * Since the sequencer can disable pausing in a critical section, we
93 * must loop until it actually stops.
94 */
95 while (ahc_is_paused(ahc) == 0)
96 ;
97
98 ahc_pause_bug_fix(ahc);
99}
100
101/*
102 * Allow the sequencer to continue program execution.
103 * We check here to ensure that no additional interrupt
104 * sources that would cause the sequencer to halt have been
105 * asserted. If, for example, a SCSI bus reset is detected
106 * while we are fielding a different, pausing, interrupt type,
107 * we don't want to release the sequencer before going back
108 * into our interrupt handler and dealing with this new
109 * condition.
110 */
111static __inline void
112ahc_unpause(struct ahc_softc *ahc)
113{
114 if ((ahc_inb(ahc, INTSTAT) & (SCSIINT | SEQINT | BRKADRINT)) == 0)
115 ahc_outb(ahc, HCNTRL, ahc->unpause);
116}
117
118/*********************** Untagged Transaction Routines ************************/
119static __inline void ahc_freeze_untagged_queues(struct ahc_softc *ahc);
120static __inline void ahc_release_untagged_queues(struct ahc_softc *ahc);
121
122/*
123 * Block our completion routine from starting the next untagged
124 * transaction for this target or target lun.
125 */
126static __inline void
127ahc_freeze_untagged_queues(struct ahc_softc *ahc)
128{
129 if ((ahc->flags & AHC_SCB_BTT) == 0)
130 ahc->untagged_queue_lock++;
131}
132
133/*
134 * Allow the next untagged transaction for this target or target lun
135 * to be executed. We use a counting semaphore to allow the lock
136 * to be acquired recursively. Once the count drops to zero, the
137 * transaction queues will be run.
138 */
139static __inline void
140ahc_release_untagged_queues(struct ahc_softc *ahc)
141{
142 if ((ahc->flags & AHC_SCB_BTT) == 0) {
143 ahc->untagged_queue_lock--;
144 if (ahc->untagged_queue_lock == 0)
145 ahc_run_untagged_queues(ahc);
146 }
147}
148 52
149/************************** Memory mapping routines ***************************/ 53/************************** Memory mapping routines ***************************/
150static __inline struct ahc_dma_seg * 54void ahc_sync_sglist(struct ahc_softc *ahc,
151 ahc_sg_bus_to_virt(struct scb *scb, 55 struct scb *scb, int op);
152 uint32_t sg_busaddr);
153static __inline uint32_t
154 ahc_sg_virt_to_bus(struct scb *scb,
155 struct ahc_dma_seg *sg);
156static __inline uint32_t
157 ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index);
158static __inline void ahc_sync_scb(struct ahc_softc *ahc,
159 struct scb *scb, int op);
160static __inline void ahc_sync_sglist(struct ahc_softc *ahc,
161 struct scb *scb, int op);
162static __inline uint32_t
163 ahc_targetcmd_offset(struct ahc_softc *ahc,
164 u_int index);
165
166static __inline struct ahc_dma_seg *
167ahc_sg_bus_to_virt(struct scb *scb, uint32_t sg_busaddr)
168{
169 int sg_index;
170
171 sg_index = (sg_busaddr - scb->sg_list_phys)/sizeof(struct ahc_dma_seg);
172 /* sg_list_phys points to entry 1, not 0 */
173 sg_index++;
174
175 return (&scb->sg_list[sg_index]);
176}
177
178static __inline uint32_t
179ahc_sg_virt_to_bus(struct scb *scb, struct ahc_dma_seg *sg)
180{
181 int sg_index;
182
183 /* sg_list_phys points to entry 1, not 0 */
184 sg_index = sg - &scb->sg_list[1];
185
186 return (scb->sg_list_phys + (sg_index * sizeof(*scb->sg_list)));
187}
188
189static __inline uint32_t
190ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index)
191{
192 return (ahc->scb_data->hscb_busaddr
193 + (sizeof(struct hardware_scb) * index));
194}
195
196static __inline void
197ahc_sync_scb(struct ahc_softc *ahc, struct scb *scb, int op)
198{
199 ahc_dmamap_sync(ahc, ahc->scb_data->hscb_dmat,
200 ahc->scb_data->hscb_dmamap,
201 /*offset*/(scb->hscb - ahc->hscbs) * sizeof(*scb->hscb),
202 /*len*/sizeof(*scb->hscb), op);
203}
204
205static __inline void
206ahc_sync_sglist(struct ahc_softc *ahc, struct scb *scb, int op)
207{
208 if (scb->sg_count == 0)
209 return;
210
211 ahc_dmamap_sync(ahc, ahc->scb_data->sg_dmat, scb->sg_map->sg_dmamap,
212 /*offset*/(scb->sg_list - scb->sg_map->sg_vaddr)
213 * sizeof(struct ahc_dma_seg),
214 /*len*/sizeof(struct ahc_dma_seg) * scb->sg_count, op);
215}
216
217static __inline uint32_t
218ahc_targetcmd_offset(struct ahc_softc *ahc, u_int index)
219{
220 return (((uint8_t *)&ahc->targetcmds[index]) - ahc->qoutfifo);
221}
222 56
223/******************************** Debugging ***********************************/ 57/******************************** Debugging ***********************************/
224static __inline char *ahc_name(struct ahc_softc *ahc); 58static __inline char *ahc_name(struct ahc_softc *ahc);
@@ -231,420 +65,34 @@ ahc_name(struct ahc_softc *ahc)
231 65
232/*********************** Miscellaneous Support Functions ***********************/ 66/*********************** Miscellaneous Support Functions ***********************/
233 67
234static __inline void ahc_update_residual(struct ahc_softc *ahc, 68struct ahc_initiator_tinfo *
235 struct scb *scb); 69 ahc_fetch_transinfo(struct ahc_softc *ahc,
236static __inline struct ahc_initiator_tinfo * 70 char channel, u_int our_id,
237 ahc_fetch_transinfo(struct ahc_softc *ahc, 71 u_int remote_id,
238 char channel, u_int our_id, 72 struct ahc_tmode_tstate **tstate);
239 u_int remote_id, 73uint16_t
240 struct ahc_tmode_tstate **tstate); 74 ahc_inw(struct ahc_softc *ahc, u_int port);
241static __inline uint16_t 75void ahc_outw(struct ahc_softc *ahc, u_int port,
242 ahc_inw(struct ahc_softc *ahc, u_int port); 76 u_int value);
243static __inline void ahc_outw(struct ahc_softc *ahc, u_int port, 77uint32_t
244 u_int value); 78 ahc_inl(struct ahc_softc *ahc, u_int port);
245static __inline uint32_t 79void ahc_outl(struct ahc_softc *ahc, u_int port,
246 ahc_inl(struct ahc_softc *ahc, u_int port); 80 uint32_t value);
247static __inline void ahc_outl(struct ahc_softc *ahc, u_int port, 81uint64_t
248 uint32_t value); 82 ahc_inq(struct ahc_softc *ahc, u_int port);
249static __inline uint64_t 83void ahc_outq(struct ahc_softc *ahc, u_int port,
250 ahc_inq(struct ahc_softc *ahc, u_int port); 84 uint64_t value);
251static __inline void ahc_outq(struct ahc_softc *ahc, u_int port, 85struct scb*
252 uint64_t value); 86 ahc_get_scb(struct ahc_softc *ahc);
253static __inline struct scb* 87void ahc_free_scb(struct ahc_softc *ahc, struct scb *scb);
254 ahc_get_scb(struct ahc_softc *ahc); 88struct scb *
255static __inline void ahc_free_scb(struct ahc_softc *ahc, struct scb *scb); 89 ahc_lookup_scb(struct ahc_softc *ahc, u_int tag);
256static __inline void ahc_swap_with_next_hscb(struct ahc_softc *ahc, 90void ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb);
257 struct scb *scb); 91struct scsi_sense_data *
258static __inline void ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb); 92 ahc_get_sense_buf(struct ahc_softc *ahc,
259static __inline struct scsi_sense_data * 93 struct scb *scb);
260 ahc_get_sense_buf(struct ahc_softc *ahc,
261 struct scb *scb);
262static __inline uint32_t
263 ahc_get_sense_bufaddr(struct ahc_softc *ahc,
264 struct scb *scb);
265
266/*
267 * Determine whether the sequencer reported a residual
268 * for this SCB/transaction.
269 */
270static __inline void
271ahc_update_residual(struct ahc_softc *ahc, struct scb *scb)
272{
273 uint32_t sgptr;
274
275 sgptr = ahc_le32toh(scb->hscb->sgptr);
276 if ((sgptr & SG_RESID_VALID) != 0)
277 ahc_calc_residual(ahc, scb);
278}
279
280/*
281 * Return pointers to the transfer negotiation information
282 * for the specified our_id/remote_id pair.
283 */
284static __inline struct ahc_initiator_tinfo *
285ahc_fetch_transinfo(struct ahc_softc *ahc, char channel, u_int our_id,
286 u_int remote_id, struct ahc_tmode_tstate **tstate)
287{
288 /*
289 * Transfer data structures are stored from the perspective
290 * of the target role. Since the parameters for a connection
291 * in the initiator role to a given target are the same as
292 * when the roles are reversed, we pretend we are the target.
293 */
294 if (channel == 'B')
295 our_id += 8;
296 *tstate = ahc->enabled_targets[our_id];
297 return (&(*tstate)->transinfo[remote_id]);
298}
299
300static __inline uint16_t
301ahc_inw(struct ahc_softc *ahc, u_int port)
302{
303 uint16_t r = ahc_inb(ahc, port+1) << 8;
304 return r | ahc_inb(ahc, port);
305}
306
307static __inline void
308ahc_outw(struct ahc_softc *ahc, u_int port, u_int value)
309{
310 ahc_outb(ahc, port, value & 0xFF);
311 ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
312}
313
314static __inline uint32_t
315ahc_inl(struct ahc_softc *ahc, u_int port)
316{
317 return ((ahc_inb(ahc, port))
318 | (ahc_inb(ahc, port+1) << 8)
319 | (ahc_inb(ahc, port+2) << 16)
320 | (ahc_inb(ahc, port+3) << 24));
321}
322
323static __inline void
324ahc_outl(struct ahc_softc *ahc, u_int port, uint32_t value)
325{
326 ahc_outb(ahc, port, (value) & 0xFF);
327 ahc_outb(ahc, port+1, ((value) >> 8) & 0xFF);
328 ahc_outb(ahc, port+2, ((value) >> 16) & 0xFF);
329 ahc_outb(ahc, port+3, ((value) >> 24) & 0xFF);
330}
331
332static __inline uint64_t
333ahc_inq(struct ahc_softc *ahc, u_int port)
334{
335 return ((ahc_inb(ahc, port))
336 | (ahc_inb(ahc, port+1) << 8)
337 | (ahc_inb(ahc, port+2) << 16)
338 | (ahc_inb(ahc, port+3) << 24)
339 | (((uint64_t)ahc_inb(ahc, port+4)) << 32)
340 | (((uint64_t)ahc_inb(ahc, port+5)) << 40)
341 | (((uint64_t)ahc_inb(ahc, port+6)) << 48)
342 | (((uint64_t)ahc_inb(ahc, port+7)) << 56));
343}
344
345static __inline void
346ahc_outq(struct ahc_softc *ahc, u_int port, uint64_t value)
347{
348 ahc_outb(ahc, port, value & 0xFF);
349 ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
350 ahc_outb(ahc, port+2, (value >> 16) & 0xFF);
351 ahc_outb(ahc, port+3, (value >> 24) & 0xFF);
352 ahc_outb(ahc, port+4, (value >> 32) & 0xFF);
353 ahc_outb(ahc, port+5, (value >> 40) & 0xFF);
354 ahc_outb(ahc, port+6, (value >> 48) & 0xFF);
355 ahc_outb(ahc, port+7, (value >> 56) & 0xFF);
356}
357
358/*
359 * Get a free scb. If there are none, see if we can allocate a new SCB.
360 */
361static __inline struct scb *
362ahc_get_scb(struct ahc_softc *ahc)
363{
364 struct scb *scb;
365
366 if ((scb = SLIST_FIRST(&ahc->scb_data->free_scbs)) == NULL) {
367 ahc_alloc_scbs(ahc);
368 scb = SLIST_FIRST(&ahc->scb_data->free_scbs);
369 if (scb == NULL)
370 return (NULL);
371 }
372 SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links.sle);
373 return (scb);
374}
375
376/*
377 * Return an SCB resource to the free list.
378 */
379static __inline void
380ahc_free_scb(struct ahc_softc *ahc, struct scb *scb)
381{
382 struct hardware_scb *hscb;
383
384 hscb = scb->hscb;
385 /* Clean up for the next user */
386 ahc->scb_data->scbindex[hscb->tag] = NULL;
387 scb->flags = SCB_FREE;
388 hscb->control = 0;
389
390 SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, scb, links.sle);
391
392 /* Notify the OSM that a resource is now available. */
393 ahc_platform_scb_free(ahc, scb);
394}
395
396static __inline struct scb *
397ahc_lookup_scb(struct ahc_softc *ahc, u_int tag)
398{
399 struct scb* scb;
400
401 scb = ahc->scb_data->scbindex[tag];
402 if (scb != NULL)
403 ahc_sync_scb(ahc, scb,
404 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
405 return (scb);
406}
407
408static __inline void
409ahc_swap_with_next_hscb(struct ahc_softc *ahc, struct scb *scb)
410{
411 struct hardware_scb *q_hscb;
412 u_int saved_tag;
413
414 /*
415 * Our queuing method is a bit tricky. The card
416 * knows in advance which HSCB to download, and we
417 * can't disappoint it. To achieve this, the next
418 * SCB to download is saved off in ahc->next_queued_scb.
419 * When we are called to queue "an arbitrary scb",
420 * we copy the contents of the incoming HSCB to the one
421 * the sequencer knows about, swap HSCB pointers and
422 * finally assign the SCB to the tag indexed location
423 * in the scb_array. This makes sure that we can still
424 * locate the correct SCB by SCB_TAG.
425 */
426 q_hscb = ahc->next_queued_scb->hscb;
427 saved_tag = q_hscb->tag;
428 memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
429 if ((scb->flags & SCB_CDB32_PTR) != 0) {
430 q_hscb->shared_data.cdb_ptr =
431 ahc_htole32(ahc_hscb_busaddr(ahc, q_hscb->tag)
432 + offsetof(struct hardware_scb, cdb32));
433 }
434 q_hscb->tag = saved_tag;
435 q_hscb->next = scb->hscb->tag;
436
437 /* Now swap HSCB pointers. */
438 ahc->next_queued_scb->hscb = scb->hscb;
439 scb->hscb = q_hscb;
440
441 /* Now define the mapping from tag to SCB in the scbindex */
442 ahc->scb_data->scbindex[scb->hscb->tag] = scb;
443}
444
445/*
446 * Tell the sequencer about a new transaction to execute.
447 */
448static __inline void
449ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb)
450{
451 ahc_swap_with_next_hscb(ahc, scb);
452
453 if (scb->hscb->tag == SCB_LIST_NULL
454 || scb->hscb->next == SCB_LIST_NULL)
455 panic("Attempt to queue invalid SCB tag %x:%x\n",
456 scb->hscb->tag, scb->hscb->next);
457
458 /*
459 * Setup data "oddness".
460 */
461 scb->hscb->lun &= LID;
462 if (ahc_get_transfer_length(scb) & 0x1)
463 scb->hscb->lun |= SCB_XFERLEN_ODD;
464
465 /*
466 * Keep a history of SCBs we've downloaded in the qinfifo.
467 */
468 ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
469
470 /*
471 * Make sure our data is consistent from the
472 * perspective of the adapter.
473 */
474 ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
475
476 /* Tell the adapter about the newly queued SCB */
477 if ((ahc->features & AHC_QUEUE_REGS) != 0) {
478 ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
479 } else {
480 if ((ahc->features & AHC_AUTOPAUSE) == 0)
481 ahc_pause(ahc);
482 ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
483 if ((ahc->features & AHC_AUTOPAUSE) == 0)
484 ahc_unpause(ahc);
485 }
486}
487
488static __inline struct scsi_sense_data *
489ahc_get_sense_buf(struct ahc_softc *ahc, struct scb *scb)
490{
491 int offset;
492
493 offset = scb - ahc->scb_data->scbarray;
494 return (&ahc->scb_data->sense[offset]);
495}
496
497static __inline uint32_t
498ahc_get_sense_bufaddr(struct ahc_softc *ahc, struct scb *scb)
499{
500 int offset;
501
502 offset = scb - ahc->scb_data->scbarray;
503 return (ahc->scb_data->sense_busaddr
504 + (offset * sizeof(struct scsi_sense_data)));
505}
506 94
507/************************** Interrupt Processing ******************************/ 95/************************** Interrupt Processing ******************************/
508static __inline void ahc_sync_qoutfifo(struct ahc_softc *ahc, int op); 96int ahc_intr(struct ahc_softc *ahc);
509static __inline void ahc_sync_tqinfifo(struct ahc_softc *ahc, int op);
510static __inline u_int ahc_check_cmdcmpltqueues(struct ahc_softc *ahc);
511static __inline int ahc_intr(struct ahc_softc *ahc);
512
513static __inline void
514ahc_sync_qoutfifo(struct ahc_softc *ahc, int op)
515{
516 ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
517 /*offset*/0, /*len*/256, op);
518}
519
520static __inline void
521ahc_sync_tqinfifo(struct ahc_softc *ahc, int op)
522{
523#ifdef AHC_TARGET_MODE
524 if ((ahc->flags & AHC_TARGETROLE) != 0) {
525 ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
526 ahc->shared_data_dmamap,
527 ahc_targetcmd_offset(ahc, 0),
528 sizeof(struct target_cmd) * AHC_TMODE_CMDS,
529 op);
530 }
531#endif
532}
533
534/*
535 * See if the firmware has posted any completed commands
536 * into our in-core command complete fifos.
537 */
538#define AHC_RUN_QOUTFIFO 0x1
539#define AHC_RUN_TQINFIFO 0x2
540static __inline u_int
541ahc_check_cmdcmpltqueues(struct ahc_softc *ahc)
542{
543 u_int retval;
544
545 retval = 0;
546 ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
547 /*offset*/ahc->qoutfifonext, /*len*/1,
548 BUS_DMASYNC_POSTREAD);
549 if (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL)
550 retval |= AHC_RUN_QOUTFIFO;
551#ifdef AHC_TARGET_MODE
552 if ((ahc->flags & AHC_TARGETROLE) != 0
553 && (ahc->flags & AHC_TQINFIFO_BLOCKED) == 0) {
554 ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
555 ahc->shared_data_dmamap,
556 ahc_targetcmd_offset(ahc, ahc->tqinfifofnext),
557 /*len*/sizeof(struct target_cmd),
558 BUS_DMASYNC_POSTREAD);
559 if (ahc->targetcmds[ahc->tqinfifonext].cmd_valid != 0)
560 retval |= AHC_RUN_TQINFIFO;
561 }
562#endif
563 return (retval);
564}
565
566/*
567 * Catch an interrupt from the adapter
568 */
569static __inline int
570ahc_intr(struct ahc_softc *ahc)
571{
572 u_int intstat;
573
574 if ((ahc->pause & INTEN) == 0) {
575 /*
576 * Our interrupt is not enabled on the chip
577 * and may be disabled for re-entrancy reasons,
578 * so just return. This is likely just a shared
579 * interrupt.
580 */
581 return (0);
582 }
583 /*
584 * Instead of directly reading the interrupt status register,
585 * infer the cause of the interrupt by checking our in-core
586 * completion queues. This avoids a costly PCI bus read in
587 * most cases.
588 */
589 if ((ahc->flags & (AHC_ALL_INTERRUPTS|AHC_EDGE_INTERRUPT)) == 0
590 && (ahc_check_cmdcmpltqueues(ahc) != 0))
591 intstat = CMDCMPLT;
592 else {
593 intstat = ahc_inb(ahc, INTSTAT);
594 }
595
596 if ((intstat & INT_PEND) == 0) {
597#if AHC_PCI_CONFIG > 0
598 if (ahc->unsolicited_ints > 500) {
599 ahc->unsolicited_ints = 0;
600 if ((ahc->chip & AHC_PCI) != 0
601 && (ahc_inb(ahc, ERROR) & PCIERRSTAT) != 0)
602 ahc->bus_intr(ahc);
603 }
604#endif
605 ahc->unsolicited_ints++;
606 return (0);
607 }
608 ahc->unsolicited_ints = 0;
609
610 if (intstat & CMDCMPLT) {
611 ahc_outb(ahc, CLRINT, CLRCMDINT);
612
613 /*
614 * Ensure that the chip sees that we've cleared
615 * this interrupt before we walk the output fifo.
616 * Otherwise, we may, due to posted bus writes,
617 * clear the interrupt after we finish the scan,
618 * and after the sequencer has added new entries
619 * and asserted the interrupt again.
620 */
621 ahc_flush_device_writes(ahc);
622 ahc_run_qoutfifo(ahc);
623#ifdef AHC_TARGET_MODE
624 if ((ahc->flags & AHC_TARGETROLE) != 0)
625 ahc_run_tqinfifo(ahc, /*paused*/FALSE);
626#endif
627 }
628
629 /*
630 * Handle statuses that may invalidate our cached
631 * copy of INTSTAT separately.
632 */
633 if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0) {
634 /* Hot eject. Do nothing */
635 } else if (intstat & BRKADRINT) {
636 ahc_handle_brkadrint(ahc);
637 } else if ((intstat & (SEQINT|SCSIINT)) != 0) {
638
639 ahc_pause_bug_fix(ahc);
640
641 if ((intstat & SEQINT) != 0)
642 ahc_handle_seqint(ahc, intstat);
643
644 if ((intstat & SCSIINT) != 0)
645 ahc_handle_scsiint(ahc, intstat);
646 }
647 return (1);
648}
649 97
650#endif /* _AIC7XXX_INLINE_H_ */ 98#endif /* _AIC7XXX_INLINE_H_ */
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index 42ad48e09f02..fd2b9785ff4f 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -388,14 +388,83 @@ static int aic7xxx_setup(char *s);
388static int ahc_linux_unit; 388static int ahc_linux_unit;
389 389
390 390
391/************************** OS Utility Wrappers *******************************/
392void
393ahc_delay(long usec)
394{
395 /*
396 * udelay on Linux can have problems for
397 * multi-millisecond waits. Wait at most
398 * 1024us per call.
399 */
400 while (usec > 0) {
 401		udelay(usec > 1024 ? 1024 : usec);
402 usec -= 1024;
403 }
404}
405
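
Assuming a fake udelay that simply accumulates, the chunking loop above can be checked in user space; every request should add up exactly, including multiples of 1024:

#include <assert.h>

static long total;

static void fake_udelay(long usec) { total += usec; }

static void
delay_model(long usec)
{
	while (usec > 0) {
		fake_udelay(usec > 1024 ? 1024 : usec);
		usec -= 1024;
	}
}

int main(void)
{
	for (long u = 1; u <= 5000; u++) {
		total = 0;
		delay_model(u);
		assert(total == u);	/* no lost time at chunk boundaries */
	}
	return 0;
}
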
406/***************************** Low Level I/O **********************************/
407uint8_t
408ahc_inb(struct ahc_softc * ahc, long port)
409{
410 uint8_t x;
411
412 if (ahc->tag == BUS_SPACE_MEMIO) {
413 x = readb(ahc->bsh.maddr + port);
414 } else {
415 x = inb(ahc->bsh.ioport + port);
416 }
417 mb();
418 return (x);
419}
420
421void
422ahc_outb(struct ahc_softc * ahc, long port, uint8_t val)
423{
424 if (ahc->tag == BUS_SPACE_MEMIO) {
425 writeb(val, ahc->bsh.maddr + port);
426 } else {
427 outb(val, ahc->bsh.ioport + port);
428 }
429 mb();
430}
431
432void
433ahc_outsb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
434{
435 int i;
436
437 /*
438 * There is probably a more efficient way to do this on Linux
439 * but we don't use this for anything speed critical and this
440 * should work.
441 */
442 for (i = 0; i < count; i++)
443 ahc_outb(ahc, port, *array++);
444}
445
446void
447ahc_insb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
448{
449 int i;
450
451 /*
452 * There is probably a more efficient way to do this on Linux
453 * but we don't use this for anything speed critical and this
454 * should work.
455 */
456 for (i = 0; i < count; i++)
457 *array++ = ahc_inb(ahc, port);
458}
459
391/********************************* Inlines ************************************/ 460/********************************* Inlines ************************************/
392static __inline void ahc_linux_unmap_scb(struct ahc_softc*, struct scb*); 461static void ahc_linux_unmap_scb(struct ahc_softc*, struct scb*);
393 462
394static __inline int ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb, 463static int ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb,
395 struct ahc_dma_seg *sg, 464 struct ahc_dma_seg *sg,
396 dma_addr_t addr, bus_size_t len); 465 dma_addr_t addr, bus_size_t len);
397 466
398static __inline void 467static void
399ahc_linux_unmap_scb(struct ahc_softc *ahc, struct scb *scb) 468ahc_linux_unmap_scb(struct ahc_softc *ahc, struct scb *scb)
400{ 469{
401 struct scsi_cmnd *cmd; 470 struct scsi_cmnd *cmd;
@@ -406,7 +475,7 @@ ahc_linux_unmap_scb(struct ahc_softc *ahc, struct scb *scb)
406 scsi_dma_unmap(cmd); 475 scsi_dma_unmap(cmd);
407} 476}
408 477
409static __inline int 478static int
410ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb, 479ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb,
411 struct ahc_dma_seg *sg, dma_addr_t addr, bus_size_t len) 480 struct ahc_dma_seg *sg, dma_addr_t addr, bus_size_t len)
412{ 481{
@@ -442,13 +511,11 @@ ahc_linux_info(struct Scsi_Host *host)
442 bp = &buffer[0]; 511 bp = &buffer[0];
443 ahc = *(struct ahc_softc **)host->hostdata; 512 ahc = *(struct ahc_softc **)host->hostdata;
444 memset(bp, 0, sizeof(buffer)); 513 memset(bp, 0, sizeof(buffer));
445 strcpy(bp, "Adaptec AIC7XXX EISA/VLB/PCI SCSI HBA DRIVER, Rev "); 514 strcpy(bp, "Adaptec AIC7XXX EISA/VLB/PCI SCSI HBA DRIVER, Rev " AIC7XXX_DRIVER_VERSION "\n"
446 strcat(bp, AIC7XXX_DRIVER_VERSION); 515 " <");
447 strcat(bp, "\n");
448 strcat(bp, " <");
449 strcat(bp, ahc->description); 516 strcat(bp, ahc->description);
450 strcat(bp, ">\n"); 517 strcat(bp, ">\n"
451 strcat(bp, " "); 518 " ");
452 ahc_controller_info(ahc, ahc_info); 519 ahc_controller_info(ahc, ahc_info);
453 strcat(bp, ahc_info); 520 strcat(bp, ahc_info);
454 strcat(bp, "\n"); 521 strcat(bp, "\n");
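
The rewrite above leans on C's adjacent-string-literal concatenation: the version macro and the surrounding text fuse into a single literal at compile time, replacing three strcat calls with one strcpy. A minimal demonstration (DRIVER_VERSION is a stand-in for AIC7XXX_DRIVER_VERSION):

#include <assert.h>
#include <string.h>

#define DRIVER_VERSION "7.0"	/* hypothetical stand-in */

int main(void)
{
	const char *bp = "DRIVER, Rev " DRIVER_VERSION "\n" " <";

	assert(strcmp(bp, "DRIVER, Rev 7.0\n <") == 0);
	return 0;
}
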
@@ -964,7 +1031,7 @@ aic7xxx_setup(char *s)
964 char *p; 1031 char *p;
965 char *end; 1032 char *end;
966 1033
967 static struct { 1034 static const struct {
968 const char *name; 1035 const char *name;
969 uint32_t *flag; 1036 uint32_t *flag;
970 } options[] = { 1037 } options[] = {
@@ -2317,7 +2384,7 @@ static void ahc_linux_set_period(struct scsi_target *starget, int period)
2317 unsigned int ppr_options = tinfo->goal.ppr_options; 2384 unsigned int ppr_options = tinfo->goal.ppr_options;
2318 unsigned long flags; 2385 unsigned long flags;
2319 unsigned long offset = tinfo->goal.offset; 2386 unsigned long offset = tinfo->goal.offset;
2320 struct ahc_syncrate *syncrate; 2387 const struct ahc_syncrate *syncrate;
2321 2388
2322 if (offset == 0) 2389 if (offset == 0)
2323 offset = MAX_OFFSET; 2390 offset = MAX_OFFSET;
@@ -2361,7 +2428,7 @@ static void ahc_linux_set_offset(struct scsi_target *starget, int offset)
2361 unsigned int ppr_options = 0; 2428 unsigned int ppr_options = 0;
2362 unsigned int period = 0; 2429 unsigned int period = 0;
2363 unsigned long flags; 2430 unsigned long flags;
2364 struct ahc_syncrate *syncrate = NULL; 2431 const struct ahc_syncrate *syncrate = NULL;
2365 2432
2366 ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, 2433 ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
2367 starget->channel + 'A', ROLE_INITIATOR); 2434 starget->channel + 'A', ROLE_INITIATOR);
@@ -2391,7 +2458,7 @@ static void ahc_linux_set_dt(struct scsi_target *starget, int dt)
2391 unsigned int period = tinfo->goal.period; 2458 unsigned int period = tinfo->goal.period;
2392 unsigned int width = tinfo->goal.width; 2459 unsigned int width = tinfo->goal.width;
2393 unsigned long flags; 2460 unsigned long flags;
2394 struct ahc_syncrate *syncrate; 2461 const struct ahc_syncrate *syncrate;
2395 2462
2396 if (dt && spi_max_width(starget)) { 2463 if (dt && spi_max_width(starget)) {
2397 ppr_options |= MSG_EXT_PPR_DT_REQ; 2464 ppr_options |= MSG_EXT_PPR_DT_REQ;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.h b/drivers/scsi/aic7xxx/aic7xxx_osm.h
index b48dab447bde..3f7238db35e5 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.h
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.h
@@ -365,7 +365,7 @@ struct ahc_platform_data {
365#define AHC_LINUX_NOIRQ ((uint32_t)~0) 365#define AHC_LINUX_NOIRQ ((uint32_t)~0)
366 uint32_t irq; /* IRQ for this adapter */ 366 uint32_t irq; /* IRQ for this adapter */
367 uint32_t bios_address; 367 uint32_t bios_address;
368 uint32_t mem_busaddr; /* Mem Base Addr */ 368 resource_size_t mem_busaddr; /* Mem Base Addr */
369}; 369};
370 370
371/************************** OS Utility Wrappers *******************************/ 371/************************** OS Utility Wrappers *******************************/
@@ -375,82 +375,16 @@ struct ahc_platform_data {
375#define malloc(size, type, flags) kmalloc(size, flags) 375#define malloc(size, type, flags) kmalloc(size, flags)
376#define free(ptr, type) kfree(ptr) 376#define free(ptr, type) kfree(ptr)
377 377
378static __inline void ahc_delay(long); 378void ahc_delay(long);
379static __inline void
380ahc_delay(long usec)
381{
382 /*
383 * udelay on Linux can have problems for
384 * multi-millisecond waits. Wait at most
385 * 1024us per call.
386 */
387 while (usec > 0) {
388 udelay(usec % 1024);
389 usec -= 1024;
390 }
391}
392 379
393 380
394/***************************** Low Level I/O **********************************/ 381/***************************** Low Level I/O **********************************/
395static __inline uint8_t ahc_inb(struct ahc_softc * ahc, long port); 382uint8_t ahc_inb(struct ahc_softc * ahc, long port);
396static __inline void ahc_outb(struct ahc_softc * ahc, long port, uint8_t val); 383void ahc_outb(struct ahc_softc * ahc, long port, uint8_t val);
397static __inline void ahc_outsb(struct ahc_softc * ahc, long port, 384void ahc_outsb(struct ahc_softc * ahc, long port,
398 uint8_t *, int count); 385 uint8_t *, int count);
399static __inline void ahc_insb(struct ahc_softc * ahc, long port, 386void ahc_insb(struct ahc_softc * ahc, long port,
400 uint8_t *, int count); 387 uint8_t *, int count);
401
402static __inline uint8_t
403ahc_inb(struct ahc_softc * ahc, long port)
404{
405 uint8_t x;
406
407 if (ahc->tag == BUS_SPACE_MEMIO) {
408 x = readb(ahc->bsh.maddr + port);
409 } else {
410 x = inb(ahc->bsh.ioport + port);
411 }
412 mb();
413 return (x);
414}
415
416static __inline void
417ahc_outb(struct ahc_softc * ahc, long port, uint8_t val)
418{
419 if (ahc->tag == BUS_SPACE_MEMIO) {
420 writeb(val, ahc->bsh.maddr + port);
421 } else {
422 outb(val, ahc->bsh.ioport + port);
423 }
424 mb();
425}
426
427static __inline void
428ahc_outsb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
429{
430 int i;
431
432 /*
433 * There is probably a more efficient way to do this on Linux
434 * but we don't use this for anything speed critical and this
435 * should work.
436 */
437 for (i = 0; i < count; i++)
438 ahc_outb(ahc, port, *array++);
439}
440
441static __inline void
442ahc_insb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
443{
444 int i;
445
446 /*
447 * There is probably a more efficient way to do this on Linux
448 * but we don't use this for anything speed critical and this
449 * should work.
450 */
451 for (i = 0; i < count; i++)
452 *array++ = ahc_inb(ahc, port);
453}
454 388
455/**************************** Initialization **********************************/ 389/**************************** Initialization **********************************/
456int ahc_linux_register_host(struct ahc_softc *, 390int ahc_linux_register_host(struct ahc_softc *,
@@ -464,9 +398,6 @@ struct info_str {
464 int pos; 398 int pos;
465}; 399};
466 400
467void ahc_format_transinfo(struct info_str *info,
468 struct ahc_transinfo *tinfo);
469
470/******************************** Locking *************************************/ 401/******************************** Locking *************************************/
471/* Lock protecting internal data structures */ 402/* Lock protecting internal data structures */
472 403
@@ -555,61 +486,12 @@ void ahc_linux_pci_exit(void);
555int ahc_pci_map_registers(struct ahc_softc *ahc); 486int ahc_pci_map_registers(struct ahc_softc *ahc);
556int ahc_pci_map_int(struct ahc_softc *ahc); 487int ahc_pci_map_int(struct ahc_softc *ahc);
557 488
558static __inline uint32_t ahc_pci_read_config(ahc_dev_softc_t pci, 489uint32_t ahc_pci_read_config(ahc_dev_softc_t pci,
559 int reg, int width); 490 int reg, int width);
560 491
561static __inline uint32_t 492void ahc_pci_write_config(ahc_dev_softc_t pci,
562ahc_pci_read_config(ahc_dev_softc_t pci, int reg, int width) 493 int reg, uint32_t value,
563{ 494 int width);
564 switch (width) {
565 case 1:
566 {
567 uint8_t retval;
568
569 pci_read_config_byte(pci, reg, &retval);
570 return (retval);
571 }
572 case 2:
573 {
574 uint16_t retval;
575 pci_read_config_word(pci, reg, &retval);
576 return (retval);
577 }
578 case 4:
579 {
580 uint32_t retval;
581 pci_read_config_dword(pci, reg, &retval);
582 return (retval);
583 }
584 default:
585 panic("ahc_pci_read_config: Read size too big");
586 /* NOTREACHED */
587 return (0);
588 }
589}
590
591static __inline void ahc_pci_write_config(ahc_dev_softc_t pci,
592 int reg, uint32_t value,
593 int width);
594
595static __inline void
596ahc_pci_write_config(ahc_dev_softc_t pci, int reg, uint32_t value, int width)
597{
598 switch (width) {
599 case 1:
600 pci_write_config_byte(pci, reg, value);
601 break;
602 case 2:
603 pci_write_config_word(pci, reg, value);
604 break;
605 case 4:
606 pci_write_config_dword(pci, reg, value);
607 break;
608 default:
609 panic("ahc_pci_write_config: Write size too big");
610 /* NOTREACHED */
611 }
612}
613 495
614static __inline int ahc_get_pci_function(ahc_dev_softc_t); 496static __inline int ahc_get_pci_function(ahc_dev_softc_t);
615static __inline int 497static __inline int
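
This header hunk is a systematic un-inlining: each static __inline body is reduced to an extern prototype, and the single out-of-line definition moves into a .c file (the PCI accessors reappear in aic7xxx_osm_pci.c in the next file of this diff; ahc_delay() and the I/O helpers presumably land in aic7xxx_osm.c, outside the hunks shown here). A sketch of the delay helper's new home, keeping the body that was removed from the header:

	#include <linux/delay.h>

	/*
	 * Body unchanged from the inline version deleted above: udelay()
	 * can misbehave on Linux for multi-millisecond waits, so at most
	 * ~1024us is issued per call until the request is exhausted.
	 */
	void
	ahc_delay(long usec)
	{
		while (usec > 0) {
			udelay(usec % 1024);
			usec -= 1024;
		}
	}

Besides shrinking every includer's text, out-of-line definitions leave exactly one copy of each helper in the final image and give the functions stable addresses for debugging.
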
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
index 3d3eaef65fb3..0d7628f1f1ef 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -46,7 +46,7 @@
46*/ 46*/
47#define ID(x) ID_C(x, PCI_CLASS_STORAGE_SCSI) 47#define ID(x) ID_C(x, PCI_CLASS_STORAGE_SCSI)
48 48
49static struct pci_device_id ahc_linux_pci_id_table[] = { 49static const struct pci_device_id ahc_linux_pci_id_table[] = {
50 /* aic7850 based controllers */ 50 /* aic7850 based controllers */
51 ID(ID_AHA_2902_04_10_15_20C_30C), 51 ID(ID_AHA_2902_04_10_15_20C_30C),
52 /* aic7860 based controllers */ 52 /* aic7860 based controllers */
@@ -206,7 +206,7 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
206 const uint64_t mask_39bit = 0x7FFFFFFFFFULL; 206 const uint64_t mask_39bit = 0x7FFFFFFFFFULL;
207 struct ahc_softc *ahc; 207 struct ahc_softc *ahc;
208 ahc_dev_softc_t pci; 208 ahc_dev_softc_t pci;
209 struct ahc_pci_identity *entry; 209 const struct ahc_pci_identity *entry;
210 char *name; 210 char *name;
211 int error; 211 int error;
212 struct device *dev = &pdev->dev; 212 struct device *dev = &pdev->dev;
@@ -269,6 +269,57 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
269 return (0); 269 return (0);
270} 270}
271 271
272/******************************* PCI Routines *********************************/
273uint32_t
274ahc_pci_read_config(ahc_dev_softc_t pci, int reg, int width)
275{
276 switch (width) {
277 case 1:
278 {
279 uint8_t retval;
280
281 pci_read_config_byte(pci, reg, &retval);
282 return (retval);
283 }
284 case 2:
285 {
286 uint16_t retval;
287 pci_read_config_word(pci, reg, &retval);
288 return (retval);
289 }
290 case 4:
291 {
292 uint32_t retval;
293 pci_read_config_dword(pci, reg, &retval);
294 return (retval);
295 }
296 default:
297 panic("ahc_pci_read_config: Read size too big");
298 /* NOTREACHED */
299 return (0);
300 }
301}
302
303void
304ahc_pci_write_config(ahc_dev_softc_t pci, int reg, uint32_t value, int width)
305{
306 switch (width) {
307 case 1:
308 pci_write_config_byte(pci, reg, value);
309 break;
310 case 2:
311 pci_write_config_word(pci, reg, value);
312 break;
313 case 4:
314 pci_write_config_dword(pci, reg, value);
315 break;
316 default:
317 panic("ahc_pci_write_config: Write size too big");
318 /* NOTREACHED */
319 }
320}
321
322
272static struct pci_driver aic7xxx_pci_driver = { 323static struct pci_driver aic7xxx_pci_driver = {
273 .name = "aic7xxx", 324 .name = "aic7xxx",
274 .probe = ahc_linux_pci_dev_probe, 325 .probe = ahc_linux_pci_dev_probe,
@@ -293,7 +344,7 @@ ahc_linux_pci_exit(void)
293} 344}
294 345
295static int 346static int
296ahc_linux_pci_reserve_io_region(struct ahc_softc *ahc, u_long *base) 347ahc_linux_pci_reserve_io_region(struct ahc_softc *ahc, resource_size_t *base)
297{ 348{
298 if (aic7xxx_allow_memio == 0) 349 if (aic7xxx_allow_memio == 0)
299 return (ENOMEM); 350 return (ENOMEM);
@@ -308,10 +359,10 @@ ahc_linux_pci_reserve_io_region(struct ahc_softc *ahc, u_long *base)
308 359
309static int 360static int
310ahc_linux_pci_reserve_mem_region(struct ahc_softc *ahc, 361ahc_linux_pci_reserve_mem_region(struct ahc_softc *ahc,
311 u_long *bus_addr, 362 resource_size_t *bus_addr,
312 uint8_t __iomem **maddr) 363 uint8_t __iomem **maddr)
313{ 364{
314 u_long start; 365 resource_size_t start;
315 int error; 366 int error;
316 367
317 error = 0; 368 error = 0;
@@ -336,7 +387,7 @@ int
336ahc_pci_map_registers(struct ahc_softc *ahc) 387ahc_pci_map_registers(struct ahc_softc *ahc)
337{ 388{
338 uint32_t command; 389 uint32_t command;
339 u_long base; 390 resource_size_t base;
340 uint8_t __iomem *maddr; 391 uint8_t __iomem *maddr;
341 int error; 392 int error;
342 393
@@ -374,12 +425,12 @@ ahc_pci_map_registers(struct ahc_softc *ahc)
374 } else 425 } else
375 command |= PCIM_CMD_MEMEN; 426 command |= PCIM_CMD_MEMEN;
376 } else { 427 } else {
377 printf("aic7xxx: PCI%d:%d:%d MEM region 0x%lx " 428 printf("aic7xxx: PCI%d:%d:%d MEM region 0x%llx "
378 "unavailable. Cannot memory map device.\n", 429 "unavailable. Cannot memory map device.\n",
379 ahc_get_pci_bus(ahc->dev_softc), 430 ahc_get_pci_bus(ahc->dev_softc),
380 ahc_get_pci_slot(ahc->dev_softc), 431 ahc_get_pci_slot(ahc->dev_softc),
381 ahc_get_pci_function(ahc->dev_softc), 432 ahc_get_pci_function(ahc->dev_softc),
382 base); 433 (unsigned long long)base);
383 } 434 }
384 435
385 /* 436 /*
@@ -390,15 +441,15 @@ ahc_pci_map_registers(struct ahc_softc *ahc)
390 error = ahc_linux_pci_reserve_io_region(ahc, &base); 441 error = ahc_linux_pci_reserve_io_region(ahc, &base);
391 if (error == 0) { 442 if (error == 0) {
392 ahc->tag = BUS_SPACE_PIO; 443 ahc->tag = BUS_SPACE_PIO;
393 ahc->bsh.ioport = base; 444 ahc->bsh.ioport = (u_long)base;
394 command |= PCIM_CMD_PORTEN; 445 command |= PCIM_CMD_PORTEN;
395 } else { 446 } else {
396 printf("aic7xxx: PCI%d:%d:%d IO region 0x%lx[0..255] " 447 printf("aic7xxx: PCI%d:%d:%d IO region 0x%llx[0..255] "
397 "unavailable. Cannot map device.\n", 448 "unavailable. Cannot map device.\n",
398 ahc_get_pci_bus(ahc->dev_softc), 449 ahc_get_pci_bus(ahc->dev_softc),
399 ahc_get_pci_slot(ahc->dev_softc), 450 ahc_get_pci_slot(ahc->dev_softc),
400 ahc_get_pci_function(ahc->dev_softc), 451 ahc_get_pci_function(ahc->dev_softc),
401 base); 452 (unsigned long long)base);
402 } 453 }
403 } 454 }
404 ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, command, 4); 455 ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, command, 4);
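
The u_long to resource_size_t conversion is what forces the printf churn above: resource_size_t is 32 bits on some configurations and 64 bits on others, so no single length modifier matches it everywhere. The portable pattern, used in both reworked printf() calls, is to print with %llx and widen explicitly; a minimal kernel-style sketch (function name and message are illustrative):

	#include <linux/kernel.h>
	#include <linux/types.h>

	static void report_unavailable_region(resource_size_t base)
	{
		/* The (unsigned long long) cast, not the format alone, keeps
		 * this correct whether resource_size_t is 32 or 64 bits. */
		printk(KERN_WARNING "aic7xxx: MEM region 0x%llx unavailable\n",
		       (unsigned long long)base);
	}

Note also the (u_long) cast when a reserved I/O base is stored back into ahc->bsh.ioport, which stays u_long: port numbers fit comfortably, so the narrowing there is safe.
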
diff --git a/drivers/scsi/aic7xxx/aic7xxx_pci.c b/drivers/scsi/aic7xxx/aic7xxx_pci.c
index 56848f41e4f9..c07cb6eebb02 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_pci.c
@@ -168,8 +168,7 @@ static ahc_device_setup_t ahc_aha394XX_setup;
168static ahc_device_setup_t ahc_aha494XX_setup; 168static ahc_device_setup_t ahc_aha494XX_setup;
169static ahc_device_setup_t ahc_aha398XX_setup; 169static ahc_device_setup_t ahc_aha398XX_setup;
170 170
171static struct ahc_pci_identity ahc_pci_ident_table [] = 171static const struct ahc_pci_identity ahc_pci_ident_table[] = {
172{
173 /* aic7850 based controllers */ 172 /* aic7850 based controllers */
174 { 173 {
175 ID_AHA_2902_04_10_15_20C_30C, 174 ID_AHA_2902_04_10_15_20C_30C,
@@ -668,7 +667,7 @@ ahc_9005_subdevinfo_valid(uint16_t device, uint16_t vendor,
668 return (result); 667 return (result);
669} 668}
670 669
671struct ahc_pci_identity * 670const struct ahc_pci_identity *
672ahc_find_pci_device(ahc_dev_softc_t pci) 671ahc_find_pci_device(ahc_dev_softc_t pci)
673{ 672{
674 uint64_t full_id; 673 uint64_t full_id;
@@ -676,7 +675,7 @@ ahc_find_pci_device(ahc_dev_softc_t pci)
676 uint16_t vendor; 675 uint16_t vendor;
677 uint16_t subdevice; 676 uint16_t subdevice;
678 uint16_t subvendor; 677 uint16_t subvendor;
679 struct ahc_pci_identity *entry; 678 const struct ahc_pci_identity *entry;
680 u_int i; 679 u_int i;
681 680
682 vendor = ahc_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2); 681 vendor = ahc_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2);
@@ -710,7 +709,7 @@ ahc_find_pci_device(ahc_dev_softc_t pci)
710} 709}
711 710
712int 711int
713ahc_pci_config(struct ahc_softc *ahc, struct ahc_pci_identity *entry) 712ahc_pci_config(struct ahc_softc *ahc, const struct ahc_pci_identity *entry)
714{ 713{
715 u_int command; 714 u_int command;
716 u_int our_id; 715 u_int our_id;
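
Constifying ahc_pci_ident_table[] only compiles because const then propagates through every access path: ahc_find_pci_device() must return a pointer-to-const, its local entry must match, and ahc_pci_config() must accept one. A small self-contained sketch of the same chain, in the driver's return-with-parentheses style (names and IDs are illustrative):

	#include <stddef.h>

	struct id_entry {
		unsigned int	 full_id;
		const char	*name;
	};

	static const struct id_entry id_table[] = {
		{ 0x5078, "example A" },
		{ 0x9004, "example B" },
	};

	static const struct id_entry *
	find_id(unsigned int full_id)
	{
		size_t i;

		for (i = 0; i < sizeof(id_table) / sizeof(id_table[0]); i++) {
			if (id_table[i].full_id == full_id)
				return (&id_table[i]);	/* const reaches the caller */
		}
		return (NULL);
	}

Dropping any one qualifier in that chain draws a discarded-const warning, which is why the table, the return type, the local variable, and the ahc_pci_config() parameter all change together in this file.
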
diff --git a/drivers/scsi/aic7xxx/aic7xxx_proc.c b/drivers/scsi/aic7xxx/aic7xxx_proc.c
index 99e5443e7535..e92991a7c485 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_proc.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_proc.c
@@ -58,7 +58,7 @@ static int ahc_proc_write_seeprom(struct ahc_softc *ahc,
58 * Table of syncrates that don't follow the "divisible by 4" 58 * Table of syncrates that don't follow the "divisible by 4"
59 * rule. This table will be expanded in future SCSI specs. 59 * rule. This table will be expanded in future SCSI specs.
60 */ 60 */
61static struct { 61static const struct {
62 u_int period_factor; 62 u_int period_factor;
63 u_int period; /* in 100ths of ns */ 63 u_int period; /* in 100ths of ns */
64} scsi_syncrates[] = { 64} scsi_syncrates[] = {
@@ -137,7 +137,7 @@ copy_info(struct info_str *info, char *fmt, ...)
137 return (len); 137 return (len);
138} 138}
139 139
140void 140static void
141ahc_format_transinfo(struct info_str *info, struct ahc_transinfo *tinfo) 141ahc_format_transinfo(struct info_str *info, struct ahc_transinfo *tinfo)
142{ 142{
143 u_int speed; 143 u_int speed;
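
With its prototype gone from aic7xxx_osm.h (previous file), ahc_format_transinfo() becomes static here: aic7xxx_proc.c is its only caller, so internal linkage documents that and lets the compiler inline or discard the function freely. The pattern, sketched with an illustrative file-local helper:

	#include <stdio.h>

	/* Was: declared in a shared header, defined extern here.
	 * Now: file-local; no other translation unit can reference it. */
	static void
	format_speed(char *buf, size_t len, unsigned int rate_kbs)
	{
		snprintf(buf, len, "%u.%03uMB/s",
			 rate_kbs / 1000, rate_kbs % 1000);
	}

	int main(void)
	{
		char line[32];

		format_speed(line, sizeof(line), 80000);	/* "80.000MB/s" */
		puts(line);
		return 0;
	}
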
diff --git a/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped b/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped
index 88bfd767c51c..309a562b009e 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped
+++ b/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped
@@ -8,7 +8,7 @@
8 8
9#include "aic7xxx_osm.h" 9#include "aic7xxx_osm.h"
10 10
11static ahc_reg_parse_entry_t SCSISEQ_parse_table[] = { 11static const ahc_reg_parse_entry_t SCSISEQ_parse_table[] = {
12 { "SCSIRSTO", 0x01, 0x01 }, 12 { "SCSIRSTO", 0x01, 0x01 },
13 { "ENAUTOATNP", 0x02, 0x02 }, 13 { "ENAUTOATNP", 0x02, 0x02 },
14 { "ENAUTOATNI", 0x04, 0x04 }, 14 { "ENAUTOATNI", 0x04, 0x04 },
@@ -26,7 +26,7 @@ ahc_scsiseq_print(u_int regvalue, u_int *cur_col, u_int wrap)
26 0x00, regvalue, cur_col, wrap)); 26 0x00, regvalue, cur_col, wrap));
27} 27}
28 28
29static ahc_reg_parse_entry_t SXFRCTL0_parse_table[] = { 29static const ahc_reg_parse_entry_t SXFRCTL0_parse_table[] = {
30 { "CLRCHN", 0x02, 0x02 }, 30 { "CLRCHN", 0x02, 0x02 },
31 { "SCAMEN", 0x04, 0x04 }, 31 { "SCAMEN", 0x04, 0x04 },
32 { "SPIOEN", 0x08, 0x08 }, 32 { "SPIOEN", 0x08, 0x08 },
@@ -43,7 +43,7 @@ ahc_sxfrctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
43 0x01, regvalue, cur_col, wrap)); 43 0x01, regvalue, cur_col, wrap));
44} 44}
45 45
46static ahc_reg_parse_entry_t SXFRCTL1_parse_table[] = { 46static const ahc_reg_parse_entry_t SXFRCTL1_parse_table[] = {
47 { "STPWEN", 0x01, 0x01 }, 47 { "STPWEN", 0x01, 0x01 },
48 { "ACTNEGEN", 0x02, 0x02 }, 48 { "ACTNEGEN", 0x02, 0x02 },
49 { "ENSTIMER", 0x04, 0x04 }, 49 { "ENSTIMER", 0x04, 0x04 },
@@ -60,7 +60,7 @@ ahc_sxfrctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
60 0x02, regvalue, cur_col, wrap)); 60 0x02, regvalue, cur_col, wrap));
61} 61}
62 62
63static ahc_reg_parse_entry_t SCSISIGO_parse_table[] = { 63static const ahc_reg_parse_entry_t SCSISIGO_parse_table[] = {
64 { "ACKO", 0x01, 0x01 }, 64 { "ACKO", 0x01, 0x01 },
65 { "REQO", 0x02, 0x02 }, 65 { "REQO", 0x02, 0x02 },
66 { "BSYO", 0x04, 0x04 }, 66 { "BSYO", 0x04, 0x04 },
@@ -85,7 +85,7 @@ ahc_scsisigo_print(u_int regvalue, u_int *cur_col, u_int wrap)
85 0x03, regvalue, cur_col, wrap)); 85 0x03, regvalue, cur_col, wrap));
86} 86}
87 87
88static ahc_reg_parse_entry_t SCSISIGI_parse_table[] = { 88static const ahc_reg_parse_entry_t SCSISIGI_parse_table[] = {
89 { "ACKI", 0x01, 0x01 }, 89 { "ACKI", 0x01, 0x01 },
90 { "REQI", 0x02, 0x02 }, 90 { "REQI", 0x02, 0x02 },
91 { "BSYI", 0x04, 0x04 }, 91 { "BSYI", 0x04, 0x04 },
@@ -112,7 +112,7 @@ ahc_scsisigi_print(u_int regvalue, u_int *cur_col, u_int wrap)
112 0x03, regvalue, cur_col, wrap)); 112 0x03, regvalue, cur_col, wrap));
113} 113}
114 114
115static ahc_reg_parse_entry_t SCSIRATE_parse_table[] = { 115static const ahc_reg_parse_entry_t SCSIRATE_parse_table[] = {
116 { "SINGLE_EDGE", 0x10, 0x10 }, 116 { "SINGLE_EDGE", 0x10, 0x10 },
117 { "ENABLE_CRC", 0x40, 0x40 }, 117 { "ENABLE_CRC", 0x40, 0x40 },
118 { "WIDEXFER", 0x80, 0x80 }, 118 { "WIDEXFER", 0x80, 0x80 },
@@ -128,7 +128,7 @@ ahc_scsirate_print(u_int regvalue, u_int *cur_col, u_int wrap)
128 0x04, regvalue, cur_col, wrap)); 128 0x04, regvalue, cur_col, wrap));
129} 129}
130 130
131static ahc_reg_parse_entry_t SCSIID_parse_table[] = { 131static const ahc_reg_parse_entry_t SCSIID_parse_table[] = {
132 { "TWIN_CHNLB", 0x80, 0x80 }, 132 { "TWIN_CHNLB", 0x80, 0x80 },
133 { "OID", 0x0f, 0x0f }, 133 { "OID", 0x0f, 0x0f },
134 { "TWIN_TID", 0x70, 0x70 }, 134 { "TWIN_TID", 0x70, 0x70 },
@@ -151,20 +151,13 @@ ahc_scsidatl_print(u_int regvalue, u_int *cur_col, u_int wrap)
151} 151}
152 152
153int 153int
154ahc_scsidath_print(u_int regvalue, u_int *cur_col, u_int wrap)
155{
156 return (ahc_print_register(NULL, 0, "SCSIDATH",
157 0x07, regvalue, cur_col, wrap));
158}
159
160int
161ahc_stcnt_print(u_int regvalue, u_int *cur_col, u_int wrap) 154ahc_stcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
162{ 155{
163 return (ahc_print_register(NULL, 0, "STCNT", 156 return (ahc_print_register(NULL, 0, "STCNT",
164 0x08, regvalue, cur_col, wrap)); 157 0x08, regvalue, cur_col, wrap));
165} 158}
166 159
167static ahc_reg_parse_entry_t OPTIONMODE_parse_table[] = { 160static const ahc_reg_parse_entry_t OPTIONMODE_parse_table[] = {
168 { "DIS_MSGIN_DUALEDGE", 0x01, 0x01 }, 161 { "DIS_MSGIN_DUALEDGE", 0x01, 0x01 },
169 { "AUTO_MSGOUT_DE", 0x02, 0x02 }, 162 { "AUTO_MSGOUT_DE", 0x02, 0x02 },
170 { "SCSIDATL_IMGEN", 0x04, 0x04 }, 163 { "SCSIDATL_IMGEN", 0x04, 0x04 },
@@ -190,7 +183,7 @@ ahc_targcrccnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
190 0x0a, regvalue, cur_col, wrap)); 183 0x0a, regvalue, cur_col, wrap));
191} 184}
192 185
193static ahc_reg_parse_entry_t CLRSINT0_parse_table[] = { 186static const ahc_reg_parse_entry_t CLRSINT0_parse_table[] = {
194 { "CLRSPIORDY", 0x02, 0x02 }, 187 { "CLRSPIORDY", 0x02, 0x02 },
195 { "CLRSWRAP", 0x08, 0x08 }, 188 { "CLRSWRAP", 0x08, 0x08 },
196 { "CLRIOERR", 0x08, 0x08 }, 189 { "CLRIOERR", 0x08, 0x08 },
@@ -206,7 +199,7 @@ ahc_clrsint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
206 0x0b, regvalue, cur_col, wrap)); 199 0x0b, regvalue, cur_col, wrap));
207} 200}
208 201
209static ahc_reg_parse_entry_t SSTAT0_parse_table[] = { 202static const ahc_reg_parse_entry_t SSTAT0_parse_table[] = {
210 { "DMADONE", 0x01, 0x01 }, 203 { "DMADONE", 0x01, 0x01 },
211 { "SPIORDY", 0x02, 0x02 }, 204 { "SPIORDY", 0x02, 0x02 },
212 { "SDONE", 0x04, 0x04 }, 205 { "SDONE", 0x04, 0x04 },
@@ -225,7 +218,7 @@ ahc_sstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
225 0x0b, regvalue, cur_col, wrap)); 218 0x0b, regvalue, cur_col, wrap));
226} 219}
227 220
228static ahc_reg_parse_entry_t CLRSINT1_parse_table[] = { 221static const ahc_reg_parse_entry_t CLRSINT1_parse_table[] = {
229 { "CLRREQINIT", 0x01, 0x01 }, 222 { "CLRREQINIT", 0x01, 0x01 },
230 { "CLRPHASECHG", 0x02, 0x02 }, 223 { "CLRPHASECHG", 0x02, 0x02 },
231 { "CLRSCSIPERR", 0x04, 0x04 }, 224 { "CLRSCSIPERR", 0x04, 0x04 },
@@ -242,7 +235,7 @@ ahc_clrsint1_print(u_int regvalue, u_int *cur_col, u_int wrap)
242 0x0c, regvalue, cur_col, wrap)); 235 0x0c, regvalue, cur_col, wrap));
243} 236}
244 237
245static ahc_reg_parse_entry_t SSTAT1_parse_table[] = { 238static const ahc_reg_parse_entry_t SSTAT1_parse_table[] = {
246 { "REQINIT", 0x01, 0x01 }, 239 { "REQINIT", 0x01, 0x01 },
247 { "PHASECHG", 0x02, 0x02 }, 240 { "PHASECHG", 0x02, 0x02 },
248 { "SCSIPERR", 0x04, 0x04 }, 241 { "SCSIPERR", 0x04, 0x04 },
@@ -260,7 +253,7 @@ ahc_sstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
260 0x0c, regvalue, cur_col, wrap)); 253 0x0c, regvalue, cur_col, wrap));
261} 254}
262 255
263static ahc_reg_parse_entry_t SSTAT2_parse_table[] = { 256static const ahc_reg_parse_entry_t SSTAT2_parse_table[] = {
264 { "DUAL_EDGE_ERR", 0x01, 0x01 }, 257 { "DUAL_EDGE_ERR", 0x01, 0x01 },
265 { "CRCREQERR", 0x02, 0x02 }, 258 { "CRCREQERR", 0x02, 0x02 },
266 { "CRCENDERR", 0x04, 0x04 }, 259 { "CRCENDERR", 0x04, 0x04 },
@@ -278,7 +271,7 @@ ahc_sstat2_print(u_int regvalue, u_int *cur_col, u_int wrap)
278 0x0d, regvalue, cur_col, wrap)); 271 0x0d, regvalue, cur_col, wrap));
279} 272}
280 273
281static ahc_reg_parse_entry_t SSTAT3_parse_table[] = { 274static const ahc_reg_parse_entry_t SSTAT3_parse_table[] = {
282 { "OFFCNT", 0x0f, 0x0f }, 275 { "OFFCNT", 0x0f, 0x0f },
283 { "U2OFFCNT", 0x7f, 0x7f }, 276 { "U2OFFCNT", 0x7f, 0x7f },
284 { "SCSICNT", 0xf0, 0xf0 } 277 { "SCSICNT", 0xf0, 0xf0 }
@@ -291,7 +284,7 @@ ahc_sstat3_print(u_int regvalue, u_int *cur_col, u_int wrap)
291 0x0e, regvalue, cur_col, wrap)); 284 0x0e, regvalue, cur_col, wrap));
292} 285}
293 286
294static ahc_reg_parse_entry_t SCSIID_ULTRA2_parse_table[] = { 287static const ahc_reg_parse_entry_t SCSIID_ULTRA2_parse_table[] = {
295 { "OID", 0x0f, 0x0f }, 288 { "OID", 0x0f, 0x0f },
296 { "TID", 0xf0, 0xf0 } 289 { "TID", 0xf0, 0xf0 }
297}; 290};
@@ -303,7 +296,7 @@ ahc_scsiid_ultra2_print(u_int regvalue, u_int *cur_col, u_int wrap)
303 0x0f, regvalue, cur_col, wrap)); 296 0x0f, regvalue, cur_col, wrap));
304} 297}
305 298
306static ahc_reg_parse_entry_t SIMODE0_parse_table[] = { 299static const ahc_reg_parse_entry_t SIMODE0_parse_table[] = {
307 { "ENDMADONE", 0x01, 0x01 }, 300 { "ENDMADONE", 0x01, 0x01 },
308 { "ENSPIORDY", 0x02, 0x02 }, 301 { "ENSPIORDY", 0x02, 0x02 },
309 { "ENSDONE", 0x04, 0x04 }, 302 { "ENSDONE", 0x04, 0x04 },
@@ -321,7 +314,7 @@ ahc_simode0_print(u_int regvalue, u_int *cur_col, u_int wrap)
321 0x10, regvalue, cur_col, wrap)); 314 0x10, regvalue, cur_col, wrap));
322} 315}
323 316
324static ahc_reg_parse_entry_t SIMODE1_parse_table[] = { 317static const ahc_reg_parse_entry_t SIMODE1_parse_table[] = {
325 { "ENREQINIT", 0x01, 0x01 }, 318 { "ENREQINIT", 0x01, 0x01 },
326 { "ENPHASECHG", 0x02, 0x02 }, 319 { "ENPHASECHG", 0x02, 0x02 },
327 { "ENSCSIPERR", 0x04, 0x04 }, 320 { "ENSCSIPERR", 0x04, 0x04 },
@@ -347,33 +340,13 @@ ahc_scsibusl_print(u_int regvalue, u_int *cur_col, u_int wrap)
347} 340}
348 341
349int 342int
350ahc_scsibush_print(u_int regvalue, u_int *cur_col, u_int wrap)
351{
352 return (ahc_print_register(NULL, 0, "SCSIBUSH",
353 0x13, regvalue, cur_col, wrap));
354}
355
356static ahc_reg_parse_entry_t SXFRCTL2_parse_table[] = {
357 { "CMDDMAEN", 0x08, 0x08 },
358 { "AUTORSTDIS", 0x10, 0x10 },
359 { "ASYNC_SETUP", 0x07, 0x07 }
360};
361
362int
363ahc_sxfrctl2_print(u_int regvalue, u_int *cur_col, u_int wrap)
364{
365 return (ahc_print_register(SXFRCTL2_parse_table, 3, "SXFRCTL2",
366 0x13, regvalue, cur_col, wrap));
367}
368
369int
370ahc_shaddr_print(u_int regvalue, u_int *cur_col, u_int wrap) 343ahc_shaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
371{ 344{
372 return (ahc_print_register(NULL, 0, "SHADDR", 345 return (ahc_print_register(NULL, 0, "SHADDR",
373 0x14, regvalue, cur_col, wrap)); 346 0x14, regvalue, cur_col, wrap));
374} 347}
375 348
376static ahc_reg_parse_entry_t SELTIMER_parse_table[] = { 349static const ahc_reg_parse_entry_t SELTIMER_parse_table[] = {
377 { "STAGE1", 0x01, 0x01 }, 350 { "STAGE1", 0x01, 0x01 },
378 { "STAGE2", 0x02, 0x02 }, 351 { "STAGE2", 0x02, 0x02 },
379 { "STAGE3", 0x04, 0x04 }, 352 { "STAGE3", 0x04, 0x04 },
@@ -389,7 +362,7 @@ ahc_seltimer_print(u_int regvalue, u_int *cur_col, u_int wrap)
389 0x18, regvalue, cur_col, wrap)); 362 0x18, regvalue, cur_col, wrap));
390} 363}
391 364
392static ahc_reg_parse_entry_t SELID_parse_table[] = { 365static const ahc_reg_parse_entry_t SELID_parse_table[] = {
393 { "ONEBIT", 0x08, 0x08 }, 366 { "ONEBIT", 0x08, 0x08 },
394 { "SELID_MASK", 0xf0, 0xf0 } 367 { "SELID_MASK", 0xf0, 0xf0 }
395}; 368};
@@ -401,21 +374,6 @@ ahc_selid_print(u_int regvalue, u_int *cur_col, u_int wrap)
401 0x19, regvalue, cur_col, wrap)); 374 0x19, regvalue, cur_col, wrap));
402} 375}
403 376
404static ahc_reg_parse_entry_t SCAMCTL_parse_table[] = {
405 { "DFLTTID", 0x10, 0x10 },
406 { "ALTSTIM", 0x20, 0x20 },
407 { "CLRSCAMSELID", 0x40, 0x40 },
408 { "ENSCAMSELO", 0x80, 0x80 },
409 { "SCAMLVL", 0x03, 0x03 }
410};
411
412int
413ahc_scamctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
414{
415 return (ahc_print_register(SCAMCTL_parse_table, 5, "SCAMCTL",
416 0x1a, regvalue, cur_col, wrap));
417}
418
419int 377int
420ahc_targid_print(u_int regvalue, u_int *cur_col, u_int wrap) 378ahc_targid_print(u_int regvalue, u_int *cur_col, u_int wrap)
421{ 379{
@@ -423,7 +381,7 @@ ahc_targid_print(u_int regvalue, u_int *cur_col, u_int wrap)
423 0x1b, regvalue, cur_col, wrap)); 381 0x1b, regvalue, cur_col, wrap));
424} 382}
425 383
426static ahc_reg_parse_entry_t SPIOCAP_parse_table[] = { 384static const ahc_reg_parse_entry_t SPIOCAP_parse_table[] = {
427 { "SSPIOCPS", 0x01, 0x01 }, 385 { "SSPIOCPS", 0x01, 0x01 },
428 { "ROM", 0x02, 0x02 }, 386 { "ROM", 0x02, 0x02 },
429 { "EEPROM", 0x04, 0x04 }, 387 { "EEPROM", 0x04, 0x04 },
@@ -441,7 +399,7 @@ ahc_spiocap_print(u_int regvalue, u_int *cur_col, u_int wrap)
441 0x1b, regvalue, cur_col, wrap)); 399 0x1b, regvalue, cur_col, wrap));
442} 400}
443 401
444static ahc_reg_parse_entry_t BRDCTL_parse_table[] = { 402static const ahc_reg_parse_entry_t BRDCTL_parse_table[] = {
445 { "BRDCTL0", 0x01, 0x01 }, 403 { "BRDCTL0", 0x01, 0x01 },
446 { "BRDSTB_ULTRA2", 0x01, 0x01 }, 404 { "BRDSTB_ULTRA2", 0x01, 0x01 },
447 { "BRDCTL1", 0x02, 0x02 }, 405 { "BRDCTL1", 0x02, 0x02 },
@@ -464,7 +422,7 @@ ahc_brdctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
464 0x1d, regvalue, cur_col, wrap)); 422 0x1d, regvalue, cur_col, wrap));
465} 423}
466 424
467static ahc_reg_parse_entry_t SEECTL_parse_table[] = { 425static const ahc_reg_parse_entry_t SEECTL_parse_table[] = {
468 { "SEEDI", 0x01, 0x01 }, 426 { "SEEDI", 0x01, 0x01 },
469 { "SEEDO", 0x02, 0x02 }, 427 { "SEEDO", 0x02, 0x02 },
470 { "SEECK", 0x04, 0x04 }, 428 { "SEECK", 0x04, 0x04 },
@@ -482,7 +440,7 @@ ahc_seectl_print(u_int regvalue, u_int *cur_col, u_int wrap)
482 0x1e, regvalue, cur_col, wrap)); 440 0x1e, regvalue, cur_col, wrap));
483} 441}
484 442
485static ahc_reg_parse_entry_t SBLKCTL_parse_table[] = { 443static const ahc_reg_parse_entry_t SBLKCTL_parse_table[] = {
486 { "XCVR", 0x01, 0x01 }, 444 { "XCVR", 0x01, 0x01 },
487 { "SELWIDE", 0x02, 0x02 }, 445 { "SELWIDE", 0x02, 0x02 },
488 { "ENAB20", 0x04, 0x04 }, 446 { "ENAB20", 0x04, 0x04 },
@@ -522,13 +480,6 @@ ahc_disc_dsb_print(u_int regvalue, u_int *cur_col, u_int wrap)
522} 480}
523 481
524int 482int
525ahc_cmdsize_table_tail_print(u_int regvalue, u_int *cur_col, u_int wrap)
526{
527 return (ahc_print_register(NULL, 0, "CMDSIZE_TABLE_TAIL",
528 0x34, regvalue, cur_col, wrap));
529}
530
531int
532ahc_mwi_residual_print(u_int regvalue, u_int *cur_col, u_int wrap) 483ahc_mwi_residual_print(u_int regvalue, u_int *cur_col, u_int wrap)
533{ 484{
534 return (ahc_print_register(NULL, 0, "MWI_RESIDUAL", 485 return (ahc_print_register(NULL, 0, "MWI_RESIDUAL",
@@ -549,7 +500,7 @@ ahc_msg_out_print(u_int regvalue, u_int *cur_col, u_int wrap)
549 0x3a, regvalue, cur_col, wrap)); 500 0x3a, regvalue, cur_col, wrap));
550} 501}
551 502
552static ahc_reg_parse_entry_t DMAPARAMS_parse_table[] = { 503static const ahc_reg_parse_entry_t DMAPARAMS_parse_table[] = {
553 { "FIFORESET", 0x01, 0x01 }, 504 { "FIFORESET", 0x01, 0x01 },
554 { "FIFOFLUSH", 0x02, 0x02 }, 505 { "FIFOFLUSH", 0x02, 0x02 },
555 { "DIRECTION", 0x04, 0x04 }, 506 { "DIRECTION", 0x04, 0x04 },
@@ -569,7 +520,7 @@ ahc_dmaparams_print(u_int regvalue, u_int *cur_col, u_int wrap)
569 0x3b, regvalue, cur_col, wrap)); 520 0x3b, regvalue, cur_col, wrap));
570} 521}
571 522
572static ahc_reg_parse_entry_t SEQ_FLAGS_parse_table[] = { 523static const ahc_reg_parse_entry_t SEQ_FLAGS_parse_table[] = {
573 { "NO_DISCONNECT", 0x01, 0x01 }, 524 { "NO_DISCONNECT", 0x01, 0x01 },
574 { "SPHASE_PENDING", 0x02, 0x02 }, 525 { "SPHASE_PENDING", 0x02, 0x02 },
575 { "DPHASE_PENDING", 0x04, 0x04 }, 526 { "DPHASE_PENDING", 0x04, 0x04 },
@@ -602,7 +553,7 @@ ahc_saved_lun_print(u_int regvalue, u_int *cur_col, u_int wrap)
602 0x3e, regvalue, cur_col, wrap)); 553 0x3e, regvalue, cur_col, wrap));
603} 554}
604 555
605static ahc_reg_parse_entry_t LASTPHASE_parse_table[] = { 556static const ahc_reg_parse_entry_t LASTPHASE_parse_table[] = {
606 { "MSGI", 0x20, 0x20 }, 557 { "MSGI", 0x20, 0x20 },
607 { "IOI", 0x40, 0x40 }, 558 { "IOI", 0x40, 0x40 },
608 { "CDI", 0x80, 0x80 }, 559 { "CDI", 0x80, 0x80 },
@@ -645,13 +596,6 @@ ahc_free_scbh_print(u_int regvalue, u_int *cur_col, u_int wrap)
645} 596}
646 597
647int 598int
648ahc_complete_scbh_print(u_int regvalue, u_int *cur_col, u_int wrap)
649{
650 return (ahc_print_register(NULL, 0, "COMPLETE_SCBH",
651 0x43, regvalue, cur_col, wrap));
652}
653
654int
655ahc_hscb_addr_print(u_int regvalue, u_int *cur_col, u_int wrap) 599ahc_hscb_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
656{ 600{
657 return (ahc_print_register(NULL, 0, "HSCB_ADDR", 601 return (ahc_print_register(NULL, 0, "HSCB_ADDR",
@@ -700,7 +644,7 @@ ahc_tqinpos_print(u_int regvalue, u_int *cur_col, u_int wrap)
700 0x50, regvalue, cur_col, wrap)); 644 0x50, regvalue, cur_col, wrap));
701} 645}
702 646
703static ahc_reg_parse_entry_t ARG_1_parse_table[] = { 647static const ahc_reg_parse_entry_t ARG_1_parse_table[] = {
704 { "CONT_TARG_SESSION", 0x02, 0x02 }, 648 { "CONT_TARG_SESSION", 0x02, 0x02 },
705 { "CONT_MSG_LOOP", 0x04, 0x04 }, 649 { "CONT_MSG_LOOP", 0x04, 0x04 },
706 { "EXIT_MSG_LOOP", 0x08, 0x08 }, 650 { "EXIT_MSG_LOOP", 0x08, 0x08 },
@@ -731,7 +675,7 @@ ahc_last_msg_print(u_int regvalue, u_int *cur_col, u_int wrap)
731 0x53, regvalue, cur_col, wrap)); 675 0x53, regvalue, cur_col, wrap));
732} 676}
733 677
734static ahc_reg_parse_entry_t SCSISEQ_TEMPLATE_parse_table[] = { 678static const ahc_reg_parse_entry_t SCSISEQ_TEMPLATE_parse_table[] = {
735 { "ENAUTOATNP", 0x02, 0x02 }, 679 { "ENAUTOATNP", 0x02, 0x02 },
736 { "ENAUTOATNI", 0x04, 0x04 }, 680 { "ENAUTOATNI", 0x04, 0x04 },
737 { "ENAUTOATNO", 0x08, 0x08 }, 681 { "ENAUTOATNO", 0x08, 0x08 },
@@ -747,7 +691,7 @@ ahc_scsiseq_template_print(u_int regvalue, u_int *cur_col, u_int wrap)
747 0x54, regvalue, cur_col, wrap)); 691 0x54, regvalue, cur_col, wrap));
748} 692}
749 693
750static ahc_reg_parse_entry_t HA_274_BIOSGLOBAL_parse_table[] = { 694static const ahc_reg_parse_entry_t HA_274_BIOSGLOBAL_parse_table[] = {
751 { "HA_274_EXTENDED_TRANS",0x01, 0x01 } 695 { "HA_274_EXTENDED_TRANS",0x01, 0x01 }
752}; 696};
753 697
@@ -758,7 +702,7 @@ ahc_ha_274_biosglobal_print(u_int regvalue, u_int *cur_col, u_int wrap)
758 0x56, regvalue, cur_col, wrap)); 702 0x56, regvalue, cur_col, wrap));
759} 703}
760 704
761static ahc_reg_parse_entry_t SEQ_FLAGS2_parse_table[] = { 705static const ahc_reg_parse_entry_t SEQ_FLAGS2_parse_table[] = {
762 { "SCB_DMA", 0x01, 0x01 }, 706 { "SCB_DMA", 0x01, 0x01 },
763 { "TARGET_MSG_PENDING", 0x02, 0x02 } 707 { "TARGET_MSG_PENDING", 0x02, 0x02 }
764}; 708};
@@ -770,7 +714,7 @@ ahc_seq_flags2_print(u_int regvalue, u_int *cur_col, u_int wrap)
770 0x57, regvalue, cur_col, wrap)); 714 0x57, regvalue, cur_col, wrap));
771} 715}
772 716
773static ahc_reg_parse_entry_t SCSICONF_parse_table[] = { 717static const ahc_reg_parse_entry_t SCSICONF_parse_table[] = {
774 { "ENSPCHK", 0x20, 0x20 }, 718 { "ENSPCHK", 0x20, 0x20 },
775 { "RESET_SCSI", 0x40, 0x40 }, 719 { "RESET_SCSI", 0x40, 0x40 },
776 { "TERM_ENB", 0x80, 0x80 }, 720 { "TERM_ENB", 0x80, 0x80 },
@@ -785,7 +729,7 @@ ahc_scsiconf_print(u_int regvalue, u_int *cur_col, u_int wrap)
785 0x5a, regvalue, cur_col, wrap)); 729 0x5a, regvalue, cur_col, wrap));
786} 730}
787 731
788static ahc_reg_parse_entry_t INTDEF_parse_table[] = { 732static const ahc_reg_parse_entry_t INTDEF_parse_table[] = {
789 { "EDGE_TRIG", 0x80, 0x80 }, 733 { "EDGE_TRIG", 0x80, 0x80 },
790 { "VECTOR", 0x0f, 0x0f } 734 { "VECTOR", 0x0f, 0x0f }
791}; 735};
@@ -804,7 +748,7 @@ ahc_hostconf_print(u_int regvalue, u_int *cur_col, u_int wrap)
804 0x5d, regvalue, cur_col, wrap)); 748 0x5d, regvalue, cur_col, wrap));
805} 749}
806 750
807static ahc_reg_parse_entry_t HA_274_BIOSCTRL_parse_table[] = { 751static const ahc_reg_parse_entry_t HA_274_BIOSCTRL_parse_table[] = {
808 { "CHANNEL_B_PRIMARY", 0x08, 0x08 }, 752 { "CHANNEL_B_PRIMARY", 0x08, 0x08 },
809 { "BIOSMODE", 0x30, 0x30 }, 753 { "BIOSMODE", 0x30, 0x30 },
810 { "BIOSDISABLED", 0x30, 0x30 } 754 { "BIOSDISABLED", 0x30, 0x30 }
@@ -817,7 +761,7 @@ ahc_ha_274_biosctrl_print(u_int regvalue, u_int *cur_col, u_int wrap)
817 0x5f, regvalue, cur_col, wrap)); 761 0x5f, regvalue, cur_col, wrap));
818} 762}
819 763
820static ahc_reg_parse_entry_t SEQCTL_parse_table[] = { 764static const ahc_reg_parse_entry_t SEQCTL_parse_table[] = {
821 { "LOADRAM", 0x01, 0x01 }, 765 { "LOADRAM", 0x01, 0x01 },
822 { "SEQRESET", 0x02, 0x02 }, 766 { "SEQRESET", 0x02, 0x02 },
823 { "STEP", 0x04, 0x04 }, 767 { "STEP", 0x04, 0x04 },
@@ -849,7 +793,7 @@ ahc_seqaddr0_print(u_int regvalue, u_int *cur_col, u_int wrap)
849 0x62, regvalue, cur_col, wrap)); 793 0x62, regvalue, cur_col, wrap));
850} 794}
851 795
852static ahc_reg_parse_entry_t SEQADDR1_parse_table[] = { 796static const ahc_reg_parse_entry_t SEQADDR1_parse_table[] = {
853 { "SEQADDR1_MASK", 0x01, 0x01 } 797 { "SEQADDR1_MASK", 0x01, 0x01 }
854}; 798};
855 799
@@ -902,7 +846,7 @@ ahc_none_print(u_int regvalue, u_int *cur_col, u_int wrap)
902 0x6a, regvalue, cur_col, wrap)); 846 0x6a, regvalue, cur_col, wrap));
903} 847}
904 848
905static ahc_reg_parse_entry_t FLAGS_parse_table[] = { 849static const ahc_reg_parse_entry_t FLAGS_parse_table[] = {
906 { "CARRY", 0x01, 0x01 }, 850 { "CARRY", 0x01, 0x01 },
907 { "ZERO", 0x02, 0x02 } 851 { "ZERO", 0x02, 0x02 }
908}; 852};
@@ -929,13 +873,6 @@ ahc_dindir_print(u_int regvalue, u_int *cur_col, u_int wrap)
929} 873}
930 874
931int 875int
932ahc_function1_print(u_int regvalue, u_int *cur_col, u_int wrap)
933{
934 return (ahc_print_register(NULL, 0, "FUNCTION1",
935 0x6e, regvalue, cur_col, wrap));
936}
937
938int
939ahc_stack_print(u_int regvalue, u_int *cur_col, u_int wrap) 876ahc_stack_print(u_int regvalue, u_int *cur_col, u_int wrap)
940{ 877{
941 return (ahc_print_register(NULL, 0, "STACK", 878 return (ahc_print_register(NULL, 0, "STACK",
@@ -956,19 +893,7 @@ ahc_sram_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
956 0x70, regvalue, cur_col, wrap)); 893 0x70, regvalue, cur_col, wrap));
957} 894}
958 895
959static ahc_reg_parse_entry_t BCTL_parse_table[] = { 896static const ahc_reg_parse_entry_t DSCOMMAND0_parse_table[] = {
960 { "ENABLE", 0x01, 0x01 },
961 { "ACE", 0x08, 0x08 }
962};
963
964int
965ahc_bctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
966{
967 return (ahc_print_register(BCTL_parse_table, 2, "BCTL",
968 0x84, regvalue, cur_col, wrap));
969}
970
971static ahc_reg_parse_entry_t DSCOMMAND0_parse_table[] = {
972 { "CIOPARCKEN", 0x01, 0x01 }, 897 { "CIOPARCKEN", 0x01, 0x01 },
973 { "USCBSIZE32", 0x02, 0x02 }, 898 { "USCBSIZE32", 0x02, 0x02 },
974 { "RAMPS", 0x04, 0x04 }, 899 { "RAMPS", 0x04, 0x04 },
@@ -986,7 +911,7 @@ ahc_dscommand0_print(u_int regvalue, u_int *cur_col, u_int wrap)
986 0x84, regvalue, cur_col, wrap)); 911 0x84, regvalue, cur_col, wrap));
987} 912}
988 913
989static ahc_reg_parse_entry_t BUSTIME_parse_table[] = { 914static const ahc_reg_parse_entry_t BUSTIME_parse_table[] = {
990 { "BON", 0x0f, 0x0f }, 915 { "BON", 0x0f, 0x0f },
991 { "BOFF", 0xf0, 0xf0 } 916 { "BOFF", 0xf0, 0xf0 }
992}; 917};
@@ -998,7 +923,7 @@ ahc_bustime_print(u_int regvalue, u_int *cur_col, u_int wrap)
998 0x85, regvalue, cur_col, wrap)); 923 0x85, regvalue, cur_col, wrap));
999} 924}
1000 925
1001static ahc_reg_parse_entry_t DSCOMMAND1_parse_table[] = { 926static const ahc_reg_parse_entry_t DSCOMMAND1_parse_table[] = {
1002 { "HADDLDSEL0", 0x01, 0x01 }, 927 { "HADDLDSEL0", 0x01, 0x01 },
1003 { "HADDLDSEL1", 0x02, 0x02 }, 928 { "HADDLDSEL1", 0x02, 0x02 },
1004 { "DSLATT", 0xfc, 0xfc } 929 { "DSLATT", 0xfc, 0xfc }
@@ -1011,7 +936,7 @@ ahc_dscommand1_print(u_int regvalue, u_int *cur_col, u_int wrap)
1011 0x85, regvalue, cur_col, wrap)); 936 0x85, regvalue, cur_col, wrap));
1012} 937}
1013 938
1014static ahc_reg_parse_entry_t BUSSPD_parse_table[] = { 939static const ahc_reg_parse_entry_t BUSSPD_parse_table[] = {
1015 { "STBON", 0x07, 0x07 }, 940 { "STBON", 0x07, 0x07 },
1016 { "STBOFF", 0x38, 0x38 }, 941 { "STBOFF", 0x38, 0x38 },
1017 { "DFTHRSH_75", 0x80, 0x80 }, 942 { "DFTHRSH_75", 0x80, 0x80 },
@@ -1026,7 +951,7 @@ ahc_busspd_print(u_int regvalue, u_int *cur_col, u_int wrap)
1026 0x86, regvalue, cur_col, wrap)); 951 0x86, regvalue, cur_col, wrap));
1027} 952}
1028 953
1029static ahc_reg_parse_entry_t HS_MAILBOX_parse_table[] = { 954static const ahc_reg_parse_entry_t HS_MAILBOX_parse_table[] = {
1030 { "SEQ_MAILBOX", 0x0f, 0x0f }, 955 { "SEQ_MAILBOX", 0x0f, 0x0f },
1031 { "HOST_TQINPOS", 0x80, 0x80 }, 956 { "HOST_TQINPOS", 0x80, 0x80 },
1032 { "HOST_MAILBOX", 0xf0, 0xf0 } 957 { "HOST_MAILBOX", 0xf0, 0xf0 }
@@ -1039,7 +964,7 @@ ahc_hs_mailbox_print(u_int regvalue, u_int *cur_col, u_int wrap)
1039 0x86, regvalue, cur_col, wrap)); 964 0x86, regvalue, cur_col, wrap));
1040} 965}
1041 966
1042static ahc_reg_parse_entry_t DSPCISTATUS_parse_table[] = { 967static const ahc_reg_parse_entry_t DSPCISTATUS_parse_table[] = {
1043 { "DFTHRSH_100", 0xc0, 0xc0 } 968 { "DFTHRSH_100", 0xc0, 0xc0 }
1044}; 969};
1045 970
@@ -1050,7 +975,7 @@ ahc_dspcistatus_print(u_int regvalue, u_int *cur_col, u_int wrap)
1050 0x86, regvalue, cur_col, wrap)); 975 0x86, regvalue, cur_col, wrap));
1051} 976}
1052 977
1053static ahc_reg_parse_entry_t HCNTRL_parse_table[] = { 978static const ahc_reg_parse_entry_t HCNTRL_parse_table[] = {
1054 { "CHIPRST", 0x01, 0x01 }, 979 { "CHIPRST", 0x01, 0x01 },
1055 { "CHIPRSTACK", 0x01, 0x01 }, 980 { "CHIPRSTACK", 0x01, 0x01 },
1056 { "INTEN", 0x02, 0x02 }, 981 { "INTEN", 0x02, 0x02 },
@@ -1088,7 +1013,7 @@ ahc_scbptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1088 0x90, regvalue, cur_col, wrap)); 1013 0x90, regvalue, cur_col, wrap));
1089} 1014}
1090 1015
1091static ahc_reg_parse_entry_t INTSTAT_parse_table[] = { 1016static const ahc_reg_parse_entry_t INTSTAT_parse_table[] = {
1092 { "SEQINT", 0x01, 0x01 }, 1017 { "SEQINT", 0x01, 0x01 },
1093 { "CMDCMPLT", 0x02, 0x02 }, 1018 { "CMDCMPLT", 0x02, 0x02 },
1094 { "SCSIINT", 0x04, 0x04 }, 1019 { "SCSIINT", 0x04, 0x04 },
@@ -1119,7 +1044,7 @@ ahc_intstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
1119 0x91, regvalue, cur_col, wrap)); 1044 0x91, regvalue, cur_col, wrap));
1120} 1045}
1121 1046
1122static ahc_reg_parse_entry_t CLRINT_parse_table[] = { 1047static const ahc_reg_parse_entry_t CLRINT_parse_table[] = {
1123 { "CLRSEQINT", 0x01, 0x01 }, 1048 { "CLRSEQINT", 0x01, 0x01 },
1124 { "CLRCMDINT", 0x02, 0x02 }, 1049 { "CLRCMDINT", 0x02, 0x02 },
1125 { "CLRSCSIINT", 0x04, 0x04 }, 1050 { "CLRSCSIINT", 0x04, 0x04 },
@@ -1134,7 +1059,7 @@ ahc_clrint_print(u_int regvalue, u_int *cur_col, u_int wrap)
1134 0x92, regvalue, cur_col, wrap)); 1059 0x92, regvalue, cur_col, wrap));
1135} 1060}
1136 1061
1137static ahc_reg_parse_entry_t ERROR_parse_table[] = { 1062static const ahc_reg_parse_entry_t ERROR_parse_table[] = {
1138 { "ILLHADDR", 0x01, 0x01 }, 1063 { "ILLHADDR", 0x01, 0x01 },
1139 { "ILLSADDR", 0x02, 0x02 }, 1064 { "ILLSADDR", 0x02, 0x02 },
1140 { "ILLOPCODE", 0x04, 0x04 }, 1065 { "ILLOPCODE", 0x04, 0x04 },
@@ -1152,7 +1077,7 @@ ahc_error_print(u_int regvalue, u_int *cur_col, u_int wrap)
1152 0x92, regvalue, cur_col, wrap)); 1077 0x92, regvalue, cur_col, wrap));
1153} 1078}
1154 1079
1155static ahc_reg_parse_entry_t DFCNTRL_parse_table[] = { 1080static const ahc_reg_parse_entry_t DFCNTRL_parse_table[] = {
1156 { "FIFORESET", 0x01, 0x01 }, 1081 { "FIFORESET", 0x01, 0x01 },
1157 { "FIFOFLUSH", 0x02, 0x02 }, 1082 { "FIFOFLUSH", 0x02, 0x02 },
1158 { "DIRECTION", 0x04, 0x04 }, 1083 { "DIRECTION", 0x04, 0x04 },
@@ -1172,7 +1097,7 @@ ahc_dfcntrl_print(u_int regvalue, u_int *cur_col, u_int wrap)
1172 0x93, regvalue, cur_col, wrap)); 1097 0x93, regvalue, cur_col, wrap));
1173} 1098}
1174 1099
1175static ahc_reg_parse_entry_t DFSTATUS_parse_table[] = { 1100static const ahc_reg_parse_entry_t DFSTATUS_parse_table[] = {
1176 { "FIFOEMP", 0x01, 0x01 }, 1101 { "FIFOEMP", 0x01, 0x01 },
1177 { "FIFOFULL", 0x02, 0x02 }, 1102 { "FIFOFULL", 0x02, 0x02 },
1178 { "DFTHRESH", 0x04, 0x04 }, 1103 { "DFTHRESH", 0x04, 0x04 },
@@ -1198,20 +1123,13 @@ ahc_dfwaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1198} 1123}
1199 1124
1200int 1125int
1201ahc_dfraddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1202{
1203 return (ahc_print_register(NULL, 0, "DFRADDR",
1204 0x97, regvalue, cur_col, wrap));
1205}
1206
1207int
1208ahc_dfdat_print(u_int regvalue, u_int *cur_col, u_int wrap) 1126ahc_dfdat_print(u_int regvalue, u_int *cur_col, u_int wrap)
1209{ 1127{
1210 return (ahc_print_register(NULL, 0, "DFDAT", 1128 return (ahc_print_register(NULL, 0, "DFDAT",
1211 0x99, regvalue, cur_col, wrap)); 1129 0x99, regvalue, cur_col, wrap));
1212} 1130}
1213 1131
1214static ahc_reg_parse_entry_t SCBCNT_parse_table[] = { 1132static const ahc_reg_parse_entry_t SCBCNT_parse_table[] = {
1215 { "SCBAUTO", 0x80, 0x80 }, 1133 { "SCBAUTO", 0x80, 0x80 },
1216 { "SCBCNT_MASK", 0x1f, 0x1f } 1134 { "SCBCNT_MASK", 0x1f, 0x1f }
1217}; 1135};
@@ -1231,20 +1149,13 @@ ahc_qinfifo_print(u_int regvalue, u_int *cur_col, u_int wrap)
1231} 1149}
1232 1150
1233int 1151int
1234ahc_qincnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
1235{
1236 return (ahc_print_register(NULL, 0, "QINCNT",
1237 0x9c, regvalue, cur_col, wrap));
1238}
1239
1240int
1241ahc_qoutfifo_print(u_int regvalue, u_int *cur_col, u_int wrap) 1152ahc_qoutfifo_print(u_int regvalue, u_int *cur_col, u_int wrap)
1242{ 1153{
1243 return (ahc_print_register(NULL, 0, "QOUTFIFO", 1154 return (ahc_print_register(NULL, 0, "QOUTFIFO",
1244 0x9d, regvalue, cur_col, wrap)); 1155 0x9d, regvalue, cur_col, wrap));
1245} 1156}
1246 1157
1247static ahc_reg_parse_entry_t CRCCONTROL1_parse_table[] = { 1158static const ahc_reg_parse_entry_t CRCCONTROL1_parse_table[] = {
1248 { "TARGCRCCNTEN", 0x04, 0x04 }, 1159 { "TARGCRCCNTEN", 0x04, 0x04 },
1249 { "TARGCRCENDEN", 0x08, 0x08 }, 1160 { "TARGCRCENDEN", 0x08, 0x08 },
1250 { "CRCREQCHKEN", 0x10, 0x10 }, 1161 { "CRCREQCHKEN", 0x10, 0x10 },
@@ -1260,14 +1171,7 @@ ahc_crccontrol1_print(u_int regvalue, u_int *cur_col, u_int wrap)
1260 0x9d, regvalue, cur_col, wrap)); 1171 0x9d, regvalue, cur_col, wrap));
1261} 1172}
1262 1173
1263int 1174static const ahc_reg_parse_entry_t SCSIPHASE_parse_table[] = {
1264ahc_qoutcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
1265{
1266 return (ahc_print_register(NULL, 0, "QOUTCNT",
1267 0x9e, regvalue, cur_col, wrap));
1268}
1269
1270static ahc_reg_parse_entry_t SCSIPHASE_parse_table[] = {
1271 { "DATA_OUT_PHASE", 0x01, 0x01 }, 1175 { "DATA_OUT_PHASE", 0x01, 0x01 },
1272 { "DATA_IN_PHASE", 0x02, 0x02 }, 1176 { "DATA_IN_PHASE", 0x02, 0x02 },
1273 { "MSG_OUT_PHASE", 0x04, 0x04 }, 1177 { "MSG_OUT_PHASE", 0x04, 0x04 },
@@ -1284,7 +1188,7 @@ ahc_scsiphase_print(u_int regvalue, u_int *cur_col, u_int wrap)
1284 0x9e, regvalue, cur_col, wrap)); 1188 0x9e, regvalue, cur_col, wrap));
1285} 1189}
1286 1190
1287static ahc_reg_parse_entry_t SFUNCT_parse_table[] = { 1191static const ahc_reg_parse_entry_t SFUNCT_parse_table[] = {
1288 { "ALT_MODE", 0x80, 0x80 } 1192 { "ALT_MODE", 0x80, 0x80 }
1289}; 1193};
1290 1194
@@ -1351,7 +1255,7 @@ ahc_scb_dataptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1351 0xac, regvalue, cur_col, wrap)); 1255 0xac, regvalue, cur_col, wrap));
1352} 1256}
1353 1257
1354static ahc_reg_parse_entry_t SCB_DATACNT_parse_table[] = { 1258static const ahc_reg_parse_entry_t SCB_DATACNT_parse_table[] = {
1355 { "SG_LAST_SEG", 0x80, 0x80 }, 1259 { "SG_LAST_SEG", 0x80, 0x80 },
1356 { "SG_HIGH_ADDR_BITS", 0x7f, 0x7f } 1260 { "SG_HIGH_ADDR_BITS", 0x7f, 0x7f }
1357}; 1261};
@@ -1363,7 +1267,7 @@ ahc_scb_datacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
1363 0xb0, regvalue, cur_col, wrap)); 1267 0xb0, regvalue, cur_col, wrap));
1364} 1268}
1365 1269
1366static ahc_reg_parse_entry_t SCB_SGPTR_parse_table[] = { 1270static const ahc_reg_parse_entry_t SCB_SGPTR_parse_table[] = {
1367 { "SG_LIST_NULL", 0x01, 0x01 }, 1271 { "SG_LIST_NULL", 0x01, 0x01 },
1368 { "SG_FULL_RESID", 0x02, 0x02 }, 1272 { "SG_FULL_RESID", 0x02, 0x02 },
1369 { "SG_RESID_VALID", 0x04, 0x04 } 1273 { "SG_RESID_VALID", 0x04, 0x04 }
@@ -1376,7 +1280,7 @@ ahc_scb_sgptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1376 0xb4, regvalue, cur_col, wrap)); 1280 0xb4, regvalue, cur_col, wrap));
1377} 1281}
1378 1282
1379static ahc_reg_parse_entry_t SCB_CONTROL_parse_table[] = { 1283static const ahc_reg_parse_entry_t SCB_CONTROL_parse_table[] = {
1380 { "DISCONNECTED", 0x04, 0x04 }, 1284 { "DISCONNECTED", 0x04, 0x04 },
1381 { "ULTRAENB", 0x08, 0x08 }, 1285 { "ULTRAENB", 0x08, 0x08 },
1382 { "MK_MESSAGE", 0x10, 0x10 }, 1286 { "MK_MESSAGE", 0x10, 0x10 },
@@ -1394,7 +1298,7 @@ ahc_scb_control_print(u_int regvalue, u_int *cur_col, u_int wrap)
1394 0xb8, regvalue, cur_col, wrap)); 1298 0xb8, regvalue, cur_col, wrap));
1395} 1299}
1396 1300
1397static ahc_reg_parse_entry_t SCB_SCSIID_parse_table[] = { 1301static const ahc_reg_parse_entry_t SCB_SCSIID_parse_table[] = {
1398 { "TWIN_CHNLB", 0x80, 0x80 }, 1302 { "TWIN_CHNLB", 0x80, 0x80 },
1399 { "OID", 0x0f, 0x0f }, 1303 { "OID", 0x0f, 0x0f },
1400 { "TWIN_TID", 0x70, 0x70 }, 1304 { "TWIN_TID", 0x70, 0x70 },
@@ -1408,7 +1312,7 @@ ahc_scb_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap)
1408 0xb9, regvalue, cur_col, wrap)); 1312 0xb9, regvalue, cur_col, wrap));
1409} 1313}
1410 1314
1411static ahc_reg_parse_entry_t SCB_LUN_parse_table[] = { 1315static const ahc_reg_parse_entry_t SCB_LUN_parse_table[] = {
1412 { "SCB_XFERLEN_ODD", 0x80, 0x80 }, 1316 { "SCB_XFERLEN_ODD", 0x80, 0x80 },
1413 { "LID", 0x3f, 0x3f } 1317 { "LID", 0x3f, 0x3f }
1414}; 1318};
@@ -1455,14 +1359,7 @@ ahc_scb_next_print(u_int regvalue, u_int *cur_col, u_int wrap)
1455 0xbf, regvalue, cur_col, wrap)); 1359 0xbf, regvalue, cur_col, wrap));
1456} 1360}
1457 1361
1458int 1362static const ahc_reg_parse_entry_t SEECTL_2840_parse_table[] = {
1459ahc_scb_64_spare_print(u_int regvalue, u_int *cur_col, u_int wrap)
1460{
1461 return (ahc_print_register(NULL, 0, "SCB_64_SPARE",
1462 0xc0, regvalue, cur_col, wrap));
1463}
1464
1465static ahc_reg_parse_entry_t SEECTL_2840_parse_table[] = {
1466 { "DO_2840", 0x01, 0x01 }, 1363 { "DO_2840", 0x01, 0x01 },
1467 { "CK_2840", 0x02, 0x02 }, 1364 { "CK_2840", 0x02, 0x02 },
1468 { "CS_2840", 0x04, 0x04 } 1365 { "CS_2840", 0x04, 0x04 }
@@ -1475,7 +1372,7 @@ ahc_seectl_2840_print(u_int regvalue, u_int *cur_col, u_int wrap)
1475 0xc0, regvalue, cur_col, wrap)); 1372 0xc0, regvalue, cur_col, wrap));
1476} 1373}
1477 1374
1478static ahc_reg_parse_entry_t STATUS_2840_parse_table[] = { 1375static const ahc_reg_parse_entry_t STATUS_2840_parse_table[] = {
1479 { "DI_2840", 0x01, 0x01 }, 1376 { "DI_2840", 0x01, 0x01 },
1480 { "EEPROM_TF", 0x80, 0x80 }, 1377 { "EEPROM_TF", 0x80, 0x80 },
1481 { "ADSEL", 0x1e, 0x1e }, 1378 { "ADSEL", 0x1e, 0x1e },
@@ -1524,7 +1421,7 @@ ahc_ccsgaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1524 0xea, regvalue, cur_col, wrap)); 1421 0xea, regvalue, cur_col, wrap));
1525} 1422}
1526 1423
1527static ahc_reg_parse_entry_t CCSGCTL_parse_table[] = { 1424static const ahc_reg_parse_entry_t CCSGCTL_parse_table[] = {
1528 { "CCSGRESET", 0x01, 0x01 }, 1425 { "CCSGRESET", 0x01, 0x01 },
1529 { "SG_FETCH_NEEDED", 0x02, 0x02 }, 1426 { "SG_FETCH_NEEDED", 0x02, 0x02 },
1530 { "CCSGEN", 0x08, 0x08 }, 1427 { "CCSGEN", 0x08, 0x08 },
@@ -1552,7 +1449,7 @@ ahc_ccscbaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1552 0xed, regvalue, cur_col, wrap)); 1449 0xed, regvalue, cur_col, wrap));
1553} 1450}
1554 1451
1555static ahc_reg_parse_entry_t CCSCBCTL_parse_table[] = { 1452static const ahc_reg_parse_entry_t CCSCBCTL_parse_table[] = {
1556 { "CCSCBRESET", 0x01, 0x01 }, 1453 { "CCSCBRESET", 0x01, 0x01 },
1557 { "CCSCBDIR", 0x04, 0x04 }, 1454 { "CCSCBDIR", 0x04, 0x04 },
1558 { "CCSCBEN", 0x08, 0x08 }, 1455 { "CCSCBEN", 0x08, 0x08 },
@@ -1610,7 +1507,7 @@ ahc_sdscb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
1610 0xf8, regvalue, cur_col, wrap)); 1507 0xf8, regvalue, cur_col, wrap));
1611} 1508}
1612 1509
1613static ahc_reg_parse_entry_t QOFF_CTLSTA_parse_table[] = { 1510static const ahc_reg_parse_entry_t QOFF_CTLSTA_parse_table[] = {
1614 { "SDSCB_ROLLOVER", 0x10, 0x10 }, 1511 { "SDSCB_ROLLOVER", 0x10, 0x10 },
1615 { "SNSCB_ROLLOVER", 0x20, 0x20 }, 1512 { "SNSCB_ROLLOVER", 0x20, 0x20 },
1616 { "SCB_AVAIL", 0x40, 0x40 }, 1513 { "SCB_AVAIL", 0x40, 0x40 },
@@ -1625,7 +1522,7 @@ ahc_qoff_ctlsta_print(u_int regvalue, u_int *cur_col, u_int wrap)
1625 0xfa, regvalue, cur_col, wrap)); 1522 0xfa, regvalue, cur_col, wrap));
1626} 1523}
1627 1524
1628static ahc_reg_parse_entry_t DFF_THRSH_parse_table[] = { 1525static const ahc_reg_parse_entry_t DFF_THRSH_parse_table[] = {
1629 { "RD_DFTHRSH_MIN", 0x00, 0x00 }, 1526 { "RD_DFTHRSH_MIN", 0x00, 0x00 },
1630 { "WR_DFTHRSH_MIN", 0x00, 0x00 }, 1527 { "WR_DFTHRSH_MIN", 0x00, 0x00 },
1631 { "RD_DFTHRSH_25", 0x01, 0x01 }, 1528 { "RD_DFTHRSH_25", 0x01, 0x01 },
@@ -1653,7 +1550,7 @@ ahc_dff_thrsh_print(u_int regvalue, u_int *cur_col, u_int wrap)
1653 0xfb, regvalue, cur_col, wrap)); 1550 0xfb, regvalue, cur_col, wrap));
1654} 1551}
1655 1552
1656static ahc_reg_parse_entry_t SG_CACHE_SHADOW_parse_table[] = { 1553static const ahc_reg_parse_entry_t SG_CACHE_SHADOW_parse_table[] = {
1657 { "LAST_SEG_DONE", 0x01, 0x01 }, 1554 { "LAST_SEG_DONE", 0x01, 0x01 },
1658 { "LAST_SEG", 0x02, 0x02 }, 1555 { "LAST_SEG", 0x02, 0x02 },
1659 { "SG_ADDR_MASK", 0xf8, 0xf8 } 1556 { "SG_ADDR_MASK", 0xf8, 0xf8 }
@@ -1666,7 +1563,7 @@ ahc_sg_cache_shadow_print(u_int regvalue, u_int *cur_col, u_int wrap)
1666 0xfc, regvalue, cur_col, wrap)); 1563 0xfc, regvalue, cur_col, wrap));
1667} 1564}
1668 1565
1669static ahc_reg_parse_entry_t SG_CACHE_PRE_parse_table[] = { 1566static const ahc_reg_parse_entry_t SG_CACHE_PRE_parse_table[] = {
1670 { "LAST_SEG_DONE", 0x01, 0x01 }, 1567 { "LAST_SEG_DONE", 0x01, 0x01 },
1671 { "LAST_SEG", 0x02, 0x02 }, 1568 { "LAST_SEG", 0x02, 0x02 },
1672 { "SG_ADDR_MASK", 0xf8, 0xf8 } 1569 { "SG_ADDR_MASK", 0xf8, 0xf8 }
diff --git a/drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped b/drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped
index 4cee08521e75..07e93fbae706 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped
+++ b/drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped
@@ -5,7 +5,7 @@
5 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#58 $ 5 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#58 $
6 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#40 $ 6 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#40 $
7 */ 7 */
8static uint8_t seqprog[] = { 8static const uint8_t seqprog[] = {
9 0xb2, 0x00, 0x00, 0x08, 9 0xb2, 0x00, 0x00, 0x08,
10 0xf7, 0x11, 0x22, 0x08, 10 0xf7, 0x11, 0x22, 0x08,
11 0x00, 0x65, 0xee, 0x59, 11 0x00, 0x65, 0xee, 0x59,
@@ -1081,7 +1081,7 @@ ahc_patch0_func(struct ahc_softc *ahc)
1081 return (0); 1081 return (0);
1082} 1082}
1083 1083
1084static struct patch { 1084static const struct patch {
1085 ahc_patch_func_t *patch_func; 1085 ahc_patch_func_t *patch_func;
1086 uint32_t begin :10, 1086 uint32_t begin :10,
1087 skip_instr :10, 1087 skip_instr :10,
@@ -1291,7 +1291,7 @@ static struct patch {
1291 { ahc_patch4_func, 865, 12, 1 } 1291 { ahc_patch4_func, 865, 12, 1 }
1292}; 1292};
1293 1293
1294static struct cs { 1294static const struct cs {
1295 uint16_t begin; 1295 uint16_t begin;
1296 uint16_t end; 1296 uint16_t end;
1297} critical_sections[] = { 1297} critical_sections[] = {
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm.c b/drivers/scsi/aic7xxx/aicasm/aicasm.c
index 924102720b14..e4a778720301 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm.c
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm.c
@@ -362,7 +362,7 @@ output_code()
362" *\n" 362" *\n"
363"%s */\n", versions); 363"%s */\n", versions);
364 364
365 fprintf(ofile, "static uint8_t seqprog[] = {\n"); 365 fprintf(ofile, "static const uint8_t seqprog[] = {\n");
366 for (cur_instr = STAILQ_FIRST(&seq_program); 366 for (cur_instr = STAILQ_FIRST(&seq_program);
367 cur_instr != NULL; 367 cur_instr != NULL;
368 cur_instr = STAILQ_NEXT(cur_instr, links)) { 368 cur_instr = STAILQ_NEXT(cur_instr, links)) {
@@ -415,7 +415,7 @@ output_code()
415 } 415 }
416 416
417 fprintf(ofile, 417 fprintf(ofile,
418"static struct patch {\n" 418"static const struct patch {\n"
419" %spatch_func_t *patch_func;\n" 419" %spatch_func_t *patch_func;\n"
420" uint32_t begin :10,\n" 420" uint32_t begin :10,\n"
421" skip_instr :10,\n" 421" skip_instr :10,\n"
@@ -435,7 +435,7 @@ output_code()
435 fprintf(ofile, "\n};\n\n"); 435 fprintf(ofile, "\n};\n\n");
436 436
437 fprintf(ofile, 437 fprintf(ofile,
438"static struct cs {\n" 438"static const struct cs {\n"
439" uint16_t begin;\n" 439" uint16_t begin;\n"
440" uint16_t end;\n" 440" uint16_t end;\n"
441"} critical_sections[] = {\n"); 441"} critical_sections[] = {\n");
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
index 702e2dbd11fb..81be6a261cc8 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
@@ -101,11 +101,12 @@ static void format_3_instr(int opcode, symbol_ref_t *src,
101 expression_t *immed, symbol_ref_t *address); 101 expression_t *immed, symbol_ref_t *address);
102static void test_readable_symbol(symbol_t *symbol); 102static void test_readable_symbol(symbol_t *symbol);
103static void test_writable_symbol(symbol_t *symbol); 103static void test_writable_symbol(symbol_t *symbol);
104static void type_check(symbol_t *symbol, expression_t *expression, int and_op); 104static void type_check(symbol_ref_t *sym, expression_t *expression, int and_op);
105static void make_expression(expression_t *immed, int value); 105static void make_expression(expression_t *immed, int value);
106static void add_conditional(symbol_t *symbol); 106static void add_conditional(symbol_t *symbol);
107static void add_version(const char *verstring); 107static void add_version(const char *verstring);
108static int is_download_const(expression_t *immed); 108static int is_download_const(expression_t *immed);
109static int is_location_address(symbol_t *symbol);
109void yyerror(const char *string); 110void yyerror(const char *string);
110 111
111#define SRAM_SYMNAME "SRAM_BASE" 112#define SRAM_SYMNAME "SRAM_BASE"
@@ -142,6 +143,8 @@ void yyerror(const char *string);
142 143
143%token <value> T_ADDRESS 144%token <value> T_ADDRESS
144 145
146%token T_COUNT
147
145%token T_ACCESS_MODE 148%token T_ACCESS_MODE
146 149
147%token T_MODES 150%token T_MODES
@@ -192,10 +195,10 @@ void yyerror(const char *string);
192 195
193%token <value> T_OR 196%token <value> T_OR
194 197
195/* 16 bit extensions */ 198/* 16 bit extensions, not implemented
196%token <value> T_OR16 T_AND16 T_XOR16 T_ADD16 199 * %token <value> T_OR16 T_AND16 T_XOR16 T_ADD16
197%token <value> T_ADC16 T_MVI16 T_TEST16 T_CMP16 T_CMPXCHG 200 * %token <value> T_ADC16 T_MVI16 T_TEST16 T_CMP16 T_CMPXCHG
198 201 */
199%token T_RET 202%token T_RET
200 203
201%token T_NOP 204%token T_NOP
@@ -214,7 +217,7 @@ void yyerror(const char *string);
214 217
215%type <expression> expression immediate immediate_or_a 218%type <expression> expression immediate immediate_or_a
216 219
217%type <value> export ret f1_opcode f2_opcode f4_opcode jmp_jc_jnc_call jz_jnz je_jne 220%type <value> export ret f1_opcode f2_opcode jmp_jc_jnc_call jz_jnz je_jne
218 221
219%type <value> mode_value mode_list macro_arglist 222%type <value> mode_value mode_list macro_arglist
220 223
@@ -313,13 +316,13 @@ reg_definition:
313 stop("Register multiply defined", EX_DATAERR); 316 stop("Register multiply defined", EX_DATAERR);
314 /* NOTREACHED */ 317 /* NOTREACHED */
315 } 318 }
316 cur_symbol = $1; 319 cur_symbol = $1;
317 cur_symbol->type = cur_symtype; 320 cur_symbol->type = cur_symtype;
318 initialize_symbol(cur_symbol); 321 initialize_symbol(cur_symbol);
319 } 322 }
320 reg_attribute_list 323 reg_attribute_list
321 '}' 324 '}'
322 { 325 {
323 /* 326 /*
324 * Default to allowing everything in for registers 327 * Default to allowing everything in for registers
325 * with no bit or mask definitions. 328 * with no bit or mask definitions.
@@ -349,9 +352,10 @@ reg_attribute_list:
349| reg_attribute_list reg_attribute 352| reg_attribute_list reg_attribute
350; 353;
351 354
352reg_attribute: 355reg_attribute:
353 reg_address 356 reg_address
354| size 357| size
358| count
355| access_mode 359| access_mode
356| modes 360| modes
357| field_defn 361| field_defn
@@ -392,6 +396,13 @@ size:
392 } 396 }
393; 397;
394 398
399count:
400 T_COUNT T_NUMBER
401 {
402 cur_symbol->count += $2;
403 }
404;
405
395access_mode: 406access_mode:
396 T_ACCESS_MODE T_MODE 407 T_ACCESS_MODE T_MODE
397 { 408 {
@@ -641,14 +652,14 @@ expression:
641 &($1.referenced_syms), 652 &($1.referenced_syms),
642 &($3.referenced_syms)); 653 &($3.referenced_syms));
643 } 654 }
644| expression T_EXPR_LSHIFT expression 655| expression T_EXPR_LSHIFT expression
645 { 656 {
646 $$.value = $1.value << $3.value; 657 $$.value = $1.value << $3.value;
647 symlist_merge(&$$.referenced_syms, 658 symlist_merge(&$$.referenced_syms,
648 &$1.referenced_syms, 659 &$1.referenced_syms,
649 &$3.referenced_syms); 660 &$3.referenced_syms);
650 } 661 }
651| expression T_EXPR_RSHIFT expression 662| expression T_EXPR_RSHIFT expression
652 { 663 {
653 $$.value = $1.value >> $3.value; 664 $$.value = $1.value >> $3.value;
654 symlist_merge(&$$.referenced_syms, 665 symlist_merge(&$$.referenced_syms,
@@ -714,7 +725,7 @@ expression:
714; 725;
715 726
716constant: 727constant:
717 T_CONST T_SYMBOL expression 728 T_CONST T_SYMBOL expression
718 { 729 {
719 if ($2->type != UNINITIALIZED) { 730 if ($2->type != UNINITIALIZED) {
720 stop("Re-definition of symbol as a constant", 731 stop("Re-definition of symbol as a constant",
@@ -800,6 +811,7 @@ scratch_ram:
800 cur_symtype = SRAMLOC; 811 cur_symtype = SRAMLOC;
801 cur_symbol->type = SRAMLOC; 812 cur_symbol->type = SRAMLOC;
802 initialize_symbol(cur_symbol); 813 initialize_symbol(cur_symbol);
814 cur_symbol->count += 1;
803 } 815 }
804 reg_address 816 reg_address
805 { 817 {
@@ -831,6 +843,7 @@ scb:
831 initialize_symbol(cur_symbol); 843 initialize_symbol(cur_symbol);
832 /* 64 bytes of SCB space */ 844 /* 64 bytes of SCB space */
833 cur_symbol->info.rinfo->size = 64; 845 cur_symbol->info.rinfo->size = 64;
846 cur_symbol->count += 1;
834 } 847 }
835 reg_address 848 reg_address
836 { 849 {
@@ -1311,14 +1324,18 @@ f2_opcode:
1311| T_ROR { $$ = AIC_OP_ROR; } 1324| T_ROR { $$ = AIC_OP_ROR; }
1312; 1325;
1313 1326
1314f4_opcode: 1327/*
1315 T_OR16 { $$ = AIC_OP_OR16; } 1328 * 16bit opcodes, not used
1316| T_AND16 { $$ = AIC_OP_AND16; } 1329 *
1317| T_XOR16 { $$ = AIC_OP_XOR16; } 1330 *f4_opcode:
1318| T_ADD16 { $$ = AIC_OP_ADD16; } 1331 * T_OR16 { $$ = AIC_OP_OR16; }
1319| T_ADC16 { $$ = AIC_OP_ADC16; } 1332 *| T_AND16 { $$ = AIC_OP_AND16; }
1320| T_MVI16 { $$ = AIC_OP_MVI16; } 1333 *| T_XOR16 { $$ = AIC_OP_XOR16; }
1321; 1334 *| T_ADD16 { $$ = AIC_OP_ADD16; }
1335 *| T_ADC16 { $$ = AIC_OP_ADC16; }
1336 *| T_MVI16 { $$ = AIC_OP_MVI16; }
1337 *;
1338 */
1322 1339
1323code: 1340code:
1324 f2_opcode destination ',' expression opt_source ret ';' 1341 f2_opcode destination ',' expression opt_source ret ';'
@@ -1357,6 +1374,7 @@ code:
1357code: 1374code:
1358 T_OR reg_symbol ',' immediate jmp_jc_jnc_call address ';' 1375 T_OR reg_symbol ',' immediate jmp_jc_jnc_call address ';'
1359 { 1376 {
1377 type_check(&$2, &$4, AIC_OP_OR);
1360 format_3_instr($5, &$2, &$4, &$6); 1378 format_3_instr($5, &$2, &$4, &$6);
1361 } 1379 }
1362; 1380;
@@ -1528,7 +1546,7 @@ initialize_symbol(symbol_t *symbol)
1528 sizeof(struct cond_info)); 1546 sizeof(struct cond_info));
1529 break; 1547 break;
1530 case MACRO: 1548 case MACRO:
1531 symbol->info.macroinfo = 1549 symbol->info.macroinfo =
1532 (struct macro_info *)malloc(sizeof(struct macro_info)); 1550 (struct macro_info *)malloc(sizeof(struct macro_info));
1533 if (symbol->info.macroinfo == NULL) { 1551 if (symbol->info.macroinfo == NULL) {
1534 stop("Can't create macro info", EX_SOFTWARE); 1552 stop("Can't create macro info", EX_SOFTWARE);
@@ -1552,7 +1570,6 @@ add_macro_arg(const char *argtext, int argnum)
1552 struct macro_arg *marg; 1570 struct macro_arg *marg;
1553 int i; 1571 int i;
1554 int retval; 1572 int retval;
1555
1556 1573
1557 if (cur_symbol == NULL || cur_symbol->type != MACRO) { 1574 if (cur_symbol == NULL || cur_symbol->type != MACRO) {
1558 stop("Invalid current symbol for adding macro arg", 1575 stop("Invalid current symbol for adding macro arg",
@@ -1633,8 +1650,10 @@ format_1_instr(int opcode, symbol_ref_t *dest, expression_t *immed,
1633 test_writable_symbol(dest->symbol); 1650 test_writable_symbol(dest->symbol);
1634 test_readable_symbol(src->symbol); 1651 test_readable_symbol(src->symbol);
1635 1652
1636 /* Ensure that immediate makes sense for this destination */ 1653 if (!is_location_address(dest->symbol)) {
1637 type_check(dest->symbol, immed, opcode); 1654 /* Ensure that immediate makes sense for this destination */
1655 type_check(dest, immed, opcode);
1656 }
1638 1657
1639 /* Allocate sequencer space for the instruction and fill it out */ 1658 /* Allocate sequencer space for the instruction and fill it out */
1640 instr = seq_alloc(); 1659 instr = seq_alloc();
@@ -1766,9 +1785,6 @@ format_3_instr(int opcode, symbol_ref_t *src,
1766 /* Test register permissions */ 1785 /* Test register permissions */
1767 test_readable_symbol(src->symbol); 1786 test_readable_symbol(src->symbol);
1768 1787
1769 /* Ensure that immediate makes sense for this source */
1770 type_check(src->symbol, immed, opcode);
1771
1772 /* Allocate sequencer space for the instruction and fill it out */ 1788 /* Allocate sequencer space for the instruction and fill it out */
1773 instr = seq_alloc(); 1789 instr = seq_alloc();
1774 f3_instr = &instr->format.format3; 1790 f3_instr = &instr->format.format3;
@@ -1797,7 +1813,6 @@ format_3_instr(int opcode, symbol_ref_t *src,
1797static void 1813static void
1798test_readable_symbol(symbol_t *symbol) 1814test_readable_symbol(symbol_t *symbol)
1799{ 1815{
1800
1801 if ((symbol->info.rinfo->modes & (0x1 << src_mode)) == 0) { 1816 if ((symbol->info.rinfo->modes & (0x1 << src_mode)) == 0) {
1802 snprintf(errbuf, sizeof(errbuf), 1817 snprintf(errbuf, sizeof(errbuf),
1803 "Register %s unavailable in source reg mode %d", 1818 "Register %s unavailable in source reg mode %d",
@@ -1815,7 +1830,6 @@ test_readable_symbol(symbol_t *symbol)
1815static void 1830static void
1816test_writable_symbol(symbol_t *symbol) 1831test_writable_symbol(symbol_t *symbol)
1817{ 1832{
1818
1819 if ((symbol->info.rinfo->modes & (0x1 << dst_mode)) == 0) { 1833 if ((symbol->info.rinfo->modes & (0x1 << dst_mode)) == 0) {
1820 snprintf(errbuf, sizeof(errbuf), 1834 snprintf(errbuf, sizeof(errbuf),
1821 "Register %s unavailable in destination reg mode %d", 1835 "Register %s unavailable in destination reg mode %d",
@@ -1831,25 +1845,34 @@ test_writable_symbol(symbol_t *symbol)
1831} 1845}
1832 1846
1833static void 1847static void
1834type_check(symbol_t *symbol, expression_t *expression, int opcode) 1848type_check(symbol_ref_t *sym, expression_t *expression, int opcode)
1835{ 1849{
1850 symbol_t *symbol = sym->symbol;
1836 symbol_node_t *node; 1851 symbol_node_t *node;
1837 int and_op; 1852 int and_op;
1853 int8_t value, mask;
1838 1854
1839 and_op = FALSE; 1855 and_op = FALSE;
1840 if (opcode == AIC_OP_AND || opcode == AIC_OP_JNZ || opcode == AIC_OP_JZ)
1841 and_op = TRUE;
1842
1843 /* 1856 /*
1844 * Make sure that we aren't attempting to write something 1857 * Make sure that we aren't attempting to write something
1845 * that hasn't been defined. If this is an and operation, 1858 * that hasn't been defined. If this is an and operation,
1846 * this is a mask, so "undefined" bits are okay. 1859 * this is a mask, so "undefined" bits are okay.
1847 */ 1860 */
1848 if (and_op == FALSE 1861 if (opcode == AIC_OP_AND || opcode == AIC_OP_JNZ ||
1849 && (expression->value & ~symbol->info.rinfo->valid_bitmask) != 0) { 1862 opcode == AIC_OP_JZ || opcode == AIC_OP_JNE ||
1863 opcode == AIC_OP_BMOV)
1864 and_op = TRUE;
1865
1866 /*
1867 * Defaulting to 8 bit logic
1868 */
1869 mask = (int8_t)~symbol->info.rinfo->valid_bitmask;
1870 value = (int8_t)expression->value;
1871
 1872	if (and_op == FALSE && (mask & value) != 0) {
1850 snprintf(errbuf, sizeof(errbuf), 1873 snprintf(errbuf, sizeof(errbuf),
1851 "Invalid bit(s) 0x%x in immediate written to %s", 1874 "Invalid bit(s) 0x%x in immediate written to %s",
1852 expression->value & ~symbol->info.rinfo->valid_bitmask, 1875 (mask & value),
1853 symbol->name); 1876 symbol->name);
1854 stop(errbuf, EX_DATAERR); 1877 stop(errbuf, EX_DATAERR);
1855 /* NOTREACHED */ 1878 /* NOTREACHED */
@@ -1959,3 +1982,13 @@ is_download_const(expression_t *immed)
1959 1982
1960 return (FALSE); 1983 return (FALSE);
1961} 1984}
1985
1986static int
1987is_location_address(symbol_t *sym)
1988{
1989 if (sym->type == SCBLOC ||
1990 sym->type == SRAMLOC)
1991 return (TRUE);
1992 return (FALSE);
1993}
1994
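
The reworked type_check() above deliberately narrows both the immediate and the inverted valid_bitmask to int8_t before comparing, since the sequencer's register logic is 8 bits wide (it is also now skipped entirely for SCB/SRAM location addresses via is_location_address()). A standalone sketch, not part of the patch, of why the cast matters for an immediate such as -1:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t valid_bitmask = 0xFF;  /* register accepts all 8 bits */
            int value32 = -1;               /* immediate as parsed: 0xFFFFFFFF */

            /* Old 32-bit test: bits 8..31 of the host int trip the check. */
            printf("32-bit check: %s\n",
                   (value32 & ~valid_bitmask) ? "invalid" : "ok");

            /* New 8-bit test: only bits a real register can hold count. */
            int8_t mask = (int8_t)~valid_bitmask;   /* 0xFFFFFF00 -> 0x00 */
            int8_t value = (int8_t)value32;
            printf(" 8-bit check: %s\n", (mask & value) ? "invalid" : "ok");
            return 0;
    }
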
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l b/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
index 7c3983f868a9..2c7f02daf88d 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
@@ -162,6 +162,7 @@ register { return T_REGISTER; }
162const { yylval.value = FALSE; return T_CONST; } 162const { yylval.value = FALSE; return T_CONST; }
163download { return T_DOWNLOAD; } 163download { return T_DOWNLOAD; }
164address { return T_ADDRESS; } 164address { return T_ADDRESS; }
165count { return T_COUNT; }
165access_mode { return T_ACCESS_MODE; } 166access_mode { return T_ACCESS_MODE; }
166modes { return T_MODES; } 167modes { return T_MODES; }
167RW|RO|WO { 168RW|RO|WO {
@@ -228,15 +229,15 @@ ret { return T_RET; }
228nop { return T_NOP; } 229nop { return T_NOP; }
229 230
230 /* ARP2 16bit extensions */ 231 /* ARP2 16bit extensions */
231or16 { return T_OR16; } 232 /* or16 { return T_OR16; } */
232and16 { return T_AND16; } 233 /* and16 { return T_AND16; }*/
233xor16 { return T_XOR16; } 234 /* xor16 { return T_XOR16; }*/
234add16 { return T_ADD16; } 235 /* add16 { return T_ADD16; }*/
235adc16 { return T_ADC16; } 236 /* adc16 { return T_ADC16; }*/
236mvi16 { return T_MVI16; } 237 /* mvi16 { return T_MVI16; }*/
237test16 { return T_TEST16; } 238 /* test16 { return T_TEST16; }*/
238cmp16 { return T_CMP16; } 239 /* cmp16 { return T_CMP16; }*/
239cmpxchg { return T_CMPXCHG; } 240 /* cmpxchg { return T_CMPXCHG; }*/
240 241
241 /* Allowed Symbols */ 242 /* Allowed Symbols */
242\<\< { return T_EXPR_LSHIFT; } 243\<\< { return T_EXPR_LSHIFT; }
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
index f1f448dff569..fcd357872b43 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
@@ -77,6 +77,7 @@ symbol_create(char *name)
77 if (new_symbol->name == NULL) 77 if (new_symbol->name == NULL)
78 stop("Unable to strdup symbol name", EX_SOFTWARE); 78 stop("Unable to strdup symbol name", EX_SOFTWARE);
79 new_symbol->type = UNINITIALIZED; 79 new_symbol->type = UNINITIALIZED;
80 new_symbol->count = 1;
80 return (new_symbol); 81 return (new_symbol);
81} 82}
82 83
@@ -198,6 +199,12 @@ symtable_get(char *name)
198 } 199 }
199 } 200 }
200 memcpy(&stored_ptr, data.data, sizeof(stored_ptr)); 201 memcpy(&stored_ptr, data.data, sizeof(stored_ptr));
202 stored_ptr->count++;
203 data.data = &stored_ptr;
 204	if (symtable->put(symtable, &key, &data, /*flags*/0) != 0) {
205 perror("Symtable put failed");
206 exit(EX_SOFTWARE);
207 }
201 return (stored_ptr); 208 return (stored_ptr);
202} 209}
203 210
@@ -256,7 +263,7 @@ symlist_add(symlist_t *symlist, symbol_t *symbol, int how)
256 && (curnode->symbol->info.finfo->value > 263 && (curnode->symbol->info.finfo->value >
257 newnode->symbol->info.finfo->value)))) 264 newnode->symbol->info.finfo->value))))
258 || (!field && (curnode->symbol->info.rinfo->address > 265 || (!field && (curnode->symbol->info.rinfo->address >
259 newnode->symbol->info.rinfo->address))) { 266 newnode->symbol->info.rinfo->address))) {
260 SLIST_INSERT_HEAD(symlist, newnode, links); 267 SLIST_INSERT_HEAD(symlist, newnode, links);
261 return; 268 return;
262 } 269 }
@@ -271,7 +278,7 @@ symlist_add(symlist_t *symlist, symbol_t *symbol, int how)
271 278
272 cursymbol = SLIST_NEXT(curnode, links)->symbol; 279 cursymbol = SLIST_NEXT(curnode, links)->symbol;
273 if ((field 280 if ((field
274 && (cursymbol->type > symbol->type 281 && (cursymbol->type > symbol->type
275 || (cursymbol->type == symbol->type 282 || (cursymbol->type == symbol->type
276 && (cursymbol->info.finfo->value > 283 && (cursymbol->info.finfo->value >
277 symbol->info.finfo->value)))) 284 symbol->info.finfo->value))))
@@ -351,7 +358,7 @@ aic_print_reg_dump_types(FILE *ofile)
351{ 358{
352 if (ofile == NULL) 359 if (ofile == NULL)
353 return; 360 return;
354 361
355 fprintf(ofile, 362 fprintf(ofile,
356"typedef int (%sreg_print_t)(u_int, u_int *, u_int);\n" 363"typedef int (%sreg_print_t)(u_int, u_int *, u_int);\n"
357"typedef struct %sreg_parse_entry {\n" 364"typedef struct %sreg_parse_entry {\n"
@@ -370,7 +377,7 @@ aic_print_reg_dump_start(FILE *dfile, symbol_node_t *regnode)
370 return; 377 return;
371 378
372 fprintf(dfile, 379 fprintf(dfile,
373"static %sreg_parse_entry_t %s_parse_table[] = {\n", 380"static const %sreg_parse_entry_t %s_parse_table[] = {\n",
374 prefix, 381 prefix,
375 regnode->symbol->name); 382 regnode->symbol->name);
376} 383}
@@ -385,7 +392,7 @@ aic_print_reg_dump_end(FILE *ofile, FILE *dfile,
385 lower_name = strdup(regnode->symbol->name); 392 lower_name = strdup(regnode->symbol->name);
386 if (lower_name == NULL) 393 if (lower_name == NULL)
387 stop("Unable to strdup symbol name", EX_SOFTWARE); 394 stop("Unable to strdup symbol name", EX_SOFTWARE);
388 395
389 for (letter = lower_name; *letter != '\0'; letter++) 396 for (letter = lower_name; *letter != '\0'; letter++)
390 *letter = tolower(*letter); 397 *letter = tolower(*letter);
391 398
@@ -472,6 +479,7 @@ symtable_dump(FILE *ofile, FILE *dfile)
472 DBT key; 479 DBT key;
473 DBT data; 480 DBT data;
474 int flag; 481 int flag;
482 int reg_count = 0, reg_used = 0;
475 u_int i; 483 u_int i;
476 484
477 if (symtable == NULL) 485 if (symtable == NULL)
@@ -541,6 +549,9 @@ symtable_dump(FILE *ofile, FILE *dfile)
541 int num_entries; 549 int num_entries;
542 550
543 num_entries = 0; 551 num_entries = 0;
552 reg_count++;
553 if (curnode->symbol->count == 1)
554 break;
544 fields = &curnode->symbol->info.rinfo->fields; 555 fields = &curnode->symbol->info.rinfo->fields;
545 SLIST_FOREACH(fieldnode, fields, links) { 556 SLIST_FOREACH(fieldnode, fields, links) {
546 if (num_entries == 0) 557 if (num_entries == 0)
@@ -553,11 +564,14 @@ symtable_dump(FILE *ofile, FILE *dfile)
553 } 564 }
554 aic_print_reg_dump_end(ofile, dfile, 565 aic_print_reg_dump_end(ofile, dfile,
555 curnode, num_entries); 566 curnode, num_entries);
567 reg_used++;
556 } 568 }
557 default: 569 default:
558 break; 570 break;
559 } 571 }
560 } 572 }
573 fprintf(stderr, "%s: %d of %d register definitions used\n", appname,
574 reg_used, reg_count);
561 575
562 /* Fold in the masks and bits */ 576 /* Fold in the masks and bits */
563 while (SLIST_FIRST(&masks) != NULL) { 577 while (SLIST_FIRST(&masks) != NULL) {
@@ -646,7 +660,6 @@ symtable_dump(FILE *ofile, FILE *dfile)
646 free(curnode); 660 free(curnode);
647 } 661 }
648 662
649
650 fprintf(ofile, "\n\n/* Downloaded Constant Definitions */\n"); 663 fprintf(ofile, "\n\n/* Downloaded Constant Definitions */\n");
651 664
652 for (i = 0; SLIST_FIRST(&download_constants) != NULL; i++) { 665 for (i = 0; SLIST_FIRST(&download_constants) != NULL; i++) {
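
The count field threaded through aicasm_symbol.c above implements a simple reference count: symbol_create() starts every symbol at 1, symtable_get() bumps the count on each lookup and writes the record back into the DB, and symtable_dump() treats count == 1 as "defined but never referenced", skipping the register-dump table for it and reporting usage on stderr. A distilled sketch of the scheme (hash-table plumbing elided; names mirror the patch):

    struct symbol {
            const char *name;
            int count;              /* 1 == the defining reference only */
    };

    /* Called once per lookup of an existing symbol. */
    static void symbol_referenced(struct symbol *sym)
    {
            sym->count++;
    }

    static int symbol_is_unused(const struct symbol *sym)
    {
            return sym->count == 1;
    }
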
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
index afc22e8b4903..05190c1a2fb7 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
@@ -128,6 +128,7 @@ typedef struct expression_info {
128typedef struct symbol { 128typedef struct symbol {
129 char *name; 129 char *name;
130 symtype type; 130 symtype type;
131 int count;
131 union { 132 union {
132 struct reg_info *rinfo; 133 struct reg_info *rinfo;
133 struct field_info *finfo; 134 struct field_info *finfo;
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index 8be3d76656fa..a73a6bbb1b2b 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -2286,17 +2286,14 @@ static void flush_dev(struct scsi_device *dev, unsigned long cursec,
2286 } 2286 }
2287} 2287}
2288 2288
2289static irqreturn_t ihdlr(int irq, struct Scsi_Host *shost) 2289static irqreturn_t ihdlr(struct Scsi_Host *shost)
2290{ 2290{
2291 struct scsi_cmnd *SCpnt; 2291 struct scsi_cmnd *SCpnt;
2292 unsigned int i, k, c, status, tstatus, reg; 2292 unsigned int i, k, c, status, tstatus, reg;
2293 struct mssp *spp; 2293 struct mssp *spp;
2294 struct mscp *cpp; 2294 struct mscp *cpp;
2295 struct hostdata *ha = (struct hostdata *)shost->hostdata; 2295 struct hostdata *ha = (struct hostdata *)shost->hostdata;
2296 2296 int irq = shost->irq;
2297 if (shost->irq != irq)
2298 panic("%s: ihdlr, irq %d, shost->irq %d.\n", ha->board_name, irq,
2299 shost->irq);
2300 2297
 2301	/* Check if this board needs to be serviced */	 2298	/* Check if this board needs to be serviced */
2302 if (!(inb(shost->io_port + REG_AUX_STATUS) & IRQ_ASSERTED)) 2299 if (!(inb(shost->io_port + REG_AUX_STATUS) & IRQ_ASSERTED))
@@ -2535,7 +2532,7 @@ static irqreturn_t ihdlr(int irq, struct Scsi_Host *shost)
2535 return IRQ_NONE; 2532 return IRQ_NONE;
2536} 2533}
2537 2534
2538static irqreturn_t do_interrupt_handler(int irq, void *shap) 2535static irqreturn_t do_interrupt_handler(int dummy, void *shap)
2539{ 2536{
2540 struct Scsi_Host *shost; 2537 struct Scsi_Host *shost;
2541 unsigned int j; 2538 unsigned int j;
@@ -2548,7 +2545,7 @@ static irqreturn_t do_interrupt_handler(int irq, void *shap)
2548 shost = sh[j]; 2545 shost = sh[j];
2549 2546
2550 spin_lock_irqsave(shost->host_lock, spin_flags); 2547 spin_lock_irqsave(shost->host_lock, spin_flags);
2551 ret = ihdlr(irq, shost); 2548 ret = ihdlr(shost);
2552 spin_unlock_irqrestore(shost->host_lock, spin_flags); 2549 spin_unlock_irqrestore(shost->host_lock, spin_flags);
2553 return ret; 2550 return ret;
2554} 2551}
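
The eata.c change above stops cross-checking the irq argument against shost->irq (the old code panicked on a mismatch the IRQ core already rules out) and treats the dev_id cookie as the single source of truth. A generic sketch of that handler pattern; REG_AUX_STATUS and IRQ_ASSERTED are eata.c's own macros, reused here for illustration:

    #include <linux/interrupt.h>
    #include <scsi/scsi_host.h>
    #include <asm/io.h>

    static irqreturn_t my_handler(int dummy, void *dev_id)
    {
            struct Scsi_Host *shost = dev_id;  /* cookie from request_irq() */

            /* Poll the board, not the irq argument, to decide whether we
             * raised this (possibly shared) interrupt. */
            if (!(inb(shost->io_port + REG_AUX_STATUS) & IRQ_ASSERTED))
                    return IRQ_NONE;

            /* ... service using shost; shost->irq holds the number ... */
            return IRQ_HANDLED;
    }
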
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index bfdee5968892..a0b6d414953d 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -978,7 +978,7 @@ static int esp_check_spur_intr(struct esp *esp)
978 */ 978 */
979 if (!esp->ops->dma_error(esp)) { 979 if (!esp->ops->dma_error(esp)) {
980 printk(KERN_ERR PFX "esp%d: Spurious irq, " 980 printk(KERN_ERR PFX "esp%d: Spurious irq, "
981 "sreg=%x.\n", 981 "sreg=%02x.\n",
982 esp->host->unique_id, esp->sreg); 982 esp->host->unique_id, esp->sreg);
983 return -1; 983 return -1;
984 } 984 }
@@ -1447,6 +1447,9 @@ static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
1447 if (offset > 15) 1447 if (offset > 15)
1448 goto do_reject; 1448 goto do_reject;
1449 1449
1450 if (esp->flags & ESP_FLAG_DISABLE_SYNC)
1451 offset = 0;
1452
1450 if (offset) { 1453 if (offset) {
1451 int rounded_up, one_clock; 1454 int rounded_up, one_clock;
1452 1455
@@ -1697,7 +1700,12 @@ again:
1697 else 1700 else
1698 ent->flags &= ~ESP_CMD_FLAG_WRITE; 1701 ent->flags &= ~ESP_CMD_FLAG_WRITE;
1699 1702
1700 dma_len = esp_dma_length_limit(esp, dma_addr, dma_len); 1703 if (esp->ops->dma_length_limit)
1704 dma_len = esp->ops->dma_length_limit(esp, dma_addr,
1705 dma_len);
1706 else
1707 dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);
1708
1701 esp->data_dma_len = dma_len; 1709 esp->data_dma_len = dma_len;
1702 1710
1703 if (!dma_len) { 1711 if (!dma_len) {
@@ -1761,7 +1769,6 @@ again:
1761 esp_advance_dma(esp, ent, cmd, bytes_sent); 1769 esp_advance_dma(esp, ent, cmd, bytes_sent);
1762 esp_event(esp, ESP_EVENT_CHECK_PHASE); 1770 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1763 goto again; 1771 goto again;
1764 break;
1765 } 1772 }
1766 1773
1767 case ESP_EVENT_STATUS: { 1774 case ESP_EVENT_STATUS: {
@@ -2235,7 +2242,7 @@ static void esp_bootup_reset(struct esp *esp)
2235 2242
2236static void esp_set_clock_params(struct esp *esp) 2243static void esp_set_clock_params(struct esp *esp)
2237{ 2244{
2238 int fmhz; 2245 int fhz;
2239 u8 ccf; 2246 u8 ccf;
2240 2247
2241 /* This is getting messy but it has to be done correctly or else 2248 /* This is getting messy but it has to be done correctly or else
@@ -2270,9 +2277,9 @@ static void esp_set_clock_params(struct esp *esp)
2270 * This entails the smallest and largest sync period we could ever 2277 * This entails the smallest and largest sync period we could ever
2271 * handle on this ESP. 2278 * handle on this ESP.
2272 */ 2279 */
2273 fmhz = esp->cfreq; 2280 fhz = esp->cfreq;
2274 2281
2275 ccf = ((fmhz / 1000000) + 4) / 5; 2282 ccf = ((fhz / 1000000) + 4) / 5;
2276 if (ccf == 1) 2283 if (ccf == 1)
2277 ccf = 2; 2284 ccf = 2;
2278 2285
@@ -2281,16 +2288,16 @@ static void esp_set_clock_params(struct esp *esp)
2281 * been unable to find the clock-frequency PROM property. All 2288 * been unable to find the clock-frequency PROM property. All
2282 * other machines provide useful values it seems. 2289 * other machines provide useful values it seems.
2283 */ 2290 */
2284 if (fmhz <= 5000000 || ccf < 1 || ccf > 8) { 2291 if (fhz <= 5000000 || ccf < 1 || ccf > 8) {
2285 fmhz = 20000000; 2292 fhz = 20000000;
2286 ccf = 4; 2293 ccf = 4;
2287 } 2294 }
2288 2295
2289 esp->cfact = (ccf == 8 ? 0 : ccf); 2296 esp->cfact = (ccf == 8 ? 0 : ccf);
2290 esp->cfreq = fmhz; 2297 esp->cfreq = fhz;
2291 esp->ccycle = ESP_MHZ_TO_CYCLE(fmhz); 2298 esp->ccycle = ESP_HZ_TO_CYCLE(fhz);
2292 esp->ctick = ESP_TICK(ccf, esp->ccycle); 2299 esp->ctick = ESP_TICK(ccf, esp->ccycle);
2293 esp->neg_defp = ESP_NEG_DEFP(fmhz, ccf); 2300 esp->neg_defp = ESP_NEG_DEFP(fhz, ccf);
2294 esp->sync_defp = SYNC_DEFP_SLOW; 2301 esp->sync_defp = SYNC_DEFP_SLOW;
2295} 2302}
2296 2303
@@ -2382,6 +2389,12 @@ static int esp_slave_configure(struct scsi_device *dev)
2382 struct esp_target_data *tp = &esp->target[dev->id]; 2389 struct esp_target_data *tp = &esp->target[dev->id];
2383 int goal_tags, queue_depth; 2390 int goal_tags, queue_depth;
2384 2391
2392 if (esp->flags & ESP_FLAG_DISABLE_SYNC) {
2393 /* Bypass async domain validation */
2394 dev->ppr = 0;
2395 dev->sdtr = 0;
2396 }
2397
2385 goal_tags = 0; 2398 goal_tags = 0;
2386 2399
2387 if (dev->tagged_supported) { 2400 if (dev->tagged_supported) {
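
The new dma_length_limit hook above is optional: esp_scsi.c falls back to the generic esp_dma_length_limit() whenever a front-end leaves the pointer NULL, the usual pattern for extending an ops table without touching every driver (mac_esp, added later in this patch, uses it to cap PDMA transfers at 0xFFFF bytes). The dispatch, pulled into a helper for clarity; the wrapper function itself is illustrative, since the driver open-codes the if/else:

    static u32 esp_clamp_dma_len(struct esp *esp, u32 dma_addr, u32 dma_len)
    {
            if (esp->ops->dma_length_limit)
                    return esp->ops->dma_length_limit(esp, dma_addr, dma_len);
            return esp_dma_length_limit(esp, dma_addr, dma_len);
    }
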
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
index d5576d54ce76..bb43a1388188 100644
--- a/drivers/scsi/esp_scsi.h
+++ b/drivers/scsi/esp_scsi.h
@@ -224,7 +224,7 @@
224#define ESP_TIMEO_CONST 8192 224#define ESP_TIMEO_CONST 8192
225#define ESP_NEG_DEFP(mhz, cfact) \ 225#define ESP_NEG_DEFP(mhz, cfact) \
226 ((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (8192 * (cfact))) 226 ((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (8192 * (cfact)))
227#define ESP_MHZ_TO_CYCLE(mhertz) ((1000000000) / ((mhertz) / 1000)) 227#define ESP_HZ_TO_CYCLE(hertz) ((1000000000) / ((hertz) / 1000))
228#define ESP_TICK(ccf, cycle) ((7682 * (ccf) * (cycle) / 1000)) 228#define ESP_TICK(ccf, cycle) ((7682 * (ccf) * (cycle) / 1000))
229 229
230/* For slow to medium speed input clock rates we shoot for 5mb/s, but for high 230/* For slow to medium speed input clock rates we shoot for 5mb/s, but for high
@@ -240,9 +240,9 @@ struct esp_cmd_priv {
240 int num_sg; 240 int num_sg;
241 } u; 241 } u;
242 242
243 unsigned int cur_residue; 243 int cur_residue;
244 struct scatterlist *cur_sg; 244 struct scatterlist *cur_sg;
245 unsigned int tot_residue; 245 int tot_residue;
246}; 246};
247#define ESP_CMD_PRIV(CMD) ((struct esp_cmd_priv *)(&(CMD)->SCp)) 247#define ESP_CMD_PRIV(CMD) ((struct esp_cmd_priv *)(&(CMD)->SCp))
248 248
@@ -368,6 +368,12 @@ struct esp_driver_ops {
368 */ 368 */
369 int (*irq_pending)(struct esp *esp); 369 int (*irq_pending)(struct esp *esp);
370 370
371 /* Return the maximum allowable size of a DMA transfer for a
372 * given buffer.
373 */
374 u32 (*dma_length_limit)(struct esp *esp, u32 dma_addr,
375 u32 dma_len);
376
371 /* Reset the DMA engine entirely. On return, ESP interrupts 377 /* Reset the DMA engine entirely. On return, ESP interrupts
372 * should be enabled. Often the interrupt enabling is 378 * should be enabled. Often the interrupt enabling is
373 * controlled in the DMA engine. 379 * controlled in the DMA engine.
@@ -471,6 +477,7 @@ struct esp {
471#define ESP_FLAG_DOING_SLOWCMD 0x00000004 477#define ESP_FLAG_DOING_SLOWCMD 0x00000004
472#define ESP_FLAG_WIDE_CAPABLE 0x00000008 478#define ESP_FLAG_WIDE_CAPABLE 0x00000008
473#define ESP_FLAG_QUICKIRQ_CHECK 0x00000010 479#define ESP_FLAG_QUICKIRQ_CHECK 0x00000010
480#define ESP_FLAG_DISABLE_SYNC 0x00000020
474 481
475 u8 select_state; 482 u8 select_state;
476#define ESP_SELECT_NONE 0x00 /* Not selecting */ 483#define ESP_SELECT_NONE 0x00 /* Not selecting */
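
cur_residue and tot_residue above turn signed, presumably so a data overrun (the target moving more bytes than were mapped) shows up as a negative residue instead of wrapping to a huge unsigned value that every "bytes left?" test would keep passing. A two-line demonstration:

    #include <stdio.h>

    int main(void)
    {
            unsigned int u_res = 10;
            int s_res = 10;

            u_res -= 12;    /* overrun by two bytes */
            s_res -= 12;

            printf("unsigned residue: %u\n", u_res);  /* 4294967294 */
            printf("  signed residue: %d\n", s_res);  /* -2 */
            return 0;
    }
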
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index c264a8c5f01e..3690360d7a79 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -199,9 +199,13 @@ int scsi_add_host(struct Scsi_Host *shost, struct device *dev)
199 if (!shost->can_queue) { 199 if (!shost->can_queue) {
200 printk(KERN_ERR "%s: can_queue = 0 no longer supported\n", 200 printk(KERN_ERR "%s: can_queue = 0 no longer supported\n",
201 sht->name); 201 sht->name);
202 goto out; 202 goto fail;
203 } 203 }
204 204
205 error = scsi_setup_command_freelist(shost);
206 if (error)
207 goto fail;
208
205 if (!shost->shost_gendev.parent) 209 if (!shost->shost_gendev.parent)
206 shost->shost_gendev.parent = dev ? dev : &platform_bus; 210 shost->shost_gendev.parent = dev ? dev : &platform_bus;
207 211
@@ -255,6 +259,8 @@ int scsi_add_host(struct Scsi_Host *shost, struct device *dev)
255 out_del_gendev: 259 out_del_gendev:
256 device_del(&shost->shost_gendev); 260 device_del(&shost->shost_gendev);
257 out: 261 out:
262 scsi_destroy_command_freelist(shost);
263 fail:
258 return error; 264 return error;
259} 265}
260EXPORT_SYMBOL(scsi_add_host); 266EXPORT_SYMBOL(scsi_add_host);
@@ -284,6 +290,11 @@ static void scsi_host_dev_release(struct device *dev)
284 kfree(shost); 290 kfree(shost);
285} 291}
286 292
293struct device_type scsi_host_type = {
294 .name = "scsi_host",
295 .release = scsi_host_dev_release,
296};
297
287/** 298/**
288 * scsi_host_alloc - register a scsi host adapter instance. 299 * scsi_host_alloc - register a scsi host adapter instance.
289 * @sht: pointer to scsi host template 300 * @sht: pointer to scsi host template
@@ -376,33 +387,31 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
376 else 387 else
377 shost->dma_boundary = 0xffffffff; 388 shost->dma_boundary = 0xffffffff;
378 389
379 rval = scsi_setup_command_freelist(shost);
380 if (rval)
381 goto fail_kfree;
382
383 device_initialize(&shost->shost_gendev); 390 device_initialize(&shost->shost_gendev);
384 snprintf(shost->shost_gendev.bus_id, BUS_ID_SIZE, "host%d", 391 snprintf(shost->shost_gendev.bus_id, BUS_ID_SIZE, "host%d",
385 shost->host_no); 392 shost->host_no);
386 shost->shost_gendev.release = scsi_host_dev_release; 393#ifndef CONFIG_SYSFS_DEPRECATED
394 shost->shost_gendev.bus = &scsi_bus_type;
395#endif
396 shost->shost_gendev.type = &scsi_host_type;
387 397
388 device_initialize(&shost->shost_dev); 398 device_initialize(&shost->shost_dev);
389 shost->shost_dev.parent = &shost->shost_gendev; 399 shost->shost_dev.parent = &shost->shost_gendev;
390 shost->shost_dev.class = &shost_class; 400 shost->shost_dev.class = &shost_class;
391 snprintf(shost->shost_dev.bus_id, BUS_ID_SIZE, "host%d", 401 snprintf(shost->shost_dev.bus_id, BUS_ID_SIZE, "host%d",
392 shost->host_no); 402 shost->host_no);
403 shost->shost_dev.groups = scsi_sysfs_shost_attr_groups;
393 404
394 shost->ehandler = kthread_run(scsi_error_handler, shost, 405 shost->ehandler = kthread_run(scsi_error_handler, shost,
395 "scsi_eh_%d", shost->host_no); 406 "scsi_eh_%d", shost->host_no);
396 if (IS_ERR(shost->ehandler)) { 407 if (IS_ERR(shost->ehandler)) {
397 rval = PTR_ERR(shost->ehandler); 408 rval = PTR_ERR(shost->ehandler);
398 goto fail_destroy_freelist; 409 goto fail_kfree;
399 } 410 }
400 411
401 scsi_proc_hostdir_add(shost->hostt); 412 scsi_proc_hostdir_add(shost->hostt);
402 return shost; 413 return shost;
403 414
404 fail_destroy_freelist:
405 scsi_destroy_command_freelist(shost);
406 fail_kfree: 415 fail_kfree:
407 kfree(shost); 416 kfree(shost);
408 return NULL; 417 return NULL;
@@ -496,7 +505,7 @@ void scsi_exit_hosts(void)
496 505
497int scsi_is_host_device(const struct device *dev) 506int scsi_is_host_device(const struct device *dev)
498{ 507{
499 return dev->release == scsi_host_dev_release; 508 return dev->type == &scsi_host_type;
500} 509}
501EXPORT_SYMBOL(scsi_is_host_device); 510EXPORT_SYMBOL(scsi_is_host_device);
502 511
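
With shost_gendev now carrying a device_type, scsi_is_host_device() can test dev->type instead of comparing against the release function pointer, a check that stops working the moment release moves into the type (as this patch does: the driver core falls back to dev->type->release when dev->release is unset). A sketch of the idiom with an invented type:

    #include <linux/device.h>

    static struct device_type example_type = {
            .name = "example",
    };

    static int is_example_device(const struct device *dev)
    {
            return dev->type == &example_type;
    }
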
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 93c3fc20aa59..32553639aded 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -258,8 +258,7 @@ idescsi_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
258 258
259 if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT)) 259 if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT))
260 /* force an abort */ 260 /* force an abort */
261 hwif->OUTB(WIN_IDLEIMMEDIATE, 261 hwif->OUTB(WIN_IDLEIMMEDIATE, hwif->io_ports.command_addr);
262 hwif->io_ports[IDE_COMMAND_OFFSET]);
263 262
264 rq->errors++; 263 rq->errors++;
265 264
@@ -393,7 +392,7 @@ static ide_startstop_t idescsi_pc_intr (ide_drive_t *drive)
393 printk ("ide-scsi: %s: DMA complete\n", drive->name); 392 printk ("ide-scsi: %s: DMA complete\n", drive->name);
394#endif /* IDESCSI_DEBUG_LOG */ 393#endif /* IDESCSI_DEBUG_LOG */
395 pc->xferred = pc->req_xfer; 394 pc->xferred = pc->req_xfer;
396 (void) HWIF(drive)->ide_dma_end(drive); 395 (void)hwif->dma_ops->dma_end(drive);
397 } 396 }
398 397
399 /* Clear the interrupt */ 398 /* Clear the interrupt */
@@ -410,9 +409,9 @@ static ide_startstop_t idescsi_pc_intr (ide_drive_t *drive)
410 idescsi_end_request (drive, 1, 0); 409 idescsi_end_request (drive, 1, 0);
411 return ide_stopped; 410 return ide_stopped;
412 } 411 }
413 bcount = (hwif->INB(hwif->io_ports[IDE_BCOUNTH_OFFSET]) << 8) | 412 bcount = (hwif->INB(hwif->io_ports.lbah_addr) << 8) |
414 hwif->INB(hwif->io_ports[IDE_BCOUNTL_OFFSET]); 413 hwif->INB(hwif->io_ports.lbam_addr);
415 ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]); 414 ireason = hwif->INB(hwif->io_ports.nsect_addr);
416 415
417 if (ireason & CD) { 416 if (ireason & CD) {
418 printk(KERN_ERR "ide-scsi: CoD != 0 in idescsi_pc_intr\n"); 417 printk(KERN_ERR "ide-scsi: CoD != 0 in idescsi_pc_intr\n");
@@ -485,7 +484,7 @@ static ide_startstop_t idescsi_transfer_pc(ide_drive_t *drive)
485 "initiated yet DRQ isn't asserted\n"); 484 "initiated yet DRQ isn't asserted\n");
486 return startstop; 485 return startstop;
487 } 486 }
488 ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]); 487 ireason = hwif->INB(hwif->io_ports.nsect_addr);
489 if ((ireason & CD) == 0 || (ireason & IO)) { 488 if ((ireason & CD) == 0 || (ireason & IO)) {
490 printk(KERN_ERR "ide-scsi: (IO,CoD) != (0,1) while " 489 printk(KERN_ERR "ide-scsi: (IO,CoD) != (0,1) while "
491 "issuing a packet command\n"); 490 "issuing a packet command\n");
@@ -498,7 +497,7 @@ static ide_startstop_t idescsi_transfer_pc(ide_drive_t *drive)
498 drive->hwif->atapi_output_bytes(drive, scsi->pc->c, 12); 497 drive->hwif->atapi_output_bytes(drive, scsi->pc->c, 12);
499 if (pc->flags & PC_FLAG_DMA_OK) { 498 if (pc->flags & PC_FLAG_DMA_OK) {
500 pc->flags |= PC_FLAG_DMA_IN_PROGRESS; 499 pc->flags |= PC_FLAG_DMA_IN_PROGRESS;
501 hwif->dma_start(drive); 500 hwif->dma_ops->dma_start(drive);
502 } 501 }
503 return ide_started; 502 return ide_started;
504} 503}
@@ -560,7 +559,7 @@ static ide_startstop_t idescsi_issue_pc(ide_drive_t *drive,
560 559
561 if (drive->using_dma && !idescsi_map_sg(drive, pc)) { 560 if (drive->using_dma && !idescsi_map_sg(drive, pc)) {
562 hwif->sg_mapped = 1; 561 hwif->sg_mapped = 1;
563 dma = !hwif->dma_setup(drive); 562 dma = !hwif->dma_ops->dma_setup(drive);
564 hwif->sg_mapped = 0; 563 hwif->sg_mapped = 0;
565 } 564 }
566 565
@@ -575,7 +574,7 @@ static ide_startstop_t idescsi_issue_pc(ide_drive_t *drive,
575 return ide_started; 574 return ide_started;
576 } else { 575 } else {
577 /* Issue the packet command */ 576 /* Issue the packet command */
578 hwif->OUTB(WIN_PACKETCMD, hwif->io_ports[IDE_COMMAND_OFFSET]); 577 hwif->OUTB(WIN_PACKETCMD, hwif->io_ports.command_addr);
579 return idescsi_transfer_pc(drive); 578 return idescsi_transfer_pc(drive);
580 } 579 }
581} 580}
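
The ide-scsi.c hunks above track an IDE-core conversion from an index-addressed io_ports[] array to named struct members, so call sites read as task-file registers rather than magic offsets. A sketch of the members this driver touches (the struct name here is invented, and the field list is trimmed; in ATAPI mode nsect/lbam/lbah double as interrupt reason and byte count):

    struct ide_io_ports_sketch {
            unsigned long nsect_addr;     /* ATAPI: interrupt reason  */
            unsigned long lbam_addr;      /* ATAPI: byte count, low   */
            unsigned long lbah_addr;      /* ATAPI: byte count, high  */
            unsigned long command_addr;   /* command/status register  */
    };
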
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
index 5d231015bb20..b2d481dd3750 100644
--- a/drivers/scsi/jazz_esp.c
+++ b/drivers/scsi/jazz_esp.c
@@ -217,11 +217,15 @@ static int __devexit esp_jazz_remove(struct platform_device *dev)
217 return 0; 217 return 0;
218} 218}
219 219
220/* work with hotplug and coldplug */
221MODULE_ALIAS("platform:jazz_esp");
222
220static struct platform_driver esp_jazz_driver = { 223static struct platform_driver esp_jazz_driver = {
221 .probe = esp_jazz_probe, 224 .probe = esp_jazz_probe,
222 .remove = __devexit_p(esp_jazz_remove), 225 .remove = __devexit_p(esp_jazz_remove),
223 .driver = { 226 .driver = {
224 .name = "jazz_esp", 227 .name = "jazz_esp",
228 .owner = THIS_MODULE,
225 }, 229 },
226}; 230};
227 231
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index a9fbb3f88659..960baaf11fb1 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -182,8 +182,8 @@ lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr,
182 return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion); 182 return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion);
183} 183}
184static ssize_t 184static ssize_t
185lpfc_state_show(struct device *dev, struct device_attribute *attr, 185lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
186 char *buf) 186 char *buf)
187{ 187{
188 struct Scsi_Host *shost = class_to_shost(dev); 188 struct Scsi_Host *shost = class_to_shost(dev);
189 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 189 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
@@ -936,7 +936,7 @@ static DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL);
936static DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL); 936static DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL);
937static DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL); 937static DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL);
938static DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL); 938static DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL);
939static DEVICE_ATTR(state, S_IRUGO, lpfc_state_show, NULL); 939static DEVICE_ATTR(link_state, S_IRUGO, lpfc_link_state_show, NULL);
940static DEVICE_ATTR(option_rom_version, S_IRUGO, 940static DEVICE_ATTR(option_rom_version, S_IRUGO,
941 lpfc_option_rom_version_show, NULL); 941 lpfc_option_rom_version_show, NULL);
942static DEVICE_ATTR(num_discovered_ports, S_IRUGO, 942static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
@@ -1666,7 +1666,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
1666 &dev_attr_fwrev, 1666 &dev_attr_fwrev,
1667 &dev_attr_hdw, 1667 &dev_attr_hdw,
1668 &dev_attr_option_rom_version, 1668 &dev_attr_option_rom_version,
1669 &dev_attr_state, 1669 &dev_attr_link_state,
1670 &dev_attr_num_discovered_ports, 1670 &dev_attr_num_discovered_ports,
1671 &dev_attr_lpfc_drvr_version, 1671 &dev_attr_lpfc_drvr_version,
1672 &dev_attr_lpfc_temp_sensor, 1672 &dev_attr_lpfc_temp_sensor,
@@ -1714,7 +1714,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
1714 1714
1715struct device_attribute *lpfc_vport_attrs[] = { 1715struct device_attribute *lpfc_vport_attrs[] = {
1716 &dev_attr_info, 1716 &dev_attr_info,
1717 &dev_attr_state, 1717 &dev_attr_link_state,
1718 &dev_attr_num_discovered_ports, 1718 &dev_attr_num_discovered_ports,
1719 &dev_attr_lpfc_drvr_version, 1719 &dev_attr_lpfc_drvr_version,
1720 &dev_attr_lpfc_log_verbose, 1720 &dev_attr_lpfc_log_verbose,
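
The state -> link_state rename in lpfc (and in qla2xxx below) is best read together with the hosts.c change earlier in this series: shost_dev now gets the generic scsi_sysfs_shost_attr_groups, whose standard "state" attribute would collide with a driver-private attribute of the same name. For reference, DEVICE_ATTR(link_state, S_IRUGO, lpfc_link_state_show, NULL) expands to roughly:

    static struct device_attribute dev_attr_link_state = {
            .attr = { .name = "link_state", .mode = S_IRUGO },
            .show = lpfc_link_state_show,
            .store = NULL,
    };
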
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
new file mode 100644
index 000000000000..cd37bd69a115
--- /dev/null
+++ b/drivers/scsi/mac_esp.c
@@ -0,0 +1,657 @@
1/* mac_esp.c: ESP front-end for Macintosh Quadra systems.
2 *
3 * Adapted from jazz_esp.c and the old mac_esp.c.
4 *
5 * The pseudo DMA algorithm is based on the one used in NetBSD.
6 * See sys/arch/mac68k/obio/esp.c for some background information.
7 *
8 * Copyright (C) 2007-2008 Finn Thain
9 */
10
11#include <linux/kernel.h>
12#include <linux/types.h>
13#include <linux/module.h>
14#include <linux/init.h>
15#include <linux/interrupt.h>
16#include <linux/platform_device.h>
17#include <linux/dma-mapping.h>
18#include <linux/scatterlist.h>
19#include <linux/delay.h>
20#include <linux/io.h>
21#include <linux/nubus.h>
22
23#include <asm/irq.h>
24#include <asm/dma.h>
25
26#include <asm/macints.h>
27#include <asm/macintosh.h>
28
29#include <scsi/scsi_host.h>
30
31#include "esp_scsi.h"
32
33#define DRV_MODULE_NAME "mac_esp"
34#define PFX DRV_MODULE_NAME ": "
35#define DRV_VERSION "1.000"
36#define DRV_MODULE_RELDATE "Sept 15, 2007"
37
38#define MAC_ESP_IO_BASE 0x50F00000
39#define MAC_ESP_REGS_QUADRA (MAC_ESP_IO_BASE + 0x10000)
40#define MAC_ESP_REGS_QUADRA2 (MAC_ESP_IO_BASE + 0xF000)
41#define MAC_ESP_REGS_QUADRA3 (MAC_ESP_IO_BASE + 0x18000)
42#define MAC_ESP_REGS_SPACING 0x402
43#define MAC_ESP_PDMA_REG 0xF9800024
44#define MAC_ESP_PDMA_REG_SPACING 0x4
45#define MAC_ESP_PDMA_IO_OFFSET 0x100
46
47#define esp_read8(REG) mac_esp_read8(esp, REG)
48#define esp_write8(VAL, REG) mac_esp_write8(esp, VAL, REG)
49
50struct mac_esp_priv {
51 struct esp *esp;
52 void __iomem *pdma_regs;
53 void __iomem *pdma_io;
54 int error;
55};
56static struct platform_device *internal_esp, *external_esp;
57
58#define MAC_ESP_GET_PRIV(esp) ((struct mac_esp_priv *) \
59 platform_get_drvdata((struct platform_device *) \
60 (esp->dev)))
61
62static inline void mac_esp_write8(struct esp *esp, u8 val, unsigned long reg)
63{
64 nubus_writeb(val, esp->regs + reg * 16);
65}
66
67static inline u8 mac_esp_read8(struct esp *esp, unsigned long reg)
68{
69 return nubus_readb(esp->regs + reg * 16);
70}
71
72/* For pseudo DMA and PIO we need the virtual address
73 * so this address mapping is the identity mapping.
74 */
75
76static dma_addr_t mac_esp_map_single(struct esp *esp, void *buf,
77 size_t sz, int dir)
78{
79 return (dma_addr_t)buf;
80}
81
82static int mac_esp_map_sg(struct esp *esp, struct scatterlist *sg,
83 int num_sg, int dir)
84{
85 int i;
86
87 for (i = 0; i < num_sg; i++)
88 sg[i].dma_address = (u32)sg_virt(&sg[i]);
89 return num_sg;
90}
91
92static void mac_esp_unmap_single(struct esp *esp, dma_addr_t addr,
93 size_t sz, int dir)
94{
95 /* Nothing to do. */
96}
97
98static void mac_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
99 int num_sg, int dir)
100{
101 /* Nothing to do. */
102}
103
104static void mac_esp_reset_dma(struct esp *esp)
105{
106 /* Nothing to do. */
107}
108
109static void mac_esp_dma_drain(struct esp *esp)
110{
111 /* Nothing to do. */
112}
113
114static void mac_esp_dma_invalidate(struct esp *esp)
115{
116 /* Nothing to do. */
117}
118
119static int mac_esp_dma_error(struct esp *esp)
120{
121 return MAC_ESP_GET_PRIV(esp)->error;
122}
123
124static inline int mac_esp_wait_for_empty_fifo(struct esp *esp)
125{
126 struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
127 int i = 500000;
128
129 do {
130 if (!(esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES))
131 return 0;
132
133 if (esp_read8(ESP_STATUS) & ESP_STAT_INTR)
134 return 1;
135
136 udelay(2);
137 } while (--i);
138
139 printk(KERN_ERR PFX "FIFO is not empty (sreg %02x)\n",
140 esp_read8(ESP_STATUS));
141 mep->error = 1;
142 return 1;
143}
144
145static inline int mac_esp_wait_for_dreq(struct esp *esp)
146{
147 struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
148 int i = 500000;
149
150 do {
151 if (mep->pdma_regs == NULL) {
152 if (mac_irq_pending(IRQ_MAC_SCSIDRQ))
153 return 0;
154 } else {
155 if (nubus_readl(mep->pdma_regs) & 0x200)
156 return 0;
157 }
158
159 if (esp_read8(ESP_STATUS) & ESP_STAT_INTR)
160 return 1;
161
162 udelay(2);
163 } while (--i);
164
165 printk(KERN_ERR PFX "PDMA timeout (sreg %02x)\n",
166 esp_read8(ESP_STATUS));
167 mep->error = 1;
168 return 1;
169}
170
171#define MAC_ESP_PDMA_LOOP(operands) \
172 asm volatile ( \
173 " tstw %2 \n" \
174 " jbeq 20f \n" \
175 "1: movew " operands " \n" \
176 "2: movew " operands " \n" \
177 "3: movew " operands " \n" \
178 "4: movew " operands " \n" \
179 "5: movew " operands " \n" \
180 "6: movew " operands " \n" \
181 "7: movew " operands " \n" \
182 "8: movew " operands " \n" \
183 "9: movew " operands " \n" \
184 "10: movew " operands " \n" \
185 "11: movew " operands " \n" \
186 "12: movew " operands " \n" \
187 "13: movew " operands " \n" \
188 "14: movew " operands " \n" \
189 "15: movew " operands " \n" \
190 "16: movew " operands " \n" \
191 " subqw #1,%2 \n" \
192 " jbne 1b \n" \
193 "20: tstw %3 \n" \
194 " jbeq 30f \n" \
195 "21: movew " operands " \n" \
196 " subqw #1,%3 \n" \
197 " jbne 21b \n" \
198 "30: tstw %4 \n" \
199 " jbeq 40f \n" \
200 "31: moveb " operands " \n" \
201 "32: nop \n" \
202 "40: \n" \
203 " \n" \
204 " .section __ex_table,\"a\" \n" \
205 " .align 4 \n" \
206 " .long 1b,40b \n" \
207 " .long 2b,40b \n" \
208 " .long 3b,40b \n" \
209 " .long 4b,40b \n" \
210 " .long 5b,40b \n" \
211 " .long 6b,40b \n" \
212 " .long 7b,40b \n" \
213 " .long 8b,40b \n" \
214 " .long 9b,40b \n" \
215 " .long 10b,40b \n" \
216 " .long 11b,40b \n" \
217 " .long 12b,40b \n" \
218 " .long 13b,40b \n" \
219 " .long 14b,40b \n" \
220 " .long 15b,40b \n" \
221 " .long 16b,40b \n" \
222 " .long 21b,40b \n" \
223 " .long 31b,40b \n" \
224 " .long 32b,40b \n" \
225 " .previous \n" \
226 : "+a" (addr) \
227 : "a" (mep->pdma_io), "r" (count32), "r" (count2), "g" (esp_count))
228
229static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count,
230 u32 dma_count, int write, u8 cmd)
231{
232 struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
233 unsigned long flags;
234
235 local_irq_save(flags);
236
237 mep->error = 0;
238
239 if (!write)
240 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
241
242 esp_write8((esp_count >> 0) & 0xFF, ESP_TCLOW);
243 esp_write8((esp_count >> 8) & 0xFF, ESP_TCMED);
244
245 scsi_esp_cmd(esp, cmd);
246
247 do {
248 unsigned int count32 = esp_count >> 5;
249 unsigned int count2 = (esp_count & 0x1F) >> 1;
250 unsigned int start_addr = addr;
251
252 if (mac_esp_wait_for_dreq(esp))
253 break;
254
255 if (write) {
256 MAC_ESP_PDMA_LOOP("%1@,%0@+");
257
258 esp_count -= addr - start_addr;
259 } else {
260 unsigned int n;
261
262 MAC_ESP_PDMA_LOOP("%0@+,%1@");
263
264 if (mac_esp_wait_for_empty_fifo(esp))
265 break;
266
267 n = (esp_read8(ESP_TCMED) << 8) + esp_read8(ESP_TCLOW);
268 addr = start_addr + esp_count - n;
269 esp_count = n;
270 }
271 } while (esp_count);
272
273 local_irq_restore(flags);
274}
275
276/*
277 * Programmed IO routines follow.
278 */
279
280static inline int mac_esp_wait_for_fifo(struct esp *esp)
281{
282 int i = 500000;
283
284 do {
285 if (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES)
286 return 0;
287
288 udelay(2);
289 } while (--i);
290
291 printk(KERN_ERR PFX "FIFO is empty (sreg %02x)\n",
292 esp_read8(ESP_STATUS));
293 return 1;
294}
295
296static inline int mac_esp_wait_for_intr(struct esp *esp)
297{
298 int i = 500000;
299
300 do {
301 esp->sreg = esp_read8(ESP_STATUS);
302 if (esp->sreg & ESP_STAT_INTR)
303 return 0;
304
305 udelay(2);
306 } while (--i);
307
308 printk(KERN_ERR PFX "IRQ timeout (sreg %02x)\n", esp->sreg);
309 return 1;
310}
311
312#define MAC_ESP_PIO_LOOP(operands, reg1) \
313 asm volatile ( \
314 "1: moveb " operands " \n" \
315 " subqw #1,%1 \n" \
316 " jbne 1b \n" \
317 : "+a" (addr), "+r" (reg1) \
318 : "a" (fifo))
319
320#define MAC_ESP_PIO_FILL(operands, reg1) \
321 asm volatile ( \
322 " moveb " operands " \n" \
323 " moveb " operands " \n" \
324 " moveb " operands " \n" \
325 " moveb " operands " \n" \
326 " moveb " operands " \n" \
327 " moveb " operands " \n" \
328 " moveb " operands " \n" \
329 " moveb " operands " \n" \
330 " moveb " operands " \n" \
331 " moveb " operands " \n" \
332 " moveb " operands " \n" \
333 " moveb " operands " \n" \
334 " moveb " operands " \n" \
335 " moveb " operands " \n" \
336 " moveb " operands " \n" \
337 " moveb " operands " \n" \
338 " subqw #8,%1 \n" \
339 " subqw #8,%1 \n" \
340 : "+a" (addr), "+r" (reg1) \
341 : "a" (fifo))
342
343#define MAC_ESP_FIFO_SIZE 16
344
345static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
346 u32 dma_count, int write, u8 cmd)
347{
348 unsigned long flags;
349 struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
350 u8 *fifo = esp->regs + ESP_FDATA * 16;
351
352 local_irq_save(flags);
353
354 cmd &= ~ESP_CMD_DMA;
355 mep->error = 0;
356
357 if (write) {
358 scsi_esp_cmd(esp, cmd);
359
360 if (!mac_esp_wait_for_intr(esp)) {
361 if (mac_esp_wait_for_fifo(esp))
362 esp_count = 0;
363 } else {
364 esp_count = 0;
365 }
366 } else {
367 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
368
369 if (esp_count >= MAC_ESP_FIFO_SIZE)
370 MAC_ESP_PIO_FILL("%0@+,%2@", esp_count);
371 else
372 MAC_ESP_PIO_LOOP("%0@+,%2@", esp_count);
373
374 scsi_esp_cmd(esp, cmd);
375 }
376
377 while (esp_count) {
378 unsigned int n;
379
380 if (mac_esp_wait_for_intr(esp)) {
381 mep->error = 1;
382 break;
383 }
384
385 if (esp->sreg & ESP_STAT_SPAM) {
386 printk(KERN_ERR PFX "gross error\n");
387 mep->error = 1;
388 break;
389 }
390
391 n = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
392
393 if (write) {
394 if (n > esp_count)
395 n = esp_count;
396 esp_count -= n;
397
398 MAC_ESP_PIO_LOOP("%2@,%0@+", n);
399
400 if ((esp->sreg & ESP_STAT_PMASK) == ESP_STATP)
401 break;
402
403 if (esp_count) {
404 esp->ireg = esp_read8(ESP_INTRPT);
405 if (esp->ireg & ESP_INTR_DC)
406 break;
407
408 scsi_esp_cmd(esp, ESP_CMD_TI);
409 }
410 } else {
411 esp->ireg = esp_read8(ESP_INTRPT);
412 if (esp->ireg & ESP_INTR_DC)
413 break;
414
415 n = MAC_ESP_FIFO_SIZE - n;
416 if (n > esp_count)
417 n = esp_count;
418
419 if (n == MAC_ESP_FIFO_SIZE) {
420 MAC_ESP_PIO_FILL("%0@+,%2@", esp_count);
421 } else {
422 esp_count -= n;
423 MAC_ESP_PIO_LOOP("%0@+,%2@", n);
424 }
425
426 scsi_esp_cmd(esp, ESP_CMD_TI);
427 }
428 }
429
430 local_irq_restore(flags);
431}
432
433static int mac_esp_irq_pending(struct esp *esp)
434{
435 if (esp_read8(ESP_STATUS) & ESP_STAT_INTR)
436 return 1;
437 return 0;
438}
439
440static u32 mac_esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
441{
442 return dma_len > 0xFFFF ? 0xFFFF : dma_len;
443}
444
445static struct esp_driver_ops mac_esp_ops = {
446 .esp_write8 = mac_esp_write8,
447 .esp_read8 = mac_esp_read8,
448 .map_single = mac_esp_map_single,
449 .map_sg = mac_esp_map_sg,
450 .unmap_single = mac_esp_unmap_single,
451 .unmap_sg = mac_esp_unmap_sg,
452 .irq_pending = mac_esp_irq_pending,
453 .dma_length_limit = mac_esp_dma_length_limit,
454 .reset_dma = mac_esp_reset_dma,
455 .dma_drain = mac_esp_dma_drain,
456 .dma_invalidate = mac_esp_dma_invalidate,
457 .send_dma_cmd = mac_esp_send_pdma_cmd,
458 .dma_error = mac_esp_dma_error,
459};
460
461static int __devinit esp_mac_probe(struct platform_device *dev)
462{
463 struct scsi_host_template *tpnt = &scsi_esp_template;
464 struct Scsi_Host *host;
465 struct esp *esp;
466 int err;
467 int chips_present;
468 struct mac_esp_priv *mep;
469
470 if (!MACH_IS_MAC)
471 return -ENODEV;
472
473 switch (macintosh_config->scsi_type) {
474 case MAC_SCSI_QUADRA:
475 case MAC_SCSI_QUADRA3:
476 chips_present = 1;
477 break;
478 case MAC_SCSI_QUADRA2:
479 if ((macintosh_config->ident == MAC_MODEL_Q900) ||
480 (macintosh_config->ident == MAC_MODEL_Q950))
481 chips_present = 2;
482 else
483 chips_present = 1;
484 break;
485 default:
486 chips_present = 0;
487 }
488
489 if (dev->id + 1 > chips_present)
490 return -ENODEV;
491
492 host = scsi_host_alloc(tpnt, sizeof(struct esp));
493
494 err = -ENOMEM;
495 if (!host)
496 goto fail;
497
498 host->max_id = 8;
499 host->use_clustering = DISABLE_CLUSTERING;
500 esp = shost_priv(host);
501
502 esp->host = host;
503 esp->dev = dev;
504
505 esp->command_block = kzalloc(16, GFP_KERNEL);
506 if (!esp->command_block)
507 goto fail_unlink;
508 esp->command_block_dma = (dma_addr_t)esp->command_block;
509
510 esp->scsi_id = 7;
511 host->this_id = esp->scsi_id;
512 esp->scsi_id_mask = 1 << esp->scsi_id;
513
514 mep = kzalloc(sizeof(struct mac_esp_priv), GFP_KERNEL);
515 if (!mep)
516 goto fail_free_command_block;
517 mep->esp = esp;
518 platform_set_drvdata(dev, mep);
519
520 switch (macintosh_config->scsi_type) {
521 case MAC_SCSI_QUADRA:
522 esp->cfreq = 16500000;
523 esp->regs = (void __iomem *)MAC_ESP_REGS_QUADRA;
524 mep->pdma_io = esp->regs + MAC_ESP_PDMA_IO_OFFSET;
525 mep->pdma_regs = NULL;
526 break;
527 case MAC_SCSI_QUADRA2:
528 esp->cfreq = 25000000;
529 esp->regs = (void __iomem *)(MAC_ESP_REGS_QUADRA2 +
530 dev->id * MAC_ESP_REGS_SPACING);
531 mep->pdma_io = esp->regs + MAC_ESP_PDMA_IO_OFFSET;
532 mep->pdma_regs = (void __iomem *)(MAC_ESP_PDMA_REG +
533 dev->id * MAC_ESP_PDMA_REG_SPACING);
534 nubus_writel(0x1d1, mep->pdma_regs);
535 break;
536 case MAC_SCSI_QUADRA3:
537 /* These quadras have a real DMA controller (the PSC) but we
538 * don't know how to drive it so we must use PIO instead.
539 */
540 esp->cfreq = 25000000;
541 esp->regs = (void __iomem *)MAC_ESP_REGS_QUADRA3;
542 mep->pdma_io = NULL;
543 mep->pdma_regs = NULL;
544 break;
545 }
546
547 esp->ops = &mac_esp_ops;
548 if (mep->pdma_io == NULL) {
549 printk(KERN_INFO PFX "using PIO for controller %d\n", dev->id);
550 esp_write8(0, ESP_TCLOW);
551 esp_write8(0, ESP_TCMED);
552 esp->flags = ESP_FLAG_DISABLE_SYNC;
553 mac_esp_ops.send_dma_cmd = mac_esp_send_pio_cmd;
554 } else {
555 printk(KERN_INFO PFX "using PDMA for controller %d\n", dev->id);
556 }
557
558 host->irq = IRQ_MAC_SCSI;
559 err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "Mac ESP",
560 esp);
561 if (err < 0)
562 goto fail_free_priv;
563
564 err = scsi_esp_register(esp, &dev->dev);
565 if (err)
566 goto fail_free_irq;
567
568 return 0;
569
570fail_free_irq:
571 free_irq(host->irq, esp);
572fail_free_priv:
573 kfree(mep);
574fail_free_command_block:
575 kfree(esp->command_block);
576fail_unlink:
577 scsi_host_put(host);
578fail:
579 return err;
580}
581
582static int __devexit esp_mac_remove(struct platform_device *dev)
583{
584 struct mac_esp_priv *mep = platform_get_drvdata(dev);
585 struct esp *esp = mep->esp;
586 unsigned int irq = esp->host->irq;
587
588 scsi_esp_unregister(esp);
589
590 free_irq(irq, esp);
591
592 kfree(mep);
593
594 kfree(esp->command_block);
595
596 scsi_host_put(esp->host);
597
598 return 0;
599}
600
601static struct platform_driver esp_mac_driver = {
602 .probe = esp_mac_probe,
603 .remove = __devexit_p(esp_mac_remove),
604 .driver = {
605 .name = DRV_MODULE_NAME,
606 },
607};
608
609static int __init mac_esp_init(void)
610{
611 int err;
612
613 err = platform_driver_register(&esp_mac_driver);
614 if (err)
615 return err;
616
617 internal_esp = platform_device_alloc(DRV_MODULE_NAME, 0);
618 if (internal_esp && platform_device_add(internal_esp)) {
619 platform_device_put(internal_esp);
620 internal_esp = NULL;
621 }
622
623 external_esp = platform_device_alloc(DRV_MODULE_NAME, 1);
624 if (external_esp && platform_device_add(external_esp)) {
625 platform_device_put(external_esp);
626 external_esp = NULL;
627 }
628
629 if (internal_esp || external_esp) {
630 return 0;
631 } else {
632 platform_driver_unregister(&esp_mac_driver);
633 return -ENOMEM;
634 }
635}
636
637static void __exit mac_esp_exit(void)
638{
639 platform_driver_unregister(&esp_mac_driver);
640
641 if (internal_esp) {
642 platform_device_unregister(internal_esp);
643 internal_esp = NULL;
644 }
645 if (external_esp) {
646 platform_device_unregister(external_esp);
647 external_esp = NULL;
648 }
649}
650
651MODULE_DESCRIPTION("Mac ESP SCSI driver");
652MODULE_AUTHOR("Finn Thain <fthain@telegraphics.com.au>");
 653MODULE_LICENSE("GPL v2");
654MODULE_VERSION(DRV_VERSION);
655
656module_init(mac_esp_init);
657module_exit(mac_esp_exit);
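
The PDMA loop in mac_esp.c above moves 16 words (32 bytes) per unrolled pass, then single words, then at most one trailing byte, so the caller splits esp_count as sketched below. A standalone check of the arithmetic; count1 is named here only for illustration, as the asm derives the tail from esp_count itself:

    #include <stdio.h>

    int main(void)
    {
            unsigned int esp_count = 517;                   /* example size  */
            unsigned int count32 = esp_count >> 5;          /* 32-byte blocks */
            unsigned int count2 = (esp_count & 0x1F) >> 1;  /* leftover words */
            unsigned int count1 = esp_count & 1;            /* odd tail byte  */

            printf("%u = %u*32 + %u*2 + %u\n",
                   esp_count, count32, count2, count1);
            /* prints: 517 = 16*32 + 2*2 + 1 */
            return 0;
    }
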
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index d61df036910c..287690853caf 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -609,8 +609,8 @@ qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
609} 609}
610 610
611static ssize_t 611static ssize_t
612qla2x00_state_show(struct device *dev, struct device_attribute *attr, 612qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
613 char *buf) 613 char *buf)
614{ 614{
615 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 615 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
616 int len = 0; 616 int len = 0;
@@ -814,7 +814,7 @@ static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
814static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL); 814static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
815static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL); 815static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
816static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL); 816static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
817static DEVICE_ATTR(state, S_IRUGO, qla2x00_state_show, NULL); 817static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL);
818static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store); 818static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store);
819static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show, 819static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
820 qla2x00_zio_timer_store); 820 qla2x00_zio_timer_store);
@@ -838,7 +838,7 @@ struct device_attribute *qla2x00_host_attrs[] = {
838 &dev_attr_model_name, 838 &dev_attr_model_name,
839 &dev_attr_model_desc, 839 &dev_attr_model_desc,
840 &dev_attr_pci_info, 840 &dev_attr_pci_info,
841 &dev_attr_state, 841 &dev_attr_link_state,
842 &dev_attr_zio, 842 &dev_attr_zio,
843 &dev_attr_zio_timer, 843 &dev_attr_zio_timer,
844 &dev_attr_beacon, 844 &dev_attr_beacon,
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 9d12d9f26209..cbef785765cf 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -38,78 +38,38 @@ qla2xxx_copy_queues(scsi_qla_host_t *ha, void *ptr)
38} 38}
39 39
40static int 40static int
41qla24xx_dump_memory(scsi_qla_host_t *ha, uint32_t *code_ram, 41qla24xx_dump_ram(scsi_qla_host_t *ha, uint32_t addr, uint32_t *ram,
42 uint32_t cram_size, uint32_t *ext_mem, void **nxt) 42 uint32_t ram_dwords, void **nxt)
43{ 43{
44 int rval; 44 int rval;
45 uint32_t cnt, stat, timer, risc_address, ext_mem_cnt; 45 uint32_t cnt, stat, timer, dwords, idx;
46 uint16_t mb[4]; 46 uint16_t mb0;
47 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 47 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
48 dma_addr_t dump_dma = ha->gid_list_dma;
49 uint32_t *dump = (uint32_t *)ha->gid_list;
48 50
49 rval = QLA_SUCCESS; 51 rval = QLA_SUCCESS;
50 risc_address = ext_mem_cnt = 0; 52 mb0 = 0;
51 memset(mb, 0, sizeof(mb));
52 53
53 /* Code RAM. */ 54 WRT_REG_WORD(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
54 risc_address = 0x20000;
55 WRT_REG_WORD(&reg->mailbox0, MBC_READ_RAM_EXTENDED);
56 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 55 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
57 56
58 for (cnt = 0; cnt < cram_size / 4 && rval == QLA_SUCCESS; 57 dwords = GID_LIST_SIZE / 4;
59 cnt++, risc_address++) { 58 for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
60 WRT_REG_WORD(&reg->mailbox1, LSW(risc_address)); 59 cnt += dwords, addr += dwords) {
61 WRT_REG_WORD(&reg->mailbox8, MSW(risc_address)); 60 if (cnt + dwords > ram_dwords)
62 RD_REG_WORD(&reg->mailbox8); 61 dwords = ram_dwords - cnt;
63 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
64
65 for (timer = 6000000; timer; timer--) {
66 /* Check for pending interrupts. */
67 stat = RD_REG_DWORD(&reg->host_status);
68 if (stat & HSRX_RISC_INT) {
69 stat &= 0xff;
70 62
71 if (stat == 0x1 || stat == 0x2 || 63 WRT_REG_WORD(&reg->mailbox1, LSW(addr));
72 stat == 0x10 || stat == 0x11) { 64 WRT_REG_WORD(&reg->mailbox8, MSW(addr));
73 set_bit(MBX_INTERRUPT,
74 &ha->mbx_cmd_flags);
75 65
76 mb[0] = RD_REG_WORD(&reg->mailbox0); 66 WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
77 mb[2] = RD_REG_WORD(&reg->mailbox2); 67 WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
78 mb[3] = RD_REG_WORD(&reg->mailbox3); 68 WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
69 WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
79 70
80 WRT_REG_DWORD(&reg->hccr, 71 WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
81 HCCRX_CLR_RISC_INT); 72 WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
82 RD_REG_DWORD(&reg->hccr);
83 break;
84 }
85
86 /* Clear this intr; it wasn't a mailbox intr */
87 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
88 RD_REG_DWORD(&reg->hccr);
89 }
90 udelay(5);
91 }
92
93 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
94 rval = mb[0] & MBS_MASK;
95 code_ram[cnt] = htonl((mb[3] << 16) | mb[2]);
96 } else {
97 rval = QLA_FUNCTION_FAILED;
98 }
99 }
100
101 if (rval == QLA_SUCCESS) {
102 /* External Memory. */
103 risc_address = 0x100000;
104 ext_mem_cnt = ha->fw_memory_size - 0x100000 + 1;
105 WRT_REG_WORD(&reg->mailbox0, MBC_READ_RAM_EXTENDED);
106 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
107 }
108 for (cnt = 0; cnt < ext_mem_cnt && rval == QLA_SUCCESS;
109 cnt++, risc_address++) {
110 WRT_REG_WORD(&reg->mailbox1, LSW(risc_address));
111 WRT_REG_WORD(&reg->mailbox8, MSW(risc_address));
112 RD_REG_WORD(&reg->mailbox8);
113 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT); 73 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
114 74
115 for (timer = 6000000; timer; timer--) { 75 for (timer = 6000000; timer; timer--) {
@@ -123,9 +83,7 @@ qla24xx_dump_memory(scsi_qla_host_t *ha, uint32_t *code_ram,
123 set_bit(MBX_INTERRUPT, 83 set_bit(MBX_INTERRUPT,
124 &ha->mbx_cmd_flags); 84 &ha->mbx_cmd_flags);
125 85
126 mb[0] = RD_REG_WORD(&reg->mailbox0); 86 mb0 = RD_REG_WORD(&reg->mailbox0);
127 mb[2] = RD_REG_WORD(&reg->mailbox2);
128 mb[3] = RD_REG_WORD(&reg->mailbox3);
129 87
130 WRT_REG_DWORD(&reg->hccr, 88 WRT_REG_DWORD(&reg->hccr,
131 HCCRX_CLR_RISC_INT); 89 HCCRX_CLR_RISC_INT);
@@ -141,17 +99,34 @@ qla24xx_dump_memory(scsi_qla_host_t *ha, uint32_t *code_ram,
141 } 99 }
142 100
143 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { 101 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
144 rval = mb[0] & MBS_MASK; 102 rval = mb0 & MBS_MASK;
145 ext_mem[cnt] = htonl((mb[3] << 16) | mb[2]); 103 for (idx = 0; idx < dwords; idx++)
104 ram[cnt + idx] = swab32(dump[idx]);
146 } else { 105 } else {
147 rval = QLA_FUNCTION_FAILED; 106 rval = QLA_FUNCTION_FAILED;
148 } 107 }
149 } 108 }
150 109
151 *nxt = rval == QLA_SUCCESS ? &ext_mem[cnt]: NULL; 110 *nxt = rval == QLA_SUCCESS ? &ram[cnt]: NULL;
152 return rval; 111 return rval;
153} 112}
154 113
114static int
115qla24xx_dump_memory(scsi_qla_host_t *ha, uint32_t *code_ram,
116 uint32_t cram_size, void **nxt)
117{
118 int rval;
119
120 /* Code RAM. */
121 rval = qla24xx_dump_ram(ha, 0x20000, code_ram, cram_size / 4, nxt);
122 if (rval != QLA_SUCCESS)
123 return rval;
124
125 /* External Memory. */
126 return qla24xx_dump_ram(ha, 0x100000, *nxt,
127 ha->fw_memory_size - 0x100000 + 1, nxt);
128}
129
155static uint32_t * 130static uint32_t *
156qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase, 131qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
157 uint32_t count, uint32_t *buf) 132 uint32_t count, uint32_t *buf)
@@ -239,6 +214,90 @@ qla24xx_soft_reset(scsi_qla_host_t *ha)
239 return rval; 214 return rval;
240} 215}
241 216
217static int
218qla2xxx_dump_ram(scsi_qla_host_t *ha, uint32_t addr, uint16_t *ram,
219 uint16_t ram_words, void **nxt)
220{
221 int rval;
222 uint32_t cnt, stat, timer, words, idx;
223 uint16_t mb0;
224 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
225 dma_addr_t dump_dma = ha->gid_list_dma;
226 uint16_t *dump = (uint16_t *)ha->gid_list;
227
228 rval = QLA_SUCCESS;
229 mb0 = 0;
230
231 WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED);
232 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
233
234 words = GID_LIST_SIZE / 2;
235 for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS;
236 cnt += words, addr += words) {
237 if (cnt + words > ram_words)
238 words = ram_words - cnt;
239
240 WRT_MAILBOX_REG(ha, reg, 1, LSW(addr));
241 WRT_MAILBOX_REG(ha, reg, 8, MSW(addr));
242
243 WRT_MAILBOX_REG(ha, reg, 2, MSW(dump_dma));
244 WRT_MAILBOX_REG(ha, reg, 3, LSW(dump_dma));
245 WRT_MAILBOX_REG(ha, reg, 6, MSW(MSD(dump_dma)));
246 WRT_MAILBOX_REG(ha, reg, 7, LSW(MSD(dump_dma)));
247
248 WRT_MAILBOX_REG(ha, reg, 4, words);
249 WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
250
251 for (timer = 6000000; timer; timer--) {
252 /* Check for pending interrupts. */
253 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
254 if (stat & HSR_RISC_INT) {
255 stat &= 0xff;
256
257 if (stat == 0x1 || stat == 0x2) {
258 set_bit(MBX_INTERRUPT,
259 &ha->mbx_cmd_flags);
260
261 mb0 = RD_MAILBOX_REG(ha, reg, 0);
262
263 /* Release mailbox registers. */
264 WRT_REG_WORD(&reg->semaphore, 0);
265 WRT_REG_WORD(&reg->hccr,
266 HCCR_CLR_RISC_INT);
267 RD_REG_WORD(&reg->hccr);
268 break;
269 } else if (stat == 0x10 || stat == 0x11) {
270 set_bit(MBX_INTERRUPT,
271 &ha->mbx_cmd_flags);
272
273 mb0 = RD_MAILBOX_REG(ha, reg, 0);
274
275 WRT_REG_WORD(&reg->hccr,
276 HCCR_CLR_RISC_INT);
277 RD_REG_WORD(&reg->hccr);
278 break;
279 }
280
281 /* clear this intr; it wasn't a mailbox intr */
282 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
283 RD_REG_WORD(&reg->hccr);
284 }
285 udelay(5);
286 }
287
288 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
289 rval = mb0 & MBS_MASK;
290 for (idx = 0; idx < words; idx++)
291 ram[cnt + idx] = swab16(dump[idx]);
292 } else {
293 rval = QLA_FUNCTION_FAILED;
294 }
295 }
296
297 *nxt = rval == QLA_SUCCESS ? &ram[cnt]: NULL;
298 return rval;
299}
300
242static inline void 301static inline void
243qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count, 302qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
244 uint16_t *buf) 303 uint16_t *buf)
@@ -258,19 +317,14 @@ void
258qla2300_fw_dump(scsi_qla_host_t *ha, int hardware_locked) 317qla2300_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
259{ 318{
260 int rval; 319 int rval;
261 uint32_t cnt, timer; 320 uint32_t cnt;
262 uint32_t risc_address;
263 uint16_t mb0, mb2;
264 321
265 uint32_t stat;
266 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 322 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
267 uint16_t __iomem *dmp_reg; 323 uint16_t __iomem *dmp_reg;
268 unsigned long flags; 324 unsigned long flags;
269 struct qla2300_fw_dump *fw; 325 struct qla2300_fw_dump *fw;
270 uint32_t data_ram_cnt; 326 void *nxt;
271 327
272 risc_address = data_ram_cnt = 0;
273 mb0 = mb2 = 0;
274 flags = 0; 328 flags = 0;
275 329
276 if (!hardware_locked) 330 if (!hardware_locked)
@@ -388,185 +442,23 @@ qla2300_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
388 } 442 }
389 } 443 }
390 444
391 if (rval == QLA_SUCCESS) { 445 /* Get RISC SRAM. */
392 /* Get RISC SRAM. */ 446 if (rval == QLA_SUCCESS)
393 risc_address = 0x800; 447 rval = qla2xxx_dump_ram(ha, 0x800, fw->risc_ram,
394 WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD); 448 sizeof(fw->risc_ram) / 2, &nxt);
395 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
396 }
397 for (cnt = 0; cnt < sizeof(fw->risc_ram) / 2 && rval == QLA_SUCCESS;
398 cnt++, risc_address++) {
399 WRT_MAILBOX_REG(ha, reg, 1, (uint16_t)risc_address);
400 WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
401
402 for (timer = 6000000; timer; timer--) {
403 /* Check for pending interrupts. */
404 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
405 if (stat & HSR_RISC_INT) {
406 stat &= 0xff;
407
408 if (stat == 0x1 || stat == 0x2) {
409 set_bit(MBX_INTERRUPT,
410 &ha->mbx_cmd_flags);
411
412 mb0 = RD_MAILBOX_REG(ha, reg, 0);
413 mb2 = RD_MAILBOX_REG(ha, reg, 2);
414
415 /* Release mailbox registers. */
416 WRT_REG_WORD(&reg->semaphore, 0);
417 WRT_REG_WORD(&reg->hccr,
418 HCCR_CLR_RISC_INT);
419 RD_REG_WORD(&reg->hccr);
420 break;
421 } else if (stat == 0x10 || stat == 0x11) {
422 set_bit(MBX_INTERRUPT,
423 &ha->mbx_cmd_flags);
424
425 mb0 = RD_MAILBOX_REG(ha, reg, 0);
426 mb2 = RD_MAILBOX_REG(ha, reg, 2);
427
428 WRT_REG_WORD(&reg->hccr,
429 HCCR_CLR_RISC_INT);
430 RD_REG_WORD(&reg->hccr);
431 break;
432 }
433
434 /* clear this intr; it wasn't a mailbox intr */
435 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
436 RD_REG_WORD(&reg->hccr);
437 }
438 udelay(5);
439 }
440
441 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
442 rval = mb0 & MBS_MASK;
443 fw->risc_ram[cnt] = htons(mb2);
444 } else {
445 rval = QLA_FUNCTION_FAILED;
446 }
447 }
448
449 if (rval == QLA_SUCCESS) {
450 /* Get stack SRAM. */
451 risc_address = 0x10000;
452 WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_EXTENDED);
453 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
454 }
455 for (cnt = 0; cnt < sizeof(fw->stack_ram) / 2 && rval == QLA_SUCCESS;
456 cnt++, risc_address++) {
457 WRT_MAILBOX_REG(ha, reg, 1, LSW(risc_address));
458 WRT_MAILBOX_REG(ha, reg, 8, MSW(risc_address));
459 WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
460
461 for (timer = 6000000; timer; timer--) {
462 /* Check for pending interrupts. */
463 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
464 if (stat & HSR_RISC_INT) {
465 stat &= 0xff;
466
467 if (stat == 0x1 || stat == 0x2) {
468 set_bit(MBX_INTERRUPT,
469 &ha->mbx_cmd_flags);
470
471 mb0 = RD_MAILBOX_REG(ha, reg, 0);
472 mb2 = RD_MAILBOX_REG(ha, reg, 2);
473
474 /* Release mailbox registers. */
475 WRT_REG_WORD(&reg->semaphore, 0);
476 WRT_REG_WORD(&reg->hccr,
477 HCCR_CLR_RISC_INT);
478 RD_REG_WORD(&reg->hccr);
479 break;
480 } else if (stat == 0x10 || stat == 0x11) {
481 set_bit(MBX_INTERRUPT,
482 &ha->mbx_cmd_flags);
483
484 mb0 = RD_MAILBOX_REG(ha, reg, 0);
485 mb2 = RD_MAILBOX_REG(ha, reg, 2);
486
487 WRT_REG_WORD(&reg->hccr,
488 HCCR_CLR_RISC_INT);
489 RD_REG_WORD(&reg->hccr);
490 break;
491 }
492
493 /* clear this intr; it wasn't a mailbox intr */
494 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
495 RD_REG_WORD(&reg->hccr);
496 }
497 udelay(5);
498 }
499
500 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
501 rval = mb0 & MBS_MASK;
502 fw->stack_ram[cnt] = htons(mb2);
503 } else {
504 rval = QLA_FUNCTION_FAILED;
505 }
506 }
507
508 if (rval == QLA_SUCCESS) {
509 /* Get data SRAM. */
510 risc_address = 0x11000;
511 data_ram_cnt = ha->fw_memory_size - risc_address + 1;
512 WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_EXTENDED);
513 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
514 }
515 for (cnt = 0; cnt < data_ram_cnt && rval == QLA_SUCCESS;
516 cnt++, risc_address++) {
517 WRT_MAILBOX_REG(ha, reg, 1, LSW(risc_address));
518 WRT_MAILBOX_REG(ha, reg, 8, MSW(risc_address));
519 WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
520
521 for (timer = 6000000; timer; timer--) {
522 /* Check for pending interrupts. */
523 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
524 if (stat & HSR_RISC_INT) {
525 stat &= 0xff;
526
527 if (stat == 0x1 || stat == 0x2) {
528 set_bit(MBX_INTERRUPT,
529 &ha->mbx_cmd_flags);
530
531 mb0 = RD_MAILBOX_REG(ha, reg, 0);
532 mb2 = RD_MAILBOX_REG(ha, reg, 2);
533
534 /* Release mailbox registers. */
535 WRT_REG_WORD(&reg->semaphore, 0);
536 WRT_REG_WORD(&reg->hccr,
537 HCCR_CLR_RISC_INT);
538 RD_REG_WORD(&reg->hccr);
539 break;
540 } else if (stat == 0x10 || stat == 0x11) {
541 set_bit(MBX_INTERRUPT,
542 &ha->mbx_cmd_flags);
543
544 mb0 = RD_MAILBOX_REG(ha, reg, 0);
545 mb2 = RD_MAILBOX_REG(ha, reg, 2);
546
547 WRT_REG_WORD(&reg->hccr,
548 HCCR_CLR_RISC_INT);
549 RD_REG_WORD(&reg->hccr);
550 break;
551 }
552 449
553 /* clear this intr; it wasn't a mailbox intr */ 450 /* Get stack SRAM. */
554 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); 451 if (rval == QLA_SUCCESS)
555 RD_REG_WORD(&reg->hccr); 452 rval = qla2xxx_dump_ram(ha, 0x10000, fw->stack_ram,
556 } 453 sizeof(fw->stack_ram) / 2, &nxt);
557 udelay(5);
558 }
559 454
560 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { 455 /* Get data SRAM. */
561 rval = mb0 & MBS_MASK; 456 if (rval == QLA_SUCCESS)
562 fw->data_ram[cnt] = htons(mb2); 457 rval = qla2xxx_dump_ram(ha, 0x11000, fw->data_ram,
563 } else { 458 ha->fw_memory_size - 0x11000 + 1, &nxt);
564 rval = QLA_FUNCTION_FAILED;
565 }
566 }
567 459
568 if (rval == QLA_SUCCESS) 460 if (rval == QLA_SUCCESS)
569 qla2xxx_copy_queues(ha, &fw->data_ram[cnt]); 461 qla2xxx_copy_queues(ha, nxt);
570 462
571 if (rval != QLA_SUCCESS) { 463 if (rval != QLA_SUCCESS) {
572 qla_printk(KERN_WARNING, ha, 464 qla_printk(KERN_WARNING, ha,
@@ -1010,7 +902,7 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
1010 goto qla24xx_fw_dump_failed_0; 902 goto qla24xx_fw_dump_failed_0;
1011 903
1012 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram), 904 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1013 fw->ext_mem, &nxt); 905 &nxt);
1014 if (rval != QLA_SUCCESS) 906 if (rval != QLA_SUCCESS)
1015 goto qla24xx_fw_dump_failed_0; 907 goto qla24xx_fw_dump_failed_0;
1016 908
@@ -1318,7 +1210,7 @@ qla25xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
1318 goto qla25xx_fw_dump_failed_0; 1210 goto qla25xx_fw_dump_failed_0;
1319 1211
1320 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram), 1212 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1321 fw->ext_mem, &nxt); 1213 &nxt);
1322 if (rval != QLA_SUCCESS) 1214 if (rval != QLA_SUCCESS)
1323 goto qla25xx_fw_dump_failed_0; 1215 goto qla25xx_fw_dump_failed_0;
1324 1216
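
The qla_dbg.c rework above collapses three near-identical word-at-a-time mailbox polling loops into a single helper that has the firmware DMA whole chunks of RISC RAM into a bounce buffer (the GID list area is borrowed for this), then byte-swaps each chunk into the dump image. A minimal stand-alone sketch of the chunking pattern follows; issue_dump_cmd() is a hypothetical stand-in for the mailbox0..8 handshake and interrupt wait:

#include <stdint.h>

/* stand-in for the kernel's swab32() */
static uint32_t swap32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0x0000ff00) |
	       ((v << 8) & 0x00ff0000) | (v << 24);
}

/* hypothetical: firmware DMAs 'dwords' words at 'addr' into the bounce
 * buffer; in the driver this is the MBC_DUMP_RISC_RAM_EXTENDED command */
static int issue_dump_cmd(uint32_t addr, uint32_t dwords)
{
	(void)addr; (void)dwords;
	return 0;
}

static int dump_ram_chunked(uint32_t addr, uint32_t *ram,
			    uint32_t ram_dwords,
			    uint32_t *bounce, uint32_t bounce_dwords)
{
	uint32_t cnt, idx, dwords = bounce_dwords;
	int rval = 0;

	for (cnt = 0; cnt < ram_dwords && rval == 0;
	     cnt += dwords, addr += dwords) {
		if (cnt + dwords > ram_dwords)
			dwords = ram_dwords - cnt;	/* clamp final chunk */
		rval = issue_dump_cmd(addr, dwords);
		if (rval == 0)
			for (idx = 0; idx < dwords; idx++)
				ram[cnt + idx] = swap32(bounce[idx]);
	}
	return rval;
}

The payoff is one mailbox round-trip per chunk instead of one per word, which is why the 6,000,000-iteration polling loop now appears once rather than three times.
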
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 078f2a15f40b..cf194517400d 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1036,22 +1036,6 @@ struct mid_db_entry_24xx {
1036 uint8_t reserved_1; 1036 uint8_t reserved_1;
1037}; 1037};
1038 1038
1039 /*
1040 * Virtual Fabric ID type definition.
1041 */
1042typedef struct vf_id {
1043 uint16_t id : 12;
1044 uint16_t priority : 4;
1045} vf_id_t;
1046
1047/*
1048 * Virtual Fabric HopCt type definition.
1049 */
1050typedef struct vf_hopct {
1051 uint16_t reserved : 8;
1052 uint16_t hopct : 8;
1053} vf_hopct_t;
1054
1055/* 1039/*
1056 * Virtual Port Control IOCB 1040 * Virtual Port Control IOCB
1057 */ 1041 */
@@ -1082,10 +1066,10 @@ struct vp_ctrl_entry_24xx {
1082 1066
1083 uint8_t vp_idx_map[16]; 1067 uint8_t vp_idx_map[16];
1084 uint16_t flags; 1068 uint16_t flags;
1085 struct vf_id id; 1069 uint16_t id;
1086 uint16_t reserved_4; 1070 uint16_t reserved_4;
1087 struct vf_hopct hopct; 1071 uint16_t hopct;
1088 uint8_t reserved_5[8]; 1072 uint8_t reserved_5[24];
1089}; 1073};
1090 1074
1091/* 1075/*
@@ -1132,9 +1116,9 @@ struct vp_config_entry_24xx {
1132 uint16_t reserved_vp2; 1116 uint16_t reserved_vp2;
1133 uint8_t port_name_idx2[WWN_SIZE]; 1117 uint8_t port_name_idx2[WWN_SIZE];
1134 uint8_t node_name_idx2[WWN_SIZE]; 1118 uint8_t node_name_idx2[WWN_SIZE];
1135 struct vf_id id; 1119 uint16_t id;
1136 uint16_t reserved_4; 1120 uint16_t reserved_4;
1137 struct vf_hopct hopct; 1121 uint16_t hopct;
1138 uint8_t reserved_5; 1122 uint8_t reserved_5;
1139}; 1123};
1140 1124
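
Removing vf_id_t and vf_hopct_t above swaps C bitfields for plain uint16_t members: bitfield layout is implementation-defined, which makes bitfield structs a poor match for data shared with firmware. Decoding then happens with explicit masks and shifts, as in this sketch; the bit positions are assumed from the removed declarations, not taken from a firmware spec:

#include <stdint.h>

#define VF_ID_MASK	0x0fffu		/* assumed: id in the low 12 bits */
#define VF_PRI_SHIFT	12		/* assumed: priority in the high 4 */

static inline uint16_t vf_id(uint16_t raw)
{
	return raw & VF_ID_MASK;
}

static inline uint16_t vf_priority(uint16_t raw)
{
	return raw >> VF_PRI_SHIFT;
}

Note the companion change growing reserved_5 in vp_ctrl_entry_24xx from 8 to 24 bytes, a separate correction that presumably brings the IOCB up to its fixed size.
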
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 76eb4fecce65..f8827068d30f 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -152,10 +152,6 @@ extern int
152qla2x00_issue_iocb(scsi_qla_host_t *, void *, dma_addr_t, size_t); 152qla2x00_issue_iocb(scsi_qla_host_t *, void *, dma_addr_t, size_t);
153 153
154extern int 154extern int
155qla2x00_issue_iocb_timeout(scsi_qla_host_t *, void *, dma_addr_t, size_t,
156 uint32_t);
157
158extern int
159qla2x00_abort_command(scsi_qla_host_t *, srb_t *); 155qla2x00_abort_command(scsi_qla_host_t *, srb_t *);
160 156
161extern int 157extern int
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 750d7ef83aae..4cb80b476c85 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1583,8 +1583,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
1583 eiter->type = __constant_cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE); 1583 eiter->type = __constant_cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
1584 eiter->len = __constant_cpu_to_be16(4 + 4); 1584 eiter->len = __constant_cpu_to_be16(4 + 4);
1585 max_frame_size = IS_FWI2_CAPABLE(ha) ? 1585 max_frame_size = IS_FWI2_CAPABLE(ha) ?
1586 (uint32_t) icb24->frame_payload_size: 1586 le16_to_cpu(icb24->frame_payload_size):
1587 (uint32_t) ha->init_cb->frame_payload_size; 1587 le16_to_cpu(ha->init_cb->frame_payload_size);
1588 eiter->a.max_frame_size = cpu_to_be32(max_frame_size); 1588 eiter->a.max_frame_size = cpu_to_be32(max_frame_size);
1589 size += 4 + 4; 1589 size += 4 + 4;
1590 1590
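
The qla_gs.c fix above matters on big-endian hosts: frame_payload_size is a little-endian firmware field, so the old bare cast reported a byte-swapped frame size to FDMI on anything but a little-endian CPU. For reference, a stand-alone equivalent of what le16_to_cpu() does:

#include <stdint.h>

/* read a little-endian 16-bit field independent of host byte order */
static uint16_t le16_to_host(const uint8_t *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

On little-endian machines this compiles down to a plain load, which is why the bug was invisible on x86.
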
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 01e26087c1dd..bbbc5a632a1d 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -3645,7 +3645,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3645 if (le16_to_cpu(nv->login_timeout) < 4) 3645 if (le16_to_cpu(nv->login_timeout) < 4)
3646 nv->login_timeout = __constant_cpu_to_le16(4); 3646 nv->login_timeout = __constant_cpu_to_le16(4);
3647 ha->login_timeout = le16_to_cpu(nv->login_timeout); 3647 ha->login_timeout = le16_to_cpu(nv->login_timeout);
3648 icb->login_timeout = cpu_to_le16(nv->login_timeout); 3648 icb->login_timeout = nv->login_timeout;
3649 3649
3650 /* Set minimum RATOV to 100 tenths of a second. */ 3650 /* Set minimum RATOV to 100 tenths of a second. */
3651 ha->r_a_tov = 100; 3651 ha->r_a_tov = 100;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 285479b62d8f..5d9a64a7879b 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -409,6 +409,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
409 } 409 }
410 410
411 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags); 411 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
412 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
412 413
413 ha->flags.management_server_logged_in = 0; 414 ha->flags.management_server_logged_in = 0;
414 qla2x00_post_aen_work(ha, FCH_EVT_LIP, mb[1]); 415 qla2x00_post_aen_work(ha, FCH_EVT_LIP, mb[1]);
@@ -454,8 +455,6 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
454 455
455 ha->flags.management_server_logged_in = 0; 456 ha->flags.management_server_logged_in = 0;
456 ha->link_data_rate = PORT_SPEED_UNKNOWN; 457 ha->link_data_rate = PORT_SPEED_UNKNOWN;
457 if (ql2xfdmienable)
458 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
459 qla2x00_post_aen_work(ha, FCH_EVT_LINKDOWN, 0); 458 qla2x00_post_aen_work(ha, FCH_EVT_LINKDOWN, 0);
460 break; 459 break;
461 460
@@ -511,6 +510,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
511 set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 510 set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
512 } 511 }
513 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags); 512 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
513 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
514 514
515 ha->flags.gpsc_supported = 1; 515 ha->flags.gpsc_supported = 1;
516 ha->flags.management_server_logged_in = 0; 516 ha->flags.management_server_logged_in = 0;
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 7d0a8a4c7719..210060420809 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -681,7 +681,7 @@ qla2x00_verify_checksum(scsi_qla_host_t *ha, uint32_t risc_addr)
681 * Context: 681 * Context:
682 * Kernel context. 682 * Kernel context.
683 */ 683 */
684int 684static int
685qla2x00_issue_iocb_timeout(scsi_qla_host_t *ha, void *buffer, 685qla2x00_issue_iocb_timeout(scsi_qla_host_t *ha, void *buffer,
686 dma_addr_t phys_addr, size_t size, uint32_t tov) 686 dma_addr_t phys_addr, size_t size, uint32_t tov)
687{ 687{
@@ -784,7 +784,6 @@ qla2x00_abort_command(scsi_qla_host_t *ha, srb_t *sp)
784 DEBUG2_3_11(printk("qla2x00_abort_command(%ld): failed=%x.\n", 784 DEBUG2_3_11(printk("qla2x00_abort_command(%ld): failed=%x.\n",
785 ha->host_no, rval)); 785 ha->host_no, rval));
786 } else { 786 } else {
787 sp->flags |= SRB_ABORT_PENDING;
788 DEBUG11(printk("qla2x00_abort_command(%ld): done.\n", 787 DEBUG11(printk("qla2x00_abort_command(%ld): done.\n",
789 ha->host_no)); 788 ha->host_no));
790 } 789 }
@@ -1469,7 +1468,7 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1469 lg->port_id[0] = al_pa; 1468 lg->port_id[0] = al_pa;
1470 lg->port_id[1] = area; 1469 lg->port_id[1] = area;
1471 lg->port_id[2] = domain; 1470 lg->port_id[2] = domain;
1472 lg->vp_index = cpu_to_le16(ha->vp_idx); 1471 lg->vp_index = ha->vp_idx;
1473 rval = qla2x00_issue_iocb(ha, lg, lg_dma, 0); 1472 rval = qla2x00_issue_iocb(ha, lg, lg_dma, 0);
1474 if (rval != QLA_SUCCESS) { 1473 if (rval != QLA_SUCCESS) {
1475 DEBUG2_3_11(printk("%s(%ld): failed to issue Login IOCB " 1474 DEBUG2_3_11(printk("%s(%ld): failed to issue Login IOCB "
@@ -1724,7 +1723,7 @@ qla24xx_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1724 lg->port_id[0] = al_pa; 1723 lg->port_id[0] = al_pa;
1725 lg->port_id[1] = area; 1724 lg->port_id[1] = area;
1726 lg->port_id[2] = domain; 1725 lg->port_id[2] = domain;
1727 lg->vp_index = cpu_to_le16(ha->vp_idx); 1726 lg->vp_index = ha->vp_idx;
1728 rval = qla2x00_issue_iocb(ha, lg, lg_dma, 0); 1727 rval = qla2x00_issue_iocb(ha, lg, lg_dma, 0);
1729 if (rval != QLA_SUCCESS) { 1728 if (rval != QLA_SUCCESS) {
1730 DEBUG2_3_11(printk("%s(%ld): failed to issue Logout IOCB " 1729 DEBUG2_3_11(printk("%s(%ld): failed to issue Logout IOCB "
@@ -2210,7 +2209,6 @@ qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
2210 rval = QLA_FUNCTION_FAILED; 2209 rval = QLA_FUNCTION_FAILED;
2211 } else { 2210 } else {
2212 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 2211 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
2213 sp->flags |= SRB_ABORT_PENDING;
2214 } 2212 }
2215 2213
2216 dma_pool_free(ha->s_dma_pool, abt, abt_dma); 2214 dma_pool_free(ha->s_dma_pool, abt, abt_dma);
@@ -2644,12 +2642,11 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *ha,
2644 struct vp_rpt_id_entry_24xx *rptid_entry) 2642 struct vp_rpt_id_entry_24xx *rptid_entry)
2645{ 2643{
2646 uint8_t vp_idx; 2644 uint8_t vp_idx;
2645 uint16_t stat = le16_to_cpu(rptid_entry->vp_idx);
2647 scsi_qla_host_t *vha; 2646 scsi_qla_host_t *vha;
2648 2647
2649 if (rptid_entry->entry_status != 0) 2648 if (rptid_entry->entry_status != 0)
2650 return; 2649 return;
2651 if (rptid_entry->entry_status != __constant_cpu_to_le16(CS_COMPLETE))
2652 return;
2653 2650
2654 if (rptid_entry->format == 0) { 2651 if (rptid_entry->format == 0) {
2655 DEBUG15(printk("%s:format 0 : scsi(%ld) number of VPs setup %d," 2652 DEBUG15(printk("%s:format 0 : scsi(%ld) number of VPs setup %d,"
@@ -2659,17 +2656,17 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *ha,
2659 rptid_entry->port_id[2], rptid_entry->port_id[1], 2656 rptid_entry->port_id[2], rptid_entry->port_id[1],
2660 rptid_entry->port_id[0])); 2657 rptid_entry->port_id[0]));
2661 } else if (rptid_entry->format == 1) { 2658 } else if (rptid_entry->format == 1) {
2662 vp_idx = LSB(rptid_entry->vp_idx); 2659 vp_idx = LSB(stat);
2663 DEBUG15(printk("%s:format 1: scsi(%ld): VP[%d] enabled " 2660 DEBUG15(printk("%s:format 1: scsi(%ld): VP[%d] enabled "
2664 "- status %d - " 2661 "- status %d - "
2665 "with port id %02x%02x%02x\n",__func__,ha->host_no, 2662 "with port id %02x%02x%02x\n",__func__,ha->host_no,
2666 vp_idx, MSB(rptid_entry->vp_idx), 2663 vp_idx, MSB(stat),
2667 rptid_entry->port_id[2], rptid_entry->port_id[1], 2664 rptid_entry->port_id[2], rptid_entry->port_id[1],
2668 rptid_entry->port_id[0])); 2665 rptid_entry->port_id[0]));
2669 if (vp_idx == 0) 2666 if (vp_idx == 0)
2670 return; 2667 return;
2671 2668
2672 if (MSB(rptid_entry->vp_idx) == 1) 2669 if (MSB(stat) == 1)
2673 return; 2670 return;
2674 2671
2675 list_for_each_entry(vha, &ha->vp_list, vp_list) 2672 list_for_each_entry(vha, &ha->vp_list, vp_list)
@@ -2982,8 +2979,8 @@ qla84xx_verify_chip(struct scsi_qla_host *ha, uint16_t *status)
2982 /* We update the firmware with only one data sequence. */ 2979 /* We update the firmware with only one data sequence. */
2983 options |= VCO_END_OF_DATA; 2980 options |= VCO_END_OF_DATA;
2984 2981
2985 retry = 0;
2986 do { 2982 do {
2983 retry = 0;
2987 memset(mn, 0, sizeof(*mn)); 2984 memset(mn, 0, sizeof(*mn));
2988 mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE; 2985 mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
2989 mn->p.req.entry_count = 1; 2986 mn->p.req.entry_count = 1;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8b33b163b1d4..3223fd16bcfe 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -67,7 +67,7 @@ static void qla2x00_free_device(scsi_qla_host_t *);
67 67
68static void qla2x00_config_dma_addressing(scsi_qla_host_t *ha); 68static void qla2x00_config_dma_addressing(scsi_qla_host_t *ha);
69 69
70int ql2xfdmienable; 70int ql2xfdmienable=1;
71module_param(ql2xfdmienable, int, S_IRUGO|S_IRUSR); 71module_param(ql2xfdmienable, int, S_IRUGO|S_IRUSR);
72MODULE_PARM_DESC(ql2xfdmienable, 72MODULE_PARM_DESC(ql2xfdmienable,
73 "Enables FDMI registratons " 73 "Enables FDMI registratons "
@@ -2135,7 +2135,7 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
2135 kfree(ha->nvram); 2135 kfree(ha->nvram);
2136} 2136}
2137 2137
2138struct qla_work_evt * 2138static struct qla_work_evt *
2139qla2x00_alloc_work(struct scsi_qla_host *ha, enum qla_work_type type, 2139qla2x00_alloc_work(struct scsi_qla_host *ha, enum qla_work_type type,
2140 int locked) 2140 int locked)
2141{ 2141{
@@ -2152,7 +2152,7 @@ qla2x00_alloc_work(struct scsi_qla_host *ha, enum qla_work_type type,
2152 return e; 2152 return e;
2153} 2153}
2154 2154
2155int 2155static int
2156qla2x00_post_work(struct scsi_qla_host *ha, struct qla_work_evt *e, int locked) 2156qla2x00_post_work(struct scsi_qla_host *ha, struct qla_work_evt *e, int locked)
2157{ 2157{
2158 unsigned long flags; 2158 unsigned long flags;
@@ -2373,7 +2373,7 @@ qla2x00_do_dpc(void *data)
2373 } else { 2373 } else {
2374 fcport->login_retry = 0; 2374 fcport->login_retry = 0;
2375 } 2375 }
2376 if (fcport->login_retry == 0) 2376 if (fcport->login_retry == 0 && status != QLA_SUCCESS)
2377 fcport->loop_id = FC_NO_LOOP_ID; 2377 fcport->loop_id = FC_NO_LOOP_ID;
2378 } 2378 }
2379 if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) 2379 if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
@@ -2599,6 +2599,10 @@ qla2x00_timer(scsi_qla_host_t *ha)
2599 start_dpc++; 2599 start_dpc++;
2600 } 2600 }
2601 2601
2602 /* Process any deferred work. */
2603 if (!list_empty(&ha->work_list))
2604 start_dpc++;
2605
2602 /* Schedule the DPC routine if needed */ 2606 /* Schedule the DPC routine if needed */
2603 if ((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || 2607 if ((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) ||
2604 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || 2608 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) ||
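
The timer hunk above closes a small scheduling gap: events queued on ha->work_list by the qla2x00_post_*_work() helpers could otherwise sit until some unrelated DPC flag happened to wake the worker thread. The shape of the check, reduced to its essentials (names mirror the driver's; the wakeup helper is assumed):

#include <linux/list.h>

struct example_ha {
	struct list_head work_list;	/* deferred events */
};

static void wake_dpc(struct example_ha *ha);	/* assumed wakeup helper */

/* periodic timer body */
static void example_timer(struct example_ha *ha)
{
	int start_dpc = 0;

	/* ... other periodic checks may set start_dpc ... */

	if (!list_empty(&ha->work_list))
		start_dpc++;	/* deferred work pending */

	if (start_dpc)
		wake_dpc(ha);
}
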
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index f42f17acf2cf..afeae2bfe7eb 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.02.01-k1" 10#define QLA2XXX_VERSION "8.02.01-k2"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 2 13#define QLA_DRIVER_MINOR_VER 2
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 3f34e9376b0a..b33e72516ef8 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -121,6 +121,7 @@ extern struct scsi_transport_template blank_transport_template;
121extern void __scsi_remove_device(struct scsi_device *); 121extern void __scsi_remove_device(struct scsi_device *);
122 122
123extern struct bus_type scsi_bus_type; 123extern struct bus_type scsi_bus_type;
124extern struct attribute_group *scsi_sysfs_shost_attr_groups[];
124 125
125/* scsi_netlink.c */ 126/* scsi_netlink.c */
126#ifdef CONFIG_SCSI_NETLINK 127#ifdef CONFIG_SCSI_NETLINK
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index ed395154a5b1..3a1c99d5c775 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -190,10 +190,14 @@ void scsi_proc_host_rm(struct Scsi_Host *shost)
190 */ 190 */
191static int proc_print_scsidevice(struct device *dev, void *data) 191static int proc_print_scsidevice(struct device *dev, void *data)
192{ 192{
193 struct scsi_device *sdev = to_scsi_device(dev); 193 struct scsi_device *sdev;
194 struct seq_file *s = data; 194 struct seq_file *s = data;
195 int i; 195 int i;
196 196
197 if (!scsi_is_sdev_device(dev))
198 goto out;
199
200 sdev = to_scsi_device(dev);
197 seq_printf(s, 201 seq_printf(s,
198 "Host: scsi%d Channel: %02d Id: %02d Lun: %02d\n Vendor: ", 202 "Host: scsi%d Channel: %02d Id: %02d Lun: %02d\n Vendor: ",
199 sdev->host->host_no, sdev->channel, sdev->id, sdev->lun); 203 sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
@@ -230,6 +234,7 @@ static int proc_print_scsidevice(struct device *dev, void *data)
230 else 234 else
231 seq_printf(s, "\n"); 235 seq_printf(s, "\n");
232 236
237out:
233 return 0; 238 return 0;
234} 239}
235 240
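
The guard added to proc_print_scsidevice() above is required because, with the scsi_scan.c changes later in this patch, scsi_target devices now live on the same bus as scsi_device objects, so any bus-wide iteration callback must type-check before downcasting. The same pattern recurs in scsi_bus_match(), scsi_bus_suspend(), and scsi_bus_resume() below. A reduced illustration of the idiom, with toy types:

#include <stddef.h>

struct dev_type { const char *name; };
static const struct dev_type sdev_type = { "scsi_device" };

struct device { const struct dev_type *type; };
struct sdev   { struct device dev; int lun; };	/* device is first member */

static int bus_callback(struct device *dev, void *data)
{
	struct sdev *sdev;

	(void)data;
	if (dev->type != &sdev_type)
		return 0;	/* a target or other sibling: skip it */

	sdev = (struct sdev *)dev;	/* downcast is safe only now */
	return sdev->lun >= 0;
}
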
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index e67c14e31bab..fcd7455ffc39 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -322,6 +322,21 @@ out:
322 return NULL; 322 return NULL;
323} 323}
324 324
325static void scsi_target_destroy(struct scsi_target *starget)
326{
327 struct device *dev = &starget->dev;
328 struct Scsi_Host *shost = dev_to_shost(dev->parent);
329 unsigned long flags;
330
331 transport_destroy_device(dev);
332 spin_lock_irqsave(shost->host_lock, flags);
333 if (shost->hostt->target_destroy)
334 shost->hostt->target_destroy(starget);
335 list_del_init(&starget->siblings);
336 spin_unlock_irqrestore(shost->host_lock, flags);
337 put_device(dev);
338}
339
325static void scsi_target_dev_release(struct device *dev) 340static void scsi_target_dev_release(struct device *dev)
326{ 341{
327 struct device *parent = dev->parent; 342 struct device *parent = dev->parent;
@@ -331,9 +346,14 @@ static void scsi_target_dev_release(struct device *dev)
331 put_device(parent); 346 put_device(parent);
332} 347}
333 348
349struct device_type scsi_target_type = {
350 .name = "scsi_target",
351 .release = scsi_target_dev_release,
352};
353
334int scsi_is_target_device(const struct device *dev) 354int scsi_is_target_device(const struct device *dev)
335{ 355{
336 return dev->release == scsi_target_dev_release; 356 return dev->type == &scsi_target_type;
337} 357}
338EXPORT_SYMBOL(scsi_is_target_device); 358EXPORT_SYMBOL(scsi_is_target_device);
339 359
@@ -391,14 +411,17 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
391 device_initialize(dev); 411 device_initialize(dev);
392 starget->reap_ref = 1; 412 starget->reap_ref = 1;
393 dev->parent = get_device(parent); 413 dev->parent = get_device(parent);
394 dev->release = scsi_target_dev_release;
395 sprintf(dev->bus_id, "target%d:%d:%d", 414 sprintf(dev->bus_id, "target%d:%d:%d",
396 shost->host_no, channel, id); 415 shost->host_no, channel, id);
416#ifndef CONFIG_SYSFS_DEPRECATED
417 dev->bus = &scsi_bus_type;
418#endif
419 dev->type = &scsi_target_type;
397 starget->id = id; 420 starget->id = id;
398 starget->channel = channel; 421 starget->channel = channel;
399 INIT_LIST_HEAD(&starget->siblings); 422 INIT_LIST_HEAD(&starget->siblings);
400 INIT_LIST_HEAD(&starget->devices); 423 INIT_LIST_HEAD(&starget->devices);
401 starget->state = STARGET_RUNNING; 424 starget->state = STARGET_CREATED;
402 starget->scsi_level = SCSI_2; 425 starget->scsi_level = SCSI_2;
403 retry: 426 retry:
404 spin_lock_irqsave(shost->host_lock, flags); 427 spin_lock_irqsave(shost->host_lock, flags);
@@ -411,18 +434,6 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
411 spin_unlock_irqrestore(shost->host_lock, flags); 434 spin_unlock_irqrestore(shost->host_lock, flags);
412 /* allocate and add */ 435 /* allocate and add */
413 transport_setup_device(dev); 436 transport_setup_device(dev);
414 error = device_add(dev);
415 if (error) {
416 dev_err(dev, "target device_add failed, error %d\n", error);
417 spin_lock_irqsave(shost->host_lock, flags);
418 list_del_init(&starget->siblings);
419 spin_unlock_irqrestore(shost->host_lock, flags);
420 transport_destroy_device(dev);
421 put_device(parent);
422 kfree(starget);
423 return NULL;
424 }
425 transport_add_device(dev);
426 if (shost->hostt->target_alloc) { 437 if (shost->hostt->target_alloc) {
427 error = shost->hostt->target_alloc(starget); 438 error = shost->hostt->target_alloc(starget);
428 439
@@ -430,9 +441,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
430 dev_printk(KERN_ERR, dev, "target allocation failed, error %d\n", error); 441 dev_printk(KERN_ERR, dev, "target allocation failed, error %d\n", error);
431 /* don't want scsi_target_reap to do the final 442 /* don't want scsi_target_reap to do the final
432 * put because it will be under the host lock */ 443 * put because it will be under the host lock */
433 get_device(dev); 444 scsi_target_destroy(starget);
434 scsi_target_reap(starget);
435 put_device(dev);
436 return NULL; 445 return NULL;
437 } 446 }
438 } 447 }
@@ -459,18 +468,10 @@ static void scsi_target_reap_usercontext(struct work_struct *work)
459{ 468{
460 struct scsi_target *starget = 469 struct scsi_target *starget =
461 container_of(work, struct scsi_target, ew.work); 470 container_of(work, struct scsi_target, ew.work);
462 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
463 unsigned long flags;
464 471
465 transport_remove_device(&starget->dev); 472 transport_remove_device(&starget->dev);
466 device_del(&starget->dev); 473 device_del(&starget->dev);
467 transport_destroy_device(&starget->dev); 474 scsi_target_destroy(starget);
468 spin_lock_irqsave(shost->host_lock, flags);
469 if (shost->hostt->target_destroy)
470 shost->hostt->target_destroy(starget);
471 list_del_init(&starget->siblings);
472 spin_unlock_irqrestore(shost->host_lock, flags);
473 put_device(&starget->dev);
474} 475}
475 476
476/** 477/**
@@ -485,21 +486,25 @@ void scsi_target_reap(struct scsi_target *starget)
485{ 486{
486 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 487 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
487 unsigned long flags; 488 unsigned long flags;
489 enum scsi_target_state state;
490 int empty;
488 491
489 spin_lock_irqsave(shost->host_lock, flags); 492 spin_lock_irqsave(shost->host_lock, flags);
493 state = starget->state;
494 empty = --starget->reap_ref == 0 &&
495 list_empty(&starget->devices) ? 1 : 0;
496 spin_unlock_irqrestore(shost->host_lock, flags);
490 497
491 if (--starget->reap_ref == 0 && list_empty(&starget->devices)) { 498 if (!empty)
492 BUG_ON(starget->state == STARGET_DEL);
493 starget->state = STARGET_DEL;
494 spin_unlock_irqrestore(shost->host_lock, flags);
495 execute_in_process_context(scsi_target_reap_usercontext,
496 &starget->ew);
497 return; 499 return;
498 500
499 } 501 BUG_ON(state == STARGET_DEL);
500 spin_unlock_irqrestore(shost->host_lock, flags); 502 starget->state = STARGET_DEL;
501 503 if (state == STARGET_CREATED)
502 return; 504 scsi_target_destroy(starget);
505 else
506 execute_in_process_context(scsi_target_reap_usercontext,
507 &starget->ew);
503} 508}
504 509
505/** 510/**
@@ -1048,8 +1053,9 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
1048 scsi_inq_str(vend, result, 8, 16), 1053 scsi_inq_str(vend, result, 8, 16),
1049 scsi_inq_str(mod, result, 16, 32)); 1054 scsi_inq_str(mod, result, 16, 32));
1050 }); 1055 });
1056
1051 } 1057 }
1052 1058
1053 res = SCSI_SCAN_TARGET_PRESENT; 1059 res = SCSI_SCAN_TARGET_PRESENT;
1054 goto out_free_result; 1060 goto out_free_result;
1055 } 1061 }
@@ -1489,7 +1495,6 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
1489 if (scsi_host_scan_allowed(shost)) 1495 if (scsi_host_scan_allowed(shost))
1490 scsi_probe_and_add_lun(starget, lun, NULL, &sdev, 1, hostdata); 1496 scsi_probe_and_add_lun(starget, lun, NULL, &sdev, 1, hostdata);
1491 mutex_unlock(&shost->scan_mutex); 1497 mutex_unlock(&shost->scan_mutex);
1492 transport_configure_device(&starget->dev);
1493 scsi_target_reap(starget); 1498 scsi_target_reap(starget);
1494 put_device(&starget->dev); 1499 put_device(&starget->dev);
1495 1500
@@ -1570,7 +1575,6 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
1570 out_reap: 1575 out_reap:
1571 /* now determine if the target has any children at all 1576 /* now determine if the target has any children at all
1572 * and if not, nuke it */ 1577 * and if not, nuke it */
1573 transport_configure_device(&starget->dev);
1574 scsi_target_reap(starget); 1578 scsi_target_reap(starget);
1575 1579
1576 put_device(&starget->dev); 1580 put_device(&starget->dev);
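
The scsi_scan.c rework above defers device_add() on a target until its first LUN is actually registered: targets are now born in STARGET_CREATED, and one that never gains children is torn down directly by scsi_target_destroy() rather than running device_del() on a device that was never added. A compressed sketch of the resulting lifecycle, with the sysfs calls reduced to comments:

#include <assert.h>

enum tgt_state { TGT_CREATED, TGT_RUNNING, TGT_DEL };

struct tgt {
	enum tgt_state state;
	int reap_ref;
	int ndevs;	/* child LUNs */
};

/* first child sdev arrives: the target becomes visible */
static void tgt_make_visible(struct tgt *t)
{
	if (t->state != TGT_CREATED)
		return;
	/* device_add() + transport_add_device() go here */
	t->state = TGT_RUNNING;
}

static void tgt_reap(struct tgt *t)
{
	int was_created;

	if (--t->reap_ref != 0 || t->ndevs != 0)
		return;

	assert(t->state != TGT_DEL);
	was_created = (t->state == TGT_CREATED);
	t->state = TGT_DEL;
	if (was_created) {
		/* never made visible: free immediately */
	} else {
		/* visible: device_del() and release must run in
		 * process context, so hand off to a workqueue */
	}
}

Sampling reap_ref and state under the host lock, then acting outside it, mirrors the locking split the patch makes in scsi_target_reap().
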
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 67bb20ed45d2..049103f1d16f 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -21,6 +21,8 @@
21#include "scsi_priv.h" 21#include "scsi_priv.h"
22#include "scsi_logging.h" 22#include "scsi_logging.h"
23 23
24static struct device_type scsi_dev_type;
25
24static const struct { 26static const struct {
25 enum scsi_device_state value; 27 enum scsi_device_state value;
26 char *name; 28 char *name;
@@ -249,18 +251,27 @@ shost_rd_attr(sg_tablesize, "%hu\n");
249shost_rd_attr(unchecked_isa_dma, "%d\n"); 251shost_rd_attr(unchecked_isa_dma, "%d\n");
250shost_rd_attr2(proc_name, hostt->proc_name, "%s\n"); 252shost_rd_attr2(proc_name, hostt->proc_name, "%s\n");
251 253
252static struct device_attribute *scsi_sysfs_shost_attrs[] = { 254static struct attribute *scsi_sysfs_shost_attrs[] = {
253 &dev_attr_unique_id, 255 &dev_attr_unique_id.attr,
254 &dev_attr_host_busy, 256 &dev_attr_host_busy.attr,
255 &dev_attr_cmd_per_lun, 257 &dev_attr_cmd_per_lun.attr,
256 &dev_attr_can_queue, 258 &dev_attr_can_queue.attr,
257 &dev_attr_sg_tablesize, 259 &dev_attr_sg_tablesize.attr,
258 &dev_attr_unchecked_isa_dma, 260 &dev_attr_unchecked_isa_dma.attr,
259 &dev_attr_proc_name, 261 &dev_attr_proc_name.attr,
260 &dev_attr_scan, 262 &dev_attr_scan.attr,
261 &dev_attr_hstate, 263 &dev_attr_hstate.attr,
262 &dev_attr_supported_mode, 264 &dev_attr_supported_mode.attr,
263 &dev_attr_active_mode, 265 &dev_attr_active_mode.attr,
266 NULL
267};
268
269struct attribute_group scsi_shost_attr_group = {
270 .attrs = scsi_sysfs_shost_attrs,
271};
272
273struct attribute_group *scsi_sysfs_shost_attr_groups[] = {
274 &scsi_shost_attr_group,
264 NULL 275 NULL
265}; 276};
266 277
@@ -335,7 +346,12 @@ static struct class sdev_class = {
335/* all probing is done in the individual ->probe routines */ 346/* all probing is done in the individual ->probe routines */
336static int scsi_bus_match(struct device *dev, struct device_driver *gendrv) 347static int scsi_bus_match(struct device *dev, struct device_driver *gendrv)
337{ 348{
338 struct scsi_device *sdp = to_scsi_device(dev); 349 struct scsi_device *sdp;
350
351 if (dev->type != &scsi_dev_type)
352 return 0;
353
354 sdp = to_scsi_device(dev);
339 if (sdp->no_uld_attach) 355 if (sdp->no_uld_attach)
340 return 0; 356 return 0;
341 return (sdp->inq_periph_qual == SCSI_INQ_PQ_CON)? 1: 0; 357 return (sdp->inq_periph_qual == SCSI_INQ_PQ_CON)? 1: 0;
@@ -351,10 +367,16 @@ static int scsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
351 367
352static int scsi_bus_suspend(struct device * dev, pm_message_t state) 368static int scsi_bus_suspend(struct device * dev, pm_message_t state)
353{ 369{
354 struct device_driver *drv = dev->driver; 370 struct device_driver *drv;
355 struct scsi_device *sdev = to_scsi_device(dev); 371 struct scsi_device *sdev;
356 int err; 372 int err;
357 373
374 if (dev->type != &scsi_dev_type)
375 return 0;
376
377 drv = dev->driver;
378 sdev = to_scsi_device(dev);
379
358 err = scsi_device_quiesce(sdev); 380 err = scsi_device_quiesce(sdev);
359 if (err) 381 if (err)
360 return err; 382 return err;
@@ -370,10 +392,16 @@ static int scsi_bus_suspend(struct device * dev, pm_message_t state)
370 392
371static int scsi_bus_resume(struct device * dev) 393static int scsi_bus_resume(struct device * dev)
372{ 394{
373 struct device_driver *drv = dev->driver; 395 struct device_driver *drv;
374 struct scsi_device *sdev = to_scsi_device(dev); 396 struct scsi_device *sdev;
375 int err = 0; 397 int err = 0;
376 398
399 if (dev->type != &scsi_dev_type)
400 return 0;
401
402 drv = dev->driver;
403 sdev = to_scsi_device(dev);
404
377 if (drv && drv->resume) 405 if (drv && drv->resume)
378 err = drv->resume(dev); 406 err = drv->resume(dev);
379 407
@@ -781,6 +809,27 @@ sdev_store_queue_type_rw(struct device *dev, struct device_attribute *attr,
781 return count; 809 return count;
782} 810}
783 811
812static int scsi_target_add(struct scsi_target *starget)
813{
814 int error;
815
816 if (starget->state != STARGET_CREATED)
817 return 0;
818
819 error = device_add(&starget->dev);
820 if (error) {
821 dev_err(&starget->dev, "target device_add failed, error %d\n", error);
822 get_device(&starget->dev);
823 scsi_target_reap(starget);
824 put_device(&starget->dev);
825 return error;
826 }
827 transport_add_device(&starget->dev);
828 starget->state = STARGET_RUNNING;
829
830 return 0;
831}
832
784static struct device_attribute sdev_attr_queue_type_rw = 833static struct device_attribute sdev_attr_queue_type_rw =
785 __ATTR(queue_type, S_IRUGO | S_IWUSR, show_queue_type_field, 834 __ATTR(queue_type, S_IRUGO | S_IWUSR, show_queue_type_field,
786 sdev_store_queue_type_rw); 835 sdev_store_queue_type_rw);
@@ -796,10 +845,16 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
796{ 845{
797 int error, i; 846 int error, i;
798 struct request_queue *rq = sdev->request_queue; 847 struct request_queue *rq = sdev->request_queue;
848 struct scsi_target *starget = sdev->sdev_target;
799 849
800 if ((error = scsi_device_set_state(sdev, SDEV_RUNNING)) != 0) 850 if ((error = scsi_device_set_state(sdev, SDEV_RUNNING)) != 0)
801 return error; 851 return error;
802 852
853 error = scsi_target_add(starget);
854 if (error)
855 return error;
856
857 transport_configure_device(&starget->dev);
803 error = device_add(&sdev->sdev_gendev); 858 error = device_add(&sdev->sdev_gendev);
804 if (error) { 859 if (error) {
805 put_device(sdev->sdev_gendev.parent); 860 put_device(sdev->sdev_gendev.parent);
@@ -834,7 +889,7 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
834 goto out; 889 goto out;
835 } 890 }
836 891
837 error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL); 892 error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL);
838 893
839 if (error) 894 if (error)
840 sdev_printk(KERN_INFO, sdev, 895 sdev_printk(KERN_INFO, sdev,
@@ -971,44 +1026,6 @@ int scsi_register_interface(struct class_interface *intf)
971} 1026}
972EXPORT_SYMBOL(scsi_register_interface); 1027EXPORT_SYMBOL(scsi_register_interface);
973 1028
974
975static struct device_attribute *class_attr_overridden(
976 struct device_attribute **attrs,
977 struct device_attribute *attr)
978{
979 int i;
980
981 if (!attrs)
982 return NULL;
983 for (i = 0; attrs[i]; i++)
984 if (!strcmp(attrs[i]->attr.name, attr->attr.name))
985 return attrs[i];
986 return NULL;
987}
988
989static int class_attr_add(struct device *classdev,
990 struct device_attribute *attr)
991{
992 struct device_attribute *base_attr;
993
994 /*
995 * Spare the caller from having to copy things it's not interested in.
996 */
997 base_attr = class_attr_overridden(scsi_sysfs_shost_attrs, attr);
998 if (base_attr) {
999 /* extend permissions */
1000 attr->attr.mode |= base_attr->attr.mode;
1001
1002 /* override null show/store with default */
1003 if (!attr->show)
1004 attr->show = base_attr->show;
1005 if (!attr->store)
1006 attr->store = base_attr->store;
1007 }
1008
1009 return device_create_file(classdev, attr);
1010}
1011
1012/** 1029/**
1013 * scsi_sysfs_add_host - add scsi host to subsystem 1030 * scsi_sysfs_add_host - add scsi host to subsystem
1014 * @shost: scsi host struct to add to subsystem 1031 * @shost: scsi host struct to add to subsystem
@@ -1018,20 +1035,11 @@ int scsi_sysfs_add_host(struct Scsi_Host *shost)
1018{ 1035{
1019 int error, i; 1036 int error, i;
1020 1037
1038 /* add host specific attributes */
1021 if (shost->hostt->shost_attrs) { 1039 if (shost->hostt->shost_attrs) {
1022 for (i = 0; shost->hostt->shost_attrs[i]; i++) { 1040 for (i = 0; shost->hostt->shost_attrs[i]; i++) {
1023 error = class_attr_add(&shost->shost_dev,
1024 shost->hostt->shost_attrs[i]);
1025 if (error)
1026 return error;
1027 }
1028 }
1029
1030 for (i = 0; scsi_sysfs_shost_attrs[i]; i++) {
1031 if (!class_attr_overridden(shost->hostt->shost_attrs,
1032 scsi_sysfs_shost_attrs[i])) {
1033 error = device_create_file(&shost->shost_dev, 1041 error = device_create_file(&shost->shost_dev,
1034 scsi_sysfs_shost_attrs[i]); 1042 shost->hostt->shost_attrs[i]);
1035 if (error) 1043 if (error)
1036 return error; 1044 return error;
1037 } 1045 }
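
The scsi_sysfs.c hunks above convert the host attribute list from an array of device_attribute pointers, created one device_create_file() at a time with the class_attr_overridden() merge logic, into an attribute_group that the core registers and removes as a unit. The minimal shape of such a conversion, with a single illustrative attribute:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", 0);
}
static DEVICE_ATTR(example, S_IRUGO, example_show, NULL);

static struct attribute *example_attrs[] = {
	&dev_attr_example.attr,	/* note: the embedded .attr, not the wrapper */
	NULL			/* array must be NULL-terminated */
};

static struct attribute_group example_attr_group = {
	.attrs = example_attrs,
};

static struct attribute_group *example_groups[] = {
	&example_attr_group,
	NULL
};

The scsi_sysfs_shost_attr_groups[] array exported through scsi_priv.h is evidently the hook for attaching the group before device_add(); the trade-off is losing per-driver attribute override, which is why scsi_sysfs_add_host() shrinks to a plain device_create_file() loop over the driver's own attributes.
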
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 6b092a6c295d..5fd64e70029d 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -1961,12 +1961,17 @@ fc_timed_out(struct scsi_cmnd *scmd)
1961} 1961}
1962 1962
1963/* 1963/*
1964 * Must be called with shost->host_lock held 1964 * Called by fc_user_scan to locate an rport on the shost that
1965 * matches the channel and target id, and invoke scsi_scan_target()
1966 * on the rport.
1965 */ 1967 */
1966static int fc_user_scan(struct Scsi_Host *shost, uint channel, 1968static void
1967 uint id, uint lun) 1969fc_user_scan_tgt(struct Scsi_Host *shost, uint channel, uint id, uint lun)
1968{ 1970{
1969 struct fc_rport *rport; 1971 struct fc_rport *rport;
1972 unsigned long flags;
1973
1974 spin_lock_irqsave(shost->host_lock, flags);
1970 1975
1971 list_for_each_entry(rport, &fc_host_rports(shost), peers) { 1976 list_for_each_entry(rport, &fc_host_rports(shost), peers) {
1972 if (rport->scsi_target_id == -1) 1977 if (rport->scsi_target_id == -1)
@@ -1975,13 +1980,54 @@ static int fc_user_scan(struct Scsi_Host *shost, uint channel,
1975 if (rport->port_state != FC_PORTSTATE_ONLINE) 1980 if (rport->port_state != FC_PORTSTATE_ONLINE)
1976 continue; 1981 continue;
1977 1982
1978 if ((channel == SCAN_WILD_CARD || channel == rport->channel) && 1983 if ((channel == rport->channel) &&
1979 (id == SCAN_WILD_CARD || id == rport->scsi_target_id)) { 1984 (id == rport->scsi_target_id)) {
1980 scsi_scan_target(&rport->dev, rport->channel, 1985 spin_unlock_irqrestore(shost->host_lock, flags);
1981 rport->scsi_target_id, lun, 1); 1986 scsi_scan_target(&rport->dev, channel, id, lun, 1);
1987 return;
1982 } 1988 }
1983 } 1989 }
1984 1990
1991 spin_unlock_irqrestore(shost->host_lock, flags);
1992}
1993
1994/*
1995 * Called via sysfs scan routines. Necessary, as the FC transport
1996 * wants to place all target objects below the rport object. So this
1997 * routine must invoke the scsi_scan_target() routine with the rport
1998 * object as the parent.
1999 */
2000static int
2001fc_user_scan(struct Scsi_Host *shost, uint channel, uint id, uint lun)
2002{
2003 uint chlo, chhi;
2004 uint tgtlo, tgthi, tgt;
2005
2006 if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
2007 ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
2008 ((lun != SCAN_WILD_CARD) && (lun > shost->max_lun)))
2009 return -EINVAL;
2010
2011 if (channel == SCAN_WILD_CARD) {
2012 chlo = 0;
2013 chhi = shost->max_channel + 1;
2014 } else {
2015 chlo = channel;
2016 chhi = channel + 1;
2017 }
2018
2019 if (id == SCAN_WILD_CARD) {
2020 tgtlo = 0;
2021 tgthi = shost->max_id;
2022 } else {
2023 tgtlo = id;
2024 tgthi = id + 1;
2025 }
2026
2027 for ( ; chlo < chhi; chlo++)
2028 for (tgt = tgtlo; tgt < tgthi; tgt++)
2029 fc_user_scan_tgt(shost, chlo, tgt, lun);
2030
1985 return 0; 2031 return 0;
1986} 2032}
1987 2033
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 27ec625ab771..7899e3dda9bf 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -192,6 +192,16 @@ static void sas_non_host_smp_request(struct request_queue *q)
192 sas_smp_request(q, rphy_to_shost(rphy), rphy); 192 sas_smp_request(q, rphy_to_shost(rphy), rphy);
193} 193}
194 194
195static void sas_host_release(struct device *dev)
196{
197 struct Scsi_Host *shost = dev_to_shost(dev);
198 struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
199 struct request_queue *q = sas_host->q;
200
201 if (q)
202 blk_cleanup_queue(q);
203}
204
195static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy) 205static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
196{ 206{
197 struct request_queue *q; 207 struct request_queue *q;
@@ -199,6 +209,7 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
199 struct device *dev; 209 struct device *dev;
200 char namebuf[BUS_ID_SIZE]; 210 char namebuf[BUS_ID_SIZE];
201 const char *name; 211 const char *name;
212 void (*release)(struct device *);
202 213
203 if (!to_sas_internal(shost->transportt)->f->smp_handler) { 214 if (!to_sas_internal(shost->transportt)->f->smp_handler) {
204 printk("%s can't handle SMP requests\n", shost->hostt->name); 215 printk("%s can't handle SMP requests\n", shost->hostt->name);
@@ -209,17 +220,19 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
209 q = blk_init_queue(sas_non_host_smp_request, NULL); 220 q = blk_init_queue(sas_non_host_smp_request, NULL);
210 dev = &rphy->dev; 221 dev = &rphy->dev;
211 name = dev->bus_id; 222 name = dev->bus_id;
223 release = NULL;
212 } else { 224 } else {
213 q = blk_init_queue(sas_host_smp_request, NULL); 225 q = blk_init_queue(sas_host_smp_request, NULL);
214 dev = &shost->shost_gendev; 226 dev = &shost->shost_gendev;
215 snprintf(namebuf, sizeof(namebuf), 227 snprintf(namebuf, sizeof(namebuf),
216 "sas_host%d", shost->host_no); 228 "sas_host%d", shost->host_no);
217 name = namebuf; 229 name = namebuf;
230 release = sas_host_release;
218 } 231 }
219 if (!q) 232 if (!q)
220 return -ENOMEM; 233 return -ENOMEM;
221 234
222 error = bsg_register_queue(q, dev, name); 235 error = bsg_register_queue(q, dev, name, release);
223 if (error) { 236 if (error) {
224 blk_cleanup_queue(q); 237 blk_cleanup_queue(q);
225 return -ENOMEM; 238 return -ENOMEM;
@@ -253,7 +266,6 @@ static void sas_bsg_remove(struct Scsi_Host *shost, struct sas_rphy *rphy)
253 return; 266 return;
254 267
255 bsg_unregister_queue(q); 268 bsg_unregister_queue(q);
256 blk_cleanup_queue(q);
257} 269}
258 270
259/* 271/*
@@ -1301,6 +1313,9 @@ static void sas_expander_release(struct device *dev)
1301 struct sas_rphy *rphy = dev_to_rphy(dev); 1313 struct sas_rphy *rphy = dev_to_rphy(dev);
1302 struct sas_expander_device *edev = rphy_to_expander_device(rphy); 1314 struct sas_expander_device *edev = rphy_to_expander_device(rphy);
1303 1315
1316 if (rphy->q)
1317 blk_cleanup_queue(rphy->q);
1318
1304 put_device(dev->parent); 1319 put_device(dev->parent);
1305 kfree(edev); 1320 kfree(edev);
1306} 1321}
@@ -1310,6 +1325,9 @@ static void sas_end_device_release(struct device *dev)
1310 struct sas_rphy *rphy = dev_to_rphy(dev); 1325 struct sas_rphy *rphy = dev_to_rphy(dev);
1311 struct sas_end_device *edev = rphy_to_end_device(rphy); 1326 struct sas_end_device *edev = rphy_to_end_device(rphy);
1312 1327
1328 if (rphy->q)
1329 blk_cleanup_queue(rphy->q);
1330
1313 put_device(dev->parent); 1331 put_device(dev->parent);
1314 kfree(edev); 1332 kfree(edev);
1315} 1333}
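
The scsi_transport_sas changes above move blk_cleanup_queue() out of sas_bsg_remove() and into the owning device's release path (sas_host_release() plus the two rphy release functions): bsg can still hold a reference to the queue after the rphy is unregistered, so the queue must outlive the unregister and die with the device. The general pattern:

#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/slab.h>

/* illustrative container pairing a device with its request queue */
struct example_port {
	struct device dev;
	struct request_queue *q;
};

static void example_release(struct device *dev)
{
	struct example_port *p = container_of(dev, struct example_port, dev);

	/* safe here: release only runs after the final put_device(),
	 * so no user can still be submitting through the queue */
	if (p->q)
		blk_cleanup_queue(p->q);
	kfree(p);
}

This is also why bsg_register_queue() grows a release argument in this series: the host queue hangs off shost_gendev, whose release the SAS transport does not own, so sas_host_release() is handed to bsg to run instead.
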
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index bc12b5d5d676..75a64a6cae8c 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -24,6 +24,7 @@
24#include <linux/workqueue.h> 24#include <linux/workqueue.h>
25#include <linux/blkdev.h> 25#include <linux/blkdev.h>
26#include <linux/mutex.h> 26#include <linux/mutex.h>
27#include <linux/sysfs.h>
27#include <scsi/scsi.h> 28#include <scsi/scsi.h>
28#include "scsi_priv.h" 29#include "scsi_priv.h"
29#include <scsi/scsi_device.h> 30#include <scsi/scsi_device.h>
@@ -1374,11 +1375,11 @@ static int spi_host_configure(struct transport_container *tc,
1374 * overloads the return by setting 1<<1 if the attribute should 1375 * overloads the return by setting 1<<1 if the attribute should
1375 * be writeable */ 1376 * be writeable */
1376#define TARGET_ATTRIBUTE_HELPER(name) \ 1377#define TARGET_ATTRIBUTE_HELPER(name) \
1377 (si->f->show_##name ? 1 : 0) + \ 1378 (si->f->show_##name ? S_IRUGO : 0) | \
1378 (si->f->set_##name ? 2 : 0) 1379 (si->f->set_##name ? S_IWUSR : 0)
1379 1380
1380static int target_attribute_is_visible(struct kobject *kobj, 1381static mode_t target_attribute_is_visible(struct kobject *kobj,
1381 struct attribute *attr, int i) 1382 struct attribute *attr, int i)
1382{ 1383{
1383 struct device *cdev = container_of(kobj, struct device, kobj); 1384 struct device *cdev = container_of(kobj, struct device, kobj);
1384 struct scsi_target *starget = transport_class_to_starget(cdev); 1385 struct scsi_target *starget = transport_class_to_starget(cdev);
@@ -1428,7 +1429,7 @@ static int target_attribute_is_visible(struct kobject *kobj,
1428 spi_support_ius(starget)) 1429 spi_support_ius(starget))
1429 return TARGET_ATTRIBUTE_HELPER(hold_mcs); 1430 return TARGET_ATTRIBUTE_HELPER(hold_mcs);
1430 else if (attr == &dev_attr_revalidate.attr) 1431 else if (attr == &dev_attr_revalidate.attr)
1431 return 1; 1432 return S_IWUSR;
1432 1433
1433 return 0; 1434 return 0;
1434} 1435}
@@ -1462,25 +1463,9 @@ static int spi_target_configure(struct transport_container *tc,
1462 struct device *cdev) 1463 struct device *cdev)
1463{ 1464{
1464 struct kobject *kobj = &cdev->kobj; 1465 struct kobject *kobj = &cdev->kobj;
1465 int i; 1466
1466 struct attribute *attr; 1467 /* force an update based on parameters read from the device */
1467 int rc; 1468 sysfs_update_group(kobj, &target_attribute_group);
1468
1469 for (i = 0; (attr = target_attributes[i]) != NULL; i++) {
1470 int j = target_attribute_group.is_visible(kobj, attr, i);
1471
1472 /* FIXME: as well as returning -EEXIST, which we'd like
1473 * to ignore, sysfs also does a WARN_ON and dumps a trace,
1474 * which is bad, so temporarily, skip attributes that are
1475 * already visible (the revalidate one) */
1476 if (j && attr != &dev_attr_revalidate.attr)
1477 rc = sysfs_add_file_to_group(kobj, attr,
1478 target_attribute_group.name);
1479 /* and make the attribute writeable if we have a set
1480 * function */
1481 if ((j & 1))
1482 rc = sysfs_chmod_file(kobj, attr, attr->mode | S_IWUSR);
1483 }
1484 1469
1485 return 0; 1470 return 0;
1486} 1471}
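
The scsi_transport_spi rework above leans on two sysfs facilities: is_visible() now returns the mode bits an attribute should carry (0 hides it entirely), and sysfs_update_group() re-evaluates the whole group in place once the device's real capabilities are known, replacing the hand-rolled add-file/chmod loop that spi_target_configure() used to carry. A skeletal version of the callback pair:

#include <linux/stat.h>
#include <linux/sysfs.h>

static struct attribute *example_attrs[] = {
	/* real attribute pointers would go here */
	NULL
};

static mode_t example_is_visible(struct kobject *kobj,
				 struct attribute *attr, int i)
{
	int readable = 1;	/* would be probed from the device */
	int writable = 0;

	(void)kobj; (void)attr; (void)i;
	return (readable ? S_IRUGO : 0) | (writable ? S_IWUSR : 0);
}

static struct attribute_group example_group = {
	.is_visible = example_is_visible,
	.attrs = example_attrs,
};

/* after probing:  sysfs_update_group(&dev->kobj, &example_group); */

The TARGET_ATTRIBUTE_HELPER change is the same idea in macro form: show_##name maps to S_IRUGO and set_##name to S_IWUSR, so visibility and permissions come from one place.
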
diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c
index 03e359670506..31fe6051c799 100644
--- a/drivers/scsi/sgiwd93.c
+++ b/drivers/scsi/sgiwd93.c
@@ -313,7 +313,8 @@ static struct platform_driver sgiwd93_driver = {
313 .probe = sgiwd93_probe, 313 .probe = sgiwd93_probe,
314 .remove = __devexit_p(sgiwd93_remove), 314 .remove = __devexit_p(sgiwd93_remove),
315 .driver = { 315 .driver = {
316 .name = "sgiwd93" 316 .name = "sgiwd93",
317 .owner = THIS_MODULE,
317 } 318 }
318}; 319};
319 320
@@ -333,3 +334,4 @@ module_exit(sgiwd93_module_exit);
333MODULE_DESCRIPTION("SGI WD33C93 driver"); 334MODULE_DESCRIPTION("SGI WD33C93 driver");
334MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>"); 335MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
335MODULE_LICENSE("GPL"); 336MODULE_LICENSE("GPL");
337MODULE_ALIAS("platform:sgiwd93");
diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
index 0a6b45b1b003..2bbef4c45a0d 100644
--- a/drivers/scsi/sni_53c710.c
+++ b/drivers/scsi/sni_53c710.c
@@ -53,6 +53,7 @@
53MODULE_AUTHOR("Thomas Bogendörfer"); 53MODULE_AUTHOR("Thomas Bogendörfer");
54MODULE_DESCRIPTION("SNI RM 53c710 SCSI Driver"); 54MODULE_DESCRIPTION("SNI RM 53c710 SCSI Driver");
55MODULE_LICENSE("GPL"); 55MODULE_LICENSE("GPL");
56MODULE_ALIAS("platform:snirm_53c710");
56 57
57#define SNIRM710_CLOCK 32 58#define SNIRM710_CLOCK 32
58 59
@@ -136,6 +137,7 @@ static struct platform_driver snirm710_driver = {
136 .remove = __devexit_p(snirm710_driver_remove), 137 .remove = __devexit_p(snirm710_driver_remove),
137 .driver = { 138 .driver = {
138 .name = "snirm_53c710", 139 .name = "snirm_53c710",
140 .owner = THIS_MODULE,
139 }, 141 },
140}; 142};
141 143
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index a860c3a9ae99..e8db66ad0bde 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4322,7 +4322,7 @@ static void do_remove_sysfs_files(void)
4322static ssize_t 4322static ssize_t
4323st_defined_show(struct device *dev, struct device_attribute *attr, char *buf) 4323st_defined_show(struct device *dev, struct device_attribute *attr, char *buf)
4324{ 4324{
4325 struct st_modedef *STm = (struct st_modedef *)dev_get_drvdata(dev); 4325 struct st_modedef *STm = dev_get_drvdata(dev);
4326 ssize_t l = 0; 4326 ssize_t l = 0;
4327 4327
4328 l = snprintf(buf, PAGE_SIZE, "%d\n", STm->defined); 4328 l = snprintf(buf, PAGE_SIZE, "%d\n", STm->defined);
@@ -4334,7 +4334,7 @@ DEVICE_ATTR(defined, S_IRUGO, st_defined_show, NULL);
4334static ssize_t 4334static ssize_t
4335st_defblk_show(struct device *dev, struct device_attribute *attr, char *buf) 4335st_defblk_show(struct device *dev, struct device_attribute *attr, char *buf)
4336{ 4336{
4337 struct st_modedef *STm = (struct st_modedef *)dev_get_drvdata(dev); 4337 struct st_modedef *STm = dev_get_drvdata(dev);
4338 ssize_t l = 0; 4338 ssize_t l = 0;
4339 4339
4340 l = snprintf(buf, PAGE_SIZE, "%d\n", STm->default_blksize); 4340 l = snprintf(buf, PAGE_SIZE, "%d\n", STm->default_blksize);
@@ -4346,7 +4346,7 @@ DEVICE_ATTR(default_blksize, S_IRUGO, st_defblk_show, NULL);
4346static ssize_t 4346static ssize_t
4347st_defdensity_show(struct device *dev, struct device_attribute *attr, char *buf) 4347st_defdensity_show(struct device *dev, struct device_attribute *attr, char *buf)
4348{ 4348{
4349 struct st_modedef *STm = (struct st_modedef *)dev_get_drvdata(dev); 4349 struct st_modedef *STm = dev_get_drvdata(dev);
4350 ssize_t l = 0; 4350 ssize_t l = 0;
4351 char *fmt; 4351 char *fmt;
4352 4352
@@ -4361,7 +4361,7 @@ static ssize_t
4361st_defcompression_show(struct device *dev, struct device_attribute *attr, 4361st_defcompression_show(struct device *dev, struct device_attribute *attr,
4362 char *buf) 4362 char *buf)
4363{ 4363{
4364 struct st_modedef *STm = (struct st_modedef *)dev_get_drvdata(dev); 4364 struct st_modedef *STm = dev_get_drvdata(dev);
4365 ssize_t l = 0; 4365 ssize_t l = 0;
4366 4366
4367 l = snprintf(buf, PAGE_SIZE, "%d\n", STm->default_compression - 1); 4367 l = snprintf(buf, PAGE_SIZE, "%d\n", STm->default_compression - 1);
@@ -4373,7 +4373,7 @@ DEVICE_ATTR(default_compression, S_IRUGO, st_defcompression_show, NULL);
4373static ssize_t 4373static ssize_t
4374st_options_show(struct device *dev, struct device_attribute *attr, char *buf) 4374st_options_show(struct device *dev, struct device_attribute *attr, char *buf)
4375{ 4375{
4376 struct st_modedef *STm = (struct st_modedef *)dev_get_drvdata(dev); 4376 struct st_modedef *STm = dev_get_drvdata(dev);
4377 struct scsi_tape *STp; 4377 struct scsi_tape *STp;
4378 int i, j, options; 4378 int i, j, options;
4379 ssize_t l = 0; 4379 ssize_t l = 0;
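
The st.c hunks above only drop redundant casts: dev_get_drvdata() returns void *, and C converts void * to any object pointer type implicitly, so

	struct st_modedef *STm = dev_get_drvdata(dev);	/* no cast needed */

is equivalent to the old casted form.
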
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index 06152c7fa689..7514b3a0390e 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -294,6 +294,7 @@ static struct platform_driver esp_sun3x_driver = {
294 .remove = __devexit_p(esp_sun3x_remove), 294 .remove = __devexit_p(esp_sun3x_remove),
295 .driver = { 295 .driver = {
296 .name = "sun3x_esp", 296 .name = "sun3x_esp",
297 .owner = THIS_MODULE,
297 }, 298 },
298}; 299};
299 300
@@ -314,3 +315,4 @@ MODULE_VERSION(DRV_VERSION);
314 315
315module_init(sun3x_esp_init); 316module_init(sun3x_esp_init);
316module_exit(sun3x_esp_exit); 317module_exit(sun3x_esp_exit);
318MODULE_ALIAS("platform:sun3x_esp");
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 58d7eee4fe81..640333b1e75c 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -1715,13 +1715,12 @@ static void flush_dev(struct scsi_device *dev, unsigned long cursec, unsigned in
1715 1715
1716} 1716}
1717 1717
1718static irqreturn_t ihdlr(int irq, unsigned int j) { 1718static irqreturn_t ihdlr(unsigned int j)
1719{
1719 struct scsi_cmnd *SCpnt; 1720 struct scsi_cmnd *SCpnt;
1720 unsigned int i, k, c, status, tstatus, reg, ret; 1721 unsigned int i, k, c, status, tstatus, reg, ret;
1721 struct mscp *spp, *cpp; 1722 struct mscp *spp, *cpp;
1722 1723 int irq = sh[j]->irq;
1723 if (sh[j]->irq != irq)
1724 panic("%s: ihdlr, irq %d, sh[j]->irq %d.\n", BN(j), irq, sh[j]->irq);
1725 1724
 1726 /* Check if this board needs to be serviced */ 1725 /* Check if this board needs to be serviced */
1727 if (!((reg = inb(sh[j]->io_port + REG_SYS_INTR)) & IRQ_ASSERTED)) goto none; 1726 if (!((reg = inb(sh[j]->io_port + REG_SYS_INTR)) & IRQ_ASSERTED)) goto none;
@@ -1935,7 +1934,7 @@ static irqreturn_t do_interrupt_handler(int irq, void *shap) {
1935 if ((j = (unsigned int)((char *)shap - sha)) >= num_boards) return IRQ_NONE; 1934 if ((j = (unsigned int)((char *)shap - sha)) >= num_boards) return IRQ_NONE;
1936 1935
1937 spin_lock_irqsave(sh[j]->host_lock, spin_flags); 1936 spin_lock_irqsave(sh[j]->host_lock, spin_flags);
1938 ret = ihdlr(irq, j); 1937 ret = ihdlr(j);
1939 spin_unlock_irqrestore(sh[j]->host_lock, spin_flags); 1938 spin_unlock_irqrestore(sh[j]->host_lock, spin_flags);
1940 return ret; 1939 return ret;
1941} 1940}
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 678c02f1ae23..a452ac67fc94 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -224,12 +224,11 @@ static int v9fs_show_options(struct seq_file *m, struct vfsmount *mnt)
224} 224}
225 225
226static void 226static void
227v9fs_umount_begin(struct vfsmount *vfsmnt, int flags) 227v9fs_umount_begin(struct super_block *sb)
228{ 228{
229 struct v9fs_session_info *v9ses = vfsmnt->mnt_sb->s_fs_info; 229 struct v9fs_session_info *v9ses = sb->s_fs_info;
230 230
231 if (flags & MNT_FORCE) 231 v9fs_session_cancel(v9ses);
232 v9fs_session_cancel(v9ses);
233} 232}
234 233
235static const struct super_operations v9fs_super_ops = { 234static const struct super_operations v9fs_super_ops = {
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 5e1a4fb5cacb..9924581df6f6 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -543,7 +543,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
543 unsigned long interp_load_addr = 0; 543 unsigned long interp_load_addr = 0;
544 unsigned long start_code, end_code, start_data, end_data; 544 unsigned long start_code, end_code, start_data, end_data;
545 unsigned long reloc_func_desc = 0; 545 unsigned long reloc_func_desc = 0;
546 struct files_struct *files;
547 int executable_stack = EXSTACK_DEFAULT; 546 int executable_stack = EXSTACK_DEFAULT;
548 unsigned long def_flags = 0; 547 unsigned long def_flags = 0;
549 struct { 548 struct {
@@ -593,20 +592,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
593 goto out_free_ph; 592 goto out_free_ph;
594 } 593 }
595 594
596 files = current->files; /* Refcounted so ok */
597 retval = unshare_files();
598 if (retval < 0)
599 goto out_free_ph;
600 if (files == current->files) {
601 put_files_struct(files);
602 files = NULL;
603 }
604
605 /* exec will make our files private anyway, but for the a.out
606 loader stuff we need to do it earlier */
607 retval = get_unused_fd(); 595 retval = get_unused_fd();
608 if (retval < 0) 596 if (retval < 0)
609 goto out_free_fh; 597 goto out_free_ph;
610 get_file(bprm->file); 598 get_file(bprm->file);
611 fd_install(elf_exec_fileno = retval, bprm->file); 599 fd_install(elf_exec_fileno = retval, bprm->file);
612 600
@@ -728,12 +716,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
728 if (retval) 716 if (retval)
729 goto out_free_dentry; 717 goto out_free_dentry;
730 718
731 /* Discard our unneeded old files struct */
732 if (files) {
733 put_files_struct(files);
734 files = NULL;
735 }
736
737 /* OK, This is the point of no return */ 719 /* OK, This is the point of no return */
738 current->flags &= ~PF_FORKNOEXEC; 720 current->flags &= ~PF_FORKNOEXEC;
739 current->mm->def_flags = def_flags; 721 current->mm->def_flags = def_flags;
@@ -1016,9 +998,6 @@ out_free_interp:
1016 kfree(elf_interpreter); 998 kfree(elf_interpreter);
1017out_free_file: 999out_free_file:
1018 sys_close(elf_exec_fileno); 1000 sys_close(elf_exec_fileno);
1019out_free_fh:
1020 if (files)
1021 reset_files_struct(current, files);
1022out_free_ph: 1001out_free_ph:
1023 kfree(elf_phdata); 1002 kfree(elf_phdata);
1024 goto out; 1003 goto out;
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index b53c7e5f41bb..dbf0ac0523de 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -110,7 +110,6 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs)
110 char *iname_addr = iname; 110 char *iname_addr = iname;
111 int retval; 111 int retval;
112 int fd_binary = -1; 112 int fd_binary = -1;
113 struct files_struct *files = NULL;
114 113
115 retval = -ENOEXEC; 114 retval = -ENOEXEC;
116 if (!enabled) 115 if (!enabled)
@@ -133,21 +132,13 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs)
133 132
134 if (fmt->flags & MISC_FMT_OPEN_BINARY) { 133 if (fmt->flags & MISC_FMT_OPEN_BINARY) {
135 134
136 files = current->files;
137 retval = unshare_files();
138 if (retval < 0)
139 goto _ret;
140 if (files == current->files) {
141 put_files_struct(files);
142 files = NULL;
143 }
144 /* if the binary should be opened on behalf of the 135 /* if the binary should be opened on behalf of the
 145 * interpreter then keep it open and assign descriptor 136 * interpreter then keep it open and assign descriptor
146 * to it */ 137 * to it */
147 fd_binary = get_unused_fd(); 138 fd_binary = get_unused_fd();
148 if (fd_binary < 0) { 139 if (fd_binary < 0) {
149 retval = fd_binary; 140 retval = fd_binary;
150 goto _unshare; 141 goto _ret;
151 } 142 }
152 fd_install(fd_binary, bprm->file); 143 fd_install(fd_binary, bprm->file);
153 144
@@ -205,10 +196,6 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs)
205 if (retval < 0) 196 if (retval < 0)
206 goto _error; 197 goto _error;
207 198
208 if (files) {
209 put_files_struct(files);
210 files = NULL;
211 }
212_ret: 199_ret:
213 return retval; 200 return retval;
214_error: 201_error:
@@ -216,9 +203,6 @@ _error:
216 sys_close(fd_binary); 203 sys_close(fd_binary);
217 bprm->interp_flags = 0; 204 bprm->interp_flags = 0;
218 bprm->interp_data = 0; 205 bprm->interp_data = 0;
219_unshare:
220 if (files)
221 reset_files_struct(current, files);
222 goto _ret; 206 goto _ret;
223} 207}
224 208
diff --git a/fs/binfmt_som.c b/fs/binfmt_som.c
index 14c63527c762..fdc36bfd6a7b 100644
--- a/fs/binfmt_som.c
+++ b/fs/binfmt_som.c
@@ -194,7 +194,6 @@ load_som_binary(struct linux_binprm * bprm, struct pt_regs * regs)
194 unsigned long som_entry; 194 unsigned long som_entry;
195 struct som_hdr *som_ex; 195 struct som_hdr *som_ex;
196 struct som_exec_auxhdr *hpuxhdr; 196 struct som_exec_auxhdr *hpuxhdr;
197 struct files_struct *files;
198 197
199 /* Get the exec-header */ 198 /* Get the exec-header */
200 som_ex = (struct som_hdr *) bprm->buf; 199 som_ex = (struct som_hdr *) bprm->buf;
@@ -221,15 +220,6 @@ load_som_binary(struct linux_binprm * bprm, struct pt_regs * regs)
221 goto out_free; 220 goto out_free;
222 } 221 }
223 222
224 files = current->files; /* Refcounted so ok */
225 retval = unshare_files();
226 if (retval < 0)
227 goto out_free;
228 if (files == current->files) {
229 put_files_struct(files);
230 files = NULL;
231 }
232
233 retval = get_unused_fd(); 223 retval = get_unused_fd();
234 if (retval < 0) 224 if (retval < 0)
235 goto out_free; 225 goto out_free;
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index f53f41ff1665..95024c066d89 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -25,14 +25,26 @@
25 25
26static LIST_HEAD(cifs_dfs_automount_list); 26static LIST_HEAD(cifs_dfs_automount_list);
27 27
28/* 28static void cifs_dfs_expire_automounts(struct work_struct *work);
29 * DFS functions 29static DECLARE_DELAYED_WORK(cifs_dfs_automount_task,
30*/ 30 cifs_dfs_expire_automounts);
31static int cifs_dfs_mountpoint_expiry_timeout = 500 * HZ;
32
33static void cifs_dfs_expire_automounts(struct work_struct *work)
34{
35 struct list_head *list = &cifs_dfs_automount_list;
36
37 mark_mounts_for_expiry(list);
38 if (!list_empty(list))
39 schedule_delayed_work(&cifs_dfs_automount_task,
40 cifs_dfs_mountpoint_expiry_timeout);
41}
31 42
32void dfs_shrink_umount_helper(struct vfsmount *vfsmnt) 43void cifs_dfs_release_automount_timer(void)
33{ 44{
34 mark_mounts_for_expiry(&cifs_dfs_automount_list); 45 BUG_ON(!list_empty(&cifs_dfs_automount_list));
35 mark_mounts_for_expiry(&cifs_dfs_automount_list); 46 cancel_delayed_work(&cifs_dfs_automount_task);
47 flush_scheduled_work();
36} 48}
37 49
38/** 50/**
@@ -261,10 +273,11 @@ static int add_mount_helper(struct vfsmount *newmnt, struct nameidata *nd,
261 err = do_add_mount(newmnt, nd, nd->path.mnt->mnt_flags, mntlist); 273 err = do_add_mount(newmnt, nd, nd->path.mnt->mnt_flags, mntlist);
262 switch (err) { 274 switch (err) {
263 case 0: 275 case 0:
264 dput(nd->path.dentry); 276 path_put(&nd->path);
265 mntput(nd->path.mnt);
266 nd->path.mnt = newmnt; 277 nd->path.mnt = newmnt;
267 nd->path.dentry = dget(newmnt->mnt_root); 278 nd->path.dentry = dget(newmnt->mnt_root);
279 schedule_delayed_work(&cifs_dfs_automount_task,
280 cifs_dfs_mountpoint_expiry_timeout);
268 break; 281 break;
269 case -EBUSY: 282 case -EBUSY:
270 /* someone else made a mount here whilst we were busy */ 283 /* someone else made a mount here whilst we were busy */
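
The cifs_dfs_ref.c change above replaces the ad-hoc umount-time shrinker with the usual self-rescheduling delayed-work pattern: the work function expires what it can and re-arms itself only while entries remain, and module teardown cancels and then flushes. A condensed sketch of that pattern with hypothetical names:

	static void foo_expire(struct work_struct *work);
	static DECLARE_DELAYED_WORK(foo_task, foo_expire);

	static void foo_expire(struct work_struct *work)
	{
		expire_stale_entries(&foo_list);	/* hypothetical helper */
		if (!list_empty(&foo_list))		/* re-arm only while work remains */
			schedule_delayed_work(&foo_task, 500 * HZ);
	}

	static void foo_release_timer(void)		/* called at module exit */
	{
		cancel_delayed_work(&foo_task);
		flush_scheduled_work();			/* wait out a running instance */
	}
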
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index a04b17e5a9d0..39c2cbdface7 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -466,16 +466,11 @@ static struct quotactl_ops cifs_quotactl_ops = {
466}; 466};
467#endif 467#endif
468 468
469static void cifs_umount_begin(struct vfsmount *vfsmnt, int flags) 469static void cifs_umount_begin(struct super_block *sb)
470{ 470{
471 struct cifs_sb_info *cifs_sb; 471 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
472 struct cifsTconInfo *tcon; 472 struct cifsTconInfo *tcon;
473 473
474 dfs_shrink_umount_helper(vfsmnt);
475
476 if (!(flags & MNT_FORCE))
477 return;
478 cifs_sb = CIFS_SB(vfsmnt->mnt_sb);
479 if (cifs_sb == NULL) 474 if (cifs_sb == NULL)
480 return; 475 return;
481 476
@@ -1100,6 +1095,7 @@ exit_cifs(void)
1100 cFYI(DBG2, ("exit_cifs")); 1095 cFYI(DBG2, ("exit_cifs"));
1101 cifs_proc_clean(); 1096 cifs_proc_clean();
1102#ifdef CONFIG_CIFS_DFS_UPCALL 1097#ifdef CONFIG_CIFS_DFS_UPCALL
1098 cifs_dfs_release_automount_timer();
1103 unregister_key_type(&key_type_dns_resolver); 1099 unregister_key_type(&key_type_dns_resolver);
1104#endif 1100#endif
1105#ifdef CONFIG_CIFS_UPCALL 1101#ifdef CONFIG_CIFS_UPCALL
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 0c83da4a7dab..50f9fdae19b3 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -104,13 +104,7 @@ extern int mode_to_acl(struct inode *inode, const char *path, __u64);
104extern int cifs_mount(struct super_block *, struct cifs_sb_info *, char *, 104extern int cifs_mount(struct super_block *, struct cifs_sb_info *, char *,
105 const char *); 105 const char *);
106extern int cifs_umount(struct super_block *, struct cifs_sb_info *); 106extern int cifs_umount(struct super_block *, struct cifs_sb_info *);
107#ifdef CONFIG_CIFS_DFS_UPCALL 107extern void cifs_dfs_release_automount_timer(void);
108extern void dfs_shrink_umount_helper(struct vfsmount *vfsmnt);
109#else
110static inline void dfs_shrink_umount_helper(struct vfsmount *vfsmnt)
111{
112}
113#endif /* DFS_UPCALL */
114void cifs_proc_init(void); 108void cifs_proc_init(void);
115void cifs_proc_clean(void); 109void cifs_proc_clean(void);
116 110
diff --git a/fs/exec.c b/fs/exec.c
index 54a0a557b678..b152029f18f6 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -953,7 +953,6 @@ int flush_old_exec(struct linux_binprm * bprm)
953{ 953{
954 char * name; 954 char * name;
955 int i, ch, retval; 955 int i, ch, retval;
956 struct files_struct *files;
957 char tcomm[sizeof(current->comm)]; 956 char tcomm[sizeof(current->comm)];
958 957
959 /* 958 /*
@@ -965,26 +964,15 @@ int flush_old_exec(struct linux_binprm * bprm)
965 goto out; 964 goto out;
966 965
967 /* 966 /*
968 * Make sure we have private file handles. Ask the
969 * fork helper to do the work for us and the exit
970 * helper to do the cleanup of the old one.
971 */
972 files = current->files; /* refcounted so safe to hold */
973 retval = unshare_files();
974 if (retval)
975 goto out;
976 /*
977 * Release all of the old mmap stuff 967 * Release all of the old mmap stuff
978 */ 968 */
979 retval = exec_mmap(bprm->mm); 969 retval = exec_mmap(bprm->mm);
980 if (retval) 970 if (retval)
981 goto mmap_failed; 971 goto out;
982 972
983 bprm->mm = NULL; /* We're using it now */ 973 bprm->mm = NULL; /* We're using it now */
984 974
985 /* This is the point of no return */ 975 /* This is the point of no return */
986 put_files_struct(files);
987
988 current->sas_ss_sp = current->sas_ss_size = 0; 976 current->sas_ss_sp = current->sas_ss_size = 0;
989 977
990 if (current->euid == current->uid && current->egid == current->gid) 978 if (current->euid == current->uid && current->egid == current->gid)
@@ -1034,8 +1022,6 @@ int flush_old_exec(struct linux_binprm * bprm)
1034 1022
1035 return 0; 1023 return 0;
1036 1024
1037mmap_failed:
1038 reset_files_struct(current, files);
1039out: 1025out:
1040 return retval; 1026 return retval;
1041} 1027}
@@ -1283,12 +1269,17 @@ int do_execve(char * filename,
1283 struct linux_binprm *bprm; 1269 struct linux_binprm *bprm;
1284 struct file *file; 1270 struct file *file;
1285 unsigned long env_p; 1271 unsigned long env_p;
1272 struct files_struct *displaced;
1286 int retval; 1273 int retval;
1287 1274
1275 retval = unshare_files(&displaced);
1276 if (retval)
1277 goto out_ret;
1278
1288 retval = -ENOMEM; 1279 retval = -ENOMEM;
1289 bprm = kzalloc(sizeof(*bprm), GFP_KERNEL); 1280 bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
1290 if (!bprm) 1281 if (!bprm)
1291 goto out_ret; 1282 goto out_files;
1292 1283
1293 file = open_exec(filename); 1284 file = open_exec(filename);
1294 retval = PTR_ERR(file); 1285 retval = PTR_ERR(file);
@@ -1343,6 +1334,8 @@ int do_execve(char * filename,
1343 security_bprm_free(bprm); 1334 security_bprm_free(bprm);
1344 acct_update_integrals(current); 1335 acct_update_integrals(current);
1345 kfree(bprm); 1336 kfree(bprm);
1337 if (displaced)
1338 put_files_struct(displaced);
1346 return retval; 1339 return retval;
1347 } 1340 }
1348 1341
@@ -1363,6 +1356,9 @@ out_file:
1363out_kfree: 1356out_kfree:
1364 kfree(bprm); 1357 kfree(bprm);
1365 1358
1359out_files:
1360 if (displaced)
1361 reset_files_struct(displaced);
1366out_ret: 1362out_ret:
1367 return retval; 1363 return retval;
1368} 1364}
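
Taken with the binfmt_elf/binfmt_misc/binfmt_som hunks earlier, this centralizes the files_struct unsharing: do_execve() takes the displaced table up front, releases it once the exec commits, and restores it on failure. The flow, condensed from the diff (not a literal copy):

	struct files_struct *displaced;

	retval = unshare_files(&displaced);	/* private table; old one handed back */
	if (retval)
		goto out_ret;
	/* ... set up bprm and run the binary handlers ... */
	if (displaced)
		put_files_struct(displaced);	/* success: drop the old table */
	return retval;

out_files:
	if (displaced)
		reset_files_struct(displaced);	/* failure: reinstall the old table */
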
diff --git a/fs/fcntl.c b/fs/fcntl.c
index e632da761fc1..3f3ac630ccde 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -55,14 +55,16 @@ static int get_close_on_exec(unsigned int fd)
55 * file_lock held for write. 55 * file_lock held for write.
56 */ 56 */
57 57
58static int locate_fd(struct files_struct *files, 58static int locate_fd(unsigned int orig_start, int cloexec)
59 struct file *file, unsigned int orig_start)
60{ 59{
60 struct files_struct *files = current->files;
61 unsigned int newfd; 61 unsigned int newfd;
62 unsigned int start; 62 unsigned int start;
63 int error; 63 int error;
64 struct fdtable *fdt; 64 struct fdtable *fdt;
65 65
66 spin_lock(&files->file_lock);
67
66 error = -EINVAL; 68 error = -EINVAL;
67 if (orig_start >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur) 69 if (orig_start >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
68 goto out; 70 goto out;
@@ -97,42 +99,28 @@ repeat:
97 if (error) 99 if (error)
98 goto repeat; 100 goto repeat;
99 101
100 /*
101 * We reacquired files_lock, so we are safe as long as
102 * we reacquire the fdtable pointer and use it while holding
103 * the lock, no one can free it during that time.
104 */
105 if (start <= files->next_fd) 102 if (start <= files->next_fd)
106 files->next_fd = newfd + 1; 103 files->next_fd = newfd + 1;
107 104
105 FD_SET(newfd, fdt->open_fds);
106 if (cloexec)
107 FD_SET(newfd, fdt->close_on_exec);
108 else
109 FD_CLR(newfd, fdt->close_on_exec);
108 error = newfd; 110 error = newfd;
109 111
110out: 112out:
113 spin_unlock(&files->file_lock);
111 return error; 114 return error;
112} 115}
113 116
114static int dupfd(struct file *file, unsigned int start, int cloexec) 117static int dupfd(struct file *file, unsigned int start, int cloexec)
115{ 118{
116 struct files_struct * files = current->files; 119 int fd = locate_fd(start, cloexec);
117 struct fdtable *fdt; 120 if (fd >= 0)
118 int fd;
119
120 spin_lock(&files->file_lock);
121 fd = locate_fd(files, file, start);
122 if (fd >= 0) {
123 /* locate_fd() may have expanded fdtable, load the ptr */
124 fdt = files_fdtable(files);
125 FD_SET(fd, fdt->open_fds);
126 if (cloexec)
127 FD_SET(fd, fdt->close_on_exec);
128 else
129 FD_CLR(fd, fdt->close_on_exec);
130 spin_unlock(&files->file_lock);
131 fd_install(fd, file); 121 fd_install(fd, file);
132 } else { 122 else
133 spin_unlock(&files->file_lock);
134 fput(file); 123 fput(file);
135 }
136 124
137 return fd; 125 return fd;
138} 126}
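
After this refactor locate_fd() owns the file_lock, the open_fds bookkeeping and the close-on-exec bit, so dupfd() reduces to allocate-then-install, dropping its reference on failure. A hypothetical caller sketch, mirroring how fcntl's F_DUPFD path would use it:

	get_file(filp);				/* dupfd() consumes this reference */
	err = dupfd(filp, arg, cmd == F_DUPFD_CLOEXEC);
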
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 033f7bdd47e8..4df34da2284a 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -242,10 +242,9 @@ struct inode *fuse_iget(struct super_block *sb, unsigned long nodeid,
242 return inode; 242 return inode;
243} 243}
244 244
245static void fuse_umount_begin(struct vfsmount *vfsmnt, int flags) 245static void fuse_umount_begin(struct super_block *sb)
246{ 246{
247 if (flags & MNT_FORCE) 247 fuse_abort_conn(get_fuse_conn_super(sb));
248 fuse_abort_conn(get_fuse_conn_super(vfsmnt->mnt_sb));
249} 248}
250 249
251static void fuse_send_destroy(struct fuse_conn *fc) 250static void fuse_send_destroy(struct fuse_conn *fc)
diff --git a/fs/locks.c b/fs/locks.c
index e1ea2fe03681..44d9a6a7ec50 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -236,6 +236,7 @@ void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
236 new->fl_ops = NULL; 236 new->fl_ops = NULL;
237 new->fl_lmops = NULL; 237 new->fl_lmops = NULL;
238} 238}
239EXPORT_SYMBOL(__locks_copy_lock);
239 240
240void locks_copy_lock(struct file_lock *new, struct file_lock *fl) 241void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
241{ 242{
diff --git a/fs/namespace.c b/fs/namespace.c
index 0505fb61aa74..f48f98110c30 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1061,10 +1061,11 @@ static int do_umount(struct vfsmount *mnt, int flags)
1061 * about for the moment. 1061 * about for the moment.
1062 */ 1062 */
1063 1063
1064 lock_kernel(); 1064 if (flags & MNT_FORCE && sb->s_op->umount_begin) {
1065 if (sb->s_op->umount_begin) 1065 lock_kernel();
1066 sb->s_op->umount_begin(mnt, flags); 1066 sb->s_op->umount_begin(sb);
1067 unlock_kernel(); 1067 unlock_kernel();
1068 }
1068 1069
1069 /* 1070 /*
1070 * No sense to grab the lock for this test, but test itself looks 1071 * No sense to grab the lock for this test, but test itself looks
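
With the MNT_FORCE test hoisted into do_umount(), ->umount_begin now takes only the super_block and is invoked solely for forced unmounts, which is why the v9fs, cifs, fuse and nfs implementations in this diff all drop their flag checks. A minimal implementation under the new contract (hypothetical filesystem):

	static void foo_umount_begin(struct super_block *sb)
	{
		struct foo_sb_info *sbi = sb->s_fs_info;	/* hypothetical private data */

		/* MNT_FORCE was already checked by do_umount() */
		foo_abort_pending_requests(sbi);		/* hypothetical helper */
	}
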
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 20a1cb1810fe..fa220dc74609 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -198,7 +198,7 @@ static match_table_t nfs_secflavor_tokens = {
198}; 198};
199 199
200 200
201static void nfs_umount_begin(struct vfsmount *, int); 201static void nfs_umount_begin(struct super_block *);
202static int nfs_statfs(struct dentry *, struct kstatfs *); 202static int nfs_statfs(struct dentry *, struct kstatfs *);
203static int nfs_show_options(struct seq_file *, struct vfsmount *); 203static int nfs_show_options(struct seq_file *, struct vfsmount *);
204static int nfs_show_stats(struct seq_file *, struct vfsmount *); 204static int nfs_show_stats(struct seq_file *, struct vfsmount *);
@@ -647,13 +647,11 @@ static int nfs_show_stats(struct seq_file *m, struct vfsmount *mnt)
647 * Begin unmount by attempting to remove all automounted mountpoints we added 647 * Begin unmount by attempting to remove all automounted mountpoints we added
648 * in response to xdev traversals and referrals 648 * in response to xdev traversals and referrals
649 */ 649 */
650static void nfs_umount_begin(struct vfsmount *vfsmnt, int flags) 650static void nfs_umount_begin(struct super_block *sb)
651{ 651{
652 struct nfs_server *server = NFS_SB(vfsmnt->mnt_sb); 652 struct nfs_server *server = NFS_SB(sb);
653 struct rpc_clnt *rpc; 653 struct rpc_clnt *rpc;
654 654
655 if (!(flags & MNT_FORCE))
656 return;
657 /* -EIO all pending I/O */ 655 /* -EIO all pending I/O */
658 rpc = server->client_acl; 656 rpc = server->client_acl;
659 if (!IS_ERR(rpc)) 657 if (!IS_ERR(rpc))
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index ade9a7e6a757..dbdfabbfd609 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -477,11 +477,10 @@ const struct file_operations sysfs_file_operations = {
477 .poll = sysfs_poll, 477 .poll = sysfs_poll,
478}; 478};
479 479
480 480int sysfs_add_file_mode(struct sysfs_dirent *dir_sd,
481int sysfs_add_file(struct sysfs_dirent *dir_sd, const struct attribute *attr, 481 const struct attribute *attr, int type, mode_t amode)
482 int type)
483{ 482{
484 umode_t mode = (attr->mode & S_IALLUGO) | S_IFREG; 483 umode_t mode = (amode & S_IALLUGO) | S_IFREG;
485 struct sysfs_addrm_cxt acxt; 484 struct sysfs_addrm_cxt acxt;
486 struct sysfs_dirent *sd; 485 struct sysfs_dirent *sd;
487 int rc; 486 int rc;
@@ -502,6 +501,13 @@ int sysfs_add_file(struct sysfs_dirent *dir_sd, const struct attribute *attr,
502} 501}
503 502
504 503
504int sysfs_add_file(struct sysfs_dirent *dir_sd, const struct attribute *attr,
505 int type)
506{
507 return sysfs_add_file_mode(dir_sd, attr, type, attr->mode);
508}
509
510
505/** 511/**
506 * sysfs_create_file - create an attribute file for an object. 512 * sysfs_create_file - create an attribute file for an object.
507 * @kobj: object we're creating for. 513 * @kobj: object we're creating for.
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c
index 477904915032..eeba38417b1d 100644
--- a/fs/sysfs/group.c
+++ b/fs/sysfs/group.c
@@ -23,35 +23,50 @@ static void remove_files(struct sysfs_dirent *dir_sd, struct kobject *kobj,
23 int i; 23 int i;
24 24
25 for (i = 0, attr = grp->attrs; *attr; i++, attr++) 25 for (i = 0, attr = grp->attrs; *attr; i++, attr++)
26 if (!grp->is_visible || 26 sysfs_hash_and_remove(dir_sd, (*attr)->name);
27 grp->is_visible(kobj, *attr, i))
28 sysfs_hash_and_remove(dir_sd, (*attr)->name);
29} 27}
30 28
31static int create_files(struct sysfs_dirent *dir_sd, struct kobject *kobj, 29static int create_files(struct sysfs_dirent *dir_sd, struct kobject *kobj,
32 const struct attribute_group *grp) 30 const struct attribute_group *grp, int update)
33{ 31{
34 struct attribute *const* attr; 32 struct attribute *const* attr;
35 int error = 0, i; 33 int error = 0, i;
36 34
37 for (i = 0, attr = grp->attrs; *attr && !error; i++, attr++) 35 for (i = 0, attr = grp->attrs; *attr && !error; i++, attr++) {
38 if (!grp->is_visible || 36 mode_t mode = 0;
39 grp->is_visible(kobj, *attr, i)) 37
40 error |= 38 /* in update mode, we're changing the permissions or
41 sysfs_add_file(dir_sd, *attr, SYSFS_KOBJ_ATTR); 39 * visibility. Do this by first removing then
40 * re-adding (if required) the file */
41 if (update)
42 sysfs_hash_and_remove(dir_sd, (*attr)->name);
43 if (grp->is_visible) {
44 mode = grp->is_visible(kobj, *attr, i);
45 if (!mode)
46 continue;
47 }
48 error = sysfs_add_file_mode(dir_sd, *attr, SYSFS_KOBJ_ATTR,
49 (*attr)->mode | mode);
50 if (unlikely(error))
51 break;
52 }
42 if (error) 53 if (error)
43 remove_files(dir_sd, kobj, grp); 54 remove_files(dir_sd, kobj, grp);
44 return error; 55 return error;
45} 56}
46 57
47 58
48int sysfs_create_group(struct kobject * kobj, 59static int internal_create_group(struct kobject *kobj, int update,
49 const struct attribute_group * grp) 60 const struct attribute_group *grp)
50{ 61{
51 struct sysfs_dirent *sd; 62 struct sysfs_dirent *sd;
52 int error; 63 int error;
53 64
54 BUG_ON(!kobj || !kobj->sd); 65 BUG_ON(!kobj || (!update && !kobj->sd));
66
67 /* Updates may happen before the object has been instantiated */
68 if (unlikely(update && !kobj->sd))
69 return -EINVAL;
55 70
56 if (grp->name) { 71 if (grp->name) {
57 error = sysfs_create_subdir(kobj, grp->name, &sd); 72 error = sysfs_create_subdir(kobj, grp->name, &sd);
@@ -60,7 +75,7 @@ int sysfs_create_group(struct kobject * kobj,
60 } else 75 } else
61 sd = kobj->sd; 76 sd = kobj->sd;
62 sysfs_get(sd); 77 sysfs_get(sd);
63 error = create_files(sd, kobj, grp); 78 error = create_files(sd, kobj, grp, update);
64 if (error) { 79 if (error) {
65 if (grp->name) 80 if (grp->name)
66 sysfs_remove_subdir(sd); 81 sysfs_remove_subdir(sd);
@@ -69,6 +84,47 @@ int sysfs_create_group(struct kobject * kobj,
69 return error; 84 return error;
70} 85}
71 86
87/**
88 * sysfs_create_group - given a directory kobject, create an attribute group
89 * @kobj: The kobject to create the group on
90 * @grp: The attribute group to create
91 *
92 * This function creates a group for the first time. It will explicitly
93 * warn and error if any of the attribute files being created already exist.
94 *
95 * Returns 0 on success or error.
96 */
97int sysfs_create_group(struct kobject *kobj,
98 const struct attribute_group *grp)
99{
100 return internal_create_group(kobj, 0, grp);
101}
102
103/**
 104 * sysfs_update_group - given a directory kobject, update an attribute group
 105 * @kobj: The kobject the group lives on
 106 * @grp: The attribute group to update
107 *
108 * This function updates an attribute group. Unlike
109 * sysfs_create_group(), it will explicitly not warn or error if any
110 * of the attribute files being created already exist. Furthermore,
111 * if the visibility of the files has changed through the is_visible()
112 * callback, it will update the permissions and add or remove the
113 * relevant files.
114 *
115 * The primary use for this function is to call it after making a change
116 * that affects group visibility.
117 *
118 * Returns 0 on success or error.
119 */
120int sysfs_update_group(struct kobject *kobj,
121 const struct attribute_group *grp)
122{
123 return internal_create_group(kobj, 1, grp);
124}
125
126
127
72void sysfs_remove_group(struct kobject * kobj, 128void sysfs_remove_group(struct kobject * kobj,
73 const struct attribute_group * grp) 129 const struct attribute_group * grp)
74{ 130{
@@ -95,4 +151,5 @@ void sysfs_remove_group(struct kobject * kobj,
95 151
96 152
97EXPORT_SYMBOL_GPL(sysfs_create_group); 153EXPORT_SYMBOL_GPL(sysfs_create_group);
154EXPORT_SYMBOL_GPL(sysfs_update_group);
98EXPORT_SYMBOL_GPL(sysfs_remove_group); 155EXPORT_SYMBOL_GPL(sysfs_remove_group);
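
The intended usage of the pair of exports above: create the group once at probe time, then call sysfs_update_group() whenever device state changes what is_visible() would answer, and the group code re-removes and re-adds files with the recomputed modes. A sketch (group and callback names hypothetical):

	static struct attribute_group foo_group = {
		.attrs		= foo_attrs,		/* hypothetical attribute array */
		.is_visible	= foo_is_visible,	/* returns 0, or extra mode bits */
	};

	/* at probe time */
	error = sysfs_create_group(&dev->kobj, &foo_group);

	/* later, after re-reading parameters that affect visibility */
	sysfs_update_group(&dev->kobj, &foo_group);
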
diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h
index ff17f8da9b43..ce4e15f8aaeb 100644
--- a/fs/sysfs/sysfs.h
+++ b/fs/sysfs/sysfs.h
@@ -154,6 +154,8 @@ extern const struct file_operations sysfs_file_operations;
154int sysfs_add_file(struct sysfs_dirent *dir_sd, 154int sysfs_add_file(struct sysfs_dirent *dir_sd,
155 const struct attribute *attr, int type); 155 const struct attribute *attr, int type);
156 156
157int sysfs_add_file_mode(struct sysfs_dirent *dir_sd,
158 const struct attribute *attr, int type, mode_t amode);
157/* 159/*
158 * bin.c 160 * bin.c
159 */ 161 */
diff --git a/include/asm-alpha/bitops.h b/include/asm-alpha/bitops.h
index 9e19a704d484..15f3ae25c511 100644
--- a/include/asm-alpha/bitops.h
+++ b/include/asm-alpha/bitops.h
@@ -388,6 +388,11 @@ static inline int fls64(unsigned long x)
388} 388}
389#endif 389#endif
390 390
391static inline unsigned long __fls(unsigned long x)
392{
393 return fls64(x) - 1;
394}
395
391static inline int fls(int x) 396static inline int fls(int x)
392{ 397{
393 return fls64((unsigned int) x); 398 return fls64((unsigned int) x);
diff --git a/include/asm-arm/arch-sa1100/ide.h b/include/asm-arm/arch-sa1100/ide.h
index 98b10bcf9f1b..b14cbda01dc3 100644
--- a/include/asm-arm/arch-sa1100/ide.h
+++ b/include/asm-arm/arch-sa1100/ide.h
@@ -37,12 +37,12 @@ static inline void ide_init_hwif_ports(hw_regs_t *hw, unsigned long data_port,
37 37
38 memset(hw, 0, sizeof(*hw)); 38 memset(hw, 0, sizeof(*hw));
39 39
40 for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) { 40 for (i = 0; i <= 7; i++) {
41 hw->io_ports[i] = reg; 41 hw->io_ports_array[i] = reg;
42 reg += regincr; 42 reg += regincr;
43 } 43 }
44 44
45 hw->io_ports[IDE_CONTROL_OFFSET] = ctrl_port; 45 hw->io_ports.ctl_addr = ctrl_port;
46 46
47 if (irq) 47 if (irq)
48 *irq = 0; 48 *irq = 0;
diff --git a/include/asm-cris/arch-v10/ide.h b/include/asm-cris/arch-v10/ide.h
index ea34e0d0a388..5366e6239328 100644
--- a/include/asm-cris/arch-v10/ide.h
+++ b/include/asm-cris/arch-v10/ide.h
@@ -59,22 +59,19 @@ static inline void ide_init_hwif_ports(hw_regs_t *hw, unsigned long data_port, u
59 int i; 59 int i;
60 60
61 /* fill in ports for ATA addresses 0 to 7 */ 61 /* fill in ports for ATA addresses 0 to 7 */
62 62 for (i = 0; i <= 7; i++) {
63 for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) { 63 hw->io_ports_array[i] = data_port |
64 hw->io_ports[i] = data_port |
65 IO_FIELD(R_ATA_CTRL_DATA, addr, i) | 64 IO_FIELD(R_ATA_CTRL_DATA, addr, i) |
66 IO_STATE(R_ATA_CTRL_DATA, cs0, active); 65 IO_STATE(R_ATA_CTRL_DATA, cs0, active);
67 } 66 }
68 67
69 /* the IDE control register is at ATA address 6, with CS1 active instead of CS0 */ 68 /* the IDE control register is at ATA address 6, with CS1 active instead of CS0 */
70 69 hw->io_ports.ctl_addr = data_port |
71 hw->io_ports[IDE_CONTROL_OFFSET] = data_port |
72 IO_FIELD(R_ATA_CTRL_DATA, addr, 6) | 70 IO_FIELD(R_ATA_CTRL_DATA, addr, 6) |
73 IO_STATE(R_ATA_CTRL_DATA, cs1, active); 71 IO_STATE(R_ATA_CTRL_DATA, cs1, active);
74 72
 75 /* what's this for? */ 73 /* what's this for? */
76 74 hw->io_ports.irq_addr = 0;
77 hw->io_ports[IDE_IRQ_OFFSET] = 0;
78} 75}
79 76
80static inline void ide_init_default_hwifs(void) 77static inline void ide_init_default_hwifs(void)
diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
new file mode 100644
index 000000000000..be24465403d6
--- /dev/null
+++ b/include/asm-generic/bitops/__fls.h
@@ -0,0 +1,43 @@
1#ifndef _ASM_GENERIC_BITOPS___FLS_H_
2#define _ASM_GENERIC_BITOPS___FLS_H_
3
4#include <asm/types.h>
5
6/**
7 * __fls - find last (most-significant) set bit in a long word
8 * @word: the word to search
9 *
10 * Undefined if no set bit exists, so code should check against 0 first.
11 */
12static inline unsigned long __fls(unsigned long word)
13{
14 int num = BITS_PER_LONG - 1;
15
16#if BITS_PER_LONG == 64
17 if (!(word & (~0ul << 32))) {
18 num -= 32;
19 word <<= 32;
20 }
21#endif
22 if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
23 num -= 16;
24 word <<= 16;
25 }
26 if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
27 num -= 8;
28 word <<= 8;
29 }
30 if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
31 num -= 4;
32 word <<= 4;
33 }
34 if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
35 num -= 2;
36 word <<= 2;
37 }
38 if (!(word & (~0ul << (BITS_PER_LONG-1))))
39 num -= 1;
40 return num;
41}
42
43#endif /* _ASM_GENERIC_BITOPS___FLS_H_ */
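
The routine narrows the window by halving: each test checks whether any bit is set in the upper half of what remains, and if not shifts the word up and lowers the running answer. Some worked values for the contract (assuming a quick userspace check with BITS_PER_LONG defined):

	assert(__fls(1UL) == 0);		/* lowest bit -> index 0 */
	assert(__fls(0x90UL) == 7);		/* 0x90 = 1001 0000b, MSB is bit 7 */
	assert(__fls(~0UL) == BITS_PER_LONG - 1);
	/* __fls(0) is undefined: callers must test for zero first */
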
diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h
index 72a51e5a12ef..1914e9742512 100644
--- a/include/asm-generic/bitops/find.h
+++ b/include/asm-generic/bitops/find.h
@@ -1,11 +1,13 @@
1#ifndef _ASM_GENERIC_BITOPS_FIND_H_ 1#ifndef _ASM_GENERIC_BITOPS_FIND_H_
2#define _ASM_GENERIC_BITOPS_FIND_H_ 2#define _ASM_GENERIC_BITOPS_FIND_H_
3 3
4#ifndef CONFIG_GENERIC_FIND_NEXT_BIT
4extern unsigned long find_next_bit(const unsigned long *addr, unsigned long 5extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
5 size, unsigned long offset); 6 size, unsigned long offset);
6 7
7extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned 8extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned
8 long size, unsigned long offset); 9 long size, unsigned long offset);
10#endif
9 11
10#define find_first_bit(addr, size) find_next_bit((addr), (size), 0) 12#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
11#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) 13#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
index 1b6b17ce2428..86d403f8b256 100644
--- a/include/asm-generic/bitops/fls64.h
+++ b/include/asm-generic/bitops/fls64.h
@@ -3,6 +3,18 @@
3 3
4#include <asm/types.h> 4#include <asm/types.h>
5 5
6/**
7 * fls64 - find last set bit in a 64-bit word
8 * @x: the word to search
9 *
10 * This is defined in a similar way as the libc and compiler builtin
11 * ffsll, but returns the position of the most significant set bit.
12 *
13 * fls64(value) returns 0 if value is 0 or the position of the last
14 * set bit if value is nonzero. The last (most significant) bit is
15 * at position 64.
16 */
17#if BITS_PER_LONG == 32
6static inline int fls64(__u64 x) 18static inline int fls64(__u64 x)
7{ 19{
8 __u32 h = x >> 32; 20 __u32 h = x >> 32;
@@ -10,5 +22,15 @@ static inline int fls64(__u64 x)
10 return fls(h) + 32; 22 return fls(h) + 32;
11 return fls(x); 23 return fls(x);
12} 24}
25#elif BITS_PER_LONG == 64
26static inline int fls64(__u64 x)
27{
28 if (x == 0)
29 return 0;
30 return __fls(x) + 1;
31}
32#else
33#error BITS_PER_LONG not 32 or 64
34#endif
13 35
14#endif /* _ASM_GENERIC_BITOPS_FLS64_H_ */ 36#endif /* _ASM_GENERIC_BITOPS_FLS64_H_ */
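
Both variants honor the contract in the new comment: fls64(0) == 0, otherwise the result is one past the bit index, so the top bit of a 64-bit value reports position 64. Worked values:

	fls64(0)          == 0		/* documented special case */
	fls64(1)          == 1		/* bit 0 -> position 1 */
	fls64(0x90)       == 8		/* MSB is bit 7 -> position 8 */
	fls64(1ULL << 63) == 64		/* most significant bit */
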
diff --git a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h
index 953d3df9dd22..e2ca80037335 100644
--- a/include/asm-ia64/bitops.h
+++ b/include/asm-ia64/bitops.h
@@ -407,6 +407,22 @@ fls (int t)
407 return ia64_popcnt(x); 407 return ia64_popcnt(x);
408} 408}
409 409
410/*
411 * Find the last (most significant) bit set. Undefined for x==0.
412 * Bits are numbered from 0..63 (e.g., __fls(9) == 3).
413 */
414static inline unsigned long
415__fls (unsigned long x)
416{
417 x |= x >> 1;
418 x |= x >> 2;
419 x |= x >> 4;
420 x |= x >> 8;
421 x |= x >> 16;
422 x |= x >> 32;
423 return ia64_popcnt(x) - 1;
424}
425
410#include <asm-generic/bitops/fls64.h> 426#include <asm-generic/bitops/fls64.h>
411 427
412/* 428/*
diff --git a/include/asm-ia64/gcc_intrin.h b/include/asm-ia64/gcc_intrin.h
index de2ed2cbdd84..2fe292c275fe 100644
--- a/include/asm-ia64/gcc_intrin.h
+++ b/include/asm-ia64/gcc_intrin.h
@@ -21,6 +21,10 @@
21 21
22#define ia64_invala_fr(regnum) asm volatile ("invala.e f%0" :: "i"(regnum)) 22#define ia64_invala_fr(regnum) asm volatile ("invala.e f%0" :: "i"(regnum))
23 23
24#define ia64_flushrs() asm volatile ("flushrs;;":::"memory")
25
26#define ia64_loadrs() asm volatile ("loadrs;;":::"memory")
27
24extern void ia64_bad_param_for_setreg (void); 28extern void ia64_bad_param_for_setreg (void);
25extern void ia64_bad_param_for_getreg (void); 29extern void ia64_bad_param_for_getreg (void);
26 30
@@ -517,6 +521,14 @@ do { \
517#define ia64_ptrd(addr, size) \ 521#define ia64_ptrd(addr, size) \
518 asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory") 522 asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")
519 523
524#define ia64_ttag(addr) \
525({ \
526 __u64 ia64_intri_res; \
527 asm volatile ("ttag %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \
528 ia64_intri_res; \
529})
530
531
520/* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */ 532/* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */
521 533
522#define ia64_lfhint_none 0 534#define ia64_lfhint_none 0
diff --git a/include/asm-ia64/kvm.h b/include/asm-ia64/kvm.h
index 030d29b4b26b..eb2d3559d089 100644
--- a/include/asm-ia64/kvm.h
+++ b/include/asm-ia64/kvm.h
@@ -1,6 +1,205 @@
1#ifndef __LINUX_KVM_IA64_H 1#ifndef __ASM_IA64_KVM_H
2#define __LINUX_KVM_IA64_H 2#define __ASM_IA64_KVM_H
3 3
4/* ia64 does not support KVM */ 4/*
5 * asm-ia64/kvm.h: kvm structure definitions for ia64
6 *
7 * Copyright (C) 2007 Xiantao Zhang <xiantao.zhang@intel.com>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms and conditions of the GNU General Public License,
11 * version 2, as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
20 * Place - Suite 330, Boston, MA 02111-1307 USA.
21 *
22 */
23
24#include <asm/types.h>
25#include <asm/fpu.h>
26
27#include <linux/ioctl.h>
28
29/* Architectural interrupt line count. */
30#define KVM_NR_INTERRUPTS 256
31
32#define KVM_IOAPIC_NUM_PINS 24
33
34struct kvm_ioapic_state {
35 __u64 base_address;
36 __u32 ioregsel;
37 __u32 id;
38 __u32 irr;
39 __u32 pad;
40 union {
41 __u64 bits;
42 struct {
43 __u8 vector;
44 __u8 delivery_mode:3;
45 __u8 dest_mode:1;
46 __u8 delivery_status:1;
47 __u8 polarity:1;
48 __u8 remote_irr:1;
49 __u8 trig_mode:1;
50 __u8 mask:1;
51 __u8 reserve:7;
52 __u8 reserved[4];
53 __u8 dest_id;
54 } fields;
55 } redirtbl[KVM_IOAPIC_NUM_PINS];
56};
57
58#define KVM_IRQCHIP_PIC_MASTER 0
59#define KVM_IRQCHIP_PIC_SLAVE 1
60#define KVM_IRQCHIP_IOAPIC 2
61
62#define KVM_CONTEXT_SIZE 8*1024
63
64union context {
65 /* 8K size */
66 char dummy[KVM_CONTEXT_SIZE];
67 struct {
68 unsigned long psr;
69 unsigned long pr;
70 unsigned long caller_unat;
71 unsigned long pad;
72 unsigned long gr[32];
73 unsigned long ar[128];
74 unsigned long br[8];
75 unsigned long cr[128];
76 unsigned long rr[8];
77 unsigned long ibr[8];
78 unsigned long dbr[8];
79 unsigned long pkr[8];
80 struct ia64_fpreg fr[128];
81 };
82};
83
84struct thash_data {
85 union {
86 struct {
87 unsigned long p : 1; /* 0 */
88 unsigned long rv1 : 1; /* 1 */
89 unsigned long ma : 3; /* 2-4 */
90 unsigned long a : 1; /* 5 */
91 unsigned long d : 1; /* 6 */
92 unsigned long pl : 2; /* 7-8 */
93 unsigned long ar : 3; /* 9-11 */
94 unsigned long ppn : 38; /* 12-49 */
95 unsigned long rv2 : 2; /* 50-51 */
96 unsigned long ed : 1; /* 52 */
97 unsigned long ig1 : 11; /* 53-63 */
98 };
99 struct {
100 unsigned long __rv1 : 53; /* 0-52 */
101 unsigned long contiguous : 1; /*53 */
102 unsigned long tc : 1; /* 54 TR or TC */
103 unsigned long cl : 1;
104 /* 55 I side or D side cache line */
105 unsigned long len : 4; /* 56-59 */
106 unsigned long io : 1; /* 60 entry is for io or not */
107 unsigned long nomap : 1;
 108 /* 61 entry can't be inserted into machine TLB. */
109 unsigned long checked : 1;
110 /* 62 for VTLB/VHPT sanity check */
111 unsigned long invalid : 1;
112 /* 63 invalid entry */
113 };
114 unsigned long page_flags;
115 }; /* same for VHPT and TLB */
116
117 union {
118 struct {
119 unsigned long rv3 : 2;
120 unsigned long ps : 6;
121 unsigned long key : 24;
122 unsigned long rv4 : 32;
123 };
124 unsigned long itir;
125 };
126 union {
127 struct {
128 unsigned long ig2 : 12;
129 unsigned long vpn : 49;
130 unsigned long vrn : 3;
131 };
132 unsigned long ifa;
133 unsigned long vadr;
134 struct {
135 unsigned long tag : 63;
136 unsigned long ti : 1;
137 };
138 unsigned long etag;
139 };
140 union {
141 struct thash_data *next;
142 unsigned long rid;
143 unsigned long gpaddr;
144 };
145};
146
147#define NITRS 8
148#define NDTRS 8
149
150struct saved_vpd {
151 unsigned long vhpi;
152 unsigned long vgr[16];
153 unsigned long vbgr[16];
154 unsigned long vnat;
155 unsigned long vbnat;
156 unsigned long vcpuid[5];
157 unsigned long vpsr;
158 unsigned long vpr;
159 unsigned long vcr[128];
160};
161
162struct kvm_regs {
163 char *saved_guest;
164 char *saved_stack;
165 struct saved_vpd vpd;
166 /*Arch-regs*/
167 int mp_state;
168 unsigned long vmm_rr;
169 /* TR and TC. */
170 struct thash_data itrs[NITRS];
171 struct thash_data dtrs[NDTRS];
172 /* Bit is set if there is a tr/tc for the region. */
173 unsigned char itr_regions;
174 unsigned char dtr_regions;
175 unsigned char tc_regions;
176
177 char irq_check;
178 unsigned long saved_itc;
179 unsigned long itc_check;
180 unsigned long timer_check;
181 unsigned long timer_pending;
182 unsigned long last_itc;
183
184 unsigned long vrr[8];
185 unsigned long ibr[8];
186 unsigned long dbr[8];
187 unsigned long insvc[4]; /* Interrupt in service. */
188 unsigned long xtp;
189
190 unsigned long metaphysical_rr0; /* from kvm_arch (so is pinned) */
191 unsigned long metaphysical_rr4; /* from kvm_arch (so is pinned) */
192 unsigned long metaphysical_saved_rr0; /* from kvm_arch */
193 unsigned long metaphysical_saved_rr4; /* from kvm_arch */
 194 unsigned long fp_psr; /* used for lazy float register handling */
195 unsigned long saved_gp;
 196 /* for physical emulation */
197};
198
199struct kvm_sregs {
200};
201
202struct kvm_fpu {
203};
5 204
6#endif 205#endif
diff --git a/include/asm-ia64/kvm_host.h b/include/asm-ia64/kvm_host.h
new file mode 100644
index 000000000000..c082c208c1f3
--- /dev/null
+++ b/include/asm-ia64/kvm_host.h
@@ -0,0 +1,524 @@
1/*
2 * kvm_host.h: used for kvm module, and hold ia64-specific sections.
3 *
4 * Copyright (C) 2007, Intel Corporation.
5 *
6 * Xiantao Zhang <xiantao.zhang@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
19 * Place - Suite 330, Boston, MA 02111-1307 USA.
20 *
21 */
22
23#ifndef __ASM_KVM_HOST_H
24#define __ASM_KVM_HOST_H
25
26
27#include <linux/types.h>
28#include <linux/mm.h>
29#include <linux/kvm.h>
30#include <linux/kvm_para.h>
31#include <linux/kvm_types.h>
32
33#include <asm/pal.h>
34#include <asm/sal.h>
35
36#define KVM_MAX_VCPUS 4
37#define KVM_MEMORY_SLOTS 32
 38/* memory slots that are not exposed to userspace */
39#define KVM_PRIVATE_MEM_SLOTS 4
40
41
42/* define exit reasons from vmm to kvm*/
43#define EXIT_REASON_VM_PANIC 0
44#define EXIT_REASON_MMIO_INSTRUCTION 1
45#define EXIT_REASON_PAL_CALL 2
46#define EXIT_REASON_SAL_CALL 3
47#define EXIT_REASON_SWITCH_RR6 4
48#define EXIT_REASON_VM_DESTROY 5
49#define EXIT_REASON_EXTERNAL_INTERRUPT 6
50#define EXIT_REASON_IPI 7
51#define EXIT_REASON_PTC_G 8
52
53/*Define vmm address space and vm data space.*/
54#define KVM_VMM_SIZE (16UL<<20)
55#define KVM_VMM_SHIFT 24
56#define KVM_VMM_BASE 0xD000000000000000UL
57#define VMM_SIZE (8UL<<20)
58
59/*
60 * Define vm_buffer, used by PAL Services, base address.
61 * Note: vmbuffer is in the VMM-BLOCK, the size must be < 8M
62 */
63#define KVM_VM_BUFFER_BASE (KVM_VMM_BASE + VMM_SIZE)
64#define KVM_VM_BUFFER_SIZE (8UL<<20)
65
66/*Define Virtual machine data layout.*/
67#define KVM_VM_DATA_SHIFT 24
68#define KVM_VM_DATA_SIZE (1UL << KVM_VM_DATA_SHIFT)
69#define KVM_VM_DATA_BASE (KVM_VMM_BASE + KVM_VMM_SIZE)
70
71
72#define KVM_P2M_BASE KVM_VM_DATA_BASE
73#define KVM_P2M_OFS 0
74#define KVM_P2M_SIZE (8UL << 20)
75
76#define KVM_VHPT_BASE (KVM_P2M_BASE + KVM_P2M_SIZE)
77#define KVM_VHPT_OFS KVM_P2M_SIZE
78#define KVM_VHPT_BLOCK_SIZE (2UL << 20)
79#define VHPT_SHIFT 18
80#define VHPT_SIZE (1UL << VHPT_SHIFT)
81#define VHPT_NUM_ENTRIES (1<<(VHPT_SHIFT-5))
82
83#define KVM_VTLB_BASE (KVM_VHPT_BASE+KVM_VHPT_BLOCK_SIZE)
84#define KVM_VTLB_OFS (KVM_VHPT_OFS+KVM_VHPT_BLOCK_SIZE)
85#define KVM_VTLB_BLOCK_SIZE (1UL<<20)
86#define VTLB_SHIFT 17
87#define VTLB_SIZE (1UL<<VTLB_SHIFT)
88#define VTLB_NUM_ENTRIES (1<<(VTLB_SHIFT-5))
89
90#define KVM_VPD_BASE (KVM_VTLB_BASE+KVM_VTLB_BLOCK_SIZE)
91#define KVM_VPD_OFS (KVM_VTLB_OFS+KVM_VTLB_BLOCK_SIZE)
92#define KVM_VPD_BLOCK_SIZE (2UL<<20)
93#define VPD_SHIFT 16
94#define VPD_SIZE (1UL<<VPD_SHIFT)
95
96#define KVM_VCPU_BASE (KVM_VPD_BASE+KVM_VPD_BLOCK_SIZE)
97#define KVM_VCPU_OFS (KVM_VPD_OFS+KVM_VPD_BLOCK_SIZE)
98#define KVM_VCPU_BLOCK_SIZE (2UL<<20)
99#define VCPU_SHIFT 18
100#define VCPU_SIZE (1UL<<VCPU_SHIFT)
101#define MAX_VCPU_NUM KVM_VCPU_BLOCK_SIZE/VCPU_SIZE
102
103#define KVM_VM_BASE (KVM_VCPU_BASE+KVM_VCPU_BLOCK_SIZE)
104#define KVM_VM_OFS (KVM_VCPU_OFS+KVM_VCPU_BLOCK_SIZE)
105#define KVM_VM_BLOCK_SIZE (1UL<<19)
106
107#define KVM_MEM_DIRTY_LOG_BASE (KVM_VM_BASE+KVM_VM_BLOCK_SIZE)
108#define KVM_MEM_DIRTY_LOG_OFS (KVM_VM_OFS+KVM_VM_BLOCK_SIZE)
109#define KVM_MEM_DIRTY_LOG_SIZE (1UL<<19)
110
111/* Get vpd, vhpt, tlb, vcpu, base*/
112#define VPD_ADDR(n) (KVM_VPD_BASE+n*VPD_SIZE)
113#define VHPT_ADDR(n) (KVM_VHPT_BASE+n*VHPT_SIZE)
114#define VTLB_ADDR(n) (KVM_VTLB_BASE+n*VTLB_SIZE)
115#define VCPU_ADDR(n) (KVM_VCPU_BASE+n*VCPU_SIZE)
116
117/*IO section definitions*/
118#define IOREQ_READ 1
119#define IOREQ_WRITE 0
120
121#define STATE_IOREQ_NONE 0
122#define STATE_IOREQ_READY 1
123#define STATE_IOREQ_INPROCESS 2
124#define STATE_IORESP_READY 3
125
126/*Guest Physical address layout.*/
127#define GPFN_MEM (0UL << 60) /* Guest pfn is normal mem */
128#define GPFN_FRAME_BUFFER (1UL << 60) /* VGA framebuffer */
129#define GPFN_LOW_MMIO (2UL << 60) /* Low MMIO range */
130#define GPFN_PIB (3UL << 60) /* PIB base */
131#define GPFN_IOSAPIC (4UL << 60) /* IOSAPIC base */
132#define GPFN_LEGACY_IO (5UL << 60) /* Legacy I/O base */
133#define GPFN_GFW (6UL << 60) /* Guest Firmware */
134#define GPFN_HIGH_MMIO (7UL << 60) /* High MMIO range */
135
136#define GPFN_IO_MASK (7UL << 60) /* Guest pfn is I/O type */
137#define GPFN_INV_MASK (1UL << 63) /* Guest pfn is invalid */
138#define INVALID_MFN (~0UL)
139#define MEM_G (1UL << 30)
140#define MEM_M (1UL << 20)
141#define MMIO_START (3 * MEM_G)
142#define MMIO_SIZE (512 * MEM_M)
143#define VGA_IO_START 0xA0000UL
144#define VGA_IO_SIZE 0x20000
145#define LEGACY_IO_START (MMIO_START + MMIO_SIZE)
146#define LEGACY_IO_SIZE (64 * MEM_M)
147#define IO_SAPIC_START 0xfec00000UL
148#define IO_SAPIC_SIZE 0x100000
149#define PIB_START 0xfee00000UL
150#define PIB_SIZE 0x200000
151#define GFW_START (4 * MEM_G - 16 * MEM_M)
152#define GFW_SIZE (16 * MEM_M)
153
 154/* Delivery mode, defined for ioapic.c */
155#define dest_Fixed IOSAPIC_FIXED
156#define dest_LowestPrio IOSAPIC_LOWEST_PRIORITY
157
158#define NMI_VECTOR 2
159#define ExtINT_VECTOR 0
160#define NULL_VECTOR (-1)
161#define IA64_SPURIOUS_INT_VECTOR 0x0f
162
163#define VCPU_LID(v) (((u64)(v)->vcpu_id) << 24)
164
165/*
166 *Delivery mode
167 */
168#define SAPIC_DELIV_SHIFT 8
169#define SAPIC_FIXED 0x0
170#define SAPIC_LOWEST_PRIORITY 0x1
171#define SAPIC_PMI 0x2
172#define SAPIC_NMI 0x4
173#define SAPIC_INIT 0x5
174#define SAPIC_EXTINT 0x7
175
176/*
177 * vcpu->requests bit members for arch
178 */
179#define KVM_REQ_PTC_G 32
180#define KVM_REQ_RESUME 33
181
182#define KVM_PAGES_PER_HPAGE 1
183
184struct kvm;
185struct kvm_vcpu;
186struct kvm_guest_debug{
187};
188
189struct kvm_mmio_req {
190 uint64_t addr; /* physical address */
191 uint64_t size; /* size in bytes */
192 uint64_t data; /* data (or paddr of data) */
193 uint8_t state:4;
194 uint8_t dir:1; /* 1=read, 0=write */
195};
196
197/*Pal data struct */
198struct kvm_pal_call{
199 /*In area*/
200 uint64_t gr28;
201 uint64_t gr29;
202 uint64_t gr30;
203 uint64_t gr31;
204 /*Out area*/
205 struct ia64_pal_retval ret;
206};
207
208/* Sal data structure */
209struct kvm_sal_call{
210 /*In area*/
211 uint64_t in0;
212 uint64_t in1;
213 uint64_t in2;
214 uint64_t in3;
215 uint64_t in4;
216 uint64_t in5;
217 uint64_t in6;
218 uint64_t in7;
219 struct sal_ret_values ret;
220};
221
222/*Guest change rr6*/
223struct kvm_switch_rr6 {
224 uint64_t old_rr;
225 uint64_t new_rr;
226};
227
228union ia64_ipi_a{
229 unsigned long val;
230 struct {
231 unsigned long rv : 3;
232 unsigned long ir : 1;
233 unsigned long eid : 8;
234 unsigned long id : 8;
235 unsigned long ib_base : 44;
236 };
237};
238
239union ia64_ipi_d {
240 unsigned long val;
241 struct {
242 unsigned long vector : 8;
243 unsigned long dm : 3;
244 unsigned long ig : 53;
245 };
246};
247
248/*ipi check exit data*/
249struct kvm_ipi_data{
250 union ia64_ipi_a addr;
251 union ia64_ipi_d data;
252};
253
254/*global purge data*/
255struct kvm_ptc_g {
256 unsigned long vaddr;
257 unsigned long rr;
258 unsigned long ps;
259 struct kvm_vcpu *vcpu;
260};
261
262/*Exit control data */
263struct exit_ctl_data{
264 uint32_t exit_reason;
265 uint32_t vm_status;
266 union {
267 struct kvm_mmio_req ioreq;
268 struct kvm_pal_call pal_data;
269 struct kvm_sal_call sal_data;
270 struct kvm_switch_rr6 rr_data;
271 struct kvm_ipi_data ipi_data;
272 struct kvm_ptc_g ptc_g_data;
273 } u;
274};
275
276union pte_flags {
277 unsigned long val;
278 struct {
279 unsigned long p : 1; /*0 */
280 unsigned long : 1; /* 1 */
281 unsigned long ma : 3; /* 2-4 */
282 unsigned long a : 1; /* 5 */
283 unsigned long d : 1; /* 6 */
284 unsigned long pl : 2; /* 7-8 */
285 unsigned long ar : 3; /* 9-11 */
286 unsigned long ppn : 38; /* 12-49 */
287 unsigned long : 2; /* 50-51 */
288 unsigned long ed : 1; /* 52 */
289 };
290};
291
292union ia64_pta {
293 unsigned long val;
294 struct {
295 unsigned long ve : 1;
296 unsigned long reserved0 : 1;
297 unsigned long size : 6;
298 unsigned long vf : 1;
299 unsigned long reserved1 : 6;
300 unsigned long base : 49;
301 };
302};
303
304struct thash_cb {
305 /* THASH base information */
306 struct thash_data *hash; /* hash table pointer */
307 union ia64_pta pta;
308 int num;
309};
310
311struct kvm_vcpu_stat {
312};
313
314struct kvm_vcpu_arch {
315 int launched;
316 int last_exit;
317 int last_run_cpu;
318 int vmm_tr_slot;
319 int vm_tr_slot;
320
321#define KVM_MP_STATE_RUNNABLE 0
322#define KVM_MP_STATE_UNINITIALIZED 1
323#define KVM_MP_STATE_INIT_RECEIVED 2
324#define KVM_MP_STATE_HALTED 3
325 int mp_state;
326
327#define MAX_PTC_G_NUM 3
328 int ptc_g_count;
329 struct kvm_ptc_g ptc_g_data[MAX_PTC_G_NUM];
330
331 /*halt timer to wake up sleepy vcpus*/
332 struct hrtimer hlt_timer;
333 long ht_active;
334
335 struct kvm_lapic *apic; /* kernel irqchip context */
336 struct vpd *vpd;
337
338 /* Exit data for vmm_transition*/
339 struct exit_ctl_data exit_data;
340
341 cpumask_t cache_coherent_map;
342
343 unsigned long vmm_rr;
344 unsigned long host_rr6;
345 unsigned long psbits[8];
346 unsigned long cr_iipa;
347 unsigned long cr_isr;
348 unsigned long vsa_base;
349 unsigned long dirty_log_lock_pa;
350 unsigned long __gp;
351 /* TR and TC. */
352 struct thash_data itrs[NITRS];
353 struct thash_data dtrs[NDTRS];
354 /* Bit is set if there is a tr/tc for the region. */
355 unsigned char itr_regions;
356 unsigned char dtr_regions;
357 unsigned char tc_regions;
358 /* purge all */
359 unsigned long ptce_base;
360 unsigned long ptce_count[2];
361 unsigned long ptce_stride[2];
362 /* itc/itm */
363 unsigned long last_itc;
364 long itc_offset;
365 unsigned long itc_check;
366 unsigned long timer_check;
367 unsigned long timer_pending;
368
369 unsigned long vrr[8];
370 unsigned long ibr[8];
371 unsigned long dbr[8];
372 unsigned long insvc[4]; /* Interrupt in service. */
373 unsigned long xtp;
374
375 unsigned long metaphysical_rr0; /* from kvm_arch (so is pinned) */
376 unsigned long metaphysical_rr4; /* from kvm_arch (so is pinned) */
377 unsigned long metaphysical_saved_rr0; /* from kvm_arch */
378 unsigned long metaphysical_saved_rr4; /* from kvm_arch */
 379 unsigned long fp_psr; /* used for lazy float register handling */
380 unsigned long saved_gp;
 381 /* for physical emulation */
382 int mode_flags;
383 struct thash_cb vtlb;
384 struct thash_cb vhpt;
385 char irq_check;
386 char irq_new_pending;
387
388 unsigned long opcode;
389 unsigned long cause;
390 union context host;
391 union context guest;
392};
393
394struct kvm_vm_stat {
395 u64 remote_tlb_flush;
396};
397
398struct kvm_sal_data {
399 unsigned long boot_ip;
400 unsigned long boot_gp;
401};
402
403struct kvm_arch {
404 unsigned long vm_base;
405 unsigned long metaphysical_rr0;
406 unsigned long metaphysical_rr4;
407 unsigned long vmm_init_rr;
408 unsigned long vhpt_base;
409 unsigned long vtlb_base;
410 unsigned long vpd_base;
411 spinlock_t dirty_log_lock;
412 struct kvm_ioapic *vioapic;
413 struct kvm_vm_stat stat;
414 struct kvm_sal_data rdv_sal_data;
415};
416
417union cpuid3_t {
418 u64 value;
419 struct {
420 u64 number : 8;
421 u64 revision : 8;
422 u64 model : 8;
423 u64 family : 8;
424 u64 archrev : 8;
425 u64 rv : 24;
426 };
427};
428
429struct kvm_pt_regs {
430 /* The following registers are saved by SAVE_MIN: */
431 unsigned long b6; /* scratch */
432 unsigned long b7; /* scratch */
433
434 unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */
435 unsigned long ar_ssd; /* reserved for future use (scratch) */
436
437 unsigned long r8; /* scratch (return value register 0) */
438 unsigned long r9; /* scratch (return value register 1) */
439 unsigned long r10; /* scratch (return value register 2) */
440 unsigned long r11; /* scratch (return value register 3) */
441
442 unsigned long cr_ipsr; /* interrupted task's psr */
443 unsigned long cr_iip; /* interrupted task's instruction pointer */
444 unsigned long cr_ifs; /* interrupted task's function state */
445
446 unsigned long ar_unat; /* interrupted task's NaT register (preserved) */
447 unsigned long ar_pfs; /* prev function state */
448 unsigned long ar_rsc; /* RSE configuration */
449 /* The following two are valid only if cr_ipsr.cpl > 0: */
450 unsigned long ar_rnat; /* RSE NaT */
451 unsigned long ar_bspstore; /* RSE bspstore */
452
453 unsigned long pr; /* 64 predicate registers (1 bit each) */
454 unsigned long b0; /* return pointer (bp) */
455 unsigned long loadrs; /* size of dirty partition << 16 */
456
457 unsigned long r1; /* the gp pointer */
458 unsigned long r12; /* interrupted task's memory stack pointer */
459 unsigned long r13; /* thread pointer */
460
461 unsigned long ar_fpsr; /* floating point status (preserved) */
462 unsigned long r15; /* scratch */
463
464 /* The remaining registers are NOT saved for system calls. */
465 unsigned long r14; /* scratch */
466 unsigned long r2; /* scratch */
467 unsigned long r3; /* scratch */
468 unsigned long r16; /* scratch */
469 unsigned long r17; /* scratch */
470 unsigned long r18; /* scratch */
471 unsigned long r19; /* scratch */
472 unsigned long r20; /* scratch */
473 unsigned long r21; /* scratch */
474 unsigned long r22; /* scratch */
475 unsigned long r23; /* scratch */
476 unsigned long r24; /* scratch */
477 unsigned long r25; /* scratch */
478 unsigned long r26; /* scratch */
479 unsigned long r27; /* scratch */
480 unsigned long r28; /* scratch */
481 unsigned long r29; /* scratch */
482 unsigned long r30; /* scratch */
483 unsigned long r31; /* scratch */
484 unsigned long ar_ccv; /* compare/exchange value (scratch) */
485
486 /*
487 * Floating point registers that the kernel considers scratch:
488 */
489 struct ia64_fpreg f6; /* scratch */
490 struct ia64_fpreg f7; /* scratch */
491 struct ia64_fpreg f8; /* scratch */
492 struct ia64_fpreg f9; /* scratch */
493 struct ia64_fpreg f10; /* scratch */
494 struct ia64_fpreg f11; /* scratch */
495
496 unsigned long r4; /* preserved */
497 unsigned long r5; /* preserved */
498 unsigned long r6; /* preserved */
499 unsigned long r7; /* preserved */
500 unsigned long eml_unat; /* used for emulating instruction */
501 unsigned long pad0; /* alignment pad */
502};
503
504static inline struct kvm_pt_regs *vcpu_regs(struct kvm_vcpu *v)
505{
506 return (struct kvm_pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
507}
508
509typedef int kvm_vmm_entry(void);
510typedef void kvm_tramp_entry(union context *host, union context *guest);
511
512struct kvm_vmm_info{
513 struct module *module;
514 kvm_vmm_entry *vmm_entry;
515 kvm_tramp_entry *tramp_entry;
516 unsigned long vmm_ivt;
517};
518
519int kvm_highest_pending_irq(struct kvm_vcpu *vcpu);
520int kvm_emulate_halt(struct kvm_vcpu *vcpu);
521int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
522void kvm_sal_emul(struct kvm_vcpu *vcpu);
523
524#endif
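
vcpu_regs() above finds the saved register file by pure pointer arithmetic: one struct kvm_pt_regs sits immediately below the top of the per-vcpu stack area. A standalone sketch of the same idiom, with IA64_STK_OFFSET and the struct layouts replaced by illustrative stand-ins (not the kernel's real values):

/* Sketch only: STK_OFFSET and the struct layouts are stand-ins. */
#include <stdio.h>

#define STK_OFFSET (1UL << 16)                   /* assumed stack area size */

struct pt_regs_sketch { unsigned long r[32]; };  /* placeholder register file */
struct vcpu_sketch { char stack_area[1 << 16]; };

static struct pt_regs_sketch *vcpu_regs_sketch(struct vcpu_sketch *v)
{
	/* Cast-then-subtract: the "- 1" steps back one whole struct,
	 * so the register file ends exactly at v + STK_OFFSET. */
	return (struct pt_regs_sketch *)((unsigned long)v + STK_OFFSET) - 1;
}

int main(void)
{
	struct vcpu_sketch v;
	struct pt_regs_sketch *regs = vcpu_regs_sketch(&v);

	printf("regs start %lu bytes below the stack top\n",
	       (unsigned long)&v + STK_OFFSET - (unsigned long)regs);
	return 0;
}
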
diff --git a/include/asm-ia64/kvm_para.h b/include/asm-ia64/kvm_para.h
new file mode 100644
index 000000000000..9f9796bb3441
--- /dev/null
+++ b/include/asm-ia64/kvm_para.h
@@ -0,0 +1,29 @@
1#ifndef __IA64_KVM_PARA_H
2#define __IA64_KVM_PARA_H
3
4/*
5 * asm-ia64/kvm_para.h
6 *
7 * Copyright (C) 2007 Xiantao Zhang <xiantao.zhang@intel.com>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms and conditions of the GNU General Public License,
11 * version 2, as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
20 * Place - Suite 330, Boston, MA 02111-1307 USA.
21 *
22 */
23
24static inline unsigned int kvm_arch_para_features(void)
25{
26 return 0;
27}
28
29#endif
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
index 741f7ecb986a..6aff126fc07e 100644
--- a/include/asm-ia64/processor.h
+++ b/include/asm-ia64/processor.h
@@ -119,6 +119,69 @@ struct ia64_psr {
119 __u64 reserved4 : 19;
120};
121
122union ia64_isr {
123 __u64 val;
124 struct {
125 __u64 code : 16;
126 __u64 vector : 8;
127 __u64 reserved1 : 8;
128 __u64 x : 1;
129 __u64 w : 1;
130 __u64 r : 1;
131 __u64 na : 1;
132 __u64 sp : 1;
133 __u64 rs : 1;
134 __u64 ir : 1;
135 __u64 ni : 1;
136 __u64 so : 1;
137 __u64 ei : 2;
138 __u64 ed : 1;
139 __u64 reserved2 : 20;
140 };
141};
142
143union ia64_lid {
144 __u64 val;
145 struct {
146 __u64 rv : 16;
147 __u64 eid : 8;
148 __u64 id : 8;
149 __u64 ig : 32;
150 };
151};
152
153union ia64_tpr {
154 __u64 val;
155 struct {
156 __u64 ig0 : 4;
157 __u64 mic : 4;
158 __u64 rsv : 8;
159 __u64 mmi : 1;
160 __u64 ig1 : 47;
161 };
162};
163
164union ia64_itir {
165 __u64 val;
166 struct {
167 __u64 rv3 : 2; /* 0-1 */
168 __u64 ps : 6; /* 2-7 */
169 __u64 key : 24; /* 8-31 */
170 __u64 rv4 : 32; /* 32-63 */
171 };
172};
173
174union ia64_rr {
175 __u64 val;
176 struct {
177 __u64 ve : 1; /* enable hw walker */
178 __u64 reserved0: 1; /* reserved */
179 __u64 ps : 6; /* log page size */
180 __u64 rid : 24; /* region id */
181 __u64 reserved1: 32; /* reserved */
182 };
183};
184
185/*
186 * CPU type, hardware bug flags, and per-CPU state. Frequently used
187 * state comes earlier:
diff --git a/include/asm-mips/bitops.h b/include/asm-mips/bitops.h
index ec75ce4cdb8c..c2bd126c3b4e 100644
--- a/include/asm-mips/bitops.h
+++ b/include/asm-mips/bitops.h
@@ -591,6 +591,11 @@ static inline int __ilog2(unsigned long x)
591 return 63 - lz;
592}
593
594static inline unsigned long __fls(unsigned long x)
595{
596 return __ilog2(x);
597}
598
599#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
600
601/*
diff --git a/include/asm-mips/mach-au1x00/au1xxx_ide.h b/include/asm-mips/mach-au1x00/au1xxx_ide.h
index 89655c0cdcd6..b493a5e46c63 100644
--- a/include/asm-mips/mach-au1x00/au1xxx_ide.h
+++ b/include/asm-mips/mach-au1x00/au1xxx_ide.h
@@ -70,7 +70,6 @@ typedef struct
70 ide_hwif_t *hwif;
71#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
72 ide_drive_t *drive;
73 u8 white_list, black_list;
73 struct dbdma_cmd *dma_table_cpu;
74 dma_addr_t dma_table_dma;
75#endif
@@ -81,47 +80,6 @@ typedef struct
80#endif
81} _auide_hwif;
82
84#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
85/* HD white list */
86static const struct drive_list_entry dma_white_list [] = {
87/*
88 * Hitachi
89 */
90 { "HITACHI_DK14FA-20" , NULL },
91 { "HTS726060M9AT00" , NULL },
92/*
93 * Maxtor
94 */
95 { "Maxtor 6E040L0" , NULL },
96 { "Maxtor 6Y080P0" , NULL },
97 { "Maxtor 6Y160P0" , NULL },
98/*
99 * Seagate
100 */
101 { "ST3120026A" , NULL },
102 { "ST320014A" , NULL },
103 { "ST94011A" , NULL },
104 { "ST340016A" , NULL },
105/*
106 * Western Digital
107 */
108 { "WDC WD400UE-00HCT0" , NULL },
109 { "WDC WD400JB-00JJC0" , NULL },
110 { NULL , NULL }
111};
112
113/* HD black list */
114static const struct drive_list_entry dma_black_list [] = {
115/*
116 * Western Digital
117 */
118 { "WDC WD100EB-00CGH0" , NULL },
119 { "WDC WD200BB-00AUA1" , NULL },
120 { "WDC AC24300L" , NULL },
121 { NULL , NULL }
122};
123#endif
124
83/*******************************************************************************
84* PIO Mode timing calculation : *
85* *
diff --git a/include/asm-parisc/bitops.h b/include/asm-parisc/bitops.h
index f8eebcbad01f..7a6ea10bd231 100644
--- a/include/asm-parisc/bitops.h
+++ b/include/asm-parisc/bitops.h
@@ -210,6 +210,7 @@ static __inline__ int fls(int x)
210 return ret;
211}
212
213#include <asm-generic/bitops/__fls.h>
214#include <asm-generic/bitops/fls64.h>
215#include <asm-generic/bitops/hweight.h>
216#include <asm-generic/bitops/lock.h>
diff --git a/include/asm-powerpc/bitops.h b/include/asm-powerpc/bitops.h
index a99a74929475..897eade3afbe 100644
--- a/include/asm-powerpc/bitops.h
+++ b/include/asm-powerpc/bitops.h
@@ -313,6 +313,11 @@ static __inline__ int fls(unsigned int x)
313 return 32 - lz;
314}
315
316static __inline__ unsigned long __fls(unsigned long x)
317{
318 return __ilog2(x);
319}
320
321/*
322 * 64-bit can do this using one cntlzd (count leading zeroes doubleword)
323 * instruction; for 32-bit we use the generic version, which does two
diff --git a/include/asm-powerpc/kvm.h b/include/asm-powerpc/kvm.h
index d1b530fbf8dd..f993e4198d5c 100644
--- a/include/asm-powerpc/kvm.h
+++ b/include/asm-powerpc/kvm.h
@@ -1,6 +1,55 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2007
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */
19
20#ifndef __LINUX_KVM_POWERPC_H
21#define __LINUX_KVM_POWERPC_H
22
4/* powerpc does not support KVM */ 23#include <asm/types.h>
24
25struct kvm_regs {
26 __u64 pc;
27 __u64 cr;
28 __u64 ctr;
29 __u64 lr;
30 __u64 xer;
31 __u64 msr;
32 __u64 srr0;
33 __u64 srr1;
34 __u64 pid;
35
36 __u64 sprg0;
37 __u64 sprg1;
38 __u64 sprg2;
39 __u64 sprg3;
40 __u64 sprg4;
41 __u64 sprg5;
42 __u64 sprg6;
43 __u64 sprg7;
44
45 __u64 gpr[32];
46};
47
48struct kvm_sregs {
49};
50
51struct kvm_fpu {
52 __u64 fpr[32];
53};
54
6#endif 55#endif /* __LINUX_KVM_POWERPC_H */
diff --git a/include/asm-powerpc/kvm_asm.h b/include/asm-powerpc/kvm_asm.h
new file mode 100644
index 000000000000..2197764796d9
--- /dev/null
+++ b/include/asm-powerpc/kvm_asm.h
@@ -0,0 +1,55 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2008
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */
19
20#ifndef __POWERPC_KVM_ASM_H__
21#define __POWERPC_KVM_ASM_H__
22
23/* IVPR must be 64KiB-aligned. */
24#define VCPU_SIZE_ORDER 4
25#define VCPU_SIZE_LOG (VCPU_SIZE_ORDER + 12)
26#define VCPU_TLB_PGSZ PPC44x_TLB_64K
27#define VCPU_SIZE_BYTES (1<<VCPU_SIZE_LOG)
28
29#define BOOKE_INTERRUPT_CRITICAL 0
30#define BOOKE_INTERRUPT_MACHINE_CHECK 1
31#define BOOKE_INTERRUPT_DATA_STORAGE 2
32#define BOOKE_INTERRUPT_INST_STORAGE 3
33#define BOOKE_INTERRUPT_EXTERNAL 4
34#define BOOKE_INTERRUPT_ALIGNMENT 5
35#define BOOKE_INTERRUPT_PROGRAM 6
36#define BOOKE_INTERRUPT_FP_UNAVAIL 7
37#define BOOKE_INTERRUPT_SYSCALL 8
38#define BOOKE_INTERRUPT_AP_UNAVAIL 9
39#define BOOKE_INTERRUPT_DECREMENTER 10
40#define BOOKE_INTERRUPT_FIT 11
41#define BOOKE_INTERRUPT_WATCHDOG 12
42#define BOOKE_INTERRUPT_DTLB_MISS 13
43#define BOOKE_INTERRUPT_ITLB_MISS 14
44#define BOOKE_INTERRUPT_DEBUG 15
45#define BOOKE_MAX_INTERRUPT 15
46
47#define RESUME_FLAG_NV (1<<0) /* Reload guest nonvolatile state? */
48#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
49
50#define RESUME_GUEST 0
51#define RESUME_GUEST_NV RESUME_FLAG_NV
52#define RESUME_HOST RESUME_FLAG_HOST
53#define RESUME_HOST_NV (RESUME_FLAG_HOST|RESUME_FLAG_NV)
54
55#endif /* __POWERPC_KVM_ASM_H__ */
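
The VCPU_SIZE_* macros reduce to one piece of arithmetic: a page order of 4 on top of the 12-bit base page shift gives a 64KiB allocation, which is exactly the alignment the IVPR comment demands. A quick standalone check of those numbers (a sketch, not part of the patch):

/* Mirrors the macros above; compile and run to confirm the 64KiB figure. */
#include <stdio.h>

#define VCPU_SIZE_ORDER 4
#define VCPU_SIZE_LOG   (VCPU_SIZE_ORDER + 12)  /* 16 */
#define VCPU_SIZE_BYTES (1 << VCPU_SIZE_LOG)    /* 1 << 16 = 65536 */

int main(void)
{
	printf("VCPU_SIZE_BYTES = %d bytes = %d KiB\n",
	       VCPU_SIZE_BYTES, VCPU_SIZE_BYTES >> 10);  /* 65536 = 64 KiB */
	return 0;
}
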
diff --git a/include/asm-powerpc/kvm_host.h b/include/asm-powerpc/kvm_host.h
new file mode 100644
index 000000000000..04ffbb8e0a35
--- /dev/null
+++ b/include/asm-powerpc/kvm_host.h
@@ -0,0 +1,152 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2007
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */
19
20#ifndef __POWERPC_KVM_HOST_H__
21#define __POWERPC_KVM_HOST_H__
22
23#include <linux/mutex.h>
24#include <linux/timer.h>
25#include <linux/types.h>
26#include <linux/kvm_types.h>
27#include <asm/kvm_asm.h>
28
29#define KVM_MAX_VCPUS 1
30#define KVM_MEMORY_SLOTS 32
31/* memory slots that are not exposed to userspace */
32#define KVM_PRIVATE_MEM_SLOTS 4
33
34/* We don't currently support large pages. */
35#define KVM_PAGES_PER_HPAGE (1<<31)
36
37struct kvm;
38struct kvm_run;
39struct kvm_vcpu;
40
41struct kvm_vm_stat {
42 u32 remote_tlb_flush;
43};
44
45struct kvm_vcpu_stat {
46 u32 sum_exits;
47 u32 mmio_exits;
48 u32 dcr_exits;
49 u32 signal_exits;
50 u32 light_exits;
51 /* Account for special types of light exits: */
52 u32 itlb_real_miss_exits;
53 u32 itlb_virt_miss_exits;
54 u32 dtlb_real_miss_exits;
55 u32 dtlb_virt_miss_exits;
56 u32 syscall_exits;
57 u32 isi_exits;
58 u32 dsi_exits;
59 u32 emulated_inst_exits;
60 u32 dec_exits;
61 u32 ext_intr_exits;
62};
63
64struct tlbe {
65 u32 tid; /* Only the low 8 bits are used. */
66 u32 word0;
67 u32 word1;
68 u32 word2;
69};
70
71struct kvm_arch {
72};
73
74struct kvm_vcpu_arch {
75 /* Unmodified copy of the guest's TLB. */
76 struct tlbe guest_tlb[PPC44x_TLB_SIZE];
77 /* TLB that's actually used when the guest is running. */
78 struct tlbe shadow_tlb[PPC44x_TLB_SIZE];
79 /* Pages which are referenced in the shadow TLB. */
80 struct page *shadow_pages[PPC44x_TLB_SIZE];
81 /* Copy of the host's TLB. */
82 struct tlbe host_tlb[PPC44x_TLB_SIZE];
83
84 u32 host_stack;
85 u32 host_pid;
86
87 u64 fpr[32];
88 u32 gpr[32];
89
90 u32 pc;
91 u32 cr;
92 u32 ctr;
93 u32 lr;
94 u32 xer;
95
96 u32 msr;
97 u32 mmucr;
98 u32 sprg0;
99 u32 sprg1;
100 u32 sprg2;
101 u32 sprg3;
102 u32 sprg4;
103 u32 sprg5;
104 u32 sprg6;
105 u32 sprg7;
106 u32 srr0;
107 u32 srr1;
108 u32 csrr0;
109 u32 csrr1;
110 u32 dsrr0;
111 u32 dsrr1;
112 u32 dear;
113 u32 esr;
114 u32 dec;
115 u32 decar;
116 u32 tbl;
117 u32 tbu;
118 u32 tcr;
119 u32 tsr;
120 u32 ivor[16];
121 u32 ivpr;
122 u32 pir;
123 u32 pid;
124 u32 pvr;
125 u32 ccr0;
126 u32 ccr1;
127 u32 dbcr0;
128 u32 dbcr1;
129
130 u32 last_inst;
131 u32 fault_dear;
132 u32 fault_esr;
133 gpa_t paddr_accessed;
134
135 u8 io_gpr; /* GPR used as IO source/target */
136 u8 mmio_is_bigendian;
137 u8 dcr_needed;
138 u8 dcr_is_write;
139
140 u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */
141
142 struct timer_list dec_timer;
143 unsigned long pending_exceptions;
144};
145
146struct kvm_guest_debug {
147 int enabled;
148 unsigned long bp[4];
149 int singlestep;
150};
151
152#endif /* __POWERPC_KVM_HOST_H__ */
diff --git a/include/asm-powerpc/kvm_para.h b/include/asm-powerpc/kvm_para.h
new file mode 100644
index 000000000000..2d48f6a63d0b
--- /dev/null
+++ b/include/asm-powerpc/kvm_para.h
@@ -0,0 +1,37 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2008
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */
19
20#ifndef __POWERPC_KVM_PARA_H__
21#define __POWERPC_KVM_PARA_H__
22
23#ifdef __KERNEL__
24
25static inline int kvm_para_available(void)
26{
27 return 0;
28}
29
30static inline unsigned int kvm_arch_para_features(void)
31{
32 return 0;
33}
34
35#endif /* __KERNEL__ */
36
37#endif /* __POWERPC_KVM_PARA_H__ */
diff --git a/include/asm-powerpc/kvm_ppc.h b/include/asm-powerpc/kvm_ppc.h
new file mode 100644
index 000000000000..7ac820308a7e
--- /dev/null
+++ b/include/asm-powerpc/kvm_ppc.h
@@ -0,0 +1,88 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2008
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */
19
20#ifndef __POWERPC_KVM_PPC_H__
21#define __POWERPC_KVM_PPC_H__
22
23/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
24 * dependencies. */
25
26#include <linux/mutex.h>
27#include <linux/timer.h>
28#include <linux/types.h>
29#include <linux/kvm_types.h>
30#include <linux/kvm_host.h>
31
32struct kvm_tlb {
33 struct tlbe guest_tlb[PPC44x_TLB_SIZE];
34 struct tlbe shadow_tlb[PPC44x_TLB_SIZE];
35};
36
37enum emulation_result {
38 EMULATE_DONE, /* no further processing */
39 EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */
40 EMULATE_DO_DCR, /* kvm_run filled with DCR request */
41 EMULATE_FAIL, /* can't emulate this instruction */
42};
43
44extern const unsigned char exception_priority[];
45extern const unsigned char priority_exception[];
46
47extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
48extern char kvmppc_handlers_start[];
49extern unsigned long kvmppc_handler_len;
50
51extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
52extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
53 unsigned int rt, unsigned int bytes,
54 int is_bigendian);
55extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
56 u32 val, unsigned int bytes, int is_bigendian);
57
58extern int kvmppc_emulate_instruction(struct kvm_run *run,
59 struct kvm_vcpu *vcpu);
60
61extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn,
62 u64 asid, u32 flags);
63extern void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, u64 eaddr, u64 asid);
64extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
65
66extern void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu);
67
68static inline void kvmppc_queue_exception(struct kvm_vcpu *vcpu, int exception)
69{
70 unsigned int priority = exception_priority[exception];
71 set_bit(priority, &vcpu->arch.pending_exceptions);
72}
73
74static inline void kvmppc_clear_exception(struct kvm_vcpu *vcpu, int exception)
75{
76 unsigned int priority = exception_priority[exception];
77 clear_bit(priority, &vcpu->arch.pending_exceptions);
78}
79
80static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
81{
82 if ((new_msr & MSR_PR) != (vcpu->arch.msr & MSR_PR))
83 kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR);
84
85 vcpu->arch.msr = new_msr;
86}
87
88#endif /* __POWERPC_KVM_PPC_H__ */
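
kvmppc_queue_exception() above encodes pending interrupts as a bitmap indexed by priority, so delivery order falls out of a find-first-bit scan. Below is a sketch of how a delivery loop could consume that bitmap; deliver_one() is a made-up helper, and the real logic lives in kvmppc_check_and_deliver_interrupts() in arch/powerpc/kvm:

/* Sketch only: deliver_one() is hypothetical, not a kernel function. */
static void deliver_pending_sketch(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;

	while (*pending) {
		/* Lowest set bit == highest-priority pending exception. */
		unsigned int priority = __ffs(*pending);

		deliver_one(vcpu, priority_exception[priority]);
		clear_bit(priority, pending);
	}
}
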
diff --git a/include/asm-powerpc/mmu-44x.h b/include/asm-powerpc/mmu-44x.h
index c8b02d97f753..a825524c981a 100644
--- a/include/asm-powerpc/mmu-44x.h
+++ b/include/asm-powerpc/mmu-44x.h
@@ -53,6 +53,8 @@
53
54#ifndef __ASSEMBLY__
55
56extern unsigned int tlb_44x_hwater;
57
58typedef struct {
59 unsigned long id;
60 unsigned long vdso_base;
diff --git a/include/asm-s390/Kbuild b/include/asm-s390/Kbuild
index e92b429d2be1..13c9805349f1 100644
--- a/include/asm-s390/Kbuild
+++ b/include/asm-s390/Kbuild
@@ -7,6 +7,7 @@ header-y += tape390.h
7header-y += ucontext.h
8header-y += vtoc.h
9header-y += zcrypt.h
10header-y += kvm.h
11
12unifdef-y += cmb.h
13unifdef-y += debug.h
diff --git a/include/asm-s390/bitops.h b/include/asm-s390/bitops.h
index 965394e69452..b4eb24ab5af9 100644
--- a/include/asm-s390/bitops.h
+++ b/include/asm-s390/bitops.h
@@ -769,6 +769,7 @@ static inline int sched_find_first_bit(unsigned long *b)
769}
770
771#include <asm-generic/bitops/fls.h>
772#include <asm-generic/bitops/__fls.h>
773#include <asm-generic/bitops/fls64.h>
774
775#include <asm-generic/bitops/hweight.h>
diff --git a/include/asm-s390/kvm.h b/include/asm-s390/kvm.h
index 573f2a351386..d74002f95794 100644
--- a/include/asm-s390/kvm.h
+++ b/include/asm-s390/kvm.h
@@ -1,6 +1,45 @@
1#ifndef __LINUX_KVM_S390_H
2#define __LINUX_KVM_S390_H
3
4/* s390 does not support KVM */ 4/*
5 * asm-s390/kvm.h - KVM s390 specific structures and definitions
6 *
7 * Copyright IBM Corp. 2008
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License (version 2 only)
11 * as published by the Free Software Foundation.
12 *
13 * Author(s): Carsten Otte <cotte@de.ibm.com>
14 * Christian Borntraeger <borntraeger@de.ibm.com>
15 */
16#include <asm/types.h>
17
18/* for KVM_GET_IRQCHIP and KVM_SET_IRQCHIP */
19struct kvm_pic_state {
20 /* no PIC for s390 */
21};
22
23struct kvm_ioapic_state {
24 /* no IOAPIC for s390 */
25};
26
27/* for KVM_GET_REGS and KVM_SET_REGS */
28struct kvm_regs {
29 /* general purpose regs for s390 */
30 __u64 gprs[16];
31};
32
33/* for KVM_GET_SREGS and KVM_SET_SREGS */
34struct kvm_sregs {
35 __u32 acrs[16];
36 __u64 crs[16];
37};
38
39/* for KVM_GET_FPU and KVM_SET_FPU */
40struct kvm_fpu {
41 __u32 fpc;
42 __u64 fprs[16];
43};
44
45#endif
diff --git a/include/asm-s390/kvm_host.h b/include/asm-s390/kvm_host.h
new file mode 100644
index 000000000000..f8204a4f2e02
--- /dev/null
+++ b/include/asm-s390/kvm_host.h
@@ -0,0 +1,234 @@
1/*
2 * asm-s390/kvm_host.h - definition for kernel virtual machines on s390
3 *
4 * Copyright IBM Corp. 2008
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 */
12
13
14#ifndef ASM_KVM_HOST_H
15#define ASM_KVM_HOST_H
16#include <linux/kvm_host.h>
17#include <asm/debug.h>
18
19#define KVM_MAX_VCPUS 64
20#define KVM_MEMORY_SLOTS 32
21/* memory slots that are not exposed to userspace */
22#define KVM_PRIVATE_MEM_SLOTS 4
23
24struct kvm_guest_debug {
25};
26
27struct sca_entry {
28 atomic_t scn;
29 __u64 reserved;
30 __u64 sda;
31 __u64 reserved2[2];
32} __attribute__((packed));
33
34
35struct sca_block {
36 __u64 ipte_control;
37 __u64 reserved[5];
38 __u64 mcn;
39 __u64 reserved2;
40 struct sca_entry cpu[64];
41} __attribute__((packed));
42
43#define KVM_PAGES_PER_HPAGE 256
44
45#define CPUSTAT_HOST 0x80000000
46#define CPUSTAT_WAIT 0x10000000
47#define CPUSTAT_ECALL_PEND 0x08000000
48#define CPUSTAT_STOP_INT 0x04000000
49#define CPUSTAT_IO_INT 0x02000000
50#define CPUSTAT_EXT_INT 0x01000000
51#define CPUSTAT_RUNNING 0x00800000
52#define CPUSTAT_RETAINED 0x00400000
53#define CPUSTAT_TIMING_SUB 0x00020000
54#define CPUSTAT_SIE_SUB 0x00010000
55#define CPUSTAT_RRF 0x00008000
56#define CPUSTAT_SLSV 0x00004000
57#define CPUSTAT_SLSR 0x00002000
58#define CPUSTAT_ZARCH 0x00000800
59#define CPUSTAT_MCDS 0x00000100
60#define CPUSTAT_SM 0x00000080
61#define CPUSTAT_G 0x00000008
62#define CPUSTAT_J 0x00000002
63#define CPUSTAT_P 0x00000001
64
65struct sie_block {
66 atomic_t cpuflags; /* 0x0000 */
67 __u32 prefix; /* 0x0004 */
68 __u8 reserved8[32]; /* 0x0008 */
69 __u64 cputm; /* 0x0028 */
70 __u64 ckc; /* 0x0030 */
71 __u64 epoch; /* 0x0038 */
72 __u8 reserved40[4]; /* 0x0040 */
73#define LCTL_CR0 0x8000
74 __u16 lctl; /* 0x0044 */
75 __s16 icpua; /* 0x0046 */
76 __u32 ictl; /* 0x0048 */
77 __u32 eca; /* 0x004c */
78 __u8 icptcode; /* 0x0050 */
79 __u8 reserved51; /* 0x0051 */
80 __u16 ihcpu; /* 0x0052 */
81 __u8 reserved54[2]; /* 0x0054 */
82 __u16 ipa; /* 0x0056 */
83 __u32 ipb; /* 0x0058 */
84 __u32 scaoh; /* 0x005c */
85 __u8 reserved60; /* 0x0060 */
86 __u8 ecb; /* 0x0061 */
87 __u8 reserved62[2]; /* 0x0062 */
88 __u32 scaol; /* 0x0064 */
89 __u8 reserved68[4]; /* 0x0068 */
90 __u32 todpr; /* 0x006c */
91 __u8 reserved70[16]; /* 0x0070 */
92 __u64 gmsor; /* 0x0080 */
93 __u64 gmslm; /* 0x0088 */
94 psw_t gpsw; /* 0x0090 */
95 __u64 gg14; /* 0x00a0 */
96 __u64 gg15; /* 0x00a8 */
97 __u8 reservedb0[30]; /* 0x00b0 */
98 __u16 iprcc; /* 0x00ce */
99 __u8 reservedd0[48]; /* 0x00d0 */
100 __u64 gcr[16]; /* 0x0100 */
101 __u64 gbea; /* 0x0180 */
102 __u8 reserved188[120]; /* 0x0188 */
103} __attribute__((packed));
104
105struct kvm_vcpu_stat {
106 u32 exit_userspace;
107 u32 exit_external_request;
108 u32 exit_external_interrupt;
109 u32 exit_stop_request;
110 u32 exit_validity;
111 u32 exit_instruction;
112 u32 instruction_lctl;
113 u32 instruction_lctg;
114 u32 exit_program_interruption;
115 u32 exit_instr_and_program;
116 u32 deliver_emergency_signal;
117 u32 deliver_service_signal;
118 u32 deliver_virtio_interrupt;
119 u32 deliver_stop_signal;
120 u32 deliver_prefix_signal;
121 u32 deliver_restart_signal;
122 u32 deliver_program_int;
123 u32 exit_wait_state;
124 u32 instruction_stidp;
125 u32 instruction_spx;
126 u32 instruction_stpx;
127 u32 instruction_stap;
128 u32 instruction_storage_key;
129 u32 instruction_stsch;
130 u32 instruction_chsc;
131 u32 instruction_stsi;
132 u32 instruction_stfl;
133 u32 instruction_sigp_sense;
134 u32 instruction_sigp_emergency;
135 u32 instruction_sigp_stop;
136 u32 instruction_sigp_arch;
137 u32 instruction_sigp_prefix;
138 u32 instruction_sigp_restart;
139 u32 diagnose_44;
140};
141
142struct io_info {
143 __u16 subchannel_id; /* 0x0b8 */
144 __u16 subchannel_nr; /* 0x0ba */
145 __u32 io_int_parm; /* 0x0bc */
146 __u32 io_int_word; /* 0x0c0 */
147};
148
149struct ext_info {
150 __u32 ext_params;
151 __u64 ext_params2;
152};
153
154#define PGM_OPERATION 0x01
155#define PGM_PRIVILEGED_OPERATION 0x02
156#define PGM_EXECUTE 0x03
157#define PGM_PROTECTION 0x04
158#define PGM_ADDRESSING 0x05
159#define PGM_SPECIFICATION 0x06
160#define PGM_DATA 0x07
161
162struct pgm_info {
163 __u16 code;
164};
165
166struct prefix_info {
167 __u32 address;
168};
169
170struct interrupt_info {
171 struct list_head list;
172 u64 type;
173 union {
174 struct io_info io;
175 struct ext_info ext;
176 struct pgm_info pgm;
177 struct prefix_info prefix;
178 };
179};
180
181/* for local_interrupt.action_flags */
182#define ACTION_STORE_ON_STOP 1
183#define ACTION_STOP_ON_STOP 2
184
185struct local_interrupt {
186 spinlock_t lock;
187 struct list_head list;
188 atomic_t active;
189 struct float_interrupt *float_int;
190 int timer_due; /* event indicator for waitqueue below */
191 wait_queue_head_t wq;
192 atomic_t *cpuflags;
193 unsigned int action_bits;
194};
195
196struct float_interrupt {
197 spinlock_t lock;
198 struct list_head list;
199 atomic_t active;
200 int next_rr_cpu;
201 unsigned long idle_mask [(64 + sizeof(long) - 1) / sizeof(long)];
202 struct local_interrupt *local_int[64];
203};
204
205
206struct kvm_vcpu_arch {
207 struct sie_block *sie_block;
208 unsigned long guest_gprs[16];
209 s390_fp_regs host_fpregs;
210 unsigned int host_acrs[NUM_ACRS];
211 s390_fp_regs guest_fpregs;
212 unsigned int guest_acrs[NUM_ACRS];
213 struct local_interrupt local_int;
214 struct timer_list ckc_timer;
215 union {
216 cpuid_t cpu_id;
217 u64 stidp_data;
218 };
219};
220
221struct kvm_vm_stat {
222 u32 remote_tlb_flush;
223};
224
225struct kvm_arch{
226 unsigned long guest_origin;
227 unsigned long guest_memsize;
228 struct sca_block *sca;
229 debug_info_t *dbf;
230 struct float_interrupt float_int;
231};
232
233extern int sie64a(struct sie_block *, __u64 *);
234#endif
diff --git a/include/asm-s390/kvm_para.h b/include/asm-s390/kvm_para.h
new file mode 100644
index 000000000000..2c503796b619
--- /dev/null
+++ b/include/asm-s390/kvm_para.h
@@ -0,0 +1,150 @@
1/*
2 * asm-s390/kvm_para.h - definition for paravirtual devices on s390
3 *
4 * Copyright IBM Corp. 2008
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
11 */
12
13#ifndef __S390_KVM_PARA_H
14#define __S390_KVM_PARA_H
15
16/*
17 * Hypercalls for KVM on s390. The calling convention is similar to the
18 * s390 ABI, so we use R2-R6 for parameters 1-5. In addition we use R1
19 * as hypercall number and R7 as parameter 6. The return value is
20 * written to R2. We use the diagnose instruction as hypercall. To avoid
21 * conflicts with existing diagnoses for LPAR and z/VM, we do not use
22 * the instruction encoded number, but specify the number in R1 and
23 * use 0x500 as KVM hypercall
24 *
25 * Copyright IBM Corp. 2007,2008
26 * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
27 *
28 * This work is licensed under the terms of the GNU GPL, version 2.
29 */
30
31static inline long kvm_hypercall0(unsigned long nr)
32{
33 register unsigned long __nr asm("1") = nr;
34 register long __rc asm("2");
35
36 asm volatile ("diag 2,4,0x500\n"
37 : "=d" (__rc) : "d" (__nr): "memory", "cc");
38 return __rc;
39}
40
41static inline long kvm_hypercall1(unsigned long nr, unsigned long p1)
42{
43 register unsigned long __nr asm("1") = nr;
44 register unsigned long __p1 asm("2") = p1;
45 register long __rc asm("2");
46
47 asm volatile ("diag 2,4,0x500\n"
48 : "=d" (__rc) : "d" (__nr), "0" (__p1) : "memory", "cc");
49 return __rc;
50}
51
52static inline long kvm_hypercall2(unsigned long nr, unsigned long p1,
53 unsigned long p2)
54{
55 register unsigned long __nr asm("1") = nr;
56 register unsigned long __p1 asm("2") = p1;
57 register unsigned long __p2 asm("3") = p2;
58 register long __rc asm("2");
59
60 asm volatile ("diag 2,4,0x500\n"
61 : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2)
62 : "memory", "cc");
63 return __rc;
64}
65
66static inline long kvm_hypercall3(unsigned long nr, unsigned long p1,
67 unsigned long p2, unsigned long p3)
68{
69 register unsigned long __nr asm("1") = nr;
70 register unsigned long __p1 asm("2") = p1;
71 register unsigned long __p2 asm("3") = p2;
72 register unsigned long __p3 asm("4") = p3;
73 register long __rc asm("2");
74
75 asm volatile ("diag 2,4,0x500\n"
76 : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
77 "d" (__p3) : "memory", "cc");
78 return __rc;
79}
80
81
82static inline long kvm_hypercall4(unsigned long nr, unsigned long p1,
83 unsigned long p2, unsigned long p3,
84 unsigned long p4)
85{
86 register unsigned long __nr asm("1") = nr;
87 register unsigned long __p1 asm("2") = p1;
88 register unsigned long __p2 asm("3") = p2;
89 register unsigned long __p3 asm("4") = p3;
90 register unsigned long __p4 asm("5") = p4;
91 register long __rc asm("2");
92
93 asm volatile ("diag 2,4,0x500\n"
94 : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
95 "d" (__p3), "d" (__p4) : "memory", "cc");
96 return __rc;
97}
98
99static inline long kvm_hypercall5(unsigned long nr, unsigned long p1,
100 unsigned long p2, unsigned long p3,
101 unsigned long p4, unsigned long p5)
102{
103 register unsigned long __nr asm("1") = nr;
104 register unsigned long __p1 asm("2") = p1;
105 register unsigned long __p2 asm("3") = p2;
106 register unsigned long __p3 asm("4") = p3;
107 register unsigned long __p4 asm("5") = p4;
108 register unsigned long __p5 asm("6") = p5;
109 register long __rc asm("2");
110
111 asm volatile ("diag 2,4,0x500\n"
112 : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
113 "d" (__p3), "d" (__p4), "d" (__p5) : "memory", "cc");
114 return __rc;
115}
116
117static inline long kvm_hypercall6(unsigned long nr, unsigned long p1,
118 unsigned long p2, unsigned long p3,
119 unsigned long p4, unsigned long p5,
120 unsigned long p6)
121{
122 register unsigned long __nr asm("1") = nr;
123 register unsigned long __p1 asm("2") = p1;
124 register unsigned long __p2 asm("3") = p2;
125 register unsigned long __p3 asm("4") = p3;
126 register unsigned long __p4 asm("5") = p4;
127 register unsigned long __p5 asm("6") = p5;
128 register unsigned long __p6 asm("7") = p6;
129 register long __rc asm("2");
130
131 asm volatile ("diag 2,4,0x500\n"
132 : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
133 "d" (__p3), "d" (__p4), "d" (__p5), "d" (__p6)
134 : "memory", "cc");
135 return __rc;
136}
137
138/* kvm on s390 is always paravirtualization enabled */
139static inline int kvm_para_available(void)
140{
141 return 1;
142}
143
144/* No feature bits are currently assigned for kvm on s390 */
145static inline unsigned int kvm_arch_para_features(void)
146{
147 return 0;
148}
149
150#endif /* __S390_KVM_PARA_H */
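
Given the convention documented above (hypercall number in R1, parameters from R2 on, result returned in R2 via diagnose 0x500), a guest invokes a hypercall like an ordinary function call. A usage sketch; KVM_HC_EXAMPLE is invented for illustration and is not an assigned ABI value:

/* Usage sketch only: KVM_HC_EXAMPLE is a made-up hypercall number. */
#define KVM_HC_EXAMPLE 1UL

static long tell_host(unsigned long token)
{
	/* Emits "diag 2,4,0x500" with 1 in R1 and token in R2;
	 * the host's reply comes back in R2 as the return value. */
	return kvm_hypercall1(KVM_HC_EXAMPLE, token);
}
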
diff --git a/include/asm-s390/kvm_virtio.h b/include/asm-s390/kvm_virtio.h
new file mode 100644
index 000000000000..5c871a990c29
--- /dev/null
+++ b/include/asm-s390/kvm_virtio.h
@@ -0,0 +1,53 @@
1/*
2 * kvm_virtio.h - definition for virtio for kvm on s390
3 *
4 * Copyright IBM Corp. 2008
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
11 */
12
13#ifndef __KVM_S390_VIRTIO_H
14#define __KVM_S390_VIRTIO_H
15
16#include <linux/types.h>
17
18struct kvm_device_desc {
19 /* The device type: console, network, disk etc. Type 0 terminates. */
20 __u8 type;
21 /* The number of virtqueues (first in config array) */
22 __u8 num_vq;
23 /*
24 * The number of bytes of feature bits. Multiply by 2: one for host
25 * features and one for guest acknowledgements.
26 */
27 __u8 feature_len;
28 /* The number of bytes of the config array after virtqueues. */
29 __u8 config_len;
30 /* A status byte, written by the Guest. */
31 __u8 status;
32 __u8 config[0];
33};
34
35/*
36 * This is how we expect the device configuration field for a virtqueue
37 * to be laid out in config space.
38 */
39struct kvm_vqconfig {
40 /* The token returned with an interrupt. Set by the guest */
41 __u64 token;
42 /* The address of the virtio ring */
43 __u64 address;
44 /* The number of entries in the virtio_ring */
45 __u16 num;
46
47};
48
49#define KVM_S390_VIRTIO_NOTIFY 0
50#define KVM_S390_VIRTIO_RESET 1
51#define KVM_S390_VIRTIO_SET_STATUS 2
52
53#endif
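
The layout comments above pin down where each piece of a device descriptor lives: the num_vq virtqueue configs come first in the config array, then two feature_len-byte bitmaps (host features, then guest acknowledgements), then the device config bytes. A sketch of that offset arithmetic; the helper names are assumptions, not the s390 virtio driver's API:

/* Offset sketch derived from the layout comments; helper names assumed. */
static struct kvm_vqconfig *desc_vqs(struct kvm_device_desc *d)
{
	return (struct kvm_vqconfig *)d->config;  /* vqconfigs come first */
}

static __u8 *desc_features(struct kvm_device_desc *d)
{
	/* Feature bitmaps follow the num_vq vqconfig entries. */
	return d->config + d->num_vq * sizeof(struct kvm_vqconfig);
}

static __u8 *desc_config(struct kvm_device_desc *d)
{
	/* Config bytes follow the host and guest feature bitmaps. */
	return desc_features(d) + d->feature_len * 2;
}
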
diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h
index 5de3efb31445..0bc51d52a899 100644
--- a/include/asm-s390/lowcore.h
+++ b/include/asm-s390/lowcore.h
@@ -381,27 +381,32 @@ struct _lowcore
381 /* whether the kernel died with panic() or not */
382 __u32 panic_magic; /* 0xe00 */
383
384 __u8 pad13[0x1200-0xe04]; /* 0xe04 */ 384 __u8 pad13[0x11b8-0xe04]; /* 0xe04 */
385
386 /* 64 bit extparam used for pfault, diag 250 etc */
387 __u64 ext_params2; /* 0x11B8 */
388
389 __u8 pad14[0x1200-0x11C0]; /* 0x11C0 */
390
391 /* System info area */
392
393 __u64 floating_pt_save_area[16]; /* 0x1200 */
394 __u64 gpregs_save_area[16]; /* 0x1280 */
395 __u32 st_status_fixed_logout[4]; /* 0x1300 */
391 __u8 pad14[0x1318-0x1310]; /* 0x1310 */ 396 __u8 pad15[0x1318-0x1310]; /* 0x1310 */
397 __u32 prefixreg_save_area; /* 0x1318 */
398 __u32 fpt_creg_save_area; /* 0x131c */
394 __u8 pad15[0x1324-0x1320]; /* 0x1320 */ 399 __u8 pad16[0x1324-0x1320]; /* 0x1320 */
400 __u32 tod_progreg_save_area; /* 0x1324 */
401 __u32 cpu_timer_save_area[2]; /* 0x1328 */
402 __u32 clock_comp_save_area[2]; /* 0x1330 */
398 __u8 pad16[0x1340-0x1338]; /* 0x1338 */ 403 __u8 pad17[0x1340-0x1338]; /* 0x1338 */
404 __u32 access_regs_save_area[16]; /* 0x1340 */
405 __u64 cregs_save_area[16]; /* 0x1380 */
406
407 /* align to the top of the prefix area */
408
404 __u8 pad17[0x2000-0x1400]; /* 0x1400 */ 409 __u8 pad18[0x2000-0x1400]; /* 0x1400 */
410#endif /* !__s390x__ */
411} __attribute__((packed)); /* End structure*/
412
diff --git a/include/asm-s390/mmu.h b/include/asm-s390/mmu.h
index 1698e29c5b20..5dd5e7b3476f 100644
--- a/include/asm-s390/mmu.h
+++ b/include/asm-s390/mmu.h
@@ -7,6 +7,7 @@ typedef struct {
7 unsigned long asce_bits;
8 unsigned long asce_limit;
9 int noexec;
10 int pgstes;
11} mm_context_t;
12
13#endif
diff --git a/include/asm-s390/mmu_context.h b/include/asm-s390/mmu_context.h
index b5a34c6f91a9..4c2fbf48c9c4 100644
--- a/include/asm-s390/mmu_context.h
+++ b/include/asm-s390/mmu_context.h
@@ -20,7 +20,13 @@ static inline int init_new_context(struct task_struct *tsk,
20#ifdef CONFIG_64BIT
21 mm->context.asce_bits |= _ASCE_TYPE_REGION3;
22#endif
23 mm->context.noexec = s390_noexec; 23 if (current->mm->context.pgstes) {
24 mm->context.noexec = 0;
25 mm->context.pgstes = 1;
26 } else {
27 mm->context.noexec = s390_noexec;
28 mm->context.pgstes = 0;
29 }
30 mm->context.asce_limit = STACK_TOP_MAX;
31 crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
32 return 0;
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 65154dc9a9e5..4c0698c0dda5 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -30,6 +30,7 @@
30 */
31#ifndef __ASSEMBLY__
32#include <linux/mm_types.h>
33#include <asm/bitops.h>
34#include <asm/bug.h>
35#include <asm/processor.h>
36
@@ -258,6 +259,13 @@ extern char empty_zero_page[PAGE_SIZE];
259 * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
260 */
261
262/* Page status table bits for virtualization */
263#define RCP_PCL_BIT 55
264#define RCP_HR_BIT 54
265#define RCP_HC_BIT 53
266#define RCP_GR_BIT 50
267#define RCP_GC_BIT 49
268
269#ifndef __s390x__
270
271/* Bits in the segment table address-space-control-element */
@@ -513,6 +521,48 @@ static inline int pte_file(pte_t pte)
521#define __HAVE_ARCH_PTE_SAME
522#define pte_same(a,b) (pte_val(a) == pte_val(b))
523
524static inline void rcp_lock(pte_t *ptep)
525{
526#ifdef CONFIG_PGSTE
527 unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
528 preempt_disable();
529 while (test_and_set_bit(RCP_PCL_BIT, pgste))
530 ;
531#endif
532}
533
534static inline void rcp_unlock(pte_t *ptep)
535{
536#ifdef CONFIG_PGSTE
537 unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
538 clear_bit(RCP_PCL_BIT, pgste);
539 preempt_enable();
540#endif
541}
542
543/* forward declaration for SetPageUptodate in page-flags.h*/
544static inline void page_clear_dirty(struct page *page);
545#include <linux/page-flags.h>
546
547static inline void ptep_rcp_copy(pte_t *ptep)
548{
549#ifdef CONFIG_PGSTE
550 struct page *page = virt_to_page(pte_val(*ptep));
551 unsigned int skey;
552 unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
553
554 skey = page_get_storage_key(page_to_phys(page));
555 if (skey & _PAGE_CHANGED)
556 set_bit_simple(RCP_GC_BIT, pgste);
557 if (skey & _PAGE_REFERENCED)
558 set_bit_simple(RCP_GR_BIT, pgste);
559 if (test_and_clear_bit_simple(RCP_HC_BIT, pgste))
560 SetPageDirty(page);
561 if (test_and_clear_bit_simple(RCP_HR_BIT, pgste))
562 SetPageReferenced(page);
563#endif
564}
565
566/*
567 * query functions pte_write/pte_dirty/pte_young only work if
568 * pte_present() is true. Undefined behaviour if not..
@@ -599,6 +649,8 @@ static inline void pmd_clear(pmd_t *pmd)
649
650static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
651{
652 if (mm->context.pgstes)
653 ptep_rcp_copy(ptep);
654 pte_val(*ptep) = _PAGE_TYPE_EMPTY;
655 if (mm->context.noexec)
656 pte_val(ptep[PTRS_PER_PTE]) = _PAGE_TYPE_EMPTY;
@@ -667,6 +719,24 @@ static inline pte_t pte_mkyoung(pte_t pte)
719static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
720 unsigned long addr, pte_t *ptep)
721{
722#ifdef CONFIG_PGSTE
723 unsigned long physpage;
724 int young;
725 unsigned long *pgste;
726
727 if (!vma->vm_mm->context.pgstes)
728 return 0;
729 physpage = pte_val(*ptep) & PAGE_MASK;
730 pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
731
732 young = ((page_get_storage_key(physpage) & _PAGE_REFERENCED) != 0);
733 rcp_lock(ptep);
734 if (young)
735 set_bit_simple(RCP_GR_BIT, pgste);
736 young |= test_and_clear_bit_simple(RCP_HR_BIT, pgste);
737 rcp_unlock(ptep);
738 return young;
739#endif
740 return 0;
741}
742
@@ -674,7 +744,13 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
744static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
745 unsigned long address, pte_t *ptep)
746{
677 /* No need to flush TLB; bits are in storage key */ 747 /* No need to flush TLB
748 * On s390 reference bits are in storage key and never in TLB
749 * With virtualization we handle the reference bit; without it
750 * we can simply return */
751#ifdef CONFIG_PGSTE
752 return ptep_test_and_clear_young(vma, address, ptep);
753#endif
754 return 0;
755}
756
@@ -693,15 +769,25 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
769 : "=m" (*ptep) : "m" (*ptep),
770 "a" (pto), "a" (address));
771 }
696 pte_val(*ptep) = _PAGE_TYPE_EMPTY;
772}
773
774static inline void ptep_invalidate(struct mm_struct *mm,
775 unsigned long address, pte_t *ptep)
776{
777 if (mm->context.pgstes) {
778 rcp_lock(ptep);
779 __ptep_ipte(address, ptep);
780 ptep_rcp_copy(ptep);
781 pte_val(*ptep) = _PAGE_TYPE_EMPTY;
782 rcp_unlock(ptep);
783 return;
784 }
785 __ptep_ipte(address, ptep);
703 if (mm->context.noexec) 786 pte_val(*ptep) = _PAGE_TYPE_EMPTY;
787 if (mm->context.noexec) {
788 __ptep_ipte(address, ptep + PTRS_PER_PTE);
789 pte_val(*(ptep + PTRS_PER_PTE)) = _PAGE_TYPE_EMPTY;
790 }
791}
792
793/*
@@ -966,6 +1052,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
1052
1053extern int add_shared_memory(unsigned long start, unsigned long size);
1054extern int remove_shared_memory(unsigned long start, unsigned long size);
1055extern int s390_enable_sie(void);
1056
1057/*
1058 * No page table caches to initialise
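
rcp_lock()/rcp_unlock() above turn bit 55 of the pgste word into a one-bit spinlock, and ptep_rcp_copy() merges the hardware storage key's referenced/changed state into the guest bits while folding the host bits into struct page flags. The lock shape, as a standalone userspace sketch with C11 atomics standing in for the kernel's test_and_set_bit()/clear_bit():

/* Standalone sketch of the pgste bit lock; not kernel code. */
#include <stdatomic.h>

static _Atomic unsigned long pgste_sketch;  /* stand-in for the pgste word */
#define PCL_BIT 55                          /* mirrors RCP_PCL_BIT */

static void pgste_lock(void)
{
	/* Spin until the fetch shows the lock bit was previously clear. */
	while (atomic_fetch_or(&pgste_sketch, 1UL << PCL_BIT) & (1UL << PCL_BIT))
		;
}

static void pgste_unlock(void)
{
	atomic_fetch_and(&pgste_sketch, ~(1UL << PCL_BIT));
}
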
diff --git a/include/asm-s390/setup.h b/include/asm-s390/setup.h
index a76a6b8fd887..aaf4b518b940 100644
--- a/include/asm-s390/setup.h
+++ b/include/asm-s390/setup.h
@@ -62,6 +62,7 @@ extern unsigned long machine_flags;
62#define MACHINE_IS_VM (machine_flags & 1)
63#define MACHINE_IS_P390 (machine_flags & 4)
64#define MACHINE_HAS_MVPG (machine_flags & 16)
65#define MACHINE_IS_KVM (machine_flags & 64)
66#define MACHINE_HAS_IDTE (machine_flags & 128)
67#define MACHINE_HAS_DIAG9C (machine_flags & 256)
68
diff --git a/include/asm-sh/bitops.h b/include/asm-sh/bitops.h
index b6ba5a60dec2..d7d382f63ee5 100644
--- a/include/asm-sh/bitops.h
+++ b/include/asm-sh/bitops.h
@@ -95,6 +95,7 @@ static inline unsigned long ffz(unsigned long word)
95#include <asm-generic/bitops/ext2-atomic.h>
96#include <asm-generic/bitops/minix.h>
97#include <asm-generic/bitops/fls.h>
98#include <asm-generic/bitops/__fls.h>
99#include <asm-generic/bitops/fls64.h>
100
101#endif /* __KERNEL__ */
diff --git a/include/asm-sparc64/bitops.h b/include/asm-sparc64/bitops.h
index 982ce8992b91..11f9d8146cdf 100644
--- a/include/asm-sparc64/bitops.h
+++ b/include/asm-sparc64/bitops.h
@@ -34,6 +34,7 @@ extern void change_bit(unsigned long nr, volatile unsigned long *addr);
34#include <asm-generic/bitops/ffz.h>
35#include <asm-generic/bitops/__ffs.h>
36#include <asm-generic/bitops/fls.h>
37#include <asm-generic/bitops/__fls.h>
38#include <asm-generic/bitops/fls64.h>
39
40#ifdef __KERNEL__
diff --git a/include/asm-x86/bios_ebda.h b/include/asm-x86/bios_ebda.h
index 9cbd9a668af8..b4a46b7be794 100644
--- a/include/asm-x86/bios_ebda.h
+++ b/include/asm-x86/bios_ebda.h
@@ -1,6 +1,8 @@
1#ifndef _MACH_BIOS_EBDA_H
2#define _MACH_BIOS_EBDA_H
3
4#include <asm/io.h>
5
6/*
7 * there is a real-mode segmented pointer pointing to the
8 * 4K EBDA area at 0x40E.
diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h
index 1ae7b270a1ef..b81a4d4d3337 100644
--- a/include/asm-x86/bitops.h
+++ b/include/asm-x86/bitops.h
@@ -62,12 +62,9 @@ static inline void set_bit(int nr, volatile void *addr)
62 */
63static inline void __set_bit(int nr, volatile void *addr)
64{
65 asm volatile("bts %1,%0" 65 asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
66 : ADDR
67 : "Ir" (nr) : "memory");
66}
67
70
68/**
69 * clear_bit - Clears a bit in memory
70 * @nr: Bit to clear
@@ -297,19 +294,145 @@ static inline int variable_test_bit(int nr, volatile const void *addr)
294static int test_bit(int nr, const volatile unsigned long *addr);
295#endif
296
300#define test_bit(nr,addr) \ 297#define test_bit(nr, addr) \
301 (__builtin_constant_p(nr) ? \ 298 (__builtin_constant_p((nr)) \
302 constant_test_bit((nr),(addr)) : \ 299 ? constant_test_bit((nr), (addr)) \
303 variable_test_bit((nr),(addr))) 300 : variable_test_bit((nr), (addr)))
301
302/**
303 * __ffs - find first set bit in word
304 * @word: The word to search
305 *
306 * Undefined if no bit exists, so code should check against 0 first.
307 */
308static inline unsigned long __ffs(unsigned long word)
309{
310 asm("bsf %1,%0"
311 : "=r" (word)
312 : "rm" (word));
313 return word;
314}
315
316/**
317 * ffz - find first zero bit in word
318 * @word: The word to search
319 *
320 * Undefined if no zero exists, so code should check against ~0UL first.
321 */
322static inline unsigned long ffz(unsigned long word)
323{
324 asm("bsf %1,%0"
325 : "=r" (word)
326 : "r" (~word));
327 return word;
328}
329
330/*
331 * __fls - find last set bit in word
332 * @word: The word to search
333 *
334 * Undefined if no set bit exists, so code should check against 0 first.
335 */
336static inline unsigned long __fls(unsigned long word)
337{
338 asm("bsr %1,%0"
339 : "=r" (word)
340 : "rm" (word));
341 return word;
342}
343
344#ifdef __KERNEL__
345/**
346 * ffs - find first set bit in word
347 * @x: the word to search
348 *
349 * This is defined the same way as the libc and compiler builtin ffs
350 * routines, therefore differs in spirit from the other bitops.
351 *
352 * ffs(value) returns 0 if value is 0 or the position of the first
353 * set bit if value is nonzero. The first (least significant) bit
354 * is at position 1.
355 */
356static inline int ffs(int x)
357{
358 int r;
359#ifdef CONFIG_X86_CMOV
360 asm("bsfl %1,%0\n\t"
361 "cmovzl %2,%0"
362 : "=r" (r) : "rm" (x), "r" (-1));
363#else
364 asm("bsfl %1,%0\n\t"
365 "jnz 1f\n\t"
366 "movl $-1,%0\n"
367 "1:" : "=r" (r) : "rm" (x));
368#endif
369 return r + 1;
370}
371
372/**
373 * fls - find last set bit in word
374 * @x: the word to search
375 *
376 * This is defined in a similar way as the libc and compiler builtin
377 * ffs, but returns the position of the most significant set bit.
378 *
379 * fls(value) returns 0 if value is 0 or the position of the last
380 * set bit if value is nonzero. The last (most significant) bit is
381 * at position 32.
382 */
383static inline int fls(int x)
384{
385 int r;
386#ifdef CONFIG_X86_CMOV
387 asm("bsrl %1,%0\n\t"
388 "cmovzl %2,%0"
389 : "=&r" (r) : "rm" (x), "rm" (-1));
390#else
391 asm("bsrl %1,%0\n\t"
392 "jnz 1f\n\t"
393 "movl $-1,%0\n"
394 "1:" : "=r" (r) : "rm" (x));
395#endif
396 return r + 1;
397}
398#endif /* __KERNEL__ */
399
400#undef BASE_ADDR
401#undef BIT_ADDR
402#undef ADDR
403
309#ifdef CONFIG_X86_32 404static inline void set_bit_string(unsigned long *bitmap,
310# include "bitops_32.h" 405 unsigned long i, int len)
311#else 406{
312# include "bitops_64.h" 407 unsigned long end = i + len;
313#endif 408 while (i < end) {
409 __set_bit(i, bitmap);
410 i++;
411 }
412}
413
414#ifdef __KERNEL__
415
416#include <asm-generic/bitops/sched.h>
417
418#define ARCH_HAS_FAST_MULTIPLIER 1
419
420#include <asm-generic/bitops/hweight.h>
421
422#endif /* __KERNEL__ */
423
424#include <asm-generic/bitops/fls64.h>
425
426#ifdef __KERNEL__
427
428#include <asm-generic/bitops/ext2-non-atomic.h>
429
430#define ext2_set_bit_atomic(lock, nr, addr) \
431 test_and_set_bit((nr), (unsigned long *)(addr))
432#define ext2_clear_bit_atomic(lock, nr, addr) \
433 test_and_clear_bit((nr), (unsigned long *)(addr))
434
435#include <asm-generic/bitops/minix.h>
436
437#endif /* __KERNEL__ */
438#endif /* _ASM_X86_BITOPS_H */
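
The ffs()/fls() comments above define a 1-based convention where an input of 0 maps to 0. A runnable userspace check of those return values, using libc's ffs() and a portable shift loop in place of the bsrl implementation:

/* Demonstrates the documented return convention; fls_sketch() is a
 * portable stand-in for the asm version above. */
#include <stdio.h>
#include <strings.h>  /* ffs() */

static int fls_sketch(unsigned int x)
{
	int r = 32;

	if (!x)
		return 0;
	while (!(x & 0x80000000u)) {  /* walk the top bit down */
		x <<= 1;
		r--;
	}
	return r;
}

int main(void)
{
	/* Prints: ffs(0)=0 ffs(0x10)=5 fls(0x10)=5 fls(~0)=32 */
	printf("ffs(0)=%d ffs(0x10)=%d fls(0x10)=%d fls(~0)=%d\n",
	       ffs(0), ffs(0x10), fls_sketch(0x10), fls_sketch(~0u));
	return 0;
}
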
diff --git a/include/asm-x86/bitops_32.h b/include/asm-x86/bitops_32.h
deleted file mode 100644
index 2513a81f82aa..000000000000
--- a/include/asm-x86/bitops_32.h
+++ /dev/null
@@ -1,166 +0,0 @@
1#ifndef _I386_BITOPS_H
2#define _I386_BITOPS_H
3
4/*
5 * Copyright 1992, Linus Torvalds.
6 */
7
8/**
9 * find_first_zero_bit - find the first zero bit in a memory region
10 * @addr: The address to start the search at
11 * @size: The maximum size to search
12 *
13 * Returns the bit number of the first zero bit, not the number of the byte
14 * containing a bit.
15 */
16static inline int find_first_zero_bit(const unsigned long *addr, unsigned size)
17{
18 int d0, d1, d2;
19 int res;
20
21 if (!size)
22 return 0;
23 /* This looks at memory.
24 * Mark it volatile to tell gcc not to move it around
25 */
26 asm volatile("movl $-1,%%eax\n\t"
27 "xorl %%edx,%%edx\n\t"
28 "repe; scasl\n\t"
29 "je 1f\n\t"
30 "xorl -4(%%edi),%%eax\n\t"
31 "subl $4,%%edi\n\t"
32 "bsfl %%eax,%%edx\n"
33 "1:\tsubl %%ebx,%%edi\n\t"
34 "shll $3,%%edi\n\t"
35 "addl %%edi,%%edx"
36 : "=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
37 : "1" ((size + 31) >> 5), "2" (addr),
38 "b" (addr) : "memory");
39 return res;
40}
41
42/**
43 * find_next_zero_bit - find the first zero bit in a memory region
44 * @addr: The address to base the search on
45 * @offset: The bit number to start searching at
46 * @size: The maximum size to search
47 */
48int find_next_zero_bit(const unsigned long *addr, int size, int offset);
49
50/**
51 * __ffs - find first bit in word.
52 * @word: The word to search
53 *
54 * Undefined if no bit exists, so code should check against 0 first.
55 */
56static inline unsigned long __ffs(unsigned long word)
57{
58 __asm__("bsfl %1,%0"
59 :"=r" (word)
60 :"rm" (word));
61 return word;
62}
63
64/**
65 * find_first_bit - find the first set bit in a memory region
66 * @addr: The address to start the search at
67 * @size: The maximum size to search
68 *
69 * Returns the bit number of the first set bit, not the number of the byte
70 * containing a bit.
71 */
72static inline unsigned find_first_bit(const unsigned long *addr, unsigned size)
73{
74 unsigned x = 0;
75
76 while (x < size) {
77 unsigned long val = *addr++;
78 if (val)
79 return __ffs(val) + x;
80 x += sizeof(*addr) << 3;
81 }
82 return x;
83}
84
85/**
86 * find_next_bit - find the first set bit in a memory region
87 * @addr: The address to base the search on
88 * @offset: The bit number to start searching at
89 * @size: The maximum size to search
90 */
91int find_next_bit(const unsigned long *addr, int size, int offset);
92
93/**
94 * ffz - find first zero in word.
95 * @word: The word to search
96 *
97 * Undefined if no zero exists, so code should check against ~0UL first.
98 */
99static inline unsigned long ffz(unsigned long word)
100{
101 __asm__("bsfl %1,%0"
102 :"=r" (word)
103 :"r" (~word));
104 return word;
105}
106
107#ifdef __KERNEL__
108
109#include <asm-generic/bitops/sched.h>
110
111/**
112 * ffs - find first bit set
113 * @x: the word to search
114 *
115 * This is defined the same way as
116 * the libc and compiler builtin ffs routines, therefore
117 * differs in spirit from the above ffz() (man ffs).
118 */
119static inline int ffs(int x)
120{
121 int r;
122
123 __asm__("bsfl %1,%0\n\t"
124 "jnz 1f\n\t"
125 "movl $-1,%0\n"
126 "1:" : "=r" (r) : "rm" (x));
127 return r+1;
128}
129
130/**
131 * fls - find last bit set
132 * @x: the word to search
133 *
134 * This is defined the same way as ffs().
135 */
136static inline int fls(int x)
137{
138 int r;
139
140 __asm__("bsrl %1,%0\n\t"
141 "jnz 1f\n\t"
142 "movl $-1,%0\n"
143 "1:" : "=r" (r) : "rm" (x));
144 return r+1;
145}
146
147#include <asm-generic/bitops/hweight.h>
148
149#endif /* __KERNEL__ */
150
151#include <asm-generic/bitops/fls64.h>
152
153#ifdef __KERNEL__
154
155#include <asm-generic/bitops/ext2-non-atomic.h>
156
157#define ext2_set_bit_atomic(lock, nr, addr) \
158 test_and_set_bit((nr), (unsigned long *)(addr))
159#define ext2_clear_bit_atomic(lock, nr, addr) \
160 test_and_clear_bit((nr), (unsigned long *)(addr))
161
162#include <asm-generic/bitops/minix.h>
163
164#endif /* __KERNEL__ */
165
166#endif /* _I386_BITOPS_H */
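The ffz() docblock in the deleted header spells out the caller contract: the result is undefined for ~0UL, so callers guard first. A hedged sketch of that pattern (values invented, not code from this tree):

    unsigned long word = ~0x80UL;           /* only bit 7 is zero */

    if (word != ~0UL) {
            unsigned long bit = ffz(word);  /* safe; here bit == 7 */
            /* ... use bit ... */
    }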
diff --git a/include/asm-x86/bitops_64.h b/include/asm-x86/bitops_64.h
deleted file mode 100644
index 365f8207ea59..000000000000
--- a/include/asm-x86/bitops_64.h
+++ /dev/null
@@ -1,162 +0,0 @@
1#ifndef _X86_64_BITOPS_H
2#define _X86_64_BITOPS_H
3
4/*
5 * Copyright 1992, Linus Torvalds.
6 */
7
8extern long find_first_zero_bit(const unsigned long *addr, unsigned long size);
9extern long find_next_zero_bit(const unsigned long *addr, long size, long offset);
10extern long find_first_bit(const unsigned long *addr, unsigned long size);
11extern long find_next_bit(const unsigned long *addr, long size, long offset);
12
 13/* return index of first bit set in val or max when no bit is set */
14static inline long __scanbit(unsigned long val, unsigned long max)
15{
16 asm("bsfq %1,%0 ; cmovz %2,%0" : "=&r" (val) : "r" (val), "r" (max));
17 return val;
18}
19
20#define find_next_bit(addr,size,off) \
21((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
22 ((off) + (__scanbit((*(unsigned long *)addr) >> (off),(size)-(off)))) : \
23 find_next_bit(addr,size,off)))
24
25#define find_next_zero_bit(addr,size,off) \
26((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
27 ((off)+(__scanbit(~(((*(unsigned long *)addr)) >> (off)),(size)-(off)))) : \
28 find_next_zero_bit(addr,size,off)))
29
30#define find_first_bit(addr, size) \
31 ((__builtin_constant_p((size)) && (size) <= BITS_PER_LONG \
32 ? (__scanbit(*(unsigned long *)(addr), (size))) \
33 : find_first_bit((addr), (size))))
34
35#define find_first_zero_bit(addr, size) \
36 ((__builtin_constant_p((size)) && (size) <= BITS_PER_LONG \
37 ? (__scanbit(~*(unsigned long *)(addr), (size))) \
38 : find_first_zero_bit((addr), (size))))
39
40static inline void set_bit_string(unsigned long *bitmap, unsigned long i,
41 int len)
42{
43 unsigned long end = i + len;
44 while (i < end) {
45 __set_bit(i, bitmap);
46 i++;
47 }
48}
49
50/**
51 * ffz - find first zero in word.
52 * @word: The word to search
53 *
54 * Undefined if no zero exists, so code should check against ~0UL first.
55 */
56static inline unsigned long ffz(unsigned long word)
57{
58 __asm__("bsfq %1,%0"
59 :"=r" (word)
60 :"r" (~word));
61 return word;
62}
63
64/**
65 * __ffs - find first bit in word.
66 * @word: The word to search
67 *
68 * Undefined if no bit exists, so code should check against 0 first.
69 */
70static inline unsigned long __ffs(unsigned long word)
71{
72 __asm__("bsfq %1,%0"
73 :"=r" (word)
74 :"rm" (word));
75 return word;
76}
77
78/*
 79 * __fls - find last bit set.
 80 * @word: The word to search
 81 *
 82 * Undefined if no bit is set, so code should check against 0 first.
83 */
84static inline unsigned long __fls(unsigned long word)
85{
86 __asm__("bsrq %1,%0"
87 :"=r" (word)
88 :"rm" (word));
89 return word;
90}
91
92#ifdef __KERNEL__
93
94#include <asm-generic/bitops/sched.h>
95
96/**
97 * ffs - find first bit set
98 * @x: the word to search
99 *
100 * This is defined the same way as
101 * the libc and compiler builtin ffs routines, therefore
102 * differs in spirit from the above ffz (man ffs).
103 */
104static inline int ffs(int x)
105{
106 int r;
107
108 __asm__("bsfl %1,%0\n\t"
109 "cmovzl %2,%0"
110 : "=r" (r) : "rm" (x), "r" (-1));
111 return r+1;
112}
113
114/**
115 * fls64 - find last bit set in 64 bit word
116 * @x: the word to search
117 *
118 * This is defined the same way as fls.
119 */
120static inline int fls64(__u64 x)
121{
122 if (x == 0)
123 return 0;
124 return __fls(x) + 1;
125}
126
127/**
128 * fls - find last bit set
129 * @x: the word to search
130 *
131 * This is defined the same way as ffs.
132 */
133static inline int fls(int x)
134{
135 int r;
136
137 __asm__("bsrl %1,%0\n\t"
138 "cmovzl %2,%0"
139 : "=&r" (r) : "rm" (x), "rm" (-1));
140 return r+1;
141}
142
143#define ARCH_HAS_FAST_MULTIPLIER 1
144
145#include <asm-generic/bitops/hweight.h>
146
147#endif /* __KERNEL__ */
148
149#ifdef __KERNEL__
150
151#include <asm-generic/bitops/ext2-non-atomic.h>
152
153#define ext2_set_bit_atomic(lock, nr, addr) \
154 test_and_set_bit((nr), (unsigned long *)(addr))
155#define ext2_clear_bit_atomic(lock, nr, addr) \
156 test_and_clear_bit((nr), (unsigned long *)(addr))
157
158#include <asm-generic/bitops/minix.h>
159
160#endif /* __KERNEL__ */
161
162#endif /* _X86_64_BITOPS_H */
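__scanbit() in the header just removed is the piece worth remembering: it returns the index of the first set bit, or "max" when the word is zero, which is what let the constant-size find_*_bit macros degrade gracefully. A portable reference model (illustrative only):

    /* model of __scanbit(): first set bit in val, or max if val == 0 */
    static unsigned long scanbit_ref(unsigned long val, unsigned long max)
    {
            unsigned long i;

            for (i = 0; val; i++, val >>= 1)
                    if (val & 1)
                            return i;
            return max;
    }

    /* scanbit_ref(0x8, 64) == 3; scanbit_ref(0, 64) == 64 ("not found") */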
diff --git a/include/asm-x86/bootparam.h b/include/asm-x86/bootparam.h
index 51151356840f..e8659909e5f6 100644
--- a/include/asm-x86/bootparam.h
+++ b/include/asm-x86/bootparam.h
@@ -9,6 +9,17 @@
9#include <asm/ist.h> 9#include <asm/ist.h>
10#include <video/edid.h> 10#include <video/edid.h>
11 11
12/* setup data types */
13#define SETUP_NONE 0
14
15/* extensible setup data list node */
16struct setup_data {
17 u64 next;
18 u32 type;
19 u32 len;
20 u8 data[0];
21};
22
12struct setup_header { 23struct setup_header {
13 __u8 setup_sects; 24 __u8 setup_sects;
14 __u16 root_flags; 25 __u16 root_flags;
@@ -46,6 +57,9 @@ struct setup_header {
46 __u32 cmdline_size; 57 __u32 cmdline_size;
47 __u32 hardware_subarch; 58 __u32 hardware_subarch;
48 __u64 hardware_subarch_data; 59 __u64 hardware_subarch_data;
60 __u32 payload_offset;
61 __u32 payload_length;
62 __u64 setup_data;
49} __attribute__((packed)); 63} __attribute__((packed));
50 64
51struct sys_desc_table { 65struct sys_desc_table {
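The setup_data node added above forms a singly linked list threaded through physical memory: setup_header.setup_data holds the physical address of the first node, each node's ->next the following one, and 0 terminates the chain. A hedged kernel-side sketch of a walk; the early_ioremap() mapping step is illustrative and not part of this patch:

    u64 pa = boot_params.hdr.setup_data;

    while (pa) {
            struct setup_data *sd = early_ioremap(pa, sizeof(*sd));

            /* sd->type tags what sd->data[] carries, sd->len its size */
            pa = sd->next;
            early_iounmap(sd, sizeof(*sd));
    }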
diff --git a/include/asm-x86/e820_64.h b/include/asm-x86/e820_64.h
index f478c57eb060..71c4d685d30d 100644
--- a/include/asm-x86/e820_64.h
+++ b/include/asm-x86/e820_64.h
@@ -48,7 +48,8 @@ extern struct e820map e820;
48extern void update_e820(void); 48extern void update_e820(void);
49 49
50extern void reserve_early(unsigned long start, unsigned long end, char *name); 50extern void reserve_early(unsigned long start, unsigned long end, char *name);
51extern void early_res_to_bootmem(void); 51extern void free_early(unsigned long start, unsigned long end);
52extern void early_res_to_bootmem(unsigned long start, unsigned long end);
52 53
53#endif/*!__ASSEMBLY__*/ 54#endif/*!__ASSEMBLY__*/
54 55
diff --git a/include/asm-x86/io_apic.h b/include/asm-x86/io_apic.h
index 0c9e17c73e05..d593e14f0341 100644
--- a/include/asm-x86/io_apic.h
+++ b/include/asm-x86/io_apic.h
@@ -1,7 +1,7 @@
1#ifndef __ASM_IO_APIC_H 1#ifndef __ASM_IO_APIC_H
2#define __ASM_IO_APIC_H 2#define __ASM_IO_APIC_H
3 3
4#include <asm/types.h> 4#include <linux/types.h>
5#include <asm/mpspec.h> 5#include <asm/mpspec.h>
6#include <asm/apicdef.h> 6#include <asm/apicdef.h>
7 7
@@ -110,11 +110,13 @@ extern int nr_ioapic_registers[MAX_IO_APICS];
110 * MP-BIOS irq configuration table structures: 110 * MP-BIOS irq configuration table structures:
111 */ 111 */
112 112
113#define MP_MAX_IOAPIC_PIN 127
114
113struct mp_ioapic_routing { 115struct mp_ioapic_routing {
114 int apic_id; 116 int apic_id;
115 int gsi_base; 117 int gsi_base;
116 int gsi_end; 118 int gsi_end;
117 u32 pin_programmed[4]; 119 DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1);
118}; 120};
119 121
120/* I/O APIC entries */ 122/* I/O APIC entries */
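Turning pin_programmed into a DECLARE_BITMAP() lets callers use the standard bit helpers instead of open-coded word/shift arithmetic on a u32 array. A minimal sketch (helper name invented):

    static void program_pin_once(struct mp_ioapic_routing *r, int pin)
    {
            if (test_bit(pin, r->pin_programmed))
                    return;             /* already programmed */
            /* ... set up the redirection entry for this pin ... */
            set_bit(pin, r->pin_programmed);
    }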
diff --git a/include/asm-x86/kvm.h b/include/asm-x86/kvm.h
index 7a71120426a3..80eefef2cc76 100644
--- a/include/asm-x86/kvm.h
+++ b/include/asm-x86/kvm.h
@@ -188,4 +188,45 @@ struct kvm_cpuid2 {
188 struct kvm_cpuid_entry2 entries[0]; 188 struct kvm_cpuid_entry2 entries[0];
189}; 189};
190 190
191/* for KVM_GET_PIT and KVM_SET_PIT */
192struct kvm_pit_channel_state {
193 __u32 count; /* can be 65536 */
194 __u16 latched_count;
195 __u8 count_latched;
196 __u8 status_latched;
197 __u8 status;
198 __u8 read_state;
199 __u8 write_state;
200 __u8 write_latch;
201 __u8 rw_mode;
202 __u8 mode;
203 __u8 bcd;
204 __u8 gate;
205 __s64 count_load_time;
206};
207
208struct kvm_pit_state {
209 struct kvm_pit_channel_state channels[3];
210};
211
212#define KVM_TRC_INJ_VIRQ (KVM_TRC_HANDLER + 0x02)
213#define KVM_TRC_REDELIVER_EVT (KVM_TRC_HANDLER + 0x03)
214#define KVM_TRC_PEND_INTR (KVM_TRC_HANDLER + 0x04)
215#define KVM_TRC_IO_READ (KVM_TRC_HANDLER + 0x05)
216#define KVM_TRC_IO_WRITE (KVM_TRC_HANDLER + 0x06)
217#define KVM_TRC_CR_READ (KVM_TRC_HANDLER + 0x07)
218#define KVM_TRC_CR_WRITE (KVM_TRC_HANDLER + 0x08)
219#define KVM_TRC_DR_READ (KVM_TRC_HANDLER + 0x09)
220#define KVM_TRC_DR_WRITE (KVM_TRC_HANDLER + 0x0A)
221#define KVM_TRC_MSR_READ (KVM_TRC_HANDLER + 0x0B)
222#define KVM_TRC_MSR_WRITE (KVM_TRC_HANDLER + 0x0C)
223#define KVM_TRC_CPUID (KVM_TRC_HANDLER + 0x0D)
224#define KVM_TRC_INTR (KVM_TRC_HANDLER + 0x0E)
225#define KVM_TRC_NMI (KVM_TRC_HANDLER + 0x0F)
226#define KVM_TRC_VMMCALL (KVM_TRC_HANDLER + 0x10)
227#define KVM_TRC_HLT (KVM_TRC_HANDLER + 0x11)
228#define KVM_TRC_CLTS (KVM_TRC_HANDLER + 0x12)
229#define KVM_TRC_LMSW (KVM_TRC_HANDLER + 0x13)
230#define KVM_TRC_APIC_ACCESS (KVM_TRC_HANDLER + 0x14)
231
191#endif 232#endif
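kvm_pit_state is the payload of the new KVM_GET_PIT/KVM_SET_PIT vm ioctls, one kvm_pit_channel_state per PIT channel. A hedged userspace sketch (vm_fd obtained from KVM_CREATE_VM; error handling trimmed):

    struct kvm_pit_state pit;

    if (ioctl(vm_fd, KVM_GET_PIT, &pit) == 0) {
            /* e.g. inspect channel 0 as last programmed by the guest */
            printf("ch0: count=%u mode=%u\n",
                   pit.channels[0].count, pit.channels[0].mode);
            ioctl(vm_fd, KVM_SET_PIT, &pit);  /* write back unchanged */
    }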
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 68ee390b2844..9d963cd6533c 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -20,6 +20,13 @@
20 20
21#include <asm/desc.h> 21#include <asm/desc.h>
22 22
23#define KVM_MAX_VCPUS 16
24#define KVM_MEMORY_SLOTS 32
 25/* memory slots that are not exposed to userspace */
26#define KVM_PRIVATE_MEM_SLOTS 4
27
28#define KVM_PIO_PAGE_OFFSET 1
29
23#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1) 30#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
24#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD)) 31#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
25#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \ 32#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \
@@ -39,6 +46,13 @@
39#define INVALID_PAGE (~(hpa_t)0) 46#define INVALID_PAGE (~(hpa_t)0)
40#define UNMAPPED_GVA (~(gpa_t)0) 47#define UNMAPPED_GVA (~(gpa_t)0)
41 48
49/* shadow tables are PAE even on non-PAE hosts */
50#define KVM_HPAGE_SHIFT 21
51#define KVM_HPAGE_SIZE (1UL << KVM_HPAGE_SHIFT)
52#define KVM_HPAGE_MASK (~(KVM_HPAGE_SIZE - 1))
53
54#define KVM_PAGES_PER_HPAGE (KVM_HPAGE_SIZE / PAGE_SIZE)
55
42#define DE_VECTOR 0 56#define DE_VECTOR 0
43#define UD_VECTOR 6 57#define UD_VECTOR 6
44#define NM_VECTOR 7 58#define NM_VECTOR 7
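The arithmetic behind the new huge-page constants: KVM_HPAGE_SIZE is 1 << 21 = 2 MiB (a PAE large page), so with 4 KiB base pages KVM_PAGES_PER_HPAGE is 2 MiB / 4 KiB = 512. A one-line alignment check built on them (hypothetical helper, not in this patch):

    /* does this gfn start a 2 MiB large-page frame? */
    static inline int gfn_is_hpage_aligned(u64 gfn)
    {
            return (gfn & (KVM_PAGES_PER_HPAGE - 1)) == 0;
    }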
@@ -48,6 +62,7 @@
48#define SS_VECTOR 12 62#define SS_VECTOR 12
49#define GP_VECTOR 13 63#define GP_VECTOR 13
50#define PF_VECTOR 14 64#define PF_VECTOR 14
65#define MC_VECTOR 18
51 66
52#define SELECTOR_TI_MASK (1 << 2) 67#define SELECTOR_TI_MASK (1 << 2)
53#define SELECTOR_RPL_MASK 0x03 68#define SELECTOR_RPL_MASK 0x03
@@ -58,7 +73,8 @@
58 73
59#define KVM_PERMILLE_MMU_PAGES 20 74#define KVM_PERMILLE_MMU_PAGES 20
60#define KVM_MIN_ALLOC_MMU_PAGES 64 75#define KVM_MIN_ALLOC_MMU_PAGES 64
61#define KVM_NUM_MMU_PAGES 1024 76#define KVM_MMU_HASH_SHIFT 10
77#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
62#define KVM_MIN_FREE_MMU_PAGES 5 78#define KVM_MIN_FREE_MMU_PAGES 5
63#define KVM_REFILL_PAGES 25 79#define KVM_REFILL_PAGES 25
64#define KVM_MAX_CPUID_ENTRIES 40 80#define KVM_MAX_CPUID_ENTRIES 40
@@ -106,6 +122,12 @@ enum {
106 122
107#define KVM_NR_MEM_OBJS 40 123#define KVM_NR_MEM_OBJS 40
108 124
125struct kvm_guest_debug {
126 int enabled;
127 unsigned long bp[4];
128 int singlestep;
129};
130
109/* 131/*
110 * We don't want allocation failures within the mmu code, so we preallocate 132 * We don't want allocation failures within the mmu code, so we preallocate
111 * enough memory for a single page fault in a cache. 133 * enough memory for a single page fault in a cache.
@@ -140,6 +162,7 @@ union kvm_mmu_page_role {
140 unsigned pad_for_nice_hex_output:6; 162 unsigned pad_for_nice_hex_output:6;
141 unsigned metaphysical:1; 163 unsigned metaphysical:1;
142 unsigned access:3; 164 unsigned access:3;
165 unsigned invalid:1;
143 }; 166 };
144}; 167};
145 168
@@ -204,11 +227,6 @@ struct kvm_vcpu_arch {
204 u64 shadow_efer; 227 u64 shadow_efer;
205 u64 apic_base; 228 u64 apic_base;
206 struct kvm_lapic *apic; /* kernel irqchip context */ 229 struct kvm_lapic *apic; /* kernel irqchip context */
207#define VCPU_MP_STATE_RUNNABLE 0
208#define VCPU_MP_STATE_UNINITIALIZED 1
209#define VCPU_MP_STATE_INIT_RECEIVED 2
210#define VCPU_MP_STATE_SIPI_RECEIVED 3
211#define VCPU_MP_STATE_HALTED 4
212 int mp_state; 230 int mp_state;
213 int sipi_vector; 231 int sipi_vector;
214 u64 ia32_misc_enable_msr; 232 u64 ia32_misc_enable_msr;
@@ -226,8 +244,9 @@ struct kvm_vcpu_arch {
226 u64 *last_pte_updated; 244 u64 *last_pte_updated;
227 245
228 struct { 246 struct {
229 gfn_t gfn; /* presumed gfn during guest pte update */ 247 gfn_t gfn; /* presumed gfn during guest pte update */
230 struct page *page; /* page corresponding to that gfn */ 248 pfn_t pfn; /* pfn corresponding to that gfn */
249 int largepage;
231 } update_pte; 250 } update_pte;
232 251
233 struct i387_fxsave_struct host_fx_image; 252 struct i387_fxsave_struct host_fx_image;
@@ -261,6 +280,11 @@ struct kvm_vcpu_arch {
261 /* emulate context */ 280 /* emulate context */
262 281
263 struct x86_emulate_ctxt emulate_ctxt; 282 struct x86_emulate_ctxt emulate_ctxt;
283
284 gpa_t time;
285 struct kvm_vcpu_time_info hv_clock;
286 unsigned int time_offset;
287 struct page *time_page;
264}; 288};
265 289
266struct kvm_mem_alias { 290struct kvm_mem_alias {
@@ -283,10 +307,13 @@ struct kvm_arch{
283 struct list_head active_mmu_pages; 307 struct list_head active_mmu_pages;
284 struct kvm_pic *vpic; 308 struct kvm_pic *vpic;
285 struct kvm_ioapic *vioapic; 309 struct kvm_ioapic *vioapic;
310 struct kvm_pit *vpit;
286 311
287 int round_robin_prev_vcpu; 312 int round_robin_prev_vcpu;
288 unsigned int tss_addr; 313 unsigned int tss_addr;
289 struct page *apic_access_page; 314 struct page *apic_access_page;
315
316 gpa_t wall_clock;
290}; 317};
291 318
292struct kvm_vm_stat { 319struct kvm_vm_stat {
@@ -298,6 +325,7 @@ struct kvm_vm_stat {
298 u32 mmu_recycled; 325 u32 mmu_recycled;
299 u32 mmu_cache_miss; 326 u32 mmu_cache_miss;
300 u32 remote_tlb_flush; 327 u32 remote_tlb_flush;
328 u32 lpages;
301}; 329};
302 330
303struct kvm_vcpu_stat { 331struct kvm_vcpu_stat {
@@ -320,6 +348,7 @@ struct kvm_vcpu_stat {
320 u32 fpu_reload; 348 u32 fpu_reload;
321 u32 insn_emulation; 349 u32 insn_emulation;
322 u32 insn_emulation_fail; 350 u32 insn_emulation_fail;
351 u32 hypercalls;
323}; 352};
324 353
325struct descriptor_table { 354struct descriptor_table {
@@ -355,6 +384,7 @@ struct kvm_x86_ops {
355 u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg); 384 u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
356 void (*get_segment)(struct kvm_vcpu *vcpu, 385 void (*get_segment)(struct kvm_vcpu *vcpu,
357 struct kvm_segment *var, int seg); 386 struct kvm_segment *var, int seg);
387 int (*get_cpl)(struct kvm_vcpu *vcpu);
358 void (*set_segment)(struct kvm_vcpu *vcpu, 388 void (*set_segment)(struct kvm_vcpu *vcpu,
359 struct kvm_segment *var, int seg); 389 struct kvm_segment *var, int seg);
360 void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l); 390 void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
@@ -410,6 +440,15 @@ void kvm_mmu_zap_all(struct kvm *kvm);
410unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm); 440unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
411void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages); 441void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
412 442
443int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
444
445int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
446 const void *val, int bytes);
447int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
448 gpa_t addr, unsigned long *ret);
449
450extern bool tdp_enabled;
451
413enum emulation_result { 452enum emulation_result {
414 EMULATE_DONE, /* no further processing */ 453 EMULATE_DONE, /* no further processing */
415 EMULATE_DO_MMIO, /* kvm_run filled with mmio request */ 454 EMULATE_DO_MMIO, /* kvm_run filled with mmio request */
@@ -429,6 +468,7 @@ void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
429unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr); 468unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
430void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value, 469void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
431 unsigned long *rflags); 470 unsigned long *rflags);
471void kvm_enable_efer_bits(u64);
432int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data); 472int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
433int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data); 473int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
434 474
@@ -448,12 +488,14 @@ int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
448int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, 488int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
449 unsigned long value); 489 unsigned long value);
450 490
451void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0); 491int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason);
452void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr0); 492
453void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr0); 493void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
454void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr0); 494void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
455unsigned long get_cr8(struct kvm_vcpu *vcpu); 495void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
456void lmsw(struct kvm_vcpu *vcpu, unsigned long msw); 496void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
497unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
498void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
457void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l); 499void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
458 500
459int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata); 501int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
@@ -491,6 +533,8 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu);
491 533
492int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code); 534int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);
493 535
536void kvm_enable_tdp(void);
537
494int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3); 538int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
495int complete_pio(struct kvm_vcpu *vcpu); 539int complete_pio(struct kvm_vcpu *vcpu);
496 540
@@ -600,6 +644,7 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
600#define ASM_VMX_VMWRITE_RSP_RDX ".byte 0x0f, 0x79, 0xd4" 644#define ASM_VMX_VMWRITE_RSP_RDX ".byte 0x0f, 0x79, 0xd4"
601#define ASM_VMX_VMXOFF ".byte 0x0f, 0x01, 0xc4" 645#define ASM_VMX_VMXOFF ".byte 0x0f, 0x01, 0xc4"
602#define ASM_VMX_VMXON_RAX ".byte 0xf3, 0x0f, 0xc7, 0x30" 646#define ASM_VMX_VMXON_RAX ".byte 0xf3, 0x0f, 0xc7, 0x30"
647#define ASM_VMX_INVVPID ".byte 0x66, 0x0f, 0x38, 0x81, 0x08"
603 648
604#define MSR_IA32_TIME_STAMP_COUNTER 0x010 649#define MSR_IA32_TIME_STAMP_COUNTER 0x010
605 650
@@ -610,4 +655,30 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
610#define RMODE_TSS_SIZE \ 655#define RMODE_TSS_SIZE \
611 (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1) 656 (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
612 657
658enum {
659 TASK_SWITCH_CALL = 0,
660 TASK_SWITCH_IRET = 1,
661 TASK_SWITCH_JMP = 2,
662 TASK_SWITCH_GATE = 3,
663};
664
665#define KVMTRACE_5D(evt, vcpu, d1, d2, d3, d4, d5, name) \
666 trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
667 vcpu, 5, d1, d2, d3, d4, d5)
668#define KVMTRACE_4D(evt, vcpu, d1, d2, d3, d4, name) \
669 trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
670 vcpu, 4, d1, d2, d3, d4, 0)
671#define KVMTRACE_3D(evt, vcpu, d1, d2, d3, name) \
672 trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
673 vcpu, 3, d1, d2, d3, 0, 0)
674#define KVMTRACE_2D(evt, vcpu, d1, d2, name) \
675 trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
676 vcpu, 2, d1, d2, 0, 0, 0)
677#define KVMTRACE_1D(evt, vcpu, d1, name) \
678 trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
679 vcpu, 1, d1, 0, 0, 0, 0)
680#define KVMTRACE_0D(evt, vcpu, name) \
681 trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
682 vcpu, 0, 0, 0, 0, 0, 0)
683
613#endif 684#endif
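All of the KVMTRACE_nD() wrappers above expand to the same 8-slot trace_mark() format string and zero-pad the unused payload words; n only selects how many of the five data slots are meaningful. A hedged usage sketch (event choice and values invented):

    /* log a guest port read: two payload words, port and size */
    KVMTRACE_2D(IO_READ, vcpu, port, size, handler);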
diff --git a/include/asm-x86/kvm_para.h b/include/asm-x86/kvm_para.h
index c6f3fd8d8c53..509845942070 100644
--- a/include/asm-x86/kvm_para.h
+++ b/include/asm-x86/kvm_para.h
@@ -10,10 +10,65 @@
10 * paravirtualization, the appropriate feature bit should be checked. 10 * paravirtualization, the appropriate feature bit should be checked.
11 */ 11 */
12#define KVM_CPUID_FEATURES 0x40000001 12#define KVM_CPUID_FEATURES 0x40000001
13#define KVM_FEATURE_CLOCKSOURCE 0
14#define KVM_FEATURE_NOP_IO_DELAY 1
15#define KVM_FEATURE_MMU_OP 2
16
17#define MSR_KVM_WALL_CLOCK 0x11
18#define MSR_KVM_SYSTEM_TIME 0x12
19
20#define KVM_MAX_MMU_OP_BATCH 32
21
22/* Operations for KVM_HC_MMU_OP */
23#define KVM_MMU_OP_WRITE_PTE 1
24#define KVM_MMU_OP_FLUSH_TLB 2
25#define KVM_MMU_OP_RELEASE_PT 3
26
27/* Payload for KVM_HC_MMU_OP */
28struct kvm_mmu_op_header {
29 __u32 op;
30 __u32 pad;
31};
32
33struct kvm_mmu_op_write_pte {
34 struct kvm_mmu_op_header header;
35 __u64 pte_phys;
36 __u64 pte_val;
37};
38
39struct kvm_mmu_op_flush_tlb {
40 struct kvm_mmu_op_header header;
41};
42
43struct kvm_mmu_op_release_pt {
44 struct kvm_mmu_op_header header;
45 __u64 pt_phys;
46};
13 47
14#ifdef __KERNEL__ 48#ifdef __KERNEL__
15#include <asm/processor.h> 49#include <asm/processor.h>
16 50
51/* xen binary-compatible interface. See xen headers for details */
52struct kvm_vcpu_time_info {
53 uint32_t version;
54 uint32_t pad0;
55 uint64_t tsc_timestamp;
56 uint64_t system_time;
57 uint32_t tsc_to_system_mul;
58 int8_t tsc_shift;
59 int8_t pad[3];
60} __attribute__((__packed__)); /* 32 bytes */
61
62struct kvm_wall_clock {
63 uint32_t wc_version;
64 uint32_t wc_sec;
65 uint32_t wc_nsec;
66} __attribute__((__packed__));
67
68
69extern void kvmclock_init(void);
70
71
17/* This instruction is vmcall. On non-VT architectures, it will generate a 72/* This instruction is vmcall. On non-VT architectures, it will generate a
18 * trap that we will then rewrite to the appropriate instruction. 73 * trap that we will then rewrite to the appropriate instruction.
19 */ 74 */
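kvm_vcpu_time_info follows the Xen clock ABI, and its fields combine as a 32.32 fixed-point TSC-to-nanoseconds conversion. A hedged guest-side sketch; the full 128-bit product and the even/odd "version" retry loop a real reader needs are omitted for brevity:

    static uint64_t pvclock_ns(const struct kvm_vcpu_time_info *ti,
                               uint64_t tsc_now)
    {
            uint64_t delta = tsc_now - ti->tsc_timestamp;

            if (ti->tsc_shift >= 0)
                    delta <<= ti->tsc_shift;
            else
                    delta >>= -ti->tsc_shift;

            /* (delta * tsc_to_system_mul) >> 32, anchored at system_time */
            return ti->system_time +
                   ((delta * (uint64_t)ti->tsc_to_system_mul) >> 32);
    }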
diff --git a/include/asm-x86/mach-default/smpboot_hooks.h b/include/asm-x86/mach-default/smpboot_hooks.h
index 3ff2c5bff93a..56d0e1fa0258 100644
--- a/include/asm-x86/mach-default/smpboot_hooks.h
+++ b/include/asm-x86/mach-default/smpboot_hooks.h
@@ -33,7 +33,7 @@ static inline void smpboot_restore_warm_reset_vector(void)
33 *((volatile long *) phys_to_virt(0x467)) = 0; 33 *((volatile long *) phys_to_virt(0x467)) = 0;
34} 34}
35 35
36static inline void smpboot_setup_io_apic(void) 36static inline void __init smpboot_setup_io_apic(void)
37{ 37{
38 /* 38 /*
39 * Here we can be sure that there is an IO-APIC in the system. Let's 39 * Here we can be sure that there is an IO-APIC in the system. Let's
diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h
index 168b6447cf18..577ab79c4c27 100644
--- a/include/asm-x86/pgtable_32.h
+++ b/include/asm-x86/pgtable_32.h
@@ -198,16 +198,16 @@ do { \
198 */ 198 */
199#define update_mmu_cache(vma, address, pte) do { } while (0) 199#define update_mmu_cache(vma, address, pte) do { } while (0)
200 200
201void native_pagetable_setup_start(pgd_t *base); 201extern void native_pagetable_setup_start(pgd_t *base);
202void native_pagetable_setup_done(pgd_t *base); 202extern void native_pagetable_setup_done(pgd_t *base);
203 203
204#ifndef CONFIG_PARAVIRT 204#ifndef CONFIG_PARAVIRT
205static inline void paravirt_pagetable_setup_start(pgd_t *base) 205static inline void __init paravirt_pagetable_setup_start(pgd_t *base)
206{ 206{
207 native_pagetable_setup_start(base); 207 native_pagetable_setup_start(base);
208} 208}
209 209
210static inline void paravirt_pagetable_setup_done(pgd_t *base) 210static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
211{ 211{
212 native_pagetable_setup_done(base); 212 native_pagetable_setup_done(base);
213} 213}
diff --git a/include/asm-x86/posix_types.h b/include/asm-x86/posix_types.h
index fe312a5ba204..bb7133dc155d 100644
--- a/include/asm-x86/posix_types.h
+++ b/include/asm-x86/posix_types.h
@@ -1,5 +1,11 @@
1#ifdef __KERNEL__ 1#ifdef __KERNEL__
2# if defined(CONFIG_X86_32) || defined(__i386__) 2# ifdef CONFIG_X86_32
3# include "posix_types_32.h"
4# else
5# include "posix_types_64.h"
6# endif
7#else
8# ifdef __i386__
3# include "posix_types_32.h" 9# include "posix_types_32.h"
4# else 10# else
5# include "posix_types_64.h" 11# include "posix_types_64.h"
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index e6bf92ddeb21..2e7974ec77ec 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -118,7 +118,6 @@ struct cpuinfo_x86 {
118#define X86_VENDOR_CYRIX 1 118#define X86_VENDOR_CYRIX 1
119#define X86_VENDOR_AMD 2 119#define X86_VENDOR_AMD 2
120#define X86_VENDOR_UMC 3 120#define X86_VENDOR_UMC 3
121#define X86_VENDOR_NEXGEN 4
122#define X86_VENDOR_CENTAUR 5 121#define X86_VENDOR_CENTAUR 5
123#define X86_VENDOR_TRANSMETA 7 122#define X86_VENDOR_TRANSMETA 7
124#define X86_VENDOR_NSC 8 123#define X86_VENDOR_NSC 8
@@ -723,6 +722,7 @@ static inline void __mwait(unsigned long eax, unsigned long ecx)
723 722
724static inline void __sti_mwait(unsigned long eax, unsigned long ecx) 723static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
725{ 724{
725 trace_hardirqs_on();
726 /* "mwait %eax, %ecx;" */ 726 /* "mwait %eax, %ecx;" */
727 asm volatile("sti; .byte 0x0f, 0x01, 0xc9;" 727 asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
728 :: "a" (eax), "c" (ecx)); 728 :: "a" (eax), "c" (ecx));
diff --git a/include/asm-x86/ptrace.h b/include/asm-x86/ptrace.h
index 24ec061566c5..9f922b0b95d6 100644
--- a/include/asm-x86/ptrace.h
+++ b/include/asm-x86/ptrace.h
@@ -231,6 +231,8 @@ extern int do_get_thread_area(struct task_struct *p, int idx,
231extern int do_set_thread_area(struct task_struct *p, int idx, 231extern int do_set_thread_area(struct task_struct *p, int idx,
232 struct user_desc __user *info, int can_allocate); 232 struct user_desc __user *info, int can_allocate);
233 233
234#define __ARCH_WANT_COMPAT_SYS_PTRACE
235
234#endif /* __KERNEL__ */ 236#endif /* __KERNEL__ */
235 237
236#endif /* !__ASSEMBLY__ */ 238#endif /* !__ASSEMBLY__ */
diff --git a/include/asm-x86/reboot.h b/include/asm-x86/reboot.h
index 6b5233b4f84b..e63741f19392 100644
--- a/include/asm-x86/reboot.h
+++ b/include/asm-x86/reboot.h
@@ -15,5 +15,7 @@ struct machine_ops {
15extern struct machine_ops machine_ops; 15extern struct machine_ops machine_ops;
16 16
17void machine_real_restart(unsigned char *code, int length); 17void machine_real_restart(unsigned char *code, int length);
18void native_machine_crash_shutdown(struct pt_regs *regs);
19void native_machine_shutdown(void);
18 20
19#endif /* _ASM_REBOOT_H */ 21#endif /* _ASM_REBOOT_H */
diff --git a/include/asm-x86/rio.h b/include/asm-x86/rio.h
index 3451c576e6af..c9448bd8968f 100644
--- a/include/asm-x86/rio.h
+++ b/include/asm-x86/rio.h
@@ -60,15 +60,4 @@ enum {
60 ALT_CALGARY = 5, /* Second Planar Calgary */ 60 ALT_CALGARY = 5, /* Second Planar Calgary */
61}; 61};
62 62
63/*
64 * there is a real-mode segmented pointer pointing to the
65 * 4K EBDA area at 0x40E.
66 */
67static inline unsigned long get_bios_ebda(void)
68{
69 unsigned long address = *(unsigned short *)phys_to_virt(0x40EUL);
70 address <<= 4;
71 return address;
72}
73
74#endif /* __ASM_RIO_H */ 63#endif /* __ASM_RIO_H */
diff --git a/include/asm-x86/unistd.h b/include/asm-x86/unistd.h
index effc7ad8e12f..2a58ed3e51d8 100644
--- a/include/asm-x86/unistd.h
+++ b/include/asm-x86/unistd.h
@@ -1,5 +1,11 @@
1#ifdef __KERNEL__ 1#ifdef __KERNEL__
2# if defined(CONFIG_X86_32) || defined(__i386__) 2# ifdef CONFIG_X86_32
3# include "unistd_32.h"
4# else
5# include "unistd_64.h"
6# endif
7#else
8# ifdef __i386__
3# include "unistd_32.h" 9# include "unistd_32.h"
4# else 10# else
5# include "unistd_64.h" 11# include "unistd_64.h"
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index cbb5ccb27de3..bda6f04791d4 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -210,7 +210,6 @@ unifdef-y += hayesesp.h
210unifdef-y += hdlcdrv.h 210unifdef-y += hdlcdrv.h
211unifdef-y += hdlc.h 211unifdef-y += hdlc.h
212unifdef-y += hdreg.h 212unifdef-y += hdreg.h
213unifdef-y += hdsmart.h
214unifdef-y += hid.h 213unifdef-y += hid.h
215unifdef-y += hiddev.h 214unifdef-y += hiddev.h
216unifdef-y += hidraw.h 215unifdef-y += hidraw.h
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 40d54731de7e..48bde600a2db 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -112,4 +112,144 @@ static inline unsigned fls_long(unsigned long l)
112 return fls64(l); 112 return fls64(l);
113} 113}
114 114
115#ifdef __KERNEL__
116#ifdef CONFIG_GENERIC_FIND_FIRST_BIT
117extern unsigned long __find_first_bit(const unsigned long *addr,
118 unsigned long size);
119
120/**
121 * find_first_bit - find the first set bit in a memory region
122 * @addr: The address to start the search at
123 * @size: The maximum size to search
124 *
125 * Returns the bit number of the first set bit.
126 */
127static __always_inline unsigned long
128find_first_bit(const unsigned long *addr, unsigned long size)
129{
130 /* Avoid a function call if the bitmap size is a constant */
131 /* and not bigger than BITS_PER_LONG. */
132
133 /* insert a sentinel so that __ffs returns size if there */
134 /* are no set bits in the bitmap */
135 if (__builtin_constant_p(size) && (size < BITS_PER_LONG))
136 return __ffs((*addr) | (1ul << size));
137
138 /* the result of __ffs(0) is undefined, so it needs to be */
139 /* handled separately */
140 if (__builtin_constant_p(size) && (size == BITS_PER_LONG))
141 return ((*addr) == 0) ? BITS_PER_LONG : __ffs(*addr);
142
143 /* size is not constant or too big */
144 return __find_first_bit(addr, size);
145}
146
147extern unsigned long __find_first_zero_bit(const unsigned long *addr,
148 unsigned long size);
149
150/**
151 * find_first_zero_bit - find the first cleared bit in a memory region
152 * @addr: The address to start the search at
153 * @size: The maximum size to search
154 *
155 * Returns the bit number of the first cleared bit.
156 */
157static __always_inline unsigned long
158find_first_zero_bit(const unsigned long *addr, unsigned long size)
159{
160 /* Avoid a function call if the bitmap size is a constant */
161 /* and not bigger than BITS_PER_LONG. */
162
163 /* insert a sentinel so that __ffs returns size if there */
 164 /* are no zero bits in the bitmap */
165 if (__builtin_constant_p(size) && (size < BITS_PER_LONG)) {
166 return __ffs(~(*addr) | (1ul << size));
167 }
168
169 /* the result of __ffs(0) is undefined, so it needs to be */
170 /* handled separately */
171 if (__builtin_constant_p(size) && (size == BITS_PER_LONG))
172 return (~(*addr) == 0) ? BITS_PER_LONG : __ffs(~(*addr));
173
174 /* size is not constant or too big */
175 return __find_first_zero_bit(addr, size);
176}
177#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
178
179#ifdef CONFIG_GENERIC_FIND_NEXT_BIT
180extern unsigned long __find_next_bit(const unsigned long *addr,
181 unsigned long size, unsigned long offset);
182
183/**
184 * find_next_bit - find the next set bit in a memory region
185 * @addr: The address to base the search on
186 * @offset: The bitnumber to start searching at
187 * @size: The bitmap size in bits
188 */
189static __always_inline unsigned long
190find_next_bit(const unsigned long *addr, unsigned long size,
191 unsigned long offset)
192{
193 unsigned long value;
194
195 /* Avoid a function call if the bitmap size is a constant */
196 /* and not bigger than BITS_PER_LONG. */
197
198 /* insert a sentinel so that __ffs returns size if there */
199 /* are no set bits in the bitmap */
200 if (__builtin_constant_p(size) && (size < BITS_PER_LONG)) {
201 value = (*addr) & ((~0ul) << offset);
202 value |= (1ul << size);
203 return __ffs(value);
204 }
205
206 /* the result of __ffs(0) is undefined, so it needs to be */
207 /* handled separately */
208 if (__builtin_constant_p(size) && (size == BITS_PER_LONG)) {
209 value = (*addr) & ((~0ul) << offset);
210 return (value == 0) ? BITS_PER_LONG : __ffs(value);
211 }
212
213 /* size is not constant or too big */
214 return __find_next_bit(addr, size, offset);
215}
216
217extern unsigned long __find_next_zero_bit(const unsigned long *addr,
218 unsigned long size, unsigned long offset);
219
220/**
221 * find_next_zero_bit - find the next cleared bit in a memory region
222 * @addr: The address to base the search on
223 * @offset: The bitnumber to start searching at
224 * @size: The bitmap size in bits
225 */
226static __always_inline unsigned long
227find_next_zero_bit(const unsigned long *addr, unsigned long size,
228 unsigned long offset)
229{
230 unsigned long value;
231
232 /* Avoid a function call if the bitmap size is a constant */
233 /* and not bigger than BITS_PER_LONG. */
234
235 /* insert a sentinel so that __ffs returns size if there */
 236 /* are no zero bits in the bitmap */
237 if (__builtin_constant_p(size) && (size < BITS_PER_LONG)) {
238 value = (~(*addr)) & ((~0ul) << offset);
239 value |= (1ul << size);
240 return __ffs(value);
241 }
242
243 /* the result of __ffs(0) is undefined, so it needs to be */
244 /* handled separately */
245 if (__builtin_constant_p(size) && (size == BITS_PER_LONG)) {
246 value = (~(*addr)) & ((~0ul) << offset);
247 return (value == 0) ? BITS_PER_LONG : __ffs(value);
248 }
249
250 /* size is not constant or too big */
251 return __find_next_zero_bit(addr, size, offset);
252}
253#endif /* CONFIG_GENERIC_FIND_NEXT_BIT */
254#endif /* __KERNEL__ */
115#endif 255#endif
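The sentinel trick used throughout the new inline find_*_bit() helpers deserves a worked example: for a constant size < BITS_PER_LONG, OR-ing in 1ul << size guarantees __ffs() sees at least one set bit, and it returns exactly "size" (the not-found value) when the real bitmap contributed none. With invented values and size == 4:

    unsigned long v1 = 0x4ul | (1ul << 4); /* 0b10100: __ffs == 2, found  */
    unsigned long v2 = 0x0ul | (1ul << 4); /* 0b10000: __ffs == 4 == size */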
diff --git a/include/linux/bsg.h b/include/linux/bsg.h
index e8406c55c6d3..cf0303a60611 100644
--- a/include/linux/bsg.h
+++ b/include/linux/bsg.h
@@ -56,19 +56,25 @@ struct sg_io_v4 {
56#if defined(CONFIG_BLK_DEV_BSG) 56#if defined(CONFIG_BLK_DEV_BSG)
57struct bsg_class_device { 57struct bsg_class_device {
58 struct device *class_dev; 58 struct device *class_dev;
59 struct device *dev; 59 struct device *parent;
60 int minor; 60 int minor;
61 struct request_queue *queue; 61 struct request_queue *queue;
62 struct kref ref;
63 void (*release)(struct device *);
62}; 64};
63 65
64extern int bsg_register_queue(struct request_queue *, struct device *, const char *); 66extern int bsg_register_queue(struct request_queue *q,
67 struct device *parent, const char *name,
68 void (*release)(struct device *));
65extern void bsg_unregister_queue(struct request_queue *); 69extern void bsg_unregister_queue(struct request_queue *);
66#else 70#else
67static inline int bsg_register_queue(struct request_queue * rq, struct device *dev, const char *name) 71static inline int bsg_register_queue(struct request_queue *q,
72 struct device *parent, const char *name,
73 void (*release)(struct device *))
68{ 74{
69 return 0; 75 return 0;
70} 76}
71static inline void bsg_unregister_queue(struct request_queue *rq) 77static inline void bsg_unregister_queue(struct request_queue *q)
72{ 78{
73} 79}
74#endif 80#endif
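With bsg_class_device now reference-counted, bsg_register_queue() grows a release callback that fires when the last reference is dropped. A hedged caller sketch (driver names invented):

    static void my_bsg_release(struct device *dev)
    {
            /* last bsg reference gone; free driver state tied to dev */
    }

    ret = bsg_register_queue(q, dev, "my_bsg0", my_bsg_release);
    if (ret)
            return ret;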
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index fe23792f05c1..b2fd7547b58d 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -28,9 +28,16 @@
28#define __must_be_array(a) \ 28#define __must_be_array(a) \
29 BUILD_BUG_ON_ZERO(__builtin_types_compatible_p(typeof(a), typeof(&a[0]))) 29 BUILD_BUG_ON_ZERO(__builtin_types_compatible_p(typeof(a), typeof(&a[0])))
30 30
31#define inline inline __attribute__((always_inline)) 31/*
32#define __inline__ __inline__ __attribute__((always_inline)) 32 * Force always-inline if the user requests it so via the .config:
33#define __inline __inline __attribute__((always_inline)) 33 */
34#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
35 !defined(CONFIG_OPTIMIZE_INLINING) && (__GNUC__ >= 4)
36# define inline inline __attribute__((always_inline))
37# define __inline__ __inline__ __attribute__((always_inline))
38# define __inline __inline __attribute__((always_inline))
39#endif
40
34#define __deprecated __attribute__((deprecated)) 41#define __deprecated __attribute__((deprecated))
35#define __packed __attribute__((packed)) 42#define __packed __attribute__((packed))
36#define __weak __attribute__((weak)) 43#define __weak __attribute__((weak))
diff --git a/include/linux/file.h b/include/linux/file.h
index 653477021e4c..69baf5a4f0a5 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -117,7 +117,8 @@ struct task_struct;
117 117
118struct files_struct *get_files_struct(struct task_struct *); 118struct files_struct *get_files_struct(struct task_struct *);
119void put_files_struct(struct files_struct *fs); 119void put_files_struct(struct files_struct *fs);
120void reset_files_struct(struct task_struct *, struct files_struct *); 120void reset_files_struct(struct files_struct *);
121int unshare_files(struct files_struct **);
121 122
122extern struct kmem_cache *files_cachep; 123extern struct kmem_cache *files_cachep;
123 124
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 6556f2f967e5..d6d7c52055c6 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1309,7 +1309,7 @@ struct super_operations {
1309 int (*statfs) (struct dentry *, struct kstatfs *); 1309 int (*statfs) (struct dentry *, struct kstatfs *);
1310 int (*remount_fs) (struct super_block *, int *, char *); 1310 int (*remount_fs) (struct super_block *, int *, char *);
1311 void (*clear_inode) (struct inode *); 1311 void (*clear_inode) (struct inode *);
1312 void (*umount_begin) (struct vfsmount *, int); 1312 void (*umount_begin) (struct super_block *);
1313 1313
1314 int (*show_options)(struct seq_file *, struct vfsmount *); 1314 int (*show_options)(struct seq_file *, struct vfsmount *);
1315 int (*show_stats)(struct seq_file *, struct vfsmount *); 1315 int (*show_stats)(struct seq_file *, struct vfsmount *);
@@ -2034,9 +2034,6 @@ static inline ino_t parent_ino(struct dentry *dentry)
2034 return res; 2034 return res;
2035} 2035}
2036 2036
2037/* kernel/fork.c */
2038extern int unshare_files(void);
2039
2040/* Transaction based IO helpers */ 2037/* Transaction based IO helpers */
2041 2038
2042/* 2039/*
diff --git a/include/linux/hdsmart.h b/include/linux/hdsmart.h
deleted file mode 100644
index 4f4faf9d4238..000000000000
--- a/include/linux/hdsmart.h
+++ /dev/null
@@ -1,126 +0,0 @@
1/*
2 * linux/include/linux/hdsmart.h
3 *
4 * Copyright (C) 1999-2000 Michael Cornwell <cornwell@acm.org>
5 * Copyright (C) 2000 Andre Hedrick <andre@linux-ide.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version.
11 *
12 * You should have received a copy of the GNU General Public License
13 * (for example /usr/src/linux/COPYING); if not, write to the Free
14 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
15 */
16
17#ifndef _LINUX_HDSMART_H
18#define _LINUX_HDSMART_H
19
20#ifndef __KERNEL__
21#define OFFLINE_FULL_SCAN 0
22#define SHORT_SELF_TEST 1
23#define EXTEND_SELF_TEST 2
24#define SHORT_CAPTIVE_SELF_TEST 129
25#define EXTEND_CAPTIVE_SELF_TEST 130
26
 27/* smart_attribute is the vendor-specific attribute in the SFF-8035 spec */
28typedef struct ata_smart_attribute_s {
29 unsigned char id;
30 unsigned short status_flag;
31 unsigned char normalized;
32 unsigned char worse_normal;
33 unsigned char raw[6];
34 unsigned char reserv;
35} __attribute__ ((packed)) ata_smart_attribute_t;
36
 37/* smart_values is the format of the read drive Attribute command */
38typedef struct ata_smart_values_s {
39 unsigned short revnumber;
40 ata_smart_attribute_t vendor_attributes [30];
41 unsigned char offline_data_collection_status;
42 unsigned char self_test_exec_status;
43 unsigned short total_time_to_complete_off_line;
44 unsigned char vendor_specific_366;
45 unsigned char offline_data_collection_capability;
46 unsigned short smart_capability;
47 unsigned char errorlog_capability;
48 unsigned char vendor_specific_371;
49 unsigned char short_test_completion_time;
50 unsigned char extend_test_completion_time;
51 unsigned char reserved_374_385 [12];
52 unsigned char vendor_specific_386_509 [125];
53 unsigned char chksum;
54} __attribute__ ((packed)) ata_smart_values_t;
55
56/* Smart Threshold data structures */
57/* Vendor attribute of SMART Threshold */
58typedef struct ata_smart_threshold_entry_s {
59 unsigned char id;
60 unsigned char normalized_threshold;
61 unsigned char reserved[10];
62} __attribute__ ((packed)) ata_smart_threshold_entry_t;
63
 64/* Format of Read SMART Threshold Command */
65typedef struct ata_smart_thresholds_s {
66 unsigned short revnumber;
67 ata_smart_threshold_entry_t thres_entries[30];
68 unsigned char reserved[149];
69 unsigned char chksum;
70} __attribute__ ((packed)) ata_smart_thresholds_t;
71
72typedef struct ata_smart_errorlog_command_struct_s {
73 unsigned char devicecontrolreg;
74 unsigned char featuresreg;
75 unsigned char sector_count;
76 unsigned char sector_number;
77 unsigned char cylinder_low;
78 unsigned char cylinder_high;
79 unsigned char drive_head;
80 unsigned char commandreg;
81 unsigned int timestamp;
82} __attribute__ ((packed)) ata_smart_errorlog_command_struct_t;
83
84typedef struct ata_smart_errorlog_error_struct_s {
85 unsigned char error_condition;
86 unsigned char extended_error[14];
87 unsigned char state;
88 unsigned short timestamp;
89} __attribute__ ((packed)) ata_smart_errorlog_error_struct_t;
90
91typedef struct ata_smart_errorlog_struct_s {
92 ata_smart_errorlog_command_struct_t commands[6];
93 ata_smart_errorlog_error_struct_t error_struct;
94} __attribute__ ((packed)) ata_smart_errorlog_struct_t;
95
96typedef struct ata_smart_errorlog_s {
97 unsigned char revnumber;
98 unsigned char error_log_pointer;
99 ata_smart_errorlog_struct_t errorlog_struct[5];
100 unsigned short ata_error_count;
101 unsigned short non_fatal_count;
102 unsigned short drive_timeout_count;
103 unsigned char reserved[53];
104 unsigned char chksum;
105} __attribute__ ((packed)) ata_smart_errorlog_t;
106
107typedef struct ata_smart_selftestlog_struct_s {
108 unsigned char selftestnumber;
109 unsigned char selfteststatus;
110 unsigned short timestamp;
111 unsigned char selftestfailurecheckpoint;
112 unsigned int lbafirstfailure;
113 unsigned char vendorspecific[15];
114} __attribute__ ((packed)) ata_smart_selftestlog_struct_t;
115
116typedef struct ata_smart_selftestlog_s {
117 unsigned short revnumber;
118 ata_smart_selftestlog_struct_t selftest_struct[21];
119 unsigned char vendorspecific[2];
120 unsigned char mostrecenttest;
121 unsigned char resevered[2];
122 unsigned char chksum;
123} __attribute__ ((packed)) ata_smart_selftestlog_t;
124#endif /* __KERNEL__ */
125
126#endif /* _LINUX_HDSMART_H */
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 5f3e82ae901a..32fd77bb4436 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -48,13 +48,6 @@ typedef unsigned char byte; /* used everywhere */
48#define ERROR_RECAL 1 /* Recalibrate every 2nd retry */ 48#define ERROR_RECAL 1 /* Recalibrate every 2nd retry */
49 49
50/* 50/*
51 * Tune flags
52 */
53#define IDE_TUNE_NOAUTO 2
54#define IDE_TUNE_AUTO 1
55#define IDE_TUNE_DEFAULT 0
56
57/*
58 * state flags 51 * state flags
59 */ 52 */
60 53
@@ -68,23 +61,30 @@ typedef unsigned char byte; /* used everywhere */
68 */ 61 */
69#define IDE_NR_PORTS (10) 62#define IDE_NR_PORTS (10)
70 63
71#define IDE_DATA_OFFSET (0) 64struct ide_io_ports {
72#define IDE_ERROR_OFFSET (1) 65 unsigned long data_addr;
73#define IDE_NSECTOR_OFFSET (2) 66
74#define IDE_SECTOR_OFFSET (3) 67 union {
75#define IDE_LCYL_OFFSET (4) 68 unsigned long error_addr; /* read: error */
76#define IDE_HCYL_OFFSET (5) 69 unsigned long feature_addr; /* write: feature */
77#define IDE_SELECT_OFFSET (6) 70 };
78#define IDE_STATUS_OFFSET (7) 71
79#define IDE_CONTROL_OFFSET (8) 72 unsigned long nsect_addr;
80#define IDE_IRQ_OFFSET (9) 73 unsigned long lbal_addr;
81 74 unsigned long lbam_addr;
82#define IDE_FEATURE_OFFSET IDE_ERROR_OFFSET 75 unsigned long lbah_addr;
83#define IDE_COMMAND_OFFSET IDE_STATUS_OFFSET 76
84#define IDE_ALTSTATUS_OFFSET IDE_CONTROL_OFFSET 77 unsigned long device_addr;
85#define IDE_IREASON_OFFSET IDE_NSECTOR_OFFSET 78
86#define IDE_BCOUNTL_OFFSET IDE_LCYL_OFFSET 79 union {
 87#define IDE_BCOUNTH_OFFSET IDE_HCYL_OFFSET 80 unsigned long status_addr; /* read: status */
81 unsigned long command_addr; /* write: command */
82 };
83
84 unsigned long ctl_addr;
85
86 unsigned long irq_addr;
87};
88 88
89#define OK_STAT(stat,good,bad) (((stat)&((good)|(bad)))==(good)) 89#define OK_STAT(stat,good,bad) (((stat)&((good)|(bad)))==(good))
90#define BAD_R_STAT (BUSY_STAT | ERR_STAT) 90#define BAD_R_STAT (BUSY_STAT | ERR_STAT)
@@ -163,14 +163,17 @@ typedef u8 hwif_chipset_t;
163 * Structure to hold all information about the location of this port 163 * Structure to hold all information about the location of this port
164 */ 164 */
165typedef struct hw_regs_s { 165typedef struct hw_regs_s {
166 unsigned long io_ports[IDE_NR_PORTS]; /* task file registers */ 166 union {
167 struct ide_io_ports io_ports;
168 unsigned long io_ports_array[IDE_NR_PORTS];
169 };
170
167 int irq; /* our irq number */ 171 int irq; /* our irq number */
168 ide_ack_intr_t *ack_intr; /* acknowledge interrupt */ 172 ide_ack_intr_t *ack_intr; /* acknowledge interrupt */
169 hwif_chipset_t chipset; 173 hwif_chipset_t chipset;
170 struct device *dev; 174 struct device *dev;
171} hw_regs_t; 175} hw_regs_t;
172 176
173struct hwif_s * ide_find_port(unsigned long);
174void ide_init_port_data(struct hwif_s *, unsigned int); 177void ide_init_port_data(struct hwif_s *, unsigned int);
175void ide_init_port_hw(struct hwif_s *, hw_regs_t *); 178void ide_init_port_hw(struct hwif_s *, hw_regs_t *);
176 179
@@ -180,10 +183,10 @@ static inline void ide_std_init_ports(hw_regs_t *hw,
180{ 183{
181 unsigned int i; 184 unsigned int i;
182 185
183 for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) 186 for (i = 0; i <= 7; i++)
184 hw->io_ports[i] = io_addr++; 187 hw->io_ports_array[i] = io_addr++;
185 188
186 hw->io_ports[IDE_CONTROL_OFFSET] = ctl_addr; 189 hw->io_ports.ctl_addr = ctl_addr;
187} 190}
188 191
189#include <asm/ide.h> 192#include <asm/ide.h>
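Because hw_regs_t now overlays the named ide_io_ports fields with the legacy io_ports_array[], index-based and field-based accesses alias the same storage; ide_std_init_ports() above depends on exactly that. A small sketch with the classic primary-port addresses (illustrative values):

    hw_regs_t hw;

    ide_std_init_ports(&hw, 0x1f0, 0x3f6);
    /* io_ports_array[0] and io_ports.data_addr name the same slot */
    BUG_ON(hw.io_ports.data_addr != hw.io_ports_array[0]); /* 0x1f0 */
    BUG_ON(hw.io_ports.ctl_addr != 0x3f6);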
@@ -329,7 +332,6 @@ typedef struct ide_drive_s {
329 unsigned atapi_overlap : 1; /* ATAPI overlap (not supported) */ 332 unsigned atapi_overlap : 1; /* ATAPI overlap (not supported) */
330 unsigned doorlocking : 1; /* for removable only: door lock/unlock works */ 333 unsigned doorlocking : 1; /* for removable only: door lock/unlock works */
331 unsigned nodma : 1; /* disallow DMA */ 334 unsigned nodma : 1; /* disallow DMA */
332 unsigned autotune : 2; /* 0=default, 1=autotune, 2=noautotune */
333 unsigned remap_0_to_1 : 1; /* 0=noremap, 1=remap 0->1 (for EZDrive) */ 335 unsigned remap_0_to_1 : 1; /* 0=noremap, 1=remap 0->1 (for EZDrive) */
 334 unsigned blocked : 1; /* 1=power management told us not to do anything, so sleep nicely */ 336 unsigned blocked : 1; /* 1=power management told us not to do anything, so sleep nicely */
335 unsigned vdma : 1; /* 1=doing PIO over DMA 0=doing normal DMA */ 337 unsigned vdma : 1; /* 1=doing PIO over DMA 0=doing normal DMA */
@@ -388,6 +390,43 @@ typedef struct ide_drive_s {
388 390
389struct ide_port_info; 391struct ide_port_info;
390 392
393struct ide_port_ops {
394 /* host specific initialization of devices on a port */
395 void (*port_init_devs)(struct hwif_s *);
396 /* routine to program host for PIO mode */
397 void (*set_pio_mode)(ide_drive_t *, const u8);
398 /* routine to program host for DMA mode */
399 void (*set_dma_mode)(ide_drive_t *, const u8);
400 /* tweaks hardware to select drive */
401 void (*selectproc)(ide_drive_t *);
402 /* chipset polling based on hba specifics */
403 int (*reset_poll)(ide_drive_t *);
404 /* chipset specific changes to default for device-hba resets */
405 void (*pre_reset)(ide_drive_t *);
406 /* routine to reset controller after a disk reset */
407 void (*resetproc)(ide_drive_t *);
408 /* special host masking for drive selection */
409 void (*maskproc)(ide_drive_t *, int);
410 /* check host's drive quirk list */
411 void (*quirkproc)(ide_drive_t *);
412
413 u8 (*mdma_filter)(ide_drive_t *);
414 u8 (*udma_filter)(ide_drive_t *);
415
416 u8 (*cable_detect)(struct hwif_s *);
417};
418
419struct ide_dma_ops {
420 void (*dma_host_set)(struct ide_drive_s *, int);
421 int (*dma_setup)(struct ide_drive_s *);
422 void (*dma_exec_cmd)(struct ide_drive_s *, u8);
423 void (*dma_start)(struct ide_drive_s *);
424 int (*dma_end)(struct ide_drive_s *);
425 int (*dma_test_irq)(struct ide_drive_s *);
426 void (*dma_lost_irq)(struct ide_drive_s *);
427 void (*dma_timeout)(struct ide_drive_s *);
428};
429
391typedef struct hwif_s { 430typedef struct hwif_s {
392 struct hwif_s *next; /* for linked-list in ide_hwgroup_t */ 431 struct hwif_s *next; /* for linked-list in ide_hwgroup_t */
393 struct hwif_s *mate; /* other hwif from same PCI chip */ 432 struct hwif_s *mate; /* other hwif from same PCI chip */
@@ -396,8 +435,8 @@ typedef struct hwif_s {
396 435
397 char name[6]; /* name of interface, eg. "ide0" */ 436 char name[6]; /* name of interface, eg. "ide0" */
398 437
399 /* task file registers for pata and sata */ 438 struct ide_io_ports io_ports;
400 unsigned long io_ports[IDE_NR_PORTS]; 439
401 unsigned long sata_scr[SATA_NR_PORTS]; 440 unsigned long sata_scr[SATA_NR_PORTS];
402 441
403 ide_drive_t drives[MAX_DRIVES]; /* drive info */ 442 ide_drive_t drives[MAX_DRIVES]; /* drive info */
@@ -421,38 +460,12 @@ typedef struct hwif_s {
421 460
422 struct device *dev; 461 struct device *dev;
423 462
424 const struct ide_port_info *cds; /* chipset device struct */
425
426 ide_ack_intr_t *ack_intr; 463 ide_ack_intr_t *ack_intr;
427 464
428 void (*rw_disk)(ide_drive_t *, struct request *); 465 void (*rw_disk)(ide_drive_t *, struct request *);
429 466
430#if 0 467 const struct ide_port_ops *port_ops;
431 ide_hwif_ops_t *hwifops; 468 const struct ide_dma_ops *dma_ops;
432#else
433 /* host specific initialization of devices on a port */
434 void (*port_init_devs)(struct hwif_s *);
435 /* routine to program host for PIO mode */
436 void (*set_pio_mode)(ide_drive_t *, const u8);
437 /* routine to program host for DMA mode */
438 void (*set_dma_mode)(ide_drive_t *, const u8);
439 /* tweaks hardware to select drive */
440 void (*selectproc)(ide_drive_t *);
441 /* chipset polling based on hba specifics */
442 int (*reset_poll)(ide_drive_t *);
443 /* chipset specific changes to default for device-hba resets */
444 void (*pre_reset)(ide_drive_t *);
445 /* routine to reset controller after a disk reset */
446 void (*resetproc)(ide_drive_t *);
447 /* special host masking for drive selection */
448 void (*maskproc)(ide_drive_t *, int);
449 /* check host's drive quirk list */
450 void (*quirkproc)(ide_drive_t *);
451#endif
452 u8 (*mdma_filter)(ide_drive_t *);
453 u8 (*udma_filter)(ide_drive_t *);
454
455 u8 (*cable_detect)(struct hwif_s *);
456 469
457 void (*ata_input_data)(ide_drive_t *, void *, u32); 470 void (*ata_input_data)(ide_drive_t *, void *, u32);
458 void (*ata_output_data)(ide_drive_t *, void *, u32); 471 void (*ata_output_data)(ide_drive_t *, void *, u32);
@@ -460,15 +473,7 @@ typedef struct hwif_s {
460 void (*atapi_input_bytes)(ide_drive_t *, void *, u32); 473 void (*atapi_input_bytes)(ide_drive_t *, void *, u32);
461 void (*atapi_output_bytes)(ide_drive_t *, void *, u32); 474 void (*atapi_output_bytes)(ide_drive_t *, void *, u32);
462 475
463 void (*dma_host_set)(ide_drive_t *, int);
464 int (*dma_setup)(ide_drive_t *);
465 void (*dma_exec_cmd)(ide_drive_t *, u8);
466 void (*dma_start)(ide_drive_t *);
467 int (*ide_dma_end)(ide_drive_t *drive);
468 int (*ide_dma_test_irq)(ide_drive_t *drive);
469 void (*ide_dma_clear_irq)(ide_drive_t *drive); 476 void (*ide_dma_clear_irq)(ide_drive_t *drive);
470 void (*dma_lost_irq)(ide_drive_t *drive);
471 void (*dma_timeout)(ide_drive_t *drive);
472 477
473 void (*OUTB)(u8 addr, unsigned long port); 478 void (*OUTB)(u8 addr, unsigned long port);
474 void (*OUTBSYNC)(ide_drive_t *drive, u8 addr, unsigned long port); 479 void (*OUTBSYNC)(ide_drive_t *drive, u8 addr, unsigned long port);
@@ -515,14 +520,11 @@ typedef struct hwif_s {
515 unsigned long extra_base; /* extra addr for dma ports */ 520 unsigned long extra_base; /* extra addr for dma ports */
516 unsigned extra_ports; /* number of extra dma ports */ 521 unsigned extra_ports; /* number of extra dma ports */
517 522
518 unsigned noprobe : 1; /* don't probe for this interface */
519 unsigned present : 1; /* this interface exists */ 523 unsigned present : 1; /* this interface exists */
520 unsigned serialized : 1; /* serialized all channel operation */ 524 unsigned serialized : 1; /* serialized all channel operation */
521 unsigned sharing_irq: 1; /* 1 = sharing irq with another hwif */ 525 unsigned sharing_irq: 1; /* 1 = sharing irq with another hwif */
522 unsigned reset : 1; /* reset after probe */
523 unsigned sg_mapped : 1; /* sg_table and sg_nents are ready */ 526 unsigned sg_mapped : 1; /* sg_table and sg_nents are ready */
524 unsigned mmio : 1; /* host uses MMIO */ 527 unsigned mmio : 1; /* host uses MMIO */
525 unsigned straight8 : 1; /* Alan's straight 8 check */
526 528
527 struct device gendev; 529 struct device gendev;
528 struct device *portdev; 530 struct device *portdev;
@@ -703,10 +705,6 @@ void ide_add_generic_settings(ide_drive_t *);
703read_proc_t proc_ide_read_capacity; 705read_proc_t proc_ide_read_capacity;
704read_proc_t proc_ide_read_geometry; 706read_proc_t proc_ide_read_geometry;
705 707
706#ifdef CONFIG_BLK_DEV_IDEPCI
707void ide_pci_create_host_proc(const char *, get_info_t *);
708#endif
709
710/* 708/*
711 * Standard exit stuff: 709 * Standard exit stuff:
712 */ 710 */
@@ -807,8 +805,21 @@ int generic_ide_ioctl(ide_drive_t *, struct file *, struct block_device *, unsig
807#ifndef _IDE_C 805#ifndef _IDE_C
808extern ide_hwif_t ide_hwifs[]; /* master data repository */ 806extern ide_hwif_t ide_hwifs[]; /* master data repository */
809#endif 807#endif
808extern int ide_noacpi;
809extern int ide_acpigtf;
810extern int ide_acpionboot;
810extern int noautodma; 811extern int noautodma;
811 812
813extern int ide_vlb_clk;
814extern int ide_pci_clk;
815
816ide_hwif_t *ide_find_port_slot(const struct ide_port_info *);
817
818static inline ide_hwif_t *ide_find_port(void)
819{
820 return ide_find_port_slot(NULL);
821}
822
812extern int ide_end_request (ide_drive_t *drive, int uptodate, int nrsecs); 823extern int ide_end_request (ide_drive_t *drive, int uptodate, int nrsecs);
813int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq, 824int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
814 int uptodate, int nr_sectors); 825 int uptodate, int nr_sectors);
@@ -1004,10 +1015,15 @@ void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *, int, u8
1004void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *); 1015void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *);
1005 1016
1006#ifdef CONFIG_BLK_DEV_IDEDMA_PCI 1017#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
1007void ide_hwif_setup_dma(ide_hwif_t *, const struct ide_port_info *); 1018int ide_pci_set_master(struct pci_dev *, const char *);
1019unsigned long ide_pci_dma_base(ide_hwif_t *, const struct ide_port_info *);
1020int ide_hwif_setup_dma(ide_hwif_t *, const struct ide_port_info *);
1008#else 1021#else
1009static inline void ide_hwif_setup_dma(ide_hwif_t *hwif, 1022static inline int ide_hwif_setup_dma(ide_hwif_t *hwif,
1010 const struct ide_port_info *d) { } 1023 const struct ide_port_info *d)
1024{
1025 return -EINVAL;
1026}
1011#endif 1027#endif
1012 1028
1013extern void default_hwif_iops(ide_hwif_t *); 1029extern void default_hwif_iops(ide_hwif_t *);
@@ -1027,8 +1043,8 @@ enum {
1027 IDE_HFLAG_SINGLE = (1 << 1), 1043 IDE_HFLAG_SINGLE = (1 << 1),
1028 /* don't use legacy PIO blacklist */ 1044 /* don't use legacy PIO blacklist */
1029 IDE_HFLAG_PIO_NO_BLACKLIST = (1 << 2), 1045 IDE_HFLAG_PIO_NO_BLACKLIST = (1 << 2),
1030 /* don't use conservative PIO "downgrade" */ 1046 /* set for the second port of QD65xx */
1031 IDE_HFLAG_PIO_NO_DOWNGRADE = (1 << 3), 1047 IDE_HFLAG_QD_2ND_PORT = (1 << 3),
1032 /* use PIO8/9 for prefetch off/on */ 1048 /* use PIO8/9 for prefetch off/on */
1033 IDE_HFLAG_ABUSE_PREFETCH = (1 << 4), 1049 IDE_HFLAG_ABUSE_PREFETCH = (1 << 4),
1034 /* use PIO6/7 for fast-devsel off/on */ 1050 /* use PIO6/7 for fast-devsel off/on */
@@ -1050,14 +1066,12 @@ enum {
1050 IDE_HFLAG_VDMA = (1 << 11), 1066 IDE_HFLAG_VDMA = (1 << 11),
1051 /* ATAPI DMA is unsupported */ 1067 /* ATAPI DMA is unsupported */
1052 IDE_HFLAG_NO_ATAPI_DMA = (1 << 12), 1068 IDE_HFLAG_NO_ATAPI_DMA = (1 << 12),
1053 /* set if host is a "bootable" controller */ 1069 /* set if host is a "non-bootable" controller */
1054 IDE_HFLAG_BOOTABLE = (1 << 13), 1070 IDE_HFLAG_NON_BOOTABLE = (1 << 13),
1055 /* host doesn't support DMA */ 1071 /* host doesn't support DMA */
1056 IDE_HFLAG_NO_DMA = (1 << 14), 1072 IDE_HFLAG_NO_DMA = (1 << 14),
1057 /* check if host is PCI IDE device before allowing DMA */ 1073 /* check if host is PCI IDE device before allowing DMA */
1058 IDE_HFLAG_NO_AUTODMA = (1 << 15), 1074 IDE_HFLAG_NO_AUTODMA = (1 << 15),
1059 /* don't autotune PIO */
1060 IDE_HFLAG_NO_AUTOTUNE = (1 << 16),
1061 /* host is CS5510/CS5520 */ 1075 /* host is CS5510/CS5520 */
1062 IDE_HFLAG_CS5520 = IDE_HFLAG_VDMA, 1076 IDE_HFLAG_CS5520 = IDE_HFLAG_VDMA,
1063 /* no LBA48 */ 1077 /* no LBA48 */
@@ -1079,8 +1093,8 @@ enum {
1079 /* unmask IRQs */ 1093 /* unmask IRQs */
1080 IDE_HFLAG_UNMASK_IRQS = (1 << 25), 1094 IDE_HFLAG_UNMASK_IRQS = (1 << 25),
1081 IDE_HFLAG_ABUSE_SET_DMA_MODE = (1 << 26), 1095 IDE_HFLAG_ABUSE_SET_DMA_MODE = (1 << 26),
1082 /* host is CY82C693 */ 1096 /* serialize ports if DMA is possible (for sl82c105) */
1083 IDE_HFLAG_CY82C693 = (1 << 27), 1097 IDE_HFLAG_SERIALIZE_DMA = (1 << 27),
1084 /* force host out of "simplex" mode */ 1098 /* force host out of "simplex" mode */
1085 IDE_HFLAG_CLEAR_SIMPLEX = (1 << 28), 1099 IDE_HFLAG_CLEAR_SIMPLEX = (1 << 28),
1086 /* DSC overlap is unsupported */ 1100 /* DSC overlap is unsupported */
@@ -1092,9 +1106,9 @@ enum {
1092}; 1106};
1093 1107
1094#ifdef CONFIG_BLK_DEV_OFFBOARD 1108#ifdef CONFIG_BLK_DEV_OFFBOARD
1095# define IDE_HFLAG_OFF_BOARD IDE_HFLAG_BOOTABLE
1096#else
1097# define IDE_HFLAG_OFF_BOARD 0 1109# define IDE_HFLAG_OFF_BOARD 0
1110#else
1111# define IDE_HFLAG_OFF_BOARD IDE_HFLAG_NON_BOOTABLE
1098#endif 1112#endif
1099 1113
1100struct ide_port_info { 1114struct ide_port_info {
@@ -1102,10 +1116,14 @@ struct ide_port_info {
1102 unsigned int (*init_chipset)(struct pci_dev *, const char *); 1116 unsigned int (*init_chipset)(struct pci_dev *, const char *);
1103 void (*init_iops)(ide_hwif_t *); 1117 void (*init_iops)(ide_hwif_t *);
1104 void (*init_hwif)(ide_hwif_t *); 1118 void (*init_hwif)(ide_hwif_t *);
1105 void (*init_dma)(ide_hwif_t *, unsigned long); 1119 int (*init_dma)(ide_hwif_t *,
1120 const struct ide_port_info *);
1121
1122 const struct ide_port_ops *port_ops;
1123 const struct ide_dma_ops *dma_ops;
1124
1106 ide_pci_enablebit_t enablebits[2]; 1125 ide_pci_enablebit_t enablebits[2];
1107 hwif_chipset_t chipset; 1126 hwif_chipset_t chipset;
1108 u8 extra;
1109 u32 host_flags; 1127 u32 host_flags;
1110 u8 pio_mask; 1128 u8 pio_mask;
1111 u8 swdma_mask; 1129 u8 swdma_mask;
@@ -1152,13 +1170,16 @@ void ide_destroy_dmatable(ide_drive_t *);
1152 1170
1153#ifdef CONFIG_BLK_DEV_IDEDMA_SFF 1171#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
1154extern int ide_build_dmatable(ide_drive_t *, struct request *); 1172extern int ide_build_dmatable(ide_drive_t *, struct request *);
1155extern int ide_release_dma(ide_hwif_t *); 1173int ide_allocate_dma_engine(ide_hwif_t *);
1156extern void ide_setup_dma(ide_hwif_t *, unsigned long); 1174void ide_release_dma_engine(ide_hwif_t *);
1175void ide_setup_dma(ide_hwif_t *, unsigned long);
1157 1176
1158void ide_dma_host_set(ide_drive_t *, int); 1177void ide_dma_host_set(ide_drive_t *, int);
1159extern int ide_dma_setup(ide_drive_t *); 1178extern int ide_dma_setup(ide_drive_t *);
1179void ide_dma_exec_cmd(ide_drive_t *, u8);
1160extern void ide_dma_start(ide_drive_t *); 1180extern void ide_dma_start(ide_drive_t *);
1161extern int __ide_dma_end(ide_drive_t *); 1181extern int __ide_dma_end(ide_drive_t *);
1182int ide_dma_test_irq(ide_drive_t *);
1162extern void ide_dma_lost_irq(ide_drive_t *); 1183extern void ide_dma_lost_irq(ide_drive_t *);
1163extern void ide_dma_timeout(ide_drive_t *); 1184extern void ide_dma_timeout(ide_drive_t *);
1164#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */ 1185#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
@@ -1176,7 +1197,7 @@ static inline void ide_check_dma_crc(ide_drive_t *drive) { ; }
1176#endif /* CONFIG_BLK_DEV_IDEDMA */ 1197#endif /* CONFIG_BLK_DEV_IDEDMA */
1177 1198
1178#ifndef CONFIG_BLK_DEV_IDEDMA_SFF 1199#ifndef CONFIG_BLK_DEV_IDEDMA_SFF
1179static inline void ide_release_dma(ide_hwif_t *drive) {;} 1200static inline void ide_release_dma_engine(ide_hwif_t *hwif) { ; }
1180#endif 1201#endif
1181 1202
1182#ifdef CONFIG_BLK_DEV_IDEACPI 1203#ifdef CONFIG_BLK_DEV_IDEACPI
@@ -1196,17 +1217,18 @@ static inline void ide_acpi_set_state(ide_hwif_t *hwif, int on) {}
1196#endif 1217#endif
1197 1218
1198void ide_remove_port_from_hwgroup(ide_hwif_t *); 1219void ide_remove_port_from_hwgroup(ide_hwif_t *);
1199extern int ide_hwif_request_regions(ide_hwif_t *hwif); 1220void ide_unregister(ide_hwif_t *);
1200extern void ide_hwif_release_regions(ide_hwif_t* hwif);
1201void ide_unregister(unsigned int);
1202 1221
1203void ide_register_region(struct gendisk *); 1222void ide_register_region(struct gendisk *);
1204void ide_unregister_region(struct gendisk *); 1223void ide_unregister_region(struct gendisk *);
1205 1224
1206void ide_undecoded_slave(ide_drive_t *); 1225void ide_undecoded_slave(ide_drive_t *);
1207 1226
1227void ide_port_apply_params(ide_hwif_t *);
1228
1208int ide_device_add_all(u8 *idx, const struct ide_port_info *); 1229int ide_device_add_all(u8 *idx, const struct ide_port_info *);
1209int ide_device_add(u8 idx[4], const struct ide_port_info *); 1230int ide_device_add(u8 idx[4], const struct ide_port_info *);
1231int ide_legacy_device_add(const struct ide_port_info *, unsigned long);
1210void ide_port_unregister_devices(ide_hwif_t *); 1232void ide_port_unregister_devices(ide_hwif_t *);
1211void ide_port_scan(ide_hwif_t *); 1233void ide_port_scan(ide_hwif_t *);
1212 1234
@@ -1315,29 +1337,28 @@ static inline void ide_set_irq(ide_drive_t *drive, int on)
1315{ 1337{
1316 ide_hwif_t *hwif = drive->hwif; 1338 ide_hwif_t *hwif = drive->hwif;
1317 1339
1318 hwif->OUTB(drive->ctl | (on ? 0 : 2), 1340 hwif->OUTB(drive->ctl | (on ? 0 : 2), hwif->io_ports.ctl_addr);
1319 hwif->io_ports[IDE_CONTROL_OFFSET]);
1320} 1341}
1321 1342
1322static inline u8 ide_read_status(ide_drive_t *drive) 1343static inline u8 ide_read_status(ide_drive_t *drive)
1323{ 1344{
1324 ide_hwif_t *hwif = drive->hwif; 1345 ide_hwif_t *hwif = drive->hwif;
1325 1346
1326 return hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]); 1347 return hwif->INB(hwif->io_ports.status_addr);
1327} 1348}
1328 1349
1329static inline u8 ide_read_altstatus(ide_drive_t *drive) 1350static inline u8 ide_read_altstatus(ide_drive_t *drive)
1330{ 1351{
1331 ide_hwif_t *hwif = drive->hwif; 1352 ide_hwif_t *hwif = drive->hwif;
1332 1353
1333 return hwif->INB(hwif->io_ports[IDE_CONTROL_OFFSET]); 1354 return hwif->INB(hwif->io_ports.ctl_addr);
1334} 1355}
1335 1356
1336static inline u8 ide_read_error(ide_drive_t *drive) 1357static inline u8 ide_read_error(ide_drive_t *drive)
1337{ 1358{
1338 ide_hwif_t *hwif = drive->hwif; 1359 ide_hwif_t *hwif = drive->hwif;
1339 1360
1340 return hwif->INB(hwif->io_ports[IDE_ERROR_OFFSET]); 1361 return hwif->INB(hwif->io_ports.error_addr);
1341} 1362}
1342 1363
1343/* 1364/*
@@ -1350,7 +1371,7 @@ static inline void ide_atapi_discard_data(ide_drive_t *drive, unsigned bcount)
1350 1371
1351 /* FIXME: use ->atapi_input_bytes */ 1372 /* FIXME: use ->atapi_input_bytes */
1352 while (bcount--) 1373 while (bcount--)
1353 (void)hwif->INB(hwif->io_ports[IDE_DATA_OFFSET]); 1374 (void)hwif->INB(hwif->io_ports.data_addr);
1354} 1375}
1355 1376
1356static inline void ide_atapi_write_zeros(ide_drive_t *drive, unsigned bcount) 1377static inline void ide_atapi_write_zeros(ide_drive_t *drive, unsigned bcount)
@@ -1359,7 +1380,7 @@ static inline void ide_atapi_write_zeros(ide_drive_t *drive, unsigned bcount)
1359 1380
1360 /* FIXME: use ->atapi_output_bytes */ 1381 /* FIXME: use ->atapi_output_bytes */
1361 while (bcount--) 1382 while (bcount--)
1362 hwif->OUTB(0, hwif->io_ports[IDE_DATA_OFFSET]); 1383 hwif->OUTB(0, hwif->io_ports.data_addr);
1363} 1384}
1364 1385
1365#endif /* _IDE_H */ 1386#endif /* _IDE_H */
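
The ide.h hunks above turn ide_hwif_setup_dma() into an int-returning call (with a -EINVAL stub when CONFIG_BLK_DEV_IDEDMA_PCI is off) and add ide_pci_set_master() and ide_pci_dma_base(). A minimal sketch of a host driver consuming the new return value; "foo" and the fallback policy are illustrative assumptions, not part of this diff:

#include <linux/ide.h>

/* Hypothetical hook matching the new int-returning init_dma signature. */
static int foo_init_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
{
	/* a negative errno here lets the caller keep the port PIO-only
	 * instead of silently ignoring a failed DMA engine setup */
	return ide_hwif_setup_dma(hwif, d);
}
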
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index c1ec04fd000d..a281afeddfbb 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -8,11 +8,18 @@
8 */ 8 */
9 9
10#include <asm/types.h> 10#include <asm/types.h>
11#include <linux/compiler.h>
11#include <linux/ioctl.h> 12#include <linux/ioctl.h>
12#include <asm/kvm.h> 13#include <asm/kvm.h>
13 14
14#define KVM_API_VERSION 12 15#define KVM_API_VERSION 12
15 16
17/* for KVM_TRACE_ENABLE */
18struct kvm_user_trace_setup {
 19 __u32 buf_size; /* size of each per-cpu sub-buffer */
 20 __u32 buf_nr; /* number of sub-buffers per cpu */
21};
22
16/* for KVM_CREATE_MEMORY_REGION */ 23/* for KVM_CREATE_MEMORY_REGION */
17struct kvm_memory_region { 24struct kvm_memory_region {
18 __u32 slot; 25 __u32 slot;
@@ -73,6 +80,9 @@ struct kvm_irqchip {
73#define KVM_EXIT_INTR 10 80#define KVM_EXIT_INTR 10
74#define KVM_EXIT_SET_TPR 11 81#define KVM_EXIT_SET_TPR 11
75#define KVM_EXIT_TPR_ACCESS 12 82#define KVM_EXIT_TPR_ACCESS 12
83#define KVM_EXIT_S390_SIEIC 13
84#define KVM_EXIT_S390_RESET 14
85#define KVM_EXIT_DCR 15
76 86
77/* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */ 87/* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */
78struct kvm_run { 88struct kvm_run {
@@ -137,6 +147,27 @@ struct kvm_run {
137 __u32 is_write; 147 __u32 is_write;
138 __u32 pad; 148 __u32 pad;
139 } tpr_access; 149 } tpr_access;
150 /* KVM_EXIT_S390_SIEIC */
151 struct {
152 __u8 icptcode;
153 __u64 mask; /* psw upper half */
154 __u64 addr; /* psw lower half */
155 __u16 ipa;
156 __u32 ipb;
157 } s390_sieic;
158 /* KVM_EXIT_S390_RESET */
159#define KVM_S390_RESET_POR 1
160#define KVM_S390_RESET_CLEAR 2
161#define KVM_S390_RESET_SUBSYSTEM 4
162#define KVM_S390_RESET_CPU_INIT 8
163#define KVM_S390_RESET_IPL 16
164 __u64 s390_reset_flags;
165 /* KVM_EXIT_DCR */
166 struct {
167 __u32 dcrn;
168 __u32 data;
169 __u8 is_write;
170 } dcr;
140 /* Fix the size of the union. */ 171 /* Fix the size of the union. */
141 char padding[256]; 172 char padding[256];
142 }; 173 };
@@ -204,6 +235,74 @@ struct kvm_vapic_addr {
204 __u64 vapic_addr; 235 __u64 vapic_addr;
205}; 236};
206 237
238/* for KVM_SET_MPSTATE */
239
240#define KVM_MP_STATE_RUNNABLE 0
241#define KVM_MP_STATE_UNINITIALIZED 1
242#define KVM_MP_STATE_INIT_RECEIVED 2
243#define KVM_MP_STATE_HALTED 3
244#define KVM_MP_STATE_SIPI_RECEIVED 4
245
246struct kvm_mp_state {
247 __u32 mp_state;
248};
249
250struct kvm_s390_psw {
251 __u64 mask;
252 __u64 addr;
253};
254
255/* valid values for type in kvm_s390_interrupt */
256#define KVM_S390_SIGP_STOP 0xfffe0000u
257#define KVM_S390_PROGRAM_INT 0xfffe0001u
258#define KVM_S390_SIGP_SET_PREFIX 0xfffe0002u
259#define KVM_S390_RESTART 0xfffe0003u
260#define KVM_S390_INT_VIRTIO 0xffff2603u
261#define KVM_S390_INT_SERVICE 0xffff2401u
262#define KVM_S390_INT_EMERGENCY 0xffff1201u
263
264struct kvm_s390_interrupt {
265 __u32 type;
266 __u32 parm;
267 __u64 parm64;
268};
269
270#define KVM_TRC_SHIFT 16
271/*
272 * kvm trace categories
273 */
274#define KVM_TRC_ENTRYEXIT (1 << KVM_TRC_SHIFT)
275#define KVM_TRC_HANDLER (1 << (KVM_TRC_SHIFT + 1)) /* only 12 bits */
276
277/*
278 * kvm trace action
279 */
280#define KVM_TRC_VMENTRY (KVM_TRC_ENTRYEXIT + 0x01)
281#define KVM_TRC_VMEXIT (KVM_TRC_ENTRYEXIT + 0x02)
282#define KVM_TRC_PAGE_FAULT (KVM_TRC_HANDLER + 0x01)
283
284#define KVM_TRC_HEAD_SIZE 12
285#define KVM_TRC_CYCLE_SIZE 8
286#define KVM_TRC_EXTRA_MAX 7
287
288/* This structure represents a single trace buffer record. */
289struct kvm_trace_rec {
290 __u32 event:28;
291 __u32 extra_u32:3;
292 __u32 cycle_in:1;
293 __u32 pid;
294 __u32 vcpu_id;
295 union {
296 struct {
297 __u32 cycle_lo, cycle_hi;
298 __u32 extra_u32[KVM_TRC_EXTRA_MAX];
299 } cycle;
300 struct {
301 __u32 extra_u32[KVM_TRC_EXTRA_MAX];
302 } nocycle;
303 } u;
304};
305
207#define KVMIO 0xAE 306#define KVMIO 0xAE
208 307
209/* 308/*
@@ -212,6 +311,8 @@ struct kvm_vapic_addr {
212#define KVM_GET_API_VERSION _IO(KVMIO, 0x00) 311#define KVM_GET_API_VERSION _IO(KVMIO, 0x00)
213#define KVM_CREATE_VM _IO(KVMIO, 0x01) /* returns a VM fd */ 312#define KVM_CREATE_VM _IO(KVMIO, 0x01) /* returns a VM fd */
214#define KVM_GET_MSR_INDEX_LIST _IOWR(KVMIO, 0x02, struct kvm_msr_list) 313#define KVM_GET_MSR_INDEX_LIST _IOWR(KVMIO, 0x02, struct kvm_msr_list)
314
315#define KVM_S390_ENABLE_SIE _IO(KVMIO, 0x06)
215/* 316/*
216 * Check if a kvm extension is available. Argument is extension number, 317 * Check if a kvm extension is available. Argument is extension number,
217 * return is 1 (yes) or 0 (no, sorry). 318 * return is 1 (yes) or 0 (no, sorry).
@@ -222,7 +323,12 @@ struct kvm_vapic_addr {
222 */ 323 */
223#define KVM_GET_VCPU_MMAP_SIZE _IO(KVMIO, 0x04) /* in bytes */ 324#define KVM_GET_VCPU_MMAP_SIZE _IO(KVMIO, 0x04) /* in bytes */
224#define KVM_GET_SUPPORTED_CPUID _IOWR(KVMIO, 0x05, struct kvm_cpuid2) 325#define KVM_GET_SUPPORTED_CPUID _IOWR(KVMIO, 0x05, struct kvm_cpuid2)
225 326/*
327 * ioctls for kvm trace
328 */
329#define KVM_TRACE_ENABLE _IOW(KVMIO, 0x06, struct kvm_user_trace_setup)
330#define KVM_TRACE_PAUSE _IO(KVMIO, 0x07)
331#define KVM_TRACE_DISABLE _IO(KVMIO, 0x08)
226/* 332/*
227 * Extension capability list. 333 * Extension capability list.
228 */ 334 */
@@ -233,6 +339,13 @@ struct kvm_vapic_addr {
233#define KVM_CAP_SET_TSS_ADDR 4 339#define KVM_CAP_SET_TSS_ADDR 4
234#define KVM_CAP_VAPIC 6 340#define KVM_CAP_VAPIC 6
235#define KVM_CAP_EXT_CPUID 7 341#define KVM_CAP_EXT_CPUID 7
342#define KVM_CAP_CLOCKSOURCE 8
343#define KVM_CAP_NR_VCPUS 9 /* returns max vcpus per vm */
344#define KVM_CAP_NR_MEMSLOTS 10 /* returns max memory slots per vm */
345#define KVM_CAP_PIT 11
346#define KVM_CAP_NOP_IO_DELAY 12
347#define KVM_CAP_PV_MMU 13
348#define KVM_CAP_MP_STATE 14
236 349
237/* 350/*
238 * ioctls for VM fds 351 * ioctls for VM fds
@@ -255,6 +368,9 @@ struct kvm_vapic_addr {
255#define KVM_IRQ_LINE _IOW(KVMIO, 0x61, struct kvm_irq_level) 368#define KVM_IRQ_LINE _IOW(KVMIO, 0x61, struct kvm_irq_level)
256#define KVM_GET_IRQCHIP _IOWR(KVMIO, 0x62, struct kvm_irqchip) 369#define KVM_GET_IRQCHIP _IOWR(KVMIO, 0x62, struct kvm_irqchip)
257#define KVM_SET_IRQCHIP _IOR(KVMIO, 0x63, struct kvm_irqchip) 370#define KVM_SET_IRQCHIP _IOR(KVMIO, 0x63, struct kvm_irqchip)
371#define KVM_CREATE_PIT _IO(KVMIO, 0x64)
372#define KVM_GET_PIT _IOWR(KVMIO, 0x65, struct kvm_pit_state)
373#define KVM_SET_PIT _IOR(KVMIO, 0x66, struct kvm_pit_state)
258 374
259/* 375/*
260 * ioctls for vcpu fds 376 * ioctls for vcpu fds
@@ -281,5 +397,17 @@ struct kvm_vapic_addr {
281#define KVM_TPR_ACCESS_REPORTING _IOWR(KVMIO, 0x92, struct kvm_tpr_access_ctl) 397#define KVM_TPR_ACCESS_REPORTING _IOWR(KVMIO, 0x92, struct kvm_tpr_access_ctl)
282/* Available with KVM_CAP_VAPIC */ 398/* Available with KVM_CAP_VAPIC */
283#define KVM_SET_VAPIC_ADDR _IOW(KVMIO, 0x93, struct kvm_vapic_addr) 399#define KVM_SET_VAPIC_ADDR _IOW(KVMIO, 0x93, struct kvm_vapic_addr)
 400/* valid for virtual machine (for floating interrupt) _and_ vcpu */
401#define KVM_S390_INTERRUPT _IOW(KVMIO, 0x94, struct kvm_s390_interrupt)
402/* store status for s390 */
403#define KVM_S390_STORE_STATUS_NOADDR (-1ul)
404#define KVM_S390_STORE_STATUS_PREFIXED (-2ul)
405#define KVM_S390_STORE_STATUS _IOW(KVMIO, 0x95, unsigned long)
406/* initial ipl psw for s390 */
407#define KVM_S390_SET_INITIAL_PSW _IOW(KVMIO, 0x96, struct kvm_s390_psw)
408/* initial reset for s390 */
409#define KVM_S390_INITIAL_RESET _IO(KVMIO, 0x97)
410#define KVM_GET_MP_STATE _IOR(KVMIO, 0x98, struct kvm_mp_state)
411#define KVM_SET_MP_STATE _IOW(KVMIO, 0x99, struct kvm_mp_state)
284 412
285#endif 413#endif
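
The KVM_TRACE_* ioctls above pair with the kvm_user_trace_setup structure to size per-cpu relay buffers before tracing starts. A hedged userspace sketch, assuming these are issued on the /dev/kvm system fd; the buffer geometry is arbitrary and error handling is trimmed:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	struct kvm_user_trace_setup kuts = {
		.buf_size = 4096,	/* per-cpu sub-buffer size (arbitrary) */
		.buf_nr = 8,		/* sub-buffers per cpu (arbitrary) */
	};
	int fd = open("/dev/kvm", O_RDWR);

	if (fd < 0)
		return 1;
	ioctl(fd, KVM_TRACE_ENABLE, &kuts);
	/* ... run guests; struct kvm_trace_rec entries accumulate ... */
	ioctl(fd, KVM_TRACE_DISABLE);
	return 0;
}
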
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 928b0d59e9ba..398978972b7a 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -15,6 +15,7 @@
15#include <linux/sched.h> 15#include <linux/sched.h>
16#include <linux/mm.h> 16#include <linux/mm.h>
17#include <linux/preempt.h> 17#include <linux/preempt.h>
18#include <linux/marker.h>
18#include <asm/signal.h> 19#include <asm/signal.h>
19 20
20#include <linux/kvm.h> 21#include <linux/kvm.h>
@@ -24,29 +25,18 @@
24 25
25#include <asm/kvm_host.h> 26#include <asm/kvm_host.h>
26 27
27#define KVM_MAX_VCPUS 4
28#define KVM_MEMORY_SLOTS 8
 29/* memory slots that are not exposed to userspace */
30#define KVM_PRIVATE_MEM_SLOTS 4
31
32#define KVM_PIO_PAGE_OFFSET 1
33
34/* 28/*
35 * vcpu->requests bit members 29 * vcpu->requests bit members
36 */ 30 */
37#define KVM_REQ_TLB_FLUSH 0 31#define KVM_REQ_TLB_FLUSH 0
38#define KVM_REQ_MIGRATE_TIMER 1 32#define KVM_REQ_MIGRATE_TIMER 1
39#define KVM_REQ_REPORT_TPR_ACCESS 2 33#define KVM_REQ_REPORT_TPR_ACCESS 2
34#define KVM_REQ_MMU_RELOAD 3
35#define KVM_REQ_TRIPLE_FAULT 4
40 36
41struct kvm_vcpu; 37struct kvm_vcpu;
42extern struct kmem_cache *kvm_vcpu_cache; 38extern struct kmem_cache *kvm_vcpu_cache;
43 39
44struct kvm_guest_debug {
45 int enabled;
46 unsigned long bp[4];
47 int singlestep;
48};
49
50/* 40/*
51 * It would be nice to use something smarter than a linear search, TBD... 41 * It would be nice to use something smarter than a linear search, TBD...
 52 * Thankfully we don't expect many devices to register (famous last words :), 42 * Thankfully we don't expect many devices to register (famous last words :),
@@ -67,7 +57,9 @@ void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
67 57
68struct kvm_vcpu { 58struct kvm_vcpu {
69 struct kvm *kvm; 59 struct kvm *kvm;
60#ifdef CONFIG_PREEMPT_NOTIFIERS
70 struct preempt_notifier preempt_notifier; 61 struct preempt_notifier preempt_notifier;
62#endif
71 int vcpu_id; 63 int vcpu_id;
72 struct mutex mutex; 64 struct mutex mutex;
73 int cpu; 65 int cpu;
@@ -100,6 +92,10 @@ struct kvm_memory_slot {
100 unsigned long flags; 92 unsigned long flags;
101 unsigned long *rmap; 93 unsigned long *rmap;
102 unsigned long *dirty_bitmap; 94 unsigned long *dirty_bitmap;
95 struct {
96 unsigned long rmap_pde;
97 int write_count;
98 } *lpage_info;
103 unsigned long userspace_addr; 99 unsigned long userspace_addr;
104 int user_alloc; 100 int user_alloc;
105}; 101};
@@ -114,11 +110,11 @@ struct kvm {
114 KVM_PRIVATE_MEM_SLOTS]; 110 KVM_PRIVATE_MEM_SLOTS];
115 struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; 111 struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
116 struct list_head vm_list; 112 struct list_head vm_list;
117 struct file *filp;
118 struct kvm_io_bus mmio_bus; 113 struct kvm_io_bus mmio_bus;
119 struct kvm_io_bus pio_bus; 114 struct kvm_io_bus pio_bus;
120 struct kvm_vm_stat stat; 115 struct kvm_vm_stat stat;
121 struct kvm_arch arch; 116 struct kvm_arch arch;
117 atomic_t users_count;
122}; 118};
123 119
124/* The guest did something we don't support. */ 120/* The guest did something we don't support. */
@@ -145,14 +141,19 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
145 struct module *module); 141 struct module *module);
146void kvm_exit(void); 142void kvm_exit(void);
147 143
144void kvm_get_kvm(struct kvm *kvm);
145void kvm_put_kvm(struct kvm *kvm);
146
148#define HPA_MSB ((sizeof(hpa_t) * 8) - 1) 147#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
149#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB) 148#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
150static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; } 149static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
151struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva); 150struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);
152 151
153extern struct page *bad_page; 152extern struct page *bad_page;
153extern pfn_t bad_pfn;
154 154
155int is_error_page(struct page *page); 155int is_error_page(struct page *page);
156int is_error_pfn(pfn_t pfn);
156int kvm_is_error_hva(unsigned long addr); 157int kvm_is_error_hva(unsigned long addr);
157int kvm_set_memory_region(struct kvm *kvm, 158int kvm_set_memory_region(struct kvm *kvm,
158 struct kvm_userspace_memory_region *mem, 159 struct kvm_userspace_memory_region *mem,
@@ -166,8 +167,19 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
166 int user_alloc); 167 int user_alloc);
167gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn); 168gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
168struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); 169struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
170unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
169void kvm_release_page_clean(struct page *page); 171void kvm_release_page_clean(struct page *page);
170void kvm_release_page_dirty(struct page *page); 172void kvm_release_page_dirty(struct page *page);
173void kvm_set_page_dirty(struct page *page);
174void kvm_set_page_accessed(struct page *page);
175
176pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
177void kvm_release_pfn_dirty(pfn_t);
178void kvm_release_pfn_clean(pfn_t pfn);
179void kvm_set_pfn_dirty(pfn_t pfn);
180void kvm_set_pfn_accessed(pfn_t pfn);
181void kvm_get_pfn(pfn_t pfn);
182
171int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, 183int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
172 int len); 184 int len);
173int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data, 185int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
@@ -188,6 +200,7 @@ void kvm_resched(struct kvm_vcpu *vcpu);
188void kvm_load_guest_fpu(struct kvm_vcpu *vcpu); 200void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
189void kvm_put_guest_fpu(struct kvm_vcpu *vcpu); 201void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
190void kvm_flush_remote_tlbs(struct kvm *kvm); 202void kvm_flush_remote_tlbs(struct kvm *kvm);
203void kvm_reload_remote_mmus(struct kvm *kvm);
191 204
192long kvm_arch_dev_ioctl(struct file *filp, 205long kvm_arch_dev_ioctl(struct file *filp,
193 unsigned int ioctl, unsigned long arg); 206 unsigned int ioctl, unsigned long arg);
@@ -223,6 +236,10 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
223 struct kvm_sregs *sregs); 236 struct kvm_sregs *sregs);
224int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, 237int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
225 struct kvm_sregs *sregs); 238 struct kvm_sregs *sregs);
239int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
240 struct kvm_mp_state *mp_state);
241int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
242 struct kvm_mp_state *mp_state);
226int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu, 243int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
227 struct kvm_debug_guest *dbg); 244 struct kvm_debug_guest *dbg);
228int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run); 245int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
@@ -255,6 +272,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm);
255 272
256int kvm_cpu_get_interrupt(struct kvm_vcpu *v); 273int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
257int kvm_cpu_has_interrupt(struct kvm_vcpu *v); 274int kvm_cpu_has_interrupt(struct kvm_vcpu *v);
275int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
258void kvm_vcpu_kick(struct kvm_vcpu *vcpu); 276void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
259 277
260static inline void kvm_guest_enter(void) 278static inline void kvm_guest_enter(void)
@@ -296,5 +314,18 @@ struct kvm_stats_debugfs_item {
296 struct dentry *dentry; 314 struct dentry *dentry;
297}; 315};
298extern struct kvm_stats_debugfs_item debugfs_entries[]; 316extern struct kvm_stats_debugfs_item debugfs_entries[];
317extern struct dentry *kvm_debugfs_dir;
318
319#ifdef CONFIG_KVM_TRACE
320int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg);
321void kvm_trace_cleanup(void);
322#else
323static inline
324int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg)
325{
326 return -EINVAL;
327}
328#define kvm_trace_cleanup() ((void)0)
329#endif
299 330
300#endif 331#endif
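
The pfn_t-based accessors declared above (gfn_to_pfn() plus the kvm_release_pfn_*/kvm_set_pfn_* family) mirror the older struct-page API. A sketch of the expected get/mark/release pairing; the surrounding usage is assumed:

#include <linux/kvm_host.h>

/* Sketch: look up one guest frame, mark it accessed, drop the ref. */
static void example_touch_gfn(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn = gfn_to_pfn(kvm, gfn);

	if (!is_error_pfn(pfn)) {
		/* ... map and inspect the frame here ... */
		kvm_set_pfn_accessed(pfn);
		kvm_release_pfn_clean(pfn);	/* clean: nothing was written */
	}
}
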
diff --git a/include/linux/kvm_para.h b/include/linux/kvm_para.h
index 5497aac0d2f8..3ddce03766ca 100644
--- a/include/linux/kvm_para.h
+++ b/include/linux/kvm_para.h
@@ -11,8 +11,11 @@
11 11
12/* Return values for hypercalls */ 12/* Return values for hypercalls */
13#define KVM_ENOSYS 1000 13#define KVM_ENOSYS 1000
14#define KVM_EFAULT EFAULT
15#define KVM_E2BIG E2BIG
14 16
15#define KVM_HC_VAPIC_POLL_IRQ 1 17#define KVM_HC_VAPIC_POLL_IRQ 1
18#define KVM_HC_MMU_OP 2
16 19
17/* 20/*
18 * hypercalls use architecture specific 21 * hypercalls use architecture specific
@@ -20,6 +23,12 @@
20#include <asm/kvm_para.h> 23#include <asm/kvm_para.h>
21 24
22#ifdef __KERNEL__ 25#ifdef __KERNEL__
26#ifdef CONFIG_KVM_GUEST
27void __init kvm_guest_init(void);
28#else
29#define kvm_guest_init() do { } while (0)
30#endif
31
23static inline int kvm_para_has_feature(unsigned int feature) 32static inline int kvm_para_has_feature(unsigned int feature)
24{ 33{
25 if (kvm_arch_para_features() & (1UL << feature)) 34 if (kvm_arch_para_features() & (1UL << feature))
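
Since kvm_guest_init() compiles away without CONFIG_KVM_GUEST, generic setup code can call it unconditionally, and kvm_para_has_feature() then gates individual paravirtual features. A sketch with a made-up feature bit; EXAMPLE_FEATURE exists only for illustration:

#include <linux/init.h>
#include <linux/kvm_para.h>

#define EXAMPLE_FEATURE 0	/* hypothetical feature bit number */

void __init example_pv_setup(void)
{
	kvm_guest_init();	/* no-op unless CONFIG_KVM_GUEST */
	if (kvm_para_has_feature(EXAMPLE_FEATURE)) {
		/* ... enable the corresponding paravirtual facility ... */
	}
}
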
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 1c4e46decb22..9b6f395c9625 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -38,6 +38,8 @@ typedef unsigned long hva_t;
38typedef u64 hpa_t; 38typedef u64 hpa_t;
39typedef unsigned long hfn_t; 39typedef unsigned long hfn_t;
40 40
41typedef hfn_t pfn_t;
42
41struct kvm_pio_request { 43struct kvm_pio_request {
42 unsigned long count; 44 unsigned long count;
43 int cur_count; 45 int cur_count;
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index ff7df1a2222f..9fa1a8002ce2 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -208,6 +208,38 @@ struct mlx4_mtt {
208 int page_shift; 208 int page_shift;
209}; 209};
210 210
211enum {
212 MLX4_DB_PER_PAGE = PAGE_SIZE / 4
213};
214
215struct mlx4_db_pgdir {
216 struct list_head list;
217 DECLARE_BITMAP(order0, MLX4_DB_PER_PAGE);
218 DECLARE_BITMAP(order1, MLX4_DB_PER_PAGE / 2);
219 unsigned long *bits[2];
220 __be32 *db_page;
221 dma_addr_t db_dma;
222};
223
224struct mlx4_ib_user_db_page;
225
226struct mlx4_db {
227 __be32 *db;
228 union {
229 struct mlx4_db_pgdir *pgdir;
230 struct mlx4_ib_user_db_page *user_page;
231 } u;
232 dma_addr_t dma;
233 int index;
234 int order;
235};
236
237struct mlx4_hwq_resources {
238 struct mlx4_db db;
239 struct mlx4_mtt mtt;
240 struct mlx4_buf buf;
241};
242
211struct mlx4_mr { 243struct mlx4_mr {
212 struct mlx4_mtt mtt; 244 struct mlx4_mtt mtt;
213 u64 iova; 245 u64 iova;
@@ -341,6 +373,14 @@ int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
341int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, 373int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
342 struct mlx4_buf *buf); 374 struct mlx4_buf *buf);
343 375
376int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order);
377void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db);
378
379int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
380 int size, int max_direct);
381void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres,
382 int size);
383
344int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, 384int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
345 struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq); 385 struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq);
346void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq); 386void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
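
mlx4_alloc_hwq_res() bundles the buffer, MTT, and doorbell allocations that queue setup previously performed one by one, with mlx4_free_hwq_res() as its inverse. A hedged sketch; the queue itself and the max_direct choice are assumptions:

#include <linux/mlx4/device.h>

/* Sketch: back a hypothetical work queue with buf + mtt + doorbell. */
static int example_create_queue(struct mlx4_dev *dev,
				struct mlx4_hwq_resources *wqres, int size)
{
	int err = mlx4_alloc_hwq_res(dev, wqres, size, 2 /* arbitrary */);

	if (err)
		return err;
	/* ... program the hardware context from wqres->buf and
	 * wqres->db.dma; call mlx4_free_hwq_res() on teardown ... */
	return 0;
}
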
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index a5e43febee4f..7f128b266faa 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -296,6 +296,10 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
296int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp, 296int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
297 struct mlx4_qp_context *context); 297 struct mlx4_qp_context *context);
298 298
299int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
300 struct mlx4_qp_context *context,
301 struct mlx4_qp *qp, enum mlx4_qp_state *qp_state);
302
299static inline struct mlx4_qp *__mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn) 303static inline struct mlx4_qp *__mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
300{ 304{
301 return radix_tree_lookup(&dev->qp_table_tree, qpn & (dev->caps.num_qps - 1)); 305 return radix_tree_lookup(&dev->qp_table_tree, qpn & (dev->caps.num_qps - 1));
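
mlx4_qp_to_ready() drives a queue pair through the standard RESET→INIT→RTR→RTS modify ladder in a single call, updating *qp_state as it goes. A sketch under the assumption that the QP starts from reset:

#include <linux/mlx4/qp.h>

static int example_activate_qp(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
			       struct mlx4_qp_context *ctx,
			       struct mlx4_qp *qp)
{
	enum mlx4_qp_state state = MLX4_QP_STATE_RST;	/* assumed start */

	/* on success, `state` ends up at the ready-to-send state */
	return mlx4_qp_to_ready(dev, mtt, ctx, qp, &state);
}
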
diff --git a/include/linux/mm.h b/include/linux/mm.h
index b695875d63e3..286d31521605 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1229,6 +1229,7 @@ void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
1229int vmemmap_populate_basepages(struct page *start_page, 1229int vmemmap_populate_basepages(struct page *start_page,
1230 unsigned long pages, int node); 1230 unsigned long pages, int node);
1231int vmemmap_populate(struct page *start_page, unsigned long pages, int node); 1231int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
1232void vmemmap_populate_print_last(void);
1232 1233
1233#endif /* __KERNEL__ */ 1234#endif /* __KERNEL__ */
1234#endif /* _LINUX_MM_H */ 1235#endif /* _LINUX_MM_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d0bd97044abd..9a4f3e63e3bf 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1798,6 +1798,8 @@ extern void mmput(struct mm_struct *);
1798extern struct mm_struct *get_task_mm(struct task_struct *task); 1798extern struct mm_struct *get_task_mm(struct task_struct *task);
1799/* Remove the current tasks stale references to the old mm_struct */ 1799/* Remove the current tasks stale references to the old mm_struct */
1800extern void mm_release(struct task_struct *, struct mm_struct *); 1800extern void mm_release(struct task_struct *, struct mm_struct *);
1801/* Allocate a new mm structure and copy contents from tsk->mm */
1802extern struct mm_struct *dup_mm(struct task_struct *tsk);
1801 1803
1802extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *); 1804extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
1803extern void flush_thread(void); 1805extern void flush_thread(void);
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 03378e3515b3..add3c5a40827 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -32,7 +32,7 @@ struct attribute {
32 32
33struct attribute_group { 33struct attribute_group {
34 const char *name; 34 const char *name;
35 int (*is_visible)(struct kobject *, 35 mode_t (*is_visible)(struct kobject *,
36 struct attribute *, int); 36 struct attribute *, int);
37 struct attribute **attrs; 37 struct attribute **attrs;
38}; 38};
@@ -105,6 +105,8 @@ void sysfs_remove_link(struct kobject *kobj, const char *name);
105 105
106int __must_check sysfs_create_group(struct kobject *kobj, 106int __must_check sysfs_create_group(struct kobject *kobj,
107 const struct attribute_group *grp); 107 const struct attribute_group *grp);
108int sysfs_update_group(struct kobject *kobj,
109 const struct attribute_group *grp);
108void sysfs_remove_group(struct kobject *kobj, 110void sysfs_remove_group(struct kobject *kobj,
109 const struct attribute_group *grp); 111 const struct attribute_group *grp);
110int sysfs_add_file_to_group(struct kobject *kobj, 112int sysfs_add_file_to_group(struct kobject *kobj,
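
With is_visible returning a mode_t, a group callback can now hide an attribute (return 0) or override its permissions per kobject, and the new sysfs_update_group() re-evaluates the callback on an already-registered group. A sketch; the name-based policy is invented:

#include <linux/string.h>
#include <linux/sysfs.h>

/* Sketch: hide any attribute named "debug" on this kobject. */
static mode_t example_is_visible(struct kobject *kobj,
				 struct attribute *attr, int n)
{
	if (!strcmp(attr->name, "debug"))
		return 0;		/* attribute is not created */
	return attr->mode;		/* otherwise keep the declared mode */
}

Wiring this as .is_visible in an attribute_group and calling sysfs_update_group() after a state change flips visibility without tearing the whole group down.
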
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index b8b19e2f57bb..f6a9fe0ef09c 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -181,7 +181,8 @@ struct scsi_device {
181 sdev_printk(prefix, (scmd)->device, fmt, ##a) 181 sdev_printk(prefix, (scmd)->device, fmt, ##a)
182 182
183enum scsi_target_state { 183enum scsi_target_state {
184 STARGET_RUNNING = 1, 184 STARGET_CREATED = 1,
185 STARGET_RUNNING,
185 STARGET_DEL, 186 STARGET_DEL,
186}; 187};
187 188
diff --git a/kernel/exit.c b/kernel/exit.c
index cece89f80ab4..97f609f574b1 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -507,10 +507,9 @@ void put_files_struct(struct files_struct *files)
507 } 507 }
508} 508}
509 509
510EXPORT_SYMBOL(put_files_struct); 510void reset_files_struct(struct files_struct *files)
511
512void reset_files_struct(struct task_struct *tsk, struct files_struct *files)
513{ 511{
512 struct task_struct *tsk = current;
514 struct files_struct *old; 513 struct files_struct *old;
515 514
516 old = tsk->files; 515 old = tsk->files;
@@ -519,7 +518,6 @@ void reset_files_struct(struct task_struct *tsk, struct files_struct *files)
519 task_unlock(tsk); 518 task_unlock(tsk);
520 put_files_struct(old); 519 put_files_struct(old);
521} 520}
522EXPORT_SYMBOL(reset_files_struct);
523 521
524void exit_files(struct task_struct *tsk) 522void exit_files(struct task_struct *tsk)
525{ 523{
diff --git a/kernel/fork.c b/kernel/fork.c
index 89fe414645e9..c674aa8d3c31 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -521,7 +521,7 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
521 * Allocate a new mm structure and copy contents from the 521 * Allocate a new mm structure and copy contents from the
522 * mm structure of the passed in task structure. 522 * mm structure of the passed in task structure.
523 */ 523 */
524static struct mm_struct *dup_mm(struct task_struct *tsk) 524struct mm_struct *dup_mm(struct task_struct *tsk)
525{ 525{
526 struct mm_struct *mm, *oldmm = current->mm; 526 struct mm_struct *mm, *oldmm = current->mm;
527 int err; 527 int err;
@@ -805,12 +805,6 @@ static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
805 goto out; 805 goto out;
806 } 806 }
807 807
808 /*
809 * Note: we may be using current for both targets (See exec.c)
810 * This works because we cache current->files (old) as oldf. Don't
811 * break this.
812 */
813 tsk->files = NULL;
814 newf = dup_fd(oldf, &error); 808 newf = dup_fd(oldf, &error);
815 if (!newf) 809 if (!newf)
816 goto out; 810 goto out;
@@ -846,34 +840,6 @@ static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
846 return 0; 840 return 0;
847} 841}
848 842
849/*
850 * Helper to unshare the files of the current task.
851 * We don't want to expose copy_files internals to
852 * the exec layer of the kernel.
853 */
854
855int unshare_files(void)
856{
857 struct files_struct *files = current->files;
858 int rc;
859
860 BUG_ON(!files);
861
862 /* This can race but the race causes us to copy when we don't
863 need to and drop the copy */
864 if(atomic_read(&files->count) == 1)
865 {
866 atomic_inc(&files->count);
867 return 0;
868 }
869 rc = copy_files(0, current);
870 if(rc)
871 current->files = files;
872 return rc;
873}
874
875EXPORT_SYMBOL(unshare_files);
876
877static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk) 843static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
878{ 844{
879 struct sighand_struct *sig; 845 struct sighand_struct *sig;
@@ -1811,3 +1777,27 @@ bad_unshare_cleanup_thread:
1811bad_unshare_out: 1777bad_unshare_out:
1812 return err; 1778 return err;
1813} 1779}
1780
1781/*
1782 * Helper to unshare the files of the current task.
1783 * We don't want to expose copy_files internals to
1784 * the exec layer of the kernel.
1785 */
1786
1787int unshare_files(struct files_struct **displaced)
1788{
1789 struct task_struct *task = current;
1790 struct files_struct *copy = NULL;
1791 int error;
1792
1793 error = unshare_fd(CLONE_FILES, &copy);
1794 if (error || !copy) {
1795 *displaced = NULL;
1796 return error;
1797 }
1798 *displaced = task->files;
1799 task_lock(task);
1800 task->files = copy;
1801 task_unlock(task);
1802 return 0;
1803}
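
unshare_files() now hands the displaced descriptor table back to the caller rather than swapping it behind the task's back, which is also why reset_files_struct() loses its task argument and its export in the kernel/exit.c hunk above. A sketch of the new calling convention; the header choice and surrounding flow are assumptions modeled on exec:

#include <linux/file.h>
#include <linux/sched.h>

static int example_exec_like_path(void)
{
	struct files_struct *displaced;
	int err = unshare_files(&displaced);	/* installs a private copy */

	if (err)
		return err;
	/* ... operate on current->files, now guaranteed unshared ... */
	if (displaced)
		put_files_struct(displaced);	/* release the old table */
	return 0;
}
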
diff --git a/lib/Kconfig b/lib/Kconfig
index 2d53dc092e8b..8cc8e8722a3f 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -7,6 +7,12 @@ menu "Library routines"
7config BITREVERSE 7config BITREVERSE
8 tristate 8 tristate
9 9
10config GENERIC_FIND_FIRST_BIT
11 def_bool n
12
13config GENERIC_FIND_NEXT_BIT
14 def_bool n
15
10config CRC_CCITT 16config CRC_CCITT
11 tristate "CRC-CCITT functions" 17 tristate "CRC-CCITT functions"
12 help 18 help
diff --git a/lib/Makefile b/lib/Makefile
index bf8000fc7d48..2d7001b7f5a4 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
29obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o 29obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
30lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o 30lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
31lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o 31lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
32lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o
32lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o 33lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
33obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o 34obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
34obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o 35obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c
index 78ccd73a8841..d3f5784807b4 100644
--- a/lib/find_next_bit.c
+++ b/lib/find_next_bit.c
@@ -16,14 +16,12 @@
16 16
17#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) 17#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
18 18
19/** 19#ifdef CONFIG_GENERIC_FIND_NEXT_BIT
20 * find_next_bit - find the next set bit in a memory region 20/*
21 * @addr: The address to base the search on 21 * Find the next set bit in a memory region.
22 * @offset: The bitnumber to start searching at
23 * @size: The maximum size to search
24 */ 22 */
25unsigned long find_next_bit(const unsigned long *addr, unsigned long size, 23unsigned long __find_next_bit(const unsigned long *addr,
26 unsigned long offset) 24 unsigned long size, unsigned long offset)
27{ 25{
28 const unsigned long *p = addr + BITOP_WORD(offset); 26 const unsigned long *p = addr + BITOP_WORD(offset);
29 unsigned long result = offset & ~(BITS_PER_LONG-1); 27 unsigned long result = offset & ~(BITS_PER_LONG-1);
@@ -60,15 +58,14 @@ found_first:
60found_middle: 58found_middle:
61 return result + __ffs(tmp); 59 return result + __ffs(tmp);
62} 60}
63 61EXPORT_SYMBOL(__find_next_bit);
64EXPORT_SYMBOL(find_next_bit);
65 62
66/* 63/*
67 * This implementation of find_{first,next}_zero_bit was stolen from 64 * This implementation of find_{first,next}_zero_bit was stolen from
68 * Linus' asm-alpha/bitops.h. 65 * Linus' asm-alpha/bitops.h.
69 */ 66 */
70unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, 67unsigned long __find_next_zero_bit(const unsigned long *addr,
71 unsigned long offset) 68 unsigned long size, unsigned long offset)
72{ 69{
73 const unsigned long *p = addr + BITOP_WORD(offset); 70 const unsigned long *p = addr + BITOP_WORD(offset);
74 unsigned long result = offset & ~(BITS_PER_LONG-1); 71 unsigned long result = offset & ~(BITS_PER_LONG-1);
@@ -105,8 +102,64 @@ found_first:
105found_middle: 102found_middle:
106 return result + ffz(tmp); 103 return result + ffz(tmp);
107} 104}
105EXPORT_SYMBOL(__find_next_zero_bit);
106#endif /* CONFIG_GENERIC_FIND_NEXT_BIT */
107
108#ifdef CONFIG_GENERIC_FIND_FIRST_BIT
109/*
110 * Find the first set bit in a memory region.
111 */
112unsigned long __find_first_bit(const unsigned long *addr,
113 unsigned long size)
114{
115 const unsigned long *p = addr;
116 unsigned long result = 0;
117 unsigned long tmp;
108 118
109EXPORT_SYMBOL(find_next_zero_bit); 119 while (size & ~(BITS_PER_LONG-1)) {
120 if ((tmp = *(p++)))
121 goto found;
122 result += BITS_PER_LONG;
123 size -= BITS_PER_LONG;
124 }
125 if (!size)
126 return result;
127
128 tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
129 if (tmp == 0UL) /* Are any bits set? */
130 return result + size; /* Nope. */
131found:
132 return result + __ffs(tmp);
133}
134EXPORT_SYMBOL(__find_first_bit);
135
136/*
137 * Find the first cleared bit in a memory region.
138 */
139unsigned long __find_first_zero_bit(const unsigned long *addr,
140 unsigned long size)
141{
142 const unsigned long *p = addr;
143 unsigned long result = 0;
144 unsigned long tmp;
145
146 while (size & ~(BITS_PER_LONG-1)) {
147 if (~(tmp = *(p++)))
148 goto found;
149 result += BITS_PER_LONG;
150 size -= BITS_PER_LONG;
151 }
152 if (!size)
153 return result;
154
155 tmp = (*p) | (~0UL << size);
156 if (tmp == ~0UL) /* Are any bits zero? */
157 return result + size; /* Nope. */
158found:
159 return result + ffz(tmp);
160}
161EXPORT_SYMBOL(__find_first_zero_bit);
162#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
110 163
111#ifdef __BIG_ENDIAN 164#ifdef __BIG_ENDIAN
112 165
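
The renamed __find_* helpers back the generic find_first_bit()/find_next_bit() family that CONFIG_GENERIC_FIND_FIRST_BIT and CONFIG_GENERIC_FIND_NEXT_BIT now select. Both return the index of the matching bit, or the full size when nothing is found, so the usual iteration looks like this sketch (example_handle is hypothetical):

#include <linux/bitops.h>

/* Sketch: visit every set bit; both calls return `size` when exhausted. */
static void example_for_each_set(const unsigned long *map,
				 unsigned long size,
				 void (*example_handle)(unsigned long))
{
	unsigned long bit;

	for (bit = find_first_bit(map, size);
	     bit < size;
	     bit = find_next_bit(map, size, bit + 1))
		example_handle(bit);
}
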
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 2ccea700968f..b6791646143e 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -111,44 +111,74 @@ static unsigned long __init init_bootmem_core(pg_data_t *pgdat,
111 * might be used for boot-time allocations - or it might get added 111 * might be used for boot-time allocations - or it might get added
112 * to the free page pool later on. 112 * to the free page pool later on.
113 */ 113 */
114static int __init reserve_bootmem_core(bootmem_data_t *bdata, 114static int __init can_reserve_bootmem_core(bootmem_data_t *bdata,
115 unsigned long addr, unsigned long size, int flags) 115 unsigned long addr, unsigned long size, int flags)
116{ 116{
117 unsigned long sidx, eidx; 117 unsigned long sidx, eidx;
118 unsigned long i; 118 unsigned long i;
119 int ret; 119
120 BUG_ON(!size);
121
122 /* out of range, don't hold other */
123 if (addr + size < bdata->node_boot_start ||
124 PFN_DOWN(addr) > bdata->node_low_pfn)
125 return 0;
120 126
121 /* 127 /*
122 * round up, partially reserved pages are considered 128 * Round up to index to the range.
123 * fully reserved.
124 */ 129 */
130 if (addr > bdata->node_boot_start)
 131 sidx = PFN_DOWN(addr - bdata->node_boot_start);
132 else
133 sidx = 0;
134
135 eidx = PFN_UP(addr + size - bdata->node_boot_start);
136 if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
137 eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);
138
139 for (i = sidx; i < eidx; i++) {
140 if (test_bit(i, bdata->node_bootmem_map)) {
141 if (flags & BOOTMEM_EXCLUSIVE)
142 return -EBUSY;
143 }
144 }
145
146 return 0;
147
148}
149
150static void __init reserve_bootmem_core(bootmem_data_t *bdata,
151 unsigned long addr, unsigned long size, int flags)
152{
153 unsigned long sidx, eidx;
154 unsigned long i;
155
125 BUG_ON(!size); 156 BUG_ON(!size);
126 BUG_ON(PFN_DOWN(addr) >= bdata->node_low_pfn);
127 BUG_ON(PFN_UP(addr + size) > bdata->node_low_pfn);
128 BUG_ON(addr < bdata->node_boot_start);
129 157
130 sidx = PFN_DOWN(addr - bdata->node_boot_start); 158 /* out of range */
159 if (addr + size < bdata->node_boot_start ||
160 PFN_DOWN(addr) > bdata->node_low_pfn)
161 return;
162
163 /*
164 * Round up to index to the range.
165 */
166 if (addr > bdata->node_boot_start)
 167 sidx = PFN_DOWN(addr - bdata->node_boot_start);
168 else
169 sidx = 0;
170
131 eidx = PFN_UP(addr + size - bdata->node_boot_start); 171 eidx = PFN_UP(addr + size - bdata->node_boot_start);
172 if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
173 eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);
132 174
133 for (i = sidx; i < eidx; i++) 175 for (i = sidx; i < eidx; i++) {
134 if (test_and_set_bit(i, bdata->node_bootmem_map)) { 176 if (test_and_set_bit(i, bdata->node_bootmem_map)) {
135#ifdef CONFIG_DEBUG_BOOTMEM 177#ifdef CONFIG_DEBUG_BOOTMEM
136 printk("hm, page %08lx reserved twice.\n", i*PAGE_SIZE); 178 printk("hm, page %08lx reserved twice.\n", i*PAGE_SIZE);
137#endif 179#endif
138 if (flags & BOOTMEM_EXCLUSIVE) {
139 ret = -EBUSY;
140 goto err;
141 }
142 } 180 }
143 181 }
144 return 0;
145
146err:
147 /* unreserve memory we accidentally reserved */
148 for (i--; i >= sidx; i--)
149 clear_bit(i, bdata->node_bootmem_map);
150
151 return ret;
152} 182}
153 183
154static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr, 184static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr,
@@ -206,9 +236,11 @@ void * __init
206__alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size, 236__alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size,
207 unsigned long align, unsigned long goal, unsigned long limit) 237 unsigned long align, unsigned long goal, unsigned long limit)
208{ 238{
209 unsigned long offset, remaining_size, areasize, preferred; 239 unsigned long areasize, preferred;
210 unsigned long i, start = 0, incr, eidx, end_pfn; 240 unsigned long i, start = 0, incr, eidx, end_pfn;
211 void *ret; 241 void *ret;
242 unsigned long node_boot_start;
243 void *node_bootmem_map;
212 244
213 if (!size) { 245 if (!size) {
214 printk("__alloc_bootmem_core(): zero-sized request\n"); 246 printk("__alloc_bootmem_core(): zero-sized request\n");
@@ -216,70 +248,83 @@ __alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size,
216 } 248 }
217 BUG_ON(align & (align-1)); 249 BUG_ON(align & (align-1));
218 250
219 if (limit && bdata->node_boot_start >= limit)
220 return NULL;
221
222 /* on nodes without memory - bootmem_map is NULL */ 251 /* on nodes without memory - bootmem_map is NULL */
223 if (!bdata->node_bootmem_map) 252 if (!bdata->node_bootmem_map)
224 return NULL; 253 return NULL;
225 254
 255 /* bdata->node_boot_start is supposed to be (12+6)-bit aligned on x86_64? */
256 node_boot_start = bdata->node_boot_start;
257 node_bootmem_map = bdata->node_bootmem_map;
258 if (align) {
259 node_boot_start = ALIGN(bdata->node_boot_start, align);
260 if (node_boot_start > bdata->node_boot_start)
261 node_bootmem_map = (unsigned long *)bdata->node_bootmem_map +
262 PFN_DOWN(node_boot_start - bdata->node_boot_start)/BITS_PER_LONG;
263 }
264
265 if (limit && node_boot_start >= limit)
266 return NULL;
267
226 end_pfn = bdata->node_low_pfn; 268 end_pfn = bdata->node_low_pfn;
227 limit = PFN_DOWN(limit); 269 limit = PFN_DOWN(limit);
228 if (limit && end_pfn > limit) 270 if (limit && end_pfn > limit)
229 end_pfn = limit; 271 end_pfn = limit;
230 272
231 eidx = end_pfn - PFN_DOWN(bdata->node_boot_start); 273 eidx = end_pfn - PFN_DOWN(node_boot_start);
232 offset = 0;
233 if (align && (bdata->node_boot_start & (align - 1UL)) != 0)
234 offset = align - (bdata->node_boot_start & (align - 1UL));
235 offset = PFN_DOWN(offset);
236 274
237 /* 275 /*
238 * We try to allocate bootmem pages above 'goal' 276 * We try to allocate bootmem pages above 'goal'
239 * first, then we try to allocate lower pages. 277 * first, then we try to allocate lower pages.
240 */ 278 */
241 if (goal && goal >= bdata->node_boot_start && PFN_DOWN(goal) < end_pfn) { 279 preferred = 0;
242 preferred = goal - bdata->node_boot_start; 280 if (goal && PFN_DOWN(goal) < end_pfn) {
281 if (goal > node_boot_start)
282 preferred = goal - node_boot_start;
243 283
244 if (bdata->last_success >= preferred) 284 if (bdata->last_success > node_boot_start &&
285 bdata->last_success - node_boot_start >= preferred)
245 if (!limit || (limit && limit > bdata->last_success)) 286 if (!limit || (limit && limit > bdata->last_success))
246 preferred = bdata->last_success; 287 preferred = bdata->last_success - node_boot_start;
247 } else 288 }
248 preferred = 0;
249 289
250 preferred = PFN_DOWN(ALIGN(preferred, align)) + offset; 290 preferred = PFN_DOWN(ALIGN(preferred, align));
251 areasize = (size + PAGE_SIZE-1) / PAGE_SIZE; 291 areasize = (size + PAGE_SIZE-1) / PAGE_SIZE;
252 incr = align >> PAGE_SHIFT ? : 1; 292 incr = align >> PAGE_SHIFT ? : 1;
253 293
254restart_scan: 294restart_scan:
255 for (i = preferred; i < eidx; i += incr) { 295 for (i = preferred; i < eidx;) {
256 unsigned long j; 296 unsigned long j;
257 i = find_next_zero_bit(bdata->node_bootmem_map, eidx, i); 297
298 i = find_next_zero_bit(node_bootmem_map, eidx, i);
258 i = ALIGN(i, incr); 299 i = ALIGN(i, incr);
259 if (i >= eidx) 300 if (i >= eidx)
260 break; 301 break;
261 if (test_bit(i, bdata->node_bootmem_map)) 302 if (test_bit(i, node_bootmem_map)) {
303 i += incr;
262 continue; 304 continue;
305 }
263 for (j = i + 1; j < i + areasize; ++j) { 306 for (j = i + 1; j < i + areasize; ++j) {
264 if (j >= eidx) 307 if (j >= eidx)
265 goto fail_block; 308 goto fail_block;
266 if (test_bit(j, bdata->node_bootmem_map)) 309 if (test_bit(j, node_bootmem_map))
267 goto fail_block; 310 goto fail_block;
268 } 311 }
269 start = i; 312 start = i;
270 goto found; 313 goto found;
271 fail_block: 314 fail_block:
272 i = ALIGN(j, incr); 315 i = ALIGN(j, incr);
316 if (i == j)
317 i += incr;
273 } 318 }
274 319
275 if (preferred > offset) { 320 if (preferred > 0) {
276 preferred = offset; 321 preferred = 0;
277 goto restart_scan; 322 goto restart_scan;
278 } 323 }
279 return NULL; 324 return NULL;
280 325
281found: 326found:
282 bdata->last_success = PFN_PHYS(start); 327 bdata->last_success = PFN_PHYS(start) + node_boot_start;
283 BUG_ON(start >= eidx); 328 BUG_ON(start >= eidx);
284 329
285 /* 330 /*
@@ -289,6 +334,7 @@ found:
289 */ 334 */
290 if (align < PAGE_SIZE && 335 if (align < PAGE_SIZE &&
291 bdata->last_offset && bdata->last_pos+1 == start) { 336 bdata->last_offset && bdata->last_pos+1 == start) {
337 unsigned long offset, remaining_size;
292 offset = ALIGN(bdata->last_offset, align); 338 offset = ALIGN(bdata->last_offset, align);
293 BUG_ON(offset > PAGE_SIZE); 339 BUG_ON(offset > PAGE_SIZE);
294 remaining_size = PAGE_SIZE - offset; 340 remaining_size = PAGE_SIZE - offset;
@@ -297,14 +343,12 @@ found:
297 /* last_pos unchanged */ 343 /* last_pos unchanged */
298 bdata->last_offset = offset + size; 344 bdata->last_offset = offset + size;
299 ret = phys_to_virt(bdata->last_pos * PAGE_SIZE + 345 ret = phys_to_virt(bdata->last_pos * PAGE_SIZE +
300 offset + 346 offset + node_boot_start);
301 bdata->node_boot_start);
302 } else { 347 } else {
303 remaining_size = size - remaining_size; 348 remaining_size = size - remaining_size;
304 areasize = (remaining_size + PAGE_SIZE-1) / PAGE_SIZE; 349 areasize = (remaining_size + PAGE_SIZE-1) / PAGE_SIZE;
305 ret = phys_to_virt(bdata->last_pos * PAGE_SIZE + 350 ret = phys_to_virt(bdata->last_pos * PAGE_SIZE +
306 offset + 351 offset + node_boot_start);
307 bdata->node_boot_start);
308 bdata->last_pos = start + areasize - 1; 352 bdata->last_pos = start + areasize - 1;
309 bdata->last_offset = remaining_size; 353 bdata->last_offset = remaining_size;
310 } 354 }
@@ -312,14 +356,14 @@ found:
312 } else { 356 } else {
313 bdata->last_pos = start + areasize - 1; 357 bdata->last_pos = start + areasize - 1;
314 bdata->last_offset = size & ~PAGE_MASK; 358 bdata->last_offset = size & ~PAGE_MASK;
315 ret = phys_to_virt(start * PAGE_SIZE + bdata->node_boot_start); 359 ret = phys_to_virt(start * PAGE_SIZE + node_boot_start);
316 } 360 }
317 361
318 /* 362 /*
319 * Reserve the area now: 363 * Reserve the area now:
320 */ 364 */
321 for (i = start; i < start + areasize; i++) 365 for (i = start; i < start + areasize; i++)
322 if (unlikely(test_and_set_bit(i, bdata->node_bootmem_map))) 366 if (unlikely(test_and_set_bit(i, node_bootmem_map)))
323 BUG(); 367 BUG();
324 memset(ret, 0, size); 368 memset(ret, 0, size);
325 return ret; 369 return ret;
@@ -401,6 +445,11 @@ unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
401void __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, 445void __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
402 unsigned long size, int flags) 446 unsigned long size, int flags)
403{ 447{
448 int ret;
449
450 ret = can_reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
451 if (ret < 0)
452 return;
404 reserve_bootmem_core(pgdat->bdata, physaddr, size, flags); 453 reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
405} 454}
406 455
@@ -426,7 +475,18 @@ unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
426int __init reserve_bootmem(unsigned long addr, unsigned long size, 475int __init reserve_bootmem(unsigned long addr, unsigned long size,
427 int flags) 476 int flags)
428{ 477{
429 return reserve_bootmem_core(NODE_DATA(0)->bdata, addr, size, flags); 478 bootmem_data_t *bdata;
479 int ret;
480
481 list_for_each_entry(bdata, &bdata_list, list) {
482 ret = can_reserve_bootmem_core(bdata, addr, size, flags);
483 if (ret < 0)
484 return ret;
485 }
486 list_for_each_entry(bdata, &bdata_list, list)
487 reserve_bootmem_core(bdata, addr, size, flags);
488
489 return 0;
430} 490}
431#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */ 491#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
432 492
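
Splitting reserve_bootmem() into a can_reserve_bootmem_core() pass over every node followed by the committing pass means an exclusive reservation that collides now fails before any bit is set, instead of unwinding a partial reservation afterwards. A caller-side sketch; the framing is illustrative:

#include <linux/bootmem.h>
#include <linux/init.h>

/* Sketch: exclusively claim a boot-time range for some facility. */
static int __init example_claim(unsigned long base, unsigned long size)
{
	/* -EBUSY now guarantees nothing was reserved at all */
	return reserve_bootmem(base, size, BOOTMEM_EXCLUSIVE);
}
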
diff --git a/mm/rmap.c b/mm/rmap.c
index 997f06907b6d..e9bb6b1093f6 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -413,9 +413,6 @@ int page_referenced(struct page *page, int is_locked,
413{ 413{
414 int referenced = 0; 414 int referenced = 0;
415 415
416 if (page_test_and_clear_young(page))
417 referenced++;
418
419 if (TestClearPageReferenced(page)) 416 if (TestClearPageReferenced(page))
420 referenced++; 417 referenced++;
421 418
@@ -433,6 +430,10 @@ int page_referenced(struct page *page, int is_locked,
433 unlock_page(page); 430 unlock_page(page);
434 } 431 }
435 } 432 }
433
434 if (page_test_and_clear_young(page))
435 referenced++;
436
436 return referenced; 437 return referenced;
437} 438}
438 439
diff --git a/mm/sparse.c b/mm/sparse.c
index 98d6b39c3472..7e9191381f86 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -295,6 +295,9 @@ struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
295 return NULL; 295 return NULL;
296} 296}
297 297
298void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
299{
300}
298/* 301/*
299 * Allocate the accumulated non-linear sections, allocate a mem_map 302 * Allocate the accumulated non-linear sections, allocate a mem_map
300 * for each and record the physical to section mapping. 303 * for each and record the physical to section mapping.
@@ -304,22 +307,50 @@ void __init sparse_init(void)
304 unsigned long pnum; 307 unsigned long pnum;
305 struct page *map; 308 struct page *map;
306 unsigned long *usemap; 309 unsigned long *usemap;
310 unsigned long **usemap_map;
311 int size;
312
313 /*
 314 * map uses a big page (2M on 64-bit x86), while
 315 * usemap is far smaller than one page (24 bytes),
 316 * so allocating 2M (2M-aligned) and 24 bytes in turn
 317 * makes each following 2M block slip one more 2M along;
 318 * on a big system the memory then ends up full of holes.
 319 * Here we try to allocate the 2M pages contiguously.
 320 *
 321 * powerpc needs to call sparse_init_one_section right after each
 322 * sparse_early_mem_map_alloc, so allocate usemap_map first.
323 */
324 size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
325 usemap_map = alloc_bootmem(size);
326 if (!usemap_map)
327 panic("can not allocate usemap_map\n");
307 328
308 for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) { 329 for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
309 if (!present_section_nr(pnum)) 330 if (!present_section_nr(pnum))
310 continue; 331 continue;
332 usemap_map[pnum] = sparse_early_usemap_alloc(pnum);
333 }
311 334
312 map = sparse_early_mem_map_alloc(pnum); 335 for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
313 if (!map) 336 if (!present_section_nr(pnum))
314 continue; 337 continue;
315 338
316 usemap = sparse_early_usemap_alloc(pnum); 339 usemap = usemap_map[pnum];
317 if (!usemap) 340 if (!usemap)
318 continue; 341 continue;
319 342
343 map = sparse_early_mem_map_alloc(pnum);
344 if (!map)
345 continue;
346
320 sparse_init_one_section(__nr_to_section(pnum), pnum, map, 347 sparse_init_one_section(__nr_to_section(pnum), pnum, map,
321 usemap); 348 usemap);
322 } 349 }
350
351 vmemmap_populate_print_last();
352
353 free_bootmem(__pa(usemap_map), size);
323} 354}
324 355
325#ifdef CONFIG_MEMORY_HOTPLUG 356#ifdef CONFIG_MEMORY_HOTPLUG
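Concretely, the fragmentation the comment above describes comes from alternating one huge, aligned allocation with one tiny one; batching the tiny usemap allocations first lets the 2M mem_map allocations pack back to back. A worked illustration, assuming a 2M mem_map per section and a 24-byte usemap:

        /*
         * Interleaved (old code), per section:
         *   [2M mem_map][24B usemap][~2M wasted until the next 2M boundary]
         *   -> roughly 4M of bootmem consumed for 2M of data.
         *
         * Batched (new code):
         *   [24B][24B][24B]...           all usemaps first
         *   [2M mem_map][2M mem_map]...  then the maps, contiguously
         *   -> one small usemap region, no per-section hole.
         */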
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 742003d3a841..9ee3affab346 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -13,6 +13,7 @@
13 13
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/jhash.h> 15#include <linux/jhash.h>
16#include <asm/unaligned.h>
16#include "ieee80211_i.h" 17#include "ieee80211_i.h"
17 18
18 19
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 02de8f1522a3..3df809222d1c 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -7,7 +7,6 @@
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
8 */ 8 */
9 9
10#include <asm/unaligned.h>
11#include "mesh.h" 10#include "mesh.h"
12 11
13#define TEST_FRAME_LEN 8192 12#define TEST_FRAME_LEN 8192
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index 24b3c8fe6bca..a098a0454dc8 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -76,7 +76,7 @@ modpost = scripts/mod/modpost \
76 $(if $(CONFIG_MODULE_SRCVERSION_ALL),-a,) \ 76 $(if $(CONFIG_MODULE_SRCVERSION_ALL),-a,) \
77 $(if $(KBUILD_EXTMOD),-i,-o) $(kernelsymfile) \ 77 $(if $(KBUILD_EXTMOD),-i,-o) $(kernelsymfile) \
78 $(if $(KBUILD_EXTMOD),-I $(modulesymfile)) \ 78 $(if $(KBUILD_EXTMOD),-I $(modulesymfile)) \
79 $(if $(iKBUILD_EXTRA_SYMBOLS), $(patsubst %, -e %,$(EXTRA_SYMBOLS))) \ 79 $(if $(KBUILD_EXTRA_SYMBOLS), $(patsubst %, -e %,$(EXTRA_SYMBOLS))) \
80 $(if $(KBUILD_EXTMOD),-o $(modulesymfile)) \ 80 $(if $(KBUILD_EXTMOD),-o $(modulesymfile)) \
81 $(if $(CONFIG_DEBUG_SECTION_MISMATCH),,-S) \ 81 $(if $(CONFIG_DEBUG_SECTION_MISMATCH),,-S) \
82 $(if $(CONFIG_MARKERS),-K $(kernelmarkersfile)) \ 82 $(if $(CONFIG_MARKERS),-K $(kernelmarkersfile)) \
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index f8b42ab0724b..757294b4f322 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -1552,10 +1552,10 @@ static void read_symbols(char *modname)
1552 } 1552 }
1553 1553
1554 license = get_modinfo(info.modinfo, info.modinfo_len, "license"); 1554 license = get_modinfo(info.modinfo, info.modinfo_len, "license");
1555 if (!license && !is_vmlinux(modname)) 1555 if (info.modinfo && !license && !is_vmlinux(modname))
1556 fatal("modpost: missing MODULE_LICENSE() in %s\n" 1556 warn("modpost: missing MODULE_LICENSE() in %s\n"
1557 "see include/linux/module.h for " 1557 "see include/linux/module.h for "
1558 "more information\n", modname); 1558 "more information\n", modname);
1559 while (license) { 1559 while (license) {
1560 if (license_is_gpl_compatible(license)) 1560 if (license_is_gpl_compatible(license))
1561 mod->gpl_compatible = 1; 1561 mod->gpl_compatible = 1;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index b2e12893e3f4..c82cf15730a1 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -40,6 +40,7 @@
40#include <linux/kvm_para.h> 40#include <linux/kvm_para.h>
41#include <linux/pagemap.h> 41#include <linux/pagemap.h>
42#include <linux/mman.h> 42#include <linux/mman.h>
43#include <linux/swap.h>
43 44
44#include <asm/processor.h> 45#include <asm/processor.h>
45#include <asm/io.h> 46#include <asm/io.h>
@@ -59,7 +60,7 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
59 60
60static __read_mostly struct preempt_ops kvm_preempt_ops; 61static __read_mostly struct preempt_ops kvm_preempt_ops;
61 62
62static struct dentry *debugfs_dir; 63struct dentry *kvm_debugfs_dir;
63 64
64static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl, 65static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
65 unsigned long arg); 66 unsigned long arg);
@@ -119,6 +120,29 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
119 smp_call_function_mask(cpus, ack_flush, NULL, 1); 120 smp_call_function_mask(cpus, ack_flush, NULL, 1);
120} 121}
121 122
123void kvm_reload_remote_mmus(struct kvm *kvm)
124{
125 int i, cpu;
126 cpumask_t cpus;
127 struct kvm_vcpu *vcpu;
128
129 cpus_clear(cpus);
130 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
131 vcpu = kvm->vcpus[i];
132 if (!vcpu)
133 continue;
134 if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
135 continue;
136 cpu = vcpu->cpu;
137 if (cpu != -1 && cpu != raw_smp_processor_id())
138 cpu_set(cpu, cpus);
139 }
140 if (cpus_empty(cpus))
141 return;
142 smp_call_function_mask(cpus, ack_flush, NULL, 1);
143}
144
145
122int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) 146int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
123{ 147{
124 struct page *page; 148 struct page *page;
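kvm_reload_remote_mmus() follows the same shape as kvm_flush_remote_tlbs() just above it: set a per-vcpu request bit, collect only the cpus that had not already seen the request, and send them an IPI. The subtlety is in the handler:

        /* The IPI handler is deliberately empty: arriving at all is the
         * point, since the interrupt forces the vcpu out of guest mode,
         * and it then notices the request bit before re-entering. */
        static void ack_flush(void *_completed)
        {
        }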
@@ -170,6 +194,7 @@ static struct kvm *kvm_create_vm(void)
170 mutex_init(&kvm->lock); 194 mutex_init(&kvm->lock);
171 kvm_io_bus_init(&kvm->mmio_bus); 195 kvm_io_bus_init(&kvm->mmio_bus);
172 init_rwsem(&kvm->slots_lock); 196 init_rwsem(&kvm->slots_lock);
197 atomic_set(&kvm->users_count, 1);
173 spin_lock(&kvm_lock); 198 spin_lock(&kvm_lock);
174 list_add(&kvm->vm_list, &vm_list); 199 list_add(&kvm->vm_list, &vm_list);
175 spin_unlock(&kvm_lock); 200 spin_unlock(&kvm_lock);
@@ -189,9 +214,13 @@ static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
189 if (!dont || free->dirty_bitmap != dont->dirty_bitmap) 214 if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
190 vfree(free->dirty_bitmap); 215 vfree(free->dirty_bitmap);
191 216
217 if (!dont || free->lpage_info != dont->lpage_info)
218 vfree(free->lpage_info);
219
192 free->npages = 0; 220 free->npages = 0;
193 free->dirty_bitmap = NULL; 221 free->dirty_bitmap = NULL;
194 free->rmap = NULL; 222 free->rmap = NULL;
223 free->lpage_info = NULL;
195} 224}
196 225
197void kvm_free_physmem(struct kvm *kvm) 226void kvm_free_physmem(struct kvm *kvm)
@@ -215,11 +244,25 @@ static void kvm_destroy_vm(struct kvm *kvm)
215 mmdrop(mm); 244 mmdrop(mm);
216} 245}
217 246
247void kvm_get_kvm(struct kvm *kvm)
248{
249 atomic_inc(&kvm->users_count);
250}
251EXPORT_SYMBOL_GPL(kvm_get_kvm);
252
253void kvm_put_kvm(struct kvm *kvm)
254{
255 if (atomic_dec_and_test(&kvm->users_count))
256 kvm_destroy_vm(kvm);
257}
258EXPORT_SYMBOL_GPL(kvm_put_kvm);
259
260
218static int kvm_vm_release(struct inode *inode, struct file *filp) 261static int kvm_vm_release(struct inode *inode, struct file *filp)
219{ 262{
220 struct kvm *kvm = filp->private_data; 263 struct kvm *kvm = filp->private_data;
221 264
222 kvm_destroy_vm(kvm); 265 kvm_put_kvm(kvm);
223 return 0; 266 return 0;
224} 267}
225 268
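These hunks replace the old lifetime trick of bumping kvm->filp->f_count with an explicit users_count: the VM starts at one reference for its creator, each vcpu fd takes one with kvm_get_kvm(), and every release path drops its own through kvm_put_kvm(), which destroys the VM only on the final put. A minimal sketch of the same embedded-refcount idiom, with a hypothetical obj/obj_destroy():

        #include <asm/atomic.h>

        struct obj {
                atomic_t users;
        };

        static void obj_destroy(struct obj *o);    /* teardown, hypothetical */

        static void obj_init(struct obj *o)
        {
                atomic_set(&o->users, 1);          /* creator's reference */
        }

        static void obj_get(struct obj *o)
        {
                atomic_inc(&o->users);
        }

        static void obj_put(struct obj *o)
        {
                if (atomic_dec_and_test(&o->users))
                        obj_destroy(o);            /* last user tears down */
        }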
@@ -301,6 +344,25 @@ int __kvm_set_memory_region(struct kvm *kvm,
301 new.user_alloc = user_alloc; 344 new.user_alloc = user_alloc;
302 new.userspace_addr = mem->userspace_addr; 345 new.userspace_addr = mem->userspace_addr;
303 } 346 }
347 if (npages && !new.lpage_info) {
348 int largepages = npages / KVM_PAGES_PER_HPAGE;
349 if (npages % KVM_PAGES_PER_HPAGE)
350 largepages++;
351 if (base_gfn % KVM_PAGES_PER_HPAGE)
352 largepages++;
353
354 new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info));
355
356 if (!new.lpage_info)
357 goto out_free;
358
359 memset(new.lpage_info, 0, largepages * sizeof(*new.lpage_info));
360
361 if (base_gfn % KVM_PAGES_PER_HPAGE)
362 new.lpage_info[0].write_count = 1;
363 if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE)
364 new.lpage_info[largepages-1].write_count = 1;
365 }
304 366
305 /* Allocate page dirty bitmap if needed */ 367 /* Allocate page dirty bitmap if needed */
306 if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) { 368 if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
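The largepages computation above is a ceiling division with up to two extra entries for a misaligned start and end, and the partial head/tail entries are write-protected so they can never be mapped by a large pte. A worked example, assuming KVM_PAGES_PER_HPAGE == 512:

        /*
         * base_gfn = 100, npages = 1000  ->  slot covers gfn 100..1099
         *   largepages = 1000 / 512        =  1
         *   1000 % 512 != 0                -> 2   (partial tail)
         *   100 % 512 != 0                 -> 3   (partial head)
         * The slot indeed spans three 512-page frames (0..511, 512..1023,
         * 1024..1535); lpage_info[0] and lpage_info[2] get write_count = 1
         * because the guest does not own those frames end to end.
         */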
@@ -397,6 +459,12 @@ int is_error_page(struct page *page)
397} 459}
398EXPORT_SYMBOL_GPL(is_error_page); 460EXPORT_SYMBOL_GPL(is_error_page);
399 461
462int is_error_pfn(pfn_t pfn)
463{
464 return pfn == bad_pfn;
465}
466EXPORT_SYMBOL_GPL(is_error_pfn);
467
400static inline unsigned long bad_hva(void) 468static inline unsigned long bad_hva(void)
401{ 469{
402 return PAGE_OFFSET; 470 return PAGE_OFFSET;
@@ -444,7 +512,7 @@ int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
444} 512}
445EXPORT_SYMBOL_GPL(kvm_is_visible_gfn); 513EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
446 514
447static unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) 515unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
448{ 516{
449 struct kvm_memory_slot *slot; 517 struct kvm_memory_slot *slot;
450 518
@@ -458,7 +526,7 @@ static unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
458/* 526/*
459 * Requires current->mm->mmap_sem to be held 527 * Requires current->mm->mmap_sem to be held
460 */ 528 */
461struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) 529pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
462{ 530{
463 struct page *page[1]; 531 struct page *page[1];
464 unsigned long addr; 532 unsigned long addr;
@@ -469,7 +537,7 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
469 addr = gfn_to_hva(kvm, gfn); 537 addr = gfn_to_hva(kvm, gfn);
470 if (kvm_is_error_hva(addr)) { 538 if (kvm_is_error_hva(addr)) {
471 get_page(bad_page); 539 get_page(bad_page);
472 return bad_page; 540 return page_to_pfn(bad_page);
473 } 541 }
474 542
475 npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page, 543 npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
@@ -477,27 +545,71 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
477 545
478 if (npages != 1) { 546 if (npages != 1) {
479 get_page(bad_page); 547 get_page(bad_page);
480 return bad_page; 548 return page_to_pfn(bad_page);
481 } 549 }
482 550
483 return page[0]; 551 return page_to_pfn(page[0]);
552}
553
554EXPORT_SYMBOL_GPL(gfn_to_pfn);
555
556struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
557{
558 return pfn_to_page(gfn_to_pfn(kvm, gfn));
484} 559}
485 560
486EXPORT_SYMBOL_GPL(gfn_to_page); 561EXPORT_SYMBOL_GPL(gfn_to_page);
487 562
488void kvm_release_page_clean(struct page *page) 563void kvm_release_page_clean(struct page *page)
489{ 564{
490 put_page(page); 565 kvm_release_pfn_clean(page_to_pfn(page));
491} 566}
492EXPORT_SYMBOL_GPL(kvm_release_page_clean); 567EXPORT_SYMBOL_GPL(kvm_release_page_clean);
493 568
569void kvm_release_pfn_clean(pfn_t pfn)
570{
571 put_page(pfn_to_page(pfn));
572}
573EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
574
494void kvm_release_page_dirty(struct page *page) 575void kvm_release_page_dirty(struct page *page)
495{ 576{
577 kvm_release_pfn_dirty(page_to_pfn(page));
578}
579EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
580
581void kvm_release_pfn_dirty(pfn_t pfn)
582{
583 kvm_set_pfn_dirty(pfn);
584 kvm_release_pfn_clean(pfn);
585}
586EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
587
588void kvm_set_page_dirty(struct page *page)
589{
590 kvm_set_pfn_dirty(page_to_pfn(page));
591}
592EXPORT_SYMBOL_GPL(kvm_set_page_dirty);
593
594void kvm_set_pfn_dirty(pfn_t pfn)
595{
596 struct page *page = pfn_to_page(pfn);
496 if (!PageReserved(page)) 597 if (!PageReserved(page))
497 SetPageDirty(page); 598 SetPageDirty(page);
498 put_page(page);
499} 599}
500EXPORT_SYMBOL_GPL(kvm_release_page_dirty); 600EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
601
602void kvm_set_pfn_accessed(pfn_t pfn)
603{
604 mark_page_accessed(pfn_to_page(pfn));
605}
606EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
607
608void kvm_get_pfn(pfn_t pfn)
609{
610 get_page(pfn_to_page(pfn));
611}
612EXPORT_SYMBOL_GPL(kvm_get_pfn);
501 613
502static int next_segment(unsigned long len, int offset) 614static int next_segment(unsigned long len, int offset)
503{ 615{
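With the pfn-based primitives above, the struct page helpers become thin wrappers, and callers that only need a raw frame number can stay on the pfn API end to end. A hedged usage sketch with a hypothetical caller:

        /* Hypothetical caller using the new pfn API: errors are detected
         * with is_error_pfn() and references dropped via the release
         * helpers. */
        static int map_one_gfn(struct kvm *kvm, gfn_t gfn)
        {
                pfn_t pfn = gfn_to_pfn(kvm, gfn);

                if (is_error_pfn(pfn)) {
                        kvm_release_pfn_clean(pfn);  /* drop the bad_page ref */
                        return -EFAULT;
                }
                /* ... install pfn into a shadow pte ... */
                kvm_release_pfn_dirty(pfn);          /* mark dirty, drop ref */
                return 0;
        }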
@@ -554,7 +666,9 @@ int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
554 addr = gfn_to_hva(kvm, gfn); 666 addr = gfn_to_hva(kvm, gfn);
555 if (kvm_is_error_hva(addr)) 667 if (kvm_is_error_hva(addr))
556 return -EFAULT; 668 return -EFAULT;
669 pagefault_disable();
557 r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len); 670 r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
671 pagefault_enable();
558 if (r) 672 if (r)
559 return -EFAULT; 673 return -EFAULT;
560 return 0; 674 return 0;
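The pagefault_disable()/pagefault_enable() pair is what makes the copy safe in atomic context: with faults disabled, __copy_from_user_inatomic() returns nonzero immediately instead of sleeping in the fault handler when the user page is not resident. The general shape, as a hedged sketch:

        #include <linux/uaccess.h>

        /* Copy from user space without ever sleeping; returns 0 on success,
         * -EFAULT if the source page was not resident (the caller may retry
         * from a sleepable context). */
        static int copy_from_user_nosleep(void *dst, const void __user *src,
                                          unsigned long len)
        {
                int r;

                pagefault_disable();
                r = __copy_from_user_inatomic(dst, src, len);
                pagefault_enable();
                return r ? -EFAULT : 0;
        }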
@@ -651,6 +765,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
651 * We will block until either an interrupt or a signal wakes us up 765 * We will block until either an interrupt or a signal wakes us up
652 */ 766 */
653 while (!kvm_cpu_has_interrupt(vcpu) 767 while (!kvm_cpu_has_interrupt(vcpu)
768 && !kvm_cpu_has_pending_timer(vcpu)
654 && !signal_pending(current) 769 && !signal_pending(current)
655 && !kvm_arch_vcpu_runnable(vcpu)) { 770 && !kvm_arch_vcpu_runnable(vcpu)) {
656 set_current_state(TASK_INTERRUPTIBLE); 771 set_current_state(TASK_INTERRUPTIBLE);
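The loop this hunk extends is the open-coded interruptible wait: register on the wait queue, re-check every wake condition, and schedule until one fires. A hedged sketch of the canonical form, with a hypothetical should_wake(); setting the task state before the check avoids a lost wakeup between test and sleep:

        #include <linux/wait.h>
        #include <linux/sched.h>

        static void wait_for_event(wait_queue_head_t *q)
        {
                DECLARE_WAITQUEUE(wait, current);

                add_wait_queue(q, &wait);
                for (;;) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        if (should_wake() || signal_pending(current))
                                break;
                        schedule();
                }
                __set_current_state(TASK_RUNNING);
                remove_wait_queue(q, &wait);
        }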
@@ -678,8 +793,10 @@ static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
678 793
679 if (vmf->pgoff == 0) 794 if (vmf->pgoff == 0)
680 page = virt_to_page(vcpu->run); 795 page = virt_to_page(vcpu->run);
796#ifdef CONFIG_X86
681 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET) 797 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
682 page = virt_to_page(vcpu->arch.pio_data); 798 page = virt_to_page(vcpu->arch.pio_data);
799#endif
683 else 800 else
684 return VM_FAULT_SIGBUS; 801 return VM_FAULT_SIGBUS;
685 get_page(page); 802 get_page(page);
@@ -701,11 +818,11 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
701{ 818{
702 struct kvm_vcpu *vcpu = filp->private_data; 819 struct kvm_vcpu *vcpu = filp->private_data;
703 820
704 fput(vcpu->kvm->filp); 821 kvm_put_kvm(vcpu->kvm);
705 return 0; 822 return 0;
706} 823}
707 824
708static struct file_operations kvm_vcpu_fops = { 825static const struct file_operations kvm_vcpu_fops = {
709 .release = kvm_vcpu_release, 826 .release = kvm_vcpu_release,
710 .unlocked_ioctl = kvm_vcpu_ioctl, 827 .unlocked_ioctl = kvm_vcpu_ioctl,
711 .compat_ioctl = kvm_vcpu_ioctl, 828 .compat_ioctl = kvm_vcpu_ioctl,
@@ -723,9 +840,10 @@ static int create_vcpu_fd(struct kvm_vcpu *vcpu)
723 840
724 r = anon_inode_getfd(&fd, &inode, &file, 841 r = anon_inode_getfd(&fd, &inode, &file,
725 "kvm-vcpu", &kvm_vcpu_fops, vcpu); 842 "kvm-vcpu", &kvm_vcpu_fops, vcpu);
726 if (r) 843 if (r) {
844 kvm_put_kvm(vcpu->kvm);
727 return r; 845 return r;
728 atomic_inc(&vcpu->kvm->filp->f_count); 846 }
729 return fd; 847 return fd;
730} 848}
731 849
@@ -760,6 +878,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
760 mutex_unlock(&kvm->lock); 878 mutex_unlock(&kvm->lock);
761 879
762 /* Now it's all set up, let userspace reach it */ 880 /* Now it's all set up, let userspace reach it */
881 kvm_get_kvm(kvm);
763 r = create_vcpu_fd(vcpu); 882 r = create_vcpu_fd(vcpu);
764 if (r < 0) 883 if (r < 0)
765 goto unlink; 884 goto unlink;
@@ -802,28 +921,39 @@ static long kvm_vcpu_ioctl(struct file *filp,
802 r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run); 921 r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
803 break; 922 break;
804 case KVM_GET_REGS: { 923 case KVM_GET_REGS: {
805 struct kvm_regs kvm_regs; 924 struct kvm_regs *kvm_regs;
806 925
807 memset(&kvm_regs, 0, sizeof kvm_regs); 926 r = -ENOMEM;
808 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, &kvm_regs); 927 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
809 if (r) 928 if (!kvm_regs)
810 goto out; 929 goto out;
930 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
931 if (r)
932 goto out_free1;
811 r = -EFAULT; 933 r = -EFAULT;
812 if (copy_to_user(argp, &kvm_regs, sizeof kvm_regs)) 934 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
813 goto out; 935 goto out_free1;
814 r = 0; 936 r = 0;
937out_free1:
938 kfree(kvm_regs);
815 break; 939 break;
816 } 940 }
817 case KVM_SET_REGS: { 941 case KVM_SET_REGS: {
818 struct kvm_regs kvm_regs; 942 struct kvm_regs *kvm_regs;
819 943
820 r = -EFAULT; 944 r = -ENOMEM;
821 if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs)) 945 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
946 if (!kvm_regs)
822 goto out; 947 goto out;
823 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, &kvm_regs); 948 r = -EFAULT;
949 if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
950 goto out_free2;
951 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
824 if (r) 952 if (r)
825 goto out; 953 goto out_free2;
826 r = 0; 954 r = 0;
955out_free2:
956 kfree(kvm_regs);
827 break; 957 break;
828 } 958 }
829 case KVM_GET_SREGS: { 959 case KVM_GET_SREGS: {
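Moving struct kvm_regs off the stack trades a kzalloc()/kfree() per call for a smaller kernel stack frame, which matters because the register structs are large and the ioctl path can already be deep. The resulting alloc/copy/work/free shape is the standard one for big ioctl arguments; a hedged sketch with hypothetical names (r and argp as in the surrounding handler):

        case MY_BIG_IOCTL: {                       /* hypothetical command */
                struct big_args *a;

                r = -ENOMEM;
                a = kzalloc(sizeof(*a), GFP_KERNEL);
                if (!a)
                        goto out;
                r = -EFAULT;
                if (copy_from_user(a, argp, sizeof(*a)))
                        goto free_args;
                r = do_big_work(a);                /* hypothetical worker */
        free_args:
                kfree(a);
                break;
        }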
@@ -851,6 +981,30 @@ static long kvm_vcpu_ioctl(struct file *filp,
851 r = 0; 981 r = 0;
852 break; 982 break;
853 } 983 }
984 case KVM_GET_MP_STATE: {
985 struct kvm_mp_state mp_state;
986
987 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
988 if (r)
989 goto out;
990 r = -EFAULT;
991 if (copy_to_user(argp, &mp_state, sizeof mp_state))
992 goto out;
993 r = 0;
994 break;
995 }
996 case KVM_SET_MP_STATE: {
997 struct kvm_mp_state mp_state;
998
999 r = -EFAULT;
1000 if (copy_from_user(&mp_state, argp, sizeof mp_state))
1001 goto out;
1002 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
1003 if (r)
1004 goto out;
1005 r = 0;
1006 break;
1007 }
854 case KVM_TRANSLATE: { 1008 case KVM_TRANSLATE: {
855 struct kvm_translation tr; 1009 struct kvm_translation tr;
856 1010
@@ -1005,7 +1159,7 @@ static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
1005 return 0; 1159 return 0;
1006} 1160}
1007 1161
1008static struct file_operations kvm_vm_fops = { 1162static const struct file_operations kvm_vm_fops = {
1009 .release = kvm_vm_release, 1163 .release = kvm_vm_release,
1010 .unlocked_ioctl = kvm_vm_ioctl, 1164 .unlocked_ioctl = kvm_vm_ioctl,
1011 .compat_ioctl = kvm_vm_ioctl, 1165 .compat_ioctl = kvm_vm_ioctl,
@@ -1024,12 +1178,10 @@ static int kvm_dev_ioctl_create_vm(void)
1024 return PTR_ERR(kvm); 1178 return PTR_ERR(kvm);
1025 r = anon_inode_getfd(&fd, &inode, &file, "kvm-vm", &kvm_vm_fops, kvm); 1179 r = anon_inode_getfd(&fd, &inode, &file, "kvm-vm", &kvm_vm_fops, kvm);
1026 if (r) { 1180 if (r) {
1027 kvm_destroy_vm(kvm); 1181 kvm_put_kvm(kvm);
1028 return r; 1182 return r;
1029 } 1183 }
1030 1184
1031 kvm->filp = file;
1032
1033 return fd; 1185 return fd;
1034} 1186}
1035 1187
@@ -1059,7 +1211,15 @@ static long kvm_dev_ioctl(struct file *filp,
1059 r = -EINVAL; 1211 r = -EINVAL;
1060 if (arg) 1212 if (arg)
1061 goto out; 1213 goto out;
1062 r = 2 * PAGE_SIZE; 1214 r = PAGE_SIZE; /* struct kvm_run */
1215#ifdef CONFIG_X86
1216 r += PAGE_SIZE; /* pio data page */
1217#endif
1218 break;
1219 case KVM_TRACE_ENABLE:
1220 case KVM_TRACE_PAUSE:
1221 case KVM_TRACE_DISABLE:
1222 r = kvm_trace_ioctl(ioctl, arg);
1063 break; 1223 break;
1064 default: 1224 default:
1065 return kvm_arch_dev_ioctl(filp, ioctl, arg); 1225 return kvm_arch_dev_ioctl(filp, ioctl, arg);
@@ -1232,9 +1392,9 @@ static void kvm_init_debug(void)
1232{ 1392{
1233 struct kvm_stats_debugfs_item *p; 1393 struct kvm_stats_debugfs_item *p;
1234 1394
1235 debugfs_dir = debugfs_create_dir("kvm", NULL); 1395 kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
1236 for (p = debugfs_entries; p->name; ++p) 1396 for (p = debugfs_entries; p->name; ++p)
1237 p->dentry = debugfs_create_file(p->name, 0444, debugfs_dir, 1397 p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
1238 (void *)(long)p->offset, 1398 (void *)(long)p->offset,
1239 stat_fops[p->kind]); 1399 stat_fops[p->kind]);
1240} 1400}
@@ -1245,7 +1405,7 @@ static void kvm_exit_debug(void)
1245 1405
1246 for (p = debugfs_entries; p->name; ++p) 1406 for (p = debugfs_entries; p->name; ++p)
1247 debugfs_remove(p->dentry); 1407 debugfs_remove(p->dentry);
1248 debugfs_remove(debugfs_dir); 1408 debugfs_remove(kvm_debugfs_dir);
1249} 1409}
1250 1410
1251static int kvm_suspend(struct sys_device *dev, pm_message_t state) 1411static int kvm_suspend(struct sys_device *dev, pm_message_t state)
@@ -1272,6 +1432,7 @@ static struct sys_device kvm_sysdev = {
1272}; 1432};
1273 1433
1274struct page *bad_page; 1434struct page *bad_page;
1435pfn_t bad_pfn;
1275 1436
1276static inline 1437static inline
1277struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn) 1438struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
@@ -1313,6 +1474,8 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
1313 goto out; 1474 goto out;
1314 } 1475 }
1315 1476
1477 bad_pfn = page_to_pfn(bad_page);
1478
1316 r = kvm_arch_hardware_setup(); 1479 r = kvm_arch_hardware_setup();
1317 if (r < 0) 1480 if (r < 0)
1318 goto out_free_0; 1481 goto out_free_0;
@@ -1386,6 +1549,7 @@ EXPORT_SYMBOL_GPL(kvm_init);
1386 1549
1387void kvm_exit(void) 1550void kvm_exit(void)
1388{ 1551{
1552 kvm_trace_cleanup();
1389 misc_deregister(&kvm_dev); 1553 misc_deregister(&kvm_dev);
1390 kmem_cache_destroy(kvm_vcpu_cache); 1554 kmem_cache_destroy(kvm_vcpu_cache);
1391 sysdev_unregister(&kvm_sysdev); 1555 sysdev_unregister(&kvm_sysdev);
diff --git a/virt/kvm/kvm_trace.c b/virt/kvm/kvm_trace.c
new file mode 100644
index 000000000000..0e495470788d
--- /dev/null
+++ b/virt/kvm/kvm_trace.c
@@ -0,0 +1,276 @@
1/*
2 * kvm trace
3 *
4 * It is designed to allow debugging traces of kvm to be generated
5 * on UP / SMP machines. Each trace entry can be timestamped so that
6 * it's possible to reconstruct a chronological record of trace events.
7 * The implementation is modeled on the kernel's blktrace support.
8 *
9 * Copyright (c) 2008 Intel Corporation
10 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
11 *
12 * Authors: Feng(Eric) Liu, eric.e.liu@intel.com
13 *
14 * Date: Feb 2008
15 */
16
17#include <linux/module.h>
18#include <linux/relay.h>
19#include <linux/debugfs.h>
20
21#include <linux/kvm_host.h>
22
23#define KVM_TRACE_STATE_RUNNING (1 << 0)
24#define KVM_TRACE_STATE_PAUSE (1 << 1)
25#define KVM_TRACE_STATE_CLEARUP (1 << 2)
26
27struct kvm_trace {
28 int trace_state;
29 struct rchan *rchan;
30 struct dentry *lost_file;
31 atomic_t lost_records;
32};
33static struct kvm_trace *kvm_trace;
34
35struct kvm_trace_probe {
36 const char *name;
37 const char *format;
38 u32 cycle_in;
39 marker_probe_func *probe_func;
40};
41
42static inline int calc_rec_size(int cycle, int extra)
43{
44 int rec_size = KVM_TRC_HEAD_SIZE;
45
46 rec_size += extra;
47 return cycle ? rec_size += KVM_TRC_CYCLE_SIZE : rec_size;
48}
49
50static void kvm_add_trace(void *probe_private, void *call_data,
51 const char *format, va_list *args)
52{
53 struct kvm_trace_probe *p = probe_private;
54 struct kvm_trace *kt = kvm_trace;
55 struct kvm_trace_rec rec;
56 struct kvm_vcpu *vcpu;
57 int i, extra, size;
58
59 if (unlikely(kt->trace_state != KVM_TRACE_STATE_RUNNING))
60 return;
61
62 rec.event = va_arg(*args, u32);
63 vcpu = va_arg(*args, struct kvm_vcpu *);
64 rec.pid = current->tgid;
65 rec.vcpu_id = vcpu->vcpu_id;
66
67 extra = va_arg(*args, u32);
68 WARN_ON(!(extra <= KVM_TRC_EXTRA_MAX));
69 extra = min_t(u32, extra, KVM_TRC_EXTRA_MAX);
70 rec.extra_u32 = extra;
71
72 rec.cycle_in = p->cycle_in;
73
74 if (rec.cycle_in) {
75 u64 cycle = 0;
76
77 cycle = get_cycles();
78 rec.u.cycle.cycle_lo = (u32)cycle;
79 rec.u.cycle.cycle_hi = (u32)(cycle >> 32);
80
81 for (i = 0; i < rec.extra_u32; i++)
82 rec.u.cycle.extra_u32[i] = va_arg(*args, u32);
83 } else {
84 for (i = 0; i < rec.extra_u32; i++)
85 rec.u.nocycle.extra_u32[i] = va_arg(*args, u32);
86 }
87
88 size = calc_rec_size(rec.cycle_in, rec.extra_u32 * sizeof(u32));
89 relay_write(kt->rchan, &rec, size);
90}
91
92static struct kvm_trace_probe kvm_trace_probes[] = {
93 { "kvm_trace_entryexit", "%u %p %u %u %u %u %u %u", 1, kvm_add_trace },
94 { "kvm_trace_handler", "%u %p %u %u %u %u %u %u", 0, kvm_add_trace },
95};
96
97static int lost_records_get(void *data, u64 *val)
98{
99 struct kvm_trace *kt = data;
100
101 *val = atomic_read(&kt->lost_records);
102 return 0;
103}
104
105DEFINE_SIMPLE_ATTRIBUTE(kvm_trace_lost_ops, lost_records_get, NULL, "%llu\n");
106
107/*
108 * The relay channel is used in "no-overwrite" mode; it keeps track of how
109 * many times we encountered a full subbuffer, to tell the user space app
110 * how many records were lost.
111 */
112static int kvm_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
113 void *prev_subbuf, size_t prev_padding)
114{
115 struct kvm_trace *kt;
116
117 if (!relay_buf_full(buf))
118 return 1;
119
120 kt = buf->chan->private_data;
121 atomic_inc(&kt->lost_records);
122
123 return 0;
124}
125
126static struct dentry *kvm_create_buf_file_callack(const char *filename,
127 struct dentry *parent,
128 int mode,
129 struct rchan_buf *buf,
130 int *is_global)
131{
132 return debugfs_create_file(filename, mode, parent, buf,
133 &relay_file_operations);
134}
135
136static int kvm_remove_buf_file_callback(struct dentry *dentry)
137{
138 debugfs_remove(dentry);
139 return 0;
140}
141
142static struct rchan_callbacks kvm_relay_callbacks = {
143 .subbuf_start = kvm_subbuf_start_callback,
144 .create_buf_file = kvm_create_buf_file_callack,
145 .remove_buf_file = kvm_remove_buf_file_callback,
146};
147
148static int do_kvm_trace_enable(struct kvm_user_trace_setup *kuts)
149{
150 struct kvm_trace *kt;
151 int i, r = -ENOMEM;
152
153 if (!kuts->buf_size || !kuts->buf_nr)
154 return -EINVAL;
155
156 kt = kzalloc(sizeof(*kt), GFP_KERNEL);
157 if (!kt)
158 goto err;
159
160 r = -EIO;
161 atomic_set(&kt->lost_records, 0);
162 kt->lost_file = debugfs_create_file("lost_records", 0444, kvm_debugfs_dir,
163 kt, &kvm_trace_lost_ops);
164 if (!kt->lost_file)
165 goto err;
166
167 kt->rchan = relay_open("trace", kvm_debugfs_dir, kuts->buf_size,
168 kuts->buf_nr, &kvm_relay_callbacks, kt);
169 if (!kt->rchan)
170 goto err;
171
172 kvm_trace = kt;
173
174 for (i = 0; i < ARRAY_SIZE(kvm_trace_probes); i++) {
175 struct kvm_trace_probe *p = &kvm_trace_probes[i];
176
177 r = marker_probe_register(p->name, p->format, p->probe_func, p);
178 if (r)
179 printk(KERN_INFO "Unable to register probe %s\n",
180 p->name);
181 }
182
183 kvm_trace->trace_state = KVM_TRACE_STATE_RUNNING;
184
185 return 0;
186err:
187 if (kt) {
188 if (kt->lost_file)
189 debugfs_remove(kt->lost_file);
190 if (kt->rchan)
191 relay_close(kt->rchan);
192 kfree(kt);
193 }
194 return r;
195}
196
197static int kvm_trace_enable(char __user *arg)
198{
199 struct kvm_user_trace_setup kuts;
200 int ret;
201
202 ret = copy_from_user(&kuts, arg, sizeof(kuts));
203 if (ret)
204 return -EFAULT;
205
206 ret = do_kvm_trace_enable(&kuts);
207 if (ret)
208 return ret;
209
210 return 0;
211}
212
213static int kvm_trace_pause(void)
214{
215 struct kvm_trace *kt = kvm_trace;
216 int r = -EINVAL;
217
218 if (kt == NULL)
219 return r;
220
221 if (kt->trace_state == KVM_TRACE_STATE_RUNNING) {
222 kt->trace_state = KVM_TRACE_STATE_PAUSE;
223 relay_flush(kt->rchan);
224 r = 0;
225 }
226
227 return r;
228}
229
230void kvm_trace_cleanup(void)
231{
232 struct kvm_trace *kt = kvm_trace;
233 int i;
234
235 if (kt == NULL)
236 return;
237
238 if (kt->trace_state == KVM_TRACE_STATE_RUNNING ||
239 kt->trace_state == KVM_TRACE_STATE_PAUSE) {
240
241 kt->trace_state = KVM_TRACE_STATE_CLEARUP;
242
243 for (i = 0; i < ARRAY_SIZE(kvm_trace_probes); i++) {
244 struct kvm_trace_probe *p = &kvm_trace_probes[i];
245 marker_probe_unregister(p->name, p->probe_func, p);
246 }
247
248 relay_close(kt->rchan);
249 debugfs_remove(kt->lost_file);
250 kfree(kt);
251 }
252}
253
254int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg)
255{
256 void __user *argp = (void __user *)arg;
257 long r = -EINVAL;
258
259 if (!capable(CAP_SYS_ADMIN))
260 return -EPERM;
261
262 switch (ioctl) {
263 case KVM_TRACE_ENABLE:
264 r = kvm_trace_enable(argp);
265 break;
266 case KVM_TRACE_PAUSE:
267 r = kvm_trace_pause();
268 break;
269 case KVM_TRACE_DISABLE:
270 r = 0;
271 kvm_trace_cleanup();
272 break;
273 }
274
275 return r;
276}
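Taken together, kvm_trace_ioctl() gives userspace a three-state switch (running, paused, torn down) over a relay channel that surfaces under the kvm debugfs directory. A hedged sketch of how a userspace tool might drive it, assuming debugfs is mounted at /sys/kernel/debug and using the kvm_user_trace_setup fields consumed by do_kvm_trace_enable():

        /* Hypothetical userspace driver for the trace ioctls. */
        #include <fcntl.h>
        #include <sys/ioctl.h>
        #include <linux/kvm.h>

        int start_kvm_trace(void)
        {
                struct kvm_user_trace_setup kuts = {
                        .buf_size = 4096,   /* bytes per relay subbuffer */
                        .buf_nr   = 8,      /* subbuffers to cycle through */
                };
                int fd = open("/dev/kvm", O_RDWR);

                if (fd < 0)
                        return -1;
                if (ioctl(fd, KVM_TRACE_ENABLE, &kuts) < 0)
                        return -1;
                /* records now stream to the relay files under
                 * /sys/kernel/debug/kvm/; pause or stop later with
                 * ioctl(fd, KVM_TRACE_PAUSE) / ioctl(fd, KVM_TRACE_DISABLE). */
                return fd;
        }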