-rw-r--r--Documentation/ABI/testing/sysfs-driver-hid-roccat-pyra98
-rw-r--r--Documentation/arm/SA1100/FreeBird4
-rw-r--r--Documentation/input/ntrig.txt126
-rw-r--r--Documentation/kernel-parameters.txt10
-rw-r--r--Documentation/kvm/api.txt61
-rw-r--r--Documentation/kvm/ppc-pv.txt196
-rw-r--r--Documentation/kvm/timekeeping.txt612
-rw-r--r--Documentation/vm/numa_memory_policy.txt2
-rw-r--r--MAINTAINERS31
-rw-r--r--arch/arm/Kconfig2
-rw-r--r--arch/arm/common/icst.c2
-rw-r--r--arch/arm/common/scoop.c12
-rw-r--r--arch/arm/common/uengine.c18
-rw-r--r--arch/arm/include/asm/hardware/icst.h2
-rw-r--r--arch/arm/mach-at91/Kconfig6
-rw-r--r--arch/arm/mach-omap1/Kconfig2
-rw-r--r--arch/arm/mach-omap2/clock2420_data.c2
-rw-r--r--arch/arm/mach-omap2/clock2430_data.c2
-rw-r--r--arch/arm/mach-omap2/clock3xxx_data.c2
-rw-r--r--arch/arm/mach-omap2/devices.c71
-rw-r--r--arch/arm/mach-omap2/hsmmc.h2
-rw-r--r--arch/arm/mach-s3c2440/mach-at2440evb.c2
-rw-r--r--arch/arm/mach-sa1100/Kconfig6
-rw-r--r--arch/arm/mach-sa1100/cpu-sa1100.c2
-rw-r--r--arch/arm/nwfpe/milieu.h4
-rw-r--r--arch/arm/nwfpe/softfloat-macros4
-rw-r--r--arch/arm/nwfpe/softfloat-specialize4
-rw-r--r--arch/arm/nwfpe/softfloat.c4
-rw-r--r--arch/arm/nwfpe/softfloat.h4
-rw-r--r--arch/arm/plat-samsung/include/plat/adc.h2
-rw-r--r--arch/avr32/Kconfig2
-rw-r--r--arch/blackfin/mach-bf527/boards/ezkit.c2
-rw-r--r--arch/blackfin/mach-bf537/boards/stamp.c2
-rw-r--r--arch/h8300/Kconfig.cpu8
-rw-r--r--arch/h8300/README1
-rw-r--r--arch/ia64/kvm/lapic.h1
-rw-r--r--arch/m32r/Kconfig2
-rw-r--r--arch/m68k/include/asm/cacheflush_no.h2
-rw-r--r--arch/m68k/include/asm/coldfire.h4
-rw-r--r--arch/m68k/include/asm/gpio.h7
-rw-r--r--arch/m68k/include/asm/m548xgpt.h88
-rw-r--r--arch/m68k/include/asm/m548xsim.h55
-rw-r--r--arch/m68k/include/asm/mcfcache.h2
-rw-r--r--arch/m68k/include/asm/mcfsim.h2
-rw-r--r--arch/m68k/include/asm/mcfslt.h44
-rw-r--r--arch/m68k/include/asm/mcfuart.h9
-rw-r--r--arch/m68k/kernel/asm-offsets.c12
-rw-r--r--arch/m68k/mac/macboing.c3
-rw-r--r--arch/m68k/q40/README2
-rw-r--r--arch/m68knommu/Kconfig11
-rw-r--r--arch/m68knommu/Makefile3
-rw-r--r--arch/m68knommu/kernel/.gitignore1
-rw-r--r--arch/m68knommu/kernel/asm-offsets.c11
-rw-r--r--arch/m68knommu/kernel/ptrace.c47
-rw-r--r--arch/m68knommu/kernel/setup.c3
-rw-r--r--arch/m68knommu/kernel/time.c13
-rw-r--r--arch/m68knommu/kernel/traps.c26
-rw-r--r--arch/m68knommu/platform/5206/Makefile4
-rw-r--r--arch/m68knommu/platform/5206e/Makefile4
-rw-r--r--arch/m68knommu/platform/520x/Makefile4
-rw-r--r--arch/m68knommu/platform/523x/Makefile4
-rw-r--r--arch/m68knommu/platform/5249/Makefile4
-rw-r--r--arch/m68knommu/platform/5272/Makefile4
-rw-r--r--arch/m68knommu/platform/5272/config.c16
-rw-r--r--arch/m68knommu/platform/5272/intc.c60
-rw-r--r--arch/m68knommu/platform/527x/Makefile4
-rw-r--r--arch/m68knommu/platform/528x/Makefile4
-rw-r--r--arch/m68knommu/platform/5307/Makefile4
-rw-r--r--arch/m68knommu/platform/532x/Makefile4
-rw-r--r--arch/m68knommu/platform/5407/Makefile4
-rw-r--r--arch/m68knommu/platform/548x/Makefile18
-rw-r--r--arch/m68knommu/platform/548x/config.c115
-rw-r--r--arch/m68knommu/platform/68328/entry.S36
-rw-r--r--arch/m68knommu/platform/68328/head-de2.S6
-rw-r--r--arch/m68knommu/platform/68328/head-ram.S27
-rw-r--r--arch/m68knommu/platform/68328/ints.c6
-rw-r--r--arch/m68knommu/platform/68360/entry.S13
-rw-r--r--arch/m68knommu/platform/68360/ints.c6
-rw-r--r--arch/m68knommu/platform/68VZ328/config.c5
-rw-r--r--arch/m68knommu/platform/coldfire/Makefile5
-rw-r--r--arch/m68knommu/platform/coldfire/entry.S4
-rw-r--r--arch/m68knommu/platform/coldfire/intc-2.c53
-rw-r--r--arch/m68knommu/platform/coldfire/intc-simr.c10
-rw-r--r--arch/m68knommu/platform/coldfire/intc.c8
-rw-r--r--arch/m68knommu/platform/coldfire/sltimers.c145
-rw-r--r--arch/mips/Kconfig12
-rw-r--r--arch/mips/math-emu/cp1emu.c1
-rw-r--r--arch/mips/math-emu/dp_add.c1
-rw-r--r--arch/mips/math-emu/dp_cmp.c1
-rw-r--r--arch/mips/math-emu/dp_div.c1
-rw-r--r--arch/mips/math-emu/dp_fint.c1
-rw-r--r--arch/mips/math-emu/dp_flong.c1
-rw-r--r--arch/mips/math-emu/dp_frexp.c1
-rw-r--r--arch/mips/math-emu/dp_fsp.c1
-rw-r--r--arch/mips/math-emu/dp_logb.c1
-rw-r--r--arch/mips/math-emu/dp_modf.c1
-rw-r--r--arch/mips/math-emu/dp_mul.c1
-rw-r--r--arch/mips/math-emu/dp_scalb.c1
-rw-r--r--arch/mips/math-emu/dp_simple.c1
-rw-r--r--arch/mips/math-emu/dp_sqrt.c1
-rw-r--r--arch/mips/math-emu/dp_sub.c1
-rw-r--r--arch/mips/math-emu/dp_tint.c1
-rw-r--r--arch/mips/math-emu/dp_tlong.c1
-rw-r--r--arch/mips/math-emu/ieee754.c1
-rw-r--r--arch/mips/math-emu/ieee754.h1
-rw-r--r--arch/mips/math-emu/ieee754d.c1
-rw-r--r--arch/mips/math-emu/ieee754dp.c1
-rw-r--r--arch/mips/math-emu/ieee754dp.h1
-rw-r--r--arch/mips/math-emu/ieee754int.h1
-rw-r--r--arch/mips/math-emu/ieee754m.c1
-rw-r--r--arch/mips/math-emu/ieee754sp.c1
-rw-r--r--arch/mips/math-emu/ieee754sp.h1
-rw-r--r--arch/mips/math-emu/ieee754xcpt.c1
-rw-r--r--arch/mips/math-emu/sp_add.c1
-rw-r--r--arch/mips/math-emu/sp_cmp.c1
-rw-r--r--arch/mips/math-emu/sp_div.c1
-rw-r--r--arch/mips/math-emu/sp_fdp.c1
-rw-r--r--arch/mips/math-emu/sp_fint.c1
-rw-r--r--arch/mips/math-emu/sp_flong.c1
-rw-r--r--arch/mips/math-emu/sp_frexp.c1
-rw-r--r--arch/mips/math-emu/sp_logb.c1
-rw-r--r--arch/mips/math-emu/sp_modf.c1
-rw-r--r--arch/mips/math-emu/sp_mul.c1
-rw-r--r--arch/mips/math-emu/sp_scalb.c1
-rw-r--r--arch/mips/math-emu/sp_simple.c1
-rw-r--r--arch/mips/math-emu/sp_sqrt.c1
-rw-r--r--arch/mips/math-emu/sp_sub.c1
-rw-r--r--arch/mips/math-emu/sp_tint.c1
-rw-r--r--arch/mips/math-emu/sp_tlong.c1
-rw-r--r--arch/mips/pci/fixup-fuloong2e.c2
-rw-r--r--arch/mips/sibyte/common/sb_tbprof.c2
-rw-r--r--arch/powerpc/include/asm/hydra.h2
-rw-r--r--arch/powerpc/include/asm/kvm.h1
-rw-r--r--arch/powerpc/include/asm/kvm_asm.h4
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h31
-rw-r--r--arch/powerpc/include/asm/kvm_host.h21
-rw-r--r--arch/powerpc/include/asm/kvm_para.h139
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h1
-rw-r--r--arch/powerpc/kernel/Makefile2
-rw-r--r--arch/powerpc/kernel/asm-offsets.c25
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S6
-rw-r--r--arch/powerpc/kernel/head_64.S6
-rw-r--r--arch/powerpc/kernel/kvm.c596
-rw-r--r--arch/powerpc/kernel/kvm_emul.S302
-rw-r--r--arch/powerpc/kvm/44x.c10
-rw-r--r--arch/powerpc/kvm/44x_tlb.c9
-rw-r--r--arch/powerpc/kvm/book3s.c272
-rw-r--r--arch/powerpc/kvm/book3s_32_mmu.c111
-rw-r--r--arch/powerpc/kvm/book3s_32_mmu_host.c75
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu.c42
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_host.c74
-rw-r--r--arch/powerpc/kvm/book3s_emulate.c73
-rw-r--r--arch/powerpc/kvm/book3s_mmu_hpte.c140
-rw-r--r--arch/powerpc/kvm/book3s_paired_singles.c11
-rw-r--r--arch/powerpc/kvm/book3s_rmhandlers.S32
-rw-r--r--arch/powerpc/kvm/booke.c108
-rw-r--r--arch/powerpc/kvm/booke.h10
-rw-r--r--arch/powerpc/kvm/booke_emulate.c14
-rw-r--r--arch/powerpc/kvm/booke_interrupts.S3
-rw-r--r--arch/powerpc/kvm/e500.c7
-rw-r--r--arch/powerpc/kvm/e500_tlb.c18
-rw-r--r--arch/powerpc/kvm/e500_tlb.h2
-rw-r--r--arch/powerpc/kvm/emulate.c36
-rw-r--r--arch/powerpc/kvm/powerpc.c88
-rw-r--r--arch/powerpc/kvm/trace.h239
-rw-r--r--arch/powerpc/platforms/Kconfig10
-rw-r--r--arch/s390/include/asm/Kbuild1
-rw-r--r--arch/s390/include/asm/kvm_virtio.h7
-rw-r--r--arch/x86/include/asm/kvm_emulate.h30
-rw-r--r--arch/x86/include/asm/kvm_host.h81
-rw-r--r--arch/x86/include/asm/kvm_para.h6
-rw-r--r--arch/x86/include/asm/msr-index.h1
-rw-r--r--arch/x86/include/asm/pvclock.h38
-rw-r--r--arch/x86/kernel/apm_32.c4
-rw-r--r--arch/x86/kernel/kvmclock.c6
-rw-r--r--arch/x86/kernel/microcode_core.c2
-rw-r--r--arch/x86/kernel/microcode_intel.c2
-rw-r--r--arch/x86/kernel/pvclock.c3
-rw-r--r--arch/x86/kvm/Kconfig7
-rw-r--r--arch/x86/kvm/emulate.c2262
-rw-r--r--arch/x86/kvm/i8254.c11
-rw-r--r--arch/x86/kvm/i8259.c25
-rw-r--r--arch/x86/kvm/irq.c9
-rw-r--r--arch/x86/kvm/irq.h2
-rw-r--r--arch/x86/kvm/kvm_cache_regs.h9
-rw-r--r--arch/x86/kvm/lapic.c15
-rw-r--r--arch/x86/kvm/mmu.c918
-rw-r--r--arch/x86/kvm/mmu.h9
-rw-r--r--arch/x86/kvm/mmu_audit.c299
-rw-r--r--arch/x86/kvm/mmutrace.h19
-rw-r--r--arch/x86/kvm/paging_tmpl.h202
-rw-r--r--arch/x86/kvm/svm.c283
-rw-r--r--arch/x86/kvm/timer.c2
-rw-r--r--arch/x86/kvm/vmx.c219
-rw-r--r--arch/x86/kvm/x86.c780
-rw-r--r--arch/x86/kvm/x86.h8
-rw-r--r--arch/xtensa/include/asm/uaccess.h2
-rw-r--r--block/blk-core.c2
-rw-r--r--crypto/Kconfig21
-rw-r--r--crypto/cryptd.c206
-rw-r--r--drivers/ata/pata_bf54x.c2
-rw-r--r--drivers/ata/pata_it821x.c4
-rw-r--r--drivers/atm/Kconfig2
-rw-r--r--drivers/base/core.c2
-rw-r--r--drivers/char/agp/Kconfig2
-rw-r--r--drivers/char/agp/i460-agp.c2
-rw-r--r--drivers/char/apm-emulation.c4
-rw-r--r--drivers/char/ipmi/ipmi_bt_sm.c2
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c3
-rw-r--r--drivers/char/n_r3964.c1
-rw-r--r--drivers/char/pcmcia/Kconfig4
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c2
-rw-r--r--drivers/char/stallion.c4
-rw-r--r--drivers/char/tpm/Kconfig2
-rw-r--r--drivers/char/tpm/tpm_infineon.c2
-rw-r--r--drivers/crypto/Kconfig9
-rw-r--r--drivers/crypto/Makefile3
-rw-r--r--drivers/crypto/amcc/Makefile2
-rw-r--r--drivers/crypto/hifn_795x.c3
-rw-r--r--drivers/crypto/omap-aes.c948
-rw-r--r--drivers/crypto/omap-sham.c6
-rw-r--r--drivers/crypto/talitos.c29
-rw-r--r--drivers/edac/Kconfig7
-rw-r--r--drivers/edac/Makefile1
-rw-r--r--drivers/edac/edac_device_sysfs.c2
-rw-r--r--drivers/edac/i7300_edac.c1247
-rw-r--r--drivers/edac/i82443bxgx_edac.c2
-rw-r--r--drivers/firmware/Kconfig3
-rw-r--r--drivers/firmware/edd.c2
-rw-r--r--drivers/firmware/pcdp.h4
-rw-r--r--drivers/gpu/drm/drm_modes.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_i2c.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c2
-rw-r--r--drivers/hid/Kconfig91
-rw-r--r--drivers/hid/Makefile6
-rw-r--r--drivers/hid/hid-3m-pct.c127
-rw-r--r--drivers/hid/hid-a4tech.c2
-rw-r--r--drivers/hid/hid-apple.c7
-rw-r--r--drivers/hid/hid-cherry.c7
-rw-r--r--drivers/hid/hid-core.c23
-rw-r--r--drivers/hid/hid-cypress.c9
-rw-r--r--drivers/hid/hid-debug.c2
-rw-r--r--drivers/hid/hid-egalax.c16
-rw-r--r--drivers/hid/hid-elecom.c7
-rw-r--r--drivers/hid/hid-ids.h23
-rw-r--r--drivers/hid/hid-input.c87
-rw-r--r--drivers/hid/hid-kye.c7
-rw-r--r--drivers/hid/hid-lg.c49
-rw-r--r--drivers/hid/hid-lg.h6
-rw-r--r--drivers/hid/hid-lg2ff.c4
-rw-r--r--drivers/hid/hid-lg4ff.c136
-rw-r--r--drivers/hid/hid-magicmouse.c325
-rw-r--r--drivers/hid/hid-microsoft.c7
-rw-r--r--drivers/hid/hid-monterey.c7
-rw-r--r--drivers/hid/hid-ntrig.c69
-rw-r--r--drivers/hid/hid-ortek.c7
-rw-r--r--drivers/hid/hid-petalynx.c7
-rw-r--r--drivers/hid/hid-prodikeys.c7
-rw-r--r--drivers/hid/hid-roccat-pyra.c968
-rw-r--r--drivers/hid/hid-roccat-pyra.h186
-rw-r--r--drivers/hid/hid-samsung.c20
-rw-r--r--drivers/hid/hid-sony.c56
-rw-r--r--drivers/hid/hid-stantum.c2
-rw-r--r--drivers/hid/hid-sunplus.c7
-rw-r--r--drivers/hid/hid-uclogic.c623
-rw-r--r--drivers/hid/hid-waltop.c1099
-rw-r--r--drivers/hid/hid-zydacron.c7
-rw-r--r--drivers/hid/hidraw.c14
-rw-r--r--drivers/hid/usbhid/hid-core.c9
-rw-r--r--drivers/hid/usbhid/hid-quirks.c6
-rw-r--r--drivers/hid/usbhid/hiddev.c40
-rw-r--r--drivers/hwmon/adm1025.c2
-rw-r--r--drivers/hwmon/adm1026.c2
-rw-r--r--drivers/hwmon/f75375s.c4
-rw-r--r--drivers/hwmon/g760a.c2
-rw-r--r--drivers/hwmon/hwmon-vid.c2
-rw-r--r--drivers/i2c/Kconfig3
-rw-r--r--drivers/i2c/Makefile4
-rw-r--r--drivers/i2c/algos/Kconfig12
-rw-r--r--drivers/i2c/algos/Makefile4
-rw-r--r--drivers/i2c/busses/Makefile4
-rw-r--r--drivers/i2c/busses/i2c-amd8111.c163
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c1
-rw-r--r--drivers/i2c/busses/i2c-nuc900.c1
-rw-r--r--drivers/i2c/busses/i2c-pca-platform.c2
-rw-r--r--drivers/i2c/busses/i2c-pxa.c1
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c1
-rw-r--r--drivers/i2c/busses/i2c-viapro.c8
-rw-r--r--drivers/i2c/i2c-core.c45
-rw-r--r--drivers/i2c/i2c-dev.c13
-rw-r--r--drivers/i2c/muxes/Kconfig10
-rw-r--r--drivers/i2c/muxes/Makefile5
-rw-r--r--drivers/i2c/muxes/pca9541.c411
-rw-r--r--drivers/i2c/muxes/pca954x.c8
-rw-r--r--drivers/ide/hpt366.c2
-rw-r--r--drivers/ide/ht6560b.c1
-rw-r--r--drivers/ide/ide-disk.c3
-rw-r--r--drivers/infiniband/Kconfig4
-rw-r--r--drivers/infiniband/hw/cxgb3/Kconfig2
-rw-r--r--drivers/infiniband/hw/cxgb4/Kconfig2
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c4
-rw-r--r--drivers/infiniband/ulp/iser/Kconfig2
-rw-r--r--drivers/input/joystick/gamecon.c3
-rw-r--r--drivers/input/misc/cm109.c2
-rw-r--r--drivers/input/mouse/Kconfig1
-rw-r--r--drivers/input/mouse/touchkit_ps2.c4
-rw-r--r--drivers/input/touchscreen/mk712.c2
-rw-r--r--drivers/isdn/i4l/isdn_audio.c2
-rw-r--r--drivers/macintosh/therm_adt746x.c6
-rw-r--r--drivers/media/IR/keymaps/rc-manli.c1
-rw-r--r--drivers/media/dvb/ttpci/av7110.c9
-rw-r--r--drivers/media/dvb/ttpci/av7110_av.c2
-rw-r--r--drivers/media/dvb/ttpci/av7110_ca.c2
-rw-r--r--drivers/media/dvb/ttpci/av7110_hw.c2
-rw-r--r--drivers/media/dvb/ttpci/av7110_v4l.c2
-rw-r--r--drivers/media/dvb/ttpci/budget-av.c2
-rw-r--r--drivers/media/dvb/ttpci/budget-ci.c2
-rw-r--r--drivers/media/dvb/ttpci/budget-core.c2
-rw-r--r--drivers/media/dvb/ttpci/budget-patch.c2
-rw-r--r--drivers/media/dvb/ttpci/budget.c2
-rw-r--r--drivers/media/radio/radio-maxiradio.c2
-rw-r--r--drivers/media/radio/radio-typhoon.c3
-rw-r--r--drivers/media/video/Kconfig2
-rw-r--r--drivers/media/video/cafe_ccic.c2
-rw-r--r--drivers/media/video/cx18/cx18-cards.c2
-rw-r--r--drivers/media/video/cx23885/cx23885-417.c2
-rw-r--r--drivers/media/video/cx88/cx88-blackbird.c2
-rw-r--r--drivers/media/video/ivtv/ivtv-cards.c2
-rw-r--r--drivers/media/video/mxb.c2
-rw-r--r--drivers/media/video/sn9c102/sn9c102_pas202bcb.c1
-rw-r--r--drivers/media/video/zoran/videocodec.h2
-rw-r--r--drivers/media/video/zoran/zoran_driver.c2
-rw-r--r--drivers/misc/Kconfig6
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c4
-rw-r--r--drivers/mtd/devices/lart.c2
-rw-r--r--drivers/mtd/ftl.c2
-rw-r--r--drivers/mtd/maps/Kconfig4
-rw-r--r--drivers/mtd/nand/cafe_nand.c2
-rw-r--r--drivers/net/Kconfig21
-rw-r--r--drivers/net/appletalk/Kconfig2
-rw-r--r--drivers/net/atp.c2
-rw-r--r--drivers/net/depca.c2
-rw-r--r--drivers/net/epic100.c4
-rw-r--r--drivers/net/hamradio/Kconfig2
-rw-r--r--drivers/net/ibmlana.c2
-rw-r--r--drivers/net/igb/igb_main.c2
-rw-r--r--drivers/net/irda/ali-ircc.c2
-rw-r--r--drivers/net/irda/donauboe.h2
-rw-r--r--drivers/net/pci-skeleton.c2
-rw-r--r--drivers/net/pcmcia/3c574_cs.c2
-rw-r--r--drivers/net/ps3_gelic_net.c4
-rw-r--r--drivers/net/sb1250-mac.c6
-rw-r--r--drivers/net/sc92031.c2
-rw-r--r--drivers/net/skfp/hwt.c2
-rw-r--r--drivers/net/skfp/skfddi.c2
-rw-r--r--drivers/net/tlan.c2
-rw-r--r--drivers/net/tokenring/tms380tr.c2
-rw-r--r--drivers/net/tulip/Kconfig2
-rw-r--r--drivers/net/tulip/pnic2.c2
-rw-r--r--drivers/net/usb/plusb.c2
-rw-r--r--drivers/net/wan/Kconfig2
-rw-r--r--drivers/net/wan/hdlc.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h2
-rw-r--r--drivers/net/wireless/ath/ath5k/reg.h1
-rw-r--r--drivers/net/wireless/hostap/hostap_hw.c2
-rw-r--r--drivers/net/wireless/p54/Kconfig6
-rw-r--r--drivers/net/wireless/prism54/islpci_hotplug.c2
-rw-r--r--drivers/parisc/README.dino3
-rw-r--r--drivers/parport/parport_pc.c2
-rw-r--r--drivers/pci/quirks.c3
-rw-r--r--drivers/pcmcia/yenta_socket.c2
-rw-r--r--drivers/pnp/pnpbios/proc.c1
-rw-r--r--drivers/rtc/rtc-nuc900.c2
-rw-r--r--drivers/s390/block/dasd_3990_erp.c2
-rw-r--r--drivers/s390/block/dasd_eckd.c4
-rw-r--r--drivers/s390/char/tape_3590.c6
-rw-r--r--drivers/s390/char/vmcp.c6
-rw-r--r--drivers/s390/kvm/kvm_virtio.c66
-rw-r--r--drivers/scsi/Kconfig9
-rw-r--r--drivers/scsi/aacraid/linit.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c2
-rw-r--r--drivers/scsi/sg.c2
-rw-r--r--drivers/serial/68328serial.h5
-rw-r--r--drivers/serial/8250.c2
-rw-r--r--drivers/serial/bfin_sport_uart.c2
-rw-r--r--drivers/serial/bfin_sport_uart.h2
-rw-r--r--drivers/serial/uartlite.c2
-rw-r--r--drivers/staging/asus_oled/README2
-rw-r--r--drivers/staging/asus_oled/asus_oled.c2
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdas.c2
-rw-r--r--drivers/staging/comedi/drivers/daqboard2000.c4
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c2
-rw-r--r--drivers/staging/comedi/drivers/plx9080.h2
-rw-r--r--drivers/staging/comedi/drivers/rtd520.c2
-rw-r--r--drivers/staging/quickstart/quickstart.c3
-rw-r--r--drivers/uio/Kconfig6
-rw-r--r--drivers/usb/gadget/at91_udc.c1
-rw-r--r--drivers/usb/gadget/f_audio.c2
-rw-r--r--drivers/usb/gadget/f_hid.c4
-rw-r--r--drivers/usb/gadget/fsl_udc_core.c6
-rw-r--r--drivers/usb/gadget/pxa27x_udc.c2
-rw-r--r--drivers/usb/gadget/rndis.c2
-rw-r--r--drivers/usb/host/ehci-hcd.c1
-rw-r--r--drivers/usb/host/imx21-hcd.c4
-rw-r--r--drivers/usb/host/ohci-hcd.c1
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c9
-rw-r--r--drivers/usb/host/pci-quirks.c2
-rw-r--r--drivers/usb/host/u132-hcd.c8
-rw-r--r--drivers/usb/image/microtek.c9
-rw-r--r--drivers/usb/misc/ftdi-elan.c2
-rw-r--r--drivers/usb/mon/mon_main.c2
-rw-r--r--drivers/usb/serial/Kconfig4
-rw-r--r--drivers/usb/serial/cypress_m8.c2
-rw-r--r--drivers/usb/serial/ftdi_sio.c1
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h12
-rw-r--r--drivers/usb/serial/garmin_gps.c1
-rw-r--r--drivers/usb/serial/io_edgeport.c11
-rw-r--r--drivers/usb/serial/io_ti.c4
-rw-r--r--drivers/usb/serial/iuu_phoenix.c1
-rw-r--r--drivers/usb/serial/keyspan.c2
-rw-r--r--drivers/usb/serial/keyspan.h2
-rw-r--r--drivers/usb/serial/keyspan_pda.c1
-rw-r--r--drivers/usb/serial/mct_u232.h9
-rw-r--r--drivers/usb/serial/mos7720.c6
-rw-r--r--drivers/usb/serial/mos7840.c7
-rw-r--r--drivers/usb/serial/omninet.c2
-rw-r--r--drivers/usb/serial/sierra.c2
-rw-r--r--drivers/usb/serial/spcp8x5.c1
-rw-r--r--drivers/usb/serial/usb_wwan.c1
-rw-r--r--drivers/usb/serial/whiteheat.c6
-rw-r--r--drivers/usb/storage/Kconfig2
-rw-r--r--drivers/video/Kconfig10
-rw-r--r--drivers/video/arcfb.c1
-rw-r--r--drivers/video/aty/radeon_i2c.c1
-rw-r--r--drivers/video/bf54x-lq043fb.c6
-rw-r--r--drivers/video/bfin-t350mcqb-fb.c2
-rw-r--r--drivers/video/epson1355fb.c2
-rw-r--r--drivers/video/fbcvt.c2
-rw-r--r--drivers/video/i810/i810.h1
-rw-r--r--drivers/video/intelfb/intelfb_i2c.c1
-rw-r--r--drivers/video/metronomefb.c2
-rw-r--r--drivers/video/savage/savagefb.h1
-rw-r--r--drivers/video/vesafb.c2
-rw-r--r--firmware/keyspan_pda/keyspan_pda.S2
-rw-r--r--firmware/keyspan_pda/xircom_pgs.S2
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h2
-rw-r--r--fs/exofs/inode.c14
-rw-r--r--fs/exofs/ios.c10
-rw-r--r--fs/ext2/inode.c4
-rw-r--r--fs/hostfs/hostfs.h7
-rw-r--r--fs/jfs/jfs_logmgr.c6
-rw-r--r--fs/jfs/jfs_mount.c4
-rw-r--r--fs/ocfs2/cluster/tcp_internal.h2
-rw-r--r--fs/partitions/ldm.c2
-rw-r--r--fs/partitions/ldm.h2
-rw-r--r--fs/reiserfs/Kconfig6
-rw-r--r--fs/reiserfs/README2
-rw-r--r--fs/seq_file.c6
-rw-r--r--include/crypto/cryptd.h24
-rw-r--r--include/crypto/gf128mul.h4
-rw-r--r--include/linux/fdreg.h2
-rw-r--r--include/linux/hid.h5
-rw-r--r--include/linux/hiddev.h4
-rw-r--r--include/linux/i2c.h12
-rw-r--r--include/linux/idr.h7
-rw-r--r--include/linux/if_infiniband.h2
-rw-r--r--include/linux/jhash.h2
-rw-r--r--include/linux/kvm.h12
-rw-r--r--include/linux/kvm_host.h22
-rw-r--r--include/linux/kvm_para.h7
-rw-r--r--include/linux/n_r3964.h1
-rw-r--r--include/linux/padata.h4
-rw-r--r--include/linux/pci_ids.h19
-rw-r--r--include/linux/slub_def.h14
-rw-r--r--include/video/vga.h2
-rw-r--r--init/Kconfig2
-rw-r--r--kernel/pm_qos_params.c2
-rw-r--r--kernel/trace/trace_kprobe.c2
-rw-r--r--lib/Kconfig.debug2
-rw-r--r--lib/idr.c13
-rw-r--r--mm/percpu.c2
-rw-r--r--mm/slob.c4
-rw-r--r--mm/slub.c788
-rw-r--r--mm/util.c13
-rw-r--r--mm/vmscan.c4
-rw-r--r--net/ax25/Kconfig8
-rw-r--r--net/ipv4/Kconfig4
-rw-r--r--net/ipv4/cipso_ipv4.c2
-rw-r--r--net/ipv4/fib_trie.c2
-rw-r--r--net/ipv4/netfilter/Kconfig2
-rw-r--r--net/ipv4/tcp_illinois.c2
-rw-r--r--net/ipv4/tcp_input.c4
-rw-r--r--net/ipv4/tcp_veno.c2
-rw-r--r--net/irda/irnet/irnet_ppp.c2
-rw-r--r--net/mac80211/mesh.h2
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c4
-rw-r--r--net/sunrpc/rpc_pipe.c4
-rw-r--r--net/wanrouter/wanmain.c4
-rw-r--r--sound/oss/ac97_codec.c7
-rw-r--r--sound/pci/ens1370.c2
-rw-r--r--sound/pci/intel8x0.c2
-rw-r--r--sound/soc/s3c24xx/neo1973_gta02_wm8753.c2
-rw-r--r--sound/soc/s3c24xx/neo1973_wm8753.c2
-rw-r--r--usr/Kconfig2
-rw-r--r--virt/kvm/irq_comm.c2
-rw-r--r--virt/kvm/kvm_main.c84
511 files changed, 15193 insertions, 3919 deletions
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-roccat-pyra b/Documentation/ABI/testing/sysfs-driver-hid-roccat-pyra
new file mode 100644
index 000000000000..ad1125b02ff4
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-hid-roccat-pyra
@@ -0,0 +1,98 @@
1What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/actual_cpi
2Date: August 2010
3Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
4Description: It is possible to switch the cpi setting of the mouse with the
5 press of a button.
6 When read, this file returns the raw number of the actual cpi
7 setting reported by the mouse. This number has to be further
8		processed to obtain the real dpi value.
9
10 VALUE DPI
11 1 400
12 2 800
13 4 1600
14
15 This file is readonly.
16
17What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/actual_profile
18Date: August 2010
19Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
20Description: When read, this file returns the number of the actual profile in
21 range 0-4.
22 This file is readonly.
23
24What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/firmware_version
25Date: August 2010
26Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
27Description: When read, this file returns the raw integer version number of the
28 firmware reported by the mouse. Using the integer value eases
29		further usage in other programs. To obtain the real version
30		number, the decimal point has to be shifted 2 positions to the
31 left. E.g. a returned value of 138 means 1.38
32 This file is readonly.
33
34What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/profile_settings
35Date: August 2010
36Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
37Description: The mouse can store 5 profiles which can be switched by the
38 press of a button. A profile is split in settings and buttons.
39		profile_settings holds information like resolution, sensitivity
40 and light effects.
41 When written, this file lets one write the respective profile
42 settings back to the mouse. The data has to be 13 bytes long.
43 The mouse will reject invalid data.
44 Which profile to write is determined by the profile number
45 contained in the data.
46 This file is writeonly.
47
48What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/profile[1-5]_settings
49Date: August 2010
50Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
51Description: The mouse can store 5 profiles which can be switched by the
52 press of a button. A profile is split in settings and buttons.
53		profile_settings holds information like resolution, sensitivity
54 and light effects.
55 When read, these files return the respective profile settings.
56 The returned data is 13 bytes in size.
57 This file is readonly.
58
59What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/profile_buttons
60Date: August 2010
61Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
62Description: The mouse can store 5 profiles which can be switched by the
63 press of a button. A profile is split in settings and buttons.
64		profile_buttons holds information about button layout.
65 When written, this file lets one write the respective profile
66 buttons back to the mouse. The data has to be 19 bytes long.
67 The mouse will reject invalid data.
68 Which profile to write is determined by the profile number
69 contained in the data.
70 This file is writeonly.
71
72What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/profile[1-5]_buttons
73Date: August 2010
74Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
75Description: The mouse can store 5 profiles which can be switched by the
76 press of a button. A profile is split in settings and buttons.
77		profile_buttons holds information about button layout.
78 When read, these files return the respective profile buttons.
79 The returned data is 19 bytes in size.
80 This file is readonly.
81
82What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/startup_profile
83Date: August 2010
84Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
85Description: The integer value of this attribute ranges from 0-4.
86 When read, this attribute returns the number of the profile
87 that's active when the mouse is powered on.
88 This file is readonly.
89
90What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/settings
91Date: August 2010
92Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
93Description: When read, this file returns the settings stored in the mouse.
94 The size of the data is 3 bytes and holds information on the
95 startup_profile.
96		When written, this file lets one write settings back to the mouse.
97 The data has to be 3 bytes long. The mouse will reject invalid
98 data.
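A minimal user-space sketch (not part of the patch) of how the raw actual_cpi value documented above could be turned into a DPI figure. The sysfs path is a hypothetical placeholder for the <busnum>-<devnum>:<config num>.<interface num> pattern, and the helper name is invented for illustration.

	/* Illustrative only: read actual_cpi and map the raw value to DPI. */
	#include <stdio.h>

	static int pyra_raw_cpi_to_dpi(int raw)
	{
		switch (raw) {
		case 1: return 400;
		case 2: return 800;
		case 4: return 1600;
		default: return -1;	/* value not listed in the table above */
		}
	}

	int main(void)
	{
		/* Placeholder path; substitute the real bus/device numbers. */
		FILE *f = fopen("/sys/bus/usb/devices/1-2:1.0/actual_cpi", "r");
		int raw;

		if (!f)
			return 1;
		if (fscanf(f, "%d", &raw) == 1)
			printf("raw=%d dpi=%d\n", raw, pyra_raw_cpi_to_dpi(raw));
		fclose(f);
		return 0;
	}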
diff --git a/Documentation/arm/SA1100/FreeBird b/Documentation/arm/SA1100/FreeBird
index fb23b770aaf4..ab9193663b2b 100644
--- a/Documentation/arm/SA1100/FreeBird
+++ b/Documentation/arm/SA1100/FreeBird
@@ -1,6 +1,6 @@
-Freebird-1.1 is produced by Legned(C) ,Inc.
+Freebird-1.1 is produced by Legend(C), Inc.
 http://web.archive.org/web/*/http://www.legend.com.cn
-and software/linux mainatined by Coventive(C),Inc.
+and software/linux maintained by Coventive(C), Inc.
 (http://www.coventive.com)
 
 Based on the Nicolas's strongarm kernel tree.
diff --git a/Documentation/input/ntrig.txt b/Documentation/input/ntrig.txt
new file mode 100644
index 000000000000..be1fd981f73f
--- /dev/null
+++ b/Documentation/input/ntrig.txt
@@ -0,0 +1,126 @@
1N-Trig touchscreen Driver
2-------------------------
3 Copyright (c) 2008-2010 Rafi Rubin <rafi@seas.upenn.edu>
4 Copyright (c) 2009-2010 Stephane Chatty
5
6This driver provides support for N-Trig pen and multi-touch sensors. Single
7and multi-touch events are translated to the appropriate protocols for
8the hid and input systems. Pen events are sufficiently hid compliant and
9are left to the hid core. The driver also provides additional filtering
10and utility functions accessible with sysfs and module parameters.
11
12This driver has been reported to work properly with multiple N-Trig devices
13attached.
14
15
16Parameters
17----------
18
19Note: values set at load time are global and will apply to all applicable
20devices. Adjusting parameters with sysfs will override the load time values,
21but only for that one device.
22
23The following parameters are used to configure filters to reduce noise:
24
25activate_slack number of fingers to ignore before processing events
26
27activation_height size threshold to activate immediately
28activation_width
29
30min_height		size threshold below which fingers are ignored
31min_width both to decide activation and during activity
32
33deactivate_slack the number of "no contact" frames to ignore before
34 propagating the end of activity events
35
36When the last finger is removed from the device, it sends a number of empty
37frames. By holding off on deactivation for a few frames we can tolerate false
38erroneous disconnects, where the sensor may mistakenly not detect a finger that
39is still present. Thus deactivate_slack addresses problems where a users might
40see breaks in lines during drawing, or drop an object during a long drag.
41
42
43Additional sysfs items
44----------------------
45
46These nodes just provide easy access to the ranges reported by the device.
47sensor_logical_height the range for positions reported during activity
48sensor_logical_width
49
50sensor_physical_height internal ranges not used for normal events but
51sensor_physical_width useful for tuning
52
53All N-Trig devices with product id of 1 report events in the ranges of
54X: 0-9600
55Y: 0-7200
56However not all of these devices have the same physical dimensions. Most
57seem to be 12" sensors (Dell Latitude XT and XT2 and the HP TX2), and
58at least one model (Dell Studio 17) has a 17" sensor. The ratio of physical
59to logical sizes is used to adjust the size based filter parameters.
60
61
62Filtering
63---------
64
65With the release of the early multi-touch firmwares it became increasingly
66obvious that these sensors were prone to erroneous events. Users reported
67seeing both inappropriately dropped contact and ghosts, contacts reported
68where no finger was actually touching the screen.
69
70Deactivation slack helps prevent dropped contact for single touch use, but does
71not address the problem of dropping one of more contacts while other contacts
72are still active. Drops in the multi-touch context require additional
73processing and should be handled in tandem with tacking.
74
75As observed, ghost contacts are similar to actual use of the sensor, but they
76seem to have different profiles. Ghost activity typically shows up as small
77short lived touches. As such, I assume that the longer the continuous stream
78of events the more likely those events are from a real contact, and that the
79larger the size of each contact the more likely it is real. Balancing the
80goals of preventing ghosts and accepting real events quickly (to minimize
81user observable latency), the filter accumulates confidence for incoming
82events until it hits thresholds and begins propagating. In the interest of
83minimizing stored state as well as the cost of operations to make a decision,
84I've kept that decision simple.
85
86Time is measured in terms of the number of fingers reported, not frames since
87the probability of multiple simultaneous ghosts is expected to drop off
88dramatically with increasing numbers. Rather than accumulate weight as a
89function of size, I just use it as a binary threshold. A sufficiently large
90contact immediately overrides the waiting period and leads to activation.
91
92Setting the activation size thresholds to large values will result in deciding
93primarily on activation slack. If you see longer lived ghosts, turning up the
94activation slack while reducing the size thresholds may suffice to eliminate
95the ghosts while keeping the screen quite responsive to firm taps.
96
97Contacts continue to be filtered with min_height and min_width even after
98the initial activation filter is satisfied. The intent is to provide
99a mechanism for filtering out ghosts in the form of an extra finger while
100you actually are using the screen. In practice this sort of ghost has
101been far less problematic or relatively rare and I've left the defaults
102set to 0 for both parameters, effectively turning off that filter.
103
104I don't know what the optimal values are for these filters. If the defaults
105don't work for you, please play with the parameters. If you do find other
106values more comfortable, I would appreciate feedback.
107
108The calibration of these devices does drift over time. If ghosts or contact
109dropping worsen and interfere with the normal usage of your device, try
110recalibrating it.
111
112
113Calibration
114-----------
115
116The N-Trig windows tools provide calibration and testing routines. Also an
117unofficial unsupported set of user space tools including a calibrator is
118available at:
119http://code.launchpad.net/~rafi-seas/+junk/ntrig_calib
120
121
122Tracking
123--------
124
125As of yet, none of the tested N-Trig firmwares track fingers. When multiple
126contacts are active they seem to be sorted primarily by Y position.
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 4cd8b86e00ea..0b6815504e6d 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1131,9 +1131,13 @@ and is between 256 and 4096 characters. It is defined in the file
 	kvm.oos_shadow=	[KVM] Disable out-of-sync shadow paging.
 			Default is 1 (enabled)
 
-	kvm-amd.nested=	[KVM,AMD] Allow nested virtualization in KVM/SVM.
+	kvm.mmu_audit=	[KVM] This is a R/W parameter which allows audit
+			KVM MMU at runtime.
 			Default is 0 (off)
 
+	kvm-amd.nested=	[KVM,AMD] Allow nested virtualization in KVM/SVM.
+			Default is 1 (enabled)
+
 	kvm-amd.npt=	[KVM,AMD] Disable nested paging (virtualized MMU)
 			for all guests.
 			Default is 1 (enabled) if in 64bit or 32bit-PAE mode
@@ -1698,6 +1702,8 @@ and is between 256 and 4096 characters. It is defined in the file
 
 	nojitter	[IA64] Disables jitter checking for ITC timers.
 
+	no-kvmclock	[X86,KVM] Disable paravirtualized KVM clock driver
+
 	nolapic		[X86-32,APIC] Do not enable or use the local APIC.
 
 	nolapic_timer	[X86-32,APIC] Do not use the local APIC timer.
@@ -1718,7 +1724,7 @@ and is between 256 and 4096 characters. It is defined in the file
 	norandmaps	Don't use address space randomization. Equivalent to
 			echo 0 > /proc/sys/kernel/randomize_va_space
 
-	noreplace-paravirt	[X86-32,PV_OPS] Don't patch paravirt_ops
+	noreplace-paravirt	[X86,IA-64,PV_OPS] Don't patch paravirt_ops
 
 	noreplace-smp	[X86-32,SMP] Don't replace SMP instructions
 			with UP alternatives
diff --git a/Documentation/kvm/api.txt b/Documentation/kvm/api.txt
index 5f5b64982b1a..b336266bea5e 100644
--- a/Documentation/kvm/api.txt
+++ b/Documentation/kvm/api.txt
@@ -320,13 +320,13 @@ struct kvm_translation {
 4.15 KVM_INTERRUPT
 
 Capability: basic
-Architectures: x86
+Architectures: x86, ppc
 Type: vcpu ioctl
 Parameters: struct kvm_interrupt (in)
 Returns: 0 on success, -1 on error
 
 Queues a hardware interrupt vector to be injected. This is only
-useful if in-kernel local APIC is not used.
+useful if in-kernel local APIC or equivalent is not used.
 
 /* for KVM_INTERRUPT */
 struct kvm_interrupt {
@@ -334,8 +334,37 @@ struct kvm_interrupt {
 	__u32 irq;
 };
 
+X86:
+
 Note 'irq' is an interrupt vector, not an interrupt pin or line.
 
+PPC:
+
+Queues an external interrupt to be injected. This ioctl is overloaded
+with 3 different irq values:
+
+a) KVM_INTERRUPT_SET
+
+  This injects an edge type external interrupt into the guest once it's ready
+  to receive interrupts. When injected, the interrupt is done.
+
+b) KVM_INTERRUPT_UNSET
+
+  This unsets any pending interrupt.
+
+  Only available with KVM_CAP_PPC_UNSET_IRQ.
+
+c) KVM_INTERRUPT_SET_LEVEL
+
+  This injects a level type external interrupt into the guest context. The
+  interrupt stays pending until a specific ioctl with KVM_INTERRUPT_UNSET
+  is triggered.
+
+  Only available with KVM_CAP_PPC_IRQ_LEVEL.
+
+Note that any value for 'irq' other than the ones stated above is invalid
+and incurs unexpected behavior.
+
 4.16 KVM_DEBUG_GUEST
 
 Capability: basic
@@ -1013,8 +1042,9 @@ number is just right, the 'nent' field is adjusted to the number of valid
 entries in the 'entries' array, which is then filled.
 
 The entries returned are the host cpuid as returned by the cpuid instruction,
-with unknown or unsupported features masked out. The fields in each entry
-are defined as follows:
+with unknown or unsupported features masked out. Some features (for example,
+x2apic), may not be present in the host cpu, but are exposed by kvm if it can
+emulate them efficiently. The fields in each entry are defined as follows:
 
   function: the eax value used to obtain the entry
   index: the ecx value used to obtain the entry (for entries that are
@@ -1032,6 +1062,29 @@ are defined as follows:
    eax, ebx, ecx, edx: the values returned by the cpuid instruction for
          this function/index combination
 
+4.46 KVM_PPC_GET_PVINFO
+
+Capability: KVM_CAP_PPC_GET_PVINFO
+Architectures: ppc
+Type: vm ioctl
+Parameters: struct kvm_ppc_pvinfo (out)
+Returns: 0 on success, !0 on error
+
+struct kvm_ppc_pvinfo {
+	__u32 flags;
+	__u32 hcall[4];
+	__u8  pad[108];
+};
+
+This ioctl fetches PV specific information that needs to be passed to the guest
+using the device tree or other means from vm context.
+
+For now the only implemented piece of information distributed here is an array
+of 4 instructions that make up a hypercall.
+
+If any additional field gets added to this structure later on, a bit for that
+additional piece of information will be set in the flags bitmap.
+
 5. The kvm_run structure
 
 Application code obtains a pointer to the kvm_run structure by
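As a hedged illustration of the KVM_PPC_GET_PVINFO ioctl documented above (not code from this patch), user space could fetch the hypercall instruction sequence roughly as follows, assuming a VM file descriptor obtained via KVM_CREATE_VM and a <linux/kvm.h> that provides the ioctl and structure; the helper name is made up for this example.

	/* Sketch only: query the PV hypercall opcodes for a VM. */
	#include <linux/kvm.h>
	#include <sys/ioctl.h>
	#include <stdio.h>

	static int print_pvinfo(int vm_fd)
	{
		struct kvm_ppc_pvinfo pvinfo;
		int i;

		if (ioctl(vm_fd, KVM_PPC_GET_PVINFO, &pvinfo) < 0)
			return -1;

		/* The 4 opcodes that make up the hypercall, as described above. */
		for (i = 0; i < 4; i++)
			printf("hcall[%d] = 0x%08x\n", i, pvinfo.hcall[i]);

		return 0;
	}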
diff --git a/Documentation/kvm/ppc-pv.txt b/Documentation/kvm/ppc-pv.txt
new file mode 100644
index 000000000000..a7f2244b3be9
--- /dev/null
+++ b/Documentation/kvm/ppc-pv.txt
@@ -0,0 +1,196 @@
1The PPC KVM paravirtual interface
2=================================
3
4The basic execution principle by which KVM on PowerPC works is to run all kernel
5space code in PR=1, which is user space. This way we trap all privileged
6instructions and can emulate them accordingly.
7
8Unfortunately that is also the downfall. There are quite a few privileged
9instructions that needlessly return us to the hypervisor even though they
10could be handled differently.
11
12This is what the PPC PV interface helps with. It takes privileged instructions
13and transforms them into unprivileged ones with some help from the hypervisor.
14This cuts down virtualization costs by about 50% on some of my benchmarks.
15
16The code for that interface can be found in arch/powerpc/kernel/kvm*
17
18Querying for existence
19======================
20
21To find out if we're running on KVM or not, we leverage the device tree. When
22Linux is running on KVM, a node /hypervisor exists. That node contains a
23compatible property with the value "linux,kvm".
24
25Once you have determined you're running under a PV capable KVM, you can use
26hypercalls as described below.
27
28KVM hypercalls
29==============
30
31Inside the device tree's /hypervisor node there's a property called
32'hypercall-instructions'. This property contains at most 4 opcodes that make
33up the hypercall. To call a hypercall, just call these instructions.
34
35The parameters are as follows:
36
37 Register IN OUT
38
39 r0 - volatile
40 r3 1st parameter Return code
41 r4 2nd parameter 1st output value
42 r5 3rd parameter 2nd output value
43 r6 4th parameter 3rd output value
44 r7 5th parameter 4th output value
45 r8 6th parameter 5th output value
46 r9 7th parameter 6th output value
47 r10 8th parameter 7th output value
48 r11 hypercall number 8th output value
49 r12 - volatile
50
51Hypercall definitions are shared in generic code, so the same hypercall numbers
52apply for x86 and powerpc alike with the exception that each KVM hypercall
53also needs to be ORed with the KVM vendor code which is (42 << 16).
54
55Return codes can be as follows:
56
57 Code Meaning
58
59 0 Success
60 12 Hypercall not implemented
61 <0 Error
62
63The magic page
64==============
65
66To enable communication between the hypervisor and guest there is a new shared
67page that contains parts of supervisor visible register state. The guest can
68map this shared page using the KVM hypercall KVM_HC_PPC_MAP_MAGIC_PAGE.
69
70With this hypercall issued the guest always gets the magic page mapped at the
71desired location in effective and physical address space. For now, we always
72map the page to -4096. This way we can access it using absolute load and store
73functions. The following instruction reads the first field of the magic page:
74
75 ld rX, -4096(0)
76
77The interface is designed to be extensible should there be need later to add
78additional registers to the magic page. If you add fields to the magic page,
79also define a new hypercall feature to indicate that the host can give you more
80registers. Only if the host supports the additional features, make use of them.
81
82The magic page has the following layout as described in
83arch/powerpc/include/asm/kvm_para.h:
84
85struct kvm_vcpu_arch_shared {
86 __u64 scratch1;
87 __u64 scratch2;
88 __u64 scratch3;
89 __u64 critical; /* Guest may not get interrupts if == r1 */
90 __u64 sprg0;
91 __u64 sprg1;
92 __u64 sprg2;
93 __u64 sprg3;
94 __u64 srr0;
95 __u64 srr1;
96 __u64 dar;
97 __u64 msr;
98 __u32 dsisr;
99 __u32 int_pending; /* Tells the guest if we have an interrupt */
100};
101
102Additions to the page must only occur at the end. Struct fields are always 32
103or 64 bit aligned, depending on them being 32 or 64 bit wide respectively.
104
105Magic page features
106===================
107
108When mapping the magic page using the KVM hypercall KVM_HC_PPC_MAP_MAGIC_PAGE,
109a second return value is passed to the guest. This second return value contains
110a bitmap of available features inside the magic page.
111
112The following enhancements to the magic page are currently available:
113
114 KVM_MAGIC_FEAT_SR Maps SR registers r/w in the magic page
115
116For enhanced features in the magic page, please check for the existence of the
117feature before using them!
118
119MSR bits
120========
121
122The MSR contains bits that require hypervisor intervention and bits that do
123not require direct hypervisor intervention because they only get interpreted
124when entering the guest or don't have any impact on the hypervisor's behavior.
125
126The following bits are safe to be set inside the guest:
127
128 MSR_EE
129 MSR_RI
130 MSR_CR
131 MSR_ME
132
133If any other bit changes in the MSR, please still use mtmsr(d).
134
135Patched instructions
136====================
137
138The "ld" and "std" instructions are transformed to "lwz" and "stw" instructions
139respectively on 32 bit systems with an added offset of 4 to accommodate big
140endianness.
141
142The following is a list of mappings the Linux kernel performs when running as
143a guest. Implementing any of those mappings is optional, as the instruction traps
144also act on the shared page. So calling privileged instructions still works as
145before.
146
147From To
148==== ==
149
150mfmsr rX ld rX, magic_page->msr
151mfsprg rX, 0 ld rX, magic_page->sprg0
152mfsprg rX, 1 ld rX, magic_page->sprg1
153mfsprg rX, 2 ld rX, magic_page->sprg2
154mfsprg rX, 3 ld rX, magic_page->sprg3
155mfsrr0 rX ld rX, magic_page->srr0
156mfsrr1 rX ld rX, magic_page->srr1
157mfdar rX ld rX, magic_page->dar
158mfdsisr rX lwz rX, magic_page->dsisr
159
160mtmsr rX std rX, magic_page->msr
161mtsprg 0, rX std rX, magic_page->sprg0
162mtsprg 1, rX std rX, magic_page->sprg1
163mtsprg 2, rX std rX, magic_page->sprg2
164mtsprg 3, rX std rX, magic_page->sprg3
165mtsrr0 rX std rX, magic_page->srr0
166mtsrr1 rX std rX, magic_page->srr1
167mtdar rX std rX, magic_page->dar
168mtdsisr rX stw rX, magic_page->dsisr
169
170tlbsync nop
171
172mtmsrd rX, 0 b <special mtmsr section>
173mtmsr rX b <special mtmsr section>
174
175mtmsrd rX, 1 b <special mtmsrd section>
176
177[Book3S only]
178mtsrin rX, rY b <special mtsrin section>
179
180[BookE only]
181wrteei [0|1] b <special wrteei section>
182
183
184Some instructions require more logic to determine what's going on than a load
185or store instruction can deliver. To enable patching of those, we keep some
186RAM around into which we can live-translate instructions. What happens is the
187following:
188
189 1) copy emulation code to memory
190 2) patch that code to fit the emulated instruction
191 3) patch that code to return to the original pc + 4
192 4) patch the original instruction to branch to the new code
193
194That way we can inject an arbitrary amount of code as replacement for a single
195instruction. This allows us to check for pending interrupts when setting EE=1
196for example.
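To make the magic page description concrete, here is an illustrative guest-side sketch (not taken from the patch); the KVM_MAGIC_PAGE macro and helper names are invented for this example, and the structure simply repeats the layout shown above.

	/* Sketch: access magic page fields once KVM_HC_PPC_MAP_MAGIC_PAGE has
	 * mapped the page at effective address -4096. */
	#include <linux/types.h>

	struct kvm_vcpu_arch_shared {
		__u64 scratch1, scratch2, scratch3;
		__u64 critical;
		__u64 sprg0, sprg1, sprg2, sprg3;
		__u64 srr0, srr1;
		__u64 dar;
		__u64 msr;
		__u32 dsisr;
		__u32 int_pending;
	};

	#define KVM_MAGIC_PAGE	((struct kvm_vcpu_arch_shared *)-4096)

	/* Equivalent of the "mfmsr rX -> ld rX, magic_page->msr" mapping above. */
	static inline __u64 kvm_para_get_msr(void)
	{
		return KVM_MAGIC_PAGE->msr;
	}

	/* Non-zero when the host has an interrupt queued for this vcpu. */
	static inline int kvm_para_interrupt_pending(void)
	{
		return KVM_MAGIC_PAGE->int_pending != 0;
	}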
diff --git a/Documentation/kvm/timekeeping.txt b/Documentation/kvm/timekeeping.txt
new file mode 100644
index 000000000000..0c5033a58c9e
--- /dev/null
+++ b/Documentation/kvm/timekeeping.txt
@@ -0,0 +1,612 @@
1
2 Timekeeping Virtualization for X86-Based Architectures
3
4 Zachary Amsden <zamsden@redhat.com>
5 Copyright (c) 2010, Red Hat. All rights reserved.
6
71) Overview
82) Timing Devices
93) TSC Hardware
104) Virtualization Problems
11
12=========================================================================
13
141) Overview
15
16One of the most complicated parts of the X86 platform, and specifically of
17the virtualization of this platform, is the plethora of timing devices available
18and the complexity of emulating those devices. In addition, virtualization of
19time introduces a new set of challenges because it introduces a multiplexed
20division of time beyond the control of the guest CPU.
21
22First, we will describe the various timekeeping hardware available, then
23present some of the problems which arise and solutions available, giving
24specific recommendations for certain classes of KVM guests.
25
26The purpose of this document is to collect data and information relevant to
27timekeeping which may be difficult to find elsewhere, specifically,
28information relevant to KVM and hardware-based virtualization.
29
30=========================================================================
31
322) Timing Devices
33
34First we discuss the basic hardware devices available. TSC and the related
35KVM clock are special enough to warrant a full exposition and are described in
36the following section.
37
382.1) i8254 - PIT
39
40One of the first timer devices available is the programmable interrupt timer,
41or PIT. The PIT has a fixed frequency 1.193182 MHz base clock and three
42channels which can be programmed to deliver periodic or one-shot interrupts.
43These three channels can be configured in different modes and have individual
44counters. Channels 1 and 2 were not available for general use in the original
45IBM PC, and historically were connected to control RAM refresh and the PC
46speaker. Now the PIT is typically integrated as part of an emulated chipset
47and a separate physical PIT is not used.
48
49The PIT uses I/O ports 0x40 - 0x43. Access to the 16-bit counters is done
50using single or multiple byte access to the I/O ports. There are 6 modes
51available, but not all modes are available to all timers, as only timer 2
52has a connected gate input, required for modes 1 and 5. The gate line is
53controlled by port 61h, bit 0, as illustrated in the following diagram.
54
55 -------------- ----------------
56| | | |
57| 1.1932 MHz |---------->| CLOCK OUT | ---------> IRQ 0
58| Clock | | | |
59 -------------- | +->| GATE TIMER 0 |
60 | ----------------
61 |
62 | ----------------
63 | | |
64 |------>| CLOCK OUT | ---------> 66.3 KHZ DRAM
65 | | | (aka /dev/null)
66 | +->| GATE TIMER 1 |
67 | ----------------
68 |
69 | ----------------
70 | | |
71 |------>| CLOCK OUT | ---------> Port 61h, bit 5
72 | | |
73Port 61h, bit 0 ---------->| GATE TIMER 2 | \_.---- ____
74 ---------------- _| )--|LPF|---Speaker
75 / *---- \___/
76Port 61h, bit 1 -----------------------------------/
77
78The timer modes are now described.
79
80Mode 0: Single Timeout. This is a one-shot software timeout that counts down
81 when the gate is high (always true for timers 0 and 1). When the count
82 reaches zero, the output goes high.
83
84Mode 1: Triggered One-shot. The output is initially set high. When the gate
85 line is set high, a countdown is initiated (which does not stop if the gate is
86 lowered), during which the output is set low. When the count reaches zero,
87 the output goes high.
88
89Mode 2: Rate Generator. The output is initially set high. When the countdown
90 reaches 1, the output goes low for one count and then returns high. The value
91 is reloaded and the countdown automatically resumes. If the gate line goes
92 low, the count is halted. If the output is low when the gate is lowered, the
93 output automatically goes high (this only affects timer 2).
94
95Mode 3: Square Wave. This generates a high / low square wave. The count
96 determines the length of the pulse, which alternates between high and low
97 when zero is reached. The count only proceeds when gate is high and is
98 automatically reloaded on reaching zero. The count is decremented twice at
99 each clock to generate a full high / low cycle at the full periodic rate.
100 If the count is even, the clock remains high for N/2 counts and low for N/2
101 counts; if the count is odd, the clock is high for (N+1)/2 counts and low
102 for (N-1)/2 counts. Only even values are latched by the counter, so odd
103 values are not observed when reading. This is the intended mode for timer 2,
104 which generates sine-like tones by low-pass filtering the square wave output.
105
106Mode 4: Software Strobe. After programming this mode and loading the counter,
107 the output remains high until the counter reaches zero. Then the output
108 goes low for 1 clock cycle and returns high. The counter is not reloaded.
109 Counting only occurs when gate is high.
110
111Mode 5: Hardware Strobe. After programming and loading the counter, the
112 output remains high. When the gate is raised, a countdown is initiated
113 (which does not stop if the gate is lowered). When the counter reaches zero,
114 the output goes low for 1 clock cycle and then returns high. The counter is
115 not reloaded.
116
117In addition to normal binary counting, the PIT supports BCD counting. The
118command port, 0x43 is used to set the counter and mode for each of the three
119timers.
120
121PIT commands, issued to port 0x43, using the following bit encoding:
122
123Bit 7-4: Command (See table below)
124Bit 3-1: Mode (000 = Mode 0, 101 = Mode 5, 11X = undefined)
125Bit 0 : Binary (0) / BCD (1)
126
127Command table:
128
1290000 - Latch Timer 0 count for port 0x40
130 sample and hold the count to be read in port 0x40;
131 additional commands ignored until counter is read;
132 mode bits ignored.
133
1340001 - Set Timer 0 LSB mode for port 0x40
135 set timer to read LSB only and force MSB to zero;
136 mode bits set timer mode
137
1380010 - Set Timer 0 MSB mode for port 0x40
139 set timer to read MSB only and force LSB to zero;
140 mode bits set timer mode
141
1420011 - Set Timer 0 16-bit mode for port 0x40
143 set timer to read / write LSB first, then MSB;
144 mode bits set timer mode
145
1460100 - Latch Timer 1 count for port 0x41 - as described above
1470101 - Set Timer 1 LSB mode for port 0x41 - as described above
1480110 - Set Timer 1 MSB mode for port 0x41 - as described above
1490111 - Set Timer 1 16-bit mode for port 0x41 - as described above
150
1511000 - Latch Timer 2 count for port 0x42 - as described above
1521001 - Set Timer 2 LSB mode for port 0x42 - as described above
1531010 - Set Timer 2 MSB mode for port 0x42 - as described above
1541011 - Set Timer 2 16-bit mode for port 0x42 as described above
155
1561101 - General counter latch
157 Latch combination of counters into corresponding ports
158 Bit 3 = Counter 2
159 Bit 2 = Counter 1
160 Bit 1 = Counter 0
161 Bit 0 = Unused
162
1631110 - Latch timer status
164 Latch combination of counter mode into corresponding ports
165 Bit 3 = Counter 2
166 Bit 2 = Counter 1
167 Bit 1 = Counter 0
168
169 The output of ports 0x40-0x42 following this command will be:
170
171 Bit 7 = Output pin
172 Bit 6 = Count loaded (0 if timer has expired)
173 Bit 5-4 = Read / Write mode
174 01 = MSB only
175 10 = LSB only
176 11 = LSB / MSB (16-bit)
177 Bit 3-1 = Mode
178 Bit 0 = Binary (0) / BCD mode (1)
179
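To tie the mode and command encodings together, here is a hedged example (not part of the patch) of programming channel 0 as a mode 2 rate generator; outb() comes from <asm/io.h>, the helper name is invented, and the 1.193182 MHz base clock is the one described above.

	/* Command byte 0x34 = command 0011 (timer 0, LSB then MSB),
	 * mode 010 (rate generator), binary counting. */
	#include <asm/io.h>

	#define PIT_TICK_RATE	1193182ul

	static void pit_set_periodic(unsigned int hz)
	{
		unsigned int count = PIT_TICK_RATE / hz;

		outb(0x34, 0x43);		/* program timer 0 via the command port */
		outb(count & 0xff, 0x40);	/* counter LSB first ... */
		outb(count >> 8, 0x40);		/* ... then MSB */
	}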
1802.2) RTC
181
182The second device which was available in the original PC was the MC146818 real
183time clock. The original device is now obsolete, and usually emulated by the
184system chipset, sometimes by an HPET and some frankenstein IRQ routing.
185
186The RTC is accessed through CMOS variables, which uses an index register to
187control which bytes are read. Since there is only one index register, reads
188of the CMOS and reads of the RTC require lock protection (in addition, it is
189dangerous to allow userspace utilities such as hwclock to have direct RTC
190access, as they could corrupt kernel reads and writes of CMOS memory).
191
192The RTC generates an interrupt which is usually routed to IRQ 8. The interrupt
193can function as a periodic timer, an additional once a day alarm, and can issue
194interrupts after an update of the CMOS registers by the MC146818 is complete.
195The type of interrupt is signalled in the RTC status registers.
196
197The RTC will update the current time fields by battery power even while the
198system is off. The current time fields should not be read while an update is
199in progress, as indicated in the status register.
200
201The clock uses a 32.768kHz crystal, so bits 6-4 of register A should be
202programmed to a 32kHz divider if the RTC is to count seconds.
203
204This is the RAM map originally used for the RTC/CMOS:
205
206Location Size Description
207------------------------------------------
20800h byte Current second (BCD)
20901h byte Seconds alarm (BCD)
21002h byte Current minute (BCD)
21103h byte Minutes alarm (BCD)
21204h byte Current hour (BCD)
21305h byte Hours alarm (BCD)
21406h byte Current day of week (BCD)
21507h byte Current day of month (BCD)
21608h byte Current month (BCD)
21709h byte Current year (BCD)
2180Ah byte Register A
219 bit 7 = Update in progress
220 bit 6-4 = Divider for clock
221 000 = 4.194 MHz
222 001 = 1.049 MHz
223 010 = 32 kHz
224 10X = test modes
225 110 = reset / disable
226 111 = reset / disable
227 bit 3-0 = Rate selection for periodic interrupt
228 000 = periodic timer disabled
229 001 = 3.90625 uS
230 010 = 7.8125 uS
231 011 = .122070 mS
232 100 = .244141 mS
233 ...
234 1101 = 125 mS
235 1110 = 250 mS
236 1111 = 500 mS
2370Bh byte Register B
238 bit 7 = Run (0) / Halt (1)
239 bit 6 = Periodic interrupt enable
240 bit 5 = Alarm interrupt enable
241 bit 4 = Update-ended interrupt enable
242 bit 3 = Square wave interrupt enable
243 bit 2 = BCD calendar (0) / Binary (1)
244 bit 1 = 12-hour mode (0) / 24-hour mode (1)
245 bit 0 = 0 (DST off) / 1 (DST enabled)
2460Ch	byte	Register C (read only)
247 bit 7 = interrupt request flag (IRQF)
248 bit 6 = periodic interrupt flag (PF)
249 bit 5 = alarm interrupt flag (AF)
250 bit 4 = update interrupt flag (UF)
251 bit 3-0 = reserved
2520Dh	byte	Register D (read only)
253 bit 7 = RTC has power
254 bit 6-0 = reserved
25532h byte Current century BCD (*)
256 (*) location vendor specific and now determined from ACPI global tables
257
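As a small illustration of the index/data access scheme described above (a sketch, not from the patch; 0x70/0x71 are the conventional index and data ports, which the text does not spell out), a read helper might look like:

	#include <asm/io.h>

	static unsigned char cmos_read(unsigned char addr)
	{
		outb(addr, 0x70);	/* select the CMOS/RTC register */
		return inb(0x71);	/* read its value */
	}

Real code must serialize the two steps against other CMOS users, since the single index register is shared.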
2582.3) APIC
259
260On Pentium and later processors, an on-board timer is available to each CPU
261as part of the Advanced Programmable Interrupt Controller. The APIC is
262accessed through memory-mapped registers and provides interrupt service to each
263CPU, used for IPIs and local timer interrupts.
264
265Although in theory the APIC is a safe and stable source for local interrupts,
266in practice, many bugs and glitches have occurred due to the special nature of
267the APIC CPU-local memory-mapped hardware. Beware that CPU errata may affect
268the use of the APIC and that workarounds may be required. In addition, some of
269these workarounds pose unique constraints for virtualization - requiring either
270extra overhead incurred from extra reads of memory-mapped I/O or additional
271functionality that may be more computationally expensive to implement.
272
273Since the APIC is documented quite well in the Intel and AMD manuals, we will
274avoid repetition of the detail here. It should be pointed out that the APIC
275timer is programmed through the LVT (local vector timer) register, is capable
276of one-shot or periodic operation, and is based on the bus clock divided down
277by the programmable divider register.
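
As a hedged sketch only (register offsets and bit values as given in the
vendor manuals; apic_base is assumed to be an ioremap()ed pointer to the
local APIC page), a periodic timer at a given vector is set up by
programming the divider, the LVT timer entry and the initial count:

  #define APIC_LVT_TIMER           0x320   /* LVT timer register */
  #define APIC_TIMER_ICR           0x380   /* initial count */
  #define APIC_TIMER_DCR           0x3E0   /* divide configuration */

  #define APIC_LVT_TIMER_PERIODIC  (1u << 17)
  #define APIC_TDR_DIV_16          0x3

  static void apic_timer_start(void __iomem *apic_base,
                               unsigned int vector, u32 count)
  {
          writel(APIC_TDR_DIV_16, apic_base + APIC_TIMER_DCR);
          writel(APIC_LVT_TIMER_PERIODIC | vector,
                 apic_base + APIC_LVT_TIMER);
          writel(count, apic_base + APIC_TIMER_ICR);  /* starts counting */
  }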
278
2792.4) HPET
280
281HPET is quite complex, and was originally intended to replace the PIT / RTC
282support of the X86 PC. It remains to be seen whether that will be the case, as
283the de facto standard of PC hardware is to emulate these older devices. Some
284systems designated as legacy-free may support only the HPET as a hardware timer
285device.
286
287The HPET spec is rather loose and vague, requiring at least 3 hardware timers,
288but allowing implementation freedom to support many more. It also imposes no
289fixed rate on the timer frequency, but does impose some extremal values on
290frequency, error and slew.
291
292In general, the HPET is recommended as a high-precision (compared to PIT / RTC)
293time source which is independent of local variation (as there is only one HPET
294in any given system). The HPET is also memory-mapped, and its presence is
295indicated through ACPI tables by the BIOS.
296
297Detailed specification of the HPET is beyond the current scope of this
298document, as it is also very well documented elsewhere.
299
3002.5) Offboard Timers
301
302Several cards, both proprietary (watchdog boards) and commonplace (e1000), have
303timing chips built into them, with registers that may be accessible to kernel
304or user drivers. To the author's knowledge, using these to generate
305a clocksource for a Linux or other kernel has not yet been attempted and is in
306general frowned upon as not playing by the agreed rules of the game. Such a
307timer device would require additional support to be virtualized properly and is
308not considered important at this time as no known operating system does this.
309
310=========================================================================
311
3123) TSC Hardware
313
314The TSC or time stamp counter is relatively simple in theory; it counts
315processor clock cycles, which can be used as a measure of
316time. In practice, due to a number of problems, it is the most complicated
317timekeeping device to use.
318
319The TSC is represented internally as a 64-bit MSR which can be read with the
320RDMSR, RDTSC, or RDTSCP (when available) instructions. In the past it was
321also possible to write the TSC, but hardware limitations meant that generally
322only the low 32 bits of the 64-bit counter could be written, and the upper
32332 bits of the counter were cleared. Now, however, on Intel processors family
3240Fh, for models 3, 4 and 6, and family 06h, models e and f, this restriction
325has been lifted and all 64-bits are writable. On AMD systems, the ability to
326write the TSC MSR is not an architectural guarantee.
327
328The TSC is accessible from CPL 0 and, conditionally, from CPL > 0 software by
329means of the CR4.TSD bit, which, when set, disables CPL > 0 TSC access.
330
331Some vendors have implemented an additional instruction, RDTSCP, which returns
332atomically not just the TSC, but an indicator which corresponds to the
333processor number. This can be used to index into an array of TSC variables to
334determine offset information in SMP systems where TSCs are not synchronized.
335The presence of this instruction must be determined by consulting CPUID feature
336bits.
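
A hedged sketch of the raw reads (gcc-style inline assembly; RDTSCP
availability must be checked via the CPUID feature bits first):

  static inline unsigned long long read_tsc(void)
  {
          unsigned int lo, hi;

          asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
          return ((unsigned long long)hi << 32) | lo;
  }

  /* RDTSCP also returns IA32_TSC_AUX (typically the processor number)
   * in ECX, which can index per-CPU offset information.
   */
  static inline unsigned long long read_tscp(unsigned int *aux)
  {
          unsigned int lo, hi;

          asm volatile("rdtscp" : "=a" (lo), "=d" (hi), "=c" (*aux));
          return ((unsigned long long)hi << 32) | lo;
  }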
337
338Both VMX and SVM provide extension fields in the virtualization hardware which
339allow the guest-visible TSC to be offset by a constant. Newer implementations
340promise to allow the TSC to additionally be scaled, but this hardware is not
341yet widely available.
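
Conceptually (the function and variable names below are illustrative, not
the architectural ones), the guest sees:

  /* With no scaling support, mult == 1 << shift, i.e. a pure offset. */
  static inline u64 guest_visible_tsc(u64 host_tsc, u64 offset,
                                      u64 mult, unsigned int shift)
  {
          return ((host_tsc * mult) >> shift) + offset;
  }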
342
3433.1) TSC synchronization
344
345The TSC is a CPU-local clock in most implementations. This means, on SMP
346platforms, the TSCs of different CPUs may start at different times depending
347on when the CPUs are powered on. Generally, CPUs on the same die will share
348the same clock; however, this is not always the case.
349
350The BIOS may attempt to resynchronize the TSCs during the poweron process and
351the operating system or other system software may attempt to do this as well.
352Several hardware limitations make the problem worse - if it is not possible to
353write the full 64-bits of the TSC, it may be impossible to match the TSC in
354newly arriving CPUs to that of the rest of the system, resulting in
355unsynchronized TSCs. Whether this resynchronization is attempted by BIOS or
356system software, in practice a perfectly synchronized TSC will not be possible
357unless all values are read from the same clock, which is generally only
358possible on single-socket systems or those with special hardware support.
359
3603.2) TSC and CPU hotplug
361
362As touched on already, CPUs which arrive later than the boot time of the system
363may not have a TSC value that is synchronized with the rest of the system.
364System software, BIOS, or SMM code may try to set the TSC of a late-arriving
365CPU to a value matching the rest of the system, but a perfect match is usually
366not guaranteed. This can have the effect of bringing a system from a state where
367TSC is synchronized back to a state where TSC synchronization flaws, however
368small, may be exposed to the OS and any virtualization environment.
369
3703.3) TSC and multi-socket / NUMA
371
372Multi-socket systems, especially large multi-socket systems, are likely to have
373individual clocksources rather than a single, universally distributed clock.
374Since these clocks are driven by different crystals, they will not have
375perfectly matched frequency, and temperature and electrical variations will
376cause the CPU clocks, and thus the TSCs, to drift over time. Depending on the
377exact clock and bus design, the drift may or may not be fixed in absolute
378error, and may accumulate over time.
379
380In addition, very large systems may deliberately slew the clocks of individual
381cores. This technique, known as spread-spectrum clocking, reduces EMI at the
382clock frequency and harmonics of it, which may be required to pass FCC
383standards for telecommunications and computer equipment.
384
385It is recommended not to trust the TSCs to remain synchronized on NUMA or
386multiple socket systems for these reasons.
387
3883.4) TSC and C-states
389
390C-states, or idling states of the processor, especially C1E and deeper sleep
391states, may be problematic for the TSC as well. The TSC may stop advancing in such
392a state, resulting in a TSC which is behind that of other CPUs when execution
393is resumed. Such CPUs must be detected and flagged by the operating system
394based on CPU and chipset identifications.
395
396The TSC in such a case may be corrected by catching it up to a known external
397clocksource.
398
3993.5) TSC frequency change / P-states
400
401To make things slightly more interesting, some CPUs may change frequency. They
402may or may not run the TSC at the same rate, and because the frequency change
403may be staggered or slewed, at some points in time, the TSC rate may not be
404known other than falling within a range of values. In this case, the TSC will
405not be a stable time source, and must be calibrated against a known, stable,
406external clock to be a usable source of time.
407
408Whether the TSC runs at a constant rate or scales with the P-state is model
409dependent and must be determined by inspecting CPUID, chipset or vendor
410specific MSR fields.
411
412In addition, some vendors have known bugs where the P-state is actually
413compensated for properly during normal operation, but when the processor is
414inactive, the P-state may be raised temporarily to service cache misses from
415other processors. In such cases, the TSC on halted CPUs could advance faster
416than that of non-halted processors. AMD Turion processors are known to have
417this problem.
418
4193.6) TSC and STPCLK / T-states
420
421External signals given to the processor may also have the effect of stopping
422the TSC. This is typically done for thermal emergency power control to prevent
423an overheating condition, and in general there is no way to detect that this
424condition has happened.
425
4263.7) TSC virtualization - VMX
427
428VMX provides conditional trapping of RDTSC, RDMSR, WRMSR and RDTSCP
429instructions, which is enough for full virtualization of TSC in any manner. In
430addition, VMX allows passing through the host TSC plus an additional TSC_OFFSET
431field specified in the VMCS. Special instructions must be used to read and
432write the VMCS field.
433
4343.8) TSC virtualization - SVM
435
436SVM provides conditional trapping of RDTSC, RDMSR, WRMSR and RDTSCP
437instructions, which is enough for full virtualization of TSC in any manner. In
438addition, SVM allows passing through the host TSC plus an additional offset
439field specified in the SVM control block.
440
4413.9) TSC feature bits in Linux
442
443In summary, there is no way to guarantee the TSC remains in perfect
444synchronization unless it is explicitly guaranteed by the architecture. Even
445if so, the TSCs in multi-socket or NUMA systems may still run independently
446despite being locally consistent.
447
448The following feature bits are used by Linux to signal various TSC attributes,
449but they can only be taken to be meaningful for UP or single node systems.
450
451X86_FEATURE_TSC : The TSC is available in hardware
452X86_FEATURE_RDTSCP : The RDTSCP instruction is available
453X86_FEATURE_CONSTANT_TSC : The TSC rate is unchanged with P-states
454X86_FEATURE_NONSTOP_TSC : The TSC does not stop in C-states
455X86_FEATURE_TSC_RELIABLE : TSC sync checks are skipped (VMware)
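
In kernel code these attributes are typically tested through the CPU
feature helpers; a hedged sketch (the policy hook at the end is invented
for illustration):

  if (boot_cpu_has(X86_FEATURE_TSC) &&
      boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
      boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) {
          /* rate does not change with P-states and does not stop in
           * C-states, so the TSC is a candidate clocksource here
           */
          use_tsc_clocksource();          /* assumed policy hook */
  }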
456
4574) Virtualization Problems
458
459Timekeeping is especially problematic for virtualization because a number of
460challenges arise. The most obvious problem is that time is now shared between
461the host and, potentially, a number of virtual machines. Thus the virtual
462operating system does not run with 100% usage of the CPU, despite the fact that
463it may very well make that assumption. The guest may expect this to hold to
464very exacting bounds when interrupt sources are disabled, but in reality only its
465virtual interrupt sources are disabled, and the machine may still be preempted
466at any time. This causes problems as the passage of real time, the injection
467of machine interrupts and the associated clock sources are no longer completely
468synchronized with real time.
469
470This same problem can occur to a degree on native hardware, as SMM code
471invoked by the BIOS on X86 systems may steal cycles from the operating
472system, though not in such an extreme fashion. However, the fact that SMM mode
473may cause similar problems to virtualization makes it a good justification for
474solving many of these problems on bare metal.
475
4764.1) Interrupt clocking
477
478One of the most immediate problems that occurs with legacy operating systems
479is that the system timekeeping routines are often designed to keep track of
480time by counting periodic interrupts. These interrupts may come from the PIT
481or the RTC, but the problem is the same: the host virtualization engine may not
482be able to deliver the proper number of interrupts per second, and so guest
483time may fall behind. This is especially problematic if a high interrupt rate
484is selected, such as 1000 HZ, which is unfortunately the default for many Linux
485guests.
486
487There are three approaches to solving this problem; first, it may be possible
488to simply ignore it. Guests which have a separate time source for tracking
489'wall clock' or 'real time' may not need any adjustment of their interrupts to
490maintain proper time. If this is not sufficient, it may be necessary to inject
491additional interrupts into the guest in order to increase the effective
492interrupt rate. This approach leads to complications in extreme conditions,
493where host load or guest lag is too much to compensate for, and thus another
494solution to the problem has arisen: the guest may need to become aware of lost
495ticks and compensate for them internally. Although promising in theory, the
496implementation of this policy in Linux has been extremely error prone, and a
497number of buggy variants of lost tick compensation are distributed across
498commonly used Linux systems.
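
In outline, lost tick compensation compares the number of interrupts seen
against the number expected from a reference clock; the sketch below only
illustrates that idea (all names, including the reference clock read and
the accounting hook, are assumptions):

  static u64 boot_ref, ticks_seen;
  static u64 tick_period;                 /* reference units per tick */

  static void timer_interrupt(void)
  {
          u64 now = read_reference_clock();       /* e.g. PM timer/TSC */
          u64 expected = (now - boot_ref) / tick_period;
          long lost = expected - ticks_seen - 1;

          if (lost > 0)
                  ticks_seen += lost;     /* make up for missed ticks */
          ticks_seen++;
          update_system_time(ticks_seen); /* assumed accounting hook */
  }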
499
500Windows uses periodic RTC clocking as a means of keeping time internally, and
501thus requires interrupt slewing to keep proper time. It does, however, use a
502low enough rate (ed: is it 18.2 Hz?) that it has not yet been a problem in
503practice.
504
5054.2) TSC sampling and serialization
506
507As the highest precision time source available, the cycle counter of the CPU
508has aroused much interest from developers. As explained above, this timer has
509many problems unique to its nature as a local, potentially unstable and
510potentially unsynchronized source. One issue which is not unique to the TSC,
511but is highlighted because of its very precise nature, is sampling delay. By
512definition, the counter, once read, is already old. However, it is also
513possible for the counter to be read ahead of the actual use of the result.
514This is a consequence of the superscalar execution of the instruction stream,
515which may execute instructions out of order. Such execution is called
516non-serialized. Forcing serialized execution is necessary for precise
517measurement with the TSC, and requires a serializing instruction, such as CPUID
518or an MSR read.
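
A hedged example of such a serialized read (CPUID with an arbitrary leaf,
used purely as a serializing barrier; note that in a guest this CPUID may
itself trap, which is the cost discussed below):

  static inline unsigned long long read_tsc_serialized(void)
  {
          unsigned int lo, hi;

          asm volatile("cpuid\n\t"        /* serialize */
                       "rdtsc"
                       : "=a" (lo), "=d" (hi)
                       : : "ebx", "ecx");
          return ((unsigned long long)hi << 32) | lo;
  }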
519
520Since CPUID may actually be virtualized by a trap and emulate mechanism, this
521serialization can pose a performance issue for hardware virtualization. An
522accurate time stamp counter reading may therefore not always be available, and
523it may be necessary for an implementation to guard against "backwards" reads of
524the TSC as seen from other CPUs, even in an otherwise perfectly synchronized
525system.
526
5274.3) Timespec aliasing
528
529Additionally, this lack of serialization from the TSC poses another challenge
530when using results of the TSC when measured against another time source. As
531the TSC is much higher precision, many possible values of the TSC may be read
532while another clock is still expressing the same value.
533
534That is, you may read (T,T+10) while external clock C maintains the same value.
535Due to non-serialized reads, you may actually end up with a range which
536fluctuates - from (T-1 .. T+10). Thus, any time calculated from a TSC, but
537calibrated against an external value may have a range of valid values.
538Re-calibrating this computation may actually cause time, as computed after the
539calibration, to go backwards, compared with time computed before the
540calibration.
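
In the terms used here, any TSC-derived time is a base value plus a scaled
TSC delta; nothing forces two successive calibrations of that formula to
agree (illustrative sketch, kernel-style types assumed):

  static u64 tsc_to_ns(u64 tsc, u64 base_ns, u64 tsc_base,
                       u64 mult, int shift)
  {
          return base_ns + (((tsc - tsc_base) * mult) >> shift);
  }

  /* For the same tsc, the value computed with a new (base_ns, tsc_base,
   * mult) triple sampled at the low edge of the coarse external clock's
   * granularity can be smaller than the value computed with the old one,
   * i.e. derived time steps backwards across the recalibration.
   */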
541
542This problem is particularly pronounced with an internal time source in Linux,
543the kernel time, which is expressed in the theoretically high resolution
544timespec - but which advances in much larger granularity intervals, sometimes
545at the rate of jiffies, and possibly in catchup modes, at a much larger step.
546
547This aliasing requires care in the computation and recalibration of kvmclock
548and any other values derived from TSC computation (such as TSC virtualization
549itself).
550
5514.4) Migration
552
553Migration of a virtual machine raises problems for timekeeping in two ways.
554First, the migration itself may take time, during which interrupts cannot be
555delivered, and after which, the guest time may need to be caught up. NTP may
556be able to help to some degree here, as the clock correction required is
557typically small enough to fall in the NTP-correctable window.
558
559An additional concern is that timers based on the TSC (or HPET, if the raw bus
560clock is exposed) may now be running at different rates, requiring compensation
561in some way in the hypervisor by virtualizing these timers. In addition,
562migrating to a faster machine may preclude the use of a passthrough TSC, as a
563faster clock cannot be made visible to a guest without the potential of time
564advancing faster than usual. A slower clock is less of a problem, as it can
565always be caught up to the original rate. KVM clock avoids these problems by
566simply storing multipliers and offsets against the TSC for the guest to convert
567back into nanosecond resolution values.
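
A hedged sketch of that conversion, in the shape of the pvclock math
(field names only approximate the actual ABI):

  struct pv_time_info {
          u64 tsc_timestamp;      /* host TSC when record was written */
          u64 system_time;        /* nanoseconds at that instant */
          u32 tsc_to_system_mul;  /* fixed-point multiplier */
          s8  tsc_shift;          /* power-of-two pre-scale */
  };

  static u64 pv_clock_read(const struct pv_time_info *p, u64 tsc)
  {
          u64 delta = tsc - p->tsc_timestamp;

          if (p->tsc_shift >= 0)
                  delta <<= p->tsc_shift;
          else
                  delta >>= -p->tsc_shift;

          return p->system_time + ((delta * p->tsc_to_system_mul) >> 32);
  }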
568
5694.5) Scheduling
570
571Since scheduling may be based on precise timing and firing of interrupts, the
572scheduling algorithms of an operating system may be adversely affected by
573virtualization. In theory, the effect is random and should be universally
574distributed, but in contrived as well as real scenarios (guest device access,
575causes of virtualization exits, possible context switch), this may not always
576be the case. The effect of this has not been well studied.
577
578In an attempt to work around this, several implementations have provided a
579paravirtualized scheduler clock, which reveals the true amount of CPU time for
580which a virtual machine has been running.
581
5824.6) Watchdogs
583
584Watchdog timers, such as the lockup detector in Linux, may fire accidentally when
585running under hardware virtualization due to timer interrupts being delayed or
586due to misinterpretation of the passage of real time. Usually, these warnings are
587spurious and can be ignored, but in some circumstances it may be necessary to
588disable such detection.
589
5904.7) Delays and precision timing
591
592Precise timing and delays may not be possible in a virtualized system. This
593can happen if the system is controlling physical hardware, or issues delays to
594compensate for slower I/O to and from devices. The first issue is not solvable
595in general for a virtualized system; hardware control software can't be
596adequately virtualized without a full real-time operating system, which would
597require an RT-aware virtualization platform.
598
599The second issue may cause performance problems, but this is unlikely to be a
600significant issue. In many cases these delays may be eliminated through
601configuration or paravirtualization.
602
6034.8) Covert channels and leaks
604
605In addition to the above problems, time information will inevitably leak to the
606guest about the host in anything but a perfect implementation of virtualized
607time. This may allow the guest to infer the presence of a hypervisor (as in a
608red-pill type detection), and it may allow information to leak between guests
609by using CPU utilization itself as a signalling channel. Preventing such
610problems would require completely isolated virtual time which may not track
611real time any longer. This may be useful in certain security or QA contexts,
612but in general isn't recommended for real-world deployment scenarios.
diff --git a/Documentation/vm/numa_memory_policy.txt b/Documentation/vm/numa_memory_policy.txt
index 6690fc34ef6d..4e7da6543424 100644
--- a/Documentation/vm/numa_memory_policy.txt
+++ b/Documentation/vm/numa_memory_policy.txt
@@ -424,7 +424,7 @@ a command line tool, numactl(8), exists that allows one to:
424 424
425+ set the shared policy for a shared memory segment via mbind(2) 425+ set the shared policy for a shared memory segment via mbind(2)
426 426
427The numactl(8) tool is packages with the run-time version of the library 427The numactl(8) tool is packaged with the run-time version of the library
428containing the memory policy system call wrappers. Some distributions 428containing the memory policy system call wrappers. Some distributions
429package the headers and compile-time libraries in a separate development 429package the headers and compile-time libraries in a separate development
430package. 430package.
diff --git a/MAINTAINERS b/MAINTAINERS
index 494e1a07366a..69aa8fe060b3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2212,6 +2212,13 @@ W: bluesmoke.sourceforge.net
2212S: Maintained 2212S: Maintained
2213F: drivers/edac/i5400_edac.c 2213F: drivers/edac/i5400_edac.c
2214 2214
2215EDAC-I7300
2216M: Mauro Carvalho Chehab <mchehab@redhat.com>
2217L: linux-edac@vger.kernel.org
2218W: bluesmoke.sourceforge.net
2219S: Maintained
2220F: drivers/edac/i7300_edac.c
2221
2215EDAC-I7CORE 2222EDAC-I7CORE
2216M: Mauro Carvalho Chehab <mchehab@redhat.com> 2223M: Mauro Carvalho Chehab <mchehab@redhat.com>
2217L: linux-edac@vger.kernel.org 2224L: linux-edac@vger.kernel.org
@@ -3434,7 +3441,7 @@ F: scripts/package/
3434 3441
3435KERNEL JANITORS 3442KERNEL JANITORS
3436L: kernel-janitors@vger.kernel.org 3443L: kernel-janitors@vger.kernel.org
3437W: http://janitor.kernelnewbies.org/ 3444W: http://kernelnewbies.org/KernelJanitors
3438S: Odd Fixes 3445S: Odd Fixes
3439 3446
3440KERNEL NFSD, SUNRPC, AND LOCKD SERVERS 3447KERNEL NFSD, SUNRPC, AND LOCKD SERVERS
@@ -4453,6 +4460,15 @@ L: linux-i2c@vger.kernel.org
4453S: Maintained 4460S: Maintained
4454F: drivers/i2c/busses/i2c-pasemi.c 4461F: drivers/i2c/busses/i2c-pasemi.c
4455 4462
4463PADATA PARALLEL EXECUTION MECHANISM
4464M: Steffen Klassert <steffen.klassert@secunet.com>
4465L: linux-kernel@vger.kernel.org
4466L: linux-crypto@vger.kernel.org
4467S: Maintained
4468F: kernel/padata.c
4469F: include/linux/padata.h
4470F: Documentation/padata.txt
4471
4456PANASONIC LAPTOP ACPI EXTRAS DRIVER 4472PANASONIC LAPTOP ACPI EXTRAS DRIVER
4457M: Harald Welte <laforge@gnumonks.org> 4473M: Harald Welte <laforge@gnumonks.org>
4458L: platform-driver-x86@vger.kernel.org 4474L: platform-driver-x86@vger.kernel.org
@@ -4532,6 +4548,12 @@ S: Maintained
4532F: drivers/leds/leds-pca9532.c 4548F: drivers/leds/leds-pca9532.c
4533F: include/linux/leds-pca9532.h 4549F: include/linux/leds-pca9532.h
4534 4550
4551PCA9541 I2C BUS MASTER SELECTOR DRIVER
4552M: Guenter Roeck <guenter.roeck@ericsson.com>
4553L: linux-i2c@vger.kernel.org
4554S: Maintained
4555F: drivers/i2c/muxes/pca9541.c
4556
4535PCA9564/PCA9665 I2C BUS DRIVER 4557PCA9564/PCA9665 I2C BUS DRIVER
4536M: Wolfram Sang <w.sang@pengutronix.de> 4558M: Wolfram Sang <w.sang@pengutronix.de>
4537L: linux-i2c@vger.kernel.org 4559L: linux-i2c@vger.kernel.org
@@ -4580,6 +4602,13 @@ L: netdev@vger.kernel.org
4580S: Maintained 4602S: Maintained
4581F: drivers/net/pcnet32.c 4603F: drivers/net/pcnet32.c
4582 4604
4605PCRYPT PARALLEL CRYPTO ENGINE
4606M: Steffen Klassert <steffen.klassert@secunet.com>
4607L: linux-crypto@vger.kernel.org
4608S: Maintained
4609F: crypto/pcrypt.c
4610F: include/crypto/pcrypt.h
4611
4583PER-TASK DELAY ACCOUNTING 4612PER-TASK DELAY ACCOUNTING
4584M: Balbir Singh <balbir@linux.vnet.ibm.com> 4613M: Balbir Singh <balbir@linux.vnet.ibm.com>
4585S: Maintained 4614S: Maintained
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 3849887157e7..b64e465ac49c 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1222,7 +1222,7 @@ config SMP
1222 1222
1223 See also <file:Documentation/i386/IO-APIC.txt>, 1223 See also <file:Documentation/i386/IO-APIC.txt>,
1224 <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at 1224 <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at
1225 <http://www.linuxdoc.org/docs.html#howto>. 1225 <http://tldp.org/HOWTO/SMP-HOWTO.html>.
1226 1226
1227 If you don't know what to do here, say N. 1227 If you don't know what to do here, say N.
1228 1228
diff --git a/arch/arm/common/icst.c b/arch/arm/common/icst.c
index 9a7f09cff300..2dc6da70ae59 100644
--- a/arch/arm/common/icst.c
+++ b/arch/arm/common/icst.c
@@ -8,7 +8,7 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 * 9 *
10 * Support functions for calculating clocks/divisors for the ICST307 10 * Support functions for calculating clocks/divisors for the ICST307
11 * clock generators. See http://www.icst.com/ for more information 11 * clock generators. See http://www.idt.com/ for more information
12 * on these devices. 12 * on these devices.
13 * 13 *
14 * This is an almost identical implementation to the ICST525 clock generator. 14 * This is an almost identical implementation to the ICST525 clock generator.
diff --git a/arch/arm/common/scoop.c b/arch/arm/common/scoop.c
index 9012004321dd..c11af1e4bad3 100644
--- a/arch/arm/common/scoop.c
+++ b/arch/arm/common/scoop.c
@@ -44,12 +44,12 @@ void reset_scoop(struct device *dev)
44{ 44{
45 struct scoop_dev *sdev = dev_get_drvdata(dev); 45 struct scoop_dev *sdev = dev_get_drvdata(dev);
46 46
47 iowrite16(0x0100, sdev->base + SCOOP_MCR); // 00 47 iowrite16(0x0100, sdev->base + SCOOP_MCR); /* 00 */
48 iowrite16(0x0000, sdev->base + SCOOP_CDR); // 04 48 iowrite16(0x0000, sdev->base + SCOOP_CDR); /* 04 */
49 iowrite16(0x0000, sdev->base + SCOOP_CCR); // 10 49 iowrite16(0x0000, sdev->base + SCOOP_CCR); /* 10 */
50 iowrite16(0x0000, sdev->base + SCOOP_IMR); // 18 50 iowrite16(0x0000, sdev->base + SCOOP_IMR); /* 18 */
51 iowrite16(0x00FF, sdev->base + SCOOP_IRM); // 14 51 iowrite16(0x00FF, sdev->base + SCOOP_IRM); /* 14 */
52 iowrite16(0x0000, sdev->base + SCOOP_ISR); // 1C 52 iowrite16(0x0000, sdev->base + SCOOP_ISR); /* 1C */
53 iowrite16(0x0000, sdev->base + SCOOP_IRM); 53 iowrite16(0x0000, sdev->base + SCOOP_IRM);
54} 54}
55 55
diff --git a/arch/arm/common/uengine.c b/arch/arm/common/uengine.c
index b520e56216a9..bef408f3d76c 100644
--- a/arch/arm/common/uengine.c
+++ b/arch/arm/common/uengine.c
@@ -312,16 +312,16 @@ static void generate_ucode(u8 *ucode, u32 *gpr_a, u32 *gpr_b)
312 b1 = (gpr_a[i] >> 8) & 0xff; 312 b1 = (gpr_a[i] >> 8) & 0xff;
313 b0 = gpr_a[i] & 0xff; 313 b0 = gpr_a[i] & 0xff;
314 314
315 // immed[@ai, (b1 << 8) | b0] 315 /* immed[@ai, (b1 << 8) | b0] */
316 // 11110000 0000VVVV VVVV11VV VVVVVV00 1IIIIIII 316 /* 11110000 0000VVVV VVVV11VV VVVVVV00 1IIIIIII */
317 ucode[offset++] = 0xf0; 317 ucode[offset++] = 0xf0;
318 ucode[offset++] = (b1 >> 4); 318 ucode[offset++] = (b1 >> 4);
319 ucode[offset++] = (b1 << 4) | 0x0c | (b0 >> 6); 319 ucode[offset++] = (b1 << 4) | 0x0c | (b0 >> 6);
320 ucode[offset++] = (b0 << 2); 320 ucode[offset++] = (b0 << 2);
321 ucode[offset++] = 0x80 | i; 321 ucode[offset++] = 0x80 | i;
322 322
323 // immed_w1[@ai, (b3 << 8) | b2] 323 /* immed_w1[@ai, (b3 << 8) | b2] */
324 // 11110100 0100VVVV VVVV11VV VVVVVV00 1IIIIIII 324 /* 11110100 0100VVVV VVVV11VV VVVVVV00 1IIIIIII */
325 ucode[offset++] = 0xf4; 325 ucode[offset++] = 0xf4;
326 ucode[offset++] = 0x40 | (b3 >> 4); 326 ucode[offset++] = 0x40 | (b3 >> 4);
327 ucode[offset++] = (b3 << 4) | 0x0c | (b2 >> 6); 327 ucode[offset++] = (b3 << 4) | 0x0c | (b2 >> 6);
@@ -340,16 +340,16 @@ static void generate_ucode(u8 *ucode, u32 *gpr_a, u32 *gpr_b)
340 b1 = (gpr_b[i] >> 8) & 0xff; 340 b1 = (gpr_b[i] >> 8) & 0xff;
341 b0 = gpr_b[i] & 0xff; 341 b0 = gpr_b[i] & 0xff;
342 342
343 // immed[@bi, (b1 << 8) | b0] 343 /* immed[@bi, (b1 << 8) | b0] */
344 // 11110000 0000VVVV VVVV001I IIIIII11 VVVVVVVV 344 /* 11110000 0000VVVV VVVV001I IIIIII11 VVVVVVVV */
345 ucode[offset++] = 0xf0; 345 ucode[offset++] = 0xf0;
346 ucode[offset++] = (b1 >> 4); 346 ucode[offset++] = (b1 >> 4);
347 ucode[offset++] = (b1 << 4) | 0x02 | (i >> 6); 347 ucode[offset++] = (b1 << 4) | 0x02 | (i >> 6);
348 ucode[offset++] = (i << 2) | 0x03; 348 ucode[offset++] = (i << 2) | 0x03;
349 ucode[offset++] = b0; 349 ucode[offset++] = b0;
350 350
351 // immed_w1[@bi, (b3 << 8) | b2] 351 /* immed_w1[@bi, (b3 << 8) | b2] */
352 // 11110100 0100VVVV VVVV001I IIIIII11 VVVVVVVV 352 /* 11110100 0100VVVV VVVV001I IIIIII11 VVVVVVVV */
353 ucode[offset++] = 0xf4; 353 ucode[offset++] = 0xf4;
354 ucode[offset++] = 0x40 | (b3 >> 4); 354 ucode[offset++] = 0x40 | (b3 >> 4);
355 ucode[offset++] = (b3 << 4) | 0x02 | (i >> 6); 355 ucode[offset++] = (b3 << 4) | 0x02 | (i >> 6);
@@ -357,7 +357,7 @@ static void generate_ucode(u8 *ucode, u32 *gpr_a, u32 *gpr_b)
357 ucode[offset++] = b2; 357 ucode[offset++] = b2;
358 } 358 }
359 359
360 // ctx_arb[kill] 360 /* ctx_arb[kill] */
361 ucode[offset++] = 0xe0; 361 ucode[offset++] = 0xe0;
362 ucode[offset++] = 0x00; 362 ucode[offset++] = 0x00;
363 ucode[offset++] = 0x01; 363 ucode[offset++] = 0x01;
diff --git a/arch/arm/include/asm/hardware/icst.h b/arch/arm/include/asm/hardware/icst.h
index 10382a3dcec9..794220b087d2 100644
--- a/arch/arm/include/asm/hardware/icst.h
+++ b/arch/arm/include/asm/hardware/icst.h
@@ -8,7 +8,7 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 * 9 *
10 * Support functions for calculating clocks/divisors for the ICST 10 * Support functions for calculating clocks/divisors for the ICST
11 * clock generators. See http://www.icst.com/ for more information 11 * clock generators. See http://www.idt.com/ for more information
12 * on these devices. 12 * on these devices.
13 */ 13 */
14#ifndef ASMARM_HARDWARE_ICST_H 14#ifndef ASMARM_HARDWARE_ICST_H
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index 851e8139ef9d..abed4d15a7fd 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -109,7 +109,7 @@ config MACH_ONEARM
109 bool "Ajeco 1ARM Single Board Computer" 109 bool "Ajeco 1ARM Single Board Computer"
110 help 110 help
111 Select this if you are using Ajeco's 1ARM Single Board Computer. 111 Select this if you are using Ajeco's 1ARM Single Board Computer.
112 <http://www.ajeco.fi/products.htm> 112 <http://www.ajeco.fi/>
113 113
114config ARCH_AT91RM9200DK 114config ARCH_AT91RM9200DK
115 bool "Atmel AT91RM9200-DK Development board" 115 bool "Atmel AT91RM9200-DK Development board"
@@ -141,7 +141,7 @@ config MACH_CARMEVA
141 bool "Conitec ARM&EVA" 141 bool "Conitec ARM&EVA"
142 help 142 help
143 Select this if you are using Conitec's AT91RM9200-MCU-Module. 143 Select this if you are using Conitec's AT91RM9200-MCU-Module.
144 <http://www.conitec.net/english/linuxboard.htm> 144 <http://www.conitec.net/english/linuxboard.php>
145 145
146config MACH_ATEB9200 146config MACH_ATEB9200
147 bool "Embest ATEB9200" 147 bool "Embest ATEB9200"
@@ -153,7 +153,7 @@ config MACH_KB9200
153 bool "KwikByte KB920x" 153 bool "KwikByte KB920x"
154 help 154 help
155 Select this if you are using KwikByte's KB920x board. 155 Select this if you are using KwikByte's KB920x board.
156 <http://kwikbyte.com/KB9202_description_new.htm> 156 <http://www.kwikbyte.com/KB9202.html>
157 157
158config MACH_PICOTUX2XX 158config MACH_PICOTUX2XX
159 bool "picotux 200" 159 bool "picotux 200"
diff --git a/arch/arm/mach-omap1/Kconfig b/arch/arm/mach-omap1/Kconfig
index 3b02d3b944af..5f6496375404 100644
--- a/arch/arm/mach-omap1/Kconfig
+++ b/arch/arm/mach-omap1/Kconfig
@@ -128,7 +128,7 @@ config MACH_OMAP_PALMTT
128 help 128 help
129 Support for the Palm Tungsten|T PDA. To boot the kernel, you'll 129 Support for the Palm Tungsten|T PDA. To boot the kernel, you'll
130 need a PalmOS compatible bootloader (Garux); check out 130 need a PalmOS compatible bootloader (Garux); check out
131 http://www.hackndev.com/palm/tt/ for more information. 131 http://garux.sourceforge.net/ for more information.
132 Say Y here if you have this PDA model, say N otherwise. 132 Say Y here if you have this PDA model, say N otherwise.
133 133
134config MACH_SX1 134config MACH_SX1
diff --git a/arch/arm/mach-omap2/clock2420_data.c b/arch/arm/mach-omap2/clock2420_data.c
index 37d65d62ed8f..5f2066a6ba74 100644
--- a/arch/arm/mach-omap2/clock2420_data.c
+++ b/arch/arm/mach-omap2/clock2420_data.c
@@ -1838,7 +1838,7 @@ static struct omap_clk omap2420_clks[] = {
1838 CLK(NULL, "des_ick", &des_ick, CK_242X), 1838 CLK(NULL, "des_ick", &des_ick, CK_242X),
1839 CLK("omap-sham", "ick", &sha_ick, CK_242X), 1839 CLK("omap-sham", "ick", &sha_ick, CK_242X),
1840 CLK("omap_rng", "ick", &rng_ick, CK_242X), 1840 CLK("omap_rng", "ick", &rng_ick, CK_242X),
1841 CLK(NULL, "aes_ick", &aes_ick, CK_242X), 1841 CLK("omap-aes", "ick", &aes_ick, CK_242X),
1842 CLK(NULL, "pka_ick", &pka_ick, CK_242X), 1842 CLK(NULL, "pka_ick", &pka_ick, CK_242X),
1843 CLK(NULL, "usb_fck", &usb_fck, CK_242X), 1843 CLK(NULL, "usb_fck", &usb_fck, CK_242X),
1844 CLK("musb_hdrc", "fck", &osc_ck, CK_242X), 1844 CLK("musb_hdrc", "fck", &osc_ck, CK_242X),
diff --git a/arch/arm/mach-omap2/clock2430_data.c b/arch/arm/mach-omap2/clock2430_data.c
index b33118fb6a87..701a1716019e 100644
--- a/arch/arm/mach-omap2/clock2430_data.c
+++ b/arch/arm/mach-omap2/clock2430_data.c
@@ -1926,7 +1926,7 @@ static struct omap_clk omap2430_clks[] = {
1926 CLK(NULL, "des_ick", &des_ick, CK_243X), 1926 CLK(NULL, "des_ick", &des_ick, CK_243X),
1927 CLK("omap-sham", "ick", &sha_ick, CK_243X), 1927 CLK("omap-sham", "ick", &sha_ick, CK_243X),
1928 CLK("omap_rng", "ick", &rng_ick, CK_243X), 1928 CLK("omap_rng", "ick", &rng_ick, CK_243X),
1929 CLK(NULL, "aes_ick", &aes_ick, CK_243X), 1929 CLK("omap-aes", "ick", &aes_ick, CK_243X),
1930 CLK(NULL, "pka_ick", &pka_ick, CK_243X), 1930 CLK(NULL, "pka_ick", &pka_ick, CK_243X),
1931 CLK(NULL, "usb_fck", &usb_fck, CK_243X), 1931 CLK(NULL, "usb_fck", &usb_fck, CK_243X),
1932 CLK("musb_hdrc", "ick", &usbhs_ick, CK_243X), 1932 CLK("musb_hdrc", "ick", &usbhs_ick, CK_243X),
diff --git a/arch/arm/mach-omap2/clock3xxx_data.c b/arch/arm/mach-omap2/clock3xxx_data.c
index dfdce2d82779..c73906d17458 100644
--- a/arch/arm/mach-omap2/clock3xxx_data.c
+++ b/arch/arm/mach-omap2/clock3xxx_data.c
@@ -3288,7 +3288,7 @@ static struct omap_clk omap3xxx_clks[] = {
3288 CLK(NULL, "usbtll_ick", &usbtll_ick, CK_3430ES2 | CK_AM35XX), 3288 CLK(NULL, "usbtll_ick", &usbtll_ick, CK_3430ES2 | CK_AM35XX),
3289 CLK("mmci-omap-hs.2", "ick", &mmchs3_ick, CK_3430ES2 | CK_AM35XX), 3289 CLK("mmci-omap-hs.2", "ick", &mmchs3_ick, CK_3430ES2 | CK_AM35XX),
3290 CLK(NULL, "icr_ick", &icr_ick, CK_343X), 3290 CLK(NULL, "icr_ick", &icr_ick, CK_343X),
3291 CLK(NULL, "aes2_ick", &aes2_ick, CK_343X), 3291 CLK("omap-aes", "ick", &aes2_ick, CK_343X),
3292 CLK("omap-sham", "ick", &sha12_ick, CK_343X), 3292 CLK("omap-sham", "ick", &sha12_ick, CK_343X),
3293 CLK(NULL, "des2_ick", &des2_ick, CK_343X), 3293 CLK(NULL, "des2_ick", &des2_ick, CK_343X),
3294 CLK("mmci-omap-hs.1", "ick", &mmchs2_ick, CK_3XXX), 3294 CLK("mmci-omap-hs.1", "ick", &mmchs2_ick, CK_3XXX),
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 2dbb265bedd4..b27e7cbb3f29 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -498,6 +498,76 @@ static void omap_init_sham(void)
498static inline void omap_init_sham(void) { } 498static inline void omap_init_sham(void) { }
499#endif 499#endif
500 500
501#if defined(CONFIG_CRYPTO_DEV_OMAP_AES) || defined(CONFIG_CRYPTO_DEV_OMAP_AES_MODULE)
502
503#ifdef CONFIG_ARCH_OMAP24XX
504static struct resource omap2_aes_resources[] = {
505 {
506 .start = OMAP24XX_SEC_AES_BASE,
507 .end = OMAP24XX_SEC_AES_BASE + 0x4C,
508 .flags = IORESOURCE_MEM,
509 },
510 {
511 .start = OMAP24XX_DMA_AES_TX,
512 .flags = IORESOURCE_DMA,
513 },
514 {
515 .start = OMAP24XX_DMA_AES_RX,
516 .flags = IORESOURCE_DMA,
517 }
518};
519static int omap2_aes_resources_sz = ARRAY_SIZE(omap2_aes_resources);
520#else
521#define omap2_aes_resources NULL
522#define omap2_aes_resources_sz 0
523#endif
524
525#ifdef CONFIG_ARCH_OMAP34XX
526static struct resource omap3_aes_resources[] = {
527 {
528 .start = OMAP34XX_SEC_AES_BASE,
529 .end = OMAP34XX_SEC_AES_BASE + 0x4C,
530 .flags = IORESOURCE_MEM,
531 },
532 {
533 .start = OMAP34XX_DMA_AES2_TX,
534 .flags = IORESOURCE_DMA,
535 },
536 {
537 .start = OMAP34XX_DMA_AES2_RX,
538 .flags = IORESOURCE_DMA,
539 }
540};
541static int omap3_aes_resources_sz = ARRAY_SIZE(omap3_aes_resources);
542#else
543#define omap3_aes_resources NULL
544#define omap3_aes_resources_sz 0
545#endif
546
547static struct platform_device aes_device = {
548 .name = "omap-aes",
549 .id = -1,
550};
551
552static void omap_init_aes(void)
553{
554 if (cpu_is_omap24xx()) {
555 aes_device.resource = omap2_aes_resources;
556 aes_device.num_resources = omap2_aes_resources_sz;
557 } else if (cpu_is_omap34xx()) {
558 aes_device.resource = omap3_aes_resources;
559 aes_device.num_resources = omap3_aes_resources_sz;
560 } else {
561 pr_err("%s: platform not supported\n", __func__);
562 return;
563 }
564 platform_device_register(&aes_device);
565}
566
567#else
568static inline void omap_init_aes(void) { }
569#endif
570
501/*-------------------------------------------------------------------------*/ 571/*-------------------------------------------------------------------------*/
502 572
503#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4) 573#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
@@ -854,6 +924,7 @@ static int __init omap2_init_devices(void)
854 omap_hdq_init(); 924 omap_hdq_init();
855 omap_init_sti(); 925 omap_init_sti();
856 omap_init_sham(); 926 omap_init_sham();
927 omap_init_aes();
857 omap_init_vout(); 928 omap_init_vout();
858 929
859 return 0; 930 return 0;
diff --git a/arch/arm/mach-omap2/hsmmc.h b/arch/arm/mach-omap2/hsmmc.h
index 1fe6f0187177..0f8a2e6ee284 100644
--- a/arch/arm/mach-omap2/hsmmc.h
+++ b/arch/arm/mach-omap2/hsmmc.h
@@ -23,7 +23,7 @@ struct omap2_hsmmc_info {
23 char *name; /* or NULL for default */ 23 char *name; /* or NULL for default */
24 struct device *dev; /* returned: pointer to mmc adapter */ 24 struct device *dev; /* returned: pointer to mmc adapter */
25 int ocr_mask; /* temporary HACK */ 25 int ocr_mask; /* temporary HACK */
26 /* Remux (pad configuation) when powering on/off */ 26 /* Remux (pad configuration) when powering on/off */
27 void (*remux)(struct device *dev, int slot, int power_on); 27 void (*remux)(struct device *dev, int slot, int power_on);
28 /* init some special card */ 28 /* init some special card */
29 void (*init_card)(struct mmc_card *card); 29 void (*init_card)(struct mmc_card *card);
diff --git a/arch/arm/mach-s3c2440/mach-at2440evb.c b/arch/arm/mach-s3c2440/mach-at2440evb.c
index e3810c86a5e6..6c98b789b8c6 100644
--- a/arch/arm/mach-s3c2440/mach-at2440evb.c
+++ b/arch/arm/mach-s3c2440/mach-at2440evb.c
@@ -5,7 +5,7 @@
5 * and modifications by SBZ <sbz@spgui.org> and 5 * and modifications by SBZ <sbz@spgui.org> and
6 * Weibing <http://weibing.blogbus.com> 6 * Weibing <http://weibing.blogbus.com>
7 * 7 *
8 * For product information, visit http://www.arm9e.com/ 8 * For product information, visit http://www.arm.com/
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as 11 * it under the terms of the GNU General Public License version 2 as
diff --git a/arch/arm/mach-sa1100/Kconfig b/arch/arm/mach-sa1100/Kconfig
index fd4c52b7ccb6..5da8c35aa0de 100644
--- a/arch/arm/mach-sa1100/Kconfig
+++ b/arch/arm/mach-sa1100/Kconfig
@@ -90,8 +90,8 @@ config SA1100_JORNADA720
90 # FIXME: select CPU_FREQ_SA11x0 90 # FIXME: select CPU_FREQ_SA11x0
91 help 91 help
92 Say Y here if you want to build a kernel for the HP Jornada 720 92 Say Y here if you want to build a kernel for the HP Jornada 720
93 handheld computer. See <http://www.hp.com/jornada/products/720> 93 handheld computer. See
94 for details. 94 <http://h10025.www1.hp.com/ewfrf/wc/product?product=61677&cc=us&lc=en&dlc=en&product=61677#>
95 95
96config SA1100_JORNADA720_SSP 96config SA1100_JORNADA720_SSP
97 bool "HP Jornada 720 Extended SSP driver" 97 bool "HP Jornada 720 Extended SSP driver"
@@ -145,7 +145,7 @@ config SA1100_SIMPAD
145 FLASH. The SL4 version got 64 MB RAM and 32 MB FLASH and a 145 FLASH. The SL4 version got 64 MB RAM and 32 MB FLASH and a
146 PCMCIA-Slot. The version for the Germany Telecom (DTAG) is the same 146 PCMCIA-Slot. The version for the Germany Telecom (DTAG) is the same
147 like CL4 in additional it has a PCMCIA-Slot. For more information 147 like CL4 in additional it has a PCMCIA-Slot. For more information
148 visit <http://www.my-siemens.com/> or <http://www.siemens.ch/>. 148 visit <http://www.usa.siemens.com/> or <http://www.siemens.ch/>.
149 149
150config SA1100_SSP 150config SA1100_SSP
151 tristate "Generic PIO SSP" 151 tristate "Generic PIO SSP"
diff --git a/arch/arm/mach-sa1100/cpu-sa1100.c b/arch/arm/mach-sa1100/cpu-sa1100.c
index ef817876a5d6..c0a13ef5436f 100644
--- a/arch/arm/mach-sa1100/cpu-sa1100.c
+++ b/arch/arm/mach-sa1100/cpu-sa1100.c
@@ -13,7 +13,7 @@
13 * This software has been developed while working on the LART 13 * This software has been developed while working on the LART
14 * computing board (http://www.lartmaker.nl/), which is 14 * computing board (http://www.lartmaker.nl/), which is
15 * sponsored by the Mobile Multi-media Communications 15 * sponsored by the Mobile Multi-media Communications
16 * (http://www.mmc.tudelft.nl/) and Ubiquitous Communications 16 * (http://www.mobimedia.org/) and Ubiquitous Communications
17 * (http://www.ubicom.tudelft.nl/) projects. 17 * (http://www.ubicom.tudelft.nl/) projects.
18 * 18 *
19 * The authors can be reached at: 19 * The authors can be reached at:
diff --git a/arch/arm/nwfpe/milieu.h b/arch/arm/nwfpe/milieu.h
index a3892ab2dca4..09a4f2ddeb77 100644
--- a/arch/arm/nwfpe/milieu.h
+++ b/arch/arm/nwfpe/milieu.h
@@ -12,8 +12,8 @@ National Science Foundation under grant MIP-9311980. The original version
12of this code was written as part of a project to build a fixed-point vector 12of this code was written as part of a project to build a fixed-point vector
13processor in collaboration with the University of California at Berkeley, 13processor in collaboration with the University of California at Berkeley,
14overseen by Profs. Nelson Morgan and John Wawrzynek. More information 14overseen by Profs. Nelson Morgan and John Wawrzynek. More information
15is available through the Web page `http://HTTP.CS.Berkeley.EDU/~jhauser/ 15is available through the Web page
16arithmetic/softfloat.html'. 16http://www.jhauser.us/arithmetic/SoftFloat-2b/SoftFloat-source.txt
17 17
18THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort 18THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort
19has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT 19has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT
diff --git a/arch/arm/nwfpe/softfloat-macros b/arch/arm/nwfpe/softfloat-macros
index 5a060f95a58f..cf2a6173149e 100644
--- a/arch/arm/nwfpe/softfloat-macros
+++ b/arch/arm/nwfpe/softfloat-macros
@@ -12,8 +12,8 @@ National Science Foundation under grant MIP-9311980. The original version
12of this code was written as part of a project to build a fixed-point vector 12of this code was written as part of a project to build a fixed-point vector
13processor in collaboration with the University of California at Berkeley, 13processor in collaboration with the University of California at Berkeley,
14overseen by Profs. Nelson Morgan and John Wawrzynek. More information 14overseen by Profs. Nelson Morgan and John Wawrzynek. More information
15is available through the web page `http://HTTP.CS.Berkeley.EDU/~jhauser/ 15is available through the web page
16arithmetic/softfloat.html'. 16http://www.jhauser.us/arithmetic/SoftFloat-2b/SoftFloat-source.txt
17 17
18THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort 18THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort
19has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT 19has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT
diff --git a/arch/arm/nwfpe/softfloat-specialize b/arch/arm/nwfpe/softfloat-specialize
index d4a4c8e06635..679a0269dd25 100644
--- a/arch/arm/nwfpe/softfloat-specialize
+++ b/arch/arm/nwfpe/softfloat-specialize
@@ -12,8 +12,8 @@ National Science Foundation under grant MIP-9311980. The original version
12of this code was written as part of a project to build a fixed-point vector 12of this code was written as part of a project to build a fixed-point vector
13processor in collaboration with the University of California at Berkeley, 13processor in collaboration with the University of California at Berkeley,
14overseen by Profs. Nelson Morgan and John Wawrzynek. More information 14overseen by Profs. Nelson Morgan and John Wawrzynek. More information
15is available through the Web page `http://HTTP.CS.Berkeley.EDU/~jhauser/ 15is available through the Web page
16arithmetic/softfloat.html'. 16http://www.jhauser.us/arithmetic/SoftFloat-2b/SoftFloat-source.txt
17 17
18THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort 18THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort
19has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT 19has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT
diff --git a/arch/arm/nwfpe/softfloat.c b/arch/arm/nwfpe/softfloat.c
index 0f9656e482ba..ffa6b438786b 100644
--- a/arch/arm/nwfpe/softfloat.c
+++ b/arch/arm/nwfpe/softfloat.c
@@ -11,8 +11,8 @@ National Science Foundation under grant MIP-9311980. The original version
11of this code was written as part of a project to build a fixed-point vector 11of this code was written as part of a project to build a fixed-point vector
12processor in collaboration with the University of California at Berkeley, 12processor in collaboration with the University of California at Berkeley,
13overseen by Profs. Nelson Morgan and John Wawrzynek. More information 13overseen by Profs. Nelson Morgan and John Wawrzynek. More information
14is available through the web page `http://HTTP.CS.Berkeley.EDU/~jhauser/ 14is available through the web page
15arithmetic/softfloat.html'. 15http://www.jhauser.us/arithmetic/SoftFloat-2b/SoftFloat-source.txt
16 16
17THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort 17THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort
18has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT 18has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT
diff --git a/arch/arm/nwfpe/softfloat.h b/arch/arm/nwfpe/softfloat.h
index 13e479c5da57..df4d243a2b7c 100644
--- a/arch/arm/nwfpe/softfloat.h
+++ b/arch/arm/nwfpe/softfloat.h
@@ -12,8 +12,8 @@ National Science Foundation under grant MIP-9311980. The original version
12of this code was written as part of a project to build a fixed-point vector 12of this code was written as part of a project to build a fixed-point vector
13processor in collaboration with the University of California at Berkeley, 13processor in collaboration with the University of California at Berkeley,
14overseen by Profs. Nelson Morgan and John Wawrzynek. More information 14overseen by Profs. Nelson Morgan and John Wawrzynek. More information
15is available through the Web page `http://HTTP.CS.Berkeley.EDU/~jhauser/ 15is available through the Web page
16arithmetic/softfloat.html'. 16http://www.jhauser.us/arithmetic/SoftFloat-2b/SoftFloat-source.txt
17 17
18THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort 18THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort
19has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT 19has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT
diff --git a/arch/arm/plat-samsung/include/plat/adc.h b/arch/arm/plat-samsung/include/plat/adc.h
index e8382c7be10b..b258a08de591 100644
--- a/arch/arm/plat-samsung/include/plat/adc.h
+++ b/arch/arm/plat-samsung/include/plat/adc.h
@@ -1,7 +1,7 @@
1/* arch/arm/plat-samsung/include/plat/adc.h 1/* arch/arm/plat-samsung/include/plat/adc.h
2 * 2 *
3 * Copyright (c) 2008 Simtec Electronics 3 * Copyright (c) 2008 Simtec Electronics
4 * http://armlinux.simnte.co.uk/ 4 * http://armlinux.simtec.co.uk/
5 * Ben Dooks <ben@simtec.co.uk> 5 * Ben Dooks <ben@simtec.co.uk>
6 * 6 *
7 * S3C ADC driver information 7 * S3C ADC driver information
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index 9ac87255a03a..f0dc5b8075a7 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -146,7 +146,7 @@ config BOARD_HAMMERHEAD
146 will cover even the most exceptional need of memory bandwidth. Together with the onboard 146 will cover even the most exceptional need of memory bandwidth. Together with the onboard
147 video decoder the board is ready for video processing. 147 video decoder the board is ready for video processing.
148 148
149 For more information see: http://www.miromico.com/hammerhead 149 For more information see: http://www.miromico.ch/index.php/hammerhead.html
150 150
151config BOARD_FAVR_32 151config BOARD_FAVR_32
152 bool "Favr-32 LCD-board" 152 bool "Favr-32 LCD-board"
diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c
index 07c132dc4125..df82723fb504 100644
--- a/arch/blackfin/mach-bf527/boards/ezkit.c
+++ b/arch/blackfin/mach-bf527/boards/ezkit.c
@@ -887,7 +887,7 @@ static struct adp5520_keys_platform_data adp5520_keys_data = {
887}; 887};
888 888
889 /* 889 /*
890 * ADP5520/5501 Multifuction Device Init Data 890 * ADP5520/5501 Multifunction Device Init Data
891 */ 891 */
892 892
893static struct adp5520_platform_data adp5520_pdev_data = { 893static struct adp5520_platform_data adp5520_pdev_data = {
diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c
index c9e0e85629bf..cd2c797c8c9f 100644
--- a/arch/blackfin/mach-bf537/boards/stamp.c
+++ b/arch/blackfin/mach-bf537/boards/stamp.c
@@ -2015,7 +2015,7 @@ static struct adp5520_keys_platform_data adp5520_keys_data = {
2015}; 2015};
2016 2016
2017 /* 2017 /*
2018 * ADP5520/5501 Multifuction Device Init Data 2018 * ADP5520/5501 Multifunction Device Init Data
2019 */ 2019 */
2020 2020
2021static struct adp5520_platform_data adp5520_pdev_data = { 2021static struct adp5520_platform_data adp5520_pdev_data = {
diff --git a/arch/h8300/Kconfig.cpu b/arch/h8300/Kconfig.cpu
index 6e2ecff199c5..d236ab4232ca 100644
--- a/arch/h8300/Kconfig.cpu
+++ b/arch/h8300/Kconfig.cpu
@@ -17,7 +17,7 @@ config H8300H_AKI3068NET
17 help 17 help
18 AKI-H8/3068F / AKI-H8/3069F Flashmicom LAN Board Support 18 AKI-H8/3068F / AKI-H8/3069F Flashmicom LAN Board Support
19 More Information. (Japanese Only) 19 More Information. (Japanese Only)
20 <http://akizukidensi.com/catalog/h8.html> 20 <http://akizukidenshi.com/catalog/default.aspx>
21 AE-3068/69 Evaluation Board Support 21 AE-3068/69 Evaluation Board Support
22 More Information. 22 More Information.
23 <http://www.microtronique.com/ae3069lan.htm> 23 <http://www.microtronique.com/ae3069lan.htm>
@@ -36,7 +36,7 @@ config H8300H_SIM
36 help 36 help
37 GDB Simulator Support 37 GDB Simulator Support
38 More Information. 38 More Information.
39 arch/h8300/Doc/simulator.txt 39 <http://sourceware.org/sid/>
40 40
41config H8S_GENERIC 41config H8S_GENERIC
42 bool "H8S Generic" 42 bool "H8S Generic"
@@ -50,14 +50,14 @@ config H8S_EDOSK2674
50 Renesas EDOSK-2674 Evaluation Board Support 50 Renesas EDOSK-2674 Evaluation Board Support
51 More Information. 51 More Information.
52 <http://www.azpower.com/H8-uClinux/index.html> 52 <http://www.azpower.com/H8-uClinux/index.html>
53 <http://www.eu.renesas.com/tools/edk/support/edosk2674.html> 53 <http://www.renesas.eu/products/tools/introductory_evaluation_tools/evaluation_development_os_kits/edosk2674r/edosk2674r_software_tools_root.jsp>
54 54
55config H8S_SIM 55config H8S_SIM
56 bool "H8S Simulator" 56 bool "H8S Simulator"
57 help 57 help
58 GDB Simulator Support 58 GDB Simulator Support
59 More Information. 59 More Information.
60 arch/h8300/Doc/simulator.txt 60 <http://sourceware.org/sid/>
61 61
62endchoice 62endchoice
63 63
diff --git a/arch/h8300/README b/arch/h8300/README
index 2fd6f6d7a019..637f5a02f311 100644
--- a/arch/h8300/README
+++ b/arch/h8300/README
@@ -18,6 +18,7 @@ H8/300H and H8S
18 18
194.EDOSK2674 194.EDOSK2674
20 see http://www.eu.renesas.com/products/mpumcu/tool/edk/support/edosk2674.html 20 see http://www.eu.renesas.com/products/mpumcu/tool/edk/support/edosk2674.html
21 http://www.uclinux.org/pub/uClinux/ports/h8/HITACHI-EDOSK2674-HOWTO
21 http://www.azpower.com/H8-uClinux/ 22 http://www.azpower.com/H8-uClinux/
22 23
23* Toolchain Version 24* Toolchain Version
diff --git a/arch/ia64/kvm/lapic.h b/arch/ia64/kvm/lapic.h
index ee541cebcd78..c5f92a926a9a 100644
--- a/arch/ia64/kvm/lapic.h
+++ b/arch/ia64/kvm/lapic.h
@@ -25,5 +25,6 @@ int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
25int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2); 25int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
26int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq); 26int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq);
27#define kvm_apic_present(x) (true) 27#define kvm_apic_present(x) (true)
28#define kvm_lapic_enabled(x) (true)
28 29
29#endif 30#endif
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index 836abbbc9c04..3867fd21f333 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -315,7 +315,7 @@ config SMP
315 Management" code will be disabled if you say Y here. 315 Management" code will be disabled if you say Y here.
316 316
317 See also the SMP-HOWTO available at 317 See also the SMP-HOWTO available at
318 <http://www.linuxdoc.org/docs.html#howto>. 318 <http://tldp.org/HOWTO/SMP-HOWTO.html>.
319 319
320 If you don't know what to do here, say N. 320 If you don't know what to do here, say N.
321 321
diff --git a/arch/m68k/include/asm/cacheflush_no.h b/arch/m68k/include/asm/cacheflush_no.h
index 89f195656be7..7085bd51668b 100644
--- a/arch/m68k/include/asm/cacheflush_no.h
+++ b/arch/m68k/include/asm/cacheflush_no.h
@@ -29,7 +29,7 @@
29 29
30static inline void __flush_cache_all(void) 30static inline void __flush_cache_all(void)
31{ 31{
32#ifdef CONFIG_M5407 32#if defined(CONFIG_M5407) || defined(CONFIG_M548x)
33 /* 33 /*
34 * Use cpushl to push and invalidate all cache lines. 34 * Use cpushl to push and invalidate all cache lines.
35 * Gas doesn't seem to know how to generate the ColdFire 35 * Gas doesn't seem to know how to generate the ColdFire
diff --git a/arch/m68k/include/asm/coldfire.h b/arch/m68k/include/asm/coldfire.h
index 83a9fa4e618a..3b0a34d0fe33 100644
--- a/arch/m68k/include/asm/coldfire.h
+++ b/arch/m68k/include/asm/coldfire.h
@@ -32,7 +32,9 @@
32 */ 32 */
33#define MCF_MBAR 0x10000000 33#define MCF_MBAR 0x10000000
34#define MCF_MBAR2 0x80000000 34#define MCF_MBAR2 0x80000000
35#if defined(CONFIG_M520x) 35#if defined(CONFIG_M548x)
36#define MCF_IPSBAR MCF_MBAR
37#elif defined(CONFIG_M520x)
36#define MCF_IPSBAR 0xFC000000 38#define MCF_IPSBAR 0xFC000000
37#else 39#else
38#define MCF_IPSBAR 0x40000000 40#define MCF_IPSBAR 0x40000000
diff --git a/arch/m68k/include/asm/gpio.h b/arch/m68k/include/asm/gpio.h
index 283214dc65a7..1b57adbafad5 100644
--- a/arch/m68k/include/asm/gpio.h
+++ b/arch/m68k/include/asm/gpio.h
@@ -36,7 +36,8 @@
36 */ 36 */
37#if defined(CONFIG_M5206) || defined(CONFIG_M5206e) || \ 37#if defined(CONFIG_M5206) || defined(CONFIG_M5206e) || \
38 defined(CONFIG_M520x) || defined(CONFIG_M523x) || \ 38 defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
39 defined(CONFIG_M527x) || defined(CONFIG_M528x) || defined(CONFIG_M532x) 39 defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
40 defined(CONFIG_M532x) || defined(CONFIG_M548x)
40 41
41/* These parts have GPIO organized by 8 bit ports */ 42/* These parts have GPIO organized by 8 bit ports */
42 43
@@ -136,6 +137,8 @@ static inline u32 __mcf_gpio_ppdr(unsigned gpio)
136#endif 137#endif
137 else 138 else
138 return MCFGPIO_PPDR + mcfgpio_port(gpio - MCFGPIO_SCR_START); 139 return MCFGPIO_PPDR + mcfgpio_port(gpio - MCFGPIO_SCR_START);
140#else
141 return 0;
139#endif 142#endif
140} 143}
141 144
@@ -173,6 +176,8 @@ static inline u32 __mcf_gpio_podr(unsigned gpio)
173#endif 176#endif
174 else 177 else
175 return MCFGPIO_PODR + mcfgpio_port(gpio - MCFGPIO_SCR_START); 178 return MCFGPIO_PODR + mcfgpio_port(gpio - MCFGPIO_SCR_START);
179#else
180 return 0;
176#endif 181#endif
177} 182}
178 183
diff --git a/arch/m68k/include/asm/m548xgpt.h b/arch/m68k/include/asm/m548xgpt.h
new file mode 100644
index 000000000000..c8ef158a1c4e
--- /dev/null
+++ b/arch/m68k/include/asm/m548xgpt.h
@@ -0,0 +1,88 @@
1/*
2 * File: m548xgpt.h
3 * Purpose: Register and bit definitions for the MCF548X
4 *
5 * Notes:
6 *
7 */
8
9#ifndef m548xgpt_h
10#define m548xgpt_h
11
12/*********************************************************************
13*
14* General Purpose Timers (GPT)
15*
16*********************************************************************/
17
18/* Register read/write macros */
19#define MCF_GPT_GMS0 0x000800
20#define MCF_GPT_GCIR0 0x000804
21#define MCF_GPT_GPWM0 0x000808
22#define MCF_GPT_GSR0 0x00080C
23#define MCF_GPT_GMS1 0x000810
24#define MCF_GPT_GCIR1 0x000814
25#define MCF_GPT_GPWM1 0x000818
26#define MCF_GPT_GSR1 0x00081C
27#define MCF_GPT_GMS2 0x000820
28#define MCF_GPT_GCIR2 0x000824
29#define MCF_GPT_GPWM2 0x000828
30#define MCF_GPT_GSR2 0x00082C
31#define MCF_GPT_GMS3 0x000830
32#define MCF_GPT_GCIR3 0x000834
33#define MCF_GPT_GPWM3 0x000838
34#define MCF_GPT_GSR3 0x00083C
35#define MCF_GPT_GMS(x) (0x000800+((x)*0x010))
36#define MCF_GPT_GCIR(x) (0x000804+((x)*0x010))
37#define MCF_GPT_GPWM(x) (0x000808+((x)*0x010))
38#define MCF_GPT_GSR(x) (0x00080C+((x)*0x010))
39
40/* Bit definitions and macros for MCF_GPT_GMS */
41#define MCF_GPT_GMS_TMS(x) (((x)&0x00000007)<<0)
42#define MCF_GPT_GMS_GPIO(x) (((x)&0x00000003)<<4)
43#define MCF_GPT_GMS_IEN (0x00000100)
44#define MCF_GPT_GMS_OD (0x00000200)
45#define MCF_GPT_GMS_SC (0x00000400)
46#define MCF_GPT_GMS_CE (0x00001000)
47#define MCF_GPT_GMS_WDEN (0x00008000)
48#define MCF_GPT_GMS_ICT(x) (((x)&0x00000003)<<16)
49#define MCF_GPT_GMS_OCT(x) (((x)&0x00000003)<<20)
50#define MCF_GPT_GMS_OCPW(x) (((x)&0x000000FF)<<24)
51#define MCF_GPT_GMS_OCT_FRCLOW (0x00000000)
52#define MCF_GPT_GMS_OCT_PULSEHI (0x00100000)
53#define MCF_GPT_GMS_OCT_PULSELO (0x00200000)
54#define MCF_GPT_GMS_OCT_TOGGLE (0x00300000)
55#define MCF_GPT_GMS_ICT_ANY (0x00000000)
56#define MCF_GPT_GMS_ICT_RISE (0x00010000)
57#define MCF_GPT_GMS_ICT_FALL (0x00020000)
58#define MCF_GPT_GMS_ICT_PULSE (0x00030000)
59#define MCF_GPT_GMS_GPIO_INPUT (0x00000000)
60#define MCF_GPT_GMS_GPIO_OUTLO (0x00000020)
61#define MCF_GPT_GMS_GPIO_OUTHI (0x00000030)
62#define MCF_GPT_GMS_TMS_DISABLE (0x00000000)
63#define MCF_GPT_GMS_TMS_INCAPT (0x00000001)
64#define MCF_GPT_GMS_TMS_OUTCAPT (0x00000002)
65#define MCF_GPT_GMS_TMS_PWM (0x00000003)
66#define MCF_GPT_GMS_TMS_GPIO (0x00000004)
67
68/* Bit definitions and macros for MCF_GPT_GCIR */
69#define MCF_GPT_GCIR_CNT(x) (((x)&0x0000FFFF)<<0)
70#define MCF_GPT_GCIR_PRE(x) (((x)&0x0000FFFF)<<16)
71
72/* Bit definitions and macros for MCF_GPT_GPWM */
73#define MCF_GPT_GPWM_LOAD (0x00000001)
74#define MCF_GPT_GPWM_PWMOP (0x00000100)
75#define MCF_GPT_GPWM_WIDTH(x) (((x)&0x0000FFFF)<<16)
76
77/* Bit definitions and macros for MCF_GPT_GSR */
78#define MCF_GPT_GSR_CAPT (0x00000001)
79#define MCF_GPT_GSR_COMP (0x00000002)
80#define MCF_GPT_GSR_PWMP (0x00000004)
81#define MCF_GPT_GSR_TEXP (0x00000008)
82#define MCF_GPT_GSR_PIN (0x00000100)
83#define MCF_GPT_GSR_OVF(x) (((x)&0x00000007)<<12)
84#define MCF_GPT_GSR_CAPTURE(x) (((x)&0x0000FFFF)<<16)
85
86/********************************************************************/
87
88#endif /* m548xgpt_h */
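
The GMS, GCIR and GPWM defines above are enough to drive a GPT channel end to end. As an illustration only (this snippet is not part of the patch, it simply mirrors how the 548x reset hook added later in this series arms GPT0 as a watchdog):

	/* arm GPT0 as a watchdog with the shortest terminal count */
	__raw_writel(0, MCF_MBAR + MCF_GPT_GMS0);		/* stop and clear timer 0 */
	__raw_writel(MCF_GPT_GCIR_CNT(1), MCF_MBAR + MCF_GPT_GCIR0);
	__raw_writel(MCF_GPT_GMS_WDEN | MCF_GPT_GMS_CE | MCF_GPT_GMS_TMS(4),
		     MCF_MBAR + MCF_GPT_GMS0);		/* watchdog + counter enable */
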
diff --git a/arch/m68k/include/asm/m548xsim.h b/arch/m68k/include/asm/m548xsim.h
new file mode 100644
index 000000000000..149135ef30d2
--- /dev/null
+++ b/arch/m68k/include/asm/m548xsim.h
@@ -0,0 +1,55 @@
1/*
2 * m548xsim.h -- ColdFire 547x/548x System Integration Unit support.
3 */
4
5#ifndef m548xsim_h
6#define m548xsim_h
7
8#define MCFINT_VECBASE 64
9
10/*
11 * Interrupt Controller Registers
12 */
13#define MCFICM_INTC0 0x0700 /* Base for Interrupt Ctrl 0 */
14#define MCFINTC_IPRH 0x00 /* Interrupt pending 32-63 */
15#define MCFINTC_IPRL 0x04 /* Interrupt pending 1-31 */
16#define MCFINTC_IMRH 0x08 /* Interrupt mask 32-63 */
17#define MCFINTC_IMRL 0x0c /* Interrupt mask 1-31 */
18#define MCFINTC_INTFRCH 0x10 /* Interrupt force 32-63 */
19#define MCFINTC_INTFRCL 0x14 /* Interrupt force 1-31 */
20#define MCFINTC_IRLR 0x18 /* */
21#define MCFINTC_IACKL 0x19 /* */
22#define MCFINTC_ICR0 0x40 /* Base ICR register */
23
24/*
25 * Define system peripheral IRQ usage.
26 */
27#define MCF_IRQ_TIMER (64 + 54) /* Slice Timer 0 */
28#define MCF_IRQ_PROFILER (64 + 53) /* Slice Timer 1 */
29
30/*
31 * Generic GPIO support
32 */
33#define MCFGPIO_PIN_MAX 0 /* I am too lazy to count */
34#define MCFGPIO_IRQ_MAX -1
35#define MCFGPIO_IRQ_VECBASE -1
36
37/*
38 * Some PSC related definitions
39 */
40#define MCF_PAR_PSC(x) (0x000A4F-((x)&0x3))
41#define MCF_PAR_SDA (0x0008)
42#define MCF_PAR_SCL (0x0004)
43#define MCF_PAR_PSC_TXD (0x04)
44#define MCF_PAR_PSC_RXD (0x08)
45#define MCF_PAR_PSC_RTS(x) (((x)&0x03)<<4)
46#define MCF_PAR_PSC_CTS(x) (((x)&0x03)<<6)
47#define MCF_PAR_PSC_CTS_GPIO (0x00)
48#define MCF_PAR_PSC_CTS_BCLK (0x80)
49#define MCF_PAR_PSC_CTS_CTS (0xC0)
50#define MCF_PAR_PSC_RTS_GPIO (0x00)
51#define MCF_PAR_PSC_RTS_FSYNC (0x20)
52#define MCF_PAR_PSC_RTS_RTS (0x30)
53#define MCF_PAR_PSC_CANRX (0x40)
54
55#endif /* m548xsim_h */
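
For orientation, the interrupt controller offsets above are used by the shared intc-2 code further down in this patch; on the 548x MCF_IPSBAR aliases MCF_MBAR (see the coldfire.h hunk above), so masking a source reduces to setting one bit in IMRH or IMRL. A rough sketch, assuming a vector in the MCFINT_VECBASE..MCFINT_VECBASE+63 range:

	/* sketch: mask (disable) one interrupt source on the 548x */
	unsigned long imraddr = MCF_IPSBAR + MCFICM_INTC0 +
		(((irq - MCFINT_VECBASE) & 0x20) ? MCFINTC_IMRH : MCFINTC_IMRL);
	u32 imrbit = 0x1 << ((irq - MCFINT_VECBASE) & 0x1f);

	__raw_writel(__raw_readl(imraddr) | imrbit, imraddr);
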
diff --git a/arch/m68k/include/asm/mcfcache.h b/arch/m68k/include/asm/mcfcache.h
index c042634fadaa..f49dfc09f70a 100644
--- a/arch/m68k/include/asm/mcfcache.h
+++ b/arch/m68k/include/asm/mcfcache.h
@@ -107,7 +107,7 @@
107.endm 107.endm
108#endif /* CONFIG_M532x */ 108#endif /* CONFIG_M532x */
109 109
110#if defined(CONFIG_M5407) 110#if defined(CONFIG_M5407) || defined(CONFIG_M548x)
111/* 111/*
112 * Version 4 cores have a true harvard style separate instruction 112 * Version 4 cores have a true harvard style separate instruction
113 * and data cache. Invalidate and enable cache, also enable write 113 * and data cache. Invalidate and enable cache, also enable write
diff --git a/arch/m68k/include/asm/mcfsim.h b/arch/m68k/include/asm/mcfsim.h
index 9c70a67bf85f..6901fd68165b 100644
--- a/arch/m68k/include/asm/mcfsim.h
+++ b/arch/m68k/include/asm/mcfsim.h
@@ -41,6 +41,8 @@
41#elif defined(CONFIG_M5407) 41#elif defined(CONFIG_M5407)
42#include <asm/m5407sim.h> 42#include <asm/m5407sim.h>
43#include <asm/mcfintc.h> 43#include <asm/mcfintc.h>
44#elif defined(CONFIG_M548x)
45#include <asm/m548xsim.h>
44#endif 46#endif
45 47
46/****************************************************************************/ 48/****************************************************************************/
diff --git a/arch/m68k/include/asm/mcfslt.h b/arch/m68k/include/asm/mcfslt.h
new file mode 100644
index 000000000000..d0d0ecba5333
--- /dev/null
+++ b/arch/m68k/include/asm/mcfslt.h
@@ -0,0 +1,44 @@
1/****************************************************************************/
2
3/*
4 * mcfslt.h -- ColdFire internal Slice (SLT) timer support defines.
5 *
6 * (C) Copyright 2004, Greg Ungerer (gerg@snapgear.com)
7 * (C) Copyright 2009, Philippe De Muyter (phdm@macqel.be)
8 */
9
10/****************************************************************************/
11#ifndef mcfslt_h
12#define mcfslt_h
13/****************************************************************************/
14
15/*
16 * Get address specific defines for the 547x.
17 */
18#define MCFSLT_TIMER0 0x900 /* Base address of TIMER0 */
19#define MCFSLT_TIMER1 0x910 /* Base address of TIMER1 */
20
21
22/*
23 * Define the SLT timer register set addresses.
24 */
25#define MCFSLT_STCNT 0x00 /* Terminal count */
26#define MCFSLT_SCR 0x04 /* Control */
27#define MCFSLT_SCNT 0x08 /* Current count */
28#define MCFSLT_SSR 0x0C /* Status */
29
30/*
31 * Bit definitions for the SCR control register.
32 */
33#define MCFSLT_SCR_RUN 0x04000000 /* Run mode (continuous) */
34#define MCFSLT_SCR_IEN 0x02000000 /* Interrupt enable */
35#define MCFSLT_SCR_TEN 0x01000000 /* Timer enable */
36
37/*
38 * Bit definitions for the SSR status register.
39 */
40#define MCFSLT_SSR_BE 0x02000000 /* Bus error condition */
41#define MCFSLT_SSR_TE 0x01000000 /* Timeout condition */
42
43/****************************************************************************/
44#endif /* mcfslt_h */
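
The slice timer itself is driven by the new sltimers code selected for the 548x in the ColdFire platform Makefile below; its programming model follows directly from the defines above: load a terminal count, set the run/interrupt/enable bits, and write the timeout bit back to acknowledge. A minimal sketch, assuming a base of MCF_MBAR + MCFSLT_TIMER0 and a placeholder cycles_per_tick value:

	/* start slice timer 0 as a periodic tick source */
	__raw_writel(cycles_per_tick, MCF_MBAR + MCFSLT_TIMER0 + MCFSLT_STCNT);
	__raw_writel(MCFSLT_SCR_RUN | MCFSLT_SCR_IEN | MCFSLT_SCR_TEN,
		     MCF_MBAR + MCFSLT_TIMER0 + MCFSLT_SCR);

	/* in the tick handler: clear the timeout condition */
	__raw_writel(MCFSLT_SSR_TE, MCF_MBAR + MCFSLT_TIMER0 + MCFSLT_SSR);
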
diff --git a/arch/m68k/include/asm/mcfuart.h b/arch/m68k/include/asm/mcfuart.h
index 01a8716c5fc5..db72e2b889ca 100644
--- a/arch/m68k/include/asm/mcfuart.h
+++ b/arch/m68k/include/asm/mcfuart.h
@@ -47,6 +47,11 @@
47#define MCFUART_BASE1 0xfc060000 /* Base address of UART1 */ 47#define MCFUART_BASE1 0xfc060000 /* Base address of UART1 */
48#define MCFUART_BASE2 0xfc064000 /* Base address of UART2 */ 48#define MCFUART_BASE2 0xfc064000 /* Base address of UART2 */
49#define MCFUART_BASE3 0xfc068000 /* Base address of UART3 */ 49#define MCFUART_BASE3 0xfc068000 /* Base address of UART3 */
50#elif defined(CONFIG_M548x)
51#define MCFUART_BASE1 0x8600 /* on M548x */
52#define MCFUART_BASE2 0x8700 /* on M548x */
53#define MCFUART_BASE3 0x8800 /* on M548x */
54#define MCFUART_BASE4 0x8900 /* on M548x */
50#endif 55#endif
51 56
52 57
@@ -212,7 +217,9 @@ struct mcf_platform_uart {
212#define MCFUART_URF_RXS 0xc0 /* Receiver status */ 217#define MCFUART_URF_RXS 0xc0 /* Receiver status */
213#endif 218#endif
214 219
215#if defined(CONFIG_M5272) 220#if defined(CONFIG_M548x)
221#define MCFUART_TXFIFOSIZE 512
222#elif defined(CONFIG_M5272)
216#define MCFUART_TXFIFOSIZE 25 223#define MCFUART_TXFIFOSIZE 25
217#else 224#else
218#define MCFUART_TXFIFOSIZE 1 225#define MCFUART_TXFIFOSIZE 1
diff --git a/arch/m68k/kernel/asm-offsets.c b/arch/m68k/kernel/asm-offsets.c
index 73e5e581245b..78e59b82ebc3 100644
--- a/arch/m68k/kernel/asm-offsets.c
+++ b/arch/m68k/kernel/asm-offsets.c
@@ -22,13 +22,9 @@
22int main(void) 22int main(void)
23{ 23{
24 /* offsets into the task struct */ 24 /* offsets into the task struct */
25 DEFINE(TASK_STATE, offsetof(struct task_struct, state));
26 DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
27 DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
28 DEFINE(TASK_THREAD, offsetof(struct task_struct, thread)); 25 DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
29 DEFINE(TASK_INFO, offsetof(struct task_struct, thread.info)); 26 DEFINE(TASK_INFO, offsetof(struct task_struct, thread.info));
30 DEFINE(TASK_MM, offsetof(struct task_struct, mm)); 27 DEFINE(TASK_MM, offsetof(struct task_struct, mm));
31 DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
32#ifdef CONFIG_MMU 28#ifdef CONFIG_MMU
33 DEFINE(TASK_TINFO, offsetof(struct task_struct, thread.info)); 29 DEFINE(TASK_TINFO, offsetof(struct task_struct, thread.info));
34#endif 30#endif
@@ -64,14 +60,6 @@ int main(void)
64 /* bitfields are a bit difficult */ 60 /* bitfields are a bit difficult */
65 DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, pc) + 4); 61 DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, pc) + 4);
66 62
67 /* offsets into the irq_handler struct */
68 DEFINE(IRQ_HANDLER, offsetof(struct irq_node, handler));
69 DEFINE(IRQ_DEVID, offsetof(struct irq_node, dev_id));
70 DEFINE(IRQ_NEXT, offsetof(struct irq_node, next));
71
72 /* offsets into the kernel_stat struct */
73 DEFINE(STAT_IRQ, offsetof(struct kernel_stat, irqs));
74
75 /* offsets into the irq_cpustat_t struct */ 63 /* offsets into the irq_cpustat_t struct */
76 DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending)); 64 DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
77 65
diff --git a/arch/m68k/mac/macboing.c b/arch/m68k/mac/macboing.c
index 05285d08e547..ffaa1f6439ae 100644
--- a/arch/m68k/mac/macboing.c
+++ b/arch/m68k/mac/macboing.c
@@ -114,7 +114,8 @@ static void mac_init_asc( void )
114 * 16-bit I/O functionality. The PowerBook 500 series computers 114 * 16-bit I/O functionality. The PowerBook 500 series computers
115 * support 16-bit stereo output, but only mono input." 115 * support 16-bit stereo output, but only mono input."
116 * 116 *
117 * http://til.info.apple.com/techinfo.nsf/artnum/n16405 117 * Technical Information Library (TIL) article number 16405.
118 * http://support.apple.com/kb/TA32601
118 * 119 *
119 * --David Kilzer 120 * --David Kilzer
120 */ 121 */
diff --git a/arch/m68k/q40/README b/arch/m68k/q40/README
index 6bdbf4879570..f877b7249790 100644
--- a/arch/m68k/q40/README
+++ b/arch/m68k/q40/README
@@ -3,7 +3,7 @@ Linux for the Q40
3 3
4You may try http://www.geocities.com/SiliconValley/Bay/2602/ for 4You may try http://www.geocities.com/SiliconValley/Bay/2602/ for
5some up to date information. Booter and other tools will be also 5some up to date information. Booter and other tools will be also
6available from this place or ftp.uni-erlangen.de/linux/680x0/q40/ 6available from this place or http://ftp.uni-erlangen.de/pub/unix/Linux/680x0/q40/
7and mirrors. 7and mirrors.
8 8
9Hints to documentation usually refer to the linux source tree in 9Hints to documentation usually refer to the linux source tree in
diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig
index 2609c394e1df..9287150e5fb0 100644
--- a/arch/m68knommu/Kconfig
+++ b/arch/m68knommu/Kconfig
@@ -59,6 +59,10 @@ config GENERIC_HARDIRQS
59 bool 59 bool
60 default y 60 default y
61 61
62config GENERIC_HARDIRQS_NO__DO_IRQ
63 bool
64 default y
65
62config GENERIC_CALIBRATE_DELAY 66config GENERIC_CALIBRATE_DELAY
63 bool 67 bool
64 default y 68 default y
@@ -171,6 +175,11 @@ config M5407
171 help 175 help
172 Motorola ColdFire 5407 processor support. 176 Motorola ColdFire 5407 processor support.
173 177
178config M548x
179 bool "MCF548x"
180 help
181 Freescale ColdFire 5480/5481/5482/5483/5484/5485 processor support.
182
174endchoice 183endchoice
175 184
176config M527x 185config M527x
@@ -181,7 +190,7 @@ config M527x
181 190
182config COLDFIRE 191config COLDFIRE
183 bool 192 bool
184 depends on (M5206 || M5206e || M520x || M523x || M5249 || M527x || M5272 || M528x || M5307 || M532x || M5407) 193 depends on (M5206 || M5206e || M520x || M523x || M5249 || M527x || M5272 || M528x || M5307 || M532x || M5407 || M548x)
185 select GENERIC_GPIO 194 select GENERIC_GPIO
186 select ARCH_REQUIRE_GPIOLIB 195 select ARCH_REQUIRE_GPIOLIB
187 default y 196 default y
diff --git a/arch/m68knommu/Makefile b/arch/m68knommu/Makefile
index 14042574ac21..026ef16fa68e 100644
--- a/arch/m68knommu/Makefile
+++ b/arch/m68knommu/Makefile
@@ -25,6 +25,7 @@ platform-$(CONFIG_M528x) := 528x
25platform-$(CONFIG_M5307) := 5307 25platform-$(CONFIG_M5307) := 5307
26platform-$(CONFIG_M532x) := 532x 26platform-$(CONFIG_M532x) := 532x
27platform-$(CONFIG_M5407) := 5407 27platform-$(CONFIG_M5407) := 5407
28platform-$(CONFIG_M548x) := 548x
28PLATFORM := $(platform-y) 29PLATFORM := $(platform-y)
29 30
30board-$(CONFIG_PILOT) := pilot 31board-$(CONFIG_PILOT) := pilot
@@ -73,6 +74,7 @@ cpuclass-$(CONFIG_M528x) := coldfire
73cpuclass-$(CONFIG_M5307) := coldfire 74cpuclass-$(CONFIG_M5307) := coldfire
74cpuclass-$(CONFIG_M532x) := coldfire 75cpuclass-$(CONFIG_M532x) := coldfire
75cpuclass-$(CONFIG_M5407) := coldfire 76cpuclass-$(CONFIG_M5407) := coldfire
77cpuclass-$(CONFIG_M548x) := coldfire
76cpuclass-$(CONFIG_M68328) := 68328 78cpuclass-$(CONFIG_M68328) := 68328
77cpuclass-$(CONFIG_M68EZ328) := 68328 79cpuclass-$(CONFIG_M68EZ328) := 68328
78cpuclass-$(CONFIG_M68VZ328) := 68328 80cpuclass-$(CONFIG_M68VZ328) := 68328
@@ -100,6 +102,7 @@ cflags-$(CONFIG_M528x) := $(call cc-option,-m528x,-m5307)
100cflags-$(CONFIG_M5307) := $(call cc-option,-m5307,-m5200) 102cflags-$(CONFIG_M5307) := $(call cc-option,-m5307,-m5200)
101cflags-$(CONFIG_M532x) := $(call cc-option,-mcpu=532x,-m5307) 103cflags-$(CONFIG_M532x) := $(call cc-option,-mcpu=532x,-m5307)
102cflags-$(CONFIG_M5407) := $(call cc-option,-m5407,-m5200) 104cflags-$(CONFIG_M5407) := $(call cc-option,-m5407,-m5200)
105cflags-$(CONFIG_M548x) := $(call cc-option,-m5407,-m5200)
103cflags-$(CONFIG_M68328) := -m68000 106cflags-$(CONFIG_M68328) := -m68000
104cflags-$(CONFIG_M68EZ328) := -m68000 107cflags-$(CONFIG_M68EZ328) := -m68000
105cflags-$(CONFIG_M68VZ328) := -m68000 108cflags-$(CONFIG_M68VZ328) := -m68000
diff --git a/arch/m68knommu/kernel/.gitignore b/arch/m68knommu/kernel/.gitignore
new file mode 100644
index 000000000000..c5f676c3c224
--- /dev/null
+++ b/arch/m68knommu/kernel/.gitignore
@@ -0,0 +1 @@
vmlinux.lds
diff --git a/arch/m68knommu/kernel/asm-offsets.c b/arch/m68knommu/kernel/asm-offsets.c
index 24335022fa2c..ffe02f41ad46 100644
--- a/arch/m68knommu/kernel/asm-offsets.c
+++ b/arch/m68knommu/kernel/asm-offsets.c
@@ -21,14 +21,8 @@
21int main(void) 21int main(void)
22{ 22{
23 /* offsets into the task struct */ 23 /* offsets into the task struct */
24 DEFINE(TASK_STATE, offsetof(struct task_struct, state));
25 DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
26 DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
27 DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked));
28 DEFINE(TASK_THREAD, offsetof(struct task_struct, thread)); 24 DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
29 DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
30 DEFINE(TASK_MM, offsetof(struct task_struct, mm)); 25 DEFINE(TASK_MM, offsetof(struct task_struct, mm));
31 DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
32 26
33 /* offsets into the irq_cpustat_t struct */ 27 /* offsets into the irq_cpustat_t struct */
34 DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending)); 28 DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
@@ -63,7 +57,7 @@ int main(void)
63 DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, sr) - 2); 57 DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, sr) - 2);
64#else 58#else
65 /* bitfields are a bit difficult */ 59 /* bitfields are a bit difficult */
66 DEFINE(PT_OFF_VECTOR, offsetof(struct pt_regs, pc) + 4); 60 DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, pc) + 4);
67#endif 61#endif
68 62
69 /* signal defines */ 63 /* signal defines */
@@ -75,11 +69,8 @@ int main(void)
75 DEFINE(PT_PTRACED, PT_PTRACED); 69 DEFINE(PT_PTRACED, PT_PTRACED);
76 70
77 /* Offsets in thread_info structure */ 71 /* Offsets in thread_info structure */
78 DEFINE(TI_TASK, offsetof(struct thread_info, task));
79 DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain));
80 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); 72 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
81 DEFINE(TI_PREEMPTCOUNT, offsetof(struct thread_info, preempt_count)); 73 DEFINE(TI_PREEMPTCOUNT, offsetof(struct thread_info, preempt_count));
82 DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
83 74
84 return 0; 75 return 0;
85} 76}
diff --git a/arch/m68knommu/kernel/ptrace.c b/arch/m68knommu/kernel/ptrace.c
index f6be1248d216..6fe7c38cd556 100644
--- a/arch/m68knommu/kernel/ptrace.c
+++ b/arch/m68knommu/kernel/ptrace.c
@@ -18,6 +18,7 @@
18#include <linux/ptrace.h> 18#include <linux/ptrace.h>
19#include <linux/user.h> 19#include <linux/user.h>
20#include <linux/signal.h> 20#include <linux/signal.h>
21#include <linux/tracehook.h>
21 22
22#include <asm/uaccess.h> 23#include <asm/uaccess.h>
23#include <asm/page.h> 24#include <asm/page.h>
@@ -134,14 +135,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
134 tmp >>= 16; 135 tmp >>= 16;
135 } else if (addr >= 21 && addr < 49) { 136 } else if (addr >= 21 && addr < 49) {
136 tmp = child->thread.fp[addr - 21]; 137 tmp = child->thread.fp[addr - 21];
137#ifdef CONFIG_M68KFPU_EMU
138 /* Convert internal fpu reg representation
139 * into long double format
140 */
141 if (FPU_IS_EMU && (addr < 45) && !(addr % 3))
142 tmp = ((tmp & 0xffff0000) << 15) |
143 ((tmp & 0x0000ffff) << 16);
144#endif
145 } else if (addr == 49) { 138 } else if (addr == 49) {
146 tmp = child->mm->start_code; 139 tmp = child->mm->start_code;
147 } else if (addr == 50) { 140 } else if (addr == 50) {
@@ -175,16 +168,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
175 } 168 }
176 if (addr >= 21 && addr < 48) 169 if (addr >= 21 && addr < 48)
177 { 170 {
178#ifdef CONFIG_M68KFPU_EMU
179 /* Convert long double format
180 * into internal fpu reg representation
181 */
182 if (FPU_IS_EMU && (addr < 45) && !(addr % 3)) {
183 data = (unsigned long)data << 15;
184 data = (data & 0xffff0000) |
185 ((data & 0x0000ffff) >> 1);
186 }
187#endif
188 child->thread.fp[addr - 21] = data; 171 child->thread.fp[addr - 21] = data;
189 ret = 0; 172 ret = 0;
190 } 173 }
@@ -259,21 +242,17 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
259 return ret; 242 return ret;
260} 243}
261 244
262asmlinkage void syscall_trace(void) 245asmlinkage int syscall_trace_enter(void)
263{ 246{
264 if (!test_thread_flag(TIF_SYSCALL_TRACE)) 247 int ret = 0;
265 return; 248
266 if (!(current->ptrace & PT_PTRACED)) 249 if (test_thread_flag(TIF_SYSCALL_TRACE))
267 return; 250 ret = tracehook_report_syscall_entry(task_pt_regs(current));
268 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) 251 return ret;
269 ? 0x80 : 0)); 252}
270 /* 253
271 * this isn't the same as continuing with a signal, but it will do 254asmlinkage void syscall_trace_leave(void)
272 * for normal use. strace only continues with a signal if the 255{
273 * stopping signal is not SIGTRAP. -brl 256 if (test_thread_flag(TIF_SYSCALL_TRACE))
274 */ 257 tracehook_report_syscall_exit(task_pt_regs(current), 0);
275 if (current->exit_code) {
276 send_sig(current->exit_code, current, 1);
277 current->exit_code = 0;
278 }
279} 258}
diff --git a/arch/m68knommu/kernel/setup.c b/arch/m68knommu/kernel/setup.c
index ba92b90d5fbc..c684adf5dc40 100644
--- a/arch/m68knommu/kernel/setup.c
+++ b/arch/m68knommu/kernel/setup.c
@@ -54,9 +54,6 @@ void (*mach_reset)(void);
54void (*mach_halt)(void); 54void (*mach_halt)(void);
55void (*mach_power_off)(void); 55void (*mach_power_off)(void);
56 56
57#ifdef CONFIG_M68000
58 #define CPU "MC68000"
59#endif
60#ifdef CONFIG_M68328 57#ifdef CONFIG_M68328
61 #define CPU "MC68328" 58 #define CPU "MC68328"
62#endif 59#endif
diff --git a/arch/m68knommu/kernel/time.c b/arch/m68knommu/kernel/time.c
index 7089dd9d843b..d6ac2a43453c 100644
--- a/arch/m68knommu/kernel/time.c
+++ b/arch/m68knommu/kernel/time.c
@@ -60,13 +60,16 @@ static unsigned long read_rtc_mmss(void)
60{ 60{
61 unsigned int year, mon, day, hour, min, sec; 61 unsigned int year, mon, day, hour, min, sec;
62 62
63 if (mach_gettod) 63 if (mach_gettod) {
64 mach_gettod(&year, &mon, &day, &hour, &min, &sec); 64 mach_gettod(&year, &mon, &day, &hour, &min, &sec);
65 else 65 if ((year += 1900) < 1970)
66 year = mon = day = hour = min = sec = 0; 66 year += 100;
67 } else {
68 year = 1970;
69 mon = day = 1;
70 hour = min = sec = 0;
71 }
67 72
68 if ((year += 1900) < 1970)
69 year += 100;
70 73
71 return mktime(year, mon, day, hour, min, sec); 74 return mktime(year, mon, day, hour, min, sec);
72} 75}
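
The reworked read_rtc_mmss() keeps the two-digit year windowing, but only applies it when a mach_gettod hook exists; boards without one now fall back to a well-defined 1970-01-01 epoch instead of all-zero date fields. The windowing treats the value as years since 1900 and pushes anything below 70 into 20xx, for example (illustrative values only):

	year = 10;			/* RTC reports years since 1900 */
	if ((year += 1900) < 1970)	/* 1910 is implausible ...      */
		year += 100;		/* ... so read it as 2010       */
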
diff --git a/arch/m68knommu/kernel/traps.c b/arch/m68knommu/kernel/traps.c
index 3739c8f657d7..a768008dfd06 100644
--- a/arch/m68knommu/kernel/traps.c
+++ b/arch/m68knommu/kernel/traps.c
@@ -179,14 +179,16 @@ static void __show_stack(struct task_struct *task, unsigned long *stack)
179 179
180void bad_super_trap(struct frame *fp) 180void bad_super_trap(struct frame *fp)
181{ 181{
182 int vector = (fp->ptregs.vector >> 2) & 0xff;
183
182 console_verbose(); 184 console_verbose();
183 if (fp->ptregs.vector < 4 * ARRAY_SIZE(vec_names)) 185 if (vector < ARRAY_SIZE(vec_names))
184 printk (KERN_WARNING "*** %s *** FORMAT=%X\n", 186 printk (KERN_WARNING "*** %s *** FORMAT=%X\n",
185 vec_names[(fp->ptregs.vector) >> 2], 187 vec_names[vector],
186 fp->ptregs.format); 188 fp->ptregs.format);
187 else 189 else
188 printk (KERN_WARNING "*** Exception %d *** FORMAT=%X\n", 190 printk (KERN_WARNING "*** Exception %d *** FORMAT=%X\n",
189 (fp->ptregs.vector) >> 2, 191 vector,
190 fp->ptregs.format); 192 fp->ptregs.format);
191 printk (KERN_WARNING "Current process id is %d\n", current->pid); 193 printk (KERN_WARNING "Current process id is %d\n", current->pid);
192 die_if_kernel("BAD KERNEL TRAP", &fp->ptregs, 0); 194 die_if_kernel("BAD KERNEL TRAP", &fp->ptregs, 0);
@@ -195,10 +197,11 @@ void bad_super_trap(struct frame *fp)
195asmlinkage void trap_c(struct frame *fp) 197asmlinkage void trap_c(struct frame *fp)
196{ 198{
197 int sig; 199 int sig;
200 int vector = (fp->ptregs.vector >> 2) & 0xff;
198 siginfo_t info; 201 siginfo_t info;
199 202
200 if (fp->ptregs.sr & PS_S) { 203 if (fp->ptregs.sr & PS_S) {
201 if ((fp->ptregs.vector >> 2) == VEC_TRACE) { 204 if (vector == VEC_TRACE) {
202 /* traced a trapping instruction */ 205 /* traced a trapping instruction */
203 } else 206 } else
204 bad_super_trap(fp); 207 bad_super_trap(fp);
@@ -206,7 +209,7 @@ asmlinkage void trap_c(struct frame *fp)
206 } 209 }
207 210
208 /* send the appropriate signal to the user program */ 211 /* send the appropriate signal to the user program */
209 switch ((fp->ptregs.vector) >> 2) { 212 switch (vector) {
210 case VEC_ADDRERR: 213 case VEC_ADDRERR:
211 info.si_code = BUS_ADRALN; 214 info.si_code = BUS_ADRALN;
212 sig = SIGBUS; 215 sig = SIGBUS;
@@ -360,16 +363,3 @@ void show_stack(struct task_struct *task, unsigned long *stack)
360 else 363 else
361 __show_stack(task, stack); 364 __show_stack(task, stack);
362} 365}
363
364#ifdef CONFIG_M68KFPU_EMU
365asmlinkage void fpemu_signal(int signal, int code, void *addr)
366{
367 siginfo_t info;
368
369 info.si_signo = signal;
370 info.si_errno = 0;
371 info.si_code = code;
372 info.si_addr = addr;
373 force_sig_info(signal, &info, current);
374}
375#endif
diff --git a/arch/m68knommu/platform/5206/Makefile b/arch/m68knommu/platform/5206/Makefile
index 113c33390064..b5db05625cfa 100644
--- a/arch/m68knommu/platform/5206/Makefile
+++ b/arch/m68knommu/platform/5206/Makefile
@@ -8,8 +8,8 @@
8# on the console port whenever a DBG interrupt occurs. You have to 8# on the console port whenever a DBG interrupt occurs. You have to
9# set up you HW breakpoints to trigger a DBG interrupt: 9# set up you HW breakpoints to trigger a DBG interrupt:
10# 10#
11# EXTRA_CFLAGS += -DTRAP_DBG_INTERRUPT 11# ccflags-y := -DTRAP_DBG_INTERRUPT
12# EXTRA_AFLAGS += -DTRAP_DBG_INTERRUPT 12# asflags-y := -DTRAP_DBG_INTERRUPT
13# 13#
14 14
15asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1 15asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
diff --git a/arch/m68knommu/platform/5206e/Makefile b/arch/m68knommu/platform/5206e/Makefile
index 113c33390064..b5db05625cfa 100644
--- a/arch/m68knommu/platform/5206e/Makefile
+++ b/arch/m68knommu/platform/5206e/Makefile
@@ -8,8 +8,8 @@
8# on the console port whenever a DBG interrupt occurs. You have to 8# on the console port whenever a DBG interrupt occurs. You have to
9# set up you HW breakpoints to trigger a DBG interrupt: 9# set up you HW breakpoints to trigger a DBG interrupt:
10# 10#
11# EXTRA_CFLAGS += -DTRAP_DBG_INTERRUPT 11# ccflags-y := -DTRAP_DBG_INTERRUPT
12# EXTRA_AFLAGS += -DTRAP_DBG_INTERRUPT 12# asflags-y := -DTRAP_DBG_INTERRUPT
13# 13#
14 14
15asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1 15asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
diff --git a/arch/m68knommu/platform/520x/Makefile b/arch/m68knommu/platform/520x/Makefile
index 435ab3483dc1..ad3f4e5a57ce 100644
--- a/arch/m68knommu/platform/520x/Makefile
+++ b/arch/m68knommu/platform/520x/Makefile
@@ -8,8 +8,8 @@
8# on the console port whenever a DBG interrupt occurs. You have to 8# on the console port whenever a DBG interrupt occurs. You have to
9# set up you HW breakpoints to trigger a DBG interrupt: 9# set up you HW breakpoints to trigger a DBG interrupt:
10# 10#
11# EXTRA_CFLAGS += -DTRAP_DBG_INTERRUPT 11# ccflags-y := -DTRAP_DBG_INTERRUPT
12# EXTRA_AFLAGS += -DTRAP_DBG_INTERRUPT 12# asflags-y := -DTRAP_DBG_INTERRUPT
13# 13#
14 14
15asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1 15asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
diff --git a/arch/m68knommu/platform/523x/Makefile b/arch/m68knommu/platform/523x/Makefile
index b8f9b45440c2..c04b8f71c88c 100644
--- a/arch/m68knommu/platform/523x/Makefile
+++ b/arch/m68knommu/platform/523x/Makefile
@@ -8,8 +8,8 @@
8# on the console port whenever a DBG interrupt occurs. You have to 8# on the console port whenever a DBG interrupt occurs. You have to
9# set up you HW breakpoints to trigger a DBG interrupt: 9# set up you HW breakpoints to trigger a DBG interrupt:
10# 10#
11# EXTRA_CFLAGS += -DTRAP_DBG_INTERRUPT 11# ccflags-y := -DTRAP_DBG_INTERRUPT
12# EXTRA_AFLAGS += -DTRAP_DBG_INTERRUPT 12# asflags-y := -DTRAP_DBG_INTERRUPT
13# 13#
14 14
15asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1 15asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
diff --git a/arch/m68knommu/platform/5249/Makefile b/arch/m68knommu/platform/5249/Makefile
index f56225d1582f..4bed30fd0073 100644
--- a/arch/m68knommu/platform/5249/Makefile
+++ b/arch/m68knommu/platform/5249/Makefile
@@ -8,8 +8,8 @@
8# on the console port whenever a DBG interrupt occurs. You have to 8# on the console port whenever a DBG interrupt occurs. You have to
9# set up you HW breakpoints to trigger a DBG interrupt: 9# set up you HW breakpoints to trigger a DBG interrupt:
10# 10#
11# EXTRA_CFLAGS += -DTRAP_DBG_INTERRUPT 11# ccflags-y := -DTRAP_DBG_INTERRUPT
12# EXTRA_AFLAGS += -DTRAP_DBG_INTERRUPT 12# asflags-y := -DTRAP_DBG_INTERRUPT
13# 13#
14 14
15asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1 15asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
diff --git a/arch/m68knommu/platform/5272/Makefile b/arch/m68knommu/platform/5272/Makefile
index 93673ef8e2c1..34110fc14301 100644
--- a/arch/m68knommu/platform/5272/Makefile
+++ b/arch/m68knommu/platform/5272/Makefile
@@ -8,8 +8,8 @@
8# on the console port whenever a DBG interrupt occurs. You have to 8# on the console port whenever a DBG interrupt occurs. You have to
9# set up you HW breakpoints to trigger a DBG interrupt: 9# set up you HW breakpoints to trigger a DBG interrupt:
10# 10#
11# EXTRA_CFLAGS += -DTRAP_DBG_INTERRUPT 11# ccflags-y := -DTRAP_DBG_INTERRUPT
12# EXTRA_AFLAGS += -DTRAP_DBG_INTERRUPT 12# asflags-y := -DTRAP_DBG_INTERRUPT
13# 13#
14 14
15asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1 15asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
diff --git a/arch/m68knommu/platform/5272/config.c b/arch/m68knommu/platform/5272/config.c
index 59278c0887d0..65bb582734e1 100644
--- a/arch/m68knommu/platform/5272/config.c
+++ b/arch/m68knommu/platform/5272/config.c
@@ -13,6 +13,8 @@
13#include <linux/param.h> 13#include <linux/param.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/io.h> 15#include <linux/io.h>
16#include <linux/phy.h>
17#include <linux/phy_fixed.h>
16#include <asm/machdep.h> 18#include <asm/machdep.h>
17#include <asm/coldfire.h> 19#include <asm/coldfire.h>
18#include <asm/mcfsim.h> 20#include <asm/mcfsim.h>
@@ -148,9 +150,23 @@ void __init config_BSP(char *commandp, int size)
148 150
149/***************************************************************************/ 151/***************************************************************************/
150 152
153/*
 154 * Some 5272 based boards have the FEC ethernet directly connected to
155 * an ethernet switch. In this case we need to use the fixed phy type,
156 * and we need to declare it early in boot.
157 */
158static struct fixed_phy_status nettel_fixed_phy_status __initdata = {
159 .link = 1,
160 .speed = 100,
161 .duplex = 0,
162};
163
164/***************************************************************************/
165
151static int __init init_BSP(void) 166static int __init init_BSP(void)
152{ 167{
153 m5272_uarts_init(); 168 m5272_uarts_init();
169 fixed_phy_add(PHY_POLL, 0, &nettel_fixed_phy_status);
154 platform_add_devices(m5272_devices, ARRAY_SIZE(m5272_devices)); 170 platform_add_devices(m5272_devices, ARRAY_SIZE(m5272_devices));
155 return 0; 171 return 0;
156} 172}
diff --git a/arch/m68knommu/platform/5272/intc.c b/arch/m68knommu/platform/5272/intc.c
index 7081e0a9720e..3cf681c177aa 100644
--- a/arch/m68knommu/platform/5272/intc.c
+++ b/arch/m68knommu/platform/5272/intc.c
@@ -12,6 +12,7 @@
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/interrupt.h> 14#include <linux/interrupt.h>
15#include <linux/kernel_stat.h>
15#include <linux/irq.h> 16#include <linux/irq.h>
16#include <linux/io.h> 17#include <linux/io.h>
17#include <asm/coldfire.h> 18#include <asm/coldfire.h>
@@ -29,6 +30,10 @@
29 * via a set of 4 "Interrupt Controller Registers" (ICR). There is a 30 * via a set of 4 "Interrupt Controller Registers" (ICR). There is a
30 * loose mapping of vector number to register and internal bits, but 31 * loose mapping of vector number to register and internal bits, but
31 * a table is the easiest and quickest way to map them. 32 * a table is the easiest and quickest way to map them.
33 *
34 * Note that the external interrupts are edge triggered (unlike the
 35 * internal interrupt sources which are level triggered), which means
 36 * they also need acknowledging via acknowledge bits.
32 */ 37 */
33struct irqmap { 38struct irqmap {
34 unsigned char icr; 39 unsigned char icr;
@@ -68,6 +73,11 @@ static struct irqmap intc_irqmap[MCFINT_VECMAX - MCFINT_VECBASE] = {
68 /*MCF_IRQ_SWTO*/ { .icr = MCFSIM_ICR4, .index = 16, .ack = 0, }, 73 /*MCF_IRQ_SWTO*/ { .icr = MCFSIM_ICR4, .index = 16, .ack = 0, },
69}; 74};
70 75
76/*
77 * The act of masking the interrupt also has a side effect of 'ack'ing
78 * an interrupt on this irq (for the external irqs). So this mask function
79 * is also an ack_mask function.
80 */
71static void intc_irq_mask(unsigned int irq) 81static void intc_irq_mask(unsigned int irq)
72{ 82{
73 if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECMAX)) { 83 if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECMAX)) {
@@ -95,7 +105,9 @@ static void intc_irq_ack(unsigned int irq)
95 irq -= MCFINT_VECBASE; 105 irq -= MCFINT_VECBASE;
96 if (intc_irqmap[irq].ack) { 106 if (intc_irqmap[irq].ack) {
97 u32 v; 107 u32 v;
98 v = 0xd << intc_irqmap[irq].index; 108 v = readl(MCF_MBAR + intc_irqmap[irq].icr);
109 v &= (0x7 << intc_irqmap[irq].index);
110 v |= (0x8 << intc_irqmap[irq].index);
99 writel(v, MCF_MBAR + intc_irqmap[irq].icr); 111 writel(v, MCF_MBAR + intc_irqmap[irq].icr);
100 } 112 }
101 } 113 }
@@ -103,21 +115,47 @@ static void intc_irq_ack(unsigned int irq)
103 115
104static int intc_irq_set_type(unsigned int irq, unsigned int type) 116static int intc_irq_set_type(unsigned int irq, unsigned int type)
105{ 117{
106 /* We can set the edge type here for external interrupts */ 118 if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECMAX)) {
119 irq -= MCFINT_VECBASE;
120 if (intc_irqmap[irq].ack) {
121 u32 v;
122 v = readl(MCF_MBAR + MCFSIM_PITR);
123 if (type == IRQ_TYPE_EDGE_FALLING)
124 v &= ~(0x1 << (32 - irq));
125 else
126 v |= (0x1 << (32 - irq));
127 writel(v, MCF_MBAR + MCFSIM_PITR);
128 }
129 }
107 return 0; 130 return 0;
108} 131}
109 132
133/*
134 * Simple flow handler to deal with the external edge triggered interrupts.
135 * We need to be careful with the masking/acking due to the side effects
136 * of masking an interrupt.
137 */
138static void intc_external_irq(unsigned int irq, struct irq_desc *desc)
139{
140 kstat_incr_irqs_this_cpu(irq, desc);
141 desc->status |= IRQ_INPROGRESS;
142 desc->chip->ack(irq);
143 handle_IRQ_event(irq, desc->action);
144 desc->status &= ~IRQ_INPROGRESS;
145}
146
110static struct irq_chip intc_irq_chip = { 147static struct irq_chip intc_irq_chip = {
111 .name = "CF-INTC", 148 .name = "CF-INTC",
112 .mask = intc_irq_mask, 149 .mask = intc_irq_mask,
113 .unmask = intc_irq_unmask, 150 .unmask = intc_irq_unmask,
151 .mask_ack = intc_irq_mask,
114 .ack = intc_irq_ack, 152 .ack = intc_irq_ack,
115 .set_type = intc_irq_set_type, 153 .set_type = intc_irq_set_type,
116}; 154};
117 155
118void __init init_IRQ(void) 156void __init init_IRQ(void)
119{ 157{
120 int irq; 158 int irq, edge;
121 159
122 init_vectors(); 160 init_vectors();
123 161
@@ -128,11 +166,17 @@ void __init init_IRQ(void)
128 writel(0x88888888, MCF_MBAR + MCFSIM_ICR4); 166 writel(0x88888888, MCF_MBAR + MCFSIM_ICR4);
129 167
130 for (irq = 0; (irq < NR_IRQS); irq++) { 168 for (irq = 0; (irq < NR_IRQS); irq++) {
131 irq_desc[irq].status = IRQ_DISABLED; 169 set_irq_chip(irq, &intc_irq_chip);
132 irq_desc[irq].action = NULL; 170 edge = 0;
133 irq_desc[irq].depth = 1; 171 if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECMAX))
134 irq_desc[irq].chip = &intc_irq_chip; 172 edge = intc_irqmap[irq - MCFINT_VECBASE].ack;
135 intc_irq_set_type(irq, 0); 173 if (edge) {
174 set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
175 set_irq_handler(irq, intc_external_irq);
176 } else {
177 set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
178 set_irq_handler(irq, handle_level_irq);
179 }
136 } 180 }
137} 181}
138 182
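
With the external lines now flowing through the generic IRQ framework as edge-triggered sources, a board driver claims them in the usual way, picking the edge before requesting the line. A sketch under assumed names (board_eint_handler and the MCF_IRQ_EINT1 vector from the 5272 support headers stand in for whatever the board actually uses):

	static irqreturn_t board_eint_handler(int irq, void *dev_id)
	{
		/* board-specific handling goes here */
		return IRQ_HANDLED;
	}

	set_irq_type(MCF_IRQ_EINT1, IRQ_TYPE_EDGE_FALLING);
	if (request_irq(MCF_IRQ_EINT1, board_eint_handler, 0, "board-eint1", NULL))
		pr_err("cannot claim external interrupt line\n");
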
diff --git a/arch/m68knommu/platform/527x/Makefile b/arch/m68knommu/platform/527x/Makefile
index 3d90e6d92459..6ac4b57370ea 100644
--- a/arch/m68knommu/platform/527x/Makefile
+++ b/arch/m68knommu/platform/527x/Makefile
@@ -8,8 +8,8 @@
8# on the console port whenever a DBG interrupt occurs. You have to 8# on the console port whenever a DBG interrupt occurs. You have to
9# set up you HW breakpoints to trigger a DBG interrupt: 9# set up you HW breakpoints to trigger a DBG interrupt:
10# 10#
11# EXTRA_CFLAGS += -DTRAP_DBG_INTERRUPT 11# ccflags-y := -DTRAP_DBG_INTERRUPT
12# EXTRA_AFLAGS += -DTRAP_DBG_INTERRUPT 12# asflags-y := -DTRAP_DBG_INTERRUPT
13# 13#
14 14
15asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1 15asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
diff --git a/arch/m68knommu/platform/528x/Makefile b/arch/m68knommu/platform/528x/Makefile
index 3d90e6d92459..6ac4b57370ea 100644
--- a/arch/m68knommu/platform/528x/Makefile
+++ b/arch/m68knommu/platform/528x/Makefile
@@ -8,8 +8,8 @@
8# on the console port whenever a DBG interrupt occurs. You have to 8# on the console port whenever a DBG interrupt occurs. You have to
9# set up you HW breakpoints to trigger a DBG interrupt: 9# set up you HW breakpoints to trigger a DBG interrupt:
10# 10#
11# EXTRA_CFLAGS += -DTRAP_DBG_INTERRUPT 11# ccflags-y := -DTRAP_DBG_INTERRUPT
12# EXTRA_AFLAGS += -DTRAP_DBG_INTERRUPT 12# asflags-y := -DTRAP_DBG_INTERRUPT
13# 13#
14 14
15asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1 15asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
diff --git a/arch/m68knommu/platform/5307/Makefile b/arch/m68knommu/platform/5307/Makefile
index 6de526976828..d4293b791f2e 100644
--- a/arch/m68knommu/platform/5307/Makefile
+++ b/arch/m68knommu/platform/5307/Makefile
@@ -8,8 +8,8 @@
8# on the console port whenever a DBG interrupt occurs. You have to 8# on the console port whenever a DBG interrupt occurs. You have to
9# set up you HW breakpoints to trigger a DBG interrupt: 9# set up you HW breakpoints to trigger a DBG interrupt:
10# 10#
11# EXTRA_CFLAGS += -DTRAP_DBG_INTERRUPT 11# ccflags-y := -DTRAP_DBG_INTERRUPT
12# EXTRA_AFLAGS += -DTRAP_DBG_INTERRUPT 12# asflags-y := -DTRAP_DBG_INTERRUPT
13# 13#
14 14
15asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1 15asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
diff --git a/arch/m68knommu/platform/532x/Makefile b/arch/m68knommu/platform/532x/Makefile
index 4cc23245bcd1..ce01669399c6 100644
--- a/arch/m68knommu/platform/532x/Makefile
+++ b/arch/m68knommu/platform/532x/Makefile
@@ -8,8 +8,8 @@
8# on the console port whenever a DBG interrupt occurs. You have to 8# on the console port whenever a DBG interrupt occurs. You have to
9# set up you HW breakpoints to trigger a DBG interrupt: 9# set up you HW breakpoints to trigger a DBG interrupt:
10# 10#
11# EXTRA_CFLAGS += -DTRAP_DBG_INTERRUPT 11# ccflags-y := -DTRAP_DBG_INTERRUPT
12# EXTRA_AFLAGS += -DTRAP_DBG_INTERRUPT 12# asflags-y := -DTRAP_DBG_INTERRUPT
13# 13#
14 14
15asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1 15asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
diff --git a/arch/m68knommu/platform/5407/Makefile b/arch/m68knommu/platform/5407/Makefile
index dee62c5dbaa6..e83fe148eddc 100644
--- a/arch/m68knommu/platform/5407/Makefile
+++ b/arch/m68knommu/platform/5407/Makefile
@@ -8,8 +8,8 @@
8# on the console port whenever a DBG interrupt occurs. You have to 8# on the console port whenever a DBG interrupt occurs. You have to
9# set up you HW breakpoints to trigger a DBG interrupt: 9# set up you HW breakpoints to trigger a DBG interrupt:
10# 10#
11# EXTRA_CFLAGS += -DTRAP_DBG_INTERRUPT 11# ccflags-y := -DTRAP_DBG_INTERRUPT
12# EXTRA_AFLAGS += -DTRAP_DBG_INTERRUPT 12# asflags-y := -DTRAP_DBG_INTERRUPT
13# 13#
14 14
15asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1 15asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
diff --git a/arch/m68knommu/platform/548x/Makefile b/arch/m68knommu/platform/548x/Makefile
new file mode 100644
index 000000000000..e6035e7a2d3f
--- /dev/null
+++ b/arch/m68knommu/platform/548x/Makefile
@@ -0,0 +1,18 @@
1#
2# Makefile for the m68knommu linux kernel.
3#
4
5#
6# If you want to play with the HW breakpoints then you will
 7# need to define this, which will give you a stack backtrace
 8# on the console port whenever a DBG interrupt occurs. You have to
 9# set up your HW breakpoints to trigger a DBG interrupt:
 10#
 11# ccflags-y := -DTRAP_DBG_INTERRUPT
 12# asflags-y := -DTRAP_DBG_INTERRUPT
13#
14
15asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
16
17obj-y := config.o
18
diff --git a/arch/m68knommu/platform/548x/config.c b/arch/m68knommu/platform/548x/config.c
new file mode 100644
index 000000000000..9888846bd1cf
--- /dev/null
+++ b/arch/m68knommu/platform/548x/config.c
@@ -0,0 +1,115 @@
1/***************************************************************************/
2
3/*
4 * linux/arch/m68knommu/platform/548x/config.c
5 *
6 * Copyright (C) 2010, Philippe De Muyter <phdm@macqel.be>
7 */
8
9/***************************************************************************/
10
11#include <linux/kernel.h>
12#include <linux/param.h>
13#include <linux/init.h>
14#include <linux/interrupt.h>
15#include <linux/io.h>
16#include <asm/machdep.h>
17#include <asm/coldfire.h>
18#include <asm/m548xsim.h>
19#include <asm/mcfuart.h>
20#include <asm/m548xgpt.h>
21
22/***************************************************************************/
23
24static struct mcf_platform_uart m548x_uart_platform[] = {
25 {
26 .mapbase = MCF_MBAR + MCFUART_BASE1,
27 .irq = 64 + 35,
28 },
29 {
30 .mapbase = MCF_MBAR + MCFUART_BASE2,
31 .irq = 64 + 34,
32 },
33 {
34 .mapbase = MCF_MBAR + MCFUART_BASE3,
35 .irq = 64 + 33,
36 },
37 {
38 .mapbase = MCF_MBAR + MCFUART_BASE4,
39 .irq = 64 + 32,
40 },
41};
42
43static struct platform_device m548x_uart = {
44 .name = "mcfuart",
45 .id = 0,
46 .dev.platform_data = m548x_uart_platform,
47};
48
49static struct platform_device *m548x_devices[] __initdata = {
50 &m548x_uart,
51};
52
53
54/***************************************************************************/
55
56static void __init m548x_uart_init_line(int line, int irq)
57{
58 int rts_cts;
59
60 /* enable io pins */
61 switch (line) {
62 case 0:
63 rts_cts = 0; break;
64 case 1:
65 rts_cts = MCF_PAR_PSC_RTS_RTS; break;
66 case 2:
67 rts_cts = MCF_PAR_PSC_RTS_RTS | MCF_PAR_PSC_CTS_CTS; break;
68 case 3:
69 rts_cts = 0; break;
70 }
71 __raw_writeb(MCF_PAR_PSC_TXD | rts_cts | MCF_PAR_PSC_RXD,
72 MCF_MBAR + MCF_PAR_PSC(line));
73}
74
75static void __init m548x_uarts_init(void)
76{
77 const int nrlines = ARRAY_SIZE(m548x_uart_platform);
78 int line;
79
80 for (line = 0; (line < nrlines); line++)
81 m548x_uart_init_line(line, m548x_uart_platform[line].irq);
82}
83
84/***************************************************************************/
85
86static void mcf548x_reset(void)
87{
88 /* disable interrupts and enable the watchdog */
89 asm("movew #0x2700, %sr\n");
90 __raw_writel(0, MCF_MBAR + MCF_GPT_GMS0);
91 __raw_writel(MCF_GPT_GCIR_CNT(1), MCF_MBAR + MCF_GPT_GCIR0);
92 __raw_writel(MCF_GPT_GMS_WDEN | MCF_GPT_GMS_CE | MCF_GPT_GMS_TMS(4),
93 MCF_MBAR + MCF_GPT_GMS0);
94}
95
96/***************************************************************************/
97
98void __init config_BSP(char *commandp, int size)
99{
100 mach_reset = mcf548x_reset;
101 m548x_uarts_init();
102}
103
104/***************************************************************************/
105
106static int __init init_BSP(void)
107{
108
109 platform_add_devices(m548x_devices, ARRAY_SIZE(m548x_devices));
110 return 0;
111}
112
113arch_initcall(init_BSP);
114
115/***************************************************************************/
diff --git a/arch/m68knommu/platform/68328/entry.S b/arch/m68knommu/platform/68328/entry.S
index 9d80d2c42866..27241e16a526 100644
--- a/arch/m68knommu/platform/68328/entry.S
+++ b/arch/m68knommu/platform/68328/entry.S
@@ -43,10 +43,10 @@ badsys:
43 jra ret_from_exception 43 jra ret_from_exception
44 44
45do_trace: 45do_trace:
46 movel #-ENOSYS,%sp@(PT_OFF_D0) /* needed for strace*/ 46 movel #-ENOSYS,%sp@(PT_OFF_D0) /* needed for strace*/
47 subql #4,%sp 47 subql #4,%sp
48 SAVE_SWITCH_STACK 48 SAVE_SWITCH_STACK
49 jbsr syscall_trace 49 jbsr syscall_trace_enter
50 RESTORE_SWITCH_STACK 50 RESTORE_SWITCH_STACK
51 addql #4,%sp 51 addql #4,%sp
52 movel %sp@(PT_OFF_ORIG_D0),%d1 52 movel %sp@(PT_OFF_ORIG_D0),%d1
@@ -57,10 +57,10 @@ do_trace:
57 lea sys_call_table, %a0 57 lea sys_call_table, %a0
58 jbsr %a0@(%d1) 58 jbsr %a0@(%d1)
59 59
601: movel %d0,%sp@(PT_OFF_D0) /* save the return value */ 601: movel %d0,%sp@(PT_OFF_D0) /* save the return value */
61 subql #4,%sp /* dummy return address */ 61 subql #4,%sp /* dummy return address */
62 SAVE_SWITCH_STACK 62 SAVE_SWITCH_STACK
63 jbsr syscall_trace 63 jbsr syscall_trace_leave
64 64
65ret_from_signal: 65ret_from_signal:
66 RESTORE_SWITCH_STACK 66 RESTORE_SWITCH_STACK
@@ -71,16 +71,16 @@ ENTRY(system_call)
71 SAVE_ALL 71 SAVE_ALL
72 72
73 /* save top of frame*/ 73 /* save top of frame*/
74 pea %sp@ 74 pea %sp@
75 jbsr set_esp0 75 jbsr set_esp0
76 addql #4,%sp 76 addql #4,%sp
77 77
78 movel %sp@(PT_OFF_ORIG_D0),%d0 78 movel %sp@(PT_OFF_ORIG_D0),%d0
79 79
80 movel %sp,%d1 /* get thread_info pointer */ 80 movel %sp,%d1 /* get thread_info pointer */
81 andl #-THREAD_SIZE,%d1 81 andl #-THREAD_SIZE,%d1
82 movel %d1,%a2 82 movel %d1,%a2
83 btst #TIF_SYSCALL_TRACE,%a2@(TI_FLAGS) 83 btst #(TIF_SYSCALL_TRACE%8),%a2@(TI_FLAGS+(31-TIF_SYSCALL_TRACE)/8)
84 jne do_trace 84 jne do_trace
85 cmpl #NR_syscalls,%d0 85 cmpl #NR_syscalls,%d0
86 jcc badsys 86 jcc badsys
@@ -88,10 +88,10 @@ ENTRY(system_call)
88 lea sys_call_table,%a0 88 lea sys_call_table,%a0
89 movel %a0@(%d0), %a0 89 movel %a0@(%d0), %a0
90 jbsr %a0@ 90 jbsr %a0@
91 movel %d0,%sp@(PT_OFF_D0) /* save the return value*/ 91 movel %d0,%sp@(PT_OFF_D0) /* save the return value*/
92 92
93ret_from_exception: 93ret_from_exception:
94 btst #5,%sp@(PT_OFF_SR) /* check if returning to kernel*/ 94 btst #5,%sp@(PT_OFF_SR) /* check if returning to kernel*/
95 jeq Luser_return /* if so, skip resched, signals*/ 95 jeq Luser_return /* if so, skip resched, signals*/
96 96
97Lkernel_return: 97Lkernel_return:
@@ -133,7 +133,7 @@ Lreturn:
133 */ 133 */
134inthandler1: 134inthandler1:
135 SAVE_ALL 135 SAVE_ALL
136 movew %sp@(PT_OFF_VECTOR), %d0 136 movew %sp@(PT_OFF_FORMATVEC), %d0
137 and #0x3ff, %d0 137 and #0x3ff, %d0
138 138
139 movel %sp,%sp@- 139 movel %sp,%sp@-
@@ -144,7 +144,7 @@ inthandler1:
144 144
145inthandler2: 145inthandler2:
146 SAVE_ALL 146 SAVE_ALL
147 movew %sp@(PT_OFF_VECTOR), %d0 147 movew %sp@(PT_OFF_FORMATVEC), %d0
148 and #0x3ff, %d0 148 and #0x3ff, %d0
149 149
150 movel %sp,%sp@- 150 movel %sp,%sp@-
@@ -155,7 +155,7 @@ inthandler2:
155 155
156inthandler3: 156inthandler3:
157 SAVE_ALL 157 SAVE_ALL
158 movew %sp@(PT_OFF_VECTOR), %d0 158 movew %sp@(PT_OFF_FORMATVEC), %d0
159 and #0x3ff, %d0 159 and #0x3ff, %d0
160 160
161 movel %sp,%sp@- 161 movel %sp,%sp@-
@@ -166,7 +166,7 @@ inthandler3:
166 166
167inthandler4: 167inthandler4:
168 SAVE_ALL 168 SAVE_ALL
169 movew %sp@(PT_OFF_VECTOR), %d0 169 movew %sp@(PT_OFF_FORMATVEC), %d0
170 and #0x3ff, %d0 170 and #0x3ff, %d0
171 171
172 movel %sp,%sp@- 172 movel %sp,%sp@-
@@ -177,7 +177,7 @@ inthandler4:
177 177
178inthandler5: 178inthandler5:
179 SAVE_ALL 179 SAVE_ALL
180 movew %sp@(PT_OFF_VECTOR), %d0 180 movew %sp@(PT_OFF_FORMATVEC), %d0
181 and #0x3ff, %d0 181 and #0x3ff, %d0
182 182
183 movel %sp,%sp@- 183 movel %sp,%sp@-
@@ -188,7 +188,7 @@ inthandler5:
188 188
189inthandler6: 189inthandler6:
190 SAVE_ALL 190 SAVE_ALL
191 movew %sp@(PT_OFF_VECTOR), %d0 191 movew %sp@(PT_OFF_FORMATVEC), %d0
192 and #0x3ff, %d0 192 and #0x3ff, %d0
193 193
194 movel %sp,%sp@- 194 movel %sp,%sp@-
@@ -199,7 +199,7 @@ inthandler6:
199 199
200inthandler7: 200inthandler7:
201 SAVE_ALL 201 SAVE_ALL
202 movew %sp@(PT_OFF_VECTOR), %d0 202 movew %sp@(PT_OFF_FORMATVEC), %d0
203 and #0x3ff, %d0 203 and #0x3ff, %d0
204 204
205 movel %sp,%sp@- 205 movel %sp,%sp@-
@@ -210,7 +210,7 @@ inthandler7:
210 210
211inthandler: 211inthandler:
212 SAVE_ALL 212 SAVE_ALL
213 movew %sp@(PT_OFF_VECTOR), %d0 213 movew %sp@(PT_OFF_FORMATVEC), %d0
214 and #0x3ff, %d0 214 and #0x3ff, %d0
215 215
216 movel %sp,%sp@- 216 movel %sp,%sp@-
diff --git a/arch/m68knommu/platform/68328/head-de2.S b/arch/m68knommu/platform/68328/head-de2.S
index 92d96456d363..f632fdcb93e9 100644
--- a/arch/m68knommu/platform/68328/head-de2.S
+++ b/arch/m68knommu/platform/68328/head-de2.S
@@ -1,11 +1,5 @@
1 1
2#if defined(CONFIG_RAM32MB)
3#define MEM_END 0x02000000 /* Memory size 32Mb */
4#elif defined(CONFIG_RAM16MB)
5#define MEM_END 0x01000000 /* Memory size 16Mb */
6#else
7#define MEM_END 0x00800000 /* Memory size 8Mb */ 2#define MEM_END 0x00800000 /* Memory size 8Mb */
8#endif
9 3
10#undef CRT_DEBUG 4#undef CRT_DEBUG
11 5
diff --git a/arch/m68knommu/platform/68328/head-ram.S b/arch/m68knommu/platform/68328/head-ram.S
index 252b80b02038..7f1aeeacb219 100644
--- a/arch/m68knommu/platform/68328/head-ram.S
+++ b/arch/m68knommu/platform/68328/head-ram.S
@@ -67,33 +67,6 @@ pclp1:
67 beq pclp1 67 beq pclp1
68#endif /* DEBUG */ 68#endif /* DEBUG */
69 69
70#ifdef CONFIG_RELOCATE
71 /* Copy me to RAM */
72 moveal #__rom_start, %a0
73 moveal #_stext, %a1
74 moveal #_edata, %a2
75
76 /* Copy %a0 to %a1 until %a1 == %a2 */
77LD1:
78 movel %a0@+, %d0
79 movel %d0, %a1@+
80 cmpal %a1, %a2
81 bhi LD1
82
83#ifdef DEBUG
84 moveq #74, %d7 /* 'J' */
85 moveb %d7,0xfffff907 /* No absolute addresses */
86pclp2:
87 movew 0xfffff906, %d7
88 andw #0x2000, %d7
89 beq pclp2
90#endif /* DEBUG */
91 /* jump into the RAM copy */
92 jmp ram_jump
93ram_jump:
94
95#endif /* CONFIG_RELOCATE */
96
97#ifdef DEBUG 70#ifdef DEBUG
98 moveq #82, %d7 /* 'R' */ 71 moveq #82, %d7 /* 'R' */
99 moveb %d7,0xfffff907 /* No absolute addresses */ 72 moveb %d7,0xfffff907 /* No absolute addresses */
diff --git a/arch/m68knommu/platform/68328/ints.c b/arch/m68knommu/platform/68328/ints.c
index b91ee85d4b5d..865852806a17 100644
--- a/arch/m68knommu/platform/68328/ints.c
+++ b/arch/m68knommu/platform/68328/ints.c
@@ -179,10 +179,8 @@ void __init init_IRQ(void)
179 IMR = ~0; 179 IMR = ~0;
180 180
181 for (i = 0; (i < NR_IRQS); i++) { 181 for (i = 0; (i < NR_IRQS); i++) {
 182 irq_desc[i].status = IRQ_DISABLED; 182 set_irq_chip(i, &intc_irq_chip);
 183 irq_desc[i].action = NULL; 183 set_irq_handler(i, handle_level_irq);
184 irq_desc[i].depth = 1;
185 irq_desc[i].chip = &intc_irq_chip;
186 } 184 }
187} 185}
188 186
diff --git a/arch/m68knommu/platform/68360/entry.S b/arch/m68knommu/platform/68360/entry.S
index 6d3460a39cac..c131c6e1d92d 100644
--- a/arch/m68knommu/platform/68360/entry.S
+++ b/arch/m68knommu/platform/68360/entry.S
@@ -42,7 +42,7 @@ do_trace:
42 movel #-ENOSYS,%sp@(PT_OFF_D0) /* needed for strace*/ 42 movel #-ENOSYS,%sp@(PT_OFF_D0) /* needed for strace*/
43 subql #4,%sp 43 subql #4,%sp
44 SAVE_SWITCH_STACK 44 SAVE_SWITCH_STACK
45 jbsr syscall_trace 45 jbsr syscall_trace_enter
46 RESTORE_SWITCH_STACK 46 RESTORE_SWITCH_STACK
47 addql #4,%sp 47 addql #4,%sp
48 movel %sp@(PT_OFF_ORIG_D0),%d1 48 movel %sp@(PT_OFF_ORIG_D0),%d1
@@ -56,7 +56,7 @@ do_trace:
561: movel %d0,%sp@(PT_OFF_D0) /* save the return value */ 561: movel %d0,%sp@(PT_OFF_D0) /* save the return value */
57 subql #4,%sp /* dummy return address */ 57 subql #4,%sp /* dummy return address */
58 SAVE_SWITCH_STACK 58 SAVE_SWITCH_STACK
59 jbsr syscall_trace 59 jbsr syscall_trace_leave
60 60
61ret_from_signal: 61ret_from_signal:
62 RESTORE_SWITCH_STACK 62 RESTORE_SWITCH_STACK
@@ -71,7 +71,12 @@ ENTRY(system_call)
71 jbsr set_esp0 71 jbsr set_esp0
72 addql #4,%sp 72 addql #4,%sp
73 73
74 btst #PF_TRACESYS_BIT,%a2@(TASK_FLAGS+PF_TRACESYS_OFF) 74 movel %sp@(PT_OFF_ORIG_D0),%d0
75
76 movel %sp,%d1 /* get thread_info pointer */
77 andl #-THREAD_SIZE,%d1
78 movel %d1,%a2
79 btst #(TIF_SYSCALL_TRACE%8),%a2@(TI_FLAGS+(31-TIF_SYSCALL_TRACE)/8)
75 jne do_trace 80 jne do_trace
76 cmpl #NR_syscalls,%d0 81 cmpl #NR_syscalls,%d0
77 jcc badsys 82 jcc badsys
@@ -124,7 +129,7 @@ Lreturn:
124 */ 129 */
125inthandler: 130inthandler:
126 SAVE_ALL 131 SAVE_ALL
127 movew %sp@(PT_OFF_VECTOR), %d0 132 movew %sp@(PT_OFF_FORMATVEC), %d0
128 and.l #0x3ff, %d0 133 and.l #0x3ff, %d0
129 lsr.l #0x02, %d0 134 lsr.l #0x02, %d0
130 135
diff --git a/arch/m68knommu/platform/68360/ints.c b/arch/m68knommu/platform/68360/ints.c
index 6f22970d8c20..ad96ab1051f0 100644
--- a/arch/m68knommu/platform/68360/ints.c
+++ b/arch/m68knommu/platform/68360/ints.c
@@ -132,10 +132,8 @@ void init_IRQ(void)
132 pquicc->intr_cimr = 0x00000000; 132 pquicc->intr_cimr = 0x00000000;
133 133
134 for (i = 0; (i < NR_IRQS); i++) { 134 for (i = 0; (i < NR_IRQS); i++) {
 135 irq_desc[i].status = IRQ_DISABLED; 135 set_irq_chip(i, &intc_irq_chip);
 136 irq_desc[i].action = NULL; 136 set_irq_handler(i, handle_level_irq);
137 irq_desc[i].depth = 1;
138 irq_desc[i].chip = &intc_irq_chip;
139 } 137 }
140} 138}
141 139
diff --git a/arch/m68knommu/platform/68VZ328/config.c b/arch/m68knommu/platform/68VZ328/config.c
index fc5c63054e98..eabaabe8af36 100644
--- a/arch/m68knommu/platform/68VZ328/config.c
+++ b/arch/m68knommu/platform/68VZ328/config.c
@@ -90,11 +90,6 @@ static void init_hardware(char *command, int size)
90 PDIQEG &= ~PD(1); 90 PDIQEG &= ~PD(1);
91 PDIRQEN |= PD(1); /* IRQ enabled */ 91 PDIRQEN |= PD(1); /* IRQ enabled */
92 92
93#ifdef CONFIG_68328_SERIAL_UART2
94 /* Enable RXD TXD port bits to enable UART2 */
95 PJSEL &= ~(PJ(5) | PJ(4));
96#endif
97
98#ifdef CONFIG_INIT_LCD 93#ifdef CONFIG_INIT_LCD
99 /* initialize LCD controller */ 94 /* initialize LCD controller */
100 LSSA = (long) screen_bits; 95 LSSA = (long) screen_bits;
diff --git a/arch/m68knommu/platform/coldfire/Makefile b/arch/m68knommu/platform/coldfire/Makefile
index f72a0e5d9996..45f501fa4525 100644
--- a/arch/m68knommu/platform/coldfire/Makefile
+++ b/arch/m68knommu/platform/coldfire/Makefile
@@ -8,8 +8,8 @@
8# on the console port whenever a DBG interrupt occurs. You have to 8# on the console port whenever a DBG interrupt occurs. You have to
9# set up you HW breakpoints to trigger a DBG interrupt: 9# set up you HW breakpoints to trigger a DBG interrupt:
10# 10#
11# EXTRA_CFLAGS += -DTRAP_DBG_INTERRUPT 11# ccflags-y := -DTRAP_DBG_INTERRUPT
12# EXTRA_AFLAGS += -DTRAP_DBG_INTERRUPT 12# asflags-y := -DTRAP_DBG_INTERRUPT
13# 13#
14 14
15asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1 15asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
@@ -26,6 +26,7 @@ obj-$(CONFIG_M528x) += pit.o intc-2.o
26obj-$(CONFIG_M5307) += timers.o intc.o 26obj-$(CONFIG_M5307) += timers.o intc.o
27obj-$(CONFIG_M532x) += timers.o intc-simr.o 27obj-$(CONFIG_M532x) += timers.o intc-simr.o
28obj-$(CONFIG_M5407) += timers.o intc.o 28obj-$(CONFIG_M5407) += timers.o intc.o
29obj-$(CONFIG_M548x) += sltimers.o intc-2.o
29 30
30obj-y += pinmux.o gpio.o 31obj-y += pinmux.o gpio.o
31extra-y := head.o 32extra-y := head.o
diff --git a/arch/m68knommu/platform/coldfire/entry.S b/arch/m68knommu/platform/coldfire/entry.S
index cd79d7e92ce6..5e92bed94b7e 100644
--- a/arch/m68knommu/platform/coldfire/entry.S
+++ b/arch/m68knommu/platform/coldfire/entry.S
@@ -88,7 +88,7 @@ ENTRY(system_call)
88 movel %d2,PT_OFF_D0(%sp) /* on syscall entry */ 88 movel %d2,PT_OFF_D0(%sp) /* on syscall entry */
89 subql #4,%sp 89 subql #4,%sp
90 SAVE_SWITCH_STACK 90 SAVE_SWITCH_STACK
91 jbsr syscall_trace 91 jbsr syscall_trace_enter
92 RESTORE_SWITCH_STACK 92 RESTORE_SWITCH_STACK
93 addql #4,%sp 93 addql #4,%sp
94 movel %d3,%a0 94 movel %d3,%a0
@@ -96,7 +96,7 @@ ENTRY(system_call)
96 movel %d0,%sp@(PT_OFF_D0) /* save the return value */ 96 movel %d0,%sp@(PT_OFF_D0) /* save the return value */
97 subql #4,%sp /* dummy return address */ 97 subql #4,%sp /* dummy return address */
98 SAVE_SWITCH_STACK 98 SAVE_SWITCH_STACK
99 jbsr syscall_trace 99 jbsr syscall_trace_leave
100 100
101ret_from_signal: 101ret_from_signal:
102 RESTORE_SWITCH_STACK 102 RESTORE_SWITCH_STACK
diff --git a/arch/m68knommu/platform/coldfire/intc-2.c b/arch/m68knommu/platform/coldfire/intc-2.c
index 5598c8b8661f..85daa2b3001a 100644
--- a/arch/m68knommu/platform/coldfire/intc-2.c
+++ b/arch/m68knommu/platform/coldfire/intc-2.c
@@ -1,5 +1,11 @@
1/* 1/*
2 * intc-1.c 2 * intc-2.c
3 *
4 * General interrupt controller code for the many ColdFire cores that use
5 * interrupt controllers with 63 interrupt sources, organized as 56 fully-
6 * programmable + 7 fixed-level interrupt sources. This includes the 523x
7 * family, the 5270, 5271, 5274, 5275, and the 528x family which have two such
8 * controllers, and the 547x and 548x families which have only one of them.
3 * 9 *
4 * (C) Copyright 2009, Greg Ungerer <gerg@snapgear.com> 10 * (C) Copyright 2009, Greg Ungerer <gerg@snapgear.com>
5 * 11 *
@@ -19,21 +25,37 @@
19#include <asm/traps.h> 25#include <asm/traps.h>
20 26
21/* 27/*
22 * Each vector needs a unique priority and level asscoiated with it. 28 * Bit definitions for the ICR family of registers.
29 */
30#define MCFSIM_ICR_LEVEL(l) ((l)<<3) /* Level l intr */
31#define MCFSIM_ICR_PRI(p) (p) /* Priority p intr */
32
33/*
34 * Each vector needs a unique priority and level associated with it.
23 * We don't really care so much what they are, we don't rely on the 35 * We don't really care so much what they are, we don't rely on the
24 * tranditional priority interrupt scheme of the m68k/ColdFire. 36 * traditional priority interrupt scheme of the m68k/ColdFire.
25 */ 37 */
26static u8 intc_intpri = 0x36; 38static u8 intc_intpri = MCFSIM_ICR_LEVEL(6) | MCFSIM_ICR_PRI(6);
39
40#ifdef MCFICM_INTC1
41#define NR_VECS 128
42#else
43#define NR_VECS 64
44#endif
27 45
28static void intc_irq_mask(unsigned int irq) 46static void intc_irq_mask(unsigned int irq)
29{ 47{
30 if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECBASE + 128)) { 48 if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECBASE + NR_VECS)) {
31 unsigned long imraddr; 49 unsigned long imraddr;
32 u32 val, imrbit; 50 u32 val, imrbit;
33 51
34 irq -= MCFINT_VECBASE; 52 irq -= MCFINT_VECBASE;
35 imraddr = MCF_IPSBAR; 53 imraddr = MCF_IPSBAR;
54#ifdef MCFICM_INTC1
36 imraddr += (irq & 0x40) ? MCFICM_INTC1 : MCFICM_INTC0; 55 imraddr += (irq & 0x40) ? MCFICM_INTC1 : MCFICM_INTC0;
56#else
57 imraddr += MCFICM_INTC0;
58#endif
37 imraddr += (irq & 0x20) ? MCFINTC_IMRH : MCFINTC_IMRL; 59 imraddr += (irq & 0x20) ? MCFINTC_IMRH : MCFINTC_IMRL;
38 imrbit = 0x1 << (irq & 0x1f); 60 imrbit = 0x1 << (irq & 0x1f);
39 61
@@ -44,13 +66,17 @@ static void intc_irq_mask(unsigned int irq)
44 66
45static void intc_irq_unmask(unsigned int irq) 67static void intc_irq_unmask(unsigned int irq)
46{ 68{
47 if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECBASE + 128)) { 69 if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECBASE + NR_VECS)) {
48 unsigned long intaddr, imraddr, icraddr; 70 unsigned long intaddr, imraddr, icraddr;
49 u32 val, imrbit; 71 u32 val, imrbit;
50 72
51 irq -= MCFINT_VECBASE; 73 irq -= MCFINT_VECBASE;
52 intaddr = MCF_IPSBAR; 74 intaddr = MCF_IPSBAR;
75#ifdef MCFICM_INTC1
53 intaddr += (irq & 0x40) ? MCFICM_INTC1 : MCFICM_INTC0; 76 intaddr += (irq & 0x40) ? MCFICM_INTC1 : MCFICM_INTC0;
77#else
78 intaddr += MCFICM_INTC0;
79#endif
54 imraddr = intaddr + ((irq & 0x20) ? MCFINTC_IMRH : MCFINTC_IMRL); 80 imraddr = intaddr + ((irq & 0x20) ? MCFINTC_IMRH : MCFINTC_IMRL);
55 icraddr = intaddr + MCFINTC_ICR0 + (irq & 0x3f); 81 icraddr = intaddr + MCFINTC_ICR0 + (irq & 0x3f);
56 imrbit = 0x1 << (irq & 0x1f); 82 imrbit = 0x1 << (irq & 0x1f);
@@ -67,10 +93,16 @@ static void intc_irq_unmask(unsigned int irq)
67 } 93 }
68} 94}
69 95
96static int intc_irq_set_type(unsigned int irq, unsigned int type)
97{
98 return 0;
99}
100
70static struct irq_chip intc_irq_chip = { 101static struct irq_chip intc_irq_chip = {
71 .name = "CF-INTC", 102 .name = "CF-INTC",
72 .mask = intc_irq_mask, 103 .mask = intc_irq_mask,
73 .unmask = intc_irq_unmask, 104 .unmask = intc_irq_unmask,
105 .set_type = intc_irq_set_type,
74}; 106};
75 107
76void __init init_IRQ(void) 108void __init init_IRQ(void)
@@ -81,13 +113,14 @@ void __init init_IRQ(void)
81 113
82 /* Mask all interrupt sources */ 114 /* Mask all interrupt sources */
83 __raw_writel(0x1, MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_IMRL); 115 __raw_writel(0x1, MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_IMRL);
116#ifdef MCFICM_INTC1
84 __raw_writel(0x1, MCF_IPSBAR + MCFICM_INTC1 + MCFINTC_IMRL); 117 __raw_writel(0x1, MCF_IPSBAR + MCFICM_INTC1 + MCFINTC_IMRL);
118#endif
85 119
86 for (irq = 0; (irq < NR_IRQS); irq++) { 120 for (irq = 0; (irq < NR_IRQS); irq++) {
87 irq_desc[irq].status = IRQ_DISABLED; 121 set_irq_chip(irq, &intc_irq_chip);
88 irq_desc[irq].action = NULL; 122 set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
89 irq_desc[irq].depth = 1; 123 set_irq_handler(irq, handle_level_irq);
90 irq_desc[irq].chip = &intc_irq_chip;
91 } 124 }
92} 125}
93 126
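
The mask and unmask paths above both decode a vector into (controller bank, IMR half, bit): bit 6 of the offset from MCFINT_VECBASE picks INTC0 or INTC1 where a second controller exists, bit 5 picks IMRH or IMRL, and the low five bits give the bit position; writing 0x1 to IMRL in init_IRQ() sets the controller's mask-all bit, which is how every source is silenced in one store. Below is a small standalone model of that decode, compilable on its own; the base offsets are made-up stand-ins for the real MCFICM_*/MCFINTC_* values from the per-SoC headers.

#include <stdint.h>
#include <stdio.h>

/* Stand-in values, for illustration only (the real ones are per-SoC). */
#define MCF_IPSBAR      0x40000000ul
#define MCFINT_VECBASE  64
#define MCFICM_INTC0    0x0c00
#define MCFICM_INTC1    0x0d00   /* second controller on dual-INTC parts */
#define MCFINTC_IMRH    0x08
#define MCFINTC_IMRL    0x0c

/* Mirrors the address/bit computation in intc_irq_mask()/intc_irq_unmask(). */
static void decode_vector(unsigned int irq)
{
	unsigned int off = irq - MCFINT_VECBASE;
	unsigned long imraddr = MCF_IPSBAR;

	imraddr += (off & 0x40) ? MCFICM_INTC1 : MCFICM_INTC0;
	imraddr += (off & 0x20) ? MCFINTC_IMRH : MCFINTC_IMRL;
	printf("irq %u -> IMR at 0x%08lx, bit %u\n", irq, imraddr, off & 0x1f);
}

int main(void)
{
	decode_vector(MCFINT_VECBASE + 13);   /* INTC0, low word of the mask */
	decode_vector(MCFINT_VECBASE + 0x67); /* INTC1, IMRH, bit 7 */
	return 0;
}

With NR_VECS limiting the range to 64 on single-controller parts, the same code now also serves the 547x/548x family added by this series.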
diff --git a/arch/m68knommu/platform/coldfire/intc-simr.c b/arch/m68knommu/platform/coldfire/intc-simr.c
index 1b01e79c2f63..bb7048636140 100644
--- a/arch/m68knommu/platform/coldfire/intc-simr.c
+++ b/arch/m68knommu/platform/coldfire/intc-simr.c
@@ -1,6 +1,8 @@
1/* 1/*
2 * intc-simr.c 2 * intc-simr.c
3 * 3 *
4 * Interrupt controller code for the ColdFire 5208, 5207 & 532x parts.
5 *
4 * (C) Copyright 2009, Greg Ungerer <gerg@snapgear.com> 6 * (C) Copyright 2009, Greg Ungerer <gerg@snapgear.com>
5 * 7 *
6 * This file is subject to the terms and conditions of the GNU General Public 8 * This file is subject to the terms and conditions of the GNU General Public
@@ -68,11 +70,9 @@ void __init init_IRQ(void)
68 __raw_writeb(0xff, MCFINTC1_SIMR); 70 __raw_writeb(0xff, MCFINTC1_SIMR);
69 71
70 for (irq = 0; (irq < NR_IRQS); irq++) { 72 for (irq = 0; (irq < NR_IRQS); irq++) {
71 irq_desc[irq].status = IRQ_DISABLED; 73 set_irq_chip(irq, &intc_irq_chip);
72 irq_desc[irq].action = NULL; 74 set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
73 irq_desc[irq].depth = 1; 75 set_irq_handler(irq, handle_level_irq);
74 irq_desc[irq].chip = &intc_irq_chip;
75 intc_irq_set_type(irq, 0);
76 } 76 }
77} 77}
78 78
diff --git a/arch/m68knommu/platform/coldfire/intc.c b/arch/m68knommu/platform/coldfire/intc.c
index a4560c86db71..60d2fcbe182b 100644
--- a/arch/m68knommu/platform/coldfire/intc.c
+++ b/arch/m68knommu/platform/coldfire/intc.c
@@ -143,11 +143,9 @@ void __init init_IRQ(void)
143 mcf_maskimr(0xffffffff); 143 mcf_maskimr(0xffffffff);
144 144
145 for (irq = 0; (irq < NR_IRQS); irq++) { 145 for (irq = 0; (irq < NR_IRQS); irq++) {
146 irq_desc[irq].status = IRQ_DISABLED; 146 set_irq_chip(irq, &intc_irq_chip);
147 irq_desc[irq].action = NULL; 147 set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
148 irq_desc[irq].depth = 1; 148 set_irq_handler(irq, handle_level_irq);
149 irq_desc[irq].chip = &intc_irq_chip;
150 intc_irq_set_type(irq, 0);
151 } 149 }
152} 150}
153 151
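
All three ColdFire interrupt controllers now register each vector with the generic IRQ layer (a chip plus handle_level_irq) instead of poking irq_desc[] fields directly. Consumers are unaffected: a driver still claims a vector with request_irq(), and the level-flow handler keeps the source masked while the handler runs. A minimal sketch of that usage, where MY_VECTOR and the device name are hypothetical:

#include <linux/module.h>
#include <linux/interrupt.h>

#define MY_VECTOR	(64 + 13)	/* hypothetical vector number */

static irqreturn_t my_handler(int irq, void *dev_id)
{
	/* handle_level_irq() keeps the source masked while we run,
	 * so only the device itself needs acknowledging here. */
	return IRQ_HANDLED;
}

static int __init my_init(void)
{
	return request_irq(MY_VECTOR, my_handler, 0, "mydev", NULL);
}

static void __exit my_exit(void)
{
	free_irq(MY_VECTOR, NULL);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");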
diff --git a/arch/m68knommu/platform/coldfire/sltimers.c b/arch/m68knommu/platform/coldfire/sltimers.c
new file mode 100644
index 000000000000..0a1b937c3e18
--- /dev/null
+++ b/arch/m68knommu/platform/coldfire/sltimers.c
@@ -0,0 +1,145 @@
1/***************************************************************************/
2
3/*
4 * sltimers.c -- generic ColdFire slice timer support.
5 *
6 * Copyright (C) 2009-2010, Philippe De Muyter <phdm@macqel.be>
7 * based on
8 * timers.c -- generic ColdFire hardware timer support.
9 * Copyright (C) 1999-2008, Greg Ungerer <gerg@snapgear.com>
10 */
11
12/***************************************************************************/
13
14#include <linux/kernel.h>
15#include <linux/init.h>
16#include <linux/sched.h>
17#include <linux/interrupt.h>
18#include <linux/irq.h>
19#include <linux/profile.h>
20#include <linux/clocksource.h>
21#include <asm/io.h>
22#include <asm/traps.h>
23#include <asm/machdep.h>
24#include <asm/coldfire.h>
25#include <asm/mcfslt.h>
26#include <asm/mcfsim.h>
27
28/***************************************************************************/
29
30#ifdef CONFIG_HIGHPROFILE
31
32/*
33 * By default use Slice Timer 1 as the profiler clock timer.
34 */
35#define PA(a) (MCF_MBAR + MCFSLT_TIMER1 + (a))
36
37/*
38 * Choose a reasonably fast profile timer. Make it an odd value to
39 * try and get good coverage of kernel operations.
40 */
41#define PROFILEHZ 1013
42
43irqreturn_t mcfslt_profile_tick(int irq, void *dummy)
44{
45 /* Reset Slice Timer 1 */
46 __raw_writel(MCFSLT_SSR_BE | MCFSLT_SSR_TE, PA(MCFSLT_SSR));
47 if (current->pid)
48 profile_tick(CPU_PROFILING);
49 return IRQ_HANDLED;
50}
51
52static struct irqaction mcfslt_profile_irq = {
53 .name = "profile timer",
54 .flags = IRQF_DISABLED | IRQF_TIMER,
55 .handler = mcfslt_profile_tick,
56};
57
58void mcfslt_profile_init(void)
59{
60 printk(KERN_INFO "PROFILE: lodging TIMER 1 @ %dHz as profile timer\n",
61 PROFILEHZ);
62
63 setup_irq(MCF_IRQ_PROFILER, &mcfslt_profile_irq);
64
 65	/* Set up TIMER 1 as high speed profile clock */
66 __raw_writel(MCF_BUSCLK / PROFILEHZ - 1, PA(MCFSLT_STCNT));
67 __raw_writel(MCFSLT_SCR_RUN | MCFSLT_SCR_IEN | MCFSLT_SCR_TEN,
68 PA(MCFSLT_SCR));
69
70}
71
72#endif /* CONFIG_HIGHPROFILE */
73
74/***************************************************************************/
75
76/*
77 * By default use Slice Timer 0 as the system clock timer.
78 */
79#define TA(a) (MCF_MBAR + MCFSLT_TIMER0 + (a))
80
81static u32 mcfslt_cycles_per_jiffy;
82static u32 mcfslt_cnt;
83
84static irqreturn_t mcfslt_tick(int irq, void *dummy)
85{
86 /* Reset Slice Timer 0 */
87 __raw_writel(MCFSLT_SSR_BE | MCFSLT_SSR_TE, TA(MCFSLT_SSR));
88 mcfslt_cnt += mcfslt_cycles_per_jiffy;
89 return arch_timer_interrupt(irq, dummy);
90}
91
92static struct irqaction mcfslt_timer_irq = {
93 .name = "timer",
94 .flags = IRQF_DISABLED | IRQF_TIMER,
95 .handler = mcfslt_tick,
96};
97
98static cycle_t mcfslt_read_clk(struct clocksource *cs)
99{
100 unsigned long flags;
101 u32 cycles;
102 u16 scnt;
103
104 local_irq_save(flags);
105 scnt = __raw_readl(TA(MCFSLT_SCNT));
106 cycles = mcfslt_cnt;
107 local_irq_restore(flags);
108
109	/* subtract because slice timers count down */
110 return cycles - scnt;
111}
112
113static struct clocksource mcfslt_clk = {
114 .name = "slt",
115 .rating = 250,
116 .read = mcfslt_read_clk,
117 .shift = 20,
118 .mask = CLOCKSOURCE_MASK(32),
119 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
120};
121
122void hw_timer_init(void)
123{
124 mcfslt_cycles_per_jiffy = MCF_BUSCLK / HZ;
125 /*
126 * The coldfire slice timer (SLT) runs from STCNT to 0 included,
127 * then STCNT again and so on. It counts thus actually
128 * STCNT + 1 steps for 1 tick, not STCNT. So if you want
129 * n cycles, initialize STCNT with n - 1.
130 */
131 __raw_writel(mcfslt_cycles_per_jiffy - 1, TA(MCFSLT_STCNT));
132 __raw_writel(MCFSLT_SCR_RUN | MCFSLT_SCR_IEN | MCFSLT_SCR_TEN,
133 TA(MCFSLT_SCR));
134 /* initialize mcfslt_cnt knowing that slice timers count down */
135 mcfslt_cnt = mcfslt_cycles_per_jiffy;
136
137 setup_irq(MCF_IRQ_TIMER, &mcfslt_timer_irq);
138
139 mcfslt_clk.mult = clocksource_hz2mult(MCF_BUSCLK, mcfslt_clk.shift);
140 clocksource_register(&mcfslt_clk);
141
142#ifdef CONFIG_HIGHPROFILE
143 mcfslt_profile_init();
144#endif
145}
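
The comment in hw_timer_init() carries the two facts the rest of the file depends on: a slice timer counts STCNT+1 bus cycles per period (STCNT down to 0 inclusive), so the reload value is n-1, and because it counts down the clocksource read subtracts the live SCNT value from the accumulated count. A standalone model of that arithmetic, assuming a 100 MHz bus clock and HZ=100 purely for the numbers (the real driver uses MCF_BUSCLK and the configured HZ):

#include <stdint.h>
#include <stdio.h>

#define BUSCLK	100000000u	/* assumed 100 MHz bus clock */
#define HZ	100u		/* assumed tick rate */

int main(void)
{
	uint32_t cycles_per_jiffy = BUSCLK / HZ;	/* 1000000 cycles/tick */
	uint32_t stcnt = cycles_per_jiffy - 1;		/* reload value: n - 1 */
	uint32_t cnt = cycles_per_jiffy;		/* mcfslt_cnt after init */
	uint32_t scnt = 250000;				/* pretend SCNT reads this */

	printf("STCNT = %u, i.e. %u steps per tick\n", stcnt, stcnt + 1);
	/* same subtraction as mcfslt_read_clk(): elapsed = cnt - scnt */
	printf("clocksource: %u cycles elapsed this tick\n", cnt - scnt);
	return 0;
}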
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 4c9f402295dd..784cf822963a 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2196,10 +2196,14 @@ config TC
2196 bool "TURBOchannel support" 2196 bool "TURBOchannel support"
2197 depends on MACH_DECSTATION 2197 depends on MACH_DECSTATION
2198 help 2198 help
2199 TurboChannel is a DEC (now Compaq (now HP)) bus for Alpha and MIPS 2199 TURBOchannel is a DEC (now Compaq (now HP)) bus for Alpha and MIPS
2200 processors. Documentation on writing device drivers for TurboChannel 2200 processors. TURBOchannel programming specifications are available
2201 is available at: 2201 at:
2202 <http://www.cs.arizona.edu/computer.help/policy/DIGITAL_unix/AA-PS3HD-TET1_html/TITLE.html>. 2202 <ftp://ftp.hp.com/pub/alphaserver/archive/triadd/>
2203 and:
2204 <http://www.computer-refuge.org/classiccmp/ftp.digital.com/pub/DEC/TriAdd/>
2205 Linux driver support status is documented at:
2206 <http://www.linux-mips.org/wiki/DECstation>
2203 2207
2204#config ACCESSBUS 2208#config ACCESSBUS
2205# bool "Access.Bus support" 2209# bool "Access.Bus support"
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 47842b7d26ae..ec3faa413f3b 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -3,7 +3,6 @@
3 * 3 *
4 * MIPS floating point support 4 * MIPS floating point support
5 * Copyright (C) 1994-2000 Algorithmics Ltd. 5 * Copyright (C) 1994-2000 Algorithmics Ltd.
6 * http://www.algor.co.uk
7 * 6 *
8 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com 7 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
9 * Copyright (C) 2000 MIPS Technologies, Inc. 8 * Copyright (C) 2000 MIPS Technologies, Inc.
diff --git a/arch/mips/math-emu/dp_add.c b/arch/mips/math-emu/dp_add.c
index bcf73bb5c33a..b422fcad852a 100644
--- a/arch/mips/math-emu/dp_add.c
+++ b/arch/mips/math-emu/dp_add.c
@@ -4,7 +4,6 @@
4/* 4/*
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * http://www.algor.co.uk
8 * 7 *
9 * ######################################################################## 8 * ########################################################################
10 * 9 *
diff --git a/arch/mips/math-emu/dp_cmp.c b/arch/mips/math-emu/dp_cmp.c
index 8ab4f320a478..0f32486b0ed9 100644
--- a/arch/mips/math-emu/dp_cmp.c
+++ b/arch/mips/math-emu/dp_cmp.c
@@ -4,7 +4,6 @@
4/* 4/*
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * http://www.algor.co.uk
8 * 7 *
9 * ######################################################################## 8 * ########################################################################
10 * 9 *
diff --git a/arch/mips/math-emu/dp_div.c b/arch/mips/math-emu/dp_div.c
index 6acedce3b32d..a1bce1b7c09c 100644
--- a/arch/mips/math-emu/dp_div.c
+++ b/arch/mips/math-emu/dp_div.c
@@ -4,7 +4,6 @@
4/* 4/*
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * http://www.algor.co.uk
8 * 7 *
9 * ######################################################################## 8 * ########################################################################
10 * 9 *
diff --git a/arch/mips/math-emu/dp_fint.c b/arch/mips/math-emu/dp_fint.c
index 39a71de16f47..88571288c9e0 100644
--- a/arch/mips/math-emu/dp_fint.c
+++ b/arch/mips/math-emu/dp_fint.c
@@ -4,7 +4,6 @@
4/* 4/*
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * http://www.algor.co.uk
8 * 7 *
9 * ######################################################################## 8 * ########################################################################
10 * 9 *
diff --git a/arch/mips/math-emu/dp_flong.c b/arch/mips/math-emu/dp_flong.c
index f08f223e488a..14fc01ec742d 100644
--- a/arch/mips/math-emu/dp_flong.c
+++ b/arch/mips/math-emu/dp_flong.c
@@ -4,7 +4,6 @@
4/* 4/*
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * http://www.algor.co.uk
8 * 7 *
9 * ######################################################################## 8 * ########################################################################
10 * 9 *
diff --git a/arch/mips/math-emu/dp_frexp.c b/arch/mips/math-emu/dp_frexp.c
index e650cb10c947..cb15a5eaecbb 100644
--- a/arch/mips/math-emu/dp_frexp.c
+++ b/arch/mips/math-emu/dp_frexp.c
@@ -4,7 +4,6 @@
4/* 4/*
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * http://www.algor.co.uk
8 * 7 *
9 * ######################################################################## 8 * ########################################################################
10 * 9 *
diff --git a/arch/mips/math-emu/dp_fsp.c b/arch/mips/math-emu/dp_fsp.c
index 494d19ac7049..1dfbd92ba9d0 100644
--- a/arch/mips/math-emu/dp_fsp.c
+++ b/arch/mips/math-emu/dp_fsp.c
@@ -4,7 +4,6 @@
4/* 4/*
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * http://www.algor.co.uk
8 * 7 *
9 * ######################################################################## 8 * ########################################################################
10 * 9 *
diff --git a/arch/mips/math-emu/dp_logb.c b/arch/mips/math-emu/dp_logb.c
index 603388621ca5..151127e59f5c 100644
--- a/arch/mips/math-emu/dp_logb.c
+++ b/arch/mips/math-emu/dp_logb.c
@@ -4,7 +4,6 @@
4/* 4/*
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * http://www.algor.co.uk
8 * 7 *
9 * ######################################################################## 8 * ########################################################################
10 * 9 *
diff --git a/arch/mips/math-emu/dp_modf.c b/arch/mips/math-emu/dp_modf.c
index a8570e5c3efc..b01f9cf6d402 100644
--- a/arch/mips/math-emu/dp_modf.c
+++ b/arch/mips/math-emu/dp_modf.c
@@ -4,7 +4,6 @@
4/* 4/*
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * http://www.algor.co.uk
8 * 7 *
9 * ######################################################################## 8 * ########################################################################
10 * 9 *
diff --git a/arch/mips/math-emu/dp_mul.c b/arch/mips/math-emu/dp_mul.c
index 48908a809c17..aa566e785f5a 100644
--- a/arch/mips/math-emu/dp_mul.c
+++ b/arch/mips/math-emu/dp_mul.c
@@ -4,7 +4,6 @@
4/* 4/*
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * http://www.algor.co.uk
8 * 7 *
9 * ######################################################################## 8 * ########################################################################
10 * 9 *
diff --git a/arch/mips/math-emu/dp_scalb.c b/arch/mips/math-emu/dp_scalb.c
index b84e6338330e..6f5df438dda8 100644
--- a/arch/mips/math-emu/dp_scalb.c
+++ b/arch/mips/math-emu/dp_scalb.c
@@ -4,7 +4,6 @@
4/* 4/*
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * http://www.algor.co.uk
8 * 7 *
9 * ######################################################################## 8 * ########################################################################
10 * 9 *
diff --git a/arch/mips/math-emu/dp_simple.c b/arch/mips/math-emu/dp_simple.c
index b90974246e5b..79ce2673a714 100644
--- a/arch/mips/math-emu/dp_simple.c
+++ b/arch/mips/math-emu/dp_simple.c
@@ -4,7 +4,6 @@
4/* 4/*
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * http://www.algor.co.uk
8 * 7 *
9 * ######################################################################## 8 * ########################################################################
10 * 9 *
diff --git a/arch/mips/math-emu/dp_sqrt.c b/arch/mips/math-emu/dp_sqrt.c
index 032328c49888..a2a51b87ae8f 100644
--- a/arch/mips/math-emu/dp_sqrt.c
+++ b/arch/mips/math-emu/dp_sqrt.c
@@ -4,7 +4,6 @@
4/* 4/*
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * http://www.algor.co.uk
8 * 7 *
9 * ######################################################################## 8 * ########################################################################
10 * 9 *
diff --git a/arch/mips/math-emu/dp_sub.c b/arch/mips/math-emu/dp_sub.c
index a2127d685a0d..0de098cbc77b 100644
--- a/arch/mips/math-emu/dp_sub.c
+++ b/arch/mips/math-emu/dp_sub.c
@@ -4,7 +4,6 @@
4/* 4/*
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * http://www.algor.co.uk
8 * 7 *
9 * ######################################################################## 8 * ########################################################################
10 * 9 *
diff --git a/arch/mips/math-emu/dp_tint.c b/arch/mips/math-emu/dp_tint.c
index 24478623c117..0ebe8598b94a 100644
--- a/arch/mips/math-emu/dp_tint.c
+++ b/arch/mips/math-emu/dp_tint.c
@@ -4,7 +4,6 @@
4/* 4/*
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * http://www.algor.co.uk
8 * 7 *
9 * ######################################################################## 8 * ########################################################################
10 * 9 *
diff --git a/arch/mips/math-emu/dp_tlong.c b/arch/mips/math-emu/dp_tlong.c
index 0f07ec2be3f9..133ce2ba0012 100644
--- a/arch/mips/math-emu/dp_tlong.c
+++ b/arch/mips/math-emu/dp_tlong.c
@@ -4,7 +4,6 @@
4/* 4/*
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * http://www.algor.co.uk
8 * 7 *
9 * ######################################################################## 8 * ########################################################################
10 * 9 *
diff --git a/arch/mips/math-emu/ieee754.c b/arch/mips/math-emu/ieee754.c
index cb1b6822711a..30554e1c67b4 100644
--- a/arch/mips/math-emu/ieee754.c
+++ b/arch/mips/math-emu/ieee754.c
@@ -9,7 +9,6 @@
9/* 9/*
10 * MIPS floating point support 10 * MIPS floating point support
11 * Copyright (C) 1994-2000 Algorithmics Ltd. 11 * Copyright (C) 1994-2000 Algorithmics Ltd.
12 * http://www.algor.co.uk
13 * 12 *
14 * ######################################################################## 13 * ########################################################################
15 * 14 *
diff --git a/arch/mips/math-emu/ieee754.h b/arch/mips/math-emu/ieee754.h
index dd917332792c..22796e012060 100644
--- a/arch/mips/math-emu/ieee754.h
+++ b/arch/mips/math-emu/ieee754.h
@@ -1,7 +1,6 @@
1/* 1/*
2 * MIPS floating point support 2 * MIPS floating point support
3 * Copyright (C) 1994-2000 Algorithmics Ltd. 3 * Copyright (C) 1994-2000 Algorithmics Ltd.
4 * http://www.algor.co.uk
5 * 4 *
6 * This program is free software; you can distribute it and/or modify it 5 * This program is free software; you can distribute it and/or modify it
7 * under the terms of the GNU General Public License (Version 2) as 6 * under the terms of the GNU General Public License (Version 2) as
diff --git a/arch/mips/math-emu/ieee754d.c b/arch/mips/math-emu/ieee754d.c
index a0325337b76c..9599bdd32585 100644
--- a/arch/mips/math-emu/ieee754d.c
+++ b/arch/mips/math-emu/ieee754d.c
@@ -4,7 +4,6 @@
4 * MIPS floating point support 4 * MIPS floating point support
5 * 5 *
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * http://www.algor.co.uk
8 * 7 *
9 * This program is free software; you can distribute it and/or modify it 8 * This program is free software; you can distribute it and/or modify it
10 * under the terms of the GNU General Public License (Version 2) as 9 * under the terms of the GNU General Public License (Version 2) as
diff --git a/arch/mips/math-emu/ieee754dp.c b/arch/mips/math-emu/ieee754dp.c
index 2f22fd7fd784..080b5ca03fc6 100644
--- a/arch/mips/math-emu/ieee754dp.c
+++ b/arch/mips/math-emu/ieee754dp.c
@@ -4,7 +4,6 @@
4/* 4/*
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * http://www.algor.co.uk
8 * 7 *
9 * ######################################################################## 8 * ########################################################################
10 * 9 *
diff --git a/arch/mips/math-emu/ieee754dp.h b/arch/mips/math-emu/ieee754dp.h
index 762786538449..f139c724c59a 100644
--- a/arch/mips/math-emu/ieee754dp.h
+++ b/arch/mips/math-emu/ieee754dp.h
@@ -5,7 +5,6 @@
5/* 5/*
6 * MIPS floating point support 6 * MIPS floating point support
7 * Copyright (C) 1994-2000 Algorithmics Ltd. 7 * Copyright (C) 1994-2000 Algorithmics Ltd.
8 * http://www.algor.co.uk
9 * 8 *
10 * ######################################################################## 9 * ########################################################################
11 * 10 *
diff --git a/arch/mips/math-emu/ieee754int.h b/arch/mips/math-emu/ieee754int.h
index 1a846c5425cd..2701d9500959 100644
--- a/arch/mips/math-emu/ieee754int.h
+++ b/arch/mips/math-emu/ieee754int.h
@@ -5,7 +5,6 @@
5/* 5/*
6 * MIPS floating point support 6 * MIPS floating point support
7 * Copyright (C) 1994-2000 Algorithmics Ltd. 7 * Copyright (C) 1994-2000 Algorithmics Ltd.
8 * http://www.algor.co.uk
9 * 8 *
10 * ######################################################################## 9 * ########################################################################
11 * 10 *
diff --git a/arch/mips/math-emu/ieee754m.c b/arch/mips/math-emu/ieee754m.c
index d66896cd8f21..24190f3c9dd6 100644
--- a/arch/mips/math-emu/ieee754m.c
+++ b/arch/mips/math-emu/ieee754m.c
@@ -4,7 +4,6 @@
4/* 4/*
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * http://www.algor.co.uk
8 * 7 *
9 * ######################################################################## 8 * ########################################################################
10 * 9 *
diff --git a/arch/mips/math-emu/ieee754sp.c b/arch/mips/math-emu/ieee754sp.c
index a19b72185ab9..271d00d6113a 100644
--- a/arch/mips/math-emu/ieee754sp.c
+++ b/arch/mips/math-emu/ieee754sp.c
@@ -4,7 +4,6 @@
4/* 4/*
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * http://www.algor.co.uk
8 * 7 *
9 * ######################################################################## 8 * ########################################################################
10 * 9 *
diff --git a/arch/mips/math-emu/ieee754sp.h b/arch/mips/math-emu/ieee754sp.h
index d9e3586b5bce..754fd54649b5 100644
--- a/arch/mips/math-emu/ieee754sp.h
+++ b/arch/mips/math-emu/ieee754sp.h
@@ -5,7 +5,6 @@
5/* 5/*
6 * MIPS floating point support 6 * MIPS floating point support
7 * Copyright (C) 1994-2000 Algorithmics Ltd. 7 * Copyright (C) 1994-2000 Algorithmics Ltd.
8 * http://www.algor.co.uk
9 * 8 *
10 * ######################################################################## 9 * ########################################################################
11 * 10 *
diff --git a/arch/mips/math-emu/ieee754xcpt.c b/arch/mips/math-emu/ieee754xcpt.c
index e02423a0ae23..b99a693c05af 100644
--- a/arch/mips/math-emu/ieee754xcpt.c
+++ b/arch/mips/math-emu/ieee754xcpt.c
@@ -1,7 +1,6 @@
1/* 1/*
2 * MIPS floating point support 2 * MIPS floating point support
3 * Copyright (C) 1994-2000 Algorithmics Ltd. 3 * Copyright (C) 1994-2000 Algorithmics Ltd.
4 * http://www.algor.co.uk
5 * 4 *
6 * ######################################################################## 5 * ########################################################################
7 * 6 *
diff --git a/arch/mips/math-emu/sp_add.c b/arch/mips/math-emu/sp_add.c
index d8c4211bcfbe..ae1a327ccac0 100644
--- a/arch/mips/math-emu/sp_add.c
+++ b/arch/mips/math-emu/sp_add.c
@@ -4,7 +4,6 @@
4/* 4/*
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * http://www.algor.co.uk
8 * 7 *
9 * ######################################################################## 8 * ########################################################################
10 * 9 *
diff --git a/arch/mips/math-emu/sp_cmp.c b/arch/mips/math-emu/sp_cmp.c
index d3eff6b04b5a..716cf37e2465 100644
--- a/arch/mips/math-emu/sp_cmp.c
+++ b/arch/mips/math-emu/sp_cmp.c
@@ -4,7 +4,6 @@
4/* 4/*
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * http://www.algor.co.uk
8 * 7 *
9 * ######################################################################## 8 * ########################################################################
10 * 9 *
diff --git a/arch/mips/math-emu/sp_div.c b/arch/mips/math-emu/sp_div.c
index 2b437fcfdad9..d7747928c954 100644
--- a/arch/mips/math-emu/sp_div.c
+++ b/arch/mips/math-emu/sp_div.c
@@ -4,7 +4,6 @@
4/* 4/*
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * http://www.algor.co.uk
8 * 7 *
9 * ######################################################################## 8 * ########################################################################
10 * 9 *
diff --git a/arch/mips/math-emu/sp_fdp.c b/arch/mips/math-emu/sp_fdp.c
index 4093723d1aa5..e1515aae0166 100644
--- a/arch/mips/math-emu/sp_fdp.c
+++ b/arch/mips/math-emu/sp_fdp.c
@@ -4,7 +4,6 @@
4/* 4/*
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * http://www.algor.co.uk
8 * 7 *
9 * ######################################################################## 8 * ########################################################################
10 * 9 *
diff --git a/arch/mips/math-emu/sp_fint.c b/arch/mips/math-emu/sp_fint.c
index e88e125e01c2..9694d6c016cb 100644
--- a/arch/mips/math-emu/sp_fint.c
+++ b/arch/mips/math-emu/sp_fint.c
@@ -4,7 +4,6 @@
4/* 4/*
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * http://www.algor.co.uk
8 * 7 *
9 * ######################################################################## 8 * ########################################################################
10 * 9 *
diff --git a/arch/mips/math-emu/sp_flong.c b/arch/mips/math-emu/sp_flong.c
index 26d6919a269a..16a651f29865 100644
--- a/arch/mips/math-emu/sp_flong.c
+++ b/arch/mips/math-emu/sp_flong.c
@@ -4,7 +4,6 @@
4/* 4/*
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * http://www.algor.co.uk
8 * 7 *
9 * ######################################################################## 8 * ########################################################################
10 * 9 *
diff --git a/arch/mips/math-emu/sp_frexp.c b/arch/mips/math-emu/sp_frexp.c
index 359c6483dbfa..5bc993c30044 100644
--- a/arch/mips/math-emu/sp_frexp.c
+++ b/arch/mips/math-emu/sp_frexp.c
@@ -4,7 +4,6 @@
4/* 4/*
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * http://www.algor.co.uk
8 * 7 *
9 * ######################################################################## 8 * ########################################################################
10 * 9 *
diff --git a/arch/mips/math-emu/sp_logb.c b/arch/mips/math-emu/sp_logb.c
index 3c337219ca32..9c14e0c75bd2 100644
--- a/arch/mips/math-emu/sp_logb.c
+++ b/arch/mips/math-emu/sp_logb.c
@@ -4,7 +4,6 @@
4/* 4/*
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * http://www.algor.co.uk
8 * 7 *
9 * ######################################################################## 8 * ########################################################################
10 * 9 *
diff --git a/arch/mips/math-emu/sp_modf.c b/arch/mips/math-emu/sp_modf.c
index 76568946b4c0..25a0fbaa0556 100644
--- a/arch/mips/math-emu/sp_modf.c
+++ b/arch/mips/math-emu/sp_modf.c
@@ -4,7 +4,6 @@
4/* 4/*
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * http://www.algor.co.uk
8 * 7 *
9 * ######################################################################## 8 * ########################################################################
10 * 9 *
diff --git a/arch/mips/math-emu/sp_mul.c b/arch/mips/math-emu/sp_mul.c
index 3f070f82212f..c06bb4022be5 100644
--- a/arch/mips/math-emu/sp_mul.c
+++ b/arch/mips/math-emu/sp_mul.c
@@ -4,7 +4,6 @@
4/* 4/*
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * http://www.algor.co.uk
8 * 7 *
9 * ######################################################################## 8 * ########################################################################
10 * 9 *
diff --git a/arch/mips/math-emu/sp_scalb.c b/arch/mips/math-emu/sp_scalb.c
index 44ceb87ea944..dd76196984c8 100644
--- a/arch/mips/math-emu/sp_scalb.c
+++ b/arch/mips/math-emu/sp_scalb.c
@@ -4,7 +4,6 @@
4/* 4/*
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * http://www.algor.co.uk
8 * 7 *
9 * ######################################################################## 8 * ########################################################################
10 * 9 *
diff --git a/arch/mips/math-emu/sp_simple.c b/arch/mips/math-emu/sp_simple.c
index 2fd53c920e99..ae4fcfafd853 100644
--- a/arch/mips/math-emu/sp_simple.c
+++ b/arch/mips/math-emu/sp_simple.c
@@ -4,7 +4,6 @@
4/* 4/*
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * http://www.algor.co.uk
8 * 7 *
9 * ######################################################################## 8 * ########################################################################
10 * 9 *
diff --git a/arch/mips/math-emu/sp_sqrt.c b/arch/mips/math-emu/sp_sqrt.c
index 8a934b9f7eb8..fed20175f5fb 100644
--- a/arch/mips/math-emu/sp_sqrt.c
+++ b/arch/mips/math-emu/sp_sqrt.c
@@ -4,7 +4,6 @@
4/* 4/*
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * http://www.algor.co.uk
8 * 7 *
9 * ######################################################################## 8 * ########################################################################
10 * 9 *
diff --git a/arch/mips/math-emu/sp_sub.c b/arch/mips/math-emu/sp_sub.c
index dbb802c1a086..886ed5bcfefb 100644
--- a/arch/mips/math-emu/sp_sub.c
+++ b/arch/mips/math-emu/sp_sub.c
@@ -4,7 +4,6 @@
4/* 4/*
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * http://www.algor.co.uk
8 * 7 *
9 * ######################################################################## 8 * ########################################################################
10 * 9 *
diff --git a/arch/mips/math-emu/sp_tint.c b/arch/mips/math-emu/sp_tint.c
index 352dc3a5f1af..0fe9acc7716e 100644
--- a/arch/mips/math-emu/sp_tint.c
+++ b/arch/mips/math-emu/sp_tint.c
@@ -4,7 +4,6 @@
4/* 4/*
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * http://www.algor.co.uk
8 * 7 *
9 * ######################################################################## 8 * ########################################################################
10 * 9 *
diff --git a/arch/mips/math-emu/sp_tlong.c b/arch/mips/math-emu/sp_tlong.c
index 92cd9c511a10..d0ca6e22be29 100644
--- a/arch/mips/math-emu/sp_tlong.c
+++ b/arch/mips/math-emu/sp_tlong.c
@@ -4,7 +4,6 @@
4/* 4/*
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * http://www.algor.co.uk
8 * 7 *
9 * ######################################################################## 8 * ########################################################################
10 * 9 *
diff --git a/arch/mips/pci/fixup-fuloong2e.c b/arch/mips/pci/fixup-fuloong2e.c
index 4f6d8da07f93..d5d4c018fb04 100644
--- a/arch/mips/pci/fixup-fuloong2e.c
+++ b/arch/mips/pci/fixup-fuloong2e.c
@@ -52,7 +52,7 @@ static void __init loongson2e_nec_fixup(struct pci_dev *pdev)
52{ 52{
53 unsigned int val; 53 unsigned int val;
54 54
55 /* Configues port 1, 2, 3, 4 to be validate*/ 55 /* Configures port 1, 2, 3, 4 to be validate*/
56 pci_read_config_dword(pdev, 0xe0, &val); 56 pci_read_config_dword(pdev, 0xe0, &val);
57 pci_write_config_dword(pdev, 0xe0, (val & ~7) | 0x4); 57 pci_write_config_dword(pdev, 0xe0, (val & ~7) | 0x4);
58 58
diff --git a/arch/mips/sibyte/common/sb_tbprof.c b/arch/mips/sibyte/common/sb_tbprof.c
index ca35b730d189..87ccdb4b5ac9 100644
--- a/arch/mips/sibyte/common/sb_tbprof.c
+++ b/arch/mips/sibyte/common/sb_tbprof.c
@@ -43,7 +43,7 @@
43#include <asm/sibyte/sb1250_scd.h> 43#include <asm/sibyte/sb1250_scd.h>
44#include <asm/sibyte/sb1250_int.h> 44#include <asm/sibyte/sb1250_int.h>
45#else 45#else
46#error invalid SiByte UART configuation 46#error invalid SiByte UART configuration
47#endif 47#endif
48 48
49#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) 49#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
diff --git a/arch/powerpc/include/asm/hydra.h b/arch/powerpc/include/asm/hydra.h
index 1ad4eed07fbe..5b0c98bd46ab 100644
--- a/arch/powerpc/include/asm/hydra.h
+++ b/arch/powerpc/include/asm/hydra.h
@@ -10,7 +10,7 @@
10 * 10 *
11 * © Copyright 1995 Apple Computer, Inc. All rights reserved. 11 * © Copyright 1995 Apple Computer, Inc. All rights reserved.
12 * 12 *
13 * It's available online from http://chrp.apple.com/MacTech.pdf. 13 * It's available online from http://www.cpu.lu/~mlan/ftp/MacTech.pdf
14 * You can obtain paper copies of this book from computer bookstores or by 14 * You can obtain paper copies of this book from computer bookstores or by
15 * writing Morgan Kaufmann Publishers, Inc., 340 Pine Street, Sixth Floor, San 15 * writing Morgan Kaufmann Publishers, Inc., 340 Pine Street, Sixth Floor, San
16 * Francisco, CA 94104. Reference ISBN 1-55860-393-X. 16 * Francisco, CA 94104. Reference ISBN 1-55860-393-X.
diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h
index 6c5547d82bbe..18ea6963ad77 100644
--- a/arch/powerpc/include/asm/kvm.h
+++ b/arch/powerpc/include/asm/kvm.h
@@ -86,5 +86,6 @@ struct kvm_guest_debug_arch {
86 86
87#define KVM_INTERRUPT_SET -1U 87#define KVM_INTERRUPT_SET -1U
88#define KVM_INTERRUPT_UNSET -2U 88#define KVM_INTERRUPT_UNSET -2U
89#define KVM_INTERRUPT_SET_LEVEL -3U
89 90
90#endif /* __LINUX_KVM_POWERPC_H */ 91#endif /* __LINUX_KVM_POWERPC_H */
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index c5ea4cda34b3..5b7504674397 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -58,6 +58,7 @@
58#define BOOK3S_INTERRUPT_INST_STORAGE 0x400 58#define BOOK3S_INTERRUPT_INST_STORAGE 0x400
59#define BOOK3S_INTERRUPT_INST_SEGMENT 0x480 59#define BOOK3S_INTERRUPT_INST_SEGMENT 0x480
60#define BOOK3S_INTERRUPT_EXTERNAL 0x500 60#define BOOK3S_INTERRUPT_EXTERNAL 0x500
61#define BOOK3S_INTERRUPT_EXTERNAL_LEVEL 0x501
61#define BOOK3S_INTERRUPT_ALIGNMENT 0x600 62#define BOOK3S_INTERRUPT_ALIGNMENT 0x600
62#define BOOK3S_INTERRUPT_PROGRAM 0x700 63#define BOOK3S_INTERRUPT_PROGRAM 0x700
63#define BOOK3S_INTERRUPT_FP_UNAVAIL 0x800 64#define BOOK3S_INTERRUPT_FP_UNAVAIL 0x800
@@ -84,7 +85,8 @@
84#define BOOK3S_IRQPRIO_EXTERNAL 13 85#define BOOK3S_IRQPRIO_EXTERNAL 13
85#define BOOK3S_IRQPRIO_DECREMENTER 14 86#define BOOK3S_IRQPRIO_DECREMENTER 14
86#define BOOK3S_IRQPRIO_PERFORMANCE_MONITOR 15 87#define BOOK3S_IRQPRIO_PERFORMANCE_MONITOR 15
87#define BOOK3S_IRQPRIO_MAX 16 88#define BOOK3S_IRQPRIO_EXTERNAL_LEVEL 16
89#define BOOK3S_IRQPRIO_MAX 17
88 90
89#define BOOK3S_HFLAG_DCBZ32 0x1 91#define BOOK3S_HFLAG_DCBZ32 0x1
90#define BOOK3S_HFLAG_SLB 0x2 92#define BOOK3S_HFLAG_SLB 0x2
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 8274a2d43925..d62e703f1214 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -38,15 +38,6 @@ struct kvmppc_slb {
38 bool class : 1; 38 bool class : 1;
39}; 39};
40 40
41struct kvmppc_sr {
42 u32 raw;
43 u32 vsid;
44 bool Ks : 1;
45 bool Kp : 1;
46 bool nx : 1;
47 bool valid : 1;
48};
49
50struct kvmppc_bat { 41struct kvmppc_bat {
51 u64 raw; 42 u64 raw;
52 u32 bepi; 43 u32 bepi;
@@ -69,6 +60,13 @@ struct kvmppc_sid_map {
69#define SID_MAP_NUM (1 << SID_MAP_BITS) 60#define SID_MAP_NUM (1 << SID_MAP_BITS)
70#define SID_MAP_MASK (SID_MAP_NUM - 1) 61#define SID_MAP_MASK (SID_MAP_NUM - 1)
71 62
63#ifdef CONFIG_PPC_BOOK3S_64
64#define SID_CONTEXTS 1
65#else
66#define SID_CONTEXTS 128
67#define VSID_POOL_SIZE (SID_CONTEXTS * 16)
68#endif
69
72struct kvmppc_vcpu_book3s { 70struct kvmppc_vcpu_book3s {
73 struct kvm_vcpu vcpu; 71 struct kvm_vcpu vcpu;
74 struct kvmppc_book3s_shadow_vcpu *shadow_vcpu; 72 struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
@@ -79,20 +77,22 @@ struct kvmppc_vcpu_book3s {
79 u64 vsid; 77 u64 vsid;
80 } slb_shadow[64]; 78 } slb_shadow[64];
81 u8 slb_shadow_max; 79 u8 slb_shadow_max;
82 struct kvmppc_sr sr[16];
83 struct kvmppc_bat ibat[8]; 80 struct kvmppc_bat ibat[8];
84 struct kvmppc_bat dbat[8]; 81 struct kvmppc_bat dbat[8];
85 u64 hid[6]; 82 u64 hid[6];
86 u64 gqr[8]; 83 u64 gqr[8];
87 int slb_nr; 84 int slb_nr;
88 u32 dsisr;
89 u64 sdr1; 85 u64 sdr1;
90 u64 hior; 86 u64 hior;
91 u64 msr_mask; 87 u64 msr_mask;
92 u64 vsid_first;
93 u64 vsid_next; 88 u64 vsid_next;
89#ifdef CONFIG_PPC_BOOK3S_32
90 u32 vsid_pool[VSID_POOL_SIZE];
91#else
92 u64 vsid_first;
94 u64 vsid_max; 93 u64 vsid_max;
95 int context_id; 94#endif
95 int context_id[SID_CONTEXTS];
96 ulong prog_flags; /* flags to inject when giving a 700 trap */ 96 ulong prog_flags; /* flags to inject when giving a 700 trap */
97}; 97};
98 98
@@ -131,9 +131,10 @@ extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
131 bool upper, u32 val); 131 bool upper, u32 val);
132extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr); 132extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
133extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu); 133extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
134extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
134 135
135extern u32 kvmppc_trampoline_lowmem; 136extern ulong kvmppc_trampoline_lowmem;
136extern u32 kvmppc_trampoline_enter; 137extern ulong kvmppc_trampoline_enter;
137extern void kvmppc_rmcall(ulong srr0, ulong srr1); 138extern void kvmppc_rmcall(ulong srr0, ulong srr1);
138extern void kvmppc_load_up_fpu(void); 139extern void kvmppc_load_up_fpu(void);
139extern void kvmppc_load_up_altivec(void); 140extern void kvmppc_load_up_altivec(void);
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index b0b23c007d6e..bba3b9b72a39 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -25,6 +25,7 @@
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/types.h> 26#include <linux/types.h>
27#include <linux/kvm_types.h> 27#include <linux/kvm_types.h>
28#include <linux/kvm_para.h>
28#include <asm/kvm_asm.h> 29#include <asm/kvm_asm.h>
29 30
30#define KVM_MAX_VCPUS 1 31#define KVM_MAX_VCPUS 1
@@ -41,12 +42,17 @@
41 42
42#define HPTEG_CACHE_NUM (1 << 15) 43#define HPTEG_CACHE_NUM (1 << 15)
43#define HPTEG_HASH_BITS_PTE 13 44#define HPTEG_HASH_BITS_PTE 13
45#define HPTEG_HASH_BITS_PTE_LONG 12
44#define HPTEG_HASH_BITS_VPTE 13 46#define HPTEG_HASH_BITS_VPTE 13
45#define HPTEG_HASH_BITS_VPTE_LONG 5 47#define HPTEG_HASH_BITS_VPTE_LONG 5
46#define HPTEG_HASH_NUM_PTE (1 << HPTEG_HASH_BITS_PTE) 48#define HPTEG_HASH_NUM_PTE (1 << HPTEG_HASH_BITS_PTE)
49#define HPTEG_HASH_NUM_PTE_LONG (1 << HPTEG_HASH_BITS_PTE_LONG)
47#define HPTEG_HASH_NUM_VPTE (1 << HPTEG_HASH_BITS_VPTE) 50#define HPTEG_HASH_NUM_VPTE (1 << HPTEG_HASH_BITS_VPTE)
48#define HPTEG_HASH_NUM_VPTE_LONG (1 << HPTEG_HASH_BITS_VPTE_LONG) 51#define HPTEG_HASH_NUM_VPTE_LONG (1 << HPTEG_HASH_BITS_VPTE_LONG)
49 52
53/* Physical Address Mask - allowed range of real mode RAM access */
54#define KVM_PAM 0x0fffffffffffffffULL
55
50struct kvm; 56struct kvm;
51struct kvm_run; 57struct kvm_run;
52struct kvm_vcpu; 58struct kvm_vcpu;
@@ -159,8 +165,10 @@ struct kvmppc_mmu {
159 165
160struct hpte_cache { 166struct hpte_cache {
161 struct hlist_node list_pte; 167 struct hlist_node list_pte;
168 struct hlist_node list_pte_long;
162 struct hlist_node list_vpte; 169 struct hlist_node list_vpte;
163 struct hlist_node list_vpte_long; 170 struct hlist_node list_vpte_long;
171 struct rcu_head rcu_head;
164 u64 host_va; 172 u64 host_va;
165 u64 pfn; 173 u64 pfn;
166 ulong slot; 174 ulong slot;
@@ -210,28 +218,20 @@ struct kvm_vcpu_arch {
210 u32 cr; 218 u32 cr;
211#endif 219#endif
212 220
213 ulong msr;
214#ifdef CONFIG_PPC_BOOK3S 221#ifdef CONFIG_PPC_BOOK3S
215 ulong shadow_msr; 222 ulong shadow_msr;
216 ulong hflags; 223 ulong hflags;
217 ulong guest_owned_ext; 224 ulong guest_owned_ext;
218#endif 225#endif
219 u32 mmucr; 226 u32 mmucr;
220 ulong sprg0;
221 ulong sprg1;
222 ulong sprg2;
223 ulong sprg3;
224 ulong sprg4; 227 ulong sprg4;
225 ulong sprg5; 228 ulong sprg5;
226 ulong sprg6; 229 ulong sprg6;
227 ulong sprg7; 230 ulong sprg7;
228 ulong srr0;
229 ulong srr1;
230 ulong csrr0; 231 ulong csrr0;
231 ulong csrr1; 232 ulong csrr1;
232 ulong dsrr0; 233 ulong dsrr0;
233 ulong dsrr1; 234 ulong dsrr1;
234 ulong dear;
235 ulong esr; 235 ulong esr;
236 u32 dec; 236 u32 dec;
237 u32 decar; 237 u32 decar;
@@ -290,12 +290,17 @@ struct kvm_vcpu_arch {
290 struct tasklet_struct tasklet; 290 struct tasklet_struct tasklet;
291 u64 dec_jiffies; 291 u64 dec_jiffies;
292 unsigned long pending_exceptions; 292 unsigned long pending_exceptions;
293 struct kvm_vcpu_arch_shared *shared;
294 unsigned long magic_page_pa; /* phys addr to map the magic page to */
295 unsigned long magic_page_ea; /* effect. addr to map the magic page to */
293 296
294#ifdef CONFIG_PPC_BOOK3S 297#ifdef CONFIG_PPC_BOOK3S
295 struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE]; 298 struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
299 struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
296 struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE]; 300 struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
297 struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG]; 301 struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
298 int hpte_cache_count; 302 int hpte_cache_count;
303 spinlock_t mmu_lock;
299#endif 304#endif
300}; 305};
301 306
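
Two of the new definitions are pure arithmetic: KVM_PAM masks guest real addresses down to the low 60 bits before they are used as RAM offsets, and the extra HPTEG_HASH_BITS_PTE_LONG constant sizes a fourth hash table for the shadow-PTE cache alongside the existing ones. A quick standalone illustration of what those constants evaluate to (the sample address is hypothetical):

#include <stdint.h>
#include <stdio.h>

#define KVM_PAM				0x0fffffffffffffffULL
#define HPTEG_HASH_BITS_PTE		13
#define HPTEG_HASH_BITS_PTE_LONG	12
#define HPTEG_HASH_BITS_VPTE		13
#define HPTEG_HASH_BITS_VPTE_LONG	5

int main(void)
{
	/* hypothetical guest real address with the high nibble set */
	uint64_t ra = 0xf000000000020000ULL;

	printf("masked real address: 0x%016llx\n",
	       (unsigned long long)(ra & KVM_PAM));
	printf("hash buckets: pte=%d pte_long=%d vpte=%d vpte_long=%d\n",
	       1 << HPTEG_HASH_BITS_PTE, 1 << HPTEG_HASH_BITS_PTE_LONG,
	       1 << HPTEG_HASH_BITS_VPTE, 1 << HPTEG_HASH_BITS_VPTE_LONG);
	return 0;
}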
diff --git a/arch/powerpc/include/asm/kvm_para.h b/arch/powerpc/include/asm/kvm_para.h
index 2d48f6a63d0b..50533f9adf40 100644
--- a/arch/powerpc/include/asm/kvm_para.h
+++ b/arch/powerpc/include/asm/kvm_para.h
@@ -20,16 +20,153 @@
20#ifndef __POWERPC_KVM_PARA_H__ 20#ifndef __POWERPC_KVM_PARA_H__
21#define __POWERPC_KVM_PARA_H__ 21#define __POWERPC_KVM_PARA_H__
22 22
23#include <linux/types.h>
24
25struct kvm_vcpu_arch_shared {
26 __u64 scratch1;
27 __u64 scratch2;
28 __u64 scratch3;
29 __u64 critical; /* Guest may not get interrupts if == r1 */
30 __u64 sprg0;
31 __u64 sprg1;
32 __u64 sprg2;
33 __u64 sprg3;
34 __u64 srr0;
35 __u64 srr1;
36 __u64 dar;
37 __u64 msr;
38 __u32 dsisr;
39 __u32 int_pending; /* Tells the guest if we have an interrupt */
40 __u32 sr[16];
41};
42
43#define KVM_SC_MAGIC_R0 0x4b564d21 /* "KVM!" */
44#define HC_VENDOR_KVM (42 << 16)
45#define HC_EV_SUCCESS 0
46#define HC_EV_UNIMPLEMENTED 12
47
48#define KVM_FEATURE_MAGIC_PAGE 1
49
50#define KVM_MAGIC_FEAT_SR (1 << 0)
51
23#ifdef __KERNEL__ 52#ifdef __KERNEL__
24 53
54#ifdef CONFIG_KVM_GUEST
55
56#include <linux/of.h>
57
58static inline int kvm_para_available(void)
59{
60 struct device_node *hyper_node;
61
62 hyper_node = of_find_node_by_path("/hypervisor");
63 if (!hyper_node)
64 return 0;
65
66 if (!of_device_is_compatible(hyper_node, "linux,kvm"))
67 return 0;
68
69 return 1;
70}
71
72extern unsigned long kvm_hypercall(unsigned long *in,
73 unsigned long *out,
74 unsigned long nr);
75
76#else
77
25static inline int kvm_para_available(void) 78static inline int kvm_para_available(void)
26{ 79{
27 return 0; 80 return 0;
28} 81}
29 82
83static unsigned long kvm_hypercall(unsigned long *in,
84 unsigned long *out,
85 unsigned long nr)
86{
87 return HC_EV_UNIMPLEMENTED;
88}
89
90#endif
91
92static inline long kvm_hypercall0_1(unsigned int nr, unsigned long *r2)
93{
94 unsigned long in[8];
95 unsigned long out[8];
96 unsigned long r;
97
98 r = kvm_hypercall(in, out, nr | HC_VENDOR_KVM);
99 *r2 = out[0];
100
101 return r;
102}
103
104static inline long kvm_hypercall0(unsigned int nr)
105{
106 unsigned long in[8];
107 unsigned long out[8];
108
109 return kvm_hypercall(in, out, nr | HC_VENDOR_KVM);
110}
111
112static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
113{
114 unsigned long in[8];
115 unsigned long out[8];
116
117 in[0] = p1;
118 return kvm_hypercall(in, out, nr | HC_VENDOR_KVM);
119}
120
121static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
122 unsigned long p2)
123{
124 unsigned long in[8];
125 unsigned long out[8];
126
127 in[0] = p1;
128 in[1] = p2;
129 return kvm_hypercall(in, out, nr | HC_VENDOR_KVM);
130}
131
132static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
133 unsigned long p2, unsigned long p3)
134{
135 unsigned long in[8];
136 unsigned long out[8];
137
138 in[0] = p1;
139 in[1] = p2;
140 in[2] = p3;
141 return kvm_hypercall(in, out, nr | HC_VENDOR_KVM);
142}
143
144static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
145 unsigned long p2, unsigned long p3,
146 unsigned long p4)
147{
148 unsigned long in[8];
149 unsigned long out[8];
150
151 in[0] = p1;
152 in[1] = p2;
153 in[2] = p3;
154 in[3] = p4;
155 return kvm_hypercall(in, out, nr | HC_VENDOR_KVM);
156}
157
158
30static inline unsigned int kvm_arch_para_features(void) 159static inline unsigned int kvm_arch_para_features(void)
31{ 160{
32 return 0; 161 unsigned long r;
162
163 if (!kvm_para_available())
164 return 0;
165
166 if(kvm_hypercall0_1(KVM_HC_FEATURES, &r))
167 return 0;
168
169 return r;
33} 170}
34 171
35#endif /* __KERNEL__ */ 172#endif /* __KERNEL__ */
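
Every wrapper above funnels into kvm_hypercall() with eight-slot in[]/out[] arrays and a call number tagged with HC_VENDOR_KVM; a host (or the !CONFIG_KVM_GUEST stub) that does not implement the call answers HC_EV_UNIMPLEMENTED, so feature probing degrades to zero features. The sketch below only models that marshalling convention, not the real hypercall sequence; HC_FEATURES_NR is a stand-in for the real KVM_HC_FEATURES number defined elsewhere in the series.

#include <stdio.h>

#define HC_VENDOR_KVM		(42 << 16)
#define HC_EV_SUCCESS		0
#define HC_EV_UNIMPLEMENTED	12
#define KVM_FEATURE_MAGIC_PAGE	1

/* Stand-in for the real KVM_HC_FEATURES hypercall number. */
#define HC_FEATURES_NR		3

/* Mock host: implements only the feature query, like a minimal hypervisor. */
static unsigned long mock_hypercall(unsigned long *in, unsigned long *out,
				    unsigned long nr)
{
	(void)in;
	if (nr == (HC_FEATURES_NR | HC_VENDOR_KVM)) {
		out[0] = 1 << KVM_FEATURE_MAGIC_PAGE;
		return HC_EV_SUCCESS;
	}
	return HC_EV_UNIMPLEMENTED;
}

/* Same shape as kvm_hypercall0_1() feeding kvm_arch_para_features(). */
static unsigned long query_features(void)
{
	unsigned long in[8] = { 0 }, out[8] = { 0 };

	if (mock_hypercall(in, out, HC_FEATURES_NR | HC_VENDOR_KVM))
		return 0;
	return out[0];
}

int main(void)
{
	unsigned long features = query_features();

	printf("features = 0x%lx, magic page %ssupported\n", features,
	       (features & (1 << KVM_FEATURE_MAGIC_PAGE)) ? "" : "not ");
	return 0;
}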
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 18d139ec2d22..ecb3bc74c344 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -107,6 +107,7 @@ extern int kvmppc_booke_init(void);
107extern void kvmppc_booke_exit(void); 107extern void kvmppc_booke_exit(void);
108 108
109extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu); 109extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
110extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
110 111
111/* 112/*
112 * Cuts out inst bits with ordering according to spec. 113 * Cuts out inst bits with ordering according to spec.
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 4ed076a4db24..36c30f31ec93 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -129,6 +129,8 @@ ifneq ($(CONFIG_XMON)$(CONFIG_KEXEC),)
129obj-y += ppc_save_regs.o 129obj-y += ppc_save_regs.o
130endif 130endif
131 131
132obj-$(CONFIG_KVM_GUEST) += kvm.o kvm_emul.o
133
132# Disable GCOV in odd or sensitive code 134# Disable GCOV in odd or sensitive code
133GCOV_PROFILE_prom_init.o := n 135GCOV_PROFILE_prom_init.o := n
134GCOV_PROFILE_ftrace.o := n 136GCOV_PROFILE_ftrace.o := n
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index c3e01945ad4f..bd0df2e6aa8f 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -48,11 +48,11 @@
48#ifdef CONFIG_PPC_ISERIES 48#ifdef CONFIG_PPC_ISERIES
49#include <asm/iseries/alpaca.h> 49#include <asm/iseries/alpaca.h>
50#endif 50#endif
51#ifdef CONFIG_KVM 51#if defined(CONFIG_KVM) || defined(CONFIG_KVM_GUEST)
52#include <linux/kvm_host.h> 52#include <linux/kvm_host.h>
53#ifndef CONFIG_BOOKE
54#include <asm/kvm_book3s.h>
55#endif 53#endif
54#if defined(CONFIG_KVM) && defined(CONFIG_PPC_BOOK3S)
55#include <asm/kvm_book3s.h>
56#endif 56#endif
57 57
58#ifdef CONFIG_PPC32 58#ifdef CONFIG_PPC32
@@ -396,12 +396,13 @@ int main(void)
396 DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack)); 396 DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
397 DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid)); 397 DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
398 DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr)); 398 DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
399 DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.msr));
400 DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4)); 399 DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4));
401 DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5)); 400 DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5));
402 DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6)); 401 DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6));
403 DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7)); 402 DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7));
404 DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid)); 403 DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));
404 DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared));
405 DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
405 406
406 /* book3s */ 407 /* book3s */
407#ifdef CONFIG_PPC_BOOK3S 408#ifdef CONFIG_PPC_BOOK3S
@@ -466,6 +467,22 @@ int main(void)
466 DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr)); 467 DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
467#endif /* CONFIG_PPC_BOOK3S */ 468#endif /* CONFIG_PPC_BOOK3S */
468#endif 469#endif
470
471#ifdef CONFIG_KVM_GUEST
472 DEFINE(KVM_MAGIC_SCRATCH1, offsetof(struct kvm_vcpu_arch_shared,
473 scratch1));
474 DEFINE(KVM_MAGIC_SCRATCH2, offsetof(struct kvm_vcpu_arch_shared,
475 scratch2));
476 DEFINE(KVM_MAGIC_SCRATCH3, offsetof(struct kvm_vcpu_arch_shared,
477 scratch3));
478 DEFINE(KVM_MAGIC_INT, offsetof(struct kvm_vcpu_arch_shared,
479 int_pending));
480 DEFINE(KVM_MAGIC_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
481 DEFINE(KVM_MAGIC_CRITICAL, offsetof(struct kvm_vcpu_arch_shared,
482 critical));
483 DEFINE(KVM_MAGIC_SR, offsetof(struct kvm_vcpu_arch_shared, sr));
484#endif
485
469#ifdef CONFIG_44x 486#ifdef CONFIG_44x
470 DEFINE(PGD_T_LOG2, PGD_T_LOG2); 487 DEFINE(PGD_T_LOG2, PGD_T_LOG2);
471 DEFINE(PTE_T_LOG2, PTE_T_LOG2); 488 DEFINE(PTE_T_LOG2, PTE_T_LOG2);
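
The KVM_MAGIC_* additions follow the usual asm-offsets pattern: the file is compiled only so the DEFINE()s can turn offsetof() values into assembler constants, which the new kvm_emul.S code uses to reach fields of the shared page by numeric offset. A user-space mirror of the structure declared in kvm_para.h above shows what those constants resolve to (using unsigned long long / unsigned int in place of __u64/__u32):

#include <stddef.h>
#include <stdio.h>

/* Copy of struct kvm_vcpu_arch_shared as added by this series. */
struct kvm_vcpu_arch_shared {
	unsigned long long scratch1, scratch2, scratch3, critical;
	unsigned long long sprg0, sprg1, sprg2, sprg3;
	unsigned long long srr0, srr1, dar, msr;
	unsigned int dsisr, int_pending;
	unsigned int sr[16];
};

int main(void)
{
	printf("KVM_MAGIC_SCRATCH1 = %zu\n",
	       offsetof(struct kvm_vcpu_arch_shared, scratch1));
	printf("KVM_MAGIC_MSR      = %zu\n",
	       offsetof(struct kvm_vcpu_arch_shared, msr));
	printf("KVM_MAGIC_INT      = %zu\n",
	       offsetof(struct kvm_vcpu_arch_shared, int_pending));
	printf("KVM_MAGIC_SR       = %zu\n",
	       offsetof(struct kvm_vcpu_arch_shared, sr));
	return 0;
}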
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 39b0c48872d2..9f8b01d6466f 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -299,6 +299,12 @@ slb_miss_user_pseries:
299 b . /* prevent spec. execution */ 299 b . /* prevent spec. execution */
300#endif /* __DISABLED__ */ 300#endif /* __DISABLED__ */
301 301
302/* KVM's trampoline code needs to be close to the interrupt handlers */
303
304#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
305#include "../kvm/book3s_rmhandlers.S"
306#endif
307
302 .align 7 308 .align 7
303 .globl __end_interrupts 309 .globl __end_interrupts
304__end_interrupts: 310__end_interrupts:
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index c571cd3c1453..f0dd577e4a5b 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -166,12 +166,6 @@ exception_marker:
166#include "exceptions-64s.S" 166#include "exceptions-64s.S"
167#endif 167#endif
168 168
169/* KVM trampoline code needs to be close to the interrupt handlers */
170
171#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
172#include "../kvm/book3s_rmhandlers.S"
173#endif
174
175_GLOBAL(generic_secondary_thread_init) 169_GLOBAL(generic_secondary_thread_init)
176 mr r24,r3 170 mr r24,r3
177 171
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
new file mode 100644
index 000000000000..428d0e538aec
--- /dev/null
+++ b/arch/powerpc/kernel/kvm.c
@@ -0,0 +1,596 @@
1/*
2 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
3 *
4 * Authors:
5 * Alexander Graf <agraf@suse.de>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License, version 2, as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 */
20
21#include <linux/kvm_host.h>
22#include <linux/init.h>
23#include <linux/kvm_para.h>
24#include <linux/slab.h>
25#include <linux/of.h>
26
27#include <asm/reg.h>
28#include <asm/sections.h>
29#include <asm/cacheflush.h>
30#include <asm/disassemble.h>
31
32#define KVM_MAGIC_PAGE (-4096L)
33#define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)
34
35#define KVM_INST_LWZ 0x80000000
36#define KVM_INST_STW 0x90000000
37#define KVM_INST_LD 0xe8000000
38#define KVM_INST_STD 0xf8000000
39#define KVM_INST_NOP 0x60000000
40#define KVM_INST_B 0x48000000
41#define KVM_INST_B_MASK 0x03ffffff
42#define KVM_INST_B_MAX 0x01ffffff
43
44#define KVM_MASK_RT 0x03e00000
45#define KVM_RT_30 0x03c00000
46#define KVM_MASK_RB 0x0000f800
47#define KVM_INST_MFMSR 0x7c0000a6
48#define KVM_INST_MFSPR_SPRG0 0x7c1042a6
49#define KVM_INST_MFSPR_SPRG1 0x7c1142a6
50#define KVM_INST_MFSPR_SPRG2 0x7c1242a6
51#define KVM_INST_MFSPR_SPRG3 0x7c1342a6
52#define KVM_INST_MFSPR_SRR0 0x7c1a02a6
53#define KVM_INST_MFSPR_SRR1 0x7c1b02a6
54#define KVM_INST_MFSPR_DAR 0x7c1302a6
55#define KVM_INST_MFSPR_DSISR 0x7c1202a6
56
57#define KVM_INST_MTSPR_SPRG0 0x7c1043a6
58#define KVM_INST_MTSPR_SPRG1 0x7c1143a6
59#define KVM_INST_MTSPR_SPRG2 0x7c1243a6
60#define KVM_INST_MTSPR_SPRG3 0x7c1343a6
61#define KVM_INST_MTSPR_SRR0 0x7c1a03a6
62#define KVM_INST_MTSPR_SRR1 0x7c1b03a6
63#define KVM_INST_MTSPR_DAR 0x7c1303a6
64#define KVM_INST_MTSPR_DSISR 0x7c1203a6
65
66#define KVM_INST_TLBSYNC 0x7c00046c
67#define KVM_INST_MTMSRD_L0 0x7c000164
68#define KVM_INST_MTMSRD_L1 0x7c010164
69#define KVM_INST_MTMSR 0x7c000124
70
71#define KVM_INST_WRTEEI_0 0x7c000146
72#define KVM_INST_WRTEEI_1 0x7c008146
73
74#define KVM_INST_MTSRIN 0x7c0001e4
75
76static bool kvm_patching_worked = true;
77static char kvm_tmp[1024 * 1024];
78static int kvm_tmp_index;
79
80static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
81{
82 *inst = new_inst;
83 flush_icache_range((ulong)inst, (ulong)inst + 4);
84}
85
86static void kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
87{
88#ifdef CONFIG_64BIT
89 kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
90#else
91 kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc));
92#endif
93}
94
95static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
96{
97#ifdef CONFIG_64BIT
98 kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
99#else
100 kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc));
101#endif
102}
103
104static void kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
105{
106 kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff));
107}
108
109static void kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
110{
111#ifdef CONFIG_64BIT
112 kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc));
113#else
114 kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc));
115#endif
116}
117
118static void kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
119{
120 kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc));
121}
122
123static void kvm_patch_ins_nop(u32 *inst)
124{
125 kvm_patch_ins(inst, KVM_INST_NOP);
126}
127
128static void kvm_patch_ins_b(u32 *inst, int addr)
129{
130#ifdef CONFIG_RELOCATABLE
131	/* On relocatable kernels interrupt handlers and our code
132 can be in different regions, so we don't patch them */
133
134 extern u32 __end_interrupts;
135 if ((ulong)inst < (ulong)&__end_interrupts)
136 return;
137#endif
138
139 kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK));
140}
141
142static u32 *kvm_alloc(int len)
143{
144 u32 *p;
145
146 if ((kvm_tmp_index + len) > ARRAY_SIZE(kvm_tmp)) {
147 printk(KERN_ERR "KVM: No more space (%d + %d)\n",
148 kvm_tmp_index, len);
149 kvm_patching_worked = false;
150 return NULL;
151 }
152
153 p = (void*)&kvm_tmp[kvm_tmp_index];
154 kvm_tmp_index += len;
155
156 return p;
157}
158
159extern u32 kvm_emulate_mtmsrd_branch_offs;
160extern u32 kvm_emulate_mtmsrd_reg_offs;
161extern u32 kvm_emulate_mtmsrd_orig_ins_offs;
162extern u32 kvm_emulate_mtmsrd_len;
163extern u32 kvm_emulate_mtmsrd[];
164
165static void kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
166{
167 u32 *p;
168 int distance_start;
169 int distance_end;
170 ulong next_inst;
171
172 p = kvm_alloc(kvm_emulate_mtmsrd_len * 4);
173 if (!p)
174 return;
175
176 /* Find out where we are and put everything there */
177 distance_start = (ulong)p - (ulong)inst;
178 next_inst = ((ulong)inst + 4);
179 distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs];
180
181 /* Make sure we only write valid b instructions */
182 if (distance_start > KVM_INST_B_MAX) {
183 kvm_patching_worked = false;
184 return;
185 }
186
187 /* Modify the chunk to fit the invocation */
188 memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4);
189 p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK;
190 switch (get_rt(rt)) {
191 case 30:
192 kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
193 magic_var(scratch2), KVM_RT_30);
194 break;
195 case 31:
196 kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
197 magic_var(scratch1), KVM_RT_30);
198 break;
199 default:
200 p[kvm_emulate_mtmsrd_reg_offs] |= rt;
201 break;
202 }
203
204 p[kvm_emulate_mtmsrd_orig_ins_offs] = *inst;
205 flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4);
206
207 /* Patch the invocation */
208 kvm_patch_ins_b(inst, distance_start);
209}
210
211extern u32 kvm_emulate_mtmsr_branch_offs;
212extern u32 kvm_emulate_mtmsr_reg1_offs;
213extern u32 kvm_emulate_mtmsr_reg2_offs;
214extern u32 kvm_emulate_mtmsr_orig_ins_offs;
215extern u32 kvm_emulate_mtmsr_len;
216extern u32 kvm_emulate_mtmsr[];
217
218static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
219{
220 u32 *p;
221 int distance_start;
222 int distance_end;
223 ulong next_inst;
224
225 p = kvm_alloc(kvm_emulate_mtmsr_len * 4);
226 if (!p)
227 return;
228
229 /* Find out where we are and put everything there */
230 distance_start = (ulong)p - (ulong)inst;
231 next_inst = ((ulong)inst + 4);
232 distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs];
233
234 /* Make sure we only write valid b instructions */
235 if (distance_start > KVM_INST_B_MAX) {
236 kvm_patching_worked = false;
237 return;
238 }
239
240 /* Modify the chunk to fit the invocation */
241 memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4);
242 p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK;
243
244 /* Make clobbered registers work too */
245 switch (get_rt(rt)) {
246 case 30:
247 kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
248 magic_var(scratch2), KVM_RT_30);
249 kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
250 magic_var(scratch2), KVM_RT_30);
251 break;
252 case 31:
253 kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
254 magic_var(scratch1), KVM_RT_30);
255 kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
256 magic_var(scratch1), KVM_RT_30);
257 break;
258 default:
259 p[kvm_emulate_mtmsr_reg1_offs] |= rt;
260 p[kvm_emulate_mtmsr_reg2_offs] |= rt;
261 break;
262 }
263
264 p[kvm_emulate_mtmsr_orig_ins_offs] = *inst;
265 flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4);
266
267 /* Patch the invocation */
268 kvm_patch_ins_b(inst, distance_start);
269}
270
271#ifdef CONFIG_BOOKE
272
273extern u32 kvm_emulate_wrteei_branch_offs;
274extern u32 kvm_emulate_wrteei_ee_offs;
275extern u32 kvm_emulate_wrteei_len;
276extern u32 kvm_emulate_wrteei[];
277
278static void kvm_patch_ins_wrteei(u32 *inst)
279{
280 u32 *p;
281 int distance_start;
282 int distance_end;
283 ulong next_inst;
284
285 p = kvm_alloc(kvm_emulate_wrteei_len * 4);
286 if (!p)
287 return;
288
289 /* Find out where we are and put everything there */
290 distance_start = (ulong)p - (ulong)inst;
291 next_inst = ((ulong)inst + 4);
292 distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_branch_offs];
293
294 /* Make sure we only write valid b instructions */
295 if (distance_start > KVM_INST_B_MAX) {
296 kvm_patching_worked = false;
297 return;
298 }
299
300 /* Modify the chunk to fit the invocation */
301 memcpy(p, kvm_emulate_wrteei, kvm_emulate_wrteei_len * 4);
302 p[kvm_emulate_wrteei_branch_offs] |= distance_end & KVM_INST_B_MASK;
303 p[kvm_emulate_wrteei_ee_offs] |= (*inst & MSR_EE);
304 flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_len * 4);
305
306 /* Patch the invocation */
307 kvm_patch_ins_b(inst, distance_start);
308}
309
310#endif
311
312#ifdef CONFIG_PPC_BOOK3S_32
313
314extern u32 kvm_emulate_mtsrin_branch_offs;
315extern u32 kvm_emulate_mtsrin_reg1_offs;
316extern u32 kvm_emulate_mtsrin_reg2_offs;
317extern u32 kvm_emulate_mtsrin_orig_ins_offs;
318extern u32 kvm_emulate_mtsrin_len;
319extern u32 kvm_emulate_mtsrin[];
320
321static void kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
322{
323 u32 *p;
324 int distance_start;
325 int distance_end;
326 ulong next_inst;
327
328 p = kvm_alloc(kvm_emulate_mtsrin_len * 4);
329 if (!p)
330 return;
331
332 /* Find out where we are and put everything there */
333 distance_start = (ulong)p - (ulong)inst;
334 next_inst = ((ulong)inst + 4);
335 distance_end = next_inst - (ulong)&p[kvm_emulate_mtsrin_branch_offs];
336
337 /* Make sure we only write valid b instructions */
338 if (distance_start > KVM_INST_B_MAX) {
339 kvm_patching_worked = false;
340 return;
341 }
342
343 /* Modify the chunk to fit the invocation */
344 memcpy(p, kvm_emulate_mtsrin, kvm_emulate_mtsrin_len * 4);
345 p[kvm_emulate_mtsrin_branch_offs] |= distance_end & KVM_INST_B_MASK;
346 p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10);
347 p[kvm_emulate_mtsrin_reg2_offs] |= rt;
348 p[kvm_emulate_mtsrin_orig_ins_offs] = *inst;
349 flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtsrin_len * 4);
350
351 /* Patch the invocation */
352 kvm_patch_ins_b(inst, distance_start);
353}
354
355#endif
356
357static void kvm_map_magic_page(void *data)
358{
359 u32 *features = data;
360
361 ulong in[8];
362 ulong out[8];
363
364 in[0] = KVM_MAGIC_PAGE;
365 in[1] = KVM_MAGIC_PAGE;
366
367 kvm_hypercall(in, out, HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE);
368
369 *features = out[0];
370}
371
372static void kvm_check_ins(u32 *inst, u32 features)
373{
374 u32 _inst = *inst;
375 u32 inst_no_rt = _inst & ~KVM_MASK_RT;
376 u32 inst_rt = _inst & KVM_MASK_RT;
377
378 switch (inst_no_rt) {
379 /* Loads */
380 case KVM_INST_MFMSR:
381 kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
382 break;
383 case KVM_INST_MFSPR_SPRG0:
384 kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
385 break;
386 case KVM_INST_MFSPR_SPRG1:
387 kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
388 break;
389 case KVM_INST_MFSPR_SPRG2:
390 kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
391 break;
392 case KVM_INST_MFSPR_SPRG3:
393 kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
394 break;
395 case KVM_INST_MFSPR_SRR0:
396 kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
397 break;
398 case KVM_INST_MFSPR_SRR1:
399 kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
400 break;
401 case KVM_INST_MFSPR_DAR:
402 kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
403 break;
404 case KVM_INST_MFSPR_DSISR:
405 kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
406 break;
407
408 /* Stores */
409 case KVM_INST_MTSPR_SPRG0:
410 kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
411 break;
412 case KVM_INST_MTSPR_SPRG1:
413 kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
414 break;
415 case KVM_INST_MTSPR_SPRG2:
416 kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
417 break;
418 case KVM_INST_MTSPR_SPRG3:
419 kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
420 break;
421 case KVM_INST_MTSPR_SRR0:
422 kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
423 break;
424 case KVM_INST_MTSPR_SRR1:
425 kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
426 break;
427 case KVM_INST_MTSPR_DAR:
428 kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
429 break;
430 case KVM_INST_MTSPR_DSISR:
431 kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
432 break;
433
434 /* Nops */
435 case KVM_INST_TLBSYNC:
436 kvm_patch_ins_nop(inst);
437 break;
438
439 /* Rewrites */
440 case KVM_INST_MTMSRD_L1:
441 kvm_patch_ins_mtmsrd(inst, inst_rt);
442 break;
443 case KVM_INST_MTMSR:
444 case KVM_INST_MTMSRD_L0:
445 kvm_patch_ins_mtmsr(inst, inst_rt);
446 break;
447 }
448
449 switch (inst_no_rt & ~KVM_MASK_RB) {
450#ifdef CONFIG_PPC_BOOK3S_32
451 case KVM_INST_MTSRIN:
452 if (features & KVM_MAGIC_FEAT_SR) {
453 u32 inst_rb = _inst & KVM_MASK_RB;
454 kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
455 }
456 break;
458#endif
459 }
460
461 switch (_inst) {
462#ifdef CONFIG_BOOKE
463 case KVM_INST_WRTEEI_0:
464 case KVM_INST_WRTEEI_1:
465 kvm_patch_ins_wrteei(inst);
466 break;
467#endif
468 }
469}
470
471static void kvm_use_magic_page(void)
472{
473 u32 *p;
474 u32 *start, *end;
475 u32 tmp;
476 u32 features;
477
478 /* Tell the host to map the magic page to -4096 on all CPUs */
479 on_each_cpu(kvm_map_magic_page, &features, 1);
480
481 /* Quick self-test to see if the mapping works */
482 if (__get_user(tmp, (u32*)KVM_MAGIC_PAGE)) {
483 kvm_patching_worked = false;
484 return;
485 }
486
487 /* Now loop through all code and find instructions */
488 start = (void*)_stext;
489 end = (void*)_etext;
490
491 for (p = start; p < end; p++)
492 kvm_check_ins(p, features);
493
494 printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
495 kvm_patching_worked ? "worked" : "failed");
496}
497
498unsigned long kvm_hypercall(unsigned long *in,
499 unsigned long *out,
500 unsigned long nr)
501{
502 unsigned long register r0 asm("r0");
503 unsigned long register r3 asm("r3") = in[0];
504 unsigned long register r4 asm("r4") = in[1];
505 unsigned long register r5 asm("r5") = in[2];
506 unsigned long register r6 asm("r6") = in[3];
507 unsigned long register r7 asm("r7") = in[4];
508 unsigned long register r8 asm("r8") = in[5];
509 unsigned long register r9 asm("r9") = in[6];
510 unsigned long register r10 asm("r10") = in[7];
511 unsigned long register r11 asm("r11") = nr;
512 unsigned long register r12 asm("r12");
513
514 asm volatile("bl kvm_hypercall_start"
515 : "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6),
516 "=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11),
517 "=r"(r12)
518 : "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8),
519 "r"(r9), "r"(r10), "r"(r11)
520 : "memory", "cc", "xer", "ctr", "lr");
521
522 out[0] = r4;
523 out[1] = r5;
524 out[2] = r6;
525 out[3] = r7;
526 out[4] = r8;
527 out[5] = r9;
528 out[6] = r10;
529 out[7] = r11;
530
531 return r3;
532}
533EXPORT_SYMBOL_GPL(kvm_hypercall);
534
535static int kvm_para_setup(void)
536{
537 extern u32 kvm_hypercall_start;
538 struct device_node *hyper_node;
539 u32 *insts;
540 int len, i;
541
542 hyper_node = of_find_node_by_path("/hypervisor");
543 if (!hyper_node)
544 return -1;
545
546 insts = (u32*)of_get_property(hyper_node, "hcall-instructions", &len);
547 if (len % 4)
548 return -1;
549 if (len > (4 * 4))
550 return -1;
551
552 for (i = 0; i < (len / 4); i++)
553 kvm_patch_ins(&(&kvm_hypercall_start)[i], insts[i]);
554
555 return 0;
556}
557
558static __init void kvm_free_tmp(void)
559{
560 unsigned long start, end;
561
562 start = (ulong)&kvm_tmp[kvm_tmp_index + (PAGE_SIZE - 1)] & PAGE_MASK;
563 end = (ulong)&kvm_tmp[ARRAY_SIZE(kvm_tmp)] & PAGE_MASK;
564
565 /* Free the tmp space we don't need */
566 for (; start < end; start += PAGE_SIZE) {
567 ClearPageReserved(virt_to_page(start));
568 init_page_count(virt_to_page(start));
569 free_page(start);
570 totalram_pages++;
571 }
572}
573
574static int __init kvm_guest_init(void)
575{
576 if (!kvm_para_available())
577 goto free_tmp;
578
579 if (kvm_para_setup())
580 goto free_tmp;
581
582 if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
583 kvm_use_magic_page();
584
585#ifdef CONFIG_PPC_BOOK3S_64
586 /* Enable napping */
587 powersave_nap = 1;
588#endif
589
590free_tmp:
591 kvm_free_tmp();
592
593 return 0;
594}
595
596postcore_initcall(kvm_guest_init);
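
The patching above is pure bit surgery on instruction encodings: kvm_check_ins() strips the rT field (KVM_MASK_RT, bits 21-25) off the privileged instruction and ORs it into a plain load or store that targets the magic page at -4096. A standalone worked example of the mfmsr case on a 32-bit guest, where kvm_patch_ins_ld() falls back to an lwz of the low word; the offset of msr within the shared page is assumed to be 8 here purely for illustration:

	#include <stdio.h>
	#include <stdint.h>

	#define KVM_INST_LWZ	0x80000000
	#define KVM_INST_MFMSR	0x7c0000a6
	#define KVM_MASK_RT	0x03e00000

	#define MAGIC_PAGE	(-4096L)
	#define MAGIC_MSR	(MAGIC_PAGE + 8)	/* assumed offset, see above */

	int main(void)
	{
		uint32_t inst = 0x7ca000a6;		/* mfmsr r5 */
		uint32_t inst_no_rt = inst & ~KVM_MASK_RT;
		uint32_t inst_rt = inst & KVM_MASK_RT;	/* r5 -> 0x00a00000 */

		if (inst_no_rt == KVM_INST_MFMSR) {
			/* 32-bit path of kvm_patch_ins_ld(): lwz rT, low-word(0) */
			uint32_t patched = KVM_INST_LWZ | inst_rt |
					   ((MAGIC_MSR + 4) & 0x0000fffc);
			printf("patched: 0x%08x\n", patched);	/* 0x80a0f00c */
		}
		return 0;
	}

The guest then reads its MSR with a single load from the shared page instead of trapping on mfmsr.
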
diff --git a/arch/powerpc/kernel/kvm_emul.S b/arch/powerpc/kernel/kvm_emul.S
new file mode 100644
index 000000000000..f2b1b2523e61
--- /dev/null
+++ b/arch/powerpc/kernel/kvm_emul.S
@@ -0,0 +1,302 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright SUSE Linux Products GmbH 2010
16 *
17 * Authors: Alexander Graf <agraf@suse.de>
18 */
19
20#include <asm/ppc_asm.h>
21#include <asm/kvm_asm.h>
22#include <asm/reg.h>
23#include <asm/page.h>
24#include <asm/asm-offsets.h>
25
26/* Hypercall entry point. Will be patched with device tree instructions. */
27
28.global kvm_hypercall_start
29kvm_hypercall_start:
30 li r3, -1
31 nop
32 nop
33 nop
34 blr
35
36#define KVM_MAGIC_PAGE (-4096)
37
38#ifdef CONFIG_64BIT
39#define LL64(reg, offs, reg2) ld reg, (offs)(reg2)
40#define STL64(reg, offs, reg2) std reg, (offs)(reg2)
41#else
42#define LL64(reg, offs, reg2) lwz reg, (offs + 4)(reg2)
43#define STL64(reg, offs, reg2) stw reg, (offs + 4)(reg2)
44#endif
45
46#define SCRATCH_SAVE \
47 /* Enable critical section. We are critical if \
48 shared->critical == r1 */ \
49 STL64(r1, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0); \
50 \
51 /* Save state */ \
52 PPC_STL r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0); \
53 PPC_STL r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0); \
54 mfcr r31; \
55 stw r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);
56
57#define SCRATCH_RESTORE \
58 /* Restore state */ \
59 PPC_LL r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0); \
60 lwz r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0); \
61 mtcr r30; \
62 PPC_LL r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0); \
63 \
64 /* Disable critical section. We are critical if \
65 shared->critical == r1 and r2 is always != r1 */ \
66 STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);
67
68.global kvm_emulate_mtmsrd
69kvm_emulate_mtmsrd:
70
71 SCRATCH_SAVE
72
73 /* Put MSR & ~(MSR_EE|MSR_RI) in r31 */
74 LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
75 lis r30, (~(MSR_EE | MSR_RI))@h
76 ori r30, r30, (~(MSR_EE | MSR_RI))@l
77 and r31, r31, r30
78
79	/* OR the register's (MSR_EE|MSR_RI) bits into MSR */
80kvm_emulate_mtmsrd_reg:
81 ori r30, r0, 0
82 andi. r30, r30, (MSR_EE|MSR_RI)
83 or r31, r31, r30
84
85 /* Put MSR back into magic page */
86 STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
87
88 /* Check if we have to fetch an interrupt */
89 lwz r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
90 cmpwi r31, 0
91 beq+ no_check
92
93 /* Check if we may trigger an interrupt */
94 andi. r30, r30, MSR_EE
95 beq no_check
96
97 SCRATCH_RESTORE
98
99 /* Nag hypervisor */
100kvm_emulate_mtmsrd_orig_ins:
101 tlbsync
102
103 b kvm_emulate_mtmsrd_branch
104
105no_check:
106
107 SCRATCH_RESTORE
108
109 /* Go back to caller */
110kvm_emulate_mtmsrd_branch:
111 b .
112kvm_emulate_mtmsrd_end:
113
114.global kvm_emulate_mtmsrd_branch_offs
115kvm_emulate_mtmsrd_branch_offs:
116 .long (kvm_emulate_mtmsrd_branch - kvm_emulate_mtmsrd) / 4
117
118.global kvm_emulate_mtmsrd_reg_offs
119kvm_emulate_mtmsrd_reg_offs:
120 .long (kvm_emulate_mtmsrd_reg - kvm_emulate_mtmsrd) / 4
121
122.global kvm_emulate_mtmsrd_orig_ins_offs
123kvm_emulate_mtmsrd_orig_ins_offs:
124 .long (kvm_emulate_mtmsrd_orig_ins - kvm_emulate_mtmsrd) / 4
125
126.global kvm_emulate_mtmsrd_len
127kvm_emulate_mtmsrd_len:
128 .long (kvm_emulate_mtmsrd_end - kvm_emulate_mtmsrd) / 4
129
130
131#define MSR_SAFE_BITS (MSR_EE | MSR_CE | MSR_ME | MSR_RI)
132#define MSR_CRITICAL_BITS ~MSR_SAFE_BITS
133
134.global kvm_emulate_mtmsr
135kvm_emulate_mtmsr:
136
137 SCRATCH_SAVE
138
139 /* Fetch old MSR in r31 */
140 LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
141
142 /* Find the changed bits between old and new MSR */
143kvm_emulate_mtmsr_reg1:
144 ori r30, r0, 0
145 xor r31, r30, r31
146
147 /* Check if we need to really do mtmsr */
148 LOAD_REG_IMMEDIATE(r30, MSR_CRITICAL_BITS)
149 and. r31, r31, r30
150
151 /* No critical bits changed? Maybe we can stay in the guest. */
152 beq maybe_stay_in_guest
153
154do_mtmsr:
155
156 SCRATCH_RESTORE
157
158 /* Just fire off the mtmsr if it's critical */
159kvm_emulate_mtmsr_orig_ins:
160 mtmsr r0
161
162 b kvm_emulate_mtmsr_branch
163
164maybe_stay_in_guest:
165
166 /* Get the target register in r30 */
167kvm_emulate_mtmsr_reg2:
168 ori r30, r0, 0
169
170 /* Check if we have to fetch an interrupt */
171 lwz r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
172 cmpwi r31, 0
173 beq+ no_mtmsr
174
175 /* Check if we may trigger an interrupt */
176 andi. r31, r30, MSR_EE
177 beq no_mtmsr
178
179 b do_mtmsr
180
181no_mtmsr:
182
183 /* Put MSR into magic page because we don't call mtmsr */
184 STL64(r30, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
185
186 SCRATCH_RESTORE
187
188 /* Go back to caller */
189kvm_emulate_mtmsr_branch:
190 b .
191kvm_emulate_mtmsr_end:
192
193.global kvm_emulate_mtmsr_branch_offs
194kvm_emulate_mtmsr_branch_offs:
195 .long (kvm_emulate_mtmsr_branch - kvm_emulate_mtmsr) / 4
196
197.global kvm_emulate_mtmsr_reg1_offs
198kvm_emulate_mtmsr_reg1_offs:
199 .long (kvm_emulate_mtmsr_reg1 - kvm_emulate_mtmsr) / 4
200
201.global kvm_emulate_mtmsr_reg2_offs
202kvm_emulate_mtmsr_reg2_offs:
203 .long (kvm_emulate_mtmsr_reg2 - kvm_emulate_mtmsr) / 4
204
205.global kvm_emulate_mtmsr_orig_ins_offs
206kvm_emulate_mtmsr_orig_ins_offs:
207 .long (kvm_emulate_mtmsr_orig_ins - kvm_emulate_mtmsr) / 4
208
209.global kvm_emulate_mtmsr_len
210kvm_emulate_mtmsr_len:
211 .long (kvm_emulate_mtmsr_end - kvm_emulate_mtmsr) / 4
212
213
214
215.global kvm_emulate_wrteei
216kvm_emulate_wrteei:
217
218 SCRATCH_SAVE
219
220 /* Fetch old MSR in r31 */
221 LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
222
223 /* Remove MSR_EE from old MSR */
224 li r30, 0
225 ori r30, r30, MSR_EE
226 andc r31, r31, r30
227
228 /* OR new MSR_EE onto the old MSR */
229kvm_emulate_wrteei_ee:
230 ori r31, r31, 0
231
232 /* Write new MSR value back */
233 STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
234
235 SCRATCH_RESTORE
236
237 /* Go back to caller */
238kvm_emulate_wrteei_branch:
239 b .
240kvm_emulate_wrteei_end:
241
242.global kvm_emulate_wrteei_branch_offs
243kvm_emulate_wrteei_branch_offs:
244 .long (kvm_emulate_wrteei_branch - kvm_emulate_wrteei) / 4
245
246.global kvm_emulate_wrteei_ee_offs
247kvm_emulate_wrteei_ee_offs:
248 .long (kvm_emulate_wrteei_ee - kvm_emulate_wrteei) / 4
249
250.global kvm_emulate_wrteei_len
251kvm_emulate_wrteei_len:
252 .long (kvm_emulate_wrteei_end - kvm_emulate_wrteei) / 4
253
254
255.global kvm_emulate_mtsrin
256kvm_emulate_mtsrin:
257
258 SCRATCH_SAVE
259
260 LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
261 andi. r31, r31, MSR_DR | MSR_IR
262 beq kvm_emulate_mtsrin_reg1
263
264 SCRATCH_RESTORE
265
266kvm_emulate_mtsrin_orig_ins:
267 nop
268 b kvm_emulate_mtsrin_branch
269
270kvm_emulate_mtsrin_reg1:
271 /* rX >> 26 */
272 rlwinm r30,r0,6,26,29
273
274kvm_emulate_mtsrin_reg2:
275 stw r0, (KVM_MAGIC_PAGE + KVM_MAGIC_SR)(r30)
276
277 SCRATCH_RESTORE
278
279 /* Go back to caller */
280kvm_emulate_mtsrin_branch:
281 b .
282kvm_emulate_mtsrin_end:
283
284.global kvm_emulate_mtsrin_branch_offs
285kvm_emulate_mtsrin_branch_offs:
286 .long (kvm_emulate_mtsrin_branch - kvm_emulate_mtsrin) / 4
287
288.global kvm_emulate_mtsrin_reg1_offs
289kvm_emulate_mtsrin_reg1_offs:
290 .long (kvm_emulate_mtsrin_reg1 - kvm_emulate_mtsrin) / 4
291
292.global kvm_emulate_mtsrin_reg2_offs
293kvm_emulate_mtsrin_reg2_offs:
294 .long (kvm_emulate_mtsrin_reg2 - kvm_emulate_mtsrin) / 4
295
296.global kvm_emulate_mtsrin_orig_ins_offs
297kvm_emulate_mtsrin_orig_ins_offs:
298 .long (kvm_emulate_mtsrin_orig_ins - kvm_emulate_mtsrin) / 4
299
300.global kvm_emulate_mtsrin_len
301kvm_emulate_mtsrin_len:
302 .long (kvm_emulate_mtsrin_end - kvm_emulate_mtsrin) / 4
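
The *_offs and *_len words exported after each template are how the C side in kvm.c stitches these fragments in: kvm_alloc() reserves room in kvm_tmp[], the template is copied, the word at branch_offs gets the return displacement, the register slot is fixed up, and only then is the original instruction overwritten with a branch into the copy. A condensed sketch of that flow, mirroring kvm_patch_ins_mtmsrd() with the bounds and error checks trimmed:

	static void patch_with_template(u32 *inst, u32 *tmpl, u32 tmpl_len,
					u32 branch_offs, u32 reg_offs, u32 rt)
	{
		u32 *p = kvm_alloc(tmpl_len * 4);		/* space in kvm_tmp[] */
		int to_tmpl   = (ulong)p - (ulong)inst;		/* call site -> copy  */
		int back_home = ((ulong)inst + 4) - (ulong)&p[branch_offs];

		memcpy(p, tmpl, tmpl_len * 4);			/* instantiate template */
		p[branch_offs] |= back_home & KVM_INST_B_MASK;	/* branch back          */
		p[reg_offs]    |= rt;				/* bake in rT           */
		flush_icache_range((ulong)p, (ulong)p + tmpl_len * 4);

		kvm_patch_ins_b(inst, to_tmpl);			/* redirect call site   */
	}
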
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
index 73c0a3f64ed1..74d0e7421143 100644
--- a/arch/powerpc/kvm/44x.c
+++ b/arch/powerpc/kvm/44x.c
@@ -43,7 +43,7 @@ int kvmppc_core_check_processor_compat(void)
43{ 43{
44 int r; 44 int r;
45 45
46 if (strcmp(cur_cpu_spec->platform, "ppc440") == 0) 46 if (strncmp(cur_cpu_spec->platform, "ppc440", 6) == 0)
47 r = 0; 47 r = 0;
48 else 48 else
49 r = -ENOTSUPP; 49 r = -ENOTSUPP;
@@ -72,6 +72,7 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
72 /* Since the guest can directly access the timebase, it must know the 72 /* Since the guest can directly access the timebase, it must know the
73 * real timebase frequency. Accordingly, it must see the state of 73 * real timebase frequency. Accordingly, it must see the state of
74 * CCR1[TCS]. */ 74 * CCR1[TCS]. */
75 /* XXX CCR1 doesn't exist on all 440 SoCs. */
75 vcpu->arch.ccr1 = mfspr(SPRN_CCR1); 76 vcpu->arch.ccr1 = mfspr(SPRN_CCR1);
76 77
77 for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) 78 for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++)
@@ -123,8 +124,14 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
123 if (err) 124 if (err)
124 goto free_vcpu; 125 goto free_vcpu;
125 126
127 vcpu->arch.shared = (void*)__get_free_page(GFP_KERNEL|__GFP_ZERO);
128 if (!vcpu->arch.shared)
129 goto uninit_vcpu;
130
126 return vcpu; 131 return vcpu;
127 132
133uninit_vcpu:
134 kvm_vcpu_uninit(vcpu);
128free_vcpu: 135free_vcpu:
129 kmem_cache_free(kvm_vcpu_cache, vcpu_44x); 136 kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
130out: 137out:
@@ -135,6 +142,7 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
135{ 142{
136 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); 143 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
137 144
145 free_page((unsigned long)vcpu->arch.shared);
138 kvm_vcpu_uninit(vcpu); 146 kvm_vcpu_uninit(vcpu);
139 kmem_cache_free(kvm_vcpu_cache, vcpu_44x); 147 kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
140} 148}
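
The shared page used by the paravirtual interface is just an ordinary zeroed kernel page, allocated per vCPU when it is created and released together with it; the guest later asks for it to appear at -4096 via the KVM_HC_PPC_MAP_MAGIC_PAGE hypercall. A minimal sketch of the allocate/free pairing the hunks above add:

	static int example_alloc_shared(struct kvm_vcpu *vcpu)
	{
		vcpu->arch.shared = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
		if (!vcpu->arch.shared)
			return -ENOMEM;		/* caller unwinds with kvm_vcpu_uninit() */
		return 0;
	}

	static void example_free_shared(struct kvm_vcpu *vcpu)
	{
		free_page((unsigned long)vcpu->arch.shared);
		vcpu->arch.shared = NULL;
	}
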
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index 9b9b5cdea840..5f3cff83e089 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -47,6 +47,7 @@
47#ifdef DEBUG 47#ifdef DEBUG
48void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu) 48void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
49{ 49{
50 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
50 struct kvmppc_44x_tlbe *tlbe; 51 struct kvmppc_44x_tlbe *tlbe;
51 int i; 52 int i;
52 53
@@ -221,14 +222,14 @@ gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
221 222
222int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) 223int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
223{ 224{
224 unsigned int as = !!(vcpu->arch.msr & MSR_IS); 225 unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);
225 226
226 return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as); 227 return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
227} 228}
228 229
229int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) 230int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
230{ 231{
231 unsigned int as = !!(vcpu->arch.msr & MSR_DS); 232 unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);
232 233
233 return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as); 234 return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
234} 235}
@@ -354,7 +355,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
354 355
355 stlbe.word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf); 356 stlbe.word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
356 stlbe.word2 = kvmppc_44x_tlb_shadow_attrib(flags, 357 stlbe.word2 = kvmppc_44x_tlb_shadow_attrib(flags,
357 vcpu->arch.msr & MSR_PR); 358 vcpu->arch.shared->msr & MSR_PR);
358 stlbe.tid = !(asid & 0xff); 359 stlbe.tid = !(asid & 0xff);
359 360
360 /* Keep track of the reference so we can properly release it later. */ 361 /* Keep track of the reference so we can properly release it later. */
@@ -423,7 +424,7 @@ static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
423 424
424 /* Does it match current guest AS? */ 425 /* Does it match current guest AS? */
425 /* XXX what about IS != DS? */ 426 /* XXX what about IS != DS? */
426 if (get_tlb_ts(tlbe) != !!(vcpu->arch.msr & MSR_IS)) 427 if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS))
427 return 0; 428 return 0;
428 429
429 gpa = get_tlb_raddr(tlbe); 430 gpa = get_tlb_raddr(tlbe);
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index a3cef30d1d42..e316847c08c0 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -17,6 +17,7 @@
17#include <linux/kvm_host.h> 17#include <linux/kvm_host.h>
18#include <linux/err.h> 18#include <linux/err.h>
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include "trace.h"
20 21
21#include <asm/reg.h> 22#include <asm/reg.h>
22#include <asm/cputable.h> 23#include <asm/cputable.h>
@@ -35,7 +36,6 @@
35#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU 36#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
36 37
37/* #define EXIT_DEBUG */ 38/* #define EXIT_DEBUG */
38/* #define EXIT_DEBUG_SIMPLE */
39/* #define DEBUG_EXT */ 39/* #define DEBUG_EXT */
40 40
41static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, 41static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
@@ -105,65 +105,71 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
105 kvmppc_giveup_ext(vcpu, MSR_VSX); 105 kvmppc_giveup_ext(vcpu, MSR_VSX);
106} 106}
107 107
108#if defined(EXIT_DEBUG)
109static u32 kvmppc_get_dec(struct kvm_vcpu *vcpu)
110{
111 u64 jd = mftb() - vcpu->arch.dec_jiffies;
112 return vcpu->arch.dec - jd;
113}
114#endif
115
116static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu) 108static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
117{ 109{
118 vcpu->arch.shadow_msr = vcpu->arch.msr; 110 ulong smsr = vcpu->arch.shared->msr;
111
119 /* Guest MSR values */ 112 /* Guest MSR values */
120 vcpu->arch.shadow_msr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | 113 smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_DE;
121 MSR_BE | MSR_DE;
122 /* Process MSR values */ 114 /* Process MSR values */
123 vcpu->arch.shadow_msr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | 115 smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
124 MSR_EE;
125 /* External providers the guest reserved */ 116 /* External providers the guest reserved */
126 vcpu->arch.shadow_msr |= (vcpu->arch.msr & vcpu->arch.guest_owned_ext); 117 smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
127 /* 64-bit Process MSR values */ 118 /* 64-bit Process MSR values */
128#ifdef CONFIG_PPC_BOOK3S_64 119#ifdef CONFIG_PPC_BOOK3S_64
129 vcpu->arch.shadow_msr |= MSR_ISF | MSR_HV; 120 smsr |= MSR_ISF | MSR_HV;
130#endif 121#endif
122 vcpu->arch.shadow_msr = smsr;
131} 123}
132 124
133void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr) 125void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
134{ 126{
135 ulong old_msr = vcpu->arch.msr; 127 ulong old_msr = vcpu->arch.shared->msr;
136 128
137#ifdef EXIT_DEBUG 129#ifdef EXIT_DEBUG
138 printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr); 130 printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
139#endif 131#endif
140 132
141 msr &= to_book3s(vcpu)->msr_mask; 133 msr &= to_book3s(vcpu)->msr_mask;
142 vcpu->arch.msr = msr; 134 vcpu->arch.shared->msr = msr;
143 kvmppc_recalc_shadow_msr(vcpu); 135 kvmppc_recalc_shadow_msr(vcpu);
144 136
145 if (msr & (MSR_WE|MSR_POW)) { 137 if (msr & MSR_POW) {
146 if (!vcpu->arch.pending_exceptions) { 138 if (!vcpu->arch.pending_exceptions) {
147 kvm_vcpu_block(vcpu); 139 kvm_vcpu_block(vcpu);
148 vcpu->stat.halt_wakeup++; 140 vcpu->stat.halt_wakeup++;
141
142 /* Unset POW bit after we woke up */
143 msr &= ~MSR_POW;
144 vcpu->arch.shared->msr = msr;
149 } 145 }
150 } 146 }
151 147
152 if ((vcpu->arch.msr & (MSR_PR|MSR_IR|MSR_DR)) != 148 if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
153 (old_msr & (MSR_PR|MSR_IR|MSR_DR))) { 149 (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
154 kvmppc_mmu_flush_segments(vcpu); 150 kvmppc_mmu_flush_segments(vcpu);
155 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); 151 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
152
153 /* Preload magic page segment when in kernel mode */
154 if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
155 struct kvm_vcpu_arch *a = &vcpu->arch;
156
157 if (msr & MSR_DR)
158 kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
159 else
160 kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
161 }
156 } 162 }
157 163
158 /* Preload FPU if it's enabled */ 164 /* Preload FPU if it's enabled */
159 if (vcpu->arch.msr & MSR_FP) 165 if (vcpu->arch.shared->msr & MSR_FP)
160 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); 166 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
161} 167}
162 168
163void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags) 169void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
164{ 170{
165 vcpu->arch.srr0 = kvmppc_get_pc(vcpu); 171 vcpu->arch.shared->srr0 = kvmppc_get_pc(vcpu);
166 vcpu->arch.srr1 = vcpu->arch.msr | flags; 172 vcpu->arch.shared->srr1 = vcpu->arch.shared->msr | flags;
167 kvmppc_set_pc(vcpu, to_book3s(vcpu)->hior + vec); 173 kvmppc_set_pc(vcpu, to_book3s(vcpu)->hior + vec);
168 vcpu->arch.mmu.reset_msr(vcpu); 174 vcpu->arch.mmu.reset_msr(vcpu);
169} 175}
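
With srr0, srr1 and the MSR living in the shared page, interrupt injection now writes state the guest can read back without trapping: the mfspr/mtspr sites patched by kvm.c become plain loads and stores against the magic page. An illustrative guest-side view of what the patched reads are equivalent to (the struct layout is the one referenced by magic_var() above):

	#define MAGIC_PAGE_VA	((struct kvm_vcpu_arch_shared *)-4096L)

	/* was: mfspr rX, SPRN_SRR0 -- now a plain load from the shared page */
	static inline unsigned long guest_read_srr0(void)
	{
		return MAGIC_PAGE_VA->srr0;
	}

	/* was: mfspr rX, SPRN_SRR1 */
	static inline unsigned long guest_read_srr1(void)
	{
		return MAGIC_PAGE_VA->srr1;
	}
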
@@ -180,6 +186,7 @@ static int kvmppc_book3s_vec2irqprio(unsigned int vec)
180 case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE; break; 186 case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE; break;
181 case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT; break; 187 case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT; break;
182 case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL; break; 188 case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL; break;
189 case 0x501: prio = BOOK3S_IRQPRIO_EXTERNAL_LEVEL; break;
183 case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT; break; 190 case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT; break;
184 case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM; break; 191 case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM; break;
185 case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL; break; 192 case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL; break;
@@ -199,6 +206,9 @@ static void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
199{ 206{
200 clear_bit(kvmppc_book3s_vec2irqprio(vec), 207 clear_bit(kvmppc_book3s_vec2irqprio(vec),
201 &vcpu->arch.pending_exceptions); 208 &vcpu->arch.pending_exceptions);
209
210 if (!vcpu->arch.pending_exceptions)
211 vcpu->arch.shared->int_pending = 0;
202} 212}
203 213
204void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec) 214void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
@@ -237,13 +247,19 @@ void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
237void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, 247void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
238 struct kvm_interrupt *irq) 248 struct kvm_interrupt *irq)
239{ 249{
240 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL); 250 unsigned int vec = BOOK3S_INTERRUPT_EXTERNAL;
251
252 if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
253 vec = BOOK3S_INTERRUPT_EXTERNAL_LEVEL;
254
255 kvmppc_book3s_queue_irqprio(vcpu, vec);
241} 256}
242 257
243void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu, 258void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
244 struct kvm_interrupt *irq) 259 struct kvm_interrupt *irq)
245{ 260{
246 kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL); 261 kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
262 kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
247} 263}
248 264
249int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority) 265int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
@@ -251,14 +267,29 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
251 int deliver = 1; 267 int deliver = 1;
252 int vec = 0; 268 int vec = 0;
253 ulong flags = 0ULL; 269 ulong flags = 0ULL;
270 ulong crit_raw = vcpu->arch.shared->critical;
271 ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
272 bool crit;
273
274 /* Truncate crit indicators in 32 bit mode */
275 if (!(vcpu->arch.shared->msr & MSR_SF)) {
276 crit_raw &= 0xffffffff;
277 crit_r1 &= 0xffffffff;
278 }
279
280 /* Critical section when crit == r1 */
281 crit = (crit_raw == crit_r1);
282 /* ... and we're in supervisor mode */
283 crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
254 284
255 switch (priority) { 285 switch (priority) {
256 case BOOK3S_IRQPRIO_DECREMENTER: 286 case BOOK3S_IRQPRIO_DECREMENTER:
257 deliver = vcpu->arch.msr & MSR_EE; 287 deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit;
258 vec = BOOK3S_INTERRUPT_DECREMENTER; 288 vec = BOOK3S_INTERRUPT_DECREMENTER;
259 break; 289 break;
260 case BOOK3S_IRQPRIO_EXTERNAL: 290 case BOOK3S_IRQPRIO_EXTERNAL:
261 deliver = vcpu->arch.msr & MSR_EE; 291 case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
292 deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit;
262 vec = BOOK3S_INTERRUPT_EXTERNAL; 293 vec = BOOK3S_INTERRUPT_EXTERNAL;
263 break; 294 break;
264 case BOOK3S_IRQPRIO_SYSTEM_RESET: 295 case BOOK3S_IRQPRIO_SYSTEM_RESET:
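
The crit check added here is the host half of the critical-section protocol: the guest stores its r1 into shared->critical around the patched MSR updates (see SCRATCH_SAVE/SCRATCH_RESTORE in kvm_emul.S), and while that marker still equals r1 in supervisor mode the host holds back decrementer and external delivery. Restated as a small helper for readability:

	static bool vcpu_in_critical_section(struct kvm_vcpu *vcpu)
	{
		ulong crit_raw = vcpu->arch.shared->critical;
		ulong crit_r1  = kvmppc_get_gpr(vcpu, 1);

		/* 32-bit guests only compare the low words */
		if (!(vcpu->arch.shared->msr & MSR_SF)) {
			crit_raw &= 0xffffffff;
			crit_r1  &= 0xffffffff;
		}

		/* critical if the marker matches r1 and we are in supervisor mode */
		return (crit_raw == crit_r1) && !(vcpu->arch.shared->msr & MSR_PR);
	}
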
@@ -320,9 +351,27 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
320 return deliver; 351 return deliver;
321} 352}
322 353
354/*
355 * This function determines if an irqprio should be cleared once issued.
356 */
357static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
358{
359 switch (priority) {
360 case BOOK3S_IRQPRIO_DECREMENTER:
361 /* DEC interrupts get cleared by mtdec */
362 return false;
363 case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
364 /* External interrupts get cleared by userspace */
365 return false;
366 }
367
368 return true;
369}
370
323void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu) 371void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
324{ 372{
325 unsigned long *pending = &vcpu->arch.pending_exceptions; 373 unsigned long *pending = &vcpu->arch.pending_exceptions;
374 unsigned long old_pending = vcpu->arch.pending_exceptions;
326 unsigned int priority; 375 unsigned int priority;
327 376
328#ifdef EXIT_DEBUG 377#ifdef EXIT_DEBUG
@@ -332,8 +381,7 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
332 priority = __ffs(*pending); 381 priority = __ffs(*pending);
333 while (priority < BOOK3S_IRQPRIO_MAX) { 382 while (priority < BOOK3S_IRQPRIO_MAX) {
334 if (kvmppc_book3s_irqprio_deliver(vcpu, priority) && 383 if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
335 (priority != BOOK3S_IRQPRIO_DECREMENTER)) { 384 clear_irqprio(vcpu, priority)) {
336 /* DEC interrupts get cleared by mtdec */
337 clear_bit(priority, &vcpu->arch.pending_exceptions); 385 clear_bit(priority, &vcpu->arch.pending_exceptions);
338 break; 386 break;
339 } 387 }
@@ -342,6 +390,12 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
342 BITS_PER_BYTE * sizeof(*pending), 390 BITS_PER_BYTE * sizeof(*pending),
343 priority + 1); 391 priority + 1);
344 } 392 }
393
394 /* Tell the guest about our interrupt status */
395 if (*pending)
396 vcpu->arch.shared->int_pending = 1;
397 else if (old_pending)
398 vcpu->arch.shared->int_pending = 0;
345} 399}
346 400
347void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr) 401void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
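
int_pending is what lets the patched mtmsr/mtmsrd/wrteei templates stay in the guest: the host mirrors pending_exceptions into the flag here, and the guest only falls back to the original, trapping instruction when it is turning MSR_EE on while something is queued. A guest-side sketch of that decision, mirroring kvm_emulate_mtmsrd in kvm_emul.S (guest_trap_mtmsr() is a hypothetical stand-in for re-executing the preserved original instruction):

	static void guest_trap_mtmsr(unsigned long msr);	/* hypothetical stub */

	static void guest_set_msr_fast(struct kvm_vcpu_arch_shared *magic,
				       unsigned long new_msr)
	{
		magic->msr = new_msr;			/* common case: no trap at all */

		/* Only bounce into the hypervisor when interrupts are being
		 * enabled and the host has flagged a pending interrupt. */
		if ((new_msr & MSR_EE) && magic->int_pending)
			guest_trap_mtmsr(new_msr);
	}
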
@@ -398,6 +452,25 @@ void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
398 } 452 }
399} 453}
400 454
455pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
456{
457 ulong mp_pa = vcpu->arch.magic_page_pa;
458
459 /* Magic page override */
460 if (unlikely(mp_pa) &&
461 unlikely(((gfn << PAGE_SHIFT) & KVM_PAM) ==
462 ((mp_pa & PAGE_MASK) & KVM_PAM))) {
463 ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
464 pfn_t pfn;
465
466 pfn = (pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;
467 get_page(pfn_to_page(pfn));
468 return pfn;
469 }
470
471 return gfn_to_pfn(vcpu->kvm, gfn);
472}
473
401/* Book3s_32 CPUs always have 32 bytes cache line size, which Linux assumes. To 474/* Book3s_32 CPUs always have 32 bytes cache line size, which Linux assumes. To
402 * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to 475 * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to
403 * emulate 32 bytes dcbz length. 476 * emulate 32 bytes dcbz length.
@@ -415,8 +488,10 @@ static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
415 int i; 488 int i;
416 489
417 hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT); 490 hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
418 if (is_error_page(hpage)) 491 if (is_error_page(hpage)) {
492 kvm_release_page_clean(hpage);
419 return; 493 return;
494 }
420 495
421 hpage_offset = pte->raddr & ~PAGE_MASK; 496 hpage_offset = pte->raddr & ~PAGE_MASK;
422 hpage_offset &= ~0xFFFULL; 497 hpage_offset &= ~0xFFFULL;
@@ -437,14 +512,14 @@ static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
437static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data, 512static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
438 struct kvmppc_pte *pte) 513 struct kvmppc_pte *pte)
439{ 514{
440 int relocated = (vcpu->arch.msr & (data ? MSR_DR : MSR_IR)); 515 int relocated = (vcpu->arch.shared->msr & (data ? MSR_DR : MSR_IR));
441 int r; 516 int r;
442 517
443 if (relocated) { 518 if (relocated) {
444 r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data); 519 r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data);
445 } else { 520 } else {
446 pte->eaddr = eaddr; 521 pte->eaddr = eaddr;
447 pte->raddr = eaddr & 0xffffffff; 522 pte->raddr = eaddr & KVM_PAM;
448 pte->vpage = VSID_REAL | eaddr >> 12; 523 pte->vpage = VSID_REAL | eaddr >> 12;
449 pte->may_read = true; 524 pte->may_read = true;
450 pte->may_write = true; 525 pte->may_write = true;
@@ -533,6 +608,13 @@ mmio:
533 608
534static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) 609static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
535{ 610{
611 ulong mp_pa = vcpu->arch.magic_page_pa;
612
613 if (unlikely(mp_pa) &&
614 unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
615 return 1;
616 }
617
536 return kvm_is_visible_gfn(vcpu->kvm, gfn); 618 return kvm_is_visible_gfn(vcpu->kvm, gfn);
537} 619}
538 620
@@ -545,8 +627,8 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
545 int page_found = 0; 627 int page_found = 0;
546 struct kvmppc_pte pte; 628 struct kvmppc_pte pte;
547 bool is_mmio = false; 629 bool is_mmio = false;
548 bool dr = (vcpu->arch.msr & MSR_DR) ? true : false; 630 bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
549 bool ir = (vcpu->arch.msr & MSR_IR) ? true : false; 631 bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
550 u64 vsid; 632 u64 vsid;
551 633
552 relocated = data ? dr : ir; 634 relocated = data ? dr : ir;
@@ -558,12 +640,12 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
558 pte.may_execute = true; 640 pte.may_execute = true;
559 pte.may_read = true; 641 pte.may_read = true;
560 pte.may_write = true; 642 pte.may_write = true;
561 pte.raddr = eaddr & 0xffffffff; 643 pte.raddr = eaddr & KVM_PAM;
562 pte.eaddr = eaddr; 644 pte.eaddr = eaddr;
563 pte.vpage = eaddr >> 12; 645 pte.vpage = eaddr >> 12;
564 } 646 }
565 647
566 switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) { 648 switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
567 case 0: 649 case 0:
568 pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12)); 650 pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
569 break; 651 break;
@@ -571,7 +653,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
571 case MSR_IR: 653 case MSR_IR:
572 vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid); 654 vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
573 655
574 if ((vcpu->arch.msr & (MSR_DR|MSR_IR)) == MSR_DR) 656 if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
575 pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12)); 657 pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
576 else 658 else
577 pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12)); 659 pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
@@ -594,20 +676,23 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
594 676
595 if (page_found == -ENOENT) { 677 if (page_found == -ENOENT) {
596 /* Page not found in guest PTE entries */ 678 /* Page not found in guest PTE entries */
597 vcpu->arch.dear = kvmppc_get_fault_dar(vcpu); 679 vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
598 to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr; 680 vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
599 vcpu->arch.msr |= (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL); 681 vcpu->arch.shared->msr |=
682 (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
600 kvmppc_book3s_queue_irqprio(vcpu, vec); 683 kvmppc_book3s_queue_irqprio(vcpu, vec);
601 } else if (page_found == -EPERM) { 684 } else if (page_found == -EPERM) {
602 /* Storage protection */ 685 /* Storage protection */
603 vcpu->arch.dear = kvmppc_get_fault_dar(vcpu); 686 vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
604 to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE; 687 vcpu->arch.shared->dsisr =
605 to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT; 688 to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE;
606 vcpu->arch.msr |= (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL); 689 vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
690 vcpu->arch.shared->msr |=
691 (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
607 kvmppc_book3s_queue_irqprio(vcpu, vec); 692 kvmppc_book3s_queue_irqprio(vcpu, vec);
608 } else if (page_found == -EINVAL) { 693 } else if (page_found == -EINVAL) {
609 /* Page not found in guest SLB */ 694 /* Page not found in guest SLB */
610 vcpu->arch.dear = kvmppc_get_fault_dar(vcpu); 695 vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
611 kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80); 696 kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
612 } else if (!is_mmio && 697 } else if (!is_mmio &&
613 kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) { 698 kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
@@ -695,9 +780,11 @@ static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
695 780
696 ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false); 781 ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
697 if (ret == -ENOENT) { 782 if (ret == -ENOENT) {
698 vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 33, 33, 1); 783 ulong msr = vcpu->arch.shared->msr;
699 vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 34, 36, 0); 784
700 vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 42, 47, 0); 785 msr = kvmppc_set_field(msr, 33, 33, 1);
786 msr = kvmppc_set_field(msr, 34, 36, 0);
787 vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
701 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE); 788 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
702 return EMULATE_AGAIN; 789 return EMULATE_AGAIN;
703 } 790 }
@@ -736,7 +823,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
736 if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) 823 if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
737 return RESUME_GUEST; 824 return RESUME_GUEST;
738 825
739 if (!(vcpu->arch.msr & msr)) { 826 if (!(vcpu->arch.shared->msr & msr)) {
740 kvmppc_book3s_queue_irqprio(vcpu, exit_nr); 827 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
741 return RESUME_GUEST; 828 return RESUME_GUEST;
742 } 829 }
@@ -796,16 +883,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
796 883
797 run->exit_reason = KVM_EXIT_UNKNOWN; 884 run->exit_reason = KVM_EXIT_UNKNOWN;
798 run->ready_for_interrupt_injection = 1; 885 run->ready_for_interrupt_injection = 1;
799#ifdef EXIT_DEBUG 886
800 printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | dec=0x%x | msr=0x%lx\n", 887 trace_kvm_book3s_exit(exit_nr, vcpu);
801 exit_nr, kvmppc_get_pc(vcpu), kvmppc_get_fault_dar(vcpu),
802 kvmppc_get_dec(vcpu), to_svcpu(vcpu)->shadow_srr1);
803#elif defined (EXIT_DEBUG_SIMPLE)
804 if ((exit_nr != 0x900) && (exit_nr != 0x500))
805 printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | msr=0x%lx\n",
806 exit_nr, kvmppc_get_pc(vcpu), kvmppc_get_fault_dar(vcpu),
807 vcpu->arch.msr);
808#endif
809 kvm_resched(vcpu); 888 kvm_resched(vcpu);
810 switch (exit_nr) { 889 switch (exit_nr) {
811 case BOOK3S_INTERRUPT_INST_STORAGE: 890 case BOOK3S_INTERRUPT_INST_STORAGE:
@@ -836,9 +915,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
836 kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL); 915 kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
837 r = RESUME_GUEST; 916 r = RESUME_GUEST;
838 } else { 917 } else {
839 vcpu->arch.msr |= to_svcpu(vcpu)->shadow_srr1 & 0x58000000; 918 vcpu->arch.shared->msr |=
919 to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
840 kvmppc_book3s_queue_irqprio(vcpu, exit_nr); 920 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
841 kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
842 r = RESUME_GUEST; 921 r = RESUME_GUEST;
843 } 922 }
844 break; 923 break;
@@ -861,17 +940,16 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
861 if (to_svcpu(vcpu)->fault_dsisr & DSISR_NOHPTE) { 940 if (to_svcpu(vcpu)->fault_dsisr & DSISR_NOHPTE) {
862 r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr); 941 r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
863 } else { 942 } else {
864 vcpu->arch.dear = dar; 943 vcpu->arch.shared->dar = dar;
865 to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr; 944 vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
866 kvmppc_book3s_queue_irqprio(vcpu, exit_nr); 945 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
867 kvmppc_mmu_pte_flush(vcpu, vcpu->arch.dear, ~0xFFFUL);
868 r = RESUME_GUEST; 946 r = RESUME_GUEST;
869 } 947 }
870 break; 948 break;
871 } 949 }
872 case BOOK3S_INTERRUPT_DATA_SEGMENT: 950 case BOOK3S_INTERRUPT_DATA_SEGMENT:
873 if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) { 951 if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
874 vcpu->arch.dear = kvmppc_get_fault_dar(vcpu); 952 vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
875 kvmppc_book3s_queue_irqprio(vcpu, 953 kvmppc_book3s_queue_irqprio(vcpu,
876 BOOK3S_INTERRUPT_DATA_SEGMENT); 954 BOOK3S_INTERRUPT_DATA_SEGMENT);
877 } 955 }
@@ -904,7 +982,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
904program_interrupt: 982program_interrupt:
905 flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull; 983 flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull;
906 984
907 if (vcpu->arch.msr & MSR_PR) { 985 if (vcpu->arch.shared->msr & MSR_PR) {
908#ifdef EXIT_DEBUG 986#ifdef EXIT_DEBUG
909 printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu)); 987 printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
910#endif 988#endif
@@ -941,10 +1019,10 @@ program_interrupt:
941 break; 1019 break;
942 } 1020 }
943 case BOOK3S_INTERRUPT_SYSCALL: 1021 case BOOK3S_INTERRUPT_SYSCALL:
944 // XXX make user settable
945 if (vcpu->arch.osi_enabled && 1022 if (vcpu->arch.osi_enabled &&
946 (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) && 1023 (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
947 (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) { 1024 (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
1025 /* MOL hypercalls */
948 u64 *gprs = run->osi.gprs; 1026 u64 *gprs = run->osi.gprs;
949 int i; 1027 int i;
950 1028
@@ -953,8 +1031,13 @@ program_interrupt:
953 gprs[i] = kvmppc_get_gpr(vcpu, i); 1031 gprs[i] = kvmppc_get_gpr(vcpu, i);
954 vcpu->arch.osi_needed = 1; 1032 vcpu->arch.osi_needed = 1;
955 r = RESUME_HOST_NV; 1033 r = RESUME_HOST_NV;
956 1034 } else if (!(vcpu->arch.shared->msr & MSR_PR) &&
1035 (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
1036 /* KVM PV hypercalls */
1037 kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
1038 r = RESUME_GUEST;
957 } else { 1039 } else {
1040 /* Guest syscalls */
958 vcpu->stat.syscall_exits++; 1041 vcpu->stat.syscall_exits++;
959 kvmppc_book3s_queue_irqprio(vcpu, exit_nr); 1042 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
960 r = RESUME_GUEST; 1043 r = RESUME_GUEST;
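
A paravirtual hypercall arrives here as an ordinary sc with the magic value in r0, and is only honoured for privileged guest code (MSR_PR clear); the result goes back in r3. On the guest side this is exactly what kvm_hypercall() from arch/powerpc/kernel/kvm.c, added earlier in this series, emits. A short caller sketch, modeled on kvm_map_magic_page():

	static unsigned long example_map_magic_page(u32 *features)
	{
		unsigned long in[8] = { 0 };
		unsigned long out[8] = { 0 };
		unsigned long ret;

		/* The magic page address is passed twice, as in kvm_map_magic_page() */
		in[0] = KVM_MAGIC_PAGE;
		in[1] = KVM_MAGIC_PAGE;

		ret = kvm_hypercall(in, out, HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE);

		*features = out[0];	/* feature bits, as read by kvm_map_magic_page() */
		return ret;		/* r3: hypercall status */
	}
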
@@ -989,9 +1072,9 @@ program_interrupt:
989 } 1072 }
990 case BOOK3S_INTERRUPT_ALIGNMENT: 1073 case BOOK3S_INTERRUPT_ALIGNMENT:
991 if (kvmppc_read_inst(vcpu) == EMULATE_DONE) { 1074 if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
992 to_book3s(vcpu)->dsisr = kvmppc_alignment_dsisr(vcpu, 1075 vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu,
993 kvmppc_get_last_inst(vcpu)); 1076 kvmppc_get_last_inst(vcpu));
994 vcpu->arch.dear = kvmppc_alignment_dar(vcpu, 1077 vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu,
995 kvmppc_get_last_inst(vcpu)); 1078 kvmppc_get_last_inst(vcpu));
996 kvmppc_book3s_queue_irqprio(vcpu, exit_nr); 1079 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
997 } 1080 }
@@ -1031,9 +1114,7 @@ program_interrupt:
1031 } 1114 }
1032 } 1115 }
1033 1116
1034#ifdef EXIT_DEBUG 1117 trace_kvm_book3s_reenter(r, vcpu);
1035 printk(KERN_EMERG "KVM exit: vcpu=0x%p pc=0x%lx r=0x%x\n", vcpu, kvmppc_get_pc(vcpu), r);
1036#endif
1037 1118
1038 return r; 1119 return r;
1039} 1120}
@@ -1052,14 +1133,14 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1052 regs->ctr = kvmppc_get_ctr(vcpu); 1133 regs->ctr = kvmppc_get_ctr(vcpu);
1053 regs->lr = kvmppc_get_lr(vcpu); 1134 regs->lr = kvmppc_get_lr(vcpu);
1054 regs->xer = kvmppc_get_xer(vcpu); 1135 regs->xer = kvmppc_get_xer(vcpu);
1055 regs->msr = vcpu->arch.msr; 1136 regs->msr = vcpu->arch.shared->msr;
1056 regs->srr0 = vcpu->arch.srr0; 1137 regs->srr0 = vcpu->arch.shared->srr0;
1057 regs->srr1 = vcpu->arch.srr1; 1138 regs->srr1 = vcpu->arch.shared->srr1;
1058 regs->pid = vcpu->arch.pid; 1139 regs->pid = vcpu->arch.pid;
1059 regs->sprg0 = vcpu->arch.sprg0; 1140 regs->sprg0 = vcpu->arch.shared->sprg0;
1060 regs->sprg1 = vcpu->arch.sprg1; 1141 regs->sprg1 = vcpu->arch.shared->sprg1;
1061 regs->sprg2 = vcpu->arch.sprg2; 1142 regs->sprg2 = vcpu->arch.shared->sprg2;
1062 regs->sprg3 = vcpu->arch.sprg3; 1143 regs->sprg3 = vcpu->arch.shared->sprg3;
1063 regs->sprg5 = vcpu->arch.sprg4; 1144 regs->sprg5 = vcpu->arch.sprg4;
1064 regs->sprg6 = vcpu->arch.sprg5; 1145 regs->sprg6 = vcpu->arch.sprg5;
1065 regs->sprg7 = vcpu->arch.sprg6; 1146 regs->sprg7 = vcpu->arch.sprg6;
@@ -1080,12 +1161,12 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1080 kvmppc_set_lr(vcpu, regs->lr); 1161 kvmppc_set_lr(vcpu, regs->lr);
1081 kvmppc_set_xer(vcpu, regs->xer); 1162 kvmppc_set_xer(vcpu, regs->xer);
1082 kvmppc_set_msr(vcpu, regs->msr); 1163 kvmppc_set_msr(vcpu, regs->msr);
1083 vcpu->arch.srr0 = regs->srr0; 1164 vcpu->arch.shared->srr0 = regs->srr0;
1084 vcpu->arch.srr1 = regs->srr1; 1165 vcpu->arch.shared->srr1 = regs->srr1;
1085 vcpu->arch.sprg0 = regs->sprg0; 1166 vcpu->arch.shared->sprg0 = regs->sprg0;
1086 vcpu->arch.sprg1 = regs->sprg1; 1167 vcpu->arch.shared->sprg1 = regs->sprg1;
1087 vcpu->arch.sprg2 = regs->sprg2; 1168 vcpu->arch.shared->sprg2 = regs->sprg2;
1088 vcpu->arch.sprg3 = regs->sprg3; 1169 vcpu->arch.shared->sprg3 = regs->sprg3;
1089 vcpu->arch.sprg5 = regs->sprg4; 1170 vcpu->arch.sprg5 = regs->sprg4;
1090 vcpu->arch.sprg6 = regs->sprg5; 1171 vcpu->arch.sprg6 = regs->sprg5;
1091 vcpu->arch.sprg7 = regs->sprg6; 1172 vcpu->arch.sprg7 = regs->sprg6;
@@ -1111,10 +1192,9 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1111 sregs->u.s.ppc64.slb[i].slbv = vcpu3s->slb[i].origv; 1192 sregs->u.s.ppc64.slb[i].slbv = vcpu3s->slb[i].origv;
1112 } 1193 }
1113 } else { 1194 } else {
1114 for (i = 0; i < 16; i++) { 1195 for (i = 0; i < 16; i++)
1115 sregs->u.s.ppc32.sr[i] = vcpu3s->sr[i].raw; 1196 sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i];
1116 sregs->u.s.ppc32.sr[i] = vcpu3s->sr[i].raw; 1197
1117 }
1118 for (i = 0; i < 8; i++) { 1198 for (i = 0; i < 8; i++) {
1119 sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw; 1199 sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
1120 sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw; 1200 sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
@@ -1225,6 +1305,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
1225 struct kvmppc_vcpu_book3s *vcpu_book3s; 1305 struct kvmppc_vcpu_book3s *vcpu_book3s;
1226 struct kvm_vcpu *vcpu; 1306 struct kvm_vcpu *vcpu;
1227 int err = -ENOMEM; 1307 int err = -ENOMEM;
1308 unsigned long p;
1228 1309
1229 vcpu_book3s = vmalloc(sizeof(struct kvmppc_vcpu_book3s)); 1310 vcpu_book3s = vmalloc(sizeof(struct kvmppc_vcpu_book3s));
1230 if (!vcpu_book3s) 1311 if (!vcpu_book3s)
@@ -1242,6 +1323,12 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
1242 if (err) 1323 if (err)
1243 goto free_shadow_vcpu; 1324 goto free_shadow_vcpu;
1244 1325
1326 p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
1327 /* the real shared page fills the last 4k of our page */
1328 vcpu->arch.shared = (void*)(p + PAGE_SIZE - 4096);
1329 if (!p)
1330 goto uninit_vcpu;
1331
1245 vcpu->arch.host_retip = kvm_return_point; 1332 vcpu->arch.host_retip = kvm_return_point;
1246 vcpu->arch.host_msr = mfmsr(); 1333 vcpu->arch.host_msr = mfmsr();
1247#ifdef CONFIG_PPC_BOOK3S_64 1334#ifdef CONFIG_PPC_BOOK3S_64
@@ -1268,10 +1355,12 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
1268 1355
1269 err = kvmppc_mmu_init(vcpu); 1356 err = kvmppc_mmu_init(vcpu);
1270 if (err < 0) 1357 if (err < 0)
1271 goto free_shadow_vcpu; 1358 goto uninit_vcpu;
1272 1359
1273 return vcpu; 1360 return vcpu;
1274 1361
1362uninit_vcpu:
1363 kvm_vcpu_uninit(vcpu);
1275free_shadow_vcpu: 1364free_shadow_vcpu:
1276 kfree(vcpu_book3s->shadow_vcpu); 1365 kfree(vcpu_book3s->shadow_vcpu);
1277free_vcpu: 1366free_vcpu:
@@ -1284,6 +1373,7 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
1284{ 1373{
1285 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); 1374 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
1286 1375
1376 free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
1287 kvm_vcpu_uninit(vcpu); 1377 kvm_vcpu_uninit(vcpu);
1288 kfree(vcpu_book3s->shadow_vcpu); 1378 kfree(vcpu_book3s->shadow_vcpu);
1289 vfree(vcpu_book3s); 1379 vfree(vcpu_book3s);
@@ -1346,7 +1436,7 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
1346 local_irq_enable(); 1436 local_irq_enable();
1347 1437
1348 /* Preload FPU if it's enabled */ 1438 /* Preload FPU if it's enabled */
1349 if (vcpu->arch.msr & MSR_FP) 1439 if (vcpu->arch.shared->msr & MSR_FP)
1350 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); 1440 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
1351 1441
1352 ret = __kvmppc_vcpu_entry(kvm_run, vcpu); 1442 ret = __kvmppc_vcpu_entry(kvm_run, vcpu);
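Editor's note: the vcpu create/free hunks above carve the guest-visible shared register area out of the last 4 KiB of a freshly allocated host page and recover the page base by masking before freeing it. A minimal user-space sketch of that pointer arithmetic, not the kernel code itself; the 64 KiB PAGE_SIZE is only an illustrative value and the shared struct is omitted:

#define _POSIX_C_SOURCE 200112L
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 65536UL               /* illustrative 64 KiB host page */
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
        void *page;
        if (posix_memalign(&page, PAGE_SIZE, PAGE_SIZE))
                return 1;

        /* the shared area fills the last 4 KiB of the page */
        void *shared = (char *)page + PAGE_SIZE - 4096;

        /* masking the shared pointer recovers the page base for freeing */
        void *base = (void *)((uintptr_t)shared & PAGE_MASK);
        printf("page=%p shared=%p base=%p match=%d\n",
               page, shared, base, base == page);

        free(base);
        return 0;
}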
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index 3292d76101d2..c8cefdd15fd8 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -58,14 +58,39 @@ static inline bool check_debug_ip(struct kvm_vcpu *vcpu)
58#endif 58#endif
59} 59}
60 60
61static inline u32 sr_vsid(u32 sr_raw)
62{
63 return sr_raw & 0x0fffffff;
64}
65
66static inline bool sr_valid(u32 sr_raw)
67{
68 return (sr_raw & 0x80000000) ? false : true;
69}
70
71static inline bool sr_ks(u32 sr_raw)
72{
73 return (sr_raw & 0x40000000) ? true: false;
74}
75
76static inline bool sr_kp(u32 sr_raw)
77{
78 return (sr_raw & 0x20000000) ? true: false;
79}
80
81static inline bool sr_nx(u32 sr_raw)
82{
83 return (sr_raw & 0x10000000) ? true: false;
84}
85
61static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr, 86static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
62 struct kvmppc_pte *pte, bool data); 87 struct kvmppc_pte *pte, bool data);
63static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, 88static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
64 u64 *vsid); 89 u64 *vsid);
65 90
66static struct kvmppc_sr *find_sr(struct kvmppc_vcpu_book3s *vcpu_book3s, gva_t eaddr) 91static u32 find_sr(struct kvm_vcpu *vcpu, gva_t eaddr)
67{ 92{
68 return &vcpu_book3s->sr[(eaddr >> 28) & 0xf]; 93 return vcpu->arch.shared->sr[(eaddr >> 28) & 0xf];
69} 94}
70 95
71static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr, 96static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
@@ -87,7 +112,7 @@ static void kvmppc_mmu_book3s_32_reset_msr(struct kvm_vcpu *vcpu)
87} 112}
88 113
89static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvmppc_vcpu_book3s *vcpu_book3s, 114static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvmppc_vcpu_book3s *vcpu_book3s,
90 struct kvmppc_sr *sre, gva_t eaddr, 115 u32 sre, gva_t eaddr,
91 bool primary) 116 bool primary)
92{ 117{
93 u32 page, hash, pteg, htabmask; 118 u32 page, hash, pteg, htabmask;
@@ -96,7 +121,7 @@ static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvmppc_vcpu_book3s *vcpu_book3
96 page = (eaddr & 0x0FFFFFFF) >> 12; 121 page = (eaddr & 0x0FFFFFFF) >> 12;
97 htabmask = ((vcpu_book3s->sdr1 & 0x1FF) << 16) | 0xFFC0; 122 htabmask = ((vcpu_book3s->sdr1 & 0x1FF) << 16) | 0xFFC0;
98 123
99 hash = ((sre->vsid ^ page) << 6); 124 hash = ((sr_vsid(sre) ^ page) << 6);
100 if (!primary) 125 if (!primary)
101 hash = ~hash; 126 hash = ~hash;
102 hash &= htabmask; 127 hash &= htabmask;
@@ -104,8 +129,8 @@ static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvmppc_vcpu_book3s *vcpu_book3
104 pteg = (vcpu_book3s->sdr1 & 0xffff0000) | hash; 129 pteg = (vcpu_book3s->sdr1 & 0xffff0000) | hash;
105 130
106 dprintk("MMU: pc=0x%lx eaddr=0x%lx sdr1=0x%llx pteg=0x%x vsid=0x%x\n", 131 dprintk("MMU: pc=0x%lx eaddr=0x%lx sdr1=0x%llx pteg=0x%x vsid=0x%x\n",
107 vcpu_book3s->vcpu.arch.pc, eaddr, vcpu_book3s->sdr1, pteg, 132 kvmppc_get_pc(&vcpu_book3s->vcpu), eaddr, vcpu_book3s->sdr1, pteg,
108 sre->vsid); 133 sr_vsid(sre));
109 134
110 r = gfn_to_hva(vcpu_book3s->vcpu.kvm, pteg >> PAGE_SHIFT); 135 r = gfn_to_hva(vcpu_book3s->vcpu.kvm, pteg >> PAGE_SHIFT);
111 if (kvm_is_error_hva(r)) 136 if (kvm_is_error_hva(r))
@@ -113,10 +138,9 @@ static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvmppc_vcpu_book3s *vcpu_book3
113 return r | (pteg & ~PAGE_MASK); 138 return r | (pteg & ~PAGE_MASK);
114} 139}
115 140
116static u32 kvmppc_mmu_book3s_32_get_ptem(struct kvmppc_sr *sre, gva_t eaddr, 141static u32 kvmppc_mmu_book3s_32_get_ptem(u32 sre, gva_t eaddr, bool primary)
117 bool primary)
118{ 142{
119 return ((eaddr & 0x0fffffff) >> 22) | (sre->vsid << 7) | 143 return ((eaddr & 0x0fffffff) >> 22) | (sr_vsid(sre) << 7) |
120 (primary ? 0 : 0x40) | 0x80000000; 144 (primary ? 0 : 0x40) | 0x80000000;
121} 145}
122 146
@@ -133,7 +157,7 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
133 else 157 else
134 bat = &vcpu_book3s->ibat[i]; 158 bat = &vcpu_book3s->ibat[i];
135 159
136 if (vcpu->arch.msr & MSR_PR) { 160 if (vcpu->arch.shared->msr & MSR_PR) {
137 if (!bat->vp) 161 if (!bat->vp)
138 continue; 162 continue;
139 } else { 163 } else {
@@ -180,17 +204,17 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
180 bool primary) 204 bool primary)
181{ 205{
182 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); 206 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
183 struct kvmppc_sr *sre; 207 u32 sre;
184 hva_t ptegp; 208 hva_t ptegp;
185 u32 pteg[16]; 209 u32 pteg[16];
186 u32 ptem = 0; 210 u32 ptem = 0;
187 int i; 211 int i;
188 int found = 0; 212 int found = 0;
189 213
190 sre = find_sr(vcpu_book3s, eaddr); 214 sre = find_sr(vcpu, eaddr);
191 215
192 dprintk_pte("SR 0x%lx: vsid=0x%x, raw=0x%x\n", eaddr >> 28, 216 dprintk_pte("SR 0x%lx: vsid=0x%x, raw=0x%x\n", eaddr >> 28,
193 sre->vsid, sre->raw); 217 sr_vsid(sre), sre);
194 218
195 pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data); 219 pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data);
196 220
@@ -214,8 +238,8 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
214 pte->raddr = (pteg[i+1] & ~(0xFFFULL)) | (eaddr & 0xFFF); 238 pte->raddr = (pteg[i+1] & ~(0xFFFULL)) | (eaddr & 0xFFF);
215 pp = pteg[i+1] & 3; 239 pp = pteg[i+1] & 3;
216 240
217 if ((sre->Kp && (vcpu->arch.msr & MSR_PR)) || 241 if ((sr_kp(sre) && (vcpu->arch.shared->msr & MSR_PR)) ||
218 (sre->Ks && !(vcpu->arch.msr & MSR_PR))) 242 (sr_ks(sre) && !(vcpu->arch.shared->msr & MSR_PR)))
219 pp |= 4; 243 pp |= 4;
220 244
221 pte->may_write = false; 245 pte->may_write = false;
@@ -269,7 +293,7 @@ no_page_found:
269 dprintk_pte("KVM MMU: No PTE found (sdr1=0x%llx ptegp=0x%lx)\n", 293 dprintk_pte("KVM MMU: No PTE found (sdr1=0x%llx ptegp=0x%lx)\n",
270 to_book3s(vcpu)->sdr1, ptegp); 294 to_book3s(vcpu)->sdr1, ptegp);
271 for (i=0; i<16; i+=2) { 295 for (i=0; i<16; i+=2) {
272 dprintk_pte(" %02d: 0x%x - 0x%x (0x%llx)\n", 296 dprintk_pte(" %02d: 0x%x - 0x%x (0x%x)\n",
273 i, pteg[i], pteg[i+1], ptem); 297 i, pteg[i], pteg[i+1], ptem);
274 } 298 }
275 } 299 }
@@ -281,8 +305,24 @@ static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
281 struct kvmppc_pte *pte, bool data) 305 struct kvmppc_pte *pte, bool data)
282{ 306{
283 int r; 307 int r;
308 ulong mp_ea = vcpu->arch.magic_page_ea;
284 309
285 pte->eaddr = eaddr; 310 pte->eaddr = eaddr;
311
312 /* Magic page override */
313 if (unlikely(mp_ea) &&
314 unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) &&
315 !(vcpu->arch.shared->msr & MSR_PR)) {
316 pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data);
317 pte->raddr = vcpu->arch.magic_page_pa | (pte->raddr & 0xfff);
318 pte->raddr &= KVM_PAM;
319 pte->may_execute = true;
320 pte->may_read = true;
321 pte->may_write = true;
322
323 return 0;
324 }
325
286 r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data); 326 r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data);
287 if (r < 0) 327 if (r < 0)
288 r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, true); 328 r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, true);
@@ -295,30 +335,13 @@ static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
295 335
296static u32 kvmppc_mmu_book3s_32_mfsrin(struct kvm_vcpu *vcpu, u32 srnum) 336static u32 kvmppc_mmu_book3s_32_mfsrin(struct kvm_vcpu *vcpu, u32 srnum)
297{ 337{
298 return to_book3s(vcpu)->sr[srnum].raw; 338 return vcpu->arch.shared->sr[srnum];
299} 339}
300 340
301static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum, 341static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
302 ulong value) 342 ulong value)
303{ 343{
304 struct kvmppc_sr *sre; 344 vcpu->arch.shared->sr[srnum] = value;
305
306 sre = &to_book3s(vcpu)->sr[srnum];
307
308 /* Flush any left-over shadows from the previous SR */
309
310 /* XXX Not necessary? */
311 /* kvmppc_mmu_pte_flush(vcpu, ((u64)sre->vsid) << 28, 0xf0000000ULL); */
312
313 /* And then put in the new SR */
314 sre->raw = value;
315 sre->vsid = (value & 0x0fffffff);
316 sre->valid = (value & 0x80000000) ? false : true;
317 sre->Ks = (value & 0x40000000) ? true : false;
318 sre->Kp = (value & 0x20000000) ? true : false;
319 sre->nx = (value & 0x10000000) ? true : false;
320
321 /* Map the new segment */
322 kvmppc_mmu_map_segment(vcpu, srnum << SID_SHIFT); 345 kvmppc_mmu_map_segment(vcpu, srnum << SID_SHIFT);
323} 346}
324 347
@@ -331,19 +354,19 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
331 u64 *vsid) 354 u64 *vsid)
332{ 355{
333 ulong ea = esid << SID_SHIFT; 356 ulong ea = esid << SID_SHIFT;
334 struct kvmppc_sr *sr; 357 u32 sr;
335 u64 gvsid = esid; 358 u64 gvsid = esid;
336 359
337 if (vcpu->arch.msr & (MSR_DR|MSR_IR)) { 360 if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
338 sr = find_sr(to_book3s(vcpu), ea); 361 sr = find_sr(vcpu, ea);
339 if (sr->valid) 362 if (sr_valid(sr))
340 gvsid = sr->vsid; 363 gvsid = sr_vsid(sr);
341 } 364 }
342 365
343 /* In case we only have one of MSR_IR or MSR_DR set, let's put 366 /* In case we only have one of MSR_IR or MSR_DR set, let's put
344 that in the real-mode context (and hope RM doesn't access 367 that in the real-mode context (and hope RM doesn't access
345 high memory) */ 368 high memory) */
346 switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) { 369 switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
347 case 0: 370 case 0:
348 *vsid = VSID_REAL | esid; 371 *vsid = VSID_REAL | esid;
349 break; 372 break;
@@ -354,8 +377,8 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
354 *vsid = VSID_REAL_DR | gvsid; 377 *vsid = VSID_REAL_DR | gvsid;
355 break; 378 break;
356 case MSR_DR|MSR_IR: 379 case MSR_DR|MSR_IR:
357 if (sr->valid) 380 if (sr_valid(sr))
358 *vsid = sr->vsid; 381 *vsid = sr_vsid(sr);
359 else 382 else
360 *vsid = VSID_BAT | gvsid; 383 *vsid = VSID_BAT | gvsid;
361 break; 384 break;
@@ -363,7 +386,7 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
363 BUG(); 386 BUG();
364 } 387 }
365 388
366 if (vcpu->arch.msr & MSR_PR) 389 if (vcpu->arch.shared->msr & MSR_PR)
367 *vsid |= VSID_PR; 390 *vsid |= VSID_PR;
368 391
369 return 0; 392 return 0;
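Editor's note: segment registers now live as raw 32-bit words in the shared page and are decoded on demand by the sr_*() helpers above; find_sr() simply indexes shared->sr[] with the top nibble of the effective address. A standalone copy of the decoders using the same masks as the hunk; the example SR value is made up:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t sr_vsid(uint32_t sr_raw)  { return sr_raw & 0x0fffffff; }
static bool     sr_valid(uint32_t sr_raw) { return !(sr_raw & 0x80000000); }
static bool     sr_ks(uint32_t sr_raw)    { return sr_raw & 0x40000000; }
static bool     sr_kp(uint32_t sr_raw)    { return sr_raw & 0x20000000; }
static bool     sr_nx(uint32_t sr_raw)    { return sr_raw & 0x10000000; }

int main(void)
{
        uint32_t sr = 0x20001234;       /* Kp set, VSID 0x1234, T clear */

        printf("vsid=0x%x valid=%d Ks=%d Kp=%d Nx=%d\n",
               sr_vsid(sr), sr_valid(sr), sr_ks(sr), sr_kp(sr), sr_nx(sr));
        return 0;
}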
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index 0b51ef872c1e..9fecbfbce773 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -19,7 +19,6 @@
19 */ 19 */
20 20
21#include <linux/kvm_host.h> 21#include <linux/kvm_host.h>
22#include <linux/hash.h>
23 22
24#include <asm/kvm_ppc.h> 23#include <asm/kvm_ppc.h>
25#include <asm/kvm_book3s.h> 24#include <asm/kvm_book3s.h>
@@ -77,7 +76,14 @@ void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
77 * a hash, so we don't waste cycles on looping */ 76 * a hash, so we don't waste cycles on looping */
78static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid) 77static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
79{ 78{
80 return hash_64(gvsid, SID_MAP_BITS); 79 return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
80 ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
81 ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
82 ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
83 ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
84 ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
85 ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
86 ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
81} 87}
82 88
83 89
@@ -86,7 +92,7 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
86 struct kvmppc_sid_map *map; 92 struct kvmppc_sid_map *map;
87 u16 sid_map_mask; 93 u16 sid_map_mask;
88 94
89 if (vcpu->arch.msr & MSR_PR) 95 if (vcpu->arch.shared->msr & MSR_PR)
90 gvsid |= VSID_PR; 96 gvsid |= VSID_PR;
91 97
92 sid_map_mask = kvmppc_sid_hash(vcpu, gvsid); 98 sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
@@ -147,8 +153,8 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
147 struct hpte_cache *pte; 153 struct hpte_cache *pte;
148 154
149 /* Get host physical address for gpa */ 155 /* Get host physical address for gpa */
150 hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT); 156 hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
151 if (kvm_is_error_hva(hpaddr)) { 157 if (is_error_pfn(hpaddr)) {
152 printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", 158 printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
153 orig_pte->eaddr); 159 orig_pte->eaddr);
154 return -EINVAL; 160 return -EINVAL;
@@ -253,7 +259,7 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
253 u16 sid_map_mask; 259 u16 sid_map_mask;
254 static int backwards_map = 0; 260 static int backwards_map = 0;
255 261
256 if (vcpu->arch.msr & MSR_PR) 262 if (vcpu->arch.shared->msr & MSR_PR)
257 gvsid |= VSID_PR; 263 gvsid |= VSID_PR;
258 264
259 /* We might get collisions that trap in preceding order, so let's 265 /* We might get collisions that trap in preceding order, so let's
@@ -269,18 +275,15 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
269 backwards_map = !backwards_map; 275 backwards_map = !backwards_map;
270 276
271 /* Uh-oh ... out of mappings. Let's flush! */ 277 /* Uh-oh ... out of mappings. Let's flush! */
272 if (vcpu_book3s->vsid_next >= vcpu_book3s->vsid_max) { 278 if (vcpu_book3s->vsid_next >= VSID_POOL_SIZE) {
273 vcpu_book3s->vsid_next = vcpu_book3s->vsid_first; 279 vcpu_book3s->vsid_next = 0;
274 memset(vcpu_book3s->sid_map, 0, 280 memset(vcpu_book3s->sid_map, 0,
275 sizeof(struct kvmppc_sid_map) * SID_MAP_NUM); 281 sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
276 kvmppc_mmu_pte_flush(vcpu, 0, 0); 282 kvmppc_mmu_pte_flush(vcpu, 0, 0);
277 kvmppc_mmu_flush_segments(vcpu); 283 kvmppc_mmu_flush_segments(vcpu);
278 } 284 }
279 map->host_vsid = vcpu_book3s->vsid_next; 285 map->host_vsid = vcpu_book3s->vsid_pool[vcpu_book3s->vsid_next];
280 286 vcpu_book3s->vsid_next++;
281 /* Would have to be 111 to be completely aligned with the rest of
282 Linux, but that is just way too little space! */
283 vcpu_book3s->vsid_next+=1;
284 287
285 map->guest_vsid = gvsid; 288 map->guest_vsid = gvsid;
286 map->valid = true; 289 map->valid = true;
@@ -327,40 +330,38 @@ void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
327 330
328void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) 331void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
329{ 332{
333 int i;
334
330 kvmppc_mmu_hpte_destroy(vcpu); 335 kvmppc_mmu_hpte_destroy(vcpu);
331 preempt_disable(); 336 preempt_disable();
332 __destroy_context(to_book3s(vcpu)->context_id); 337 for (i = 0; i < SID_CONTEXTS; i++)
338 __destroy_context(to_book3s(vcpu)->context_id[i]);
333 preempt_enable(); 339 preempt_enable();
334} 340}
335 341
336/* From mm/mmu_context_hash32.c */ 342/* From mm/mmu_context_hash32.c */
337#define CTX_TO_VSID(ctx) (((ctx) * (897 * 16)) & 0xffffff) 343#define CTX_TO_VSID(c, id) ((((c) * (897 * 16)) + (id * 0x111)) & 0xffffff)
338 344
339int kvmppc_mmu_init(struct kvm_vcpu *vcpu) 345int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
340{ 346{
341 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); 347 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
342 int err; 348 int err;
343 ulong sdr1; 349 ulong sdr1;
350 int i;
351 int j;
344 352
345 err = __init_new_context(); 353 for (i = 0; i < SID_CONTEXTS; i++) {
346 if (err < 0) 354 err = __init_new_context();
347 return -1; 355 if (err < 0)
348 vcpu3s->context_id = err; 356 goto init_fail;
349 357 vcpu3s->context_id[i] = err;
350 vcpu3s->vsid_max = CTX_TO_VSID(vcpu3s->context_id + 1) - 1;
351 vcpu3s->vsid_first = CTX_TO_VSID(vcpu3s->context_id);
352
353#if 0 /* XXX still doesn't guarantee uniqueness */
354 /* We could collide with the Linux vsid space because the vsid
355 * wraps around at 24 bits. We're safe if we do our own space
356 * though, so let's always set the highest bit. */
357 358
358 vcpu3s->vsid_max |= 0x00800000; 359 /* Remember context id for this combination */
359 vcpu3s->vsid_first |= 0x00800000; 360 for (j = 0; j < 16; j++)
360#endif 361 vcpu3s->vsid_pool[(i * 16) + j] = CTX_TO_VSID(err, j);
361 BUG_ON(vcpu3s->vsid_max < vcpu3s->vsid_first); 362 }
362 363
363 vcpu3s->vsid_next = vcpu3s->vsid_first; 364 vcpu3s->vsid_next = 0;
364 365
365 /* Remember where the HTAB is */ 366 /* Remember where the HTAB is */
366 asm ( "mfsdr1 %0" : "=r"(sdr1) ); 367 asm ( "mfsdr1 %0" : "=r"(sdr1) );
@@ -370,4 +371,14 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
370 kvmppc_mmu_hpte_init(vcpu); 371 kvmppc_mmu_hpte_init(vcpu);
371 372
372 return 0; 373 return 0;
374
375init_fail:
376 for (j = 0; j < i; j++) {
377 if (!vcpu3s->context_id[j])
378 continue;
379
380 __destroy_context(to_book3s(vcpu)->context_id[j]);
381 }
382
383 return -1;
373} 384}
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 4025ea26b3c1..d7889ef3211e 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -163,6 +163,22 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
163 bool found = false; 163 bool found = false;
164 bool perm_err = false; 164 bool perm_err = false;
165 int second = 0; 165 int second = 0;
166 ulong mp_ea = vcpu->arch.magic_page_ea;
167
168 /* Magic page override */
169 if (unlikely(mp_ea) &&
170 unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) &&
171 !(vcpu->arch.shared->msr & MSR_PR)) {
172 gpte->eaddr = eaddr;
173 gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
174 gpte->raddr = vcpu->arch.magic_page_pa | (gpte->raddr & 0xfff);
175 gpte->raddr &= KVM_PAM;
176 gpte->may_execute = true;
177 gpte->may_read = true;
178 gpte->may_write = true;
179
180 return 0;
181 }
166 182
167 slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu_book3s, eaddr); 183 slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu_book3s, eaddr);
168 if (!slbe) 184 if (!slbe)
@@ -180,9 +196,9 @@ do_second:
180 goto no_page_found; 196 goto no_page_found;
181 } 197 }
182 198
183 if ((vcpu->arch.msr & MSR_PR) && slbe->Kp) 199 if ((vcpu->arch.shared->msr & MSR_PR) && slbe->Kp)
184 key = 4; 200 key = 4;
185 else if (!(vcpu->arch.msr & MSR_PR) && slbe->Ks) 201 else if (!(vcpu->arch.shared->msr & MSR_PR) && slbe->Ks)
186 key = 4; 202 key = 4;
187 203
188 for (i=0; i<16; i+=2) { 204 for (i=0; i<16; i+=2) {
@@ -381,7 +397,7 @@ static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
381 for (i = 1; i < vcpu_book3s->slb_nr; i++) 397 for (i = 1; i < vcpu_book3s->slb_nr; i++)
382 vcpu_book3s->slb[i].valid = false; 398 vcpu_book3s->slb[i].valid = false;
383 399
384 if (vcpu->arch.msr & MSR_IR) { 400 if (vcpu->arch.shared->msr & MSR_IR) {
385 kvmppc_mmu_flush_segments(vcpu); 401 kvmppc_mmu_flush_segments(vcpu);
386 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); 402 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
387 } 403 }
@@ -445,14 +461,15 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
445 ulong ea = esid << SID_SHIFT; 461 ulong ea = esid << SID_SHIFT;
446 struct kvmppc_slb *slb; 462 struct kvmppc_slb *slb;
447 u64 gvsid = esid; 463 u64 gvsid = esid;
464 ulong mp_ea = vcpu->arch.magic_page_ea;
448 465
449 if (vcpu->arch.msr & (MSR_DR|MSR_IR)) { 466 if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
450 slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea); 467 slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea);
451 if (slb) 468 if (slb)
452 gvsid = slb->vsid; 469 gvsid = slb->vsid;
453 } 470 }
454 471
455 switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) { 472 switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
456 case 0: 473 case 0:
457 *vsid = VSID_REAL | esid; 474 *vsid = VSID_REAL | esid;
458 break; 475 break;
@@ -464,7 +481,7 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
464 break; 481 break;
465 case MSR_DR|MSR_IR: 482 case MSR_DR|MSR_IR:
466 if (!slb) 483 if (!slb)
467 return -ENOENT; 484 goto no_slb;
468 485
469 *vsid = gvsid; 486 *vsid = gvsid;
470 break; 487 break;
@@ -473,10 +490,21 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
473 break; 490 break;
474 } 491 }
475 492
476 if (vcpu->arch.msr & MSR_PR) 493 if (vcpu->arch.shared->msr & MSR_PR)
477 *vsid |= VSID_PR; 494 *vsid |= VSID_PR;
478 495
479 return 0; 496 return 0;
497
498no_slb:
499 /* Catch magic page case */
500 if (unlikely(mp_ea) &&
501 unlikely(esid == (mp_ea >> SID_SHIFT)) &&
502 !(vcpu->arch.shared->msr & MSR_PR)) {
503 *vsid = VSID_REAL | esid;
504 return 0;
505 }
506
507 return -EINVAL;
480} 508}
481 509
482static bool kvmppc_mmu_book3s_64_is_dcbz32(struct kvm_vcpu *vcpu) 510static bool kvmppc_mmu_book3s_64_is_dcbz32(struct kvm_vcpu *vcpu)
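Editor's note: both the 32-bit and 64-bit translation paths above gain a magic-page override: if the effective address falls on the para-virtualized magic page and the guest is in supervisor mode, the page-table walk is bypassed and the guest real address is pointed straight at magic_page_pa with full permissions. A simplified standalone sketch of that check; KVM_PAM's value and the field layout are assumptions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define KVM_PAM 0x0fffffffffffffffULL   /* assumed physical address mask */

struct pte_sketch {
        uint64_t raddr;
        bool may_read, may_write, may_execute;
};

/* Returns true when eaddr hits the magic page while the guest runs in
 * supervisor mode, in which case the translation is filled in directly. */
static bool magic_page_xlate(uint64_t eaddr, uint64_t mp_ea, uint64_t mp_pa,
                             bool msr_pr, struct pte_sketch *pte)
{
        if (!mp_ea || msr_pr)
                return false;
        if ((eaddr & ~0xfffULL) != (mp_ea & ~0xfffULL))
                return false;

        pte->raddr = (mp_pa | (eaddr & 0xfff)) & KVM_PAM;
        pte->may_read = pte->may_write = pte->may_execute = true;
        return true;
}

int main(void)
{
        struct pte_sketch pte = { 0 };
        uint64_t mp_ea = 0xc000000000001000ULL;   /* made-up magic page EA */

        if (magic_page_xlate(0xc000000000001234ULL, mp_ea,
                             0x00000000deadb000ULL, false, &pte))
                printf("raddr=%#llx rwx=%d%d%d\n",
                       (unsigned long long)pte.raddr,
                       pte.may_read, pte.may_write, pte.may_execute);
        return 0;
}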
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 384179a5002b..fa2f08434ba5 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -20,7 +20,6 @@
20 */ 20 */
21 21
22#include <linux/kvm_host.h> 22#include <linux/kvm_host.h>
23#include <linux/hash.h>
24 23
25#include <asm/kvm_ppc.h> 24#include <asm/kvm_ppc.h>
26#include <asm/kvm_book3s.h> 25#include <asm/kvm_book3s.h>
@@ -28,24 +27,9 @@
28#include <asm/machdep.h> 27#include <asm/machdep.h>
29#include <asm/mmu_context.h> 28#include <asm/mmu_context.h>
30#include <asm/hw_irq.h> 29#include <asm/hw_irq.h>
30#include "trace.h"
31 31
32#define PTE_SIZE 12 32#define PTE_SIZE 12
33#define VSID_ALL 0
34
35/* #define DEBUG_MMU */
36/* #define DEBUG_SLB */
37
38#ifdef DEBUG_MMU
39#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
40#else
41#define dprintk_mmu(a, ...) do { } while(0)
42#endif
43
44#ifdef DEBUG_SLB
45#define dprintk_slb(a, ...) printk(KERN_INFO a, __VA_ARGS__)
46#else
47#define dprintk_slb(a, ...) do { } while(0)
48#endif
49 33
50void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) 34void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
51{ 35{
@@ -58,34 +42,39 @@ void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
58 * a hash, so we don't waste cycles on looping */ 42 * a hash, so we don't waste cycles on looping */
59static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid) 43static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
60{ 44{
61 return hash_64(gvsid, SID_MAP_BITS); 45 return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
46 ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
47 ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
48 ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
49 ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
50 ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
51 ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
52 ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
62} 53}
63 54
55
64static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid) 56static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
65{ 57{
66 struct kvmppc_sid_map *map; 58 struct kvmppc_sid_map *map;
67 u16 sid_map_mask; 59 u16 sid_map_mask;
68 60
69 if (vcpu->arch.msr & MSR_PR) 61 if (vcpu->arch.shared->msr & MSR_PR)
70 gvsid |= VSID_PR; 62 gvsid |= VSID_PR;
71 63
72 sid_map_mask = kvmppc_sid_hash(vcpu, gvsid); 64 sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
73 map = &to_book3s(vcpu)->sid_map[sid_map_mask]; 65 map = &to_book3s(vcpu)->sid_map[sid_map_mask];
74 if (map->guest_vsid == gvsid) { 66 if (map->valid && (map->guest_vsid == gvsid)) {
75 dprintk_slb("SLB: Searching: 0x%llx -> 0x%llx\n", 67 trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
76 gvsid, map->host_vsid);
77 return map; 68 return map;
78 } 69 }
79 70
80 map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask]; 71 map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
81 if (map->guest_vsid == gvsid) { 72 if (map->valid && (map->guest_vsid == gvsid)) {
82 dprintk_slb("SLB: Searching 0x%llx -> 0x%llx\n", 73 trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
83 gvsid, map->host_vsid);
84 return map; 74 return map;
85 } 75 }
86 76
87 dprintk_slb("SLB: Searching %d/%d: 0x%llx -> not found\n", 77 trace_kvm_book3s_slb_fail(sid_map_mask, gvsid);
88 sid_map_mask, SID_MAP_MASK - sid_map_mask, gvsid);
89 return NULL; 78 return NULL;
90} 79}
91 80
@@ -101,18 +90,13 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
101 struct kvmppc_sid_map *map; 90 struct kvmppc_sid_map *map;
102 91
103 /* Get host physical address for gpa */ 92 /* Get host physical address for gpa */
104 hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT); 93 hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
105 if (kvm_is_error_hva(hpaddr)) { 94 if (is_error_pfn(hpaddr)) {
106 printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr); 95 printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr);
107 return -EINVAL; 96 return -EINVAL;
108 } 97 }
109 hpaddr <<= PAGE_SHIFT; 98 hpaddr <<= PAGE_SHIFT;
110#if PAGE_SHIFT == 12 99 hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);
111#elif PAGE_SHIFT == 16
112 hpaddr |= orig_pte->raddr & 0xf000;
113#else
114#error Unknown page size
115#endif
116 100
117 /* and write the mapping ea -> hpa into the pt */ 101 /* and write the mapping ea -> hpa into the pt */
118 vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid); 102 vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
@@ -161,10 +145,7 @@ map_again:
161 } else { 145 } else {
162 struct hpte_cache *pte = kvmppc_mmu_hpte_cache_next(vcpu); 146 struct hpte_cache *pte = kvmppc_mmu_hpte_cache_next(vcpu);
163 147
164 dprintk_mmu("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx\n", 148 trace_kvm_book3s_64_mmu_map(rflags, hpteg, va, hpaddr, orig_pte);
165 ((rflags & HPTE_R_PP) == 3) ? '-' : 'w',
166 (rflags & HPTE_R_N) ? '-' : 'x',
167 orig_pte->eaddr, hpteg, va, orig_pte->vpage, hpaddr);
168 149
169 /* The ppc_md code may give us a secondary entry even though we 150 /* The ppc_md code may give us a secondary entry even though we
170 asked for a primary. Fix up. */ 151 asked for a primary. Fix up. */
@@ -191,7 +172,7 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
191 u16 sid_map_mask; 172 u16 sid_map_mask;
192 static int backwards_map = 0; 173 static int backwards_map = 0;
193 174
194 if (vcpu->arch.msr & MSR_PR) 175 if (vcpu->arch.shared->msr & MSR_PR)
195 gvsid |= VSID_PR; 176 gvsid |= VSID_PR;
196 177
197 /* We might get collisions that trap in preceding order, so let's 178 /* We might get collisions that trap in preceding order, so let's
@@ -219,8 +200,7 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
219 map->guest_vsid = gvsid; 200 map->guest_vsid = gvsid;
220 map->valid = true; 201 map->valid = true;
221 202
222 dprintk_slb("SLB: New mapping at %d: 0x%llx -> 0x%llx\n", 203 trace_kvm_book3s_slb_map(sid_map_mask, gvsid, map->host_vsid);
223 sid_map_mask, gvsid, map->host_vsid);
224 204
225 return map; 205 return map;
226} 206}
@@ -292,7 +272,7 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
292 to_svcpu(vcpu)->slb[slb_index].esid = slb_esid; 272 to_svcpu(vcpu)->slb[slb_index].esid = slb_esid;
293 to_svcpu(vcpu)->slb[slb_index].vsid = slb_vsid; 273 to_svcpu(vcpu)->slb[slb_index].vsid = slb_vsid;
294 274
295 dprintk_slb("slbmte %#llx, %#llx\n", slb_vsid, slb_esid); 275 trace_kvm_book3s_slbmte(slb_vsid, slb_esid);
296 276
297 return 0; 277 return 0;
298} 278}
@@ -306,7 +286,7 @@ void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
306void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) 286void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
307{ 287{
308 kvmppc_mmu_hpte_destroy(vcpu); 288 kvmppc_mmu_hpte_destroy(vcpu);
309 __destroy_context(to_book3s(vcpu)->context_id); 289 __destroy_context(to_book3s(vcpu)->context_id[0]);
310} 290}
311 291
312int kvmppc_mmu_init(struct kvm_vcpu *vcpu) 292int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
@@ -317,10 +297,10 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
317 err = __init_new_context(); 297 err = __init_new_context();
318 if (err < 0) 298 if (err < 0)
319 return -1; 299 return -1;
320 vcpu3s->context_id = err; 300 vcpu3s->context_id[0] = err;
321 301
322 vcpu3s->vsid_max = ((vcpu3s->context_id + 1) << USER_ESID_BITS) - 1; 302 vcpu3s->vsid_max = ((vcpu3s->context_id[0] + 1) << USER_ESID_BITS) - 1;
323 vcpu3s->vsid_first = vcpu3s->context_id << USER_ESID_BITS; 303 vcpu3s->vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS;
324 vcpu3s->vsid_next = vcpu3s->vsid_first; 304 vcpu3s->vsid_next = vcpu3s->vsid_first;
325 305
326 kvmppc_mmu_hpte_init(vcpu); 306 kvmppc_mmu_hpte_init(vcpu);
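Editor's note: the PAGE_SHIFT #if ladder in kvmppc_mmu_map_page() is replaced above by a single expression that ORs the guest real address bits between 4 KiB granularity and the host page size into hpaddr. A standalone illustration of what that mask keeps for 4 KiB versus 64 KiB host pages; the sample values are made up:

#include <stdint.h>
#include <stdio.h>

static uint64_t fixup(uint64_t hpaddr_pfn, uint64_t raddr,
                      unsigned int page_shift)
{
        uint64_t page_mask = ~((1ULL << page_shift) - 1);
        uint64_t hpaddr = hpaddr_pfn << page_shift;

        /* keep raddr bits above 4 KiB granularity but below the page size */
        return hpaddr | (raddr & (~0xfffULL & ~page_mask));
}

int main(void)
{
        uint64_t raddr = 0x12345678;

        /* 4 KiB host pages add no extra bits; 64 KiB pages add bits 12-15 */
        printf("4k : %#llx\n", (unsigned long long)fixup(0x9000, raddr, 12));
        printf("64k: %#llx\n", (unsigned long long)fixup(0x900, raddr, 16));
        return 0;
}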
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index c85f906038ce..466846557089 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -73,8 +73,8 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
73 switch (get_xop(inst)) { 73 switch (get_xop(inst)) {
74 case OP_19_XOP_RFID: 74 case OP_19_XOP_RFID:
75 case OP_19_XOP_RFI: 75 case OP_19_XOP_RFI:
76 kvmppc_set_pc(vcpu, vcpu->arch.srr0); 76 kvmppc_set_pc(vcpu, vcpu->arch.shared->srr0);
77 kvmppc_set_msr(vcpu, vcpu->arch.srr1); 77 kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1);
78 *advance = 0; 78 *advance = 0;
79 break; 79 break;
80 80
@@ -86,14 +86,15 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
86 case 31: 86 case 31:
87 switch (get_xop(inst)) { 87 switch (get_xop(inst)) {
88 case OP_31_XOP_MFMSR: 88 case OP_31_XOP_MFMSR:
89 kvmppc_set_gpr(vcpu, get_rt(inst), vcpu->arch.msr); 89 kvmppc_set_gpr(vcpu, get_rt(inst),
90 vcpu->arch.shared->msr);
90 break; 91 break;
91 case OP_31_XOP_MTMSRD: 92 case OP_31_XOP_MTMSRD:
92 { 93 {
93 ulong rs = kvmppc_get_gpr(vcpu, get_rs(inst)); 94 ulong rs = kvmppc_get_gpr(vcpu, get_rs(inst));
94 if (inst & 0x10000) { 95 if (inst & 0x10000) {
95 vcpu->arch.msr &= ~(MSR_RI | MSR_EE); 96 vcpu->arch.shared->msr &= ~(MSR_RI | MSR_EE);
96 vcpu->arch.msr |= rs & (MSR_RI | MSR_EE); 97 vcpu->arch.shared->msr |= rs & (MSR_RI | MSR_EE);
97 } else 98 } else
98 kvmppc_set_msr(vcpu, rs); 99 kvmppc_set_msr(vcpu, rs);
99 break; 100 break;
@@ -204,14 +205,14 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
204 ra = kvmppc_get_gpr(vcpu, get_ra(inst)); 205 ra = kvmppc_get_gpr(vcpu, get_ra(inst));
205 206
206 addr = (ra + rb) & ~31ULL; 207 addr = (ra + rb) & ~31ULL;
207 if (!(vcpu->arch.msr & MSR_SF)) 208 if (!(vcpu->arch.shared->msr & MSR_SF))
208 addr &= 0xffffffff; 209 addr &= 0xffffffff;
209 vaddr = addr; 210 vaddr = addr;
210 211
211 r = kvmppc_st(vcpu, &addr, 32, zeros, true); 212 r = kvmppc_st(vcpu, &addr, 32, zeros, true);
212 if ((r == -ENOENT) || (r == -EPERM)) { 213 if ((r == -ENOENT) || (r == -EPERM)) {
213 *advance = 0; 214 *advance = 0;
214 vcpu->arch.dear = vaddr; 215 vcpu->arch.shared->dar = vaddr;
215 to_svcpu(vcpu)->fault_dar = vaddr; 216 to_svcpu(vcpu)->fault_dar = vaddr;
216 217
217 dsisr = DSISR_ISSTORE; 218 dsisr = DSISR_ISSTORE;
@@ -220,7 +221,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
220 else if (r == -EPERM) 221 else if (r == -EPERM)
221 dsisr |= DSISR_PROTFAULT; 222 dsisr |= DSISR_PROTFAULT;
222 223
223 to_book3s(vcpu)->dsisr = dsisr; 224 vcpu->arch.shared->dsisr = dsisr;
224 to_svcpu(vcpu)->fault_dsisr = dsisr; 225 to_svcpu(vcpu)->fault_dsisr = dsisr;
225 226
226 kvmppc_book3s_queue_irqprio(vcpu, 227 kvmppc_book3s_queue_irqprio(vcpu,
@@ -263,7 +264,7 @@ void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper,
263 } 264 }
264} 265}
265 266
266static u32 kvmppc_read_bat(struct kvm_vcpu *vcpu, int sprn) 267static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn)
267{ 268{
268 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); 269 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
269 struct kvmppc_bat *bat; 270 struct kvmppc_bat *bat;
@@ -285,35 +286,7 @@ static u32 kvmppc_read_bat(struct kvm_vcpu *vcpu, int sprn)
285 BUG(); 286 BUG();
286 } 287 }
287 288
288 if (sprn % 2) 289 return bat;
289 return bat->raw >> 32;
290 else
291 return bat->raw;
292}
293
294static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u32 val)
295{
296 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
297 struct kvmppc_bat *bat;
298
299 switch (sprn) {
300 case SPRN_IBAT0U ... SPRN_IBAT3L:
301 bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
302 break;
303 case SPRN_IBAT4U ... SPRN_IBAT7L:
304 bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)];
305 break;
306 case SPRN_DBAT0U ... SPRN_DBAT3L:
307 bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
308 break;
309 case SPRN_DBAT4U ... SPRN_DBAT7L:
310 bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)];
311 break;
312 default:
313 BUG();
314 }
315
316 kvmppc_set_bat(vcpu, bat, !(sprn % 2), val);
317} 290}
318 291
319int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) 292int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
@@ -326,10 +299,10 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
326 to_book3s(vcpu)->sdr1 = spr_val; 299 to_book3s(vcpu)->sdr1 = spr_val;
327 break; 300 break;
328 case SPRN_DSISR: 301 case SPRN_DSISR:
329 to_book3s(vcpu)->dsisr = spr_val; 302 vcpu->arch.shared->dsisr = spr_val;
330 break; 303 break;
331 case SPRN_DAR: 304 case SPRN_DAR:
332 vcpu->arch.dear = spr_val; 305 vcpu->arch.shared->dar = spr_val;
333 break; 306 break;
334 case SPRN_HIOR: 307 case SPRN_HIOR:
335 to_book3s(vcpu)->hior = spr_val; 308 to_book3s(vcpu)->hior = spr_val;
@@ -338,12 +311,16 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
338 case SPRN_IBAT4U ... SPRN_IBAT7L: 311 case SPRN_IBAT4U ... SPRN_IBAT7L:
339 case SPRN_DBAT0U ... SPRN_DBAT3L: 312 case SPRN_DBAT0U ... SPRN_DBAT3L:
340 case SPRN_DBAT4U ... SPRN_DBAT7L: 313 case SPRN_DBAT4U ... SPRN_DBAT7L:
341 kvmppc_write_bat(vcpu, sprn, (u32)spr_val); 314 {
315 struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);
316
317 kvmppc_set_bat(vcpu, bat, !(sprn % 2), (u32)spr_val);
342 /* BAT writes happen so rarely that we're ok to flush 318 /* BAT writes happen so rarely that we're ok to flush
343 * everything here */ 319 * everything here */
344 kvmppc_mmu_pte_flush(vcpu, 0, 0); 320 kvmppc_mmu_pte_flush(vcpu, 0, 0);
345 kvmppc_mmu_flush_segments(vcpu); 321 kvmppc_mmu_flush_segments(vcpu);
346 break; 322 break;
323 }
347 case SPRN_HID0: 324 case SPRN_HID0:
348 to_book3s(vcpu)->hid[0] = spr_val; 325 to_book3s(vcpu)->hid[0] = spr_val;
349 break; 326 break;
@@ -433,16 +410,24 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
433 case SPRN_IBAT4U ... SPRN_IBAT7L: 410 case SPRN_IBAT4U ... SPRN_IBAT7L:
434 case SPRN_DBAT0U ... SPRN_DBAT3L: 411 case SPRN_DBAT0U ... SPRN_DBAT3L:
435 case SPRN_DBAT4U ... SPRN_DBAT7L: 412 case SPRN_DBAT4U ... SPRN_DBAT7L:
436 kvmppc_set_gpr(vcpu, rt, kvmppc_read_bat(vcpu, sprn)); 413 {
414 struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);
415
416 if (sprn % 2)
417 kvmppc_set_gpr(vcpu, rt, bat->raw >> 32);
418 else
419 kvmppc_set_gpr(vcpu, rt, bat->raw);
420
437 break; 421 break;
422 }
438 case SPRN_SDR1: 423 case SPRN_SDR1:
439 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1); 424 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1);
440 break; 425 break;
441 case SPRN_DSISR: 426 case SPRN_DSISR:
442 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->dsisr); 427 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dsisr);
443 break; 428 break;
444 case SPRN_DAR: 429 case SPRN_DAR:
445 kvmppc_set_gpr(vcpu, rt, vcpu->arch.dear); 430 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar);
446 break; 431 break;
447 case SPRN_HIOR: 432 case SPRN_HIOR:
448 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hior); 433 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hior);
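Editor's note: kvmppc_read_bat()/kvmppc_write_bat() collapse into one kvmppc_find_bat() lookup above, with the SPR number's parity choosing which half of the 64-bit raw BAT image is read or written. A tiny standalone sketch of the mfspr side; the raw value and SPR numbers are made up:

#include <stdint.h>
#include <stdio.h>

static uint32_t read_bat_half(uint64_t raw, unsigned int sprn)
{
        /* odd SPR number -> top half of the raw image, even -> bottom half */
        return (sprn % 2) ? (uint32_t)(raw >> 32) : (uint32_t)raw;
}

int main(void)
{
        uint64_t raw = 0x1234567890abcdefULL;

        printf("even sprn -> 0x%08x\n", read_bat_half(raw, 0x210));
        printf("odd sprn  -> 0x%08x\n", read_bat_half(raw, 0x211));
        return 0;
}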
diff --git a/arch/powerpc/kvm/book3s_mmu_hpte.c b/arch/powerpc/kvm/book3s_mmu_hpte.c
index 4868d4a7ebc5..79751d8dd131 100644
--- a/arch/powerpc/kvm/book3s_mmu_hpte.c
+++ b/arch/powerpc/kvm/book3s_mmu_hpte.c
@@ -21,6 +21,7 @@
21#include <linux/kvm_host.h> 21#include <linux/kvm_host.h>
22#include <linux/hash.h> 22#include <linux/hash.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include "trace.h"
24 25
25#include <asm/kvm_ppc.h> 26#include <asm/kvm_ppc.h>
26#include <asm/kvm_book3s.h> 27#include <asm/kvm_book3s.h>
@@ -30,14 +31,6 @@
30 31
31#define PTE_SIZE 12 32#define PTE_SIZE 12
32 33
33/* #define DEBUG_MMU */
34
35#ifdef DEBUG_MMU
36#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
37#else
38#define dprintk_mmu(a, ...) do { } while(0)
39#endif
40
41static struct kmem_cache *hpte_cache; 34static struct kmem_cache *hpte_cache;
42 35
43static inline u64 kvmppc_mmu_hash_pte(u64 eaddr) 36static inline u64 kvmppc_mmu_hash_pte(u64 eaddr)
@@ -45,6 +38,12 @@ static inline u64 kvmppc_mmu_hash_pte(u64 eaddr)
45 return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE); 38 return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE);
46} 39}
47 40
41static inline u64 kvmppc_mmu_hash_pte_long(u64 eaddr)
42{
43 return hash_64((eaddr & 0x0ffff000) >> PTE_SIZE,
44 HPTEG_HASH_BITS_PTE_LONG);
45}
46
48static inline u64 kvmppc_mmu_hash_vpte(u64 vpage) 47static inline u64 kvmppc_mmu_hash_vpte(u64 vpage)
49{ 48{
50 return hash_64(vpage & 0xfffffffffULL, HPTEG_HASH_BITS_VPTE); 49 return hash_64(vpage & 0xfffffffffULL, HPTEG_HASH_BITS_VPTE);
@@ -60,77 +59,128 @@ void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
60{ 59{
61 u64 index; 60 u64 index;
62 61
62 trace_kvm_book3s_mmu_map(pte);
63
64 spin_lock(&vcpu->arch.mmu_lock);
65
63 /* Add to ePTE list */ 66 /* Add to ePTE list */
64 index = kvmppc_mmu_hash_pte(pte->pte.eaddr); 67 index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
65 hlist_add_head(&pte->list_pte, &vcpu->arch.hpte_hash_pte[index]); 68 hlist_add_head_rcu(&pte->list_pte, &vcpu->arch.hpte_hash_pte[index]);
69
70 /* Add to ePTE_long list */
71 index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr);
72 hlist_add_head_rcu(&pte->list_pte_long,
73 &vcpu->arch.hpte_hash_pte_long[index]);
66 74
67 /* Add to vPTE list */ 75 /* Add to vPTE list */
68 index = kvmppc_mmu_hash_vpte(pte->pte.vpage); 76 index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
69 hlist_add_head(&pte->list_vpte, &vcpu->arch.hpte_hash_vpte[index]); 77 hlist_add_head_rcu(&pte->list_vpte, &vcpu->arch.hpte_hash_vpte[index]);
70 78
71 /* Add to vPTE_long list */ 79 /* Add to vPTE_long list */
72 index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage); 80 index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);
73 hlist_add_head(&pte->list_vpte_long, 81 hlist_add_head_rcu(&pte->list_vpte_long,
74 &vcpu->arch.hpte_hash_vpte_long[index]); 82 &vcpu->arch.hpte_hash_vpte_long[index]);
83
84 spin_unlock(&vcpu->arch.mmu_lock);
85}
86
87static void free_pte_rcu(struct rcu_head *head)
88{
89 struct hpte_cache *pte = container_of(head, struct hpte_cache, rcu_head);
90 kmem_cache_free(hpte_cache, pte);
75} 91}
76 92
77static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) 93static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
78{ 94{
79 dprintk_mmu("KVM: Flushing SPT: 0x%lx (0x%llx) -> 0x%llx\n", 95 trace_kvm_book3s_mmu_invalidate(pte);
80 pte->pte.eaddr, pte->pte.vpage, pte->host_va);
81 96
82 /* Different for 32 and 64 bit */ 97 /* Different for 32 and 64 bit */
83 kvmppc_mmu_invalidate_pte(vcpu, pte); 98 kvmppc_mmu_invalidate_pte(vcpu, pte);
84 99
100 spin_lock(&vcpu->arch.mmu_lock);
101
102 /* pte already invalidated in between? */
103 if (hlist_unhashed(&pte->list_pte)) {
104 spin_unlock(&vcpu->arch.mmu_lock);
105 return;
106 }
107
108 hlist_del_init_rcu(&pte->list_pte);
109 hlist_del_init_rcu(&pte->list_pte_long);
110 hlist_del_init_rcu(&pte->list_vpte);
111 hlist_del_init_rcu(&pte->list_vpte_long);
112
85 if (pte->pte.may_write) 113 if (pte->pte.may_write)
86 kvm_release_pfn_dirty(pte->pfn); 114 kvm_release_pfn_dirty(pte->pfn);
87 else 115 else
88 kvm_release_pfn_clean(pte->pfn); 116 kvm_release_pfn_clean(pte->pfn);
89 117
90 hlist_del(&pte->list_pte); 118 spin_unlock(&vcpu->arch.mmu_lock);
91 hlist_del(&pte->list_vpte);
92 hlist_del(&pte->list_vpte_long);
93 119
94 vcpu->arch.hpte_cache_count--; 120 vcpu->arch.hpte_cache_count--;
95 kmem_cache_free(hpte_cache, pte); 121 call_rcu(&pte->rcu_head, free_pte_rcu);
96} 122}
97 123
98static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu) 124static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
99{ 125{
100 struct hpte_cache *pte; 126 struct hpte_cache *pte;
101 struct hlist_node *node, *tmp; 127 struct hlist_node *node;
102 int i; 128 int i;
103 129
130 rcu_read_lock();
131
104 for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) { 132 for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
105 struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i]; 133 struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i];
106 134
107 hlist_for_each_entry_safe(pte, node, tmp, list, list_vpte_long) 135 hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
108 invalidate_pte(vcpu, pte); 136 invalidate_pte(vcpu, pte);
109 } 137 }
138
139 rcu_read_unlock();
110} 140}
111 141
112static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea) 142static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
113{ 143{
114 struct hlist_head *list; 144 struct hlist_head *list;
115 struct hlist_node *node, *tmp; 145 struct hlist_node *node;
116 struct hpte_cache *pte; 146 struct hpte_cache *pte;
117 147
118 /* Find the list of entries in the map */ 148 /* Find the list of entries in the map */
119 list = &vcpu->arch.hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)]; 149 list = &vcpu->arch.hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];
120 150
151 rcu_read_lock();
152
121 /* Check the list for matching entries and invalidate */ 153 /* Check the list for matching entries and invalidate */
122 hlist_for_each_entry_safe(pte, node, tmp, list, list_pte) 154 hlist_for_each_entry_rcu(pte, node, list, list_pte)
123 if ((pte->pte.eaddr & ~0xfffUL) == guest_ea) 155 if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
124 invalidate_pte(vcpu, pte); 156 invalidate_pte(vcpu, pte);
157
158 rcu_read_unlock();
125} 159}
126 160
127void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask) 161static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
128{ 162{
129 u64 i; 163 struct hlist_head *list;
164 struct hlist_node *node;
165 struct hpte_cache *pte;
130 166
131 dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%lx & 0x%lx\n", 167 /* Find the list of entries in the map */
132 vcpu->arch.hpte_cache_count, guest_ea, ea_mask); 168 list = &vcpu->arch.hpte_hash_pte_long[
169 kvmppc_mmu_hash_pte_long(guest_ea)];
133 170
171 rcu_read_lock();
172
173 /* Check the list for matching entries and invalidate */
174 hlist_for_each_entry_rcu(pte, node, list, list_pte_long)
175 if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea)
176 invalidate_pte(vcpu, pte);
177
178 rcu_read_unlock();
179}
180
181void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
182{
183 trace_kvm_book3s_mmu_flush("", vcpu, guest_ea, ea_mask);
134 guest_ea &= ea_mask; 184 guest_ea &= ea_mask;
135 185
136 switch (ea_mask) { 186 switch (ea_mask) {
@@ -138,9 +188,7 @@ void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
138 kvmppc_mmu_pte_flush_page(vcpu, guest_ea); 188 kvmppc_mmu_pte_flush_page(vcpu, guest_ea);
139 break; 189 break;
140 case 0x0ffff000: 190 case 0x0ffff000:
141 /* 32-bit flush w/o segment, go through all possible segments */ 191 kvmppc_mmu_pte_flush_long(vcpu, guest_ea);
142 for (i = 0; i < 0x100000000ULL; i += 0x10000000ULL)
143 kvmppc_mmu_pte_flush(vcpu, guest_ea | i, ~0xfffUL);
144 break; 192 break;
145 case 0: 193 case 0:
146 /* Doing a complete flush -> start from scratch */ 194 /* Doing a complete flush -> start from scratch */
@@ -156,39 +204,46 @@ void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
156static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp) 204static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
157{ 205{
158 struct hlist_head *list; 206 struct hlist_head *list;
159 struct hlist_node *node, *tmp; 207 struct hlist_node *node;
160 struct hpte_cache *pte; 208 struct hpte_cache *pte;
161 u64 vp_mask = 0xfffffffffULL; 209 u64 vp_mask = 0xfffffffffULL;
162 210
163 list = &vcpu->arch.hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)]; 211 list = &vcpu->arch.hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];
164 212
213 rcu_read_lock();
214
165 /* Check the list for matching entries and invalidate */ 215 /* Check the list for matching entries and invalidate */
166 hlist_for_each_entry_safe(pte, node, tmp, list, list_vpte) 216 hlist_for_each_entry_rcu(pte, node, list, list_vpte)
167 if ((pte->pte.vpage & vp_mask) == guest_vp) 217 if ((pte->pte.vpage & vp_mask) == guest_vp)
168 invalidate_pte(vcpu, pte); 218 invalidate_pte(vcpu, pte);
219
220 rcu_read_unlock();
169} 221}
170 222
171/* Flush with mask 0xffffff000 */ 223/* Flush with mask 0xffffff000 */
172static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp) 224static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
173{ 225{
174 struct hlist_head *list; 226 struct hlist_head *list;
175 struct hlist_node *node, *tmp; 227 struct hlist_node *node;
176 struct hpte_cache *pte; 228 struct hpte_cache *pte;
177 u64 vp_mask = 0xffffff000ULL; 229 u64 vp_mask = 0xffffff000ULL;
178 230
179 list = &vcpu->arch.hpte_hash_vpte_long[ 231 list = &vcpu->arch.hpte_hash_vpte_long[
180 kvmppc_mmu_hash_vpte_long(guest_vp)]; 232 kvmppc_mmu_hash_vpte_long(guest_vp)];
181 233
234 rcu_read_lock();
235
182 /* Check the list for matching entries and invalidate */ 236 /* Check the list for matching entries and invalidate */
183 hlist_for_each_entry_safe(pte, node, tmp, list, list_vpte_long) 237 hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
184 if ((pte->pte.vpage & vp_mask) == guest_vp) 238 if ((pte->pte.vpage & vp_mask) == guest_vp)
185 invalidate_pte(vcpu, pte); 239 invalidate_pte(vcpu, pte);
240
241 rcu_read_unlock();
186} 242}
187 243
188void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask) 244void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
189{ 245{
190 dprintk_mmu("KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n", 246 trace_kvm_book3s_mmu_flush("v", vcpu, guest_vp, vp_mask);
191 vcpu->arch.hpte_cache_count, guest_vp, vp_mask);
192 guest_vp &= vp_mask; 247 guest_vp &= vp_mask;
193 248
194 switch(vp_mask) { 249 switch(vp_mask) {
@@ -206,21 +261,24 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
206 261
207void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end) 262void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
208{ 263{
209 struct hlist_node *node, *tmp; 264 struct hlist_node *node;
210 struct hpte_cache *pte; 265 struct hpte_cache *pte;
211 int i; 266 int i;
212 267
213 dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%lx - 0x%lx\n", 268 trace_kvm_book3s_mmu_flush("p", vcpu, pa_start, pa_end);
214 vcpu->arch.hpte_cache_count, pa_start, pa_end); 269
270 rcu_read_lock();
215 271
216 for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) { 272 for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
217 struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i]; 273 struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i];
218 274
219 hlist_for_each_entry_safe(pte, node, tmp, list, list_vpte_long) 275 hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
220 if ((pte->pte.raddr >= pa_start) && 276 if ((pte->pte.raddr >= pa_start) &&
221 (pte->pte.raddr < pa_end)) 277 (pte->pte.raddr < pa_end))
222 invalidate_pte(vcpu, pte); 278 invalidate_pte(vcpu, pte);
223 } 279 }
280
281 rcu_read_unlock();
224} 282}
225 283
226struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu) 284struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
@@ -254,11 +312,15 @@ int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
254 /* init hpte lookup hashes */ 312 /* init hpte lookup hashes */
255 kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte, 313 kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte,
256 ARRAY_SIZE(vcpu->arch.hpte_hash_pte)); 314 ARRAY_SIZE(vcpu->arch.hpte_hash_pte));
315 kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte_long,
316 ARRAY_SIZE(vcpu->arch.hpte_hash_pte_long));
257 kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte, 317 kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte,
258 ARRAY_SIZE(vcpu->arch.hpte_hash_vpte)); 318 ARRAY_SIZE(vcpu->arch.hpte_hash_vpte));
259 kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte_long, 319 kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte_long,
260 ARRAY_SIZE(vcpu->arch.hpte_hash_vpte_long)); 320 ARRAY_SIZE(vcpu->arch.hpte_hash_vpte_long));
261 321
322 spin_lock_init(&vcpu->arch.mmu_lock);
323
262 return 0; 324 return 0;
263} 325}
264 326
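Editor's note: the hpte cache above moves its hash lists to RCU (lookups under rcu_read_lock(), frees via call_rcu(), list updates serialized by the new mmu_lock) and adds an hpte_hash_pte_long table keyed on bits 12-27 of the effective address, so the 0x0ffff000 flush no longer iterates over all sixteen segments. A standalone sketch of why that keying works; the mixer below is only a stand-in for the kernel's hash_64():

#include <stdint.h>
#include <stdio.h>

#define HASH_BITS 12

static unsigned int hash_pte_long(uint64_t eaddr)
{
        uint64_t key = (eaddr & 0x0ffff000ULL) >> 12;

        /* stand-in mixer for hash_64(key, HPTEG_HASH_BITS_PTE_LONG) */
        return (unsigned int)((key * 0x9e3779b97f4a7c15ULL) >> (64 - HASH_BITS));
}

int main(void)
{
        /* same page offset in two different segments -> same bucket */
        printf("%u %u\n", hash_pte_long(0x10234000), hash_pte_long(0x70234000));
        return 0;
}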
diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c
index 35a701f3ece4..7b0ee96c1bed 100644
--- a/arch/powerpc/kvm/book3s_paired_singles.c
+++ b/arch/powerpc/kvm/book3s_paired_singles.c
@@ -165,14 +165,15 @@ static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt)
165static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store) 165static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store)
166{ 166{
167 u64 dsisr; 167 u64 dsisr;
168 struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
168 169
169 vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 33, 36, 0); 170 shared->msr = kvmppc_set_field(shared->msr, 33, 36, 0);
170 vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 42, 47, 0); 171 shared->msr = kvmppc_set_field(shared->msr, 42, 47, 0);
171 vcpu->arch.dear = eaddr; 172 shared->dar = eaddr;
172 /* Page Fault */ 173 /* Page Fault */
173 dsisr = kvmppc_set_field(0, 33, 33, 1); 174 dsisr = kvmppc_set_field(0, 33, 33, 1);
174 if (is_store) 175 if (is_store)
175 to_book3s(vcpu)->dsisr = kvmppc_set_field(dsisr, 38, 38, 1); 176 shared->dsisr = kvmppc_set_field(dsisr, 38, 38, 1);
176 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE); 177 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
177} 178}
178 179
@@ -658,7 +659,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
658 if (!kvmppc_inst_is_paired_single(vcpu, inst)) 659 if (!kvmppc_inst_is_paired_single(vcpu, inst))
659 return EMULATE_FAIL; 660 return EMULATE_FAIL;
660 661
661 if (!(vcpu->arch.msr & MSR_FP)) { 662 if (!(vcpu->arch.shared->msr & MSR_FP)) {
662 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL); 663 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL);
663 return EMULATE_AGAIN; 664 return EMULATE_AGAIN;
664 } 665 }
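Editor's note: kvmppc_inject_pf() above now writes DAR and DSISR into the shared page via kvmppc_set_field(), which appears to address bits in IBM (MSB = bit 0) 64-bit numbering. A hedged standalone reading of that helper, only to show that bits 33 and 38 land on the familiar no-translation and store flags; the real kernel implementation may differ in detail:

#include <stdint.h>
#include <stdio.h>

static uint64_t set_field64(uint64_t v, int msb, int lsb, uint64_t val)
{
        int width = lsb - msb + 1;
        uint64_t mask = ((width < 64 ? (1ULL << width) : 0) - 1) << (63 - lsb);

        return (v & ~mask) | ((val << (63 - lsb)) & mask);
}

int main(void)
{
        /* bit 33 -> 0x40000000 (no translation), bit 38 -> 0x02000000 (store) */
        printf("%#llx\n", (unsigned long long)set_field64(0, 33, 33, 1));
        printf("%#llx\n", (unsigned long long)set_field64(0, 38, 38, 1));
        return 0;
}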
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 506d5c316c96..2b9c9088d00e 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -202,8 +202,25 @@ _GLOBAL(kvmppc_rmcall)
202 202
203#if defined(CONFIG_PPC_BOOK3S_32) 203#if defined(CONFIG_PPC_BOOK3S_32)
204#define STACK_LR INT_FRAME_SIZE+4 204#define STACK_LR INT_FRAME_SIZE+4
205
206/* load_up_xxx have to run with MSR_DR=0 on Book3S_32 */
207#define MSR_EXT_START \
208 PPC_STL r20, _NIP(r1); \
209 mfmsr r20; \
210 LOAD_REG_IMMEDIATE(r3, MSR_DR|MSR_EE); \
211 andc r3,r20,r3; /* Disable DR,EE */ \
212 mtmsr r3; \
213 sync
214
215#define MSR_EXT_END \
216 mtmsr r20; /* Enable DR,EE */ \
217 sync; \
218 PPC_LL r20, _NIP(r1)
219
205#elif defined(CONFIG_PPC_BOOK3S_64) 220#elif defined(CONFIG_PPC_BOOK3S_64)
206#define STACK_LR _LINK 221#define STACK_LR _LINK
222#define MSR_EXT_START
223#define MSR_EXT_END
207#endif 224#endif
208 225
209/* 226/*
@@ -215,19 +232,12 @@ _GLOBAL(kvmppc_load_up_ ## what); \
215 PPC_STLU r1, -INT_FRAME_SIZE(r1); \ 232 PPC_STLU r1, -INT_FRAME_SIZE(r1); \
216 mflr r3; \ 233 mflr r3; \
217 PPC_STL r3, STACK_LR(r1); \ 234 PPC_STL r3, STACK_LR(r1); \
218 PPC_STL r20, _NIP(r1); \ 235 MSR_EXT_START; \
219 mfmsr r20; \
220 LOAD_REG_IMMEDIATE(r3, MSR_DR|MSR_EE); \
221 andc r3,r20,r3; /* Disable DR,EE */ \
222 mtmsr r3; \
223 sync; \
224 \ 236 \
225 bl FUNC(load_up_ ## what); \ 237 bl FUNC(load_up_ ## what); \
226 \ 238 \
227 mtmsr r20; /* Enable DR,EE */ \ 239 MSR_EXT_END; \
228 sync; \
229 PPC_LL r3, STACK_LR(r1); \ 240 PPC_LL r3, STACK_LR(r1); \
230 PPC_LL r20, _NIP(r1); \
231 mtlr r3; \ 241 mtlr r3; \
232 addi r1, r1, INT_FRAME_SIZE; \ 242 addi r1, r1, INT_FRAME_SIZE; \
233 blr 243 blr
@@ -242,10 +252,10 @@ define_load_up(vsx)
242 252
243.global kvmppc_trampoline_lowmem 253.global kvmppc_trampoline_lowmem
244kvmppc_trampoline_lowmem: 254kvmppc_trampoline_lowmem:
245 .long kvmppc_handler_lowmem_trampoline - CONFIG_KERNEL_START 255 PPC_LONG kvmppc_handler_lowmem_trampoline - CONFIG_KERNEL_START
246 256
247.global kvmppc_trampoline_enter 257.global kvmppc_trampoline_enter
248kvmppc_trampoline_enter: 258kvmppc_trampoline_enter:
249 .long kvmppc_handler_trampoline_enter - CONFIG_KERNEL_START 259 PPC_LONG kvmppc_handler_trampoline_enter - CONFIG_KERNEL_START
250 260
251#include "book3s_segment.S" 261#include "book3s_segment.S"
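
The comment in the new MSR_EXT_START/MSR_EXT_END macros above explains the constraint: on 32-bit Book3S the load_up_fpu/altivec/vsx helpers must run with data relocation and external interrupts off, so the macros save the MSR, clear MSR_DR and MSR_EE around the call, and restore it afterwards (and expand to nothing on 64-bit). A minimal C rendering of that bracketing is sketched below; the helper names and the local MSR bit values are illustrative assumptions, not kernel API, and it only makes sense for a 32-bit PowerPC build.

    /* Sketch: bracket a call with MSR_DR/MSR_EE cleared, as MSR_EXT_START/END do. */
    #define MSR_EE_BIT 0x8000UL   /* assumed value: external interrupt enable */
    #define MSR_DR_BIT 0x0010UL   /* assumed value: data relocation */

    static inline unsigned long mfmsr_local(void)
    {
            unsigned long msr;
            asm volatile("mfmsr %0" : "=r" (msr));
            return msr;
    }

    static inline void mtmsr_local(unsigned long msr)
    {
            /* mirror the macro: mtmsr followed by sync */
            asm volatile("mtmsr %0; sync" : : "r" (msr) : "memory");
    }

    static void call_with_dr_ee_off(void (*load_up)(void))
    {
            unsigned long old = mfmsr_local();

            mtmsr_local(old & ~(MSR_DR_BIT | MSR_EE_BIT));  /* MSR_EXT_START */
            load_up();                                      /* bl load_up_xxx */
            mtmsr_local(old);                               /* MSR_EXT_END   */
    }
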
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 8d4e35f5372c..77575d08c818 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -62,9 +62,10 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
62{ 62{
63 int i; 63 int i;
64 64
65 printk("pc: %08lx msr: %08lx\n", vcpu->arch.pc, vcpu->arch.msr); 65 printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
66 printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr); 66 printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
67 printk("srr0: %08lx srr1: %08lx\n", vcpu->arch.srr0, vcpu->arch.srr1); 67 printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
68 vcpu->arch.shared->srr1);
68 69
69 printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions); 70 printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);
70 71
@@ -130,13 +131,19 @@ void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
130void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, 131void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
131 struct kvm_interrupt *irq) 132 struct kvm_interrupt *irq)
132{ 133{
133 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_EXTERNAL); 134 unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;
135
136 if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
137 prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;
138
139 kvmppc_booke_queue_irqprio(vcpu, prio);
134} 140}
135 141
136void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu, 142void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
137 struct kvm_interrupt *irq) 143 struct kvm_interrupt *irq)
138{ 144{
139 clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions); 145 clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
146 clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
140} 147}
141 148
142/* Deliver the interrupt of the corresponding priority, if possible. */ 149/* Deliver the interrupt of the corresponding priority, if possible. */
@@ -146,6 +153,26 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
146 int allowed = 0; 153 int allowed = 0;
147 ulong uninitialized_var(msr_mask); 154 ulong uninitialized_var(msr_mask);
148 bool update_esr = false, update_dear = false; 155 bool update_esr = false, update_dear = false;
156 ulong crit_raw = vcpu->arch.shared->critical;
157 ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
158 bool crit;
159 bool keep_irq = false;
160
161 /* Truncate crit indicators in 32 bit mode */
162 if (!(vcpu->arch.shared->msr & MSR_SF)) {
163 crit_raw &= 0xffffffff;
164 crit_r1 &= 0xffffffff;
165 }
166
167 /* Critical section when crit == r1 */
168 crit = (crit_raw == crit_r1);
169 /* ... and we're in supervisor mode */
170 crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
171
172 if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
173 priority = BOOKE_IRQPRIO_EXTERNAL;
174 keep_irq = true;
175 }
149 176
150 switch (priority) { 177 switch (priority) {
151 case BOOKE_IRQPRIO_DTLB_MISS: 178 case BOOKE_IRQPRIO_DTLB_MISS:
@@ -169,36 +196,38 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
169 break; 196 break;
170 case BOOKE_IRQPRIO_CRITICAL: 197 case BOOKE_IRQPRIO_CRITICAL:
171 case BOOKE_IRQPRIO_WATCHDOG: 198 case BOOKE_IRQPRIO_WATCHDOG:
172 allowed = vcpu->arch.msr & MSR_CE; 199 allowed = vcpu->arch.shared->msr & MSR_CE;
173 msr_mask = MSR_ME; 200 msr_mask = MSR_ME;
174 break; 201 break;
175 case BOOKE_IRQPRIO_MACHINE_CHECK: 202 case BOOKE_IRQPRIO_MACHINE_CHECK:
176 allowed = vcpu->arch.msr & MSR_ME; 203 allowed = vcpu->arch.shared->msr & MSR_ME;
177 msr_mask = 0; 204 msr_mask = 0;
178 break; 205 break;
179 case BOOKE_IRQPRIO_EXTERNAL: 206 case BOOKE_IRQPRIO_EXTERNAL:
180 case BOOKE_IRQPRIO_DECREMENTER: 207 case BOOKE_IRQPRIO_DECREMENTER:
181 case BOOKE_IRQPRIO_FIT: 208 case BOOKE_IRQPRIO_FIT:
182 allowed = vcpu->arch.msr & MSR_EE; 209 allowed = vcpu->arch.shared->msr & MSR_EE;
210 allowed = allowed && !crit;
183 msr_mask = MSR_CE|MSR_ME|MSR_DE; 211 msr_mask = MSR_CE|MSR_ME|MSR_DE;
184 break; 212 break;
185 case BOOKE_IRQPRIO_DEBUG: 213 case BOOKE_IRQPRIO_DEBUG:
186 allowed = vcpu->arch.msr & MSR_DE; 214 allowed = vcpu->arch.shared->msr & MSR_DE;
187 msr_mask = MSR_ME; 215 msr_mask = MSR_ME;
188 break; 216 break;
189 } 217 }
190 218
191 if (allowed) { 219 if (allowed) {
192 vcpu->arch.srr0 = vcpu->arch.pc; 220 vcpu->arch.shared->srr0 = vcpu->arch.pc;
193 vcpu->arch.srr1 = vcpu->arch.msr; 221 vcpu->arch.shared->srr1 = vcpu->arch.shared->msr;
194 vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority]; 222 vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
195 if (update_esr == true) 223 if (update_esr == true)
196 vcpu->arch.esr = vcpu->arch.queued_esr; 224 vcpu->arch.esr = vcpu->arch.queued_esr;
197 if (update_dear == true) 225 if (update_dear == true)
198 vcpu->arch.dear = vcpu->arch.queued_dear; 226 vcpu->arch.shared->dar = vcpu->arch.queued_dear;
199 kvmppc_set_msr(vcpu, vcpu->arch.msr & msr_mask); 227 kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);
200 228
201 clear_bit(priority, &vcpu->arch.pending_exceptions); 229 if (!keep_irq)
230 clear_bit(priority, &vcpu->arch.pending_exceptions);
202 } 231 }
203 232
204 return allowed; 233 return allowed;
@@ -208,6 +237,7 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
208void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu) 237void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
209{ 238{
210 unsigned long *pending = &vcpu->arch.pending_exceptions; 239 unsigned long *pending = &vcpu->arch.pending_exceptions;
240 unsigned long old_pending = vcpu->arch.pending_exceptions;
211 unsigned int priority; 241 unsigned int priority;
212 242
213 priority = __ffs(*pending); 243 priority = __ffs(*pending);
@@ -219,6 +249,12 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
219 BITS_PER_BYTE * sizeof(*pending), 249 BITS_PER_BYTE * sizeof(*pending),
220 priority + 1); 250 priority + 1);
221 } 251 }
252
253 /* Tell the guest about our interrupt status */
254 if (*pending)
255 vcpu->arch.shared->int_pending = 1;
256 else if (old_pending)
257 vcpu->arch.shared->int_pending = 0;
222} 258}
223 259
224/** 260/**
@@ -265,7 +301,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
265 break; 301 break;
266 302
267 case BOOKE_INTERRUPT_PROGRAM: 303 case BOOKE_INTERRUPT_PROGRAM:
268 if (vcpu->arch.msr & MSR_PR) { 304 if (vcpu->arch.shared->msr & MSR_PR) {
269 /* Program traps generated by user-level software must be handled 305 /* Program traps generated by user-level software must be handled
270 * by the guest kernel. */ 306 * by the guest kernel. */
271 kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr); 307 kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
@@ -337,7 +373,15 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
337 break; 373 break;
338 374
339 case BOOKE_INTERRUPT_SYSCALL: 375 case BOOKE_INTERRUPT_SYSCALL:
340 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL); 376 if (!(vcpu->arch.shared->msr & MSR_PR) &&
377 (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
378 /* KVM PV hypercalls */
379 kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
380 r = RESUME_GUEST;
381 } else {
382 /* Guest syscalls */
383 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
384 }
341 kvmppc_account_exit(vcpu, SYSCALL_EXITS); 385 kvmppc_account_exit(vcpu, SYSCALL_EXITS);
342 r = RESUME_GUEST; 386 r = RESUME_GUEST;
343 break; 387 break;
@@ -466,15 +510,19 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
466/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */ 510/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
467int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) 511int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
468{ 512{
513 int i;
514
469 vcpu->arch.pc = 0; 515 vcpu->arch.pc = 0;
470 vcpu->arch.msr = 0; 516 vcpu->arch.shared->msr = 0;
471 kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */ 517 kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
472 518
473 vcpu->arch.shadow_pid = 1; 519 vcpu->arch.shadow_pid = 1;
474 520
475 /* Eye-catching number so we know if the guest takes an interrupt 521 /* Eye-catching numbers so we know if the guest takes an interrupt
476 * before it's programmed its own IVPR. */ 522 * before it's programmed its own IVPR/IVORs. */
477 vcpu->arch.ivpr = 0x55550000; 523 vcpu->arch.ivpr = 0x55550000;
524 for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
525 vcpu->arch.ivor[i] = 0x7700 | i * 4;
478 526
479 kvmppc_init_timing_stats(vcpu); 527 kvmppc_init_timing_stats(vcpu);
480 528
@@ -490,14 +538,14 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
490 regs->ctr = vcpu->arch.ctr; 538 regs->ctr = vcpu->arch.ctr;
491 regs->lr = vcpu->arch.lr; 539 regs->lr = vcpu->arch.lr;
492 regs->xer = kvmppc_get_xer(vcpu); 540 regs->xer = kvmppc_get_xer(vcpu);
493 regs->msr = vcpu->arch.msr; 541 regs->msr = vcpu->arch.shared->msr;
494 regs->srr0 = vcpu->arch.srr0; 542 regs->srr0 = vcpu->arch.shared->srr0;
495 regs->srr1 = vcpu->arch.srr1; 543 regs->srr1 = vcpu->arch.shared->srr1;
496 regs->pid = vcpu->arch.pid; 544 regs->pid = vcpu->arch.pid;
497 regs->sprg0 = vcpu->arch.sprg0; 545 regs->sprg0 = vcpu->arch.shared->sprg0;
498 regs->sprg1 = vcpu->arch.sprg1; 546 regs->sprg1 = vcpu->arch.shared->sprg1;
499 regs->sprg2 = vcpu->arch.sprg2; 547 regs->sprg2 = vcpu->arch.shared->sprg2;
500 regs->sprg3 = vcpu->arch.sprg3; 548 regs->sprg3 = vcpu->arch.shared->sprg3;
501 regs->sprg5 = vcpu->arch.sprg4; 549 regs->sprg5 = vcpu->arch.sprg4;
502 regs->sprg6 = vcpu->arch.sprg5; 550 regs->sprg6 = vcpu->arch.sprg5;
503 regs->sprg7 = vcpu->arch.sprg6; 551 regs->sprg7 = vcpu->arch.sprg6;
@@ -518,12 +566,12 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
518 vcpu->arch.lr = regs->lr; 566 vcpu->arch.lr = regs->lr;
519 kvmppc_set_xer(vcpu, regs->xer); 567 kvmppc_set_xer(vcpu, regs->xer);
520 kvmppc_set_msr(vcpu, regs->msr); 568 kvmppc_set_msr(vcpu, regs->msr);
521 vcpu->arch.srr0 = regs->srr0; 569 vcpu->arch.shared->srr0 = regs->srr0;
522 vcpu->arch.srr1 = regs->srr1; 570 vcpu->arch.shared->srr1 = regs->srr1;
523 vcpu->arch.sprg0 = regs->sprg0; 571 vcpu->arch.shared->sprg0 = regs->sprg0;
524 vcpu->arch.sprg1 = regs->sprg1; 572 vcpu->arch.shared->sprg1 = regs->sprg1;
525 vcpu->arch.sprg2 = regs->sprg2; 573 vcpu->arch.shared->sprg2 = regs->sprg2;
526 vcpu->arch.sprg3 = regs->sprg3; 574 vcpu->arch.shared->sprg3 = regs->sprg3;
527 vcpu->arch.sprg5 = regs->sprg4; 575 vcpu->arch.sprg5 = regs->sprg4;
528 vcpu->arch.sprg6 = regs->sprg5; 576 vcpu->arch.sprg6 = regs->sprg5;
529 vcpu->arch.sprg7 = regs->sprg6; 577 vcpu->arch.sprg7 = regs->sprg6;
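
A note on the booke.c hunks above: external interrupts raised with KVM_INTERRUPT_SET_LEVEL are queued on the new BOOKE_IRQPRIO_EXTERNAL_LEVEL pseudo-priority and, because keep_irq stays true, are not cleared when delivered; userspace has to lower the line itself with KVM_INTERRUPT_UNSET, which now clears both the EXTERNAL and EXTERNAL_LEVEL bits. A minimal userspace sketch, assuming a vcpu file descriptor is already open and that linux/kvm.h carries the new KVM_INTERRUPT_SET_LEVEL definition:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Raise a level-triggered external interrupt line on a PPC vcpu. */
    static int assert_level_irq(int vcpu_fd)
    {
            struct kvm_interrupt irq = { .irq = KVM_INTERRUPT_SET_LEVEL };

            /* Stays pending across delivery until explicitly lowered. */
            return ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
    }

    /* Lower the line again once the emulated device has been serviced. */
    static int deassert_level_irq(int vcpu_fd)
    {
            struct kvm_interrupt irq = { .irq = KVM_INTERRUPT_UNSET };

            return ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
    }
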
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index d59bcca1f9d8..492bb7030358 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -46,7 +46,9 @@
46#define BOOKE_IRQPRIO_FIT 17 46#define BOOKE_IRQPRIO_FIT 17
47#define BOOKE_IRQPRIO_DECREMENTER 18 47#define BOOKE_IRQPRIO_DECREMENTER 18
48#define BOOKE_IRQPRIO_PERFORMANCE_MONITOR 19 48#define BOOKE_IRQPRIO_PERFORMANCE_MONITOR 19
49#define BOOKE_IRQPRIO_MAX 19 49/* Internal pseudo-irqprio for level triggered externals */
50#define BOOKE_IRQPRIO_EXTERNAL_LEVEL 20
51#define BOOKE_IRQPRIO_MAX 20
50 52
51extern unsigned long kvmppc_booke_handlers; 53extern unsigned long kvmppc_booke_handlers;
52 54
@@ -54,12 +56,12 @@ extern unsigned long kvmppc_booke_handlers;
54 * changing. */ 56 * changing. */
55static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr) 57static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
56{ 58{
57 if ((new_msr & MSR_PR) != (vcpu->arch.msr & MSR_PR)) 59 if ((new_msr & MSR_PR) != (vcpu->arch.shared->msr & MSR_PR))
58 kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR); 60 kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR);
59 61
60 vcpu->arch.msr = new_msr; 62 vcpu->arch.shared->msr = new_msr;
61 63
62 if (vcpu->arch.msr & MSR_WE) { 64 if (vcpu->arch.shared->msr & MSR_WE) {
63 kvm_vcpu_block(vcpu); 65 kvm_vcpu_block(vcpu);
64 kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS); 66 kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
65 }; 67 };
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c
index cbc790ee1928..1260f5f24c0c 100644
--- a/arch/powerpc/kvm/booke_emulate.c
+++ b/arch/powerpc/kvm/booke_emulate.c
@@ -31,8 +31,8 @@
31 31
32static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu) 32static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
33{ 33{
34 vcpu->arch.pc = vcpu->arch.srr0; 34 vcpu->arch.pc = vcpu->arch.shared->srr0;
35 kvmppc_set_msr(vcpu, vcpu->arch.srr1); 35 kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1);
36} 36}
37 37
38int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, 38int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
@@ -62,7 +62,7 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
62 62
63 case OP_31_XOP_MFMSR: 63 case OP_31_XOP_MFMSR:
64 rt = get_rt(inst); 64 rt = get_rt(inst);
65 kvmppc_set_gpr(vcpu, rt, vcpu->arch.msr); 65 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);
66 kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS); 66 kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS);
67 break; 67 break;
68 68
@@ -74,13 +74,13 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
74 74
75 case OP_31_XOP_WRTEE: 75 case OP_31_XOP_WRTEE:
76 rs = get_rs(inst); 76 rs = get_rs(inst);
77 vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE) 77 vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE)
78 | (kvmppc_get_gpr(vcpu, rs) & MSR_EE); 78 | (kvmppc_get_gpr(vcpu, rs) & MSR_EE);
79 kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS); 79 kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
80 break; 80 break;
81 81
82 case OP_31_XOP_WRTEEI: 82 case OP_31_XOP_WRTEEI:
83 vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE) 83 vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE)
84 | (inst & MSR_EE); 84 | (inst & MSR_EE);
85 kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS); 85 kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
86 break; 86 break;
@@ -105,7 +105,7 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
105 105
106 switch (sprn) { 106 switch (sprn) {
107 case SPRN_DEAR: 107 case SPRN_DEAR:
108 vcpu->arch.dear = spr_val; break; 108 vcpu->arch.shared->dar = spr_val; break;
109 case SPRN_ESR: 109 case SPRN_ESR:
110 vcpu->arch.esr = spr_val; break; 110 vcpu->arch.esr = spr_val; break;
111 case SPRN_DBCR0: 111 case SPRN_DBCR0:
@@ -200,7 +200,7 @@ int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
200 case SPRN_IVPR: 200 case SPRN_IVPR:
201 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivpr); break; 201 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivpr); break;
202 case SPRN_DEAR: 202 case SPRN_DEAR:
203 kvmppc_set_gpr(vcpu, rt, vcpu->arch.dear); break; 203 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar); break;
204 case SPRN_ESR: 204 case SPRN_ESR:
205 kvmppc_set_gpr(vcpu, rt, vcpu->arch.esr); break; 205 kvmppc_set_gpr(vcpu, rt, vcpu->arch.esr); break;
206 case SPRN_DBCR0: 206 case SPRN_DBCR0:
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 380a78cf484d..049846911ce4 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -415,7 +415,8 @@ lightweight_exit:
415 lwz r8, VCPU_GPR(r8)(r4) 415 lwz r8, VCPU_GPR(r8)(r4)
416 lwz r3, VCPU_PC(r4) 416 lwz r3, VCPU_PC(r4)
417 mtsrr0 r3 417 mtsrr0 r3
418 lwz r3, VCPU_MSR(r4) 418 lwz r3, VCPU_SHARED(r4)
419 lwz r3, VCPU_SHARED_MSR(r3)
419 oris r3, r3, KVMPPC_MSR_MASK@h 420 oris r3, r3, KVMPPC_MSR_MASK@h
420 ori r3, r3, KVMPPC_MSR_MASK@l 421 ori r3, r3, KVMPPC_MSR_MASK@l
421 mtsrr1 r3 422 mtsrr1 r3
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index e8a00b0c4449..71750f2dd5d3 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -117,8 +117,14 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
117 if (err) 117 if (err)
118 goto uninit_vcpu; 118 goto uninit_vcpu;
119 119
120 vcpu->arch.shared = (void*)__get_free_page(GFP_KERNEL|__GFP_ZERO);
121 if (!vcpu->arch.shared)
122 goto uninit_tlb;
123
120 return vcpu; 124 return vcpu;
121 125
126uninit_tlb:
127 kvmppc_e500_tlb_uninit(vcpu_e500);
122uninit_vcpu: 128uninit_vcpu:
123 kvm_vcpu_uninit(vcpu); 129 kvm_vcpu_uninit(vcpu);
124free_vcpu: 130free_vcpu:
@@ -131,6 +137,7 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
131{ 137{
132 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 138 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
133 139
140 free_page((unsigned long)vcpu->arch.shared);
134 kvmppc_e500_tlb_uninit(vcpu_e500); 141 kvmppc_e500_tlb_uninit(vcpu_e500);
135 kvm_vcpu_uninit(vcpu); 142 kvm_vcpu_uninit(vcpu);
136 kmem_cache_free(kvm_vcpu_cache, vcpu_e500); 143 kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index 21011e12caeb..d6d6d47a75a9 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -226,8 +226,7 @@ static void kvmppc_e500_stlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
226 226
227 kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel); 227 kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel);
228 stlbe->mas1 = 0; 228 stlbe->mas1 = 0;
229 trace_kvm_stlb_inval(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2, 229 trace_kvm_stlb_inval(index_of(tlbsel, esel));
230 stlbe->mas3, stlbe->mas7);
231} 230}
232 231
233static void kvmppc_e500_tlb1_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500, 232static void kvmppc_e500_tlb1_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
@@ -298,7 +297,8 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
298 /* Get reference to new page. */ 297 /* Get reference to new page. */
299 new_page = gfn_to_page(vcpu_e500->vcpu.kvm, gfn); 298 new_page = gfn_to_page(vcpu_e500->vcpu.kvm, gfn);
300 if (is_error_page(new_page)) { 299 if (is_error_page(new_page)) {
301 printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn); 300 printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n",
301 (long)gfn);
302 kvm_release_page_clean(new_page); 302 kvm_release_page_clean(new_page);
303 return; 303 return;
304 } 304 }
@@ -314,10 +314,10 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
314 | MAS1_TID(get_tlb_tid(gtlbe)) | MAS1_TS | MAS1_VALID; 314 | MAS1_TID(get_tlb_tid(gtlbe)) | MAS1_TS | MAS1_VALID;
315 stlbe->mas2 = (gvaddr & MAS2_EPN) 315 stlbe->mas2 = (gvaddr & MAS2_EPN)
316 | e500_shadow_mas2_attrib(gtlbe->mas2, 316 | e500_shadow_mas2_attrib(gtlbe->mas2,
317 vcpu_e500->vcpu.arch.msr & MSR_PR); 317 vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
318 stlbe->mas3 = (hpaddr & MAS3_RPN) 318 stlbe->mas3 = (hpaddr & MAS3_RPN)
319 | e500_shadow_mas3_attrib(gtlbe->mas3, 319 | e500_shadow_mas3_attrib(gtlbe->mas3,
320 vcpu_e500->vcpu.arch.msr & MSR_PR); 320 vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
321 stlbe->mas7 = (hpaddr >> 32) & MAS7_RPN; 321 stlbe->mas7 = (hpaddr >> 32) & MAS7_RPN;
322 322
323 trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2, 323 trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
@@ -576,28 +576,28 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
576 576
577int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) 577int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
578{ 578{
579 unsigned int as = !!(vcpu->arch.msr & MSR_IS); 579 unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);
580 580
581 return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as); 581 return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
582} 582}
583 583
584int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) 584int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
585{ 585{
586 unsigned int as = !!(vcpu->arch.msr & MSR_DS); 586 unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);
587 587
588 return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as); 588 return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
589} 589}
590 590
591void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu) 591void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
592{ 592{
593 unsigned int as = !!(vcpu->arch.msr & MSR_IS); 593 unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);
594 594
595 kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as); 595 kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as);
596} 596}
597 597
598void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu) 598void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
599{ 599{
600 unsigned int as = !!(vcpu->arch.msr & MSR_DS); 600 unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);
601 601
602 kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as); 602 kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as);
603} 603}
diff --git a/arch/powerpc/kvm/e500_tlb.h b/arch/powerpc/kvm/e500_tlb.h
index d28e3010a5e2..458946b4775d 100644
--- a/arch/powerpc/kvm/e500_tlb.h
+++ b/arch/powerpc/kvm/e500_tlb.h
@@ -171,7 +171,7 @@ static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
171 171
172 /* Does it match current guest AS? */ 172 /* Does it match current guest AS? */
173 /* XXX what about IS != DS? */ 173 /* XXX what about IS != DS? */
174 if (get_tlb_ts(tlbe) != !!(vcpu->arch.msr & MSR_IS)) 174 if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS))
175 return 0; 175 return 0;
176 176
177 gpa = get_tlb_raddr(tlbe); 177 gpa = get_tlb_raddr(tlbe);
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index b83ba581fd8e..c64fd2909bb2 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -242,9 +242,11 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
242 242
243 switch (sprn) { 243 switch (sprn) {
244 case SPRN_SRR0: 244 case SPRN_SRR0:
245 kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr0); break; 245 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr0);
246 break;
246 case SPRN_SRR1: 247 case SPRN_SRR1:
247 kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr1); break; 248 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr1);
249 break;
248 case SPRN_PVR: 250 case SPRN_PVR:
249 kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break; 251 kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break;
250 case SPRN_PIR: 252 case SPRN_PIR:
@@ -261,13 +263,17 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
261 kvmppc_set_gpr(vcpu, rt, get_tb()); break; 263 kvmppc_set_gpr(vcpu, rt, get_tb()); break;
262 264
263 case SPRN_SPRG0: 265 case SPRN_SPRG0:
264 kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg0); break; 266 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg0);
267 break;
265 case SPRN_SPRG1: 268 case SPRN_SPRG1:
266 kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg1); break; 269 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg1);
270 break;
267 case SPRN_SPRG2: 271 case SPRN_SPRG2:
268 kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg2); break; 272 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg2);
273 break;
269 case SPRN_SPRG3: 274 case SPRN_SPRG3:
270 kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg3); break; 275 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg3);
276 break;
271 /* Note: SPRG4-7 are user-readable, so we don't get 277 /* Note: SPRG4-7 are user-readable, so we don't get
272 * a trap. */ 278 * a trap. */
273 279
@@ -320,9 +326,11 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
320 rs = get_rs(inst); 326 rs = get_rs(inst);
321 switch (sprn) { 327 switch (sprn) {
322 case SPRN_SRR0: 328 case SPRN_SRR0:
323 vcpu->arch.srr0 = kvmppc_get_gpr(vcpu, rs); break; 329 vcpu->arch.shared->srr0 = kvmppc_get_gpr(vcpu, rs);
330 break;
324 case SPRN_SRR1: 331 case SPRN_SRR1:
325 vcpu->arch.srr1 = kvmppc_get_gpr(vcpu, rs); break; 332 vcpu->arch.shared->srr1 = kvmppc_get_gpr(vcpu, rs);
333 break;
326 334
327 /* XXX We need to context-switch the timebase for 335 /* XXX We need to context-switch the timebase for
328 * watchdog and FIT. */ 336 * watchdog and FIT. */
@@ -337,13 +345,17 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
337 break; 345 break;
338 346
339 case SPRN_SPRG0: 347 case SPRN_SPRG0:
340 vcpu->arch.sprg0 = kvmppc_get_gpr(vcpu, rs); break; 348 vcpu->arch.shared->sprg0 = kvmppc_get_gpr(vcpu, rs);
349 break;
341 case SPRN_SPRG1: 350 case SPRN_SPRG1:
342 vcpu->arch.sprg1 = kvmppc_get_gpr(vcpu, rs); break; 351 vcpu->arch.shared->sprg1 = kvmppc_get_gpr(vcpu, rs);
352 break;
343 case SPRN_SPRG2: 353 case SPRN_SPRG2:
344 vcpu->arch.sprg2 = kvmppc_get_gpr(vcpu, rs); break; 354 vcpu->arch.shared->sprg2 = kvmppc_get_gpr(vcpu, rs);
355 break;
345 case SPRN_SPRG3: 356 case SPRN_SPRG3:
346 vcpu->arch.sprg3 = kvmppc_get_gpr(vcpu, rs); break; 357 vcpu->arch.shared->sprg3 = kvmppc_get_gpr(vcpu, rs);
358 break;
347 359
348 default: 360 default:
349 emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs); 361 emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 72a4ad86ee91..2f87a1627f6c 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -38,9 +38,56 @@
38 38
39int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) 39int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
40{ 40{
41 return !(v->arch.msr & MSR_WE) || !!(v->arch.pending_exceptions); 41 return !(v->arch.shared->msr & MSR_WE) ||
42 !!(v->arch.pending_exceptions);
42} 43}
43 44
45int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
46{
47 int nr = kvmppc_get_gpr(vcpu, 11);
48 int r;
49 unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
50 unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
51 unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
52 unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
53 unsigned long r2 = 0;
54
55 if (!(vcpu->arch.shared->msr & MSR_SF)) {
56 /* 32 bit mode */
57 param1 &= 0xffffffff;
58 param2 &= 0xffffffff;
59 param3 &= 0xffffffff;
60 param4 &= 0xffffffff;
61 }
62
63 switch (nr) {
64 case HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE:
65 {
66 vcpu->arch.magic_page_pa = param1;
67 vcpu->arch.magic_page_ea = param2;
68
69 r2 = KVM_MAGIC_FEAT_SR;
70
71 r = HC_EV_SUCCESS;
72 break;
73 }
74 case HC_VENDOR_KVM | KVM_HC_FEATURES:
75 r = HC_EV_SUCCESS;
76#if defined(CONFIG_PPC_BOOK3S) /* XXX Missing magic page on BookE */
77 r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
78#endif
79
80 /* Second return value is in r4 */
81 break;
82 default:
83 r = HC_EV_UNIMPLEMENTED;
84 break;
85 }
86
87 kvmppc_set_gpr(vcpu, 4, r2);
88
89 return r;
90}
44 91
45int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu) 92int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
46{ 93{
@@ -145,8 +192,10 @@ int kvm_dev_ioctl_check_extension(long ext)
145 case KVM_CAP_PPC_SEGSTATE: 192 case KVM_CAP_PPC_SEGSTATE:
146 case KVM_CAP_PPC_PAIRED_SINGLES: 193 case KVM_CAP_PPC_PAIRED_SINGLES:
147 case KVM_CAP_PPC_UNSET_IRQ: 194 case KVM_CAP_PPC_UNSET_IRQ:
195 case KVM_CAP_PPC_IRQ_LEVEL:
148 case KVM_CAP_ENABLE_CAP: 196 case KVM_CAP_ENABLE_CAP:
149 case KVM_CAP_PPC_OSI: 197 case KVM_CAP_PPC_OSI:
198 case KVM_CAP_PPC_GET_PVINFO:
150 r = 1; 199 r = 1;
151 break; 200 break;
152 case KVM_CAP_COALESCED_MMIO: 201 case KVM_CAP_COALESCED_MMIO:
@@ -534,16 +583,53 @@ out:
534 return r; 583 return r;
535} 584}
536 585
586static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
587{
588 u32 inst_lis = 0x3c000000;
589 u32 inst_ori = 0x60000000;
590 u32 inst_nop = 0x60000000;
591 u32 inst_sc = 0x44000002;
592 u32 inst_imm_mask = 0xffff;
593
594 /*
595 * The hypercall to get into KVM from within guest context is as
596 * follows:
597 *
 598 * lis r0, KVM_SC_MAGIC_R0@h
 599 * ori r0, r0, KVM_SC_MAGIC_R0@l
600 * sc
601 * nop
602 */
603 pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask);
604 pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask);
605 pvinfo->hcall[2] = inst_sc;
606 pvinfo->hcall[3] = inst_nop;
607
608 return 0;
609}
610
537long kvm_arch_vm_ioctl(struct file *filp, 611long kvm_arch_vm_ioctl(struct file *filp,
538 unsigned int ioctl, unsigned long arg) 612 unsigned int ioctl, unsigned long arg)
539{ 613{
614 void __user *argp = (void __user *)arg;
540 long r; 615 long r;
541 616
542 switch (ioctl) { 617 switch (ioctl) {
618 case KVM_PPC_GET_PVINFO: {
619 struct kvm_ppc_pvinfo pvinfo;
620 r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
621 if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
622 r = -EFAULT;
623 goto out;
624 }
625
626 break;
627 }
543 default: 628 default:
544 r = -ENOTTY; 629 r = -ENOTTY;
545 } 630 }
546 631
632out:
547 return r; 633 return r;
548} 634}
549 635
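
Tying the powerpc.c hunks above together: kvmppc_kvm_pv() receives the hypercall number in r11 and up to four arguments in r3-r6 (truncated to 32 bits when MSR_SF is clear), returns a status in r3 and a second value in r4, and is reached when a guest in supervisor mode executes sc with KVM_SC_MAGIC_R0 in r0, which is exactly the instruction sequence kvm_vm_ioctl_get_pvinfo() exports through KVM_PPC_GET_PVINFO. A hedged guest-side sketch of issuing such a hypercall from C follows; the wrapper name is invented, the constants are assumed to come from the kvm_para headers touched elsewhere in this series, and the clobber list is deliberately conservative.

    #include <linux/kvm_para.h>  /* assumed: KVM_SC_MAGIC_R0, HC_VENDOR_KVM, ... */

    /*
     * Sketch of a KVM PPC paravirt hypercall from guest kernel context:
     * number in r11, arguments in r3/r4, status back in r3, second
     * return value in r4.
     */
    static long kvm_hypercall2_sketch(unsigned long nr,
                                      unsigned long p1, unsigned long p2,
                                      unsigned long *out2)
    {
            register unsigned long r0  asm("r0")  = KVM_SC_MAGIC_R0;
            register unsigned long r3  asm("r3")  = p1;
            register unsigned long r4  asm("r4")  = p2;
            register unsigned long r11 asm("r11") = nr;

            asm volatile("sc"
                         : "+r" (r0), "+r" (r3), "+r" (r4), "+r" (r11)
                         :
                         : "memory", "cr0", "r5", "r6", "r7", "r8",
                           "r9", "r10", "r12");

            *out2 = r4;     /* e.g. feature bits for KVM_HC_FEATURES */
            return r3;      /* HC_EV_SUCCESS or HC_EV_UNIMPLEMENTED */
    }

For instance, calling it with nr = HC_VENDOR_KVM | KVM_HC_FEATURES would return HC_EV_SUCCESS and leave the feature bitmap built in the hunk above in *out2.
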
diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h
index a8e840018052..3aca1b042b8c 100644
--- a/arch/powerpc/kvm/trace.h
+++ b/arch/powerpc/kvm/trace.h
@@ -98,6 +98,245 @@ TRACE_EVENT(kvm_gtlb_write,
98 __entry->word1, __entry->word2) 98 __entry->word1, __entry->word2)
99); 99);
100 100
101
102/*************************************************************************
103 * Book3S trace points *
104 *************************************************************************/
105
106#ifdef CONFIG_PPC_BOOK3S
107
108TRACE_EVENT(kvm_book3s_exit,
109 TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
110 TP_ARGS(exit_nr, vcpu),
111
112 TP_STRUCT__entry(
113 __field( unsigned int, exit_nr )
114 __field( unsigned long, pc )
115 __field( unsigned long, msr )
116 __field( unsigned long, dar )
117 __field( unsigned long, srr1 )
118 ),
119
120 TP_fast_assign(
121 __entry->exit_nr = exit_nr;
122 __entry->pc = kvmppc_get_pc(vcpu);
123 __entry->dar = kvmppc_get_fault_dar(vcpu);
124 __entry->msr = vcpu->arch.shared->msr;
125 __entry->srr1 = to_svcpu(vcpu)->shadow_srr1;
126 ),
127
128 TP_printk("exit=0x%x | pc=0x%lx | msr=0x%lx | dar=0x%lx | srr1=0x%lx",
129 __entry->exit_nr, __entry->pc, __entry->msr, __entry->dar,
130 __entry->srr1)
131);
132
133TRACE_EVENT(kvm_book3s_reenter,
134 TP_PROTO(int r, struct kvm_vcpu *vcpu),
135 TP_ARGS(r, vcpu),
136
137 TP_STRUCT__entry(
138 __field( unsigned int, r )
139 __field( unsigned long, pc )
140 ),
141
142 TP_fast_assign(
143 __entry->r = r;
144 __entry->pc = kvmppc_get_pc(vcpu);
145 ),
146
147 TP_printk("reentry r=%d | pc=0x%lx", __entry->r, __entry->pc)
148);
149
150#ifdef CONFIG_PPC_BOOK3S_64
151
152TRACE_EVENT(kvm_book3s_64_mmu_map,
153 TP_PROTO(int rflags, ulong hpteg, ulong va, pfn_t hpaddr,
154 struct kvmppc_pte *orig_pte),
155 TP_ARGS(rflags, hpteg, va, hpaddr, orig_pte),
156
157 TP_STRUCT__entry(
158 __field( unsigned char, flag_w )
159 __field( unsigned char, flag_x )
160 __field( unsigned long, eaddr )
161 __field( unsigned long, hpteg )
162 __field( unsigned long, va )
163 __field( unsigned long long, vpage )
164 __field( unsigned long, hpaddr )
165 ),
166
167 TP_fast_assign(
168 __entry->flag_w = ((rflags & HPTE_R_PP) == 3) ? '-' : 'w';
169 __entry->flag_x = (rflags & HPTE_R_N) ? '-' : 'x';
170 __entry->eaddr = orig_pte->eaddr;
171 __entry->hpteg = hpteg;
172 __entry->va = va;
173 __entry->vpage = orig_pte->vpage;
174 __entry->hpaddr = hpaddr;
175 ),
176
177 TP_printk("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx",
178 __entry->flag_w, __entry->flag_x, __entry->eaddr,
179 __entry->hpteg, __entry->va, __entry->vpage, __entry->hpaddr)
180);
181
182#endif /* CONFIG_PPC_BOOK3S_64 */
183
184TRACE_EVENT(kvm_book3s_mmu_map,
185 TP_PROTO(struct hpte_cache *pte),
186 TP_ARGS(pte),
187
188 TP_STRUCT__entry(
189 __field( u64, host_va )
190 __field( u64, pfn )
191 __field( ulong, eaddr )
192 __field( u64, vpage )
193 __field( ulong, raddr )
194 __field( int, flags )
195 ),
196
197 TP_fast_assign(
198 __entry->host_va = pte->host_va;
199 __entry->pfn = pte->pfn;
200 __entry->eaddr = pte->pte.eaddr;
201 __entry->vpage = pte->pte.vpage;
202 __entry->raddr = pte->pte.raddr;
203 __entry->flags = (pte->pte.may_read ? 0x4 : 0) |
204 (pte->pte.may_write ? 0x2 : 0) |
205 (pte->pte.may_execute ? 0x1 : 0);
206 ),
207
208 TP_printk("Map: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
209 __entry->host_va, __entry->pfn, __entry->eaddr,
210 __entry->vpage, __entry->raddr, __entry->flags)
211);
212
213TRACE_EVENT(kvm_book3s_mmu_invalidate,
214 TP_PROTO(struct hpte_cache *pte),
215 TP_ARGS(pte),
216
217 TP_STRUCT__entry(
218 __field( u64, host_va )
219 __field( u64, pfn )
220 __field( ulong, eaddr )
221 __field( u64, vpage )
222 __field( ulong, raddr )
223 __field( int, flags )
224 ),
225
226 TP_fast_assign(
227 __entry->host_va = pte->host_va;
228 __entry->pfn = pte->pfn;
229 __entry->eaddr = pte->pte.eaddr;
230 __entry->vpage = pte->pte.vpage;
231 __entry->raddr = pte->pte.raddr;
232 __entry->flags = (pte->pte.may_read ? 0x4 : 0) |
233 (pte->pte.may_write ? 0x2 : 0) |
234 (pte->pte.may_execute ? 0x1 : 0);
235 ),
236
237 TP_printk("Flush: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
238 __entry->host_va, __entry->pfn, __entry->eaddr,
239 __entry->vpage, __entry->raddr, __entry->flags)
240);
241
242TRACE_EVENT(kvm_book3s_mmu_flush,
243 TP_PROTO(const char *type, struct kvm_vcpu *vcpu, unsigned long long p1,
244 unsigned long long p2),
245 TP_ARGS(type, vcpu, p1, p2),
246
247 TP_STRUCT__entry(
248 __field( int, count )
249 __field( unsigned long long, p1 )
250 __field( unsigned long long, p2 )
251 __field( const char *, type )
252 ),
253
254 TP_fast_assign(
255 __entry->count = vcpu->arch.hpte_cache_count;
256 __entry->p1 = p1;
257 __entry->p2 = p2;
258 __entry->type = type;
259 ),
260
261 TP_printk("Flush %d %sPTEs: %llx - %llx",
262 __entry->count, __entry->type, __entry->p1, __entry->p2)
263);
264
265TRACE_EVENT(kvm_book3s_slb_found,
266 TP_PROTO(unsigned long long gvsid, unsigned long long hvsid),
267 TP_ARGS(gvsid, hvsid),
268
269 TP_STRUCT__entry(
270 __field( unsigned long long, gvsid )
271 __field( unsigned long long, hvsid )
272 ),
273
274 TP_fast_assign(
275 __entry->gvsid = gvsid;
276 __entry->hvsid = hvsid;
277 ),
278
279 TP_printk("%llx -> %llx", __entry->gvsid, __entry->hvsid)
280);
281
282TRACE_EVENT(kvm_book3s_slb_fail,
283 TP_PROTO(u16 sid_map_mask, unsigned long long gvsid),
284 TP_ARGS(sid_map_mask, gvsid),
285
286 TP_STRUCT__entry(
287 __field( unsigned short, sid_map_mask )
288 __field( unsigned long long, gvsid )
289 ),
290
291 TP_fast_assign(
292 __entry->sid_map_mask = sid_map_mask;
293 __entry->gvsid = gvsid;
294 ),
295
296 TP_printk("%x/%x: %llx", __entry->sid_map_mask,
297 SID_MAP_MASK - __entry->sid_map_mask, __entry->gvsid)
298);
299
300TRACE_EVENT(kvm_book3s_slb_map,
301 TP_PROTO(u16 sid_map_mask, unsigned long long gvsid,
302 unsigned long long hvsid),
303 TP_ARGS(sid_map_mask, gvsid, hvsid),
304
305 TP_STRUCT__entry(
306 __field( unsigned short, sid_map_mask )
307 __field( unsigned long long, guest_vsid )
308 __field( unsigned long long, host_vsid )
309 ),
310
311 TP_fast_assign(
312 __entry->sid_map_mask = sid_map_mask;
313 __entry->guest_vsid = gvsid;
314 __entry->host_vsid = hvsid;
315 ),
316
317 TP_printk("%x: %llx -> %llx", __entry->sid_map_mask,
318 __entry->guest_vsid, __entry->host_vsid)
319);
320
321TRACE_EVENT(kvm_book3s_slbmte,
322 TP_PROTO(u64 slb_vsid, u64 slb_esid),
323 TP_ARGS(slb_vsid, slb_esid),
324
325 TP_STRUCT__entry(
326 __field( u64, slb_vsid )
327 __field( u64, slb_esid )
328 ),
329
330 TP_fast_assign(
331 __entry->slb_vsid = slb_vsid;
332 __entry->slb_esid = slb_esid;
333 ),
334
335 TP_printk("%llx, %llx", __entry->slb_vsid, __entry->slb_esid)
336);
337
338#endif /* CONFIG_PPC_BOOK3S */
339
101#endif /* _TRACE_KVM_H */ 340#endif /* _TRACE_KVM_H */
102 341
103/* This part must be outside protection */ 342/* This part must be outside protection */
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 81c9208025fa..956154f32cfe 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -21,6 +21,16 @@ source "arch/powerpc/platforms/44x/Kconfig"
21source "arch/powerpc/platforms/40x/Kconfig" 21source "arch/powerpc/platforms/40x/Kconfig"
22source "arch/powerpc/platforms/amigaone/Kconfig" 22source "arch/powerpc/platforms/amigaone/Kconfig"
23 23
24config KVM_GUEST
25 bool "KVM Guest support"
26 default y
27 ---help---
28 This option enables various optimizations for running under the KVM
29 hypervisor. Overhead for the kernel when not running inside KVM should
30 be minimal.
31
32 In case of doubt, say Y
33
24config PPC_NATIVE 34config PPC_NATIVE
25 bool 35 bool
26 depends on 6xx || PPC64 36 depends on 6xx || PPC64
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 42e512ba8b43..287d7bbb6d36 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -5,6 +5,7 @@ header-y += chsc.h
5header-y += cmb.h 5header-y += cmb.h
6header-y += dasd.h 6header-y += dasd.h
7header-y += debug.h 7header-y += debug.h
8header-y += kvm_virtio.h
8header-y += monwriter.h 9header-y += monwriter.h
9header-y += qeth.h 10header-y += qeth.h
10header-y += schid.h 11header-y += schid.h
diff --git a/arch/s390/include/asm/kvm_virtio.h b/arch/s390/include/asm/kvm_virtio.h
index acdfdff26611..72f614181eff 100644
--- a/arch/s390/include/asm/kvm_virtio.h
+++ b/arch/s390/include/asm/kvm_virtio.h
@@ -54,4 +54,11 @@ struct kvm_vqconfig {
54 * This is pagesize for historical reasons. */ 54 * This is pagesize for historical reasons. */
55#define KVM_S390_VIRTIO_RING_ALIGN 4096 55#define KVM_S390_VIRTIO_RING_ALIGN 4096
56 56
57
58/* These values are supposed to be in ext_params on an interrupt */
59#define VIRTIO_PARAM_MASK 0xff
60#define VIRTIO_PARAM_VRING_INTERRUPT 0x0
61#define VIRTIO_PARAM_CONFIG_CHANGED 0x1
62#define VIRTIO_PARAM_DEV_ADD 0x2
63
57#endif 64#endif
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 1f99ecfc48e1..b36c6b3fe144 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -139,6 +139,7 @@ struct x86_emulate_ops {
139 void (*set_segment_selector)(u16 sel, int seg, struct kvm_vcpu *vcpu); 139 void (*set_segment_selector)(u16 sel, int seg, struct kvm_vcpu *vcpu);
140 unsigned long (*get_cached_segment_base)(int seg, struct kvm_vcpu *vcpu); 140 unsigned long (*get_cached_segment_base)(int seg, struct kvm_vcpu *vcpu);
141 void (*get_gdt)(struct desc_ptr *dt, struct kvm_vcpu *vcpu); 141 void (*get_gdt)(struct desc_ptr *dt, struct kvm_vcpu *vcpu);
142 void (*get_idt)(struct desc_ptr *dt, struct kvm_vcpu *vcpu);
142 ulong (*get_cr)(int cr, struct kvm_vcpu *vcpu); 143 ulong (*get_cr)(int cr, struct kvm_vcpu *vcpu);
143 int (*set_cr)(int cr, ulong val, struct kvm_vcpu *vcpu); 144 int (*set_cr)(int cr, ulong val, struct kvm_vcpu *vcpu);
144 int (*cpl)(struct kvm_vcpu *vcpu); 145 int (*cpl)(struct kvm_vcpu *vcpu);
@@ -156,7 +157,10 @@ struct operand {
156 unsigned long orig_val; 157 unsigned long orig_val;
157 u64 orig_val64; 158 u64 orig_val64;
158 }; 159 };
159 unsigned long *ptr; 160 union {
161 unsigned long *reg;
162 unsigned long mem;
163 } addr;
160 union { 164 union {
161 unsigned long val; 165 unsigned long val;
162 u64 val64; 166 u64 val64;
@@ -190,6 +194,7 @@ struct decode_cache {
190 bool has_seg_override; 194 bool has_seg_override;
191 u8 seg_override; 195 u8 seg_override;
192 unsigned int d; 196 unsigned int d;
197 int (*execute)(struct x86_emulate_ctxt *ctxt);
193 unsigned long regs[NR_VCPU_REGS]; 198 unsigned long regs[NR_VCPU_REGS];
194 unsigned long eip; 199 unsigned long eip;
195 /* modrm */ 200 /* modrm */
@@ -197,17 +202,16 @@ struct decode_cache {
197 u8 modrm_mod; 202 u8 modrm_mod;
198 u8 modrm_reg; 203 u8 modrm_reg;
199 u8 modrm_rm; 204 u8 modrm_rm;
200 u8 use_modrm_ea; 205 u8 modrm_seg;
201 bool rip_relative; 206 bool rip_relative;
202 unsigned long modrm_ea;
203 void *modrm_ptr;
204 unsigned long modrm_val;
205 struct fetch_cache fetch; 207 struct fetch_cache fetch;
206 struct read_cache io_read; 208 struct read_cache io_read;
207 struct read_cache mem_read; 209 struct read_cache mem_read;
208}; 210};
209 211
210struct x86_emulate_ctxt { 212struct x86_emulate_ctxt {
213 struct x86_emulate_ops *ops;
214
211 /* Register state before/after emulation. */ 215 /* Register state before/after emulation. */
212 struct kvm_vcpu *vcpu; 216 struct kvm_vcpu *vcpu;
213 217
@@ -220,12 +224,11 @@ struct x86_emulate_ctxt {
220 /* interruptibility state, as a result of execution of STI or MOV SS */ 224 /* interruptibility state, as a result of execution of STI or MOV SS */
221 int interruptibility; 225 int interruptibility;
222 226
223 bool restart; /* restart string instruction after writeback */ 227 bool perm_ok; /* do not check permissions if true */
224 228
225 int exception; /* exception that happens during emulation or -1 */ 229 int exception; /* exception that happens during emulation or -1 */
226 u32 error_code; /* error code for exception */ 230 u32 error_code; /* error code for exception */
227 bool error_code_valid; 231 bool error_code_valid;
228 unsigned long cr2; /* faulted address in case of #PF */
229 232
230 /* decode cache */ 233 /* decode cache */
231 struct decode_cache decode; 234 struct decode_cache decode;
@@ -249,13 +252,14 @@ struct x86_emulate_ctxt {
249#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64 252#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
250#endif 253#endif
251 254
252int x86_decode_insn(struct x86_emulate_ctxt *ctxt, 255int x86_decode_insn(struct x86_emulate_ctxt *ctxt);
253 struct x86_emulate_ops *ops); 256#define EMULATION_FAILED -1
254int x86_emulate_insn(struct x86_emulate_ctxt *ctxt, 257#define EMULATION_OK 0
255 struct x86_emulate_ops *ops); 258#define EMULATION_RESTART 1
259int x86_emulate_insn(struct x86_emulate_ctxt *ctxt);
256int emulator_task_switch(struct x86_emulate_ctxt *ctxt, 260int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
257 struct x86_emulate_ops *ops,
258 u16 tss_selector, int reason, 261 u16 tss_selector, int reason,
259 bool has_error_code, u32 error_code); 262 bool has_error_code, u32 error_code);
260 263int emulate_int_real(struct x86_emulate_ctxt *ctxt,
264 struct x86_emulate_ops *ops, int irq);
261#endif /* _ASM_X86_KVM_X86_EMULATE_H */ 265#endif /* _ASM_X86_KVM_X86_EMULATE_H */
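
The kvm_emulate.h changes above fold the ops pointer into x86_emulate_ctxt, drop the old restart and cr2 fields, and make x86_decode_insn()/x86_emulate_insn() take only the context while reporting EMULATION_OK, EMULATION_RESTART or EMULATION_FAILED. A rough sketch of how a caller might drive the reworked entry points is below; the tight retry loop is purely illustrative (the real x86.c re-enters the guest between restarts) and the function name is made up.

    /* Sketch only; assumes the declarations from the header above. */
    static int emulate_one_insn(struct x86_emulate_ctxt *ctxt)
    {
            int rc;

            rc = x86_decode_insn(ctxt);        /* ops now come from ctxt->ops */
            if (rc != EMULATION_OK)
                    return EMULATION_FAILED;

            do {
                    rc = x86_emulate_insn(ctxt);
            } while (rc == EMULATION_RESTART); /* e.g. string ops that restart */

            return rc;
    }
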
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index c52e2eb40a1e..9e6fe391094e 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -236,10 +236,14 @@ struct kvm_pio_request {
236 */ 236 */
237struct kvm_mmu { 237struct kvm_mmu {
238 void (*new_cr3)(struct kvm_vcpu *vcpu); 238 void (*new_cr3)(struct kvm_vcpu *vcpu);
239 void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
240 unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
239 int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err); 241 int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
242 void (*inject_page_fault)(struct kvm_vcpu *vcpu);
240 void (*free)(struct kvm_vcpu *vcpu); 243 void (*free)(struct kvm_vcpu *vcpu);
241 gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access, 244 gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
242 u32 *error); 245 u32 *error);
246 gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
243 void (*prefetch_page)(struct kvm_vcpu *vcpu, 247 void (*prefetch_page)(struct kvm_vcpu *vcpu,
244 struct kvm_mmu_page *page); 248 struct kvm_mmu_page *page);
245 int (*sync_page)(struct kvm_vcpu *vcpu, 249 int (*sync_page)(struct kvm_vcpu *vcpu,
@@ -249,13 +253,18 @@ struct kvm_mmu {
249 int root_level; 253 int root_level;
250 int shadow_root_level; 254 int shadow_root_level;
251 union kvm_mmu_page_role base_role; 255 union kvm_mmu_page_role base_role;
256 bool direct_map;
252 257
253 u64 *pae_root; 258 u64 *pae_root;
259 u64 *lm_root;
254 u64 rsvd_bits_mask[2][4]; 260 u64 rsvd_bits_mask[2][4];
261
262 bool nx;
263
264 u64 pdptrs[4]; /* pae */
255}; 265};
256 266
257struct kvm_vcpu_arch { 267struct kvm_vcpu_arch {
258 u64 host_tsc;
259 /* 268 /*
260 * rip and regs accesses must go through 269 * rip and regs accesses must go through
261 * kvm_{register,rip}_{read,write} functions. 270 * kvm_{register,rip}_{read,write} functions.
@@ -272,7 +281,6 @@ struct kvm_vcpu_arch {
272 unsigned long cr4_guest_owned_bits; 281 unsigned long cr4_guest_owned_bits;
273 unsigned long cr8; 282 unsigned long cr8;
274 u32 hflags; 283 u32 hflags;
275 u64 pdptrs[4]; /* pae */
276 u64 efer; 284 u64 efer;
277 u64 apic_base; 285 u64 apic_base;
278 struct kvm_lapic *apic; /* kernel irqchip context */ 286 struct kvm_lapic *apic; /* kernel irqchip context */
@@ -282,7 +290,41 @@ struct kvm_vcpu_arch {
282 u64 ia32_misc_enable_msr; 290 u64 ia32_misc_enable_msr;
283 bool tpr_access_reporting; 291 bool tpr_access_reporting;
284 292
293 /*
294 * Paging state of the vcpu
295 *
296 * If the vcpu runs in guest mode with two level paging this still saves
297 * the paging mode of the l1 guest. This context is always used to
298 * handle faults.
299 */
285 struct kvm_mmu mmu; 300 struct kvm_mmu mmu;
301
302 /*
303 * Paging state of an L2 guest (used for nested npt)
304 *
305 * This context will save all necessary information to walk page tables
 306 * of an L2 guest. This context is only initialized for page table
307 * walking and not for faulting since we never handle l2 page faults on
308 * the host.
309 */
310 struct kvm_mmu nested_mmu;
311
312 /*
313 * Pointer to the mmu context currently used for
314 * gva_to_gpa translations.
315 */
316 struct kvm_mmu *walk_mmu;
317
318 /*
319 * This struct is filled with the necessary information to propagate a
320 * page fault into the guest
321 */
322 struct {
323 u64 address;
324 unsigned error_code;
325 bool nested;
326 } fault;
327
286 /* only needed in kvm_pv_mmu_op() path, but it's hot so 328 /* only needed in kvm_pv_mmu_op() path, but it's hot so
287 * put it here to avoid allocation */ 329 * put it here to avoid allocation */
288 struct kvm_pv_mmu_op_buffer mmu_op_buffer; 330 struct kvm_pv_mmu_op_buffer mmu_op_buffer;
@@ -336,9 +378,15 @@ struct kvm_vcpu_arch {
336 378
337 gpa_t time; 379 gpa_t time;
338 struct pvclock_vcpu_time_info hv_clock; 380 struct pvclock_vcpu_time_info hv_clock;
339 unsigned int hv_clock_tsc_khz; 381 unsigned int hw_tsc_khz;
340 unsigned int time_offset; 382 unsigned int time_offset;
341 struct page *time_page; 383 struct page *time_page;
384 u64 last_host_tsc;
385 u64 last_guest_tsc;
386 u64 last_kernel_ns;
387 u64 last_tsc_nsec;
388 u64 last_tsc_write;
389 bool tsc_catchup;
342 390
343 bool nmi_pending; 391 bool nmi_pending;
344 bool nmi_injected; 392 bool nmi_injected;
@@ -367,9 +415,9 @@ struct kvm_vcpu_arch {
367}; 415};
368 416
369struct kvm_arch { 417struct kvm_arch {
370 unsigned int n_free_mmu_pages; 418 unsigned int n_used_mmu_pages;
371 unsigned int n_requested_mmu_pages; 419 unsigned int n_requested_mmu_pages;
372 unsigned int n_alloc_mmu_pages; 420 unsigned int n_max_mmu_pages;
373 atomic_t invlpg_counter; 421 atomic_t invlpg_counter;
374 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES]; 422 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
375 /* 423 /*
@@ -394,8 +442,14 @@ struct kvm_arch {
394 gpa_t ept_identity_map_addr; 442 gpa_t ept_identity_map_addr;
395 443
396 unsigned long irq_sources_bitmap; 444 unsigned long irq_sources_bitmap;
397 u64 vm_init_tsc;
398 s64 kvmclock_offset; 445 s64 kvmclock_offset;
446 spinlock_t tsc_write_lock;
447 u64 last_tsc_nsec;
448 u64 last_tsc_offset;
449 u64 last_tsc_write;
450 u32 virtual_tsc_khz;
451 u32 virtual_tsc_mult;
452 s8 virtual_tsc_shift;
399 453
400 struct kvm_xen_hvm_config xen_hvm_config; 454 struct kvm_xen_hvm_config xen_hvm_config;
401 455
@@ -505,6 +559,7 @@ struct kvm_x86_ops {
505 void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr, 559 void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
506 bool has_error_code, u32 error_code, 560 bool has_error_code, u32 error_code,
507 bool reinject); 561 bool reinject);
562 void (*cancel_injection)(struct kvm_vcpu *vcpu);
508 int (*interrupt_allowed)(struct kvm_vcpu *vcpu); 563 int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
509 int (*nmi_allowed)(struct kvm_vcpu *vcpu); 564 int (*nmi_allowed)(struct kvm_vcpu *vcpu);
510 bool (*get_nmi_mask)(struct kvm_vcpu *vcpu); 565 bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
@@ -517,11 +572,16 @@ struct kvm_x86_ops {
517 u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio); 572 u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
518 int (*get_lpage_level)(void); 573 int (*get_lpage_level)(void);
519 bool (*rdtscp_supported)(void); 574 bool (*rdtscp_supported)(void);
575 void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment);
576
577 void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
520 578
521 void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry); 579 void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);
522 580
523 bool (*has_wbinvd_exit)(void); 581 bool (*has_wbinvd_exit)(void);
524 582
583 void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
584
525 const struct trace_print_flags *exit_reasons_str; 585 const struct trace_print_flags *exit_reasons_str;
526}; 586};
527 587
@@ -544,7 +604,7 @@ void kvm_mmu_zap_all(struct kvm *kvm);
544unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm); 604unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
545void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages); 605void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
546 606
547int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3); 607int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
548 608
549int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, 609int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
550 const void *val, int bytes); 610 const void *val, int bytes);
@@ -608,8 +668,11 @@ void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
608void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code); 668void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
609void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr); 669void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
610void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code); 670void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
611void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2, 671void kvm_inject_page_fault(struct kvm_vcpu *vcpu);
612 u32 error_code); 672int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
673 gfn_t gfn, void *data, int offset, int len,
674 u32 access);
675void kvm_propagate_fault(struct kvm_vcpu *vcpu);
613bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl); 676bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
614 677
615int kvm_pic_set_irq(void *opaque, int irq, int level); 678int kvm_pic_set_irq(void *opaque, int irq, int level);
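
To make the new MMU comments in kvm_host.h concrete: vcpu->arch.mmu remains the context that actually handles faults, vcpu->arch.nested_mmu only describes the L2 guest's page tables, and vcpu->arch.walk_mmu points at whichever of the two should be used for gva-to-gpa translation, with the fault struct carrying the address and error code to inject when a walk fails. A minimal kernel-context sketch of a translation helper built on walk_mmu; the helper name is invented for illustration.

    /* Sketch: translate a guest-virtual address through the active walker. */
    static gpa_t translate_gva_sketch(struct kvm_vcpu *vcpu, gva_t gva,
                                      u32 access, u32 *error)
    {
            struct kvm_mmu *mmu = vcpu->arch.walk_mmu;  /* &mmu or &nested_mmu */

            /* gva_to_gpa() is one of the callbacks declared in struct kvm_mmu. */
            return mmu->gva_to_gpa(vcpu, gva, access, error);
    }
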
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 05eba5e9a8e8..7b562b6184bc 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -158,6 +158,12 @@ static inline unsigned int kvm_arch_para_features(void)
158 return cpuid_eax(KVM_CPUID_FEATURES); 158 return cpuid_eax(KVM_CPUID_FEATURES);
159} 159}
160 160
161#ifdef CONFIG_KVM_GUEST
162void __init kvm_guest_init(void);
163#else
164#define kvm_guest_init() do { } while (0)
161#endif 165#endif
162 166
167#endif /* __KERNEL__ */
168
163#endif /* _ASM_X86_KVM_PARA_H */ 169#endif /* _ASM_X86_KVM_PARA_H */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 986f7790fdb2..83c4bb1d917d 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -198,6 +198,7 @@
198#define MSR_IA32_TSC 0x00000010 198#define MSR_IA32_TSC 0x00000010
199#define MSR_IA32_PLATFORM_ID 0x00000017 199#define MSR_IA32_PLATFORM_ID 0x00000017
200#define MSR_IA32_EBL_CR_POWERON 0x0000002a 200#define MSR_IA32_EBL_CR_POWERON 0x0000002a
201#define MSR_EBC_FREQUENCY_ID 0x0000002c
201#define MSR_IA32_FEATURE_CONTROL 0x0000003a 202#define MSR_IA32_FEATURE_CONTROL 0x0000003a
202 203
203#define FEATURE_CONTROL_LOCKED (1<<0) 204#define FEATURE_CONTROL_LOCKED (1<<0)
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index cd02f324aa6b..7f7e577a0e39 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -12,4 +12,42 @@ void pvclock_read_wallclock(struct pvclock_wall_clock *wall,
12 struct pvclock_vcpu_time_info *vcpu, 12 struct pvclock_vcpu_time_info *vcpu,
13 struct timespec *ts); 13 struct timespec *ts);
14 14
15/*
16 * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
17 * yielding a 64-bit result.
18 */
19static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
20{
21 u64 product;
22#ifdef __i386__
23 u32 tmp1, tmp2;
24#endif
25
26 if (shift < 0)
27 delta >>= -shift;
28 else
29 delta <<= shift;
30
31#ifdef __i386__
32 __asm__ (
33 "mul %5 ; "
34 "mov %4,%%eax ; "
35 "mov %%edx,%4 ; "
36 "mul %5 ; "
37 "xor %5,%5 ; "
38 "add %4,%%eax ; "
39 "adc %5,%%edx ; "
40 : "=A" (product), "=r" (tmp1), "=r" (tmp2)
41 : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
42#elif defined(__x86_64__)
43 __asm__ (
44 "mul %%rdx ; shrd $32,%%rdx,%%rax"
45 : "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
46#else
47#error implement me!
48#endif
49
50 return product;
51}
52
15#endif /* _ASM_X86_PVCLOCK_H */ 53#endif /* _ASM_X86_PVCLOCK_H */
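
The pvclock_scale_delta() helper added above computes (delta << shift) * mul_frac / 2^32, i.e. it scales a TSC delta by a 32.32 fixed-point fraction; the i386 and x86-64 asm blocks just implement the 64x32-bit multiply and keep the middle 64 bits of the 96-bit product. A portable C reference for the same arithmetic, useful for sanity-checking, is sketched below (assumes a compiler with unsigned __int128, so effectively a 64-bit host).

    #include <stdint.h>
    #include <stdio.h>

    /* Same math as pvclock_scale_delta(), with a 128-bit intermediate. */
    static uint64_t scale_delta_ref(uint64_t delta, uint32_t mul_frac, int shift)
    {
            if (shift < 0)
                    delta >>= -shift;
            else
                    delta <<= shift;

            return (uint64_t)(((unsigned __int128)delta * mul_frac) >> 32);
    }

    int main(void)
    {
            /* mul_frac 0x80000000 is fixed-point 0.5: 1000 cycles -> 500 ns. */
            printf("%llu\n",
                   (unsigned long long)scale_delta_ref(1000, 0x80000000u, 0));
            return 0;
    }
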
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index fbbc4dadecc4..0e4f24c2a746 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -189,8 +189,8 @@
189 * Intel Order Number 241704-001. Microsoft Part Number 781-110-X01. 189 * Intel Order Number 241704-001. Microsoft Part Number 781-110-X01.
190 * 190 *
191 * [This document is available free from Intel by calling 800.628.8686 (fax 191 * [This document is available free from Intel by calling 800.628.8686 (fax
192 * 916.356.6100) or 800.548.4725; or via anonymous ftp from 192 * 916.356.6100) or 800.548.4725; or from
193 * ftp://ftp.intel.com/pub/IAL/software_specs/apmv11.doc. It is also 193 * http://www.microsoft.com/whdc/archive/amp_12.mspx It is also
194 * available from Microsoft by calling 206.882.8080.] 194 * available from Microsoft by calling 206.882.8080.]
195 * 195 *
196 * APM 1.2 Reference: 196 * APM 1.2 Reference:
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index eb9b76c716c2..ca43ce31a19c 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -128,13 +128,15 @@ static struct clocksource kvm_clock = {
128static int kvm_register_clock(char *txt) 128static int kvm_register_clock(char *txt)
129{ 129{
130 int cpu = smp_processor_id(); 130 int cpu = smp_processor_id();
131 int low, high; 131 int low, high, ret;
132
132 low = (int)__pa(&per_cpu(hv_clock, cpu)) | 1; 133 low = (int)__pa(&per_cpu(hv_clock, cpu)) | 1;
133 high = ((u64)__pa(&per_cpu(hv_clock, cpu)) >> 32); 134 high = ((u64)__pa(&per_cpu(hv_clock, cpu)) >> 32);
135 ret = native_write_msr_safe(msr_kvm_system_time, low, high);
134 printk(KERN_INFO "kvm-clock: cpu %d, msr %x:%x, %s\n", 136 printk(KERN_INFO "kvm-clock: cpu %d, msr %x:%x, %s\n",
135 cpu, high, low, txt); 137 cpu, high, low, txt);
136 138
137 return native_write_msr_safe(msr_kvm_system_time, low, high); 139 return ret;
138} 140}
139 141
140#ifdef CONFIG_X86_LOCAL_APIC 142#ifdef CONFIG_X86_LOCAL_APIC
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index 0b3d37e83606..1cca374a2bac 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -12,7 +12,7 @@
12 * Software Developer's Manual 12 * Software Developer's Manual
13 * Order Number 253668 or free download from: 13 * Order Number 253668 or free download from:
14 * 14 *
15 * http://developer.intel.com/design/pentium4/manuals/253668.htm 15 * http://developer.intel.com/Assets/PDF/manual/253668.pdf
16 * 16 *
17 * For more information, go to http://www.urbanmyth.org/microcode 17 * For more information, go to http://www.urbanmyth.org/microcode
18 * 18 *
diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
index 356170262a93..dcb65cc0a053 100644
--- a/arch/x86/kernel/microcode_intel.c
+++ b/arch/x86/kernel/microcode_intel.c
@@ -12,7 +12,7 @@
12 * Software Developer's Manual 12 * Software Developer's Manual
13 * Order Number 253668 or free download from: 13 * Order Number 253668 or free download from:
14 * 14 *
15 * http://developer.intel.com/design/pentium4/manuals/253668.htm 15 * http://developer.intel.com/Assets/PDF/manual/253668.pdf
16 * 16 *
17 * For more information, go to http://www.urbanmyth.org/microcode 17 * For more information, go to http://www.urbanmyth.org/microcode
18 * 18 *
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index 239427ca02af..bab3b9e6f66d 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -82,7 +82,8 @@ static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
82static u64 pvclock_get_nsec_offset(struct pvclock_shadow_time *shadow) 82static u64 pvclock_get_nsec_offset(struct pvclock_shadow_time *shadow)
83{ 83{
84 u64 delta = native_read_tsc() - shadow->tsc_timestamp; 84 u64 delta = native_read_tsc() - shadow->tsc_timestamp;
85 return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift); 85 return pvclock_scale_delta(delta, shadow->tsc_to_nsec_mul,
86 shadow->tsc_shift);
86} 87}
87 88
88/* 89/*
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 970bbd479516..ddc131ff438f 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -64,6 +64,13 @@ config KVM_AMD
64 To compile this as a module, choose M here: the module 64 To compile this as a module, choose M here: the module
65 will be called kvm-amd. 65 will be called kvm-amd.
66 66
67config KVM_MMU_AUDIT
68 bool "Audit KVM MMU"
69 depends on KVM && TRACEPOINTS
70 ---help---
71	  This option adds a R/W KVM module parameter 'mmu_audit', which allows
72	  auditing of the KVM MMU at runtime.
73
67# OK, it's a little counter-intuitive to do this, but it puts it neatly under 74# OK, it's a little counter-intuitive to do this, but it puts it neatly under
68# the virtualization menu. 75# the virtualization menu.
69source drivers/vhost/Kconfig 76source drivers/vhost/Kconfig
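
Once KVM_MMU_AUDIT is enabled, 'mmu_audit' is a runtime-writable module parameter. A minimal userspace sketch for toggling it follows; the sysfs path is an assumption based on the usual /sys/module/<module>/parameters/<param> convention, not something stated in this patch:

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/module/kvm/parameters/mmu_audit";	/* assumed path */
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fputs("1\n", f);	/* turn runtime MMU auditing on */
	fclose(f);
	return 0;
}
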
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 66ca98aafdd6..38b6e8dafaff 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -9,7 +9,7 @@
9 * privileged instructions: 9 * privileged instructions:
10 * 10 *
11 * Copyright (C) 2006 Qumranet 11 * Copyright (C) 2006 Qumranet
12 * Copyright 2010 Red Hat, Inc. and/or its affilates. 12 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
13 * 13 *
14 * Avi Kivity <avi@qumranet.com> 14 * Avi Kivity <avi@qumranet.com>
15 * Yaniv Kamay <yaniv@qumranet.com> 15 * Yaniv Kamay <yaniv@qumranet.com>
@@ -51,13 +51,13 @@
51#define ImplicitOps (1<<1) /* Implicit in opcode. No generic decode. */ 51#define ImplicitOps (1<<1) /* Implicit in opcode. No generic decode. */
52#define DstReg (2<<1) /* Register operand. */ 52#define DstReg (2<<1) /* Register operand. */
53#define DstMem (3<<1) /* Memory operand. */ 53#define DstMem (3<<1) /* Memory operand. */
54#define DstAcc (4<<1) /* Destination Accumulator */ 54#define DstAcc (4<<1) /* Destination Accumulator */
55#define DstDI (5<<1) /* Destination is in ES:(E)DI */ 55#define DstDI (5<<1) /* Destination is in ES:(E)DI */
56#define DstMem64 (6<<1) /* 64bit memory operand */ 56#define DstMem64 (6<<1) /* 64bit memory operand */
57#define DstImmUByte (7<<1) /* 8-bit unsigned immediate operand */
57#define DstMask (7<<1) 58#define DstMask (7<<1)
58/* Source operand type. */ 59/* Source operand type. */
59#define SrcNone (0<<4) /* No source operand. */ 60#define SrcNone (0<<4) /* No source operand. */
60#define SrcImplicit (0<<4) /* Source operand is implicit in the opcode. */
61#define SrcReg (1<<4) /* Register operand. */ 61#define SrcReg (1<<4) /* Register operand. */
62#define SrcMem (2<<4) /* Memory operand. */ 62#define SrcMem (2<<4) /* Memory operand. */
63#define SrcMem16 (3<<4) /* Memory operand (16-bit). */ 63#define SrcMem16 (3<<4) /* Memory operand (16-bit). */
@@ -71,6 +71,7 @@
71#define SrcImmFAddr (0xb<<4) /* Source is immediate far address */ 71#define SrcImmFAddr (0xb<<4) /* Source is immediate far address */
72#define SrcMemFAddr (0xc<<4) /* Source is far address in memory */ 72#define SrcMemFAddr (0xc<<4) /* Source is far address in memory */
73#define SrcAcc (0xd<<4) /* Source Accumulator */ 73#define SrcAcc (0xd<<4) /* Source Accumulator */
74#define SrcImmU16 (0xe<<4) /* Immediate operand, unsigned, 16 bits */
74#define SrcMask (0xf<<4) 75#define SrcMask (0xf<<4)
75/* Generic ModRM decode. */ 76/* Generic ModRM decode. */
76#define ModRM (1<<8) 77#define ModRM (1<<8)
@@ -82,8 +83,10 @@
82#define Stack (1<<13) /* Stack instruction (push/pop) */ 83#define Stack (1<<13) /* Stack instruction (push/pop) */
83#define Group (1<<14) /* Bits 3:5 of modrm byte extend opcode */ 84#define Group (1<<14) /* Bits 3:5 of modrm byte extend opcode */
84#define GroupDual (1<<15) /* Alternate decoding of mod == 3 */ 85#define GroupDual (1<<15) /* Alternate decoding of mod == 3 */
85#define GroupMask 0xff /* Group number stored in bits 0:7 */
86/* Misc flags */ 86/* Misc flags */
87#define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
88#define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
89#define Undefined (1<<25) /* No Such Instruction */
87#define Lock (1<<26) /* lock prefix is allowed for the instruction */ 90#define Lock (1<<26) /* lock prefix is allowed for the instruction */
88#define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */ 91#define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
89#define No64 (1<<28) 92#define No64 (1<<28)
@@ -92,285 +95,30 @@
92#define Src2CL (1<<29) 95#define Src2CL (1<<29)
93#define Src2ImmByte (2<<29) 96#define Src2ImmByte (2<<29)
94#define Src2One (3<<29) 97#define Src2One (3<<29)
98#define Src2Imm (4<<29)
95#define Src2Mask (7<<29) 99#define Src2Mask (7<<29)
96 100
97enum { 101#define X2(x...) x, x
98 Group1_80, Group1_81, Group1_82, Group1_83, 102#define X3(x...) X2(x), x
99 Group1A, Group3_Byte, Group3, Group4, Group5, Group7, 103#define X4(x...) X2(x), X2(x)
100 Group8, Group9, 104#define X5(x...) X4(x), x
105#define X6(x...) X4(x), X2(x)
106#define X7(x...) X4(x), X3(x)
107#define X8(x...) X4(x), X4(x)
108#define X16(x...) X8(x), X8(x)
109
110struct opcode {
111 u32 flags;
112 union {
113 int (*execute)(struct x86_emulate_ctxt *ctxt);
114 struct opcode *group;
115 struct group_dual *gdual;
116 } u;
101}; 117};
102 118
103static u32 opcode_table[256] = { 119struct group_dual {
104 /* 0x00 - 0x07 */ 120 struct opcode mod012[8];
105 ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock, 121 struct opcode mod3[8];
106 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
107 ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
108 ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
109 /* 0x08 - 0x0F */
110 ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
111 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
112 ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
113 ImplicitOps | Stack | No64, 0,
114 /* 0x10 - 0x17 */
115 ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
116 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
117 ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
118 ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
119 /* 0x18 - 0x1F */
120 ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
121 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
122 ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
123 ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
124 /* 0x20 - 0x27 */
125 ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
126 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
127 ByteOp | DstAcc | SrcImmByte, DstAcc | SrcImm, 0, 0,
128 /* 0x28 - 0x2F */
129 ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
130 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
131 ByteOp | DstAcc | SrcImmByte, DstAcc | SrcImm, 0, 0,
132 /* 0x30 - 0x37 */
133 ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
134 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
135 ByteOp | DstAcc | SrcImmByte, DstAcc | SrcImm, 0, 0,
136 /* 0x38 - 0x3F */
137 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
138 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
139 ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
140 0, 0,
141 /* 0x40 - 0x47 */
142 DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg,
143 /* 0x48 - 0x4F */
144 DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg,
145 /* 0x50 - 0x57 */
146 SrcReg | Stack, SrcReg | Stack, SrcReg | Stack, SrcReg | Stack,
147 SrcReg | Stack, SrcReg | Stack, SrcReg | Stack, SrcReg | Stack,
148 /* 0x58 - 0x5F */
149 DstReg | Stack, DstReg | Stack, DstReg | Stack, DstReg | Stack,
150 DstReg | Stack, DstReg | Stack, DstReg | Stack, DstReg | Stack,
151 /* 0x60 - 0x67 */
152 ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
153 0, DstReg | SrcMem32 | ModRM | Mov /* movsxd (x86/64) */ ,
154 0, 0, 0, 0,
155 /* 0x68 - 0x6F */
156 SrcImm | Mov | Stack, 0, SrcImmByte | Mov | Stack, 0,
157 DstDI | ByteOp | Mov | String, DstDI | Mov | String, /* insb, insw/insd */
158 SrcSI | ByteOp | ImplicitOps | String, SrcSI | ImplicitOps | String, /* outsb, outsw/outsd */
159 /* 0x70 - 0x77 */
160 SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
161 SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
162 /* 0x78 - 0x7F */
163 SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
164 SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
165 /* 0x80 - 0x87 */
166 Group | Group1_80, Group | Group1_81,
167 Group | Group1_82, Group | Group1_83,
168 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
169 ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
170 /* 0x88 - 0x8F */
171 ByteOp | DstMem | SrcReg | ModRM | Mov, DstMem | SrcReg | ModRM | Mov,
172 ByteOp | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
173 DstMem | SrcNone | ModRM | Mov, ModRM | DstReg,
174 ImplicitOps | SrcMem16 | ModRM, Group | Group1A,
175 /* 0x90 - 0x97 */
176 DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg,
177 /* 0x98 - 0x9F */
178 0, 0, SrcImmFAddr | No64, 0,
179 ImplicitOps | Stack, ImplicitOps | Stack, 0, 0,
180 /* 0xA0 - 0xA7 */
181 ByteOp | DstAcc | SrcMem | Mov | MemAbs, DstAcc | SrcMem | Mov | MemAbs,
182 ByteOp | DstMem | SrcAcc | Mov | MemAbs, DstMem | SrcAcc | Mov | MemAbs,
183 ByteOp | SrcSI | DstDI | Mov | String, SrcSI | DstDI | Mov | String,
184 ByteOp | SrcSI | DstDI | String, SrcSI | DstDI | String,
185 /* 0xA8 - 0xAF */
186 DstAcc | SrcImmByte | ByteOp, DstAcc | SrcImm, ByteOp | DstDI | Mov | String, DstDI | Mov | String,
187 ByteOp | SrcSI | DstAcc | Mov | String, SrcSI | DstAcc | Mov | String,
188 ByteOp | DstDI | String, DstDI | String,
189 /* 0xB0 - 0xB7 */
190 ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
191 ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
192 ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
193 ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
194 /* 0xB8 - 0xBF */
195 DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
196 DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
197 DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
198 DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
199 /* 0xC0 - 0xC7 */
200 ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM,
201 0, ImplicitOps | Stack, 0, 0,
202 ByteOp | DstMem | SrcImm | ModRM | Mov, DstMem | SrcImm | ModRM | Mov,
203 /* 0xC8 - 0xCF */
204 0, 0, 0, ImplicitOps | Stack,
205 ImplicitOps, SrcImmByte, ImplicitOps | No64, ImplicitOps,
206 /* 0xD0 - 0xD7 */
207 ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM,
208 ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM,
209 0, 0, 0, 0,
210 /* 0xD8 - 0xDF */
211 0, 0, 0, 0, 0, 0, 0, 0,
212 /* 0xE0 - 0xE7 */
213 0, 0, 0, 0,
214 ByteOp | SrcImmUByte | DstAcc, SrcImmUByte | DstAcc,
215 ByteOp | SrcImmUByte | DstAcc, SrcImmUByte | DstAcc,
216 /* 0xE8 - 0xEF */
217 SrcImm | Stack, SrcImm | ImplicitOps,
218 SrcImmFAddr | No64, SrcImmByte | ImplicitOps,
219 SrcNone | ByteOp | DstAcc, SrcNone | DstAcc,
220 SrcNone | ByteOp | DstAcc, SrcNone | DstAcc,
221 /* 0xF0 - 0xF7 */
222 0, 0, 0, 0,
223 ImplicitOps | Priv, ImplicitOps, Group | Group3_Byte, Group | Group3,
224 /* 0xF8 - 0xFF */
225 ImplicitOps, 0, ImplicitOps, ImplicitOps,
226 ImplicitOps, ImplicitOps, Group | Group4, Group | Group5,
227};
228
229static u32 twobyte_table[256] = {
230 /* 0x00 - 0x0F */
231 0, Group | GroupDual | Group7, 0, 0,
232 0, ImplicitOps, ImplicitOps | Priv, 0,
233 ImplicitOps | Priv, ImplicitOps | Priv, 0, 0,
234 0, ImplicitOps | ModRM, 0, 0,
235 /* 0x10 - 0x1F */
236 0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0,
237 /* 0x20 - 0x2F */
238 ModRM | ImplicitOps | Priv, ModRM | Priv,
239 ModRM | ImplicitOps | Priv, ModRM | Priv,
240 0, 0, 0, 0,
241 0, 0, 0, 0, 0, 0, 0, 0,
242 /* 0x30 - 0x3F */
243 ImplicitOps | Priv, 0, ImplicitOps | Priv, 0,
244 ImplicitOps, ImplicitOps | Priv, 0, 0,
245 0, 0, 0, 0, 0, 0, 0, 0,
246 /* 0x40 - 0x47 */
247 DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
248 DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
249 DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
250 DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
251 /* 0x48 - 0x4F */
252 DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
253 DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
254 DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
255 DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
256 /* 0x50 - 0x5F */
257 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
258 /* 0x60 - 0x6F */
259 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
260 /* 0x70 - 0x7F */
261 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
262 /* 0x80 - 0x8F */
263 SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm,
264 SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm,
265 /* 0x90 - 0x9F */
266 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
267 /* 0xA0 - 0xA7 */
268 ImplicitOps | Stack, ImplicitOps | Stack,
269 0, DstMem | SrcReg | ModRM | BitOp,
270 DstMem | SrcReg | Src2ImmByte | ModRM,
271 DstMem | SrcReg | Src2CL | ModRM, 0, 0,
272 /* 0xA8 - 0xAF */
273 ImplicitOps | Stack, ImplicitOps | Stack,
274 0, DstMem | SrcReg | ModRM | BitOp | Lock,
275 DstMem | SrcReg | Src2ImmByte | ModRM,
276 DstMem | SrcReg | Src2CL | ModRM,
277 ModRM, 0,
278 /* 0xB0 - 0xB7 */
279 ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
280 0, DstMem | SrcReg | ModRM | BitOp | Lock,
281 0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
282 DstReg | SrcMem16 | ModRM | Mov,
283 /* 0xB8 - 0xBF */
284 0, 0,
285 Group | Group8, DstMem | SrcReg | ModRM | BitOp | Lock,
286 0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
287 DstReg | SrcMem16 | ModRM | Mov,
288 /* 0xC0 - 0xCF */
289 0, 0, 0, DstMem | SrcReg | ModRM | Mov,
290 0, 0, 0, Group | GroupDual | Group9,
291 0, 0, 0, 0, 0, 0, 0, 0,
292 /* 0xD0 - 0xDF */
293 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
294 /* 0xE0 - 0xEF */
295 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
296 /* 0xF0 - 0xFF */
297 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
298};
299
300static u32 group_table[] = {
301 [Group1_80*8] =
302 ByteOp | DstMem | SrcImm | ModRM | Lock,
303 ByteOp | DstMem | SrcImm | ModRM | Lock,
304 ByteOp | DstMem | SrcImm | ModRM | Lock,
305 ByteOp | DstMem | SrcImm | ModRM | Lock,
306 ByteOp | DstMem | SrcImm | ModRM | Lock,
307 ByteOp | DstMem | SrcImm | ModRM | Lock,
308 ByteOp | DstMem | SrcImm | ModRM | Lock,
309 ByteOp | DstMem | SrcImm | ModRM,
310 [Group1_81*8] =
311 DstMem | SrcImm | ModRM | Lock,
312 DstMem | SrcImm | ModRM | Lock,
313 DstMem | SrcImm | ModRM | Lock,
314 DstMem | SrcImm | ModRM | Lock,
315 DstMem | SrcImm | ModRM | Lock,
316 DstMem | SrcImm | ModRM | Lock,
317 DstMem | SrcImm | ModRM | Lock,
318 DstMem | SrcImm | ModRM,
319 [Group1_82*8] =
320 ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
321 ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
322 ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
323 ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
324 ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
325 ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
326 ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
327 ByteOp | DstMem | SrcImm | ModRM | No64,
328 [Group1_83*8] =
329 DstMem | SrcImmByte | ModRM | Lock,
330 DstMem | SrcImmByte | ModRM | Lock,
331 DstMem | SrcImmByte | ModRM | Lock,
332 DstMem | SrcImmByte | ModRM | Lock,
333 DstMem | SrcImmByte | ModRM | Lock,
334 DstMem | SrcImmByte | ModRM | Lock,
335 DstMem | SrcImmByte | ModRM | Lock,
336 DstMem | SrcImmByte | ModRM,
337 [Group1A*8] =
338 DstMem | SrcNone | ModRM | Mov | Stack, 0, 0, 0, 0, 0, 0, 0,
339 [Group3_Byte*8] =
340 ByteOp | SrcImm | DstMem | ModRM, ByteOp | SrcImm | DstMem | ModRM,
341 ByteOp | DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM,
342 0, 0, 0, 0,
343 [Group3*8] =
344 DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM,
345 DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
346 0, 0, 0, 0,
347 [Group4*8] =
348 ByteOp | DstMem | SrcNone | ModRM | Lock, ByteOp | DstMem | SrcNone | ModRM | Lock,
349 0, 0, 0, 0, 0, 0,
350 [Group5*8] =
351 DstMem | SrcNone | ModRM | Lock, DstMem | SrcNone | ModRM | Lock,
352 SrcMem | ModRM | Stack, 0,
353 SrcMem | ModRM | Stack, SrcMemFAddr | ModRM | ImplicitOps,
354 SrcMem | ModRM | Stack, 0,
355 [Group7*8] =
356 0, 0, ModRM | SrcMem | Priv, ModRM | SrcMem | Priv,
357 SrcNone | ModRM | DstMem | Mov, 0,
358 SrcMem16 | ModRM | Mov | Priv, SrcMem | ModRM | ByteOp | Priv,
359 [Group8*8] =
360 0, 0, 0, 0,
361 DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM | Lock,
362 DstMem | SrcImmByte | ModRM | Lock, DstMem | SrcImmByte | ModRM | Lock,
363 [Group9*8] =
364 0, DstMem64 | ModRM | Lock, 0, 0, 0, 0, 0, 0,
365};
366
367static u32 group2_table[] = {
368 [Group7*8] =
369 SrcNone | ModRM | Priv, 0, 0, SrcNone | ModRM | Priv,
370 SrcNone | ModRM | DstMem | Mov, 0,
371 SrcMem16 | ModRM | Mov | Priv, 0,
372 [Group9*8] =
373 0, 0, 0, 0, 0, 0, 0, 0,
374}; 122};
375 123
376/* EFLAGS bit definitions. */ 124/* EFLAGS bit definitions. */
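
The X2..X16 helpers introduced above are plain repetition macros used to fill runs of identical entries in the new struct opcode tables. A tiny standalone sketch of how they expand (illustration only, not part of the patch):

#include <stdio.h>

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)

/* X16(0) contributes sixteen zero entries, X4(1) four ones, X3(2) three twos */
static const int table[] = { X16(0), X4(1), X3(2) };

int main(void)
{
	printf("%zu entries\n", sizeof(table) / sizeof(table[0]));	/* 23 entries */
	return 0;
}
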
@@ -392,6 +140,9 @@ static u32 group2_table[] = {
392#define EFLG_PF (1<<2) 140#define EFLG_PF (1<<2)
393#define EFLG_CF (1<<0) 141#define EFLG_CF (1<<0)
394 142
143#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
144#define EFLG_RESERVED_ONE_MASK 2
145
395/* 146/*
396 * Instruction emulation: 147 * Instruction emulation:
397 * Most instructions are emulated directly via a fragment of inline assembly 148 * Most instructions are emulated directly via a fragment of inline assembly
@@ -444,13 +195,13 @@ static u32 group2_table[] = {
444#define ON64(x) 195#define ON64(x)
445#endif 196#endif
446 197
447#define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \ 198#define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
448 do { \ 199 do { \
449 __asm__ __volatile__ ( \ 200 __asm__ __volatile__ ( \
450 _PRE_EFLAGS("0", "4", "2") \ 201 _PRE_EFLAGS("0", "4", "2") \
451 _op _suffix " %"_x"3,%1; " \ 202 _op _suffix " %"_x"3,%1; " \
452 _POST_EFLAGS("0", "4", "2") \ 203 _POST_EFLAGS("0", "4", "2") \
453 : "=m" (_eflags), "=m" ((_dst).val), \ 204 : "=m" (_eflags), "+q" (*(_dsttype*)&(_dst).val),\
454 "=&r" (_tmp) \ 205 "=&r" (_tmp) \
455 : _y ((_src).val), "i" (EFLAGS_MASK)); \ 206 : _y ((_src).val), "i" (EFLAGS_MASK)); \
456 } while (0) 207 } while (0)
@@ -463,13 +214,13 @@ static u32 group2_table[] = {
463 \ 214 \
464 switch ((_dst).bytes) { \ 215 switch ((_dst).bytes) { \
465 case 2: \ 216 case 2: \
466 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \ 217 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
467 break; \ 218 break; \
468 case 4: \ 219 case 4: \
469 ____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l"); \ 220 ____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l",u32);\
470 break; \ 221 break; \
471 case 8: \ 222 case 8: \
472 ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q")); \ 223 ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q",u64)); \
473 break; \ 224 break; \
474 } \ 225 } \
475 } while (0) 226 } while (0)
@@ -479,7 +230,7 @@ static u32 group2_table[] = {
479 unsigned long _tmp; \ 230 unsigned long _tmp; \
480 switch ((_dst).bytes) { \ 231 switch ((_dst).bytes) { \
481 case 1: \ 232 case 1: \
482 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \ 233 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
483 break; \ 234 break; \
484 default: \ 235 default: \
485 __emulate_2op_nobyte(_op, _src, _dst, _eflags, \ 236 __emulate_2op_nobyte(_op, _src, _dst, _eflags, \
@@ -566,6 +317,74 @@ static u32 group2_table[] = {
566 } \ 317 } \
567 } while (0) 318 } while (0)
568 319
320#define __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, _suffix) \
321 do { \
322 unsigned long _tmp; \
323 \
324 __asm__ __volatile__ ( \
325 _PRE_EFLAGS("0", "4", "1") \
326 _op _suffix " %5; " \
327 _POST_EFLAGS("0", "4", "1") \
328 : "=m" (_eflags), "=&r" (_tmp), \
329 "+a" (_rax), "+d" (_rdx) \
330 : "i" (EFLAGS_MASK), "m" ((_src).val), \
331 "a" (_rax), "d" (_rdx)); \
332 } while (0)
333
334#define __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, _eflags, _suffix, _ex) \
335 do { \
336 unsigned long _tmp; \
337 \
338 __asm__ __volatile__ ( \
339 _PRE_EFLAGS("0", "5", "1") \
340 "1: \n\t" \
341 _op _suffix " %6; " \
342 "2: \n\t" \
343 _POST_EFLAGS("0", "5", "1") \
344 ".pushsection .fixup,\"ax\" \n\t" \
345 "3: movb $1, %4 \n\t" \
346 "jmp 2b \n\t" \
347 ".popsection \n\t" \
348 _ASM_EXTABLE(1b, 3b) \
349 : "=m" (_eflags), "=&r" (_tmp), \
350 "+a" (_rax), "+d" (_rdx), "+qm"(_ex) \
351 : "i" (EFLAGS_MASK), "m" ((_src).val), \
352 "a" (_rax), "d" (_rdx)); \
353 } while (0)
354
355/* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */
356#define emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags) \
357 do { \
358 switch((_src).bytes) { \
359 case 1: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "b"); break; \
360 case 2: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "w"); break; \
361 case 4: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "l"); break; \
362 case 8: ON64(__emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "q")); break; \
363 } \
364 } while (0)
365
366#define emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, _eflags, _ex) \
367 do { \
368 switch((_src).bytes) { \
369 case 1: \
370 __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
371 _eflags, "b", _ex); \
372 break; \
373 case 2: \
374 __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
375 _eflags, "w", _ex); \
376 break; \
377 case 4: \
378 __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
379 _eflags, "l", _ex); \
380 break; \
381 case 8: ON64( \
382 __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
383 _eflags, "q", _ex)); \
384 break; \
385 } \
386 } while (0)
387
569/* Fetch next part of the instruction being emulated. */ 388/* Fetch next part of the instruction being emulated. */
570#define insn_fetch(_type, _size, _eip) \ 389#define insn_fetch(_type, _size, _eip) \
571({ unsigned long _x; \ 390({ unsigned long _x; \
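
The new emulate_1op_rax_rdx* macros above reproduce the single-operand mul/imul/div/idiv convention, in which RAX and RDX are implicit. For mul, the double-width product is split across the register pair; a standalone sketch of that split follows (illustration only, assumes unsigned __int128):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t rax = 0xffffffffffffffffULL;	/* multiplicand */
	uint64_t src = 2;			/* one-operand mul source */

	unsigned __int128 product = (unsigned __int128)rax * src;
	uint64_t lo = (uint64_t)product;		/* new rax: low 64 bits  */
	uint64_t hi = (uint64_t)(product >> 64);	/* new rdx: high 64 bits */

	printf("rdx:rax = %016llx:%016llx\n",
	       (unsigned long long)hi, (unsigned long long)lo);
	return 0;
}
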
@@ -661,7 +480,6 @@ static void emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
661 ctxt->exception = vec; 480 ctxt->exception = vec;
662 ctxt->error_code = error; 481 ctxt->error_code = error;
663 ctxt->error_code_valid = valid; 482 ctxt->error_code_valid = valid;
664 ctxt->restart = false;
665} 483}
666 484
667static void emulate_gp(struct x86_emulate_ctxt *ctxt, int err) 485static void emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
@@ -669,11 +487,9 @@ static void emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
669 emulate_exception(ctxt, GP_VECTOR, err, true); 487 emulate_exception(ctxt, GP_VECTOR, err, true);
670} 488}
671 489
672static void emulate_pf(struct x86_emulate_ctxt *ctxt, unsigned long addr, 490static void emulate_pf(struct x86_emulate_ctxt *ctxt)
673 int err)
674{ 491{
675 ctxt->cr2 = addr; 492 emulate_exception(ctxt, PF_VECTOR, 0, true);
676 emulate_exception(ctxt, PF_VECTOR, err, true);
677} 493}
678 494
679static void emulate_ud(struct x86_emulate_ctxt *ctxt) 495static void emulate_ud(struct x86_emulate_ctxt *ctxt)
@@ -686,6 +502,12 @@ static void emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
686 emulate_exception(ctxt, TS_VECTOR, err, true); 502 emulate_exception(ctxt, TS_VECTOR, err, true);
687} 503}
688 504
505static int emulate_de(struct x86_emulate_ctxt *ctxt)
506{
507 emulate_exception(ctxt, DE_VECTOR, 0, false);
508 return X86EMUL_PROPAGATE_FAULT;
509}
510
689static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt, 511static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
690 struct x86_emulate_ops *ops, 512 struct x86_emulate_ops *ops,
691 unsigned long eip, u8 *dest) 513 unsigned long eip, u8 *dest)
@@ -742,7 +564,7 @@ static void *decode_register(u8 modrm_reg, unsigned long *regs,
742 564
743static int read_descriptor(struct x86_emulate_ctxt *ctxt, 565static int read_descriptor(struct x86_emulate_ctxt *ctxt,
744 struct x86_emulate_ops *ops, 566 struct x86_emulate_ops *ops,
745 void *ptr, 567 ulong addr,
746 u16 *size, unsigned long *address, int op_bytes) 568 u16 *size, unsigned long *address, int op_bytes)
747{ 569{
748 int rc; 570 int rc;
@@ -750,12 +572,10 @@ static int read_descriptor(struct x86_emulate_ctxt *ctxt,
750 if (op_bytes == 2) 572 if (op_bytes == 2)
751 op_bytes = 3; 573 op_bytes = 3;
752 *address = 0; 574 *address = 0;
753 rc = ops->read_std((unsigned long)ptr, (unsigned long *)size, 2, 575 rc = ops->read_std(addr, (unsigned long *)size, 2, ctxt->vcpu, NULL);
754 ctxt->vcpu, NULL);
755 if (rc != X86EMUL_CONTINUE) 576 if (rc != X86EMUL_CONTINUE)
756 return rc; 577 return rc;
757 rc = ops->read_std((unsigned long)ptr + 2, address, op_bytes, 578 rc = ops->read_std(addr + 2, address, op_bytes, ctxt->vcpu, NULL);
758 ctxt->vcpu, NULL);
759 return rc; 579 return rc;
760} 580}
761 581
@@ -794,6 +614,24 @@ static int test_cc(unsigned int condition, unsigned int flags)
794 return (!!rc ^ (condition & 1)); 614 return (!!rc ^ (condition & 1));
795} 615}
796 616
617static void fetch_register_operand(struct operand *op)
618{
619 switch (op->bytes) {
620 case 1:
621 op->val = *(u8 *)op->addr.reg;
622 break;
623 case 2:
624 op->val = *(u16 *)op->addr.reg;
625 break;
626 case 4:
627 op->val = *(u32 *)op->addr.reg;
628 break;
629 case 8:
630 op->val = *(u64 *)op->addr.reg;
631 break;
632 }
633}
634
797static void decode_register_operand(struct operand *op, 635static void decode_register_operand(struct operand *op,
798 struct decode_cache *c, 636 struct decode_cache *c,
799 int inhibit_bytereg) 637 int inhibit_bytereg)
@@ -805,34 +643,25 @@ static void decode_register_operand(struct operand *op,
805 reg = (c->b & 7) | ((c->rex_prefix & 1) << 3); 643 reg = (c->b & 7) | ((c->rex_prefix & 1) << 3);
806 op->type = OP_REG; 644 op->type = OP_REG;
807 if ((c->d & ByteOp) && !inhibit_bytereg) { 645 if ((c->d & ByteOp) && !inhibit_bytereg) {
808 op->ptr = decode_register(reg, c->regs, highbyte_regs); 646 op->addr.reg = decode_register(reg, c->regs, highbyte_regs);
809 op->val = *(u8 *)op->ptr;
810 op->bytes = 1; 647 op->bytes = 1;
811 } else { 648 } else {
812 op->ptr = decode_register(reg, c->regs, 0); 649 op->addr.reg = decode_register(reg, c->regs, 0);
813 op->bytes = c->op_bytes; 650 op->bytes = c->op_bytes;
814 switch (op->bytes) {
815 case 2:
816 op->val = *(u16 *)op->ptr;
817 break;
818 case 4:
819 op->val = *(u32 *)op->ptr;
820 break;
821 case 8:
822 op->val = *(u64 *) op->ptr;
823 break;
824 }
825 } 651 }
652 fetch_register_operand(op);
826 op->orig_val = op->val; 653 op->orig_val = op->val;
827} 654}
828 655
829static int decode_modrm(struct x86_emulate_ctxt *ctxt, 656static int decode_modrm(struct x86_emulate_ctxt *ctxt,
830 struct x86_emulate_ops *ops) 657 struct x86_emulate_ops *ops,
658 struct operand *op)
831{ 659{
832 struct decode_cache *c = &ctxt->decode; 660 struct decode_cache *c = &ctxt->decode;
833 u8 sib; 661 u8 sib;
834 int index_reg = 0, base_reg = 0, scale; 662 int index_reg = 0, base_reg = 0, scale;
835 int rc = X86EMUL_CONTINUE; 663 int rc = X86EMUL_CONTINUE;
664 ulong modrm_ea = 0;
836 665
837 if (c->rex_prefix) { 666 if (c->rex_prefix) {
838 c->modrm_reg = (c->rex_prefix & 4) << 1; /* REX.R */ 667 c->modrm_reg = (c->rex_prefix & 4) << 1; /* REX.R */
@@ -844,16 +673,19 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
844 c->modrm_mod |= (c->modrm & 0xc0) >> 6; 673 c->modrm_mod |= (c->modrm & 0xc0) >> 6;
845 c->modrm_reg |= (c->modrm & 0x38) >> 3; 674 c->modrm_reg |= (c->modrm & 0x38) >> 3;
846 c->modrm_rm |= (c->modrm & 0x07); 675 c->modrm_rm |= (c->modrm & 0x07);
847 c->modrm_ea = 0; 676 c->modrm_seg = VCPU_SREG_DS;
848 c->use_modrm_ea = 1;
849 677
850 if (c->modrm_mod == 3) { 678 if (c->modrm_mod == 3) {
851 c->modrm_ptr = decode_register(c->modrm_rm, 679 op->type = OP_REG;
680 op->bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
681 op->addr.reg = decode_register(c->modrm_rm,
852 c->regs, c->d & ByteOp); 682 c->regs, c->d & ByteOp);
853 c->modrm_val = *(unsigned long *)c->modrm_ptr; 683 fetch_register_operand(op);
854 return rc; 684 return rc;
855 } 685 }
856 686
687 op->type = OP_MEM;
688
857 if (c->ad_bytes == 2) { 689 if (c->ad_bytes == 2) {
858 unsigned bx = c->regs[VCPU_REGS_RBX]; 690 unsigned bx = c->regs[VCPU_REGS_RBX];
859 unsigned bp = c->regs[VCPU_REGS_RBP]; 691 unsigned bp = c->regs[VCPU_REGS_RBP];
@@ -864,47 +696,46 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
864 switch (c->modrm_mod) { 696 switch (c->modrm_mod) {
865 case 0: 697 case 0:
866 if (c->modrm_rm == 6) 698 if (c->modrm_rm == 6)
867 c->modrm_ea += insn_fetch(u16, 2, c->eip); 699 modrm_ea += insn_fetch(u16, 2, c->eip);
868 break; 700 break;
869 case 1: 701 case 1:
870 c->modrm_ea += insn_fetch(s8, 1, c->eip); 702 modrm_ea += insn_fetch(s8, 1, c->eip);
871 break; 703 break;
872 case 2: 704 case 2:
873 c->modrm_ea += insn_fetch(u16, 2, c->eip); 705 modrm_ea += insn_fetch(u16, 2, c->eip);
874 break; 706 break;
875 } 707 }
876 switch (c->modrm_rm) { 708 switch (c->modrm_rm) {
877 case 0: 709 case 0:
878 c->modrm_ea += bx + si; 710 modrm_ea += bx + si;
879 break; 711 break;
880 case 1: 712 case 1:
881 c->modrm_ea += bx + di; 713 modrm_ea += bx + di;
882 break; 714 break;
883 case 2: 715 case 2:
884 c->modrm_ea += bp + si; 716 modrm_ea += bp + si;
885 break; 717 break;
886 case 3: 718 case 3:
887 c->modrm_ea += bp + di; 719 modrm_ea += bp + di;
888 break; 720 break;
889 case 4: 721 case 4:
890 c->modrm_ea += si; 722 modrm_ea += si;
891 break; 723 break;
892 case 5: 724 case 5:
893 c->modrm_ea += di; 725 modrm_ea += di;
894 break; 726 break;
895 case 6: 727 case 6:
896 if (c->modrm_mod != 0) 728 if (c->modrm_mod != 0)
897 c->modrm_ea += bp; 729 modrm_ea += bp;
898 break; 730 break;
899 case 7: 731 case 7:
900 c->modrm_ea += bx; 732 modrm_ea += bx;
901 break; 733 break;
902 } 734 }
903 if (c->modrm_rm == 2 || c->modrm_rm == 3 || 735 if (c->modrm_rm == 2 || c->modrm_rm == 3 ||
904 (c->modrm_rm == 6 && c->modrm_mod != 0)) 736 (c->modrm_rm == 6 && c->modrm_mod != 0))
905 if (!c->has_seg_override) 737 c->modrm_seg = VCPU_SREG_SS;
906 set_seg_override(c, VCPU_SREG_SS); 738 modrm_ea = (u16)modrm_ea;
907 c->modrm_ea = (u16)c->modrm_ea;
908 } else { 739 } else {
909 /* 32/64-bit ModR/M decode. */ 740 /* 32/64-bit ModR/M decode. */
910 if ((c->modrm_rm & 7) == 4) { 741 if ((c->modrm_rm & 7) == 4) {
@@ -914,410 +745,74 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
914 scale = sib >> 6; 745 scale = sib >> 6;
915 746
916 if ((base_reg & 7) == 5 && c->modrm_mod == 0) 747 if ((base_reg & 7) == 5 && c->modrm_mod == 0)
917 c->modrm_ea += insn_fetch(s32, 4, c->eip); 748 modrm_ea += insn_fetch(s32, 4, c->eip);
918 else 749 else
919 c->modrm_ea += c->regs[base_reg]; 750 modrm_ea += c->regs[base_reg];
920 if (index_reg != 4) 751 if (index_reg != 4)
921 c->modrm_ea += c->regs[index_reg] << scale; 752 modrm_ea += c->regs[index_reg] << scale;
922 } else if ((c->modrm_rm & 7) == 5 && c->modrm_mod == 0) { 753 } else if ((c->modrm_rm & 7) == 5 && c->modrm_mod == 0) {
923 if (ctxt->mode == X86EMUL_MODE_PROT64) 754 if (ctxt->mode == X86EMUL_MODE_PROT64)
924 c->rip_relative = 1; 755 c->rip_relative = 1;
925 } else 756 } else
926 c->modrm_ea += c->regs[c->modrm_rm]; 757 modrm_ea += c->regs[c->modrm_rm];
927 switch (c->modrm_mod) { 758 switch (c->modrm_mod) {
928 case 0: 759 case 0:
929 if (c->modrm_rm == 5) 760 if (c->modrm_rm == 5)
930 c->modrm_ea += insn_fetch(s32, 4, c->eip); 761 modrm_ea += insn_fetch(s32, 4, c->eip);
931 break; 762 break;
932 case 1: 763 case 1:
933 c->modrm_ea += insn_fetch(s8, 1, c->eip); 764 modrm_ea += insn_fetch(s8, 1, c->eip);
934 break; 765 break;
935 case 2: 766 case 2:
936 c->modrm_ea += insn_fetch(s32, 4, c->eip); 767 modrm_ea += insn_fetch(s32, 4, c->eip);
937 break; 768 break;
938 } 769 }
939 } 770 }
771 op->addr.mem = modrm_ea;
940done: 772done:
941 return rc; 773 return rc;
942} 774}
943 775
944static int decode_abs(struct x86_emulate_ctxt *ctxt, 776static int decode_abs(struct x86_emulate_ctxt *ctxt,
945 struct x86_emulate_ops *ops) 777 struct x86_emulate_ops *ops,
778 struct operand *op)
946{ 779{
947 struct decode_cache *c = &ctxt->decode; 780 struct decode_cache *c = &ctxt->decode;
948 int rc = X86EMUL_CONTINUE; 781 int rc = X86EMUL_CONTINUE;
949 782
783 op->type = OP_MEM;
950 switch (c->ad_bytes) { 784 switch (c->ad_bytes) {
951 case 2: 785 case 2:
952 c->modrm_ea = insn_fetch(u16, 2, c->eip); 786 op->addr.mem = insn_fetch(u16, 2, c->eip);
953 break; 787 break;
954 case 4: 788 case 4:
955 c->modrm_ea = insn_fetch(u32, 4, c->eip); 789 op->addr.mem = insn_fetch(u32, 4, c->eip);
956 break; 790 break;
957 case 8: 791 case 8:
958 c->modrm_ea = insn_fetch(u64, 8, c->eip); 792 op->addr.mem = insn_fetch(u64, 8, c->eip);
959 break; 793 break;
960 } 794 }
961done: 795done:
962 return rc; 796 return rc;
963} 797}
964 798
965int 799static void fetch_bit_operand(struct decode_cache *c)
966x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
967{ 800{
968 struct decode_cache *c = &ctxt->decode; 801 long sv = 0, mask;
969 int rc = X86EMUL_CONTINUE;
970 int mode = ctxt->mode;
971 int def_op_bytes, def_ad_bytes, group;
972
973
974 /* we cannot decode insn before we complete previous rep insn */
975 WARN_ON(ctxt->restart);
976
977 c->eip = ctxt->eip;
978 c->fetch.start = c->fetch.end = c->eip;
979 ctxt->cs_base = seg_base(ctxt, ops, VCPU_SREG_CS);
980
981 switch (mode) {
982 case X86EMUL_MODE_REAL:
983 case X86EMUL_MODE_VM86:
984 case X86EMUL_MODE_PROT16:
985 def_op_bytes = def_ad_bytes = 2;
986 break;
987 case X86EMUL_MODE_PROT32:
988 def_op_bytes = def_ad_bytes = 4;
989 break;
990#ifdef CONFIG_X86_64
991 case X86EMUL_MODE_PROT64:
992 def_op_bytes = 4;
993 def_ad_bytes = 8;
994 break;
995#endif
996 default:
997 return -1;
998 }
999
1000 c->op_bytes = def_op_bytes;
1001 c->ad_bytes = def_ad_bytes;
1002
1003 /* Legacy prefixes. */
1004 for (;;) {
1005 switch (c->b = insn_fetch(u8, 1, c->eip)) {
1006 case 0x66: /* operand-size override */
1007 /* switch between 2/4 bytes */
1008 c->op_bytes = def_op_bytes ^ 6;
1009 break;
1010 case 0x67: /* address-size override */
1011 if (mode == X86EMUL_MODE_PROT64)
1012 /* switch between 4/8 bytes */
1013 c->ad_bytes = def_ad_bytes ^ 12;
1014 else
1015 /* switch between 2/4 bytes */
1016 c->ad_bytes = def_ad_bytes ^ 6;
1017 break;
1018 case 0x26: /* ES override */
1019 case 0x2e: /* CS override */
1020 case 0x36: /* SS override */
1021 case 0x3e: /* DS override */
1022 set_seg_override(c, (c->b >> 3) & 3);
1023 break;
1024 case 0x64: /* FS override */
1025 case 0x65: /* GS override */
1026 set_seg_override(c, c->b & 7);
1027 break;
1028 case 0x40 ... 0x4f: /* REX */
1029 if (mode != X86EMUL_MODE_PROT64)
1030 goto done_prefixes;
1031 c->rex_prefix = c->b;
1032 continue;
1033 case 0xf0: /* LOCK */
1034 c->lock_prefix = 1;
1035 break;
1036 case 0xf2: /* REPNE/REPNZ */
1037 c->rep_prefix = REPNE_PREFIX;
1038 break;
1039 case 0xf3: /* REP/REPE/REPZ */
1040 c->rep_prefix = REPE_PREFIX;
1041 break;
1042 default:
1043 goto done_prefixes;
1044 }
1045
1046 /* Any legacy prefix after a REX prefix nullifies its effect. */
1047
1048 c->rex_prefix = 0;
1049 }
1050
1051done_prefixes:
1052
1053 /* REX prefix. */
1054 if (c->rex_prefix)
1055 if (c->rex_prefix & 8)
1056 c->op_bytes = 8; /* REX.W */
1057
1058 /* Opcode byte(s). */
1059 c->d = opcode_table[c->b];
1060 if (c->d == 0) {
1061 /* Two-byte opcode? */
1062 if (c->b == 0x0f) {
1063 c->twobyte = 1;
1064 c->b = insn_fetch(u8, 1, c->eip);
1065 c->d = twobyte_table[c->b];
1066 }
1067 }
1068
1069 if (c->d & Group) {
1070 group = c->d & GroupMask;
1071 c->modrm = insn_fetch(u8, 1, c->eip);
1072 --c->eip;
1073
1074 group = (group << 3) + ((c->modrm >> 3) & 7);
1075 if ((c->d & GroupDual) && (c->modrm >> 6) == 3)
1076 c->d = group2_table[group];
1077 else
1078 c->d = group_table[group];
1079 }
1080
1081 /* Unrecognised? */
1082 if (c->d == 0) {
1083 DPRINTF("Cannot emulate %02x\n", c->b);
1084 return -1;
1085 }
1086
1087 if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
1088 c->op_bytes = 8;
1089
1090 /* ModRM and SIB bytes. */
1091 if (c->d & ModRM)
1092 rc = decode_modrm(ctxt, ops);
1093 else if (c->d & MemAbs)
1094 rc = decode_abs(ctxt, ops);
1095 if (rc != X86EMUL_CONTINUE)
1096 goto done;
1097
1098 if (!c->has_seg_override)
1099 set_seg_override(c, VCPU_SREG_DS);
1100
1101 if (!(!c->twobyte && c->b == 0x8d))
1102 c->modrm_ea += seg_override_base(ctxt, ops, c);
1103
1104 if (c->ad_bytes != 8)
1105 c->modrm_ea = (u32)c->modrm_ea;
1106
1107 if (c->rip_relative)
1108 c->modrm_ea += c->eip;
1109
1110 /*
1111 * Decode and fetch the source operand: register, memory
1112 * or immediate.
1113 */
1114 switch (c->d & SrcMask) {
1115 case SrcNone:
1116 break;
1117 case SrcReg:
1118 decode_register_operand(&c->src, c, 0);
1119 break;
1120 case SrcMem16:
1121 c->src.bytes = 2;
1122 goto srcmem_common;
1123 case SrcMem32:
1124 c->src.bytes = 4;
1125 goto srcmem_common;
1126 case SrcMem:
1127 c->src.bytes = (c->d & ByteOp) ? 1 :
1128 c->op_bytes;
1129 /* Don't fetch the address for invlpg: it could be unmapped. */
1130 if (c->twobyte && c->b == 0x01 && c->modrm_reg == 7)
1131 break;
1132 srcmem_common:
1133 /*
1134 * For instructions with a ModR/M byte, switch to register
1135 * access if Mod = 3.
1136 */
1137 if ((c->d & ModRM) && c->modrm_mod == 3) {
1138 c->src.type = OP_REG;
1139 c->src.val = c->modrm_val;
1140 c->src.ptr = c->modrm_ptr;
1141 break;
1142 }
1143 c->src.type = OP_MEM;
1144 c->src.ptr = (unsigned long *)c->modrm_ea;
1145 c->src.val = 0;
1146 break;
1147 case SrcImm:
1148 case SrcImmU:
1149 c->src.type = OP_IMM;
1150 c->src.ptr = (unsigned long *)c->eip;
1151 c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
1152 if (c->src.bytes == 8)
1153 c->src.bytes = 4;
1154 /* NB. Immediates are sign-extended as necessary. */
1155 switch (c->src.bytes) {
1156 case 1:
1157 c->src.val = insn_fetch(s8, 1, c->eip);
1158 break;
1159 case 2:
1160 c->src.val = insn_fetch(s16, 2, c->eip);
1161 break;
1162 case 4:
1163 c->src.val = insn_fetch(s32, 4, c->eip);
1164 break;
1165 }
1166 if ((c->d & SrcMask) == SrcImmU) {
1167 switch (c->src.bytes) {
1168 case 1:
1169 c->src.val &= 0xff;
1170 break;
1171 case 2:
1172 c->src.val &= 0xffff;
1173 break;
1174 case 4:
1175 c->src.val &= 0xffffffff;
1176 break;
1177 }
1178 }
1179 break;
1180 case SrcImmByte:
1181 case SrcImmUByte:
1182 c->src.type = OP_IMM;
1183 c->src.ptr = (unsigned long *)c->eip;
1184 c->src.bytes = 1;
1185 if ((c->d & SrcMask) == SrcImmByte)
1186 c->src.val = insn_fetch(s8, 1, c->eip);
1187 else
1188 c->src.val = insn_fetch(u8, 1, c->eip);
1189 break;
1190 case SrcAcc:
1191 c->src.type = OP_REG;
1192 c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
1193 c->src.ptr = &c->regs[VCPU_REGS_RAX];
1194 switch (c->src.bytes) {
1195 case 1:
1196 c->src.val = *(u8 *)c->src.ptr;
1197 break;
1198 case 2:
1199 c->src.val = *(u16 *)c->src.ptr;
1200 break;
1201 case 4:
1202 c->src.val = *(u32 *)c->src.ptr;
1203 break;
1204 case 8:
1205 c->src.val = *(u64 *)c->src.ptr;
1206 break;
1207 }
1208 break;
1209 case SrcOne:
1210 c->src.bytes = 1;
1211 c->src.val = 1;
1212 break;
1213 case SrcSI:
1214 c->src.type = OP_MEM;
1215 c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
1216 c->src.ptr = (unsigned long *)
1217 register_address(c, seg_override_base(ctxt, ops, c),
1218 c->regs[VCPU_REGS_RSI]);
1219 c->src.val = 0;
1220 break;
1221 case SrcImmFAddr:
1222 c->src.type = OP_IMM;
1223 c->src.ptr = (unsigned long *)c->eip;
1224 c->src.bytes = c->op_bytes + 2;
1225 insn_fetch_arr(c->src.valptr, c->src.bytes, c->eip);
1226 break;
1227 case SrcMemFAddr:
1228 c->src.type = OP_MEM;
1229 c->src.ptr = (unsigned long *)c->modrm_ea;
1230 c->src.bytes = c->op_bytes + 2;
1231 break;
1232 }
1233 802
1234 /* 803 if (c->dst.type == OP_MEM && c->src.type == OP_REG) {
1235 * Decode and fetch the second source operand: register, memory 804 mask = ~(c->dst.bytes * 8 - 1);
1236 * or immediate.
1237 */
1238 switch (c->d & Src2Mask) {
1239 case Src2None:
1240 break;
1241 case Src2CL:
1242 c->src2.bytes = 1;
1243 c->src2.val = c->regs[VCPU_REGS_RCX] & 0x8;
1244 break;
1245 case Src2ImmByte:
1246 c->src2.type = OP_IMM;
1247 c->src2.ptr = (unsigned long *)c->eip;
1248 c->src2.bytes = 1;
1249 c->src2.val = insn_fetch(u8, 1, c->eip);
1250 break;
1251 case Src2One:
1252 c->src2.bytes = 1;
1253 c->src2.val = 1;
1254 break;
1255 }
1256 805
1257 /* Decode and fetch the destination operand: register or memory. */ 806 if (c->src.bytes == 2)
1258 switch (c->d & DstMask) { 807 sv = (s16)c->src.val & (s16)mask;
1259 case ImplicitOps: 808 else if (c->src.bytes == 4)
1260 /* Special instructions do their own operand decoding. */ 809 sv = (s32)c->src.val & (s32)mask;
1261 return 0;
1262 case DstReg:
1263 decode_register_operand(&c->dst, c,
1264 c->twobyte && (c->b == 0xb6 || c->b == 0xb7));
1265 break;
1266 case DstMem:
1267 case DstMem64:
1268 if ((c->d & ModRM) && c->modrm_mod == 3) {
1269 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
1270 c->dst.type = OP_REG;
1271 c->dst.val = c->dst.orig_val = c->modrm_val;
1272 c->dst.ptr = c->modrm_ptr;
1273 break;
1274 }
1275 c->dst.type = OP_MEM;
1276 c->dst.ptr = (unsigned long *)c->modrm_ea;
1277 if ((c->d & DstMask) == DstMem64)
1278 c->dst.bytes = 8;
1279 else
1280 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
1281 c->dst.val = 0;
1282 if (c->d & BitOp) {
1283 unsigned long mask = ~(c->dst.bytes * 8 - 1);
1284 810
1285 c->dst.ptr = (void *)c->dst.ptr + 811 c->dst.addr.mem += (sv >> 3);
1286 (c->src.val & mask) / 8;
1287 }
1288 break;
1289 case DstAcc:
1290 c->dst.type = OP_REG;
1291 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
1292 c->dst.ptr = &c->regs[VCPU_REGS_RAX];
1293 switch (c->dst.bytes) {
1294 case 1:
1295 c->dst.val = *(u8 *)c->dst.ptr;
1296 break;
1297 case 2:
1298 c->dst.val = *(u16 *)c->dst.ptr;
1299 break;
1300 case 4:
1301 c->dst.val = *(u32 *)c->dst.ptr;
1302 break;
1303 case 8:
1304 c->dst.val = *(u64 *)c->dst.ptr;
1305 break;
1306 }
1307 c->dst.orig_val = c->dst.val;
1308 break;
1309 case DstDI:
1310 c->dst.type = OP_MEM;
1311 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
1312 c->dst.ptr = (unsigned long *)
1313 register_address(c, es_base(ctxt, ops),
1314 c->regs[VCPU_REGS_RDI]);
1315 c->dst.val = 0;
1316 break;
1317 } 812 }
1318 813
1319done: 814 /* only subword offset */
1320 return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0; 815 c->src.val &= (c->dst.bytes << 3) - 1;
1321} 816}
1322 817
1323static int read_emulated(struct x86_emulate_ctxt *ctxt, 818static int read_emulated(struct x86_emulate_ctxt *ctxt,
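
fetch_bit_operand() above folds the whole-word part of a bit offset into the destination address and leaves only the sub-word bit index in the source, matching how bt/bts/btr/btc address memory. A standalone sketch of that arithmetic for a 16-bit operand (illustration only, not patch code):

#include <stdio.h>

int main(void)
{
	long bit_offset = 37;			/* value taken from the source register */
	int op_bytes = 2;			/* 16-bit destination operand */
	long mask = ~(long)(op_bytes * 8 - 1);

	long byte_adjust = (bit_offset & mask) >> 3;		/* address += 4 bytes */
	long in_word_bit = bit_offset & (op_bytes * 8 - 1);	/* bit 5 of that word */

	printf("address += %ld bytes, bit index %ld\n", byte_adjust, in_word_bit);
	return 0;
}
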
@@ -1337,7 +832,7 @@ static int read_emulated(struct x86_emulate_ctxt *ctxt,
1337 rc = ops->read_emulated(addr, mc->data + mc->end, n, &err, 832 rc = ops->read_emulated(addr, mc->data + mc->end, n, &err,
1338 ctxt->vcpu); 833 ctxt->vcpu);
1339 if (rc == X86EMUL_PROPAGATE_FAULT) 834 if (rc == X86EMUL_PROPAGATE_FAULT)
1340 emulate_pf(ctxt, addr, err); 835 emulate_pf(ctxt);
1341 if (rc != X86EMUL_CONTINUE) 836 if (rc != X86EMUL_CONTINUE)
1342 return rc; 837 return rc;
1343 mc->end += n; 838 mc->end += n;
@@ -1424,7 +919,7 @@ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1424 addr = dt.address + index * 8; 919 addr = dt.address + index * 8;
1425 ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu, &err); 920 ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
1426 if (ret == X86EMUL_PROPAGATE_FAULT) 921 if (ret == X86EMUL_PROPAGATE_FAULT)
1427 emulate_pf(ctxt, addr, err); 922 emulate_pf(ctxt);
1428 923
1429 return ret; 924 return ret;
1430} 925}
@@ -1450,7 +945,7 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1450 addr = dt.address + index * 8; 945 addr = dt.address + index * 8;
1451 ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu, &err); 946 ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
1452 if (ret == X86EMUL_PROPAGATE_FAULT) 947 if (ret == X86EMUL_PROPAGATE_FAULT)
1453 emulate_pf(ctxt, addr, err); 948 emulate_pf(ctxt);
1454 949
1455 return ret; 950 return ret;
1456} 951}
@@ -1573,6 +1068,25 @@ exception:
1573 return X86EMUL_PROPAGATE_FAULT; 1068 return X86EMUL_PROPAGATE_FAULT;
1574} 1069}
1575 1070
1071static void write_register_operand(struct operand *op)
1072{
1073 /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
1074 switch (op->bytes) {
1075 case 1:
1076 *(u8 *)op->addr.reg = (u8)op->val;
1077 break;
1078 case 2:
1079 *(u16 *)op->addr.reg = (u16)op->val;
1080 break;
1081 case 4:
1082 *op->addr.reg = (u32)op->val;
1083 break; /* 64b: zero-extend */
1084 case 8:
1085 *op->addr.reg = op->val;
1086 break;
1087 }
1088}
1089
1576static inline int writeback(struct x86_emulate_ctxt *ctxt, 1090static inline int writeback(struct x86_emulate_ctxt *ctxt,
1577 struct x86_emulate_ops *ops) 1091 struct x86_emulate_ops *ops)
1578{ 1092{
@@ -1582,28 +1096,12 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt,
1582 1096
1583 switch (c->dst.type) { 1097 switch (c->dst.type) {
1584 case OP_REG: 1098 case OP_REG:
1585 /* The 4-byte case *is* correct: 1099 write_register_operand(&c->dst);
1586 * in 64-bit mode we zero-extend.
1587 */
1588 switch (c->dst.bytes) {
1589 case 1:
1590 *(u8 *)c->dst.ptr = (u8)c->dst.val;
1591 break;
1592 case 2:
1593 *(u16 *)c->dst.ptr = (u16)c->dst.val;
1594 break;
1595 case 4:
1596 *c->dst.ptr = (u32)c->dst.val;
1597 break; /* 64b: zero-ext */
1598 case 8:
1599 *c->dst.ptr = c->dst.val;
1600 break;
1601 }
1602 break; 1100 break;
1603 case OP_MEM: 1101 case OP_MEM:
1604 if (c->lock_prefix) 1102 if (c->lock_prefix)
1605 rc = ops->cmpxchg_emulated( 1103 rc = ops->cmpxchg_emulated(
1606 (unsigned long)c->dst.ptr, 1104 c->dst.addr.mem,
1607 &c->dst.orig_val, 1105 &c->dst.orig_val,
1608 &c->dst.val, 1106 &c->dst.val,
1609 c->dst.bytes, 1107 c->dst.bytes,
@@ -1611,14 +1109,13 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt,
1611 ctxt->vcpu); 1109 ctxt->vcpu);
1612 else 1110 else
1613 rc = ops->write_emulated( 1111 rc = ops->write_emulated(
1614 (unsigned long)c->dst.ptr, 1112 c->dst.addr.mem,
1615 &c->dst.val, 1113 &c->dst.val,
1616 c->dst.bytes, 1114 c->dst.bytes,
1617 &err, 1115 &err,
1618 ctxt->vcpu); 1116 ctxt->vcpu);
1619 if (rc == X86EMUL_PROPAGATE_FAULT) 1117 if (rc == X86EMUL_PROPAGATE_FAULT)
1620 emulate_pf(ctxt, 1118 emulate_pf(ctxt);
1621 (unsigned long)c->dst.ptr, err);
1622 if (rc != X86EMUL_CONTINUE) 1119 if (rc != X86EMUL_CONTINUE)
1623 return rc; 1120 return rc;
1624 break; 1121 break;
@@ -1640,8 +1137,8 @@ static inline void emulate_push(struct x86_emulate_ctxt *ctxt,
1640 c->dst.bytes = c->op_bytes; 1137 c->dst.bytes = c->op_bytes;
1641 c->dst.val = c->src.val; 1138 c->dst.val = c->src.val;
1642 register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes); 1139 register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
1643 c->dst.ptr = (void *) register_address(c, ss_base(ctxt, ops), 1140 c->dst.addr.mem = register_address(c, ss_base(ctxt, ops),
1644 c->regs[VCPU_REGS_RSP]); 1141 c->regs[VCPU_REGS_RSP]);
1645} 1142}
1646 1143
1647static int emulate_pop(struct x86_emulate_ctxt *ctxt, 1144static int emulate_pop(struct x86_emulate_ctxt *ctxt,
@@ -1701,6 +1198,9 @@ static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1701 *(unsigned long *)dest = 1198 *(unsigned long *)dest =
1702 (ctxt->eflags & ~change_mask) | (val & change_mask); 1199 (ctxt->eflags & ~change_mask) | (val & change_mask);
1703 1200
1201 if (rc == X86EMUL_PROPAGATE_FAULT)
1202 emulate_pf(ctxt);
1203
1704 return rc; 1204 return rc;
1705} 1205}
1706 1206
@@ -1778,6 +1278,150 @@ static int emulate_popa(struct x86_emulate_ctxt *ctxt,
1778 return rc; 1278 return rc;
1779} 1279}
1780 1280
1281int emulate_int_real(struct x86_emulate_ctxt *ctxt,
1282 struct x86_emulate_ops *ops, int irq)
1283{
1284 struct decode_cache *c = &ctxt->decode;
1285 int rc;
1286 struct desc_ptr dt;
1287 gva_t cs_addr;
1288 gva_t eip_addr;
1289 u16 cs, eip;
1290 u32 err;
1291
1292 /* TODO: Add limit checks */
1293 c->src.val = ctxt->eflags;
1294 emulate_push(ctxt, ops);
1295 rc = writeback(ctxt, ops);
1296 if (rc != X86EMUL_CONTINUE)
1297 return rc;
1298
1299 ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
1300
1301 c->src.val = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
1302 emulate_push(ctxt, ops);
1303 rc = writeback(ctxt, ops);
1304 if (rc != X86EMUL_CONTINUE)
1305 return rc;
1306
1307 c->src.val = c->eip;
1308 emulate_push(ctxt, ops);
1309 rc = writeback(ctxt, ops);
1310 if (rc != X86EMUL_CONTINUE)
1311 return rc;
1312
1313 c->dst.type = OP_NONE;
1314
1315 ops->get_idt(&dt, ctxt->vcpu);
1316
1317 eip_addr = dt.address + (irq << 2);
1318 cs_addr = dt.address + (irq << 2) + 2;
1319
1320 rc = ops->read_std(cs_addr, &cs, 2, ctxt->vcpu, &err);
1321 if (rc != X86EMUL_CONTINUE)
1322 return rc;
1323
1324 rc = ops->read_std(eip_addr, &eip, 2, ctxt->vcpu, &err);
1325 if (rc != X86EMUL_CONTINUE)
1326 return rc;
1327
1328 rc = load_segment_descriptor(ctxt, ops, cs, VCPU_SREG_CS);
1329 if (rc != X86EMUL_CONTINUE)
1330 return rc;
1331
1332 c->eip = eip;
1333
1334 return rc;
1335}
1336
1337static int emulate_int(struct x86_emulate_ctxt *ctxt,
1338 struct x86_emulate_ops *ops, int irq)
1339{
1340 switch(ctxt->mode) {
1341 case X86EMUL_MODE_REAL:
1342 return emulate_int_real(ctxt, ops, irq);
1343 case X86EMUL_MODE_VM86:
1344 case X86EMUL_MODE_PROT16:
1345 case X86EMUL_MODE_PROT32:
1346 case X86EMUL_MODE_PROT64:
1347 default:
1348 /* Protected mode interrupts unimplemented yet */
1349 return X86EMUL_UNHANDLEABLE;
1350 }
1351}
1352
1353static int emulate_iret_real(struct x86_emulate_ctxt *ctxt,
1354 struct x86_emulate_ops *ops)
1355{
1356 struct decode_cache *c = &ctxt->decode;
1357 int rc = X86EMUL_CONTINUE;
1358 unsigned long temp_eip = 0;
1359 unsigned long temp_eflags = 0;
1360 unsigned long cs = 0;
1361 unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
1362 EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
1363 EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
1364 unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
1365
1366 /* TODO: Add stack limit check */
1367
1368 rc = emulate_pop(ctxt, ops, &temp_eip, c->op_bytes);
1369
1370 if (rc != X86EMUL_CONTINUE)
1371 return rc;
1372
1373 if (temp_eip & ~0xffff) {
1374 emulate_gp(ctxt, 0);
1375 return X86EMUL_PROPAGATE_FAULT;
1376 }
1377
1378 rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
1379
1380 if (rc != X86EMUL_CONTINUE)
1381 return rc;
1382
1383 rc = emulate_pop(ctxt, ops, &temp_eflags, c->op_bytes);
1384
1385 if (rc != X86EMUL_CONTINUE)
1386 return rc;
1387
1388 rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
1389
1390 if (rc != X86EMUL_CONTINUE)
1391 return rc;
1392
1393 c->eip = temp_eip;
1394
1395
1396 if (c->op_bytes == 4)
1397 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
1398 else if (c->op_bytes == 2) {
1399 ctxt->eflags &= ~0xffff;
1400 ctxt->eflags |= temp_eflags;
1401 }
1402
1403 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
1404 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
1405
1406 return rc;
1407}
1408
1409static inline int emulate_iret(struct x86_emulate_ctxt *ctxt,
1410 struct x86_emulate_ops* ops)
1411{
1412 switch(ctxt->mode) {
1413 case X86EMUL_MODE_REAL:
1414 return emulate_iret_real(ctxt, ops);
1415 case X86EMUL_MODE_VM86:
1416 case X86EMUL_MODE_PROT16:
1417 case X86EMUL_MODE_PROT32:
1418 case X86EMUL_MODE_PROT64:
1419 default:
1420 /* iret from protected mode unimplemented yet */
1421 return X86EMUL_UNHANDLEABLE;
1422 }
1423}
1424
1781static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt, 1425static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
1782 struct x86_emulate_ops *ops) 1426 struct x86_emulate_ops *ops)
1783{ 1427{
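
emulate_int_real() above walks the classic real-mode sequence: push FLAGS, CS and IP, then fetch the handler from the interrupt vector table, where each vector occupies four bytes (16-bit IP first, then 16-bit CS). A standalone sketch of the vector-table addressing (illustration only, not patch code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t idt_base = 0;	/* real-mode IVT base as reported by get_idt; 0 is typical */
	int irq = 8;		/* example vector */

	uint32_t eip_addr = idt_base + (irq << 2);	/* 32: 16-bit handler IP */
	uint32_t cs_addr  = idt_base + (irq << 2) + 2;	/* 34: 16-bit handler CS */

	printf("vector %d: IP at %u, CS at %u\n", irq, eip_addr, cs_addr);
	return 0;
}
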
@@ -1819,6 +1463,9 @@ static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
1819 struct x86_emulate_ops *ops) 1463 struct x86_emulate_ops *ops)
1820{ 1464{
1821 struct decode_cache *c = &ctxt->decode; 1465 struct decode_cache *c = &ctxt->decode;
1466 unsigned long *rax = &c->regs[VCPU_REGS_RAX];
1467 unsigned long *rdx = &c->regs[VCPU_REGS_RDX];
1468 u8 de = 0;
1822 1469
1823 switch (c->modrm_reg) { 1470 switch (c->modrm_reg) {
1824 case 0 ... 1: /* test */ 1471 case 0 ... 1: /* test */
@@ -1830,10 +1477,26 @@ static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
1830 case 3: /* neg */ 1477 case 3: /* neg */
1831 emulate_1op("neg", c->dst, ctxt->eflags); 1478 emulate_1op("neg", c->dst, ctxt->eflags);
1832 break; 1479 break;
1480 case 4: /* mul */
1481 emulate_1op_rax_rdx("mul", c->src, *rax, *rdx, ctxt->eflags);
1482 break;
1483 case 5: /* imul */
1484 emulate_1op_rax_rdx("imul", c->src, *rax, *rdx, ctxt->eflags);
1485 break;
1486 case 6: /* div */
1487 emulate_1op_rax_rdx_ex("div", c->src, *rax, *rdx,
1488 ctxt->eflags, de);
1489 break;
1490 case 7: /* idiv */
1491 emulate_1op_rax_rdx_ex("idiv", c->src, *rax, *rdx,
1492 ctxt->eflags, de);
1493 break;
1833 default: 1494 default:
1834 return 0; 1495 return X86EMUL_UNHANDLEABLE;
1835 } 1496 }
1836 return 1; 1497 if (de)
1498 return emulate_de(ctxt);
1499 return X86EMUL_CONTINUE;
1837} 1500}
1838 1501
1839static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt, 1502static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
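
For div/idiv the dividend is the RDX:RAX pair, the quotient lands in RAX and the remainder in RDX; a zero divisor or an oversized quotient is the #DE case that the new 'de' flag routes to emulate_de() above. A standalone sketch of the unsigned case follows (illustration only; do_div64() is a made-up helper, not kernel code):

#include <stdint.h>
#include <stdio.h>

static int do_div64(uint64_t *rax, uint64_t *rdx, uint64_t src)
{
	unsigned __int128 dividend = ((unsigned __int128)*rdx << 64) | *rax;

	if (src == 0 || dividend / src > UINT64_MAX)
		return 1;				/* would raise #DE */
	*rax = (uint64_t)(dividend / src);		/* quotient  */
	*rdx = (uint64_t)(dividend % src);		/* remainder */
	return 0;
}

int main(void)
{
	uint64_t rax = 100, rdx = 0;

	if (do_div64(&rax, &rdx, 7))
		puts("divide error (#DE)");
	else
		printf("quotient=%llu remainder=%llu\n",
		       (unsigned long long)rax, (unsigned long long)rdx);
	return 0;
}
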
@@ -1905,6 +1568,23 @@ static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
1905 return rc; 1568 return rc;
1906} 1569}
1907 1570
1571static int emulate_load_segment(struct x86_emulate_ctxt *ctxt,
1572 struct x86_emulate_ops *ops, int seg)
1573{
1574 struct decode_cache *c = &ctxt->decode;
1575 unsigned short sel;
1576 int rc;
1577
1578 memcpy(&sel, c->src.valptr + c->op_bytes, 2);
1579
1580 rc = load_segment_descriptor(ctxt, ops, sel, seg);
1581 if (rc != X86EMUL_CONTINUE)
1582 return rc;
1583
1584 c->dst.val = c->src.val;
1585 return rc;
1586}
1587
1908static inline void 1588static inline void
1909setup_syscalls_segments(struct x86_emulate_ctxt *ctxt, 1589setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
1910 struct x86_emulate_ops *ops, struct desc_struct *cs, 1590 struct x86_emulate_ops *ops, struct desc_struct *cs,
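
emulate_load_segment() above reads the selector out of a far-pointer operand: op_bytes of offset come first, immediately followed by the 16-bit selector, which is what the memcpy at valptr + op_bytes picks up. A standalone sketch of that layout (illustration only, not patch code):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	/* 16-bit far pointer 0x1234:0x5678 as it sits in memory (little-endian):
	 * offset bytes first, selector bytes after */
	unsigned char valptr[] = { 0x78, 0x56, 0x34, 0x12 };
	int op_bytes = 2;
	uint16_t sel;

	memcpy(&sel, valptr + op_bytes, 2);
	printf("selector = %04x\n", (unsigned)sel);	/* 1234 */
	return 0;
}
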
@@ -2160,9 +1840,15 @@ static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2160 struct x86_emulate_ops *ops, 1840 struct x86_emulate_ops *ops,
2161 u16 port, u16 len) 1841 u16 port, u16 len)
2162{ 1842{
1843 if (ctxt->perm_ok)
1844 return true;
1845
2163 if (emulator_bad_iopl(ctxt, ops)) 1846 if (emulator_bad_iopl(ctxt, ops))
2164 if (!emulator_io_port_access_allowed(ctxt, ops, port, len)) 1847 if (!emulator_io_port_access_allowed(ctxt, ops, port, len))
2165 return false; 1848 return false;
1849
1850 ctxt->perm_ok = true;
1851
2166 return true; 1852 return true;
2167} 1853}
2168 1854
@@ -2254,7 +1940,7 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2254 &err); 1940 &err);
2255 if (ret == X86EMUL_PROPAGATE_FAULT) { 1941 if (ret == X86EMUL_PROPAGATE_FAULT) {
2256 /* FIXME: need to provide precise fault address */ 1942 /* FIXME: need to provide precise fault address */
2257 emulate_pf(ctxt, old_tss_base, err); 1943 emulate_pf(ctxt);
2258 return ret; 1944 return ret;
2259 } 1945 }
2260 1946
@@ -2264,7 +1950,7 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2264 &err); 1950 &err);
2265 if (ret == X86EMUL_PROPAGATE_FAULT) { 1951 if (ret == X86EMUL_PROPAGATE_FAULT) {
2266 /* FIXME: need to provide precise fault address */ 1952 /* FIXME: need to provide precise fault address */
2267 emulate_pf(ctxt, old_tss_base, err); 1953 emulate_pf(ctxt);
2268 return ret; 1954 return ret;
2269 } 1955 }
2270 1956
@@ -2272,7 +1958,7 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2272 &err); 1958 &err);
2273 if (ret == X86EMUL_PROPAGATE_FAULT) { 1959 if (ret == X86EMUL_PROPAGATE_FAULT) {
2274 /* FIXME: need to provide precise fault address */ 1960 /* FIXME: need to provide precise fault address */
2275 emulate_pf(ctxt, new_tss_base, err); 1961 emulate_pf(ctxt);
2276 return ret; 1962 return ret;
2277 } 1963 }
2278 1964
@@ -2285,7 +1971,7 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2285 ctxt->vcpu, &err); 1971 ctxt->vcpu, &err);
2286 if (ret == X86EMUL_PROPAGATE_FAULT) { 1972 if (ret == X86EMUL_PROPAGATE_FAULT) {
2287 /* FIXME: need to provide precise fault address */ 1973 /* FIXME: need to provide precise fault address */
2288 emulate_pf(ctxt, new_tss_base, err); 1974 emulate_pf(ctxt);
2289 return ret; 1975 return ret;
2290 } 1976 }
2291 } 1977 }
@@ -2396,7 +2082,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2396 &err); 2082 &err);
2397 if (ret == X86EMUL_PROPAGATE_FAULT) { 2083 if (ret == X86EMUL_PROPAGATE_FAULT) {
2398 /* FIXME: need to provide precise fault address */ 2084 /* FIXME: need to provide precise fault address */
2399 emulate_pf(ctxt, old_tss_base, err); 2085 emulate_pf(ctxt);
2400 return ret; 2086 return ret;
2401 } 2087 }
2402 2088
@@ -2406,7 +2092,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2406 &err); 2092 &err);
2407 if (ret == X86EMUL_PROPAGATE_FAULT) { 2093 if (ret == X86EMUL_PROPAGATE_FAULT) {
2408 /* FIXME: need to provide precise fault address */ 2094 /* FIXME: need to provide precise fault address */
2409 emulate_pf(ctxt, old_tss_base, err); 2095 emulate_pf(ctxt);
2410 return ret; 2096 return ret;
2411 } 2097 }
2412 2098
@@ -2414,7 +2100,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2414 &err); 2100 &err);
2415 if (ret == X86EMUL_PROPAGATE_FAULT) { 2101 if (ret == X86EMUL_PROPAGATE_FAULT) {
2416 /* FIXME: need to provide precise fault address */ 2102 /* FIXME: need to provide precise fault address */
2417 emulate_pf(ctxt, new_tss_base, err); 2103 emulate_pf(ctxt);
2418 return ret; 2104 return ret;
2419 } 2105 }
2420 2106
@@ -2427,7 +2113,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2427 ctxt->vcpu, &err); 2113 ctxt->vcpu, &err);
2428 if (ret == X86EMUL_PROPAGATE_FAULT) { 2114 if (ret == X86EMUL_PROPAGATE_FAULT) {
2429 /* FIXME: need to provide precise fault address */ 2115 /* FIXME: need to provide precise fault address */
2430 emulate_pf(ctxt, new_tss_base, err); 2116 emulate_pf(ctxt);
2431 return ret; 2117 return ret;
2432 } 2118 }
2433 } 2119 }
@@ -2523,10 +2209,10 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2523} 2209}
2524 2210
2525int emulator_task_switch(struct x86_emulate_ctxt *ctxt, 2211int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2526 struct x86_emulate_ops *ops,
2527 u16 tss_selector, int reason, 2212 u16 tss_selector, int reason,
2528 bool has_error_code, u32 error_code) 2213 bool has_error_code, u32 error_code)
2529{ 2214{
2215 struct x86_emulate_ops *ops = ctxt->ops;
2530 struct decode_cache *c = &ctxt->decode; 2216 struct decode_cache *c = &ctxt->decode;
2531 int rc; 2217 int rc;
2532 2218
@@ -2552,16 +2238,784 @@ static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned long base,
2552 int df = (ctxt->eflags & EFLG_DF) ? -1 : 1; 2238 int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;
2553 2239
2554 register_address_increment(c, &c->regs[reg], df * op->bytes); 2240 register_address_increment(c, &c->regs[reg], df * op->bytes);
2555 op->ptr = (unsigned long *)register_address(c, base, c->regs[reg]); 2241 op->addr.mem = register_address(c, base, c->regs[reg]);
2242}
2243
2244static int em_push(struct x86_emulate_ctxt *ctxt)
2245{
2246 emulate_push(ctxt, ctxt->ops);
2247 return X86EMUL_CONTINUE;
2248}
2249
2250static int em_das(struct x86_emulate_ctxt *ctxt)
2251{
2252 struct decode_cache *c = &ctxt->decode;
2253 u8 al, old_al;
2254 bool af, cf, old_cf;
2255
2256 cf = ctxt->eflags & X86_EFLAGS_CF;
2257 al = c->dst.val;
2258
2259 old_al = al;
2260 old_cf = cf;
2261 cf = false;
2262 af = ctxt->eflags & X86_EFLAGS_AF;
2263 if ((al & 0x0f) > 9 || af) {
2264 al -= 6;
2265 cf = old_cf | (al >= 250);
2266 af = true;
2267 } else {
2268 af = false;
2269 }
2270 if (old_al > 0x99 || old_cf) {
2271 al -= 0x60;
2272 cf = true;
2273 }
2274
2275 c->dst.val = al;
2276 /* Set PF, ZF, SF */
2277 c->src.type = OP_IMM;
2278 c->src.val = 0;
2279 c->src.bytes = 1;
2280 emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
2281 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2282 if (cf)
2283 ctxt->eflags |= X86_EFLAGS_CF;
2284 if (af)
2285 ctxt->eflags |= X86_EFLAGS_AF;
2286 return X86EMUL_CONTINUE;
2287}
2288
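em_das() follows the architectural DAS recipe for adjusting AL after a packed-BCD subtraction. A compact reference model of the same adjustment, with CF and AF passed in and out explicitly (a sketch for clarity, not the emulator's code):

	#include <stdbool.h>
	#include <stdint.h>

	static uint8_t das(uint8_t al, bool *cf, bool *af)
	{
		uint8_t old_al = al;
		bool old_cf = *cf;

		*cf = false;
		if ((al & 0x0f) > 9 || *af) {
			*cf = old_cf || (al < 6);	/* borrow out of the low nibble */
			al -= 6;
			*af = true;
		} else {
			*af = false;
		}
		if (old_al > 0x99 || old_cf) {
			al -= 0x60;
			*cf = true;
		}
		return al;
	}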
2289static int em_call_far(struct x86_emulate_ctxt *ctxt)
2290{
2291 struct decode_cache *c = &ctxt->decode;
2292 u16 sel, old_cs;
2293 ulong old_eip;
2294 int rc;
2295
2296 old_cs = ctxt->ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
2297 old_eip = c->eip;
2298
2299 memcpy(&sel, c->src.valptr + c->op_bytes, 2);
2300 if (load_segment_descriptor(ctxt, ctxt->ops, sel, VCPU_SREG_CS))
2301 return X86EMUL_CONTINUE;
2302
2303 c->eip = 0;
2304 memcpy(&c->eip, c->src.valptr, c->op_bytes);
2305
2306 c->src.val = old_cs;
2307 emulate_push(ctxt, ctxt->ops);
2308 rc = writeback(ctxt, ctxt->ops);
2309 if (rc != X86EMUL_CONTINUE)
2310 return rc;
2311
2312 c->src.val = old_eip;
2313 emulate_push(ctxt, ctxt->ops);
2314 rc = writeback(ctxt, ctxt->ops);
2315 if (rc != X86EMUL_CONTINUE)
2316 return rc;
2317
2318 c->dst.type = OP_NONE;
2319
2320 return X86EMUL_CONTINUE;
2321}
2322
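The push order in em_call_far() is what makes a later far return work: the old CS is pushed first and the old EIP second, so RET far pops EIP and then CS. A toy model of that ordering (names are illustrative only):

	#include <stdint.h>

	struct toy_stack {
		uint32_t slot[8];
		int top;
	};

	static void push32(struct toy_stack *s, uint32_t v)
	{
		s->slot[s->top++] = v;
	}

	static void far_call(struct toy_stack *s, uint16_t *cs, uint32_t *eip,
			     uint16_t new_cs, uint32_t new_eip)
	{
		push32(s, *cs);		/* old CS pushed first ... */
		push32(s, *eip);	/* ... then old EIP */
		*cs = new_cs;
		*eip = new_eip;
	}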
2323static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
2324{
2325 struct decode_cache *c = &ctxt->decode;
2326 int rc;
2327
2328 c->dst.type = OP_REG;
2329 c->dst.addr.reg = &c->eip;
2330 c->dst.bytes = c->op_bytes;
2331 rc = emulate_pop(ctxt, ctxt->ops, &c->dst.val, c->op_bytes);
2332 if (rc != X86EMUL_CONTINUE)
2333 return rc;
2334 register_address_increment(c, &c->regs[VCPU_REGS_RSP], c->src.val);
2335 return X86EMUL_CONTINUE;
2336}
2337
2338static int em_imul(struct x86_emulate_ctxt *ctxt)
2339{
2340 struct decode_cache *c = &ctxt->decode;
2341
2342 emulate_2op_SrcV_nobyte("imul", c->src, c->dst, ctxt->eflags);
2343 return X86EMUL_CONTINUE;
2344}
2345
2346static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
2347{
2348 struct decode_cache *c = &ctxt->decode;
2349
2350 c->dst.val = c->src2.val;
2351 return em_imul(ctxt);
2352}
2353
2354static int em_cwd(struct x86_emulate_ctxt *ctxt)
2355{
2356 struct decode_cache *c = &ctxt->decode;
2357
2358 c->dst.type = OP_REG;
2359 c->dst.bytes = c->src.bytes;
2360 c->dst.addr.reg = &c->regs[VCPU_REGS_RDX];
2361 c->dst.val = ~((c->src.val >> (c->src.bytes * 8 - 1)) - 1);
2362
2363 return X86EMUL_CONTINUE;
2364}
2365
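The expression in em_cwd() is a branch-free sign fill: shifting the sign bit down leaves 0 or 1, subtracting 1 gives all-ones or zero, and the final complement flips that into the value RDX should hold. A small self-check of the trick (assumes the input is already truncated to the operand width):

	#include <assert.h>
	#include <stdint.h>

	static uint64_t sign_fill(uint64_t val, int bytes)
	{
		return ~((val >> (bytes * 8 - 1)) - 1);
	}

	int main(void)
	{
		assert(sign_fill(0x7fff, 2) == 0);	/* positive AX -> DX = 0 */
		assert(sign_fill(0x8000, 2) == ~0ULL);	/* negative AX -> DX = all ones */
		return 0;
	}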
2366static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
2367{
2368 unsigned cpl = ctxt->ops->cpl(ctxt->vcpu);
2369 struct decode_cache *c = &ctxt->decode;
2370 u64 tsc = 0;
2371
2372 if (cpl > 0 && (ctxt->ops->get_cr(4, ctxt->vcpu) & X86_CR4_TSD)) {
2373 emulate_gp(ctxt, 0);
2374 return X86EMUL_PROPAGATE_FAULT;
2375 }
2376 ctxt->ops->get_msr(ctxt->vcpu, MSR_IA32_TSC, &tsc);
2377 c->regs[VCPU_REGS_RAX] = (u32)tsc;
2378 c->regs[VCPU_REGS_RDX] = tsc >> 32;
2379 return X86EMUL_CONTINUE;
2380}
2381
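em_rdtsc() enforces the CR4.TSD rule before reading the TSC MSR: with TSD set, RDTSC is privileged and any CPL above 0 gets #GP(0). The predicate in isolation (constant defined locally for the sketch):

	#include <stdbool.h>

	#define CR4_TSD (1ul << 2)	/* local definition for the sketch */

	static bool rdtsc_allowed(unsigned int cpl, unsigned long cr4)
	{
		return cpl == 0 || !(cr4 & CR4_TSD);
	}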
2382static int em_mov(struct x86_emulate_ctxt *ctxt)
2383{
2384 struct decode_cache *c = &ctxt->decode;
2385 c->dst.val = c->src.val;
2386 return X86EMUL_CONTINUE;
2387}
2388
2389#define D(_y) { .flags = (_y) }
2390#define N D(0)
2391#define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) }
2392#define GD(_f, _g) { .flags = ((_f) | Group | GroupDual), .u.gdual = (_g) }
2393#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
2394
2395#define D2bv(_f) D((_f) | ByteOp), D(_f)
2396#define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
2397
2398#define D6ALU(_f) D2bv((_f) | DstMem | SrcReg | ModRM), \
2399 D2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock), \
2400 D2bv(((_f) & ~Lock) | DstAcc | SrcImm)
2401
2402
2403static struct opcode group1[] = {
2404 X7(D(Lock)), N
2405};
2406
2407static struct opcode group1A[] = {
2408 D(DstMem | SrcNone | ModRM | Mov | Stack), N, N, N, N, N, N, N,
2409};
2410
2411static struct opcode group3[] = {
2412 D(DstMem | SrcImm | ModRM), D(DstMem | SrcImm | ModRM),
2413 D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
2414 X4(D(SrcMem | ModRM)),
2415};
2416
2417static struct opcode group4[] = {
2418 D(ByteOp | DstMem | SrcNone | ModRM | Lock), D(ByteOp | DstMem | SrcNone | ModRM | Lock),
2419 N, N, N, N, N, N,
2420};
2421
2422static struct opcode group5[] = {
2423 D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
2424 D(SrcMem | ModRM | Stack),
2425 I(SrcMemFAddr | ModRM | ImplicitOps | Stack, em_call_far),
2426 D(SrcMem | ModRM | Stack), D(SrcMemFAddr | ModRM | ImplicitOps),
2427 D(SrcMem | ModRM | Stack), N,
2428};
2429
2430static struct group_dual group7 = { {
2431 N, N, D(ModRM | SrcMem | Priv), D(ModRM | SrcMem | Priv),
2432 D(SrcNone | ModRM | DstMem | Mov), N,
2433 D(SrcMem16 | ModRM | Mov | Priv),
2434 D(SrcMem | ModRM | ByteOp | Priv | NoAccess),
2435}, {
2436 D(SrcNone | ModRM | Priv), N, N, D(SrcNone | ModRM | Priv),
2437 D(SrcNone | ModRM | DstMem | Mov), N,
2438 D(SrcMem16 | ModRM | Mov | Priv), N,
2439} };
2440
2441static struct opcode group8[] = {
2442 N, N, N, N,
2443 D(DstMem | SrcImmByte | ModRM), D(DstMem | SrcImmByte | ModRM | Lock),
2444 D(DstMem | SrcImmByte | ModRM | Lock), D(DstMem | SrcImmByte | ModRM | Lock),
2445};
2446
2447static struct group_dual group9 = { {
2448 N, D(DstMem64 | ModRM | Lock), N, N, N, N, N, N,
2449}, {
2450 N, N, N, N, N, N, N, N,
2451} };
2452
2453static struct opcode group11[] = {
2454 I(DstMem | SrcImm | ModRM | Mov, em_mov), X7(D(Undefined)),
2455};
2456
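These group tables are indexed by the ModRM reg field, and a GroupDual entry additionally switches between its register-form (mod == 3) and memory-form tables; the decode path below does exactly that with goffset and the mod bits. The two selectors in isolation:

	#include <stdint.h>

	static unsigned int group_index(uint8_t modrm)
	{
		return (modrm >> 3) & 7;	/* ModRM reg field picks the entry */
	}

	static int modrm_is_register_form(uint8_t modrm)
	{
		return (modrm >> 6) == 3;	/* mod == 3 selects the mod3 table */
	}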
2457static struct opcode opcode_table[256] = {
2458 /* 0x00 - 0x07 */
2459 D6ALU(Lock),
2460 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
2461 /* 0x08 - 0x0F */
2462 D6ALU(Lock),
2463 D(ImplicitOps | Stack | No64), N,
2464 /* 0x10 - 0x17 */
2465 D6ALU(Lock),
2466 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
2467 /* 0x18 - 0x1F */
2468 D6ALU(Lock),
2469 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
2470 /* 0x20 - 0x27 */
2471 D6ALU(Lock), N, N,
2472 /* 0x28 - 0x2F */
2473 D6ALU(Lock), N, I(ByteOp | DstAcc | No64, em_das),
2474 /* 0x30 - 0x37 */
2475 D6ALU(Lock), N, N,
2476 /* 0x38 - 0x3F */
2477 D6ALU(0), N, N,
2478 /* 0x40 - 0x4F */
2479 X16(D(DstReg)),
2480 /* 0x50 - 0x57 */
2481 X8(I(SrcReg | Stack, em_push)),
2482 /* 0x58 - 0x5F */
2483 X8(D(DstReg | Stack)),
2484 /* 0x60 - 0x67 */
2485 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
2486 N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
2487 N, N, N, N,
2488 /* 0x68 - 0x6F */
2489 I(SrcImm | Mov | Stack, em_push),
2490 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
2491 I(SrcImmByte | Mov | Stack, em_push),
2492 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
2493 D2bv(DstDI | Mov | String), /* insb, insw/insd */
2494 D2bv(SrcSI | ImplicitOps | String), /* outsb, outsw/outsd */
2495 /* 0x70 - 0x7F */
2496 X16(D(SrcImmByte)),
2497 /* 0x80 - 0x87 */
2498 G(ByteOp | DstMem | SrcImm | ModRM | Group, group1),
2499 G(DstMem | SrcImm | ModRM | Group, group1),
2500 G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
2501 G(DstMem | SrcImmByte | ModRM | Group, group1),
2502 D2bv(DstMem | SrcReg | ModRM), D2bv(DstMem | SrcReg | ModRM | Lock),
2503 /* 0x88 - 0x8F */
2504 I2bv(DstMem | SrcReg | ModRM | Mov, em_mov),
2505 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
2506 D(DstMem | SrcNone | ModRM | Mov), D(ModRM | SrcMem | NoAccess | DstReg),
2507 D(ImplicitOps | SrcMem16 | ModRM), G(0, group1A),
2508 /* 0x90 - 0x97 */
2509 X8(D(SrcAcc | DstReg)),
2510 /* 0x98 - 0x9F */
2511 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
2512 I(SrcImmFAddr | No64, em_call_far), N,
2513 D(ImplicitOps | Stack), D(ImplicitOps | Stack), N, N,
2514 /* 0xA0 - 0xA7 */
2515 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
2516 I2bv(DstMem | SrcAcc | Mov | MemAbs, em_mov),
2517 I2bv(SrcSI | DstDI | Mov | String, em_mov),
2518 D2bv(SrcSI | DstDI | String),
2519 /* 0xA8 - 0xAF */
2520 D2bv(DstAcc | SrcImm),
2521 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
2522 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
2523 D2bv(SrcAcc | DstDI | String),
2524 /* 0xB0 - 0xB7 */
2525 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
2526 /* 0xB8 - 0xBF */
2527 X8(I(DstReg | SrcImm | Mov, em_mov)),
2528 /* 0xC0 - 0xC7 */
2529 D2bv(DstMem | SrcImmByte | ModRM),
2530 I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
2531 D(ImplicitOps | Stack),
2532 D(DstReg | SrcMemFAddr | ModRM | No64), D(DstReg | SrcMemFAddr | ModRM | No64),
2533 G(ByteOp, group11), G(0, group11),
2534 /* 0xC8 - 0xCF */
2535 N, N, N, D(ImplicitOps | Stack),
2536 D(ImplicitOps), D(SrcImmByte), D(ImplicitOps | No64), D(ImplicitOps),
2537 /* 0xD0 - 0xD7 */
2538 D2bv(DstMem | SrcOne | ModRM), D2bv(DstMem | ModRM),
2539 N, N, N, N,
2540 /* 0xD8 - 0xDF */
2541 N, N, N, N, N, N, N, N,
2542 /* 0xE0 - 0xE7 */
2543 X4(D(SrcImmByte)),
2544 D2bv(SrcImmUByte | DstAcc), D2bv(SrcAcc | DstImmUByte),
2545 /* 0xE8 - 0xEF */
2546 D(SrcImm | Stack), D(SrcImm | ImplicitOps),
2547 D(SrcImmFAddr | No64), D(SrcImmByte | ImplicitOps),
2548 D2bv(SrcNone | DstAcc), D2bv(SrcAcc | ImplicitOps),
2549 /* 0xF0 - 0xF7 */
2550 N, N, N, N,
2551 D(ImplicitOps | Priv), D(ImplicitOps), G(ByteOp, group3), G(0, group3),
2552 /* 0xF8 - 0xFF */
2553 D(ImplicitOps), D(ImplicitOps), D(ImplicitOps), D(ImplicitOps),
2554 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
2555};
2556
2557static struct opcode twobyte_table[256] = {
2558 /* 0x00 - 0x0F */
2559 N, GD(0, &group7), N, N,
2560 N, D(ImplicitOps), D(ImplicitOps | Priv), N,
2561 D(ImplicitOps | Priv), D(ImplicitOps | Priv), N, N,
2562 N, D(ImplicitOps | ModRM), N, N,
2563 /* 0x10 - 0x1F */
2564 N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
2565 /* 0x20 - 0x2F */
2566 D(ModRM | DstMem | Priv | Op3264), D(ModRM | DstMem | Priv | Op3264),
2567 D(ModRM | SrcMem | Priv | Op3264), D(ModRM | SrcMem | Priv | Op3264),
2568 N, N, N, N,
2569 N, N, N, N, N, N, N, N,
2570 /* 0x30 - 0x3F */
2571 D(ImplicitOps | Priv), I(ImplicitOps, em_rdtsc),
2572 D(ImplicitOps | Priv), N,
2573 D(ImplicitOps), D(ImplicitOps | Priv), N, N,
2574 N, N, N, N, N, N, N, N,
2575 /* 0x40 - 0x4F */
2576 X16(D(DstReg | SrcMem | ModRM | Mov)),
2577 /* 0x50 - 0x5F */
2578 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
2579 /* 0x60 - 0x6F */
2580 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
2581 /* 0x70 - 0x7F */
2582 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
2583 /* 0x80 - 0x8F */
2584 X16(D(SrcImm)),
2585 /* 0x90 - 0x9F */
2586 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
2587 /* 0xA0 - 0xA7 */
2588 D(ImplicitOps | Stack), D(ImplicitOps | Stack),
2589 N, D(DstMem | SrcReg | ModRM | BitOp),
2590 D(DstMem | SrcReg | Src2ImmByte | ModRM),
2591 D(DstMem | SrcReg | Src2CL | ModRM), N, N,
2592 /* 0xA8 - 0xAF */
2593 D(ImplicitOps | Stack), D(ImplicitOps | Stack),
2594 N, D(DstMem | SrcReg | ModRM | BitOp | Lock),
2595 D(DstMem | SrcReg | Src2ImmByte | ModRM),
2596 D(DstMem | SrcReg | Src2CL | ModRM),
2597 D(ModRM), I(DstReg | SrcMem | ModRM, em_imul),
2598 /* 0xB0 - 0xB7 */
2599 D2bv(DstMem | SrcReg | ModRM | Lock),
2600 D(DstReg | SrcMemFAddr | ModRM), D(DstMem | SrcReg | ModRM | BitOp | Lock),
2601 D(DstReg | SrcMemFAddr | ModRM), D(DstReg | SrcMemFAddr | ModRM),
2602 D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
2603 /* 0xB8 - 0xBF */
2604 N, N,
2605 G(BitOp, group8), D(DstMem | SrcReg | ModRM | BitOp | Lock),
2606 D(DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
2607 D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
2608 /* 0xC0 - 0xCF */
2609 D2bv(DstMem | SrcReg | ModRM | Lock),
2610 N, D(DstMem | SrcReg | ModRM | Mov),
2611 N, N, N, GD(0, &group9),
2612 N, N, N, N, N, N, N, N,
2613 /* 0xD0 - 0xDF */
2614 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
2615 /* 0xE0 - 0xEF */
2616 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
2617 /* 0xF0 - 0xFF */
2618 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
2619};
2620
2621#undef D
2622#undef N
2623#undef G
2624#undef GD
2625#undef I
2626
2627#undef D2bv
2628#undef I2bv
2629#undef D6ALU
2630
2631static unsigned imm_size(struct decode_cache *c)
2632{
2633 unsigned size;
2634
2635 size = (c->d & ByteOp) ? 1 : c->op_bytes;
2636 if (size == 8)
2637 size = 4;
2638 return size;
2639}
2640
2641static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
2642 unsigned size, bool sign_extension)
2643{
2644 struct decode_cache *c = &ctxt->decode;
2645 struct x86_emulate_ops *ops = ctxt->ops;
2646 int rc = X86EMUL_CONTINUE;
2647
2648 op->type = OP_IMM;
2649 op->bytes = size;
2650 op->addr.mem = c->eip;
2651 /* NB. Immediates are sign-extended as necessary. */
2652 switch (op->bytes) {
2653 case 1:
2654 op->val = insn_fetch(s8, 1, c->eip);
2655 break;
2656 case 2:
2657 op->val = insn_fetch(s16, 2, c->eip);
2658 break;
2659 case 4:
2660 op->val = insn_fetch(s32, 4, c->eip);
2661 break;
2662 }
2663 if (!sign_extension) {
2664 switch (op->bytes) {
2665 case 1:
2666 op->val &= 0xff;
2667 break;
2668 case 2:
2669 op->val &= 0xffff;
2670 break;
2671 case 4:
2672 op->val &= 0xffffffff;
2673 break;
2674 }
2675 }
2676done:
2677 return rc;
2556} 2678}
2557 2679
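decode_imm() always fetches the immediate sign-extended and then masks it back down when the opcode wants an unsigned value; imm_size() caps the width at 4 because x86 immediates never exceed 32 bits even with a 64-bit operand size. The extension rule as a stand-alone helper (sketch only):

	#include <stdint.h>

	static int64_t extend_imm(int64_t sext_val, unsigned int bytes,
				  int sign_extension)
	{
		if (sign_extension)
			return sext_val;	/* keep the sign-extended value */
		switch (bytes) {
		case 1: return sext_val & 0xff;
		case 2: return sext_val & 0xffff;
		case 4: return sext_val & 0xffffffff;
		}
		return sext_val;		/* 8-byte immediates need no mask */
	}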
2558int 2680int
2559x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) 2681x86_decode_insn(struct x86_emulate_ctxt *ctxt)
2560{ 2682{
2683 struct x86_emulate_ops *ops = ctxt->ops;
2684 struct decode_cache *c = &ctxt->decode;
2685 int rc = X86EMUL_CONTINUE;
2686 int mode = ctxt->mode;
2687 int def_op_bytes, def_ad_bytes, dual, goffset;
2688 struct opcode opcode, *g_mod012, *g_mod3;
2689 struct operand memop = { .type = OP_NONE };
2690
2691 c->eip = ctxt->eip;
2692 c->fetch.start = c->fetch.end = c->eip;
2693 ctxt->cs_base = seg_base(ctxt, ops, VCPU_SREG_CS);
2694
2695 switch (mode) {
2696 case X86EMUL_MODE_REAL:
2697 case X86EMUL_MODE_VM86:
2698 case X86EMUL_MODE_PROT16:
2699 def_op_bytes = def_ad_bytes = 2;
2700 break;
2701 case X86EMUL_MODE_PROT32:
2702 def_op_bytes = def_ad_bytes = 4;
2703 break;
2704#ifdef CONFIG_X86_64
2705 case X86EMUL_MODE_PROT64:
2706 def_op_bytes = 4;
2707 def_ad_bytes = 8;
2708 break;
2709#endif
2710 default:
2711 return -1;
2712 }
2713
2714 c->op_bytes = def_op_bytes;
2715 c->ad_bytes = def_ad_bytes;
2716
2717 /* Legacy prefixes. */
2718 for (;;) {
2719 switch (c->b = insn_fetch(u8, 1, c->eip)) {
2720 case 0x66: /* operand-size override */
2721 /* switch between 2/4 bytes */
2722 c->op_bytes = def_op_bytes ^ 6;
2723 break;
2724 case 0x67: /* address-size override */
2725 if (mode == X86EMUL_MODE_PROT64)
2726 /* switch between 4/8 bytes */
2727 c->ad_bytes = def_ad_bytes ^ 12;
2728 else
2729 /* switch between 2/4 bytes */
2730 c->ad_bytes = def_ad_bytes ^ 6;
2731 break;
2732 case 0x26: /* ES override */
2733 case 0x2e: /* CS override */
2734 case 0x36: /* SS override */
2735 case 0x3e: /* DS override */
2736 set_seg_override(c, (c->b >> 3) & 3);
2737 break;
2738 case 0x64: /* FS override */
2739 case 0x65: /* GS override */
2740 set_seg_override(c, c->b & 7);
2741 break;
2742 case 0x40 ... 0x4f: /* REX */
2743 if (mode != X86EMUL_MODE_PROT64)
2744 goto done_prefixes;
2745 c->rex_prefix = c->b;
2746 continue;
2747 case 0xf0: /* LOCK */
2748 c->lock_prefix = 1;
2749 break;
2750 case 0xf2: /* REPNE/REPNZ */
2751 c->rep_prefix = REPNE_PREFIX;
2752 break;
2753 case 0xf3: /* REP/REPE/REPZ */
2754 c->rep_prefix = REPE_PREFIX;
2755 break;
2756 default:
2757 goto done_prefixes;
2758 }
2759
2760 /* Any legacy prefix after a REX prefix nullifies its effect. */
2761
2762 c->rex_prefix = 0;
2763 }
2764
2765done_prefixes:
2766
2767 /* REX prefix. */
2768 if (c->rex_prefix & 8)
2769 c->op_bytes = 8; /* REX.W */
2770
2771 /* Opcode byte(s). */
2772 opcode = opcode_table[c->b];
2773 /* Two-byte opcode? */
2774 if (c->b == 0x0f) {
2775 c->twobyte = 1;
2776 c->b = insn_fetch(u8, 1, c->eip);
2777 opcode = twobyte_table[c->b];
2778 }
2779 c->d = opcode.flags;
2780
2781 if (c->d & Group) {
2782 dual = c->d & GroupDual;
2783 c->modrm = insn_fetch(u8, 1, c->eip);
2784 --c->eip;
2785
2786 if (c->d & GroupDual) {
2787 g_mod012 = opcode.u.gdual->mod012;
2788 g_mod3 = opcode.u.gdual->mod3;
2789 } else
2790 g_mod012 = g_mod3 = opcode.u.group;
2791
2792 c->d &= ~(Group | GroupDual);
2793
2794 goffset = (c->modrm >> 3) & 7;
2795
2796 if ((c->modrm >> 6) == 3)
2797 opcode = g_mod3[goffset];
2798 else
2799 opcode = g_mod012[goffset];
2800 c->d |= opcode.flags;
2801 }
2802
2803 c->execute = opcode.u.execute;
2804
2805 /* Unrecognised? */
2806 if (c->d == 0 || (c->d & Undefined)) {
2807 DPRINTF("Cannot emulate %02x\n", c->b);
2808 return -1;
2809 }
2810
2811 if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
2812 c->op_bytes = 8;
2813
2814 if (c->d & Op3264) {
2815 if (mode == X86EMUL_MODE_PROT64)
2816 c->op_bytes = 8;
2817 else
2818 c->op_bytes = 4;
2819 }
2820
2821 /* ModRM and SIB bytes. */
2822 if (c->d & ModRM) {
2823 rc = decode_modrm(ctxt, ops, &memop);
2824 if (!c->has_seg_override)
2825 set_seg_override(c, c->modrm_seg);
2826 } else if (c->d & MemAbs)
2827 rc = decode_abs(ctxt, ops, &memop);
2828 if (rc != X86EMUL_CONTINUE)
2829 goto done;
2830
2831 if (!c->has_seg_override)
2832 set_seg_override(c, VCPU_SREG_DS);
2833
2834 if (memop.type == OP_MEM && !(!c->twobyte && c->b == 0x8d))
2835 memop.addr.mem += seg_override_base(ctxt, ops, c);
2836
2837 if (memop.type == OP_MEM && c->ad_bytes != 8)
2838 memop.addr.mem = (u32)memop.addr.mem;
2839
2840 if (memop.type == OP_MEM && c->rip_relative)
2841 memop.addr.mem += c->eip;
2842
2843 /*
2844 * Decode and fetch the source operand: register, memory
2845 * or immediate.
2846 */
2847 switch (c->d & SrcMask) {
2848 case SrcNone:
2849 break;
2850 case SrcReg:
2851 decode_register_operand(&c->src, c, 0);
2852 break;
2853 case SrcMem16:
2854 memop.bytes = 2;
2855 goto srcmem_common;
2856 case SrcMem32:
2857 memop.bytes = 4;
2858 goto srcmem_common;
2859 case SrcMem:
2860 memop.bytes = (c->d & ByteOp) ? 1 :
2861 c->op_bytes;
2862 srcmem_common:
2863 c->src = memop;
2864 break;
2865 case SrcImmU16:
2866 rc = decode_imm(ctxt, &c->src, 2, false);
2867 break;
2868 case SrcImm:
2869 rc = decode_imm(ctxt, &c->src, imm_size(c), true);
2870 break;
2871 case SrcImmU:
2872 rc = decode_imm(ctxt, &c->src, imm_size(c), false);
2873 break;
2874 case SrcImmByte:
2875 rc = decode_imm(ctxt, &c->src, 1, true);
2876 break;
2877 case SrcImmUByte:
2878 rc = decode_imm(ctxt, &c->src, 1, false);
2879 break;
2880 case SrcAcc:
2881 c->src.type = OP_REG;
2882 c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
2883 c->src.addr.reg = &c->regs[VCPU_REGS_RAX];
2884 fetch_register_operand(&c->src);
2885 break;
2886 case SrcOne:
2887 c->src.bytes = 1;
2888 c->src.val = 1;
2889 break;
2890 case SrcSI:
2891 c->src.type = OP_MEM;
2892 c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
2893 c->src.addr.mem =
2894 register_address(c, seg_override_base(ctxt, ops, c),
2895 c->regs[VCPU_REGS_RSI]);
2896 c->src.val = 0;
2897 break;
2898 case SrcImmFAddr:
2899 c->src.type = OP_IMM;
2900 c->src.addr.mem = c->eip;
2901 c->src.bytes = c->op_bytes + 2;
2902 insn_fetch_arr(c->src.valptr, c->src.bytes, c->eip);
2903 break;
2904 case SrcMemFAddr:
2905 memop.bytes = c->op_bytes + 2;
2906 goto srcmem_common;
2907 break;
2908 }
2909
2910 if (rc != X86EMUL_CONTINUE)
2911 goto done;
2912
2913 /*
2914 * Decode and fetch the second source operand: register, memory
2915 * or immediate.
2916 */
2917 switch (c->d & Src2Mask) {
2918 case Src2None:
2919 break;
2920 case Src2CL:
2921 c->src2.bytes = 1;
2922 c->src2.val = c->regs[VCPU_REGS_RCX] & 0x8;
2923 break;
2924 case Src2ImmByte:
2925 rc = decode_imm(ctxt, &c->src2, 1, true);
2926 break;
2927 case Src2One:
2928 c->src2.bytes = 1;
2929 c->src2.val = 1;
2930 break;
2931 case Src2Imm:
2932 rc = decode_imm(ctxt, &c->src2, imm_size(c), true);
2933 break;
2934 }
2935
2936 if (rc != X86EMUL_CONTINUE)
2937 goto done;
2938
2939 /* Decode and fetch the destination operand: register or memory. */
2940 switch (c->d & DstMask) {
2941 case DstReg:
2942 decode_register_operand(&c->dst, c,
2943 c->twobyte && (c->b == 0xb6 || c->b == 0xb7));
2944 break;
2945 case DstImmUByte:
2946 c->dst.type = OP_IMM;
2947 c->dst.addr.mem = c->eip;
2948 c->dst.bytes = 1;
2949 c->dst.val = insn_fetch(u8, 1, c->eip);
2950 break;
2951 case DstMem:
2952 case DstMem64:
2953 c->dst = memop;
2954 if ((c->d & DstMask) == DstMem64)
2955 c->dst.bytes = 8;
2956 else
2957 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
2958 if (c->d & BitOp)
2959 fetch_bit_operand(c);
2960 c->dst.orig_val = c->dst.val;
2961 break;
2962 case DstAcc:
2963 c->dst.type = OP_REG;
2964 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
2965 c->dst.addr.reg = &c->regs[VCPU_REGS_RAX];
2966 fetch_register_operand(&c->dst);
2967 c->dst.orig_val = c->dst.val;
2968 break;
2969 case DstDI:
2970 c->dst.type = OP_MEM;
2971 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
2972 c->dst.addr.mem =
2973 register_address(c, es_base(ctxt, ops),
2974 c->regs[VCPU_REGS_RDI]);
2975 c->dst.val = 0;
2976 break;
2977 case ImplicitOps:
2978 /* Special instructions do their own operand decoding. */
2979 default:
2980 c->dst.type = OP_NONE; /* Disable writeback. */
2981 return 0;
2982 }
2983
2984done:
2985 return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
2986}
2987
2988static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
2989{
2990 struct decode_cache *c = &ctxt->decode;
2991
 2992 /* The second termination condition applies only to REPE
 2993 * and REPNE: if the repeat string prefix is REPE/REPZ or
 2994 * REPNE/REPNZ, check the corresponding termination
 2995 * condition:
 2996 * - REPE/REPZ: done when ZF = 0
 2997 * - REPNE/REPNZ: done when ZF = 1
 2998 */
2999 if (((c->b == 0xa6) || (c->b == 0xa7) ||
3000 (c->b == 0xae) || (c->b == 0xaf))
3001 && (((c->rep_prefix == REPE_PREFIX) &&
3002 ((ctxt->eflags & EFLG_ZF) == 0))
3003 || ((c->rep_prefix == REPNE_PREFIX) &&
3004 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
3005 return true;
3006
3007 return false;
3008}
3009
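string_insn_completed() only consults ZF for CMPS (0xa6/0xa7) and SCAS (0xae/0xaf); for the other string ops the REP prefix terminates on RCX alone. The same condition written as a small predicate:

	#include <stdbool.h>
	#include <stdint.h>

	static bool rep_zf_terminates(uint8_t opcode, bool repe, bool zf)
	{
		bool cmps_or_scas = opcode == 0xa6 || opcode == 0xa7 ||
				    opcode == 0xae || opcode == 0xaf;

		if (!cmps_or_scas)
			return false;		/* MOVS/STOS/LODS ignore ZF */
		return repe ? !zf : zf;		/* REPE stops on ZF=0, REPNE on ZF=1 */
	}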
3010int
3011x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
3012{
3013 struct x86_emulate_ops *ops = ctxt->ops;
2561 u64 msr_data; 3014 u64 msr_data;
2562 struct decode_cache *c = &ctxt->decode; 3015 struct decode_cache *c = &ctxt->decode;
2563 int rc = X86EMUL_CONTINUE; 3016 int rc = X86EMUL_CONTINUE;
2564 int saved_dst_type = c->dst.type; 3017 int saved_dst_type = c->dst.type;
3018 int irq; /* Used for int 3, int, and into */
2565 3019
2566 ctxt->decode.mem_read.pos = 0; 3020 ctxt->decode.mem_read.pos = 0;
2567 3021
@@ -2576,6 +3030,11 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
2576 goto done; 3030 goto done;
2577 } 3031 }
2578 3032
3033 if ((c->d & SrcMask) == SrcMemFAddr && c->src.type != OP_MEM) {
3034 emulate_ud(ctxt);
3035 goto done;
3036 }
3037
2579 /* Privileged instruction can be executed only in CPL=0 */ 3038 /* Privileged instruction can be executed only in CPL=0 */
2580 if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) { 3039 if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) {
2581 emulate_gp(ctxt, 0); 3040 emulate_gp(ctxt, 0);
@@ -2583,35 +3042,15 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
2583 } 3042 }
2584 3043
2585 if (c->rep_prefix && (c->d & String)) { 3044 if (c->rep_prefix && (c->d & String)) {
2586 ctxt->restart = true;
2587 /* All REP prefixes have the same first termination condition */ 3045 /* All REP prefixes have the same first termination condition */
2588 if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) { 3046 if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) {
2589 string_done:
2590 ctxt->restart = false;
2591 ctxt->eip = c->eip; 3047 ctxt->eip = c->eip;
2592 goto done; 3048 goto done;
2593 } 3049 }
2594 /* The second termination condition only applies for REPE
2595 * and REPNE. Test if the repeat string operation prefix is
2596 * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the
2597 * corresponding termination condition according to:
2598 * - if REPE/REPZ and ZF = 0 then done
2599 * - if REPNE/REPNZ and ZF = 1 then done
2600 */
2601 if ((c->b == 0xa6) || (c->b == 0xa7) ||
2602 (c->b == 0xae) || (c->b == 0xaf)) {
2603 if ((c->rep_prefix == REPE_PREFIX) &&
2604 ((ctxt->eflags & EFLG_ZF) == 0))
2605 goto string_done;
2606 if ((c->rep_prefix == REPNE_PREFIX) &&
2607 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))
2608 goto string_done;
2609 }
2610 c->eip = ctxt->eip;
2611 } 3050 }
2612 3051
2613 if (c->src.type == OP_MEM) { 3052 if ((c->src.type == OP_MEM) && !(c->d & NoAccess)) {
2614 rc = read_emulated(ctxt, ops, (unsigned long)c->src.ptr, 3053 rc = read_emulated(ctxt, ops, c->src.addr.mem,
2615 c->src.valptr, c->src.bytes); 3054 c->src.valptr, c->src.bytes);
2616 if (rc != X86EMUL_CONTINUE) 3055 if (rc != X86EMUL_CONTINUE)
2617 goto done; 3056 goto done;
@@ -2619,7 +3058,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
2619 } 3058 }
2620 3059
2621 if (c->src2.type == OP_MEM) { 3060 if (c->src2.type == OP_MEM) {
2622 rc = read_emulated(ctxt, ops, (unsigned long)c->src2.ptr, 3061 rc = read_emulated(ctxt, ops, c->src2.addr.mem,
2623 &c->src2.val, c->src2.bytes); 3062 &c->src2.val, c->src2.bytes);
2624 if (rc != X86EMUL_CONTINUE) 3063 if (rc != X86EMUL_CONTINUE)
2625 goto done; 3064 goto done;
@@ -2631,7 +3070,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
2631 3070
2632 if ((c->dst.type == OP_MEM) && !(c->d & Mov)) { 3071 if ((c->dst.type == OP_MEM) && !(c->d & Mov)) {
2633 /* optimisation - avoid slow emulated read if Mov */ 3072 /* optimisation - avoid slow emulated read if Mov */
2634 rc = read_emulated(ctxt, ops, (unsigned long)c->dst.ptr, 3073 rc = read_emulated(ctxt, ops, c->dst.addr.mem,
2635 &c->dst.val, c->dst.bytes); 3074 &c->dst.val, c->dst.bytes);
2636 if (rc != X86EMUL_CONTINUE) 3075 if (rc != X86EMUL_CONTINUE)
2637 goto done; 3076 goto done;
@@ -2640,6 +3079,13 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
2640 3079
2641special_insn: 3080special_insn:
2642 3081
3082 if (c->execute) {
3083 rc = c->execute(ctxt);
3084 if (rc != X86EMUL_CONTINUE)
3085 goto done;
3086 goto writeback;
3087 }
3088
2643 if (c->twobyte) 3089 if (c->twobyte)
2644 goto twobyte_insn; 3090 goto twobyte_insn;
2645 3091
@@ -2653,8 +3099,6 @@ special_insn:
2653 break; 3099 break;
2654 case 0x07: /* pop es */ 3100 case 0x07: /* pop es */
2655 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES); 3101 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES);
2656 if (rc != X86EMUL_CONTINUE)
2657 goto done;
2658 break; 3102 break;
2659 case 0x08 ... 0x0d: 3103 case 0x08 ... 0x0d:
2660 or: /* or */ 3104 or: /* or */
@@ -2672,8 +3116,6 @@ special_insn:
2672 break; 3116 break;
2673 case 0x17: /* pop ss */ 3117 case 0x17: /* pop ss */
2674 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS); 3118 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS);
2675 if (rc != X86EMUL_CONTINUE)
2676 goto done;
2677 break; 3119 break;
2678 case 0x18 ... 0x1d: 3120 case 0x18 ... 0x1d:
2679 sbb: /* sbb */ 3121 sbb: /* sbb */
@@ -2684,8 +3126,6 @@ special_insn:
2684 break; 3126 break;
2685 case 0x1f: /* pop ds */ 3127 case 0x1f: /* pop ds */
2686 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS); 3128 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS);
2687 if (rc != X86EMUL_CONTINUE)
2688 goto done;
2689 break; 3129 break;
2690 case 0x20 ... 0x25: 3130 case 0x20 ... 0x25:
2691 and: /* and */ 3131 and: /* and */
@@ -2709,58 +3149,29 @@ special_insn:
2709 case 0x48 ... 0x4f: /* dec r16/r32 */ 3149 case 0x48 ... 0x4f: /* dec r16/r32 */
2710 emulate_1op("dec", c->dst, ctxt->eflags); 3150 emulate_1op("dec", c->dst, ctxt->eflags);
2711 break; 3151 break;
2712 case 0x50 ... 0x57: /* push reg */
2713 emulate_push(ctxt, ops);
2714 break;
2715 case 0x58 ... 0x5f: /* pop reg */ 3152 case 0x58 ... 0x5f: /* pop reg */
2716 pop_instruction: 3153 pop_instruction:
2717 rc = emulate_pop(ctxt, ops, &c->dst.val, c->op_bytes); 3154 rc = emulate_pop(ctxt, ops, &c->dst.val, c->op_bytes);
2718 if (rc != X86EMUL_CONTINUE)
2719 goto done;
2720 break; 3155 break;
2721 case 0x60: /* pusha */ 3156 case 0x60: /* pusha */
2722 rc = emulate_pusha(ctxt, ops); 3157 rc = emulate_pusha(ctxt, ops);
2723 if (rc != X86EMUL_CONTINUE)
2724 goto done;
2725 break; 3158 break;
2726 case 0x61: /* popa */ 3159 case 0x61: /* popa */
2727 rc = emulate_popa(ctxt, ops); 3160 rc = emulate_popa(ctxt, ops);
2728 if (rc != X86EMUL_CONTINUE)
2729 goto done;
2730 break; 3161 break;
2731 case 0x63: /* movsxd */ 3162 case 0x63: /* movsxd */
2732 if (ctxt->mode != X86EMUL_MODE_PROT64) 3163 if (ctxt->mode != X86EMUL_MODE_PROT64)
2733 goto cannot_emulate; 3164 goto cannot_emulate;
2734 c->dst.val = (s32) c->src.val; 3165 c->dst.val = (s32) c->src.val;
2735 break; 3166 break;
2736 case 0x68: /* push imm */
2737 case 0x6a: /* push imm8 */
2738 emulate_push(ctxt, ops);
2739 break;
2740 case 0x6c: /* insb */ 3167 case 0x6c: /* insb */
2741 case 0x6d: /* insw/insd */ 3168 case 0x6d: /* insw/insd */
2742 c->dst.bytes = min(c->dst.bytes, 4u); 3169 c->src.val = c->regs[VCPU_REGS_RDX];
2743 if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX], 3170 goto do_io_in;
2744 c->dst.bytes)) {
2745 emulate_gp(ctxt, 0);
2746 goto done;
2747 }
2748 if (!pio_in_emulated(ctxt, ops, c->dst.bytes,
2749 c->regs[VCPU_REGS_RDX], &c->dst.val))
2750 goto done; /* IO is needed, skip writeback */
2751 break;
2752 case 0x6e: /* outsb */ 3171 case 0x6e: /* outsb */
2753 case 0x6f: /* outsw/outsd */ 3172 case 0x6f: /* outsw/outsd */
2754 c->src.bytes = min(c->src.bytes, 4u); 3173 c->dst.val = c->regs[VCPU_REGS_RDX];
2755 if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX], 3174 goto do_io_out;
2756 c->src.bytes)) {
2757 emulate_gp(ctxt, 0);
2758 goto done;
2759 }
2760 ops->pio_out_emulated(c->src.bytes, c->regs[VCPU_REGS_RDX],
2761 &c->src.val, 1, ctxt->vcpu);
2762
2763 c->dst.type = OP_NONE; /* nothing to writeback */
2764 break; 3175 break;
2765 case 0x70 ... 0x7f: /* jcc (short) */ 3176 case 0x70 ... 0x7f: /* jcc (short) */
2766 if (test_cc(c->b, ctxt->eflags)) 3177 if (test_cc(c->b, ctxt->eflags))
@@ -2793,29 +3204,15 @@ special_insn:
2793 case 0x86 ... 0x87: /* xchg */ 3204 case 0x86 ... 0x87: /* xchg */
2794 xchg: 3205 xchg:
2795 /* Write back the register source. */ 3206 /* Write back the register source. */
2796 switch (c->dst.bytes) { 3207 c->src.val = c->dst.val;
2797 case 1: 3208 write_register_operand(&c->src);
2798 *(u8 *) c->src.ptr = (u8) c->dst.val;
2799 break;
2800 case 2:
2801 *(u16 *) c->src.ptr = (u16) c->dst.val;
2802 break;
2803 case 4:
2804 *c->src.ptr = (u32) c->dst.val;
2805 break; /* 64b reg: zero-extend */
2806 case 8:
2807 *c->src.ptr = c->dst.val;
2808 break;
2809 }
2810 /* 3209 /*
2811 * Write back the memory destination with implicit LOCK 3210 * Write back the memory destination with implicit LOCK
2812 * prefix. 3211 * prefix.
2813 */ 3212 */
2814 c->dst.val = c->src.val; 3213 c->dst.val = c->src.orig_val;
2815 c->lock_prefix = 1; 3214 c->lock_prefix = 1;
2816 break; 3215 break;
2817 case 0x88 ... 0x8b: /* mov */
2818 goto mov;
2819 case 0x8c: /* mov r/m, sreg */ 3216 case 0x8c: /* mov r/m, sreg */
2820 if (c->modrm_reg > VCPU_SREG_GS) { 3217 if (c->modrm_reg > VCPU_SREG_GS) {
2821 emulate_ud(ctxt); 3218 emulate_ud(ctxt);
@@ -2824,7 +3221,7 @@ special_insn:
2824 c->dst.val = ops->get_segment_selector(c->modrm_reg, ctxt->vcpu); 3221 c->dst.val = ops->get_segment_selector(c->modrm_reg, ctxt->vcpu);
2825 break; 3222 break;
2826 case 0x8d: /* lea r16/r32, m */ 3223 case 0x8d: /* lea r16/r32, m */
2827 c->dst.val = c->modrm_ea; 3224 c->dst.val = c->src.addr.mem;
2828 break; 3225 break;
2829 case 0x8e: { /* mov seg, r/m16 */ 3226 case 0x8e: { /* mov seg, r/m16 */
2830 uint16_t sel; 3227 uint16_t sel;
@@ -2847,76 +3244,87 @@ special_insn:
2847 } 3244 }
2848 case 0x8f: /* pop (sole member of Grp1a) */ 3245 case 0x8f: /* pop (sole member of Grp1a) */
2849 rc = emulate_grp1a(ctxt, ops); 3246 rc = emulate_grp1a(ctxt, ops);
2850 if (rc != X86EMUL_CONTINUE)
2851 goto done;
2852 break; 3247 break;
2853 case 0x90: /* nop / xchg r8,rax */ 3248 case 0x90 ... 0x97: /* nop / xchg reg, rax */
2854 if (c->dst.ptr == (unsigned long *)&c->regs[VCPU_REGS_RAX]) { 3249 if (c->dst.addr.reg == &c->regs[VCPU_REGS_RAX])
2855 c->dst.type = OP_NONE; /* nop */
2856 break; 3250 break;
2857 }
2858 case 0x91 ... 0x97: /* xchg reg,rax */
2859 c->src.type = OP_REG;
2860 c->src.bytes = c->op_bytes;
2861 c->src.ptr = (unsigned long *) &c->regs[VCPU_REGS_RAX];
2862 c->src.val = *(c->src.ptr);
2863 goto xchg; 3251 goto xchg;
3252 case 0x98: /* cbw/cwde/cdqe */
3253 switch (c->op_bytes) {
3254 case 2: c->dst.val = (s8)c->dst.val; break;
3255 case 4: c->dst.val = (s16)c->dst.val; break;
3256 case 8: c->dst.val = (s32)c->dst.val; break;
3257 }
3258 break;
2864 case 0x9c: /* pushf */ 3259 case 0x9c: /* pushf */
2865 c->src.val = (unsigned long) ctxt->eflags; 3260 c->src.val = (unsigned long) ctxt->eflags;
2866 emulate_push(ctxt, ops); 3261 emulate_push(ctxt, ops);
2867 break; 3262 break;
2868 case 0x9d: /* popf */ 3263 case 0x9d: /* popf */
2869 c->dst.type = OP_REG; 3264 c->dst.type = OP_REG;
2870 c->dst.ptr = (unsigned long *) &ctxt->eflags; 3265 c->dst.addr.reg = &ctxt->eflags;
2871 c->dst.bytes = c->op_bytes; 3266 c->dst.bytes = c->op_bytes;
2872 rc = emulate_popf(ctxt, ops, &c->dst.val, c->op_bytes); 3267 rc = emulate_popf(ctxt, ops, &c->dst.val, c->op_bytes);
2873 if (rc != X86EMUL_CONTINUE)
2874 goto done;
2875 break; 3268 break;
2876 case 0xa0 ... 0xa3: /* mov */
2877 case 0xa4 ... 0xa5: /* movs */
2878 goto mov;
2879 case 0xa6 ... 0xa7: /* cmps */ 3269 case 0xa6 ... 0xa7: /* cmps */
2880 c->dst.type = OP_NONE; /* Disable writeback. */ 3270 c->dst.type = OP_NONE; /* Disable writeback. */
2881 DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c->src.ptr, c->dst.ptr); 3271 DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c->src.addr.mem, c->dst.addr.mem);
2882 goto cmp; 3272 goto cmp;
2883 case 0xa8 ... 0xa9: /* test ax, imm */ 3273 case 0xa8 ... 0xa9: /* test ax, imm */
2884 goto test; 3274 goto test;
2885 case 0xaa ... 0xab: /* stos */
2886 c->dst.val = c->regs[VCPU_REGS_RAX];
2887 break;
2888 case 0xac ... 0xad: /* lods */
2889 goto mov;
2890 case 0xae ... 0xaf: /* scas */ 3275 case 0xae ... 0xaf: /* scas */
2891 DPRINTF("Urk! I don't handle SCAS.\n"); 3276 goto cmp;
2892 goto cannot_emulate;
2893 case 0xb0 ... 0xbf: /* mov r, imm */
2894 goto mov;
2895 case 0xc0 ... 0xc1: 3277 case 0xc0 ... 0xc1:
2896 emulate_grp2(ctxt); 3278 emulate_grp2(ctxt);
2897 break; 3279 break;
2898 case 0xc3: /* ret */ 3280 case 0xc3: /* ret */
2899 c->dst.type = OP_REG; 3281 c->dst.type = OP_REG;
2900 c->dst.ptr = &c->eip; 3282 c->dst.addr.reg = &c->eip;
2901 c->dst.bytes = c->op_bytes; 3283 c->dst.bytes = c->op_bytes;
2902 goto pop_instruction; 3284 goto pop_instruction;
2903 case 0xc6 ... 0xc7: /* mov (sole member of Grp11) */ 3285 case 0xc4: /* les */
2904 mov: 3286 rc = emulate_load_segment(ctxt, ops, VCPU_SREG_ES);
2905 c->dst.val = c->src.val; 3287 break;
3288 case 0xc5: /* lds */
3289 rc = emulate_load_segment(ctxt, ops, VCPU_SREG_DS);
2906 break; 3290 break;
2907 case 0xcb: /* ret far */ 3291 case 0xcb: /* ret far */
2908 rc = emulate_ret_far(ctxt, ops); 3292 rc = emulate_ret_far(ctxt, ops);
2909 if (rc != X86EMUL_CONTINUE) 3293 break;
2910 goto done; 3294 case 0xcc: /* int3 */
3295 irq = 3;
3296 goto do_interrupt;
3297 case 0xcd: /* int n */
3298 irq = c->src.val;
3299 do_interrupt:
3300 rc = emulate_int(ctxt, ops, irq);
3301 break;
3302 case 0xce: /* into */
3303 if (ctxt->eflags & EFLG_OF) {
3304 irq = 4;
3305 goto do_interrupt;
3306 }
3307 break;
3308 case 0xcf: /* iret */
3309 rc = emulate_iret(ctxt, ops);
2911 break; 3310 break;
2912 case 0xd0 ... 0xd1: /* Grp2 */ 3311 case 0xd0 ... 0xd1: /* Grp2 */
2913 c->src.val = 1;
2914 emulate_grp2(ctxt); 3312 emulate_grp2(ctxt);
2915 break; 3313 break;
2916 case 0xd2 ... 0xd3: /* Grp2 */ 3314 case 0xd2 ... 0xd3: /* Grp2 */
2917 c->src.val = c->regs[VCPU_REGS_RCX]; 3315 c->src.val = c->regs[VCPU_REGS_RCX];
2918 emulate_grp2(ctxt); 3316 emulate_grp2(ctxt);
2919 break; 3317 break;
3318 case 0xe0 ... 0xe2: /* loop/loopz/loopnz */
3319 register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
3320 if (address_mask(c, c->regs[VCPU_REGS_RCX]) != 0 &&
3321 (c->b == 0xe2 || test_cc(c->b ^ 0x5, ctxt->eflags)))
3322 jmp_rel(c, c->src.val);
3323 break;
3324 case 0xe3: /* jcxz/jecxz/jrcxz */
3325 if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0)
3326 jmp_rel(c, c->src.val);
3327 break;
2920 case 0xe4: /* inb */ 3328 case 0xe4: /* inb */
2921 case 0xe5: /* in */ 3329 case 0xe5: /* in */
2922 goto do_io_in; 3330 goto do_io_in;
@@ -2964,15 +3372,16 @@ special_insn:
2964 break; 3372 break;
2965 case 0xee: /* out dx,al */ 3373 case 0xee: /* out dx,al */
2966 case 0xef: /* out dx,(e/r)ax */ 3374 case 0xef: /* out dx,(e/r)ax */
2967 c->src.val = c->regs[VCPU_REGS_RDX]; 3375 c->dst.val = c->regs[VCPU_REGS_RDX];
2968 do_io_out: 3376 do_io_out:
2969 c->dst.bytes = min(c->dst.bytes, 4u); 3377 c->src.bytes = min(c->src.bytes, 4u);
2970 if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) { 3378 if (!emulator_io_permited(ctxt, ops, c->dst.val,
3379 c->src.bytes)) {
2971 emulate_gp(ctxt, 0); 3380 emulate_gp(ctxt, 0);
2972 goto done; 3381 goto done;
2973 } 3382 }
2974 ops->pio_out_emulated(c->dst.bytes, c->src.val, &c->dst.val, 1, 3383 ops->pio_out_emulated(c->src.bytes, c->dst.val,
2975 ctxt->vcpu); 3384 &c->src.val, 1, ctxt->vcpu);
2976 c->dst.type = OP_NONE; /* Disable writeback. */ 3385 c->dst.type = OP_NONE; /* Disable writeback. */
2977 break; 3386 break;
2978 case 0xf4: /* hlt */ 3387 case 0xf4: /* hlt */
@@ -2981,24 +3390,22 @@ special_insn:
2981 case 0xf5: /* cmc */ 3390 case 0xf5: /* cmc */
2982 /* complement carry flag from eflags reg */ 3391 /* complement carry flag from eflags reg */
2983 ctxt->eflags ^= EFLG_CF; 3392 ctxt->eflags ^= EFLG_CF;
2984 c->dst.type = OP_NONE; /* Disable writeback. */
2985 break; 3393 break;
2986 case 0xf6 ... 0xf7: /* Grp3 */ 3394 case 0xf6 ... 0xf7: /* Grp3 */
2987 if (!emulate_grp3(ctxt, ops)) 3395 rc = emulate_grp3(ctxt, ops);
2988 goto cannot_emulate;
2989 break; 3396 break;
2990 case 0xf8: /* clc */ 3397 case 0xf8: /* clc */
2991 ctxt->eflags &= ~EFLG_CF; 3398 ctxt->eflags &= ~EFLG_CF;
2992 c->dst.type = OP_NONE; /* Disable writeback. */ 3399 break;
3400 case 0xf9: /* stc */
3401 ctxt->eflags |= EFLG_CF;
2993 break; 3402 break;
2994 case 0xfa: /* cli */ 3403 case 0xfa: /* cli */
2995 if (emulator_bad_iopl(ctxt, ops)) { 3404 if (emulator_bad_iopl(ctxt, ops)) {
2996 emulate_gp(ctxt, 0); 3405 emulate_gp(ctxt, 0);
2997 goto done; 3406 goto done;
2998 } else { 3407 } else
2999 ctxt->eflags &= ~X86_EFLAGS_IF; 3408 ctxt->eflags &= ~X86_EFLAGS_IF;
3000 c->dst.type = OP_NONE; /* Disable writeback. */
3001 }
3002 break; 3409 break;
3003 case 0xfb: /* sti */ 3410 case 0xfb: /* sti */
3004 if (emulator_bad_iopl(ctxt, ops)) { 3411 if (emulator_bad_iopl(ctxt, ops)) {
@@ -3007,29 +3414,29 @@ special_insn:
3007 } else { 3414 } else {
3008 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI; 3415 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3009 ctxt->eflags |= X86_EFLAGS_IF; 3416 ctxt->eflags |= X86_EFLAGS_IF;
3010 c->dst.type = OP_NONE; /* Disable writeback. */
3011 } 3417 }
3012 break; 3418 break;
3013 case 0xfc: /* cld */ 3419 case 0xfc: /* cld */
3014 ctxt->eflags &= ~EFLG_DF; 3420 ctxt->eflags &= ~EFLG_DF;
3015 c->dst.type = OP_NONE; /* Disable writeback. */
3016 break; 3421 break;
3017 case 0xfd: /* std */ 3422 case 0xfd: /* std */
3018 ctxt->eflags |= EFLG_DF; 3423 ctxt->eflags |= EFLG_DF;
3019 c->dst.type = OP_NONE; /* Disable writeback. */
3020 break; 3424 break;
3021 case 0xfe: /* Grp4 */ 3425 case 0xfe: /* Grp4 */
3022 grp45: 3426 grp45:
3023 rc = emulate_grp45(ctxt, ops); 3427 rc = emulate_grp45(ctxt, ops);
3024 if (rc != X86EMUL_CONTINUE)
3025 goto done;
3026 break; 3428 break;
3027 case 0xff: /* Grp5 */ 3429 case 0xff: /* Grp5 */
3028 if (c->modrm_reg == 5) 3430 if (c->modrm_reg == 5)
3029 goto jump_far; 3431 goto jump_far;
3030 goto grp45; 3432 goto grp45;
3433 default:
3434 goto cannot_emulate;
3031 } 3435 }
3032 3436
3437 if (rc != X86EMUL_CONTINUE)
3438 goto done;
3439
3033writeback: 3440writeback:
3034 rc = writeback(ctxt, ops); 3441 rc = writeback(ctxt, ops);
3035 if (rc != X86EMUL_CONTINUE) 3442 if (rc != X86EMUL_CONTINUE)
@@ -3050,25 +3457,32 @@ writeback:
3050 &c->dst); 3457 &c->dst);
3051 3458
3052 if (c->rep_prefix && (c->d & String)) { 3459 if (c->rep_prefix && (c->d & String)) {
3053 struct read_cache *rc = &ctxt->decode.io_read; 3460 struct read_cache *r = &ctxt->decode.io_read;
3054 register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1); 3461 register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
3055 /* 3462
3056 * Re-enter guest when pio read ahead buffer is empty or, 3463 if (!string_insn_completed(ctxt)) {
3057 * if it is not used, after each 1024 iteration. 3464 /*
3058 */ 3465 * Re-enter guest when pio read ahead buffer is empty
3059 if ((rc->end == 0 && !(c->regs[VCPU_REGS_RCX] & 0x3ff)) || 3466 * or, if it is not used, after each 1024 iteration.
3060 (rc->end != 0 && rc->end == rc->pos)) 3467 */
3061 ctxt->restart = false; 3468 if ((r->end != 0 || c->regs[VCPU_REGS_RCX] & 0x3ff) &&
3469 (r->end == 0 || r->end != r->pos)) {
3470 /*
3471 * Reset read cache. Usually happens before
3472 * decode, but since instruction is restarted
3473 * we have to do it here.
3474 */
3475 ctxt->decode.mem_read.end = 0;
3476 return EMULATION_RESTART;
3477 }
3478 goto done; /* skip rip writeback */
3479 }
3062 } 3480 }
3063 /* 3481
 3064 * reset read cache here in case string instruction is restarted
3065 * without decoding
3066 */
3067 ctxt->decode.mem_read.end = 0;
3068 ctxt->eip = c->eip; 3482 ctxt->eip = c->eip;
3069 3483
3070done: 3484done:
3071 return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0; 3485 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3072 3486
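The rewritten writeback path returns EMULATION_RESTART while a REP loop should keep running inside the emulator: either the pio read-ahead buffer still holds data, or, when no buffer is in use, until 1024 iterations have passed and the guest is re-entered. A condensed model of that decision:

	#include <stdbool.h>

	static bool keep_looping(unsigned long rcx, unsigned int buf_pos,
				 unsigned int buf_end)
	{
		if (buf_end != 0)			/* pio read-ahead buffer in use */
			return buf_pos != buf_end;	/* loop until it drains */
		return (rcx & 0x3ff) != 0;		/* else re-enter guest every 1024 */
	}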
3073twobyte_insn: 3487twobyte_insn:
3074 switch (c->b) { 3488 switch (c->b) {
@@ -3091,7 +3505,7 @@ twobyte_insn:
3091 c->dst.type = OP_NONE; 3505 c->dst.type = OP_NONE;
3092 break; 3506 break;
3093 case 2: /* lgdt */ 3507 case 2: /* lgdt */
3094 rc = read_descriptor(ctxt, ops, c->src.ptr, 3508 rc = read_descriptor(ctxt, ops, c->src.addr.mem,
3095 &size, &address, c->op_bytes); 3509 &size, &address, c->op_bytes);
3096 if (rc != X86EMUL_CONTINUE) 3510 if (rc != X86EMUL_CONTINUE)
3097 goto done; 3511 goto done;
@@ -3104,14 +3518,12 @@ twobyte_insn:
3104 switch (c->modrm_rm) { 3518 switch (c->modrm_rm) {
3105 case 1: 3519 case 1:
3106 rc = kvm_fix_hypercall(ctxt->vcpu); 3520 rc = kvm_fix_hypercall(ctxt->vcpu);
3107 if (rc != X86EMUL_CONTINUE)
3108 goto done;
3109 break; 3521 break;
3110 default: 3522 default:
3111 goto cannot_emulate; 3523 goto cannot_emulate;
3112 } 3524 }
3113 } else { 3525 } else {
3114 rc = read_descriptor(ctxt, ops, c->src.ptr, 3526 rc = read_descriptor(ctxt, ops, c->src.addr.mem,
3115 &size, &address, 3527 &size, &address,
3116 c->op_bytes); 3528 c->op_bytes);
3117 if (rc != X86EMUL_CONTINUE) 3529 if (rc != X86EMUL_CONTINUE)
@@ -3126,7 +3538,7 @@ twobyte_insn:
3126 c->dst.val = ops->get_cr(0, ctxt->vcpu); 3538 c->dst.val = ops->get_cr(0, ctxt->vcpu);
3127 break; 3539 break;
3128 case 6: /* lmsw */ 3540 case 6: /* lmsw */
3129 ops->set_cr(0, (ops->get_cr(0, ctxt->vcpu) & ~0x0ful) | 3541 ops->set_cr(0, (ops->get_cr(0, ctxt->vcpu) & ~0x0eul) |
3130 (c->src.val & 0x0f), ctxt->vcpu); 3542 (c->src.val & 0x0f), ctxt->vcpu);
3131 c->dst.type = OP_NONE; 3543 c->dst.type = OP_NONE;
3132 break; 3544 break;
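Changing the mask from ~0x0ful to ~0x0eul preserves CR0.PE across lmsw, matching the architectural rule that lmsw can set PE but never clear it; only MP, EM and TS are fully replaced by the operand. The update in isolation:

	static unsigned long lmsw_new_cr0(unsigned long cr0, unsigned int msw)
	{
		/* keep PE (bit 0) if already set; take MP/EM/TS from the operand */
		return (cr0 & ~0x0eul) | (msw & 0x0f);
	}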
@@ -3134,7 +3546,7 @@ twobyte_insn:
3134 emulate_ud(ctxt); 3546 emulate_ud(ctxt);
3135 goto done; 3547 goto done;
3136 case 7: /* invlpg*/ 3548 case 7: /* invlpg*/
3137 emulate_invlpg(ctxt->vcpu, c->modrm_ea); 3549 emulate_invlpg(ctxt->vcpu, c->src.addr.mem);
3138 /* Disable writeback. */ 3550 /* Disable writeback. */
3139 c->dst.type = OP_NONE; 3551 c->dst.type = OP_NONE;
3140 break; 3552 break;
@@ -3144,23 +3556,16 @@ twobyte_insn:
3144 break; 3556 break;
3145 case 0x05: /* syscall */ 3557 case 0x05: /* syscall */
3146 rc = emulate_syscall(ctxt, ops); 3558 rc = emulate_syscall(ctxt, ops);
3147 if (rc != X86EMUL_CONTINUE)
3148 goto done;
3149 else
3150 goto writeback;
3151 break; 3559 break;
3152 case 0x06: 3560 case 0x06:
3153 emulate_clts(ctxt->vcpu); 3561 emulate_clts(ctxt->vcpu);
3154 c->dst.type = OP_NONE;
3155 break; 3562 break;
3156 case 0x09: /* wbinvd */ 3563 case 0x09: /* wbinvd */
3157 kvm_emulate_wbinvd(ctxt->vcpu); 3564 kvm_emulate_wbinvd(ctxt->vcpu);
3158 c->dst.type = OP_NONE;
3159 break; 3565 break;
3160 case 0x08: /* invd */ 3566 case 0x08: /* invd */
3161 case 0x0d: /* GrpP (prefetch) */ 3567 case 0x0d: /* GrpP (prefetch) */
3162 case 0x18: /* Grp16 (prefetch/nop) */ 3568 case 0x18: /* Grp16 (prefetch/nop) */
3163 c->dst.type = OP_NONE;
3164 break; 3569 break;
3165 case 0x20: /* mov cr, reg */ 3570 case 0x20: /* mov cr, reg */
3166 switch (c->modrm_reg) { 3571 switch (c->modrm_reg) {
@@ -3170,8 +3575,7 @@ twobyte_insn:
3170 emulate_ud(ctxt); 3575 emulate_ud(ctxt);
3171 goto done; 3576 goto done;
3172 } 3577 }
3173 c->regs[c->modrm_rm] = ops->get_cr(c->modrm_reg, ctxt->vcpu); 3578 c->dst.val = ops->get_cr(c->modrm_reg, ctxt->vcpu);
3174 c->dst.type = OP_NONE; /* no writeback */
3175 break; 3579 break;
3176 case 0x21: /* mov from dr to reg */ 3580 case 0x21: /* mov from dr to reg */
3177 if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) && 3581 if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
@@ -3179,11 +3583,10 @@ twobyte_insn:
3179 emulate_ud(ctxt); 3583 emulate_ud(ctxt);
3180 goto done; 3584 goto done;
3181 } 3585 }
3182 ops->get_dr(c->modrm_reg, &c->regs[c->modrm_rm], ctxt->vcpu); 3586 ops->get_dr(c->modrm_reg, &c->dst.val, ctxt->vcpu);
3183 c->dst.type = OP_NONE; /* no writeback */
3184 break; 3587 break;
3185 case 0x22: /* mov reg, cr */ 3588 case 0x22: /* mov reg, cr */
3186 if (ops->set_cr(c->modrm_reg, c->modrm_val, ctxt->vcpu)) { 3589 if (ops->set_cr(c->modrm_reg, c->src.val, ctxt->vcpu)) {
3187 emulate_gp(ctxt, 0); 3590 emulate_gp(ctxt, 0);
3188 goto done; 3591 goto done;
3189 } 3592 }
@@ -3196,7 +3599,7 @@ twobyte_insn:
3196 goto done; 3599 goto done;
3197 } 3600 }
3198 3601
3199 if (ops->set_dr(c->modrm_reg, c->regs[c->modrm_rm] & 3602 if (ops->set_dr(c->modrm_reg, c->src.val &
3200 ((ctxt->mode == X86EMUL_MODE_PROT64) ? 3603 ((ctxt->mode == X86EMUL_MODE_PROT64) ?
3201 ~0ULL : ~0U), ctxt->vcpu) < 0) { 3604 ~0ULL : ~0U), ctxt->vcpu) < 0) {
3202 /* #UD condition is already handled by the code above */ 3605 /* #UD condition is already handled by the code above */
@@ -3215,7 +3618,6 @@ twobyte_insn:
3215 goto done; 3618 goto done;
3216 } 3619 }
3217 rc = X86EMUL_CONTINUE; 3620 rc = X86EMUL_CONTINUE;
3218 c->dst.type = OP_NONE;
3219 break; 3621 break;
3220 case 0x32: 3622 case 0x32:
3221 /* rdmsr */ 3623 /* rdmsr */
@@ -3227,21 +3629,12 @@ twobyte_insn:
3227 c->regs[VCPU_REGS_RDX] = msr_data >> 32; 3629 c->regs[VCPU_REGS_RDX] = msr_data >> 32;
3228 } 3630 }
3229 rc = X86EMUL_CONTINUE; 3631 rc = X86EMUL_CONTINUE;
3230 c->dst.type = OP_NONE;
3231 break; 3632 break;
3232 case 0x34: /* sysenter */ 3633 case 0x34: /* sysenter */
3233 rc = emulate_sysenter(ctxt, ops); 3634 rc = emulate_sysenter(ctxt, ops);
3234 if (rc != X86EMUL_CONTINUE)
3235 goto done;
3236 else
3237 goto writeback;
3238 break; 3635 break;
3239 case 0x35: /* sysexit */ 3636 case 0x35: /* sysexit */
3240 rc = emulate_sysexit(ctxt, ops); 3637 rc = emulate_sysexit(ctxt, ops);
3241 if (rc != X86EMUL_CONTINUE)
3242 goto done;
3243 else
3244 goto writeback;
3245 break; 3638 break;
3246 case 0x40 ... 0x4f: /* cmov */ 3639 case 0x40 ... 0x4f: /* cmov */
3247 c->dst.val = c->dst.orig_val = c->src.val; 3640 c->dst.val = c->dst.orig_val = c->src.val;
@@ -3251,15 +3644,15 @@ twobyte_insn:
3251 case 0x80 ... 0x8f: /* jnz rel, etc*/ 3644 case 0x80 ... 0x8f: /* jnz rel, etc*/
3252 if (test_cc(c->b, ctxt->eflags)) 3645 if (test_cc(c->b, ctxt->eflags))
3253 jmp_rel(c, c->src.val); 3646 jmp_rel(c, c->src.val);
3254 c->dst.type = OP_NONE; 3647 break;
3648 case 0x90 ... 0x9f: /* setcc r/m8 */
3649 c->dst.val = test_cc(c->b, ctxt->eflags);
3255 break; 3650 break;
3256 case 0xa0: /* push fs */ 3651 case 0xa0: /* push fs */
3257 emulate_push_sreg(ctxt, ops, VCPU_SREG_FS); 3652 emulate_push_sreg(ctxt, ops, VCPU_SREG_FS);
3258 break; 3653 break;
3259 case 0xa1: /* pop fs */ 3654 case 0xa1: /* pop fs */
3260 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS); 3655 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS);
3261 if (rc != X86EMUL_CONTINUE)
3262 goto done;
3263 break; 3656 break;
3264 case 0xa3: 3657 case 0xa3:
3265 bt: /* bt */ 3658 bt: /* bt */
@@ -3277,13 +3670,9 @@ twobyte_insn:
3277 break; 3670 break;
3278 case 0xa9: /* pop gs */ 3671 case 0xa9: /* pop gs */
3279 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS); 3672 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS);
3280 if (rc != X86EMUL_CONTINUE)
3281 goto done;
3282 break; 3673 break;
3283 case 0xab: 3674 case 0xab:
3284 bts: /* bts */ 3675 bts: /* bts */
3285 /* only subword offset */
3286 c->src.val &= (c->dst.bytes << 3) - 1;
3287 emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags); 3676 emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags);
3288 break; 3677 break;
3289 case 0xac: /* shrd imm8, r, r/m */ 3678 case 0xac: /* shrd imm8, r, r/m */
@@ -3306,15 +3695,22 @@ twobyte_insn:
3306 } else { 3695 } else {
3307 /* Failure: write the value we saw to EAX. */ 3696 /* Failure: write the value we saw to EAX. */
3308 c->dst.type = OP_REG; 3697 c->dst.type = OP_REG;
3309 c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX]; 3698 c->dst.addr.reg = (unsigned long *)&c->regs[VCPU_REGS_RAX];
3310 } 3699 }
3311 break; 3700 break;
3701 case 0xb2: /* lss */
3702 rc = emulate_load_segment(ctxt, ops, VCPU_SREG_SS);
3703 break;
3312 case 0xb3: 3704 case 0xb3:
3313 btr: /* btr */ 3705 btr: /* btr */
3314 /* only subword offset */
3315 c->src.val &= (c->dst.bytes << 3) - 1;
3316 emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags); 3706 emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags);
3317 break; 3707 break;
3708 case 0xb4: /* lfs */
3709 rc = emulate_load_segment(ctxt, ops, VCPU_SREG_FS);
3710 break;
3711 case 0xb5: /* lgs */
3712 rc = emulate_load_segment(ctxt, ops, VCPU_SREG_GS);
3713 break;
3318 case 0xb6 ... 0xb7: /* movzx */ 3714 case 0xb6 ... 0xb7: /* movzx */
3319 c->dst.bytes = c->op_bytes; 3715 c->dst.bytes = c->op_bytes;
3320 c->dst.val = (c->d & ByteOp) ? (u8) c->src.val 3716 c->dst.val = (c->d & ByteOp) ? (u8) c->src.val
@@ -3334,15 +3730,43 @@ twobyte_insn:
3334 break; 3730 break;
3335 case 0xbb: 3731 case 0xbb:
3336 btc: /* btc */ 3732 btc: /* btc */
3337 /* only subword offset */
3338 c->src.val &= (c->dst.bytes << 3) - 1;
3339 emulate_2op_SrcV_nobyte("btc", c->src, c->dst, ctxt->eflags); 3733 emulate_2op_SrcV_nobyte("btc", c->src, c->dst, ctxt->eflags);
3340 break; 3734 break;
3735 case 0xbc: { /* bsf */
3736 u8 zf;
3737 __asm__ ("bsf %2, %0; setz %1"
3738 : "=r"(c->dst.val), "=q"(zf)
3739 : "r"(c->src.val));
3740 ctxt->eflags &= ~X86_EFLAGS_ZF;
3741 if (zf) {
3742 ctxt->eflags |= X86_EFLAGS_ZF;
3743 c->dst.type = OP_NONE; /* Disable writeback. */
3744 }
3745 break;
3746 }
3747 case 0xbd: { /* bsr */
3748 u8 zf;
3749 __asm__ ("bsr %2, %0; setz %1"
3750 : "=r"(c->dst.val), "=q"(zf)
3751 : "r"(c->src.val));
3752 ctxt->eflags &= ~X86_EFLAGS_ZF;
3753 if (zf) {
3754 ctxt->eflags |= X86_EFLAGS_ZF;
3755 c->dst.type = OP_NONE; /* Disable writeback. */
3756 }
3757 break;
3758 }
3341 case 0xbe ... 0xbf: /* movsx */ 3759 case 0xbe ... 0xbf: /* movsx */
3342 c->dst.bytes = c->op_bytes; 3760 c->dst.bytes = c->op_bytes;
3343 c->dst.val = (c->d & ByteOp) ? (s8) c->src.val : 3761 c->dst.val = (c->d & ByteOp) ? (s8) c->src.val :
3344 (s16) c->src.val; 3762 (s16) c->src.val;
3345 break; 3763 break;
3764 case 0xc0 ... 0xc1: /* xadd */
3765 emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
3766 /* Write back the register source. */
3767 c->src.val = c->dst.orig_val;
3768 write_register_operand(&c->src);
3769 break;
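xadd stores the sum in the destination and the destination's original value in the source register, which is why the code above writes dst.orig_val back through write_register_operand(). The same exchange-and-add on plain integers:

	static void xadd_model(unsigned long *dst, unsigned long *src)
	{
		unsigned long old_dst = *dst;

		*dst = old_dst + *src;	/* destination receives the sum */
		*src = old_dst;		/* source receives the old destination */
	}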
3346 case 0xc3: /* movnti */ 3770 case 0xc3: /* movnti */
3347 c->dst.bytes = c->op_bytes; 3771 c->dst.bytes = c->op_bytes;
3348 c->dst.val = (c->op_bytes == 4) ? (u32) c->src.val : 3772 c->dst.val = (c->op_bytes == 4) ? (u32) c->src.val :
@@ -3350,10 +3774,14 @@ twobyte_insn:
3350 break; 3774 break;
3351 case 0xc7: /* Grp9 (cmpxchg8b) */ 3775 case 0xc7: /* Grp9 (cmpxchg8b) */
3352 rc = emulate_grp9(ctxt, ops); 3776 rc = emulate_grp9(ctxt, ops);
3353 if (rc != X86EMUL_CONTINUE)
3354 goto done;
3355 break; 3777 break;
3778 default:
3779 goto cannot_emulate;
3356 } 3780 }
3781
3782 if (rc != X86EMUL_CONTINUE)
3783 goto done;
3784
3357 goto writeback; 3785 goto writeback;
3358 3786
3359cannot_emulate: 3787cannot_emulate:
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index ddeb2314b522..efad72385058 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -5,7 +5,7 @@
5 * Copyright (c) 2006 Intel Corporation 5 * Copyright (c) 2006 Intel Corporation
6 * Copyright (c) 2007 Keir Fraser, XenSource Inc 6 * Copyright (c) 2007 Keir Fraser, XenSource Inc
7 * Copyright (c) 2008 Intel Corporation 7 * Copyright (c) 2008 Intel Corporation
8 * Copyright 2009 Red Hat, Inc. and/or its affilates. 8 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
9 * 9 *
10 * Permission is hereby granted, free of charge, to any person obtaining a copy 10 * Permission is hereby granted, free of charge, to any person obtaining a copy
11 * of this software and associated documentation files (the "Software"), to deal 11 * of this software and associated documentation files (the "Software"), to deal
@@ -232,15 +232,6 @@ static void pit_latch_status(struct kvm *kvm, int channel)
232 } 232 }
233} 233}
234 234
235int pit_has_pending_timer(struct kvm_vcpu *vcpu)
236{
237 struct kvm_pit *pit = vcpu->kvm->arch.vpit;
238
239 if (pit && kvm_vcpu_is_bsp(vcpu) && pit->pit_state.irq_ack)
240 return atomic_read(&pit->pit_state.pit_timer.pending);
241 return 0;
242}
243
244static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian) 235static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
245{ 236{
246 struct kvm_kpit_state *ps = container_of(kian, struct kvm_kpit_state, 237 struct kvm_kpit_state *ps = container_of(kian, struct kvm_kpit_state,
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index 4b7b73ce2098..f628234fbeca 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (c) 2003-2004 Fabrice Bellard 4 * Copyright (c) 2003-2004 Fabrice Bellard
5 * Copyright (c) 2007 Intel Corporation 5 * Copyright (c) 2007 Intel Corporation
6 * Copyright 2009 Red Hat, Inc. and/or its affilates. 6 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
7 * 7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy 8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal 9 * of this software and associated documentation files (the "Software"), to deal
@@ -39,7 +39,7 @@ static void pic_irq_request(struct kvm *kvm, int level);
39static void pic_lock(struct kvm_pic *s) 39static void pic_lock(struct kvm_pic *s)
40 __acquires(&s->lock) 40 __acquires(&s->lock)
41{ 41{
42 raw_spin_lock(&s->lock); 42 spin_lock(&s->lock);
43} 43}
44 44
45static void pic_unlock(struct kvm_pic *s) 45static void pic_unlock(struct kvm_pic *s)
@@ -51,7 +51,7 @@ static void pic_unlock(struct kvm_pic *s)
51 51
52 s->wakeup_needed = false; 52 s->wakeup_needed = false;
53 53
54 raw_spin_unlock(&s->lock); 54 spin_unlock(&s->lock);
55 55
56 if (wakeup) { 56 if (wakeup) {
57 kvm_for_each_vcpu(i, vcpu, s->kvm) { 57 kvm_for_each_vcpu(i, vcpu, s->kvm) {
@@ -67,6 +67,7 @@ static void pic_unlock(struct kvm_pic *s)
67 if (!found) 67 if (!found)
68 return; 68 return;
69 69
70 kvm_make_request(KVM_REQ_EVENT, found);
70 kvm_vcpu_kick(found); 71 kvm_vcpu_kick(found);
71 } 72 }
72} 73}
@@ -308,13 +309,17 @@ static void pic_ioport_write(void *opaque, u32 addr, u32 val)
308 addr &= 1; 309 addr &= 1;
309 if (addr == 0) { 310 if (addr == 0) {
310 if (val & 0x10) { 311 if (val & 0x10) {
311 kvm_pic_reset(s); /* init */
312 /*
313 * deassert a pending interrupt
314 */
315 pic_irq_request(s->pics_state->kvm, 0);
316 s->init_state = 1;
317 s->init4 = val & 1; 312 s->init4 = val & 1;
313 s->last_irr = 0;
314 s->imr = 0;
315 s->priority_add = 0;
316 s->special_mask = 0;
317 s->read_reg_select = 0;
318 if (!s->init4) {
319 s->special_fully_nested_mode = 0;
320 s->auto_eoi = 0;
321 }
322 s->init_state = 1;
318 if (val & 0x02) 323 if (val & 0x02)
319 printk(KERN_ERR "single mode not supported"); 324 printk(KERN_ERR "single mode not supported");
320 if (val & 0x08) 325 if (val & 0x08)
@@ -564,7 +569,7 @@ struct kvm_pic *kvm_create_pic(struct kvm *kvm)
564 s = kzalloc(sizeof(struct kvm_pic), GFP_KERNEL); 569 s = kzalloc(sizeof(struct kvm_pic), GFP_KERNEL);
565 if (!s) 570 if (!s)
566 return NULL; 571 return NULL;
567 raw_spin_lock_init(&s->lock); 572 spin_lock_init(&s->lock);
568 s->kvm = kvm; 573 s->kvm = kvm;
569 s->pics[0].elcr_mask = 0xf8; 574 s->pics[0].elcr_mask = 0xf8;
570 s->pics[1].elcr_mask = 0xde; 575 s->pics[1].elcr_mask = 0xde;
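As a side note on the ICW1 handling rewritten above, the bits pic_ioport_write() tests follow the 8259A programming model: bit 4 marks the byte as ICW1, bit 0 (IC4) says whether ICW4 follows, bit 1 selects single mode and bit 3 level-triggered mode. The small reference sketch below is mine (decode_icw1 is not a KVM helper).

/* Decode the ICW1 bits checked by the emulated 8259A init path. */
#include <stdio.h>

static void decode_icw1(unsigned char val)
{
	if (!(val & 0x10)) {
		printf("0x%02x: not ICW1\n", val);
		return;
	}
	printf("0x%02x: ICW1, IC4=%d, single=%d, level-triggered=%d\n",
	       val, val & 0x01, !!(val & 0x02), !!(val & 0x08));
}

int main(void)
{
	decode_icw1(0x11); /* typical PC init: ICW1 with ICW4 to follow */
	decode_icw1(0x13); /* would request single mode (unsupported above) */
	return 0;
}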
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index 2095a049835e..7e06ba1618bd 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * irq.c: API for in kernel interrupt controller 2 * irq.c: API for in kernel interrupt controller
3 * Copyright (c) 2007, Intel Corporation. 3 * Copyright (c) 2007, Intel Corporation.
4 * Copyright 2009 Red Hat, Inc. and/or its affilates. 4 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -33,12 +33,7 @@
33 */ 33 */
34int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) 34int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
35{ 35{
36 int ret; 36 return apic_has_pending_timer(vcpu);
37
38 ret = pit_has_pending_timer(vcpu);
39 ret |= apic_has_pending_timer(vcpu);
40
41 return ret;
42} 37}
43EXPORT_SYMBOL(kvm_cpu_has_pending_timer); 38EXPORT_SYMBOL(kvm_cpu_has_pending_timer);
44 39
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
index 63c314502993..ba910d149410 100644
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -60,7 +60,7 @@ struct kvm_kpic_state {
60}; 60};
61 61
62struct kvm_pic { 62struct kvm_pic {
63 raw_spinlock_t lock; 63 spinlock_t lock;
64 bool wakeup_needed; 64 bool wakeup_needed;
65 unsigned pending_acks; 65 unsigned pending_acks;
66 struct kvm *kvm; 66 struct kvm *kvm;
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 6491ac8e755b..975bb45329a1 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -42,7 +42,14 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
42 (unsigned long *)&vcpu->arch.regs_avail)) 42 (unsigned long *)&vcpu->arch.regs_avail))
43 kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_PDPTR); 43 kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_PDPTR);
44 44
45 return vcpu->arch.pdptrs[index]; 45 return vcpu->arch.walk_mmu->pdptrs[index];
46}
47
48static inline u64 kvm_pdptr_read_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, int index)
49{
50 load_pdptrs(vcpu, mmu, mmu->get_cr3(vcpu));
51
52 return mmu->pdptrs[index];
46} 53}
47 54
48static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask) 55static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 22b06f7660f4..413f8973a855 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -5,7 +5,7 @@
5 * Copyright (C) 2006 Qumranet, Inc. 5 * Copyright (C) 2006 Qumranet, Inc.
6 * Copyright (C) 2007 Novell 6 * Copyright (C) 2007 Novell
7 * Copyright (C) 2007 Intel 7 * Copyright (C) 2007 Intel
8 * Copyright 2009 Red Hat, Inc. and/or its affilates. 8 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
9 * 9 *
10 * Authors: 10 * Authors:
11 * Dor Laor <dor.laor@qumranet.com> 11 * Dor Laor <dor.laor@qumranet.com>
@@ -259,9 +259,10 @@ static inline int apic_find_highest_isr(struct kvm_lapic *apic)
259 259
260static void apic_update_ppr(struct kvm_lapic *apic) 260static void apic_update_ppr(struct kvm_lapic *apic)
261{ 261{
262 u32 tpr, isrv, ppr; 262 u32 tpr, isrv, ppr, old_ppr;
263 int isr; 263 int isr;
264 264
265 old_ppr = apic_get_reg(apic, APIC_PROCPRI);
265 tpr = apic_get_reg(apic, APIC_TASKPRI); 266 tpr = apic_get_reg(apic, APIC_TASKPRI);
266 isr = apic_find_highest_isr(apic); 267 isr = apic_find_highest_isr(apic);
267 isrv = (isr != -1) ? isr : 0; 268 isrv = (isr != -1) ? isr : 0;
@@ -274,7 +275,10 @@ static void apic_update_ppr(struct kvm_lapic *apic)
274 apic_debug("vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x", 275 apic_debug("vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x",
275 apic, ppr, isr, isrv); 276 apic, ppr, isr, isrv);
276 277
277 apic_set_reg(apic, APIC_PROCPRI, ppr); 278 if (old_ppr != ppr) {
279 apic_set_reg(apic, APIC_PROCPRI, ppr);
280 kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
281 }
278} 282}
279 283
280static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr) 284static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
@@ -391,6 +395,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
391 break; 395 break;
392 } 396 }
393 397
398 kvm_make_request(KVM_REQ_EVENT, vcpu);
394 kvm_vcpu_kick(vcpu); 399 kvm_vcpu_kick(vcpu);
395 break; 400 break;
396 401
@@ -416,6 +421,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
416 "INIT on a runnable vcpu %d\n", 421 "INIT on a runnable vcpu %d\n",
417 vcpu->vcpu_id); 422 vcpu->vcpu_id);
418 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; 423 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
424 kvm_make_request(KVM_REQ_EVENT, vcpu);
419 kvm_vcpu_kick(vcpu); 425 kvm_vcpu_kick(vcpu);
420 } else { 426 } else {
421 apic_debug("Ignoring de-assert INIT to vcpu %d\n", 427 apic_debug("Ignoring de-assert INIT to vcpu %d\n",
@@ -430,6 +436,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
430 result = 1; 436 result = 1;
431 vcpu->arch.sipi_vector = vector; 437 vcpu->arch.sipi_vector = vector;
432 vcpu->arch.mp_state = KVM_MP_STATE_SIPI_RECEIVED; 438 vcpu->arch.mp_state = KVM_MP_STATE_SIPI_RECEIVED;
439 kvm_make_request(KVM_REQ_EVENT, vcpu);
433 kvm_vcpu_kick(vcpu); 440 kvm_vcpu_kick(vcpu);
434 } 441 }
435 break; 442 break;
@@ -475,6 +482,7 @@ static void apic_set_eoi(struct kvm_lapic *apic)
475 trigger_mode = IOAPIC_EDGE_TRIG; 482 trigger_mode = IOAPIC_EDGE_TRIG;
476 if (!(apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)) 483 if (!(apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI))
477 kvm_ioapic_update_eoi(apic->vcpu->kvm, vector, trigger_mode); 484 kvm_ioapic_update_eoi(apic->vcpu->kvm, vector, trigger_mode);
485 kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
478} 486}
479 487
480static void apic_send_ipi(struct kvm_lapic *apic) 488static void apic_send_ipi(struct kvm_lapic *apic)
@@ -1151,6 +1159,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu)
1151 update_divide_count(apic); 1159 update_divide_count(apic);
1152 start_apic_timer(apic); 1160 start_apic_timer(apic);
1153 apic->irr_pending = true; 1161 apic->irr_pending = true;
1162 kvm_make_request(KVM_REQ_EVENT, vcpu);
1154} 1163}
1155 1164
1156void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu) 1165void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 311f6dad8951..908ea5464a51 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -7,7 +7,7 @@
7 * MMU support 7 * MMU support
8 * 8 *
9 * Copyright (C) 2006 Qumranet, Inc. 9 * Copyright (C) 2006 Qumranet, Inc.
10 * Copyright 2010 Red Hat, Inc. and/or its affilates. 10 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
11 * 11 *
12 * Authors: 12 * Authors:
13 * Yaniv Kamay <yaniv@qumranet.com> 13 * Yaniv Kamay <yaniv@qumranet.com>
@@ -49,15 +49,25 @@
49 */ 49 */
50bool tdp_enabled = false; 50bool tdp_enabled = false;
51 51
52#undef MMU_DEBUG 52enum {
53 AUDIT_PRE_PAGE_FAULT,
54 AUDIT_POST_PAGE_FAULT,
55 AUDIT_PRE_PTE_WRITE,
56 AUDIT_POST_PTE_WRITE,
57 AUDIT_PRE_SYNC,
58 AUDIT_POST_SYNC
59};
53 60
54#undef AUDIT 61char *audit_point_name[] = {
62 "pre page fault",
63 "post page fault",
64 "pre pte write",
65 "post pte write",
66 "pre sync",
67 "post sync"
68};
55 69
56#ifdef AUDIT 70#undef MMU_DEBUG
57static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
58#else
59static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
60#endif
61 71
62#ifdef MMU_DEBUG 72#ifdef MMU_DEBUG
63 73
@@ -71,7 +81,7 @@ static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
71 81
72#endif 82#endif
73 83
74#if defined(MMU_DEBUG) || defined(AUDIT) 84#ifdef MMU_DEBUG
75static int dbg = 0; 85static int dbg = 0;
76module_param(dbg, bool, 0644); 86module_param(dbg, bool, 0644);
77#endif 87#endif
@@ -89,6 +99,8 @@ module_param(oos_shadow, bool, 0644);
89 } 99 }
90#endif 100#endif
91 101
102#define PTE_PREFETCH_NUM 8
103
92#define PT_FIRST_AVAIL_BITS_SHIFT 9 104#define PT_FIRST_AVAIL_BITS_SHIFT 9
93#define PT64_SECOND_AVAIL_BITS_SHIFT 52 105#define PT64_SECOND_AVAIL_BITS_SHIFT 52
94 106
@@ -178,6 +190,7 @@ typedef void (*mmu_parent_walk_fn) (struct kvm_mmu_page *sp, u64 *spte);
178static struct kmem_cache *pte_chain_cache; 190static struct kmem_cache *pte_chain_cache;
179static struct kmem_cache *rmap_desc_cache; 191static struct kmem_cache *rmap_desc_cache;
180static struct kmem_cache *mmu_page_header_cache; 192static struct kmem_cache *mmu_page_header_cache;
193static struct percpu_counter kvm_total_used_mmu_pages;
181 194
182static u64 __read_mostly shadow_trap_nonpresent_pte; 195static u64 __read_mostly shadow_trap_nonpresent_pte;
183static u64 __read_mostly shadow_notrap_nonpresent_pte; 196static u64 __read_mostly shadow_notrap_nonpresent_pte;
@@ -299,18 +312,50 @@ static u64 __xchg_spte(u64 *sptep, u64 new_spte)
299#endif 312#endif
300} 313}
301 314
315static bool spte_has_volatile_bits(u64 spte)
316{
317 if (!shadow_accessed_mask)
318 return false;
319
320 if (!is_shadow_present_pte(spte))
321 return false;
322
323 if ((spte & shadow_accessed_mask) &&
324 (!is_writable_pte(spte) || (spte & shadow_dirty_mask)))
325 return false;
326
327 return true;
328}
329
330static bool spte_is_bit_cleared(u64 old_spte, u64 new_spte, u64 bit_mask)
331{
332 return (old_spte & bit_mask) && !(new_spte & bit_mask);
333}
334
302static void update_spte(u64 *sptep, u64 new_spte) 335static void update_spte(u64 *sptep, u64 new_spte)
303{ 336{
304 u64 old_spte; 337 u64 mask, old_spte = *sptep;
338
339 WARN_ON(!is_rmap_spte(new_spte));
340
341 new_spte |= old_spte & shadow_dirty_mask;
305 342
306 if (!shadow_accessed_mask || (new_spte & shadow_accessed_mask) || 343 mask = shadow_accessed_mask;
307 !is_rmap_spte(*sptep)) 344 if (is_writable_pte(old_spte))
345 mask |= shadow_dirty_mask;
346
347 if (!spte_has_volatile_bits(old_spte) || (new_spte & mask) == mask)
308 __set_spte(sptep, new_spte); 348 __set_spte(sptep, new_spte);
309 else { 349 else
310 old_spte = __xchg_spte(sptep, new_spte); 350 old_spte = __xchg_spte(sptep, new_spte);
311 if (old_spte & shadow_accessed_mask) 351
312 mark_page_accessed(pfn_to_page(spte_to_pfn(old_spte))); 352 if (!shadow_accessed_mask)
313 } 353 return;
354
355 if (spte_is_bit_cleared(old_spte, new_spte, shadow_accessed_mask))
356 kvm_set_pfn_accessed(spte_to_pfn(old_spte));
357 if (spte_is_bit_cleared(old_spte, new_spte, shadow_dirty_mask))
358 kvm_set_pfn_dirty(spte_to_pfn(old_spte));
314} 359}
315 360
316static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, 361static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
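For readers following the update_spte() rework above, here is a minimal userspace sketch of the idea, assuming made-up bit positions and plain C11 atomics rather than the kernel's __set_spte()/__xchg_spte() helpers: if the old SPTE still has accessed/dirty bits that hardware could set concurrently, the new value has to be installed with an atomic exchange so those bits can be read back and propagated; otherwise a plain store is enough.

/* Simplified analogue of spte_has_volatile_bits()/update_spte(). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SPTE_PRESENT  (1ull << 0)
#define SPTE_WRITABLE (1ull << 1)
#define SPTE_ACCESSED (1ull << 5)
#define SPTE_DIRTY    (1ull << 6)

static bool spte_has_volatile_bits(uint64_t spte)
{
	if (!(spte & SPTE_PRESENT))
		return false;
	/* Already accessed and either read-only or already dirty:
	 * hardware has nothing left to set behind our back. */
	if ((spte & SPTE_ACCESSED) &&
	    (!(spte & SPTE_WRITABLE) || (spte & SPTE_DIRTY)))
		return false;
	return true;
}

static void update_spte(_Atomic uint64_t *sptep, uint64_t new_spte)
{
	uint64_t old = atomic_load(sptep);

	if (!spte_has_volatile_bits(old))
		atomic_store(sptep, new_spte);          /* plain overwrite */
	else
		old = atomic_exchange(sptep, new_spte); /* keep A/D bits */

	if (old & SPTE_ACCESSED)
		printf("old spte was accessed\n");
	if (old & SPTE_DIRTY)
		printf("old spte was dirty\n");
}

int main(void)
{
	_Atomic uint64_t spte = SPTE_PRESENT | SPTE_WRITABLE | SPTE_ACCESSED;

	update_spte(&spte, SPTE_PRESENT); /* writable+clean: dirty could still appear */
	return 0;
}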
@@ -367,7 +412,7 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
367 if (r) 412 if (r)
368 goto out; 413 goto out;
369 r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache, 414 r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
370 rmap_desc_cache, 4); 415 rmap_desc_cache, 4 + PTE_PREFETCH_NUM);
371 if (r) 416 if (r)
372 goto out; 417 goto out;
373 r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8); 418 r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
@@ -591,6 +636,7 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
591 desc->sptes[0] = (u64 *)*rmapp; 636 desc->sptes[0] = (u64 *)*rmapp;
592 desc->sptes[1] = spte; 637 desc->sptes[1] = spte;
593 *rmapp = (unsigned long)desc | 1; 638 *rmapp = (unsigned long)desc | 1;
639 ++count;
594 } else { 640 } else {
595 rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte); 641 rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
596 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul); 642 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
@@ -603,7 +649,7 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
603 desc = desc->more; 649 desc = desc->more;
604 } 650 }
605 for (i = 0; desc->sptes[i]; ++i) 651 for (i = 0; desc->sptes[i]; ++i)
606 ; 652 ++count;
607 desc->sptes[i] = spte; 653 desc->sptes[i] = spte;
608 } 654 }
609 return count; 655 return count;
@@ -645,18 +691,17 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
645 gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt); 691 gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
646 rmapp = gfn_to_rmap(kvm, gfn, sp->role.level); 692 rmapp = gfn_to_rmap(kvm, gfn, sp->role.level);
647 if (!*rmapp) { 693 if (!*rmapp) {
648 printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte); 694 printk(KERN_ERR "rmap_remove: %p 0->BUG\n", spte);
649 BUG(); 695 BUG();
650 } else if (!(*rmapp & 1)) { 696 } else if (!(*rmapp & 1)) {
651 rmap_printk("rmap_remove: %p %llx 1->0\n", spte, *spte); 697 rmap_printk("rmap_remove: %p 1->0\n", spte);
652 if ((u64 *)*rmapp != spte) { 698 if ((u64 *)*rmapp != spte) {
653 printk(KERN_ERR "rmap_remove: %p %llx 1->BUG\n", 699 printk(KERN_ERR "rmap_remove: %p 1->BUG\n", spte);
654 spte, *spte);
655 BUG(); 700 BUG();
656 } 701 }
657 *rmapp = 0; 702 *rmapp = 0;
658 } else { 703 } else {
659 rmap_printk("rmap_remove: %p %llx many->many\n", spte, *spte); 704 rmap_printk("rmap_remove: %p many->many\n", spte);
660 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul); 705 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
661 prev_desc = NULL; 706 prev_desc = NULL;
662 while (desc) { 707 while (desc) {
@@ -670,7 +715,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
670 prev_desc = desc; 715 prev_desc = desc;
671 desc = desc->more; 716 desc = desc->more;
672 } 717 }
673 pr_err("rmap_remove: %p %llx many->many\n", spte, *spte); 718 pr_err("rmap_remove: %p many->many\n", spte);
674 BUG(); 719 BUG();
675 } 720 }
676} 721}
@@ -680,18 +725,18 @@ static void set_spte_track_bits(u64 *sptep, u64 new_spte)
680 pfn_t pfn; 725 pfn_t pfn;
681 u64 old_spte = *sptep; 726 u64 old_spte = *sptep;
682 727
683 if (!shadow_accessed_mask || !is_shadow_present_pte(old_spte) || 728 if (!spte_has_volatile_bits(old_spte))
684 old_spte & shadow_accessed_mask) {
685 __set_spte(sptep, new_spte); 729 __set_spte(sptep, new_spte);
686 } else 730 else
687 old_spte = __xchg_spte(sptep, new_spte); 731 old_spte = __xchg_spte(sptep, new_spte);
688 732
689 if (!is_rmap_spte(old_spte)) 733 if (!is_rmap_spte(old_spte))
690 return; 734 return;
735
691 pfn = spte_to_pfn(old_spte); 736 pfn = spte_to_pfn(old_spte);
692 if (!shadow_accessed_mask || old_spte & shadow_accessed_mask) 737 if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
693 kvm_set_pfn_accessed(pfn); 738 kvm_set_pfn_accessed(pfn);
694 if (is_writable_pte(old_spte)) 739 if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
695 kvm_set_pfn_dirty(pfn); 740 kvm_set_pfn_dirty(pfn);
696} 741}
697 742
@@ -746,13 +791,6 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
746 } 791 }
747 spte = rmap_next(kvm, rmapp, spte); 792 spte = rmap_next(kvm, rmapp, spte);
748 } 793 }
749 if (write_protected) {
750 pfn_t pfn;
751
752 spte = rmap_next(kvm, rmapp, NULL);
753 pfn = spte_to_pfn(*spte);
754 kvm_set_pfn_dirty(pfn);
755 }
756 794
757 /* check for huge page mappings */ 795 /* check for huge page mappings */
758 for (i = PT_DIRECTORY_LEVEL; 796 for (i = PT_DIRECTORY_LEVEL;
@@ -947,6 +985,18 @@ static int is_empty_shadow_page(u64 *spt)
947} 985}
948#endif 986#endif
949 987
988/*
 989 * This value is the sum of all of the kvm instances'
 990 * kvm->arch.n_used_mmu_pages values. We need a global,
 991 * aggregate version in order to make the slab shrinker
 992 * faster.
993 */
994static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr)
995{
996 kvm->arch.n_used_mmu_pages += nr;
997 percpu_counter_add(&kvm_total_used_mmu_pages, nr);
998}
999
950static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp) 1000static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
951{ 1001{
952 ASSERT(is_empty_shadow_page(sp->spt)); 1002 ASSERT(is_empty_shadow_page(sp->spt));
@@ -956,7 +1006,7 @@ static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
956 if (!sp->role.direct) 1006 if (!sp->role.direct)
957 __free_page(virt_to_page(sp->gfns)); 1007 __free_page(virt_to_page(sp->gfns));
958 kmem_cache_free(mmu_page_header_cache, sp); 1008 kmem_cache_free(mmu_page_header_cache, sp);
959 ++kvm->arch.n_free_mmu_pages; 1009 kvm_mod_used_mmu_pages(kvm, -1);
960} 1010}
961 1011
962static unsigned kvm_page_table_hashfn(gfn_t gfn) 1012static unsigned kvm_page_table_hashfn(gfn_t gfn)
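The comment above motivates the new global counter; the toy program below (plain atomics standing in for the kernel's percpu_counter, and a made-up struct vm) sketches the same bookkeeping: every per-VM update also adjusts one aggregate, so a shrinker-style check reads a single counter instead of walking every VM.

/* Per-instance count plus a global aggregate, updated together. */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic long total_used_mmu_pages; /* analogue of kvm_total_used_mmu_pages */

struct vm {
	long n_used_mmu_pages;
};

static void mod_used_mmu_pages(struct vm *vm, int nr)
{
	vm->n_used_mmu_pages += nr;
	atomic_fetch_add(&total_used_mmu_pages, nr);
}

int main(void)
{
	struct vm a = { 0 }, b = { 0 };

	mod_used_mmu_pages(&a, +3);  /* three shadow pages allocated */
	mod_used_mmu_pages(&b, +2);
	mod_used_mmu_pages(&a, -1);  /* one freed */

	/* A shrinker-style check only needs the aggregate. */
	printf("global used pages: %ld\n",
	       atomic_load(&total_used_mmu_pages)); /* prints 4 */
	return 0;
}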
@@ -979,7 +1029,7 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
979 bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS); 1029 bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
980 sp->multimapped = 0; 1030 sp->multimapped = 0;
981 sp->parent_pte = parent_pte; 1031 sp->parent_pte = parent_pte;
982 --vcpu->kvm->arch.n_free_mmu_pages; 1032 kvm_mod_used_mmu_pages(vcpu->kvm, +1);
983 return sp; 1033 return sp;
984} 1034}
985 1035
@@ -1403,7 +1453,8 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
1403 if (role.direct) 1453 if (role.direct)
1404 role.cr4_pae = 0; 1454 role.cr4_pae = 0;
1405 role.access = access; 1455 role.access = access;
1406 if (!tdp_enabled && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) { 1456 if (!vcpu->arch.mmu.direct_map
1457 && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
1407 quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level)); 1458 quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
1408 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1; 1459 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
1409 role.quadrant = quadrant; 1460 role.quadrant = quadrant;
@@ -1458,6 +1509,12 @@ static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
1458 iterator->addr = addr; 1509 iterator->addr = addr;
1459 iterator->shadow_addr = vcpu->arch.mmu.root_hpa; 1510 iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
1460 iterator->level = vcpu->arch.mmu.shadow_root_level; 1511 iterator->level = vcpu->arch.mmu.shadow_root_level;
1512
1513 if (iterator->level == PT64_ROOT_LEVEL &&
1514 vcpu->arch.mmu.root_level < PT64_ROOT_LEVEL &&
1515 !vcpu->arch.mmu.direct_map)
1516 --iterator->level;
1517
1461 if (iterator->level == PT32E_ROOT_LEVEL) { 1518 if (iterator->level == PT32E_ROOT_LEVEL) {
1462 iterator->shadow_addr 1519 iterator->shadow_addr
1463 = vcpu->arch.mmu.pae_root[(addr >> 30) & 3]; 1520 = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
@@ -1665,41 +1722,31 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
1665 1722
1666/* 1723/*
1667 * Changing the number of mmu pages allocated to the vm 1724 * Changing the number of mmu pages allocated to the vm
1668 * Note: if kvm_nr_mmu_pages is too small, you will get a deadlock 1725 * Note: if goal_nr_mmu_pages is too small, you will get a deadlock
1669 */ 1726 */
1670void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages) 1727void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
1671{ 1728{
1672 int used_pages;
1673 LIST_HEAD(invalid_list); 1729 LIST_HEAD(invalid_list);
1674
1675 used_pages = kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages;
1676 used_pages = max(0, used_pages);
1677
1678 /* 1730 /*
1679 * If we set the number of mmu pages to be smaller than the 1731 * If we set the number of mmu pages to be smaller than the
1680 * number of active pages, we must free some mmu pages before we 1732 * number of active pages, we must free some mmu pages before we
1681 * change the value. 1733 * change the value.
1682 */ 1734 */
1683 1735
1684 if (used_pages > kvm_nr_mmu_pages) { 1736 if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
1685 while (used_pages > kvm_nr_mmu_pages && 1737 while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages &&
1686 !list_empty(&kvm->arch.active_mmu_pages)) { 1738 !list_empty(&kvm->arch.active_mmu_pages)) {
1687 struct kvm_mmu_page *page; 1739 struct kvm_mmu_page *page;
1688 1740
1689 page = container_of(kvm->arch.active_mmu_pages.prev, 1741 page = container_of(kvm->arch.active_mmu_pages.prev,
1690 struct kvm_mmu_page, link); 1742 struct kvm_mmu_page, link);
1691 used_pages -= kvm_mmu_prepare_zap_page(kvm, page, 1743 kvm_mmu_prepare_zap_page(kvm, page, &invalid_list);
1692 &invalid_list); 1744 kvm_mmu_commit_zap_page(kvm, &invalid_list);
1693 } 1745 }
1694 kvm_mmu_commit_zap_page(kvm, &invalid_list); 1746 goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
1695 kvm_nr_mmu_pages = used_pages;
1696 kvm->arch.n_free_mmu_pages = 0;
1697 } 1747 }
1698 else
1699 kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
1700 - kvm->arch.n_alloc_mmu_pages;
1701 1748
1702 kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages; 1749 kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
1703} 1750}
1704 1751
1705static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) 1752static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
@@ -1709,11 +1756,11 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
1709 LIST_HEAD(invalid_list); 1756 LIST_HEAD(invalid_list);
1710 int r; 1757 int r;
1711 1758
1712 pgprintk("%s: looking for gfn %lx\n", __func__, gfn); 1759 pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
1713 r = 0; 1760 r = 0;
1714 1761
1715 for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) { 1762 for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
1716 pgprintk("%s: gfn %lx role %x\n", __func__, gfn, 1763 pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
1717 sp->role.word); 1764 sp->role.word);
1718 r = 1; 1765 r = 1;
1719 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); 1766 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
@@ -1729,7 +1776,7 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
1729 LIST_HEAD(invalid_list); 1776 LIST_HEAD(invalid_list);
1730 1777
1731 for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) { 1778 for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
1732 pgprintk("%s: zap %lx %x\n", 1779 pgprintk("%s: zap %llx %x\n",
1733 __func__, gfn, sp->role.word); 1780 __func__, gfn, sp->role.word);
1734 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); 1781 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
1735 } 1782 }
@@ -1925,7 +1972,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1925 * whether the guest actually used the pte (in order to detect 1972 * whether the guest actually used the pte (in order to detect
1926 * demand paging). 1973 * demand paging).
1927 */ 1974 */
1928 spte = shadow_base_present_pte | shadow_dirty_mask; 1975 spte = shadow_base_present_pte;
1929 if (!speculative) 1976 if (!speculative)
1930 spte |= shadow_accessed_mask; 1977 spte |= shadow_accessed_mask;
1931 if (!dirty) 1978 if (!dirty)
@@ -1948,8 +1995,8 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1948 spte |= (u64)pfn << PAGE_SHIFT; 1995 spte |= (u64)pfn << PAGE_SHIFT;
1949 1996
1950 if ((pte_access & ACC_WRITE_MASK) 1997 if ((pte_access & ACC_WRITE_MASK)
1951 || (!tdp_enabled && write_fault && !is_write_protection(vcpu) 1998 || (!vcpu->arch.mmu.direct_map && write_fault
1952 && !user_fault)) { 1999 && !is_write_protection(vcpu) && !user_fault)) {
1953 2000
1954 if (level > PT_PAGE_TABLE_LEVEL && 2001 if (level > PT_PAGE_TABLE_LEVEL &&
1955 has_wrprotected_page(vcpu->kvm, gfn, level)) { 2002 has_wrprotected_page(vcpu->kvm, gfn, level)) {
@@ -1960,7 +2007,8 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1960 2007
1961 spte |= PT_WRITABLE_MASK; 2008 spte |= PT_WRITABLE_MASK;
1962 2009
1963 if (!tdp_enabled && !(pte_access & ACC_WRITE_MASK)) 2010 if (!vcpu->arch.mmu.direct_map
2011 && !(pte_access & ACC_WRITE_MASK))
1964 spte &= ~PT_USER_MASK; 2012 spte &= ~PT_USER_MASK;
1965 2013
1966 /* 2014 /*
@@ -1973,7 +2021,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1973 goto set_pte; 2021 goto set_pte;
1974 2022
1975 if (mmu_need_write_protect(vcpu, gfn, can_unsync)) { 2023 if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
1976 pgprintk("%s: found shadow page for %lx, marking ro\n", 2024 pgprintk("%s: found shadow page for %llx, marking ro\n",
1977 __func__, gfn); 2025 __func__, gfn);
1978 ret = 1; 2026 ret = 1;
1979 pte_access &= ~ACC_WRITE_MASK; 2027 pte_access &= ~ACC_WRITE_MASK;
@@ -1986,8 +2034,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1986 mark_page_dirty(vcpu->kvm, gfn); 2034 mark_page_dirty(vcpu->kvm, gfn);
1987 2035
1988set_pte: 2036set_pte:
1989 if (is_writable_pte(*sptep) && !is_writable_pte(spte))
1990 kvm_set_pfn_dirty(pfn);
1991 update_spte(sptep, spte); 2037 update_spte(sptep, spte);
1992done: 2038done:
1993 return ret; 2039 return ret;
@@ -2004,7 +2050,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2004 int rmap_count; 2050 int rmap_count;
2005 2051
2006 pgprintk("%s: spte %llx access %x write_fault %d" 2052 pgprintk("%s: spte %llx access %x write_fault %d"
2007 " user_fault %d gfn %lx\n", 2053 " user_fault %d gfn %llx\n",
2008 __func__, *sptep, pt_access, 2054 __func__, *sptep, pt_access,
2009 write_fault, user_fault, gfn); 2055 write_fault, user_fault, gfn);
2010 2056
@@ -2023,7 +2069,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2023 __set_spte(sptep, shadow_trap_nonpresent_pte); 2069 __set_spte(sptep, shadow_trap_nonpresent_pte);
2024 kvm_flush_remote_tlbs(vcpu->kvm); 2070 kvm_flush_remote_tlbs(vcpu->kvm);
2025 } else if (pfn != spte_to_pfn(*sptep)) { 2071 } else if (pfn != spte_to_pfn(*sptep)) {
2026 pgprintk("hfn old %lx new %lx\n", 2072 pgprintk("hfn old %llx new %llx\n",
2027 spte_to_pfn(*sptep), pfn); 2073 spte_to_pfn(*sptep), pfn);
2028 drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte); 2074 drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
2029 kvm_flush_remote_tlbs(vcpu->kvm); 2075 kvm_flush_remote_tlbs(vcpu->kvm);
@@ -2040,7 +2086,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2040 } 2086 }
2041 2087
2042 pgprintk("%s: setting spte %llx\n", __func__, *sptep); 2088 pgprintk("%s: setting spte %llx\n", __func__, *sptep);
2043 pgprintk("instantiating %s PTE (%s) at %ld (%llx) addr %p\n", 2089 pgprintk("instantiating %s PTE (%s) at %llx (%llx) addr %p\n",
2044 is_large_pte(*sptep)? "2MB" : "4kB", 2090 is_large_pte(*sptep)? "2MB" : "4kB",
2045 *sptep & PT_PRESENT_MASK ?"RW":"R", gfn, 2091 *sptep & PT_PRESENT_MASK ?"RW":"R", gfn,
2046 *sptep, sptep); 2092 *sptep, sptep);
@@ -2064,6 +2110,105 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
2064{ 2110{
2065} 2111}
2066 2112
2113static struct kvm_memory_slot *
2114pte_prefetch_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn, bool no_dirty_log)
2115{
2116 struct kvm_memory_slot *slot;
2117
2118 slot = gfn_to_memslot(vcpu->kvm, gfn);
2119 if (!slot || slot->flags & KVM_MEMSLOT_INVALID ||
2120 (no_dirty_log && slot->dirty_bitmap))
2121 slot = NULL;
2122
2123 return slot;
2124}
2125
2126static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
2127 bool no_dirty_log)
2128{
2129 struct kvm_memory_slot *slot;
2130 unsigned long hva;
2131
2132 slot = pte_prefetch_gfn_to_memslot(vcpu, gfn, no_dirty_log);
2133 if (!slot) {
2134 get_page(bad_page);
2135 return page_to_pfn(bad_page);
2136 }
2137
2138 hva = gfn_to_hva_memslot(slot, gfn);
2139
2140 return hva_to_pfn_atomic(vcpu->kvm, hva);
2141}
2142
2143static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
2144 struct kvm_mmu_page *sp,
2145 u64 *start, u64 *end)
2146{
2147 struct page *pages[PTE_PREFETCH_NUM];
2148 unsigned access = sp->role.access;
2149 int i, ret;
2150 gfn_t gfn;
2151
2152 gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
2153 if (!pte_prefetch_gfn_to_memslot(vcpu, gfn, access & ACC_WRITE_MASK))
2154 return -1;
2155
2156 ret = gfn_to_page_many_atomic(vcpu->kvm, gfn, pages, end - start);
2157 if (ret <= 0)
2158 return -1;
2159
2160 for (i = 0; i < ret; i++, gfn++, start++)
2161 mmu_set_spte(vcpu, start, ACC_ALL,
2162 access, 0, 0, 1, NULL,
2163 sp->role.level, gfn,
2164 page_to_pfn(pages[i]), true, true);
2165
2166 return 0;
2167}
2168
2169static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
2170 struct kvm_mmu_page *sp, u64 *sptep)
2171{
2172 u64 *spte, *start = NULL;
2173 int i;
2174
2175 WARN_ON(!sp->role.direct);
2176
2177 i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
2178 spte = sp->spt + i;
2179
2180 for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
2181 if (*spte != shadow_trap_nonpresent_pte || spte == sptep) {
2182 if (!start)
2183 continue;
2184 if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
2185 break;
2186 start = NULL;
2187 } else if (!start)
2188 start = spte;
2189 }
2190}
2191
2192static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
2193{
2194 struct kvm_mmu_page *sp;
2195
2196 /*
2197 * Since there is no accessed bit on EPT, there is no way to
2198 * distinguish between actually accessed translations and
2199 * prefetched ones, so disable pte prefetch if EPT is
2200 * enabled.
2201 */
2202 if (!shadow_accessed_mask)
2203 return;
2204
2205 sp = page_header(__pa(sptep));
2206 if (sp->role.level > PT_PAGE_TABLE_LEVEL)
2207 return;
2208
2209 __direct_pte_prefetch(vcpu, sp, sptep);
2210}
2211
2067static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write, 2212static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
2068 int level, gfn_t gfn, pfn_t pfn) 2213 int level, gfn_t gfn, pfn_t pfn)
2069{ 2214{
@@ -2077,6 +2222,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
2077 mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL, 2222 mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
2078 0, write, 1, &pt_write, 2223 0, write, 1, &pt_write,
2079 level, gfn, pfn, false, true); 2224 level, gfn, pfn, false, true);
2225 direct_pte_prefetch(vcpu, iterator.sptep);
2080 ++vcpu->stat.pf_fixed; 2226 ++vcpu->stat.pf_fixed;
2081 break; 2227 break;
2082 } 2228 }
@@ -2098,28 +2244,31 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
2098 __set_spte(iterator.sptep, 2244 __set_spte(iterator.sptep,
2099 __pa(sp->spt) 2245 __pa(sp->spt)
2100 | PT_PRESENT_MASK | PT_WRITABLE_MASK 2246 | PT_PRESENT_MASK | PT_WRITABLE_MASK
2101 | shadow_user_mask | shadow_x_mask); 2247 | shadow_user_mask | shadow_x_mask
2248 | shadow_accessed_mask);
2102 } 2249 }
2103 } 2250 }
2104 return pt_write; 2251 return pt_write;
2105} 2252}
2106 2253
2107static void kvm_send_hwpoison_signal(struct kvm *kvm, gfn_t gfn) 2254static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
2108{ 2255{
2109 char buf[1]; 2256 siginfo_t info;
2110 void __user *hva; 2257
2111 int r; 2258 info.si_signo = SIGBUS;
2259 info.si_errno = 0;
2260 info.si_code = BUS_MCEERR_AR;
2261 info.si_addr = (void __user *)address;
2262 info.si_addr_lsb = PAGE_SHIFT;
2112 2263
2113 /* Touch the page, so send SIGBUS */ 2264 send_sig_info(SIGBUS, &info, tsk);
2114 hva = (void __user *)gfn_to_hva(kvm, gfn);
2115 r = copy_from_user(buf, hva, 1);
2116} 2265}
2117 2266
2118static int kvm_handle_bad_page(struct kvm *kvm, gfn_t gfn, pfn_t pfn) 2267static int kvm_handle_bad_page(struct kvm *kvm, gfn_t gfn, pfn_t pfn)
2119{ 2268{
2120 kvm_release_pfn_clean(pfn); 2269 kvm_release_pfn_clean(pfn);
2121 if (is_hwpoison_pfn(pfn)) { 2270 if (is_hwpoison_pfn(pfn)) {
2122 kvm_send_hwpoison_signal(kvm, gfn); 2271 kvm_send_hwpoison_signal(gfn_to_hva(kvm, gfn), current);
2123 return 0; 2272 return 0;
2124 } else if (is_fault_pfn(pfn)) 2273 } else if (is_fault_pfn(pfn))
2125 return -EFAULT; 2274 return -EFAULT;
@@ -2179,7 +2328,9 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
2179 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) 2328 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
2180 return; 2329 return;
2181 spin_lock(&vcpu->kvm->mmu_lock); 2330 spin_lock(&vcpu->kvm->mmu_lock);
2182 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) { 2331 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL &&
2332 (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL ||
2333 vcpu->arch.mmu.direct_map)) {
2183 hpa_t root = vcpu->arch.mmu.root_hpa; 2334 hpa_t root = vcpu->arch.mmu.root_hpa;
2184 2335
2185 sp = page_header(root); 2336 sp = page_header(root);
@@ -2222,80 +2373,158 @@ static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
2222 return ret; 2373 return ret;
2223} 2374}
2224 2375
2225static int mmu_alloc_roots(struct kvm_vcpu *vcpu) 2376static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
2226{ 2377{
2227 int i;
2228 gfn_t root_gfn;
2229 struct kvm_mmu_page *sp; 2378 struct kvm_mmu_page *sp;
2230 int direct = 0; 2379 unsigned i;
2231 u64 pdptr;
2232
2233 root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
2234 2380
2235 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) { 2381 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
2382 spin_lock(&vcpu->kvm->mmu_lock);
2383 kvm_mmu_free_some_pages(vcpu);
2384 sp = kvm_mmu_get_page(vcpu, 0, 0, PT64_ROOT_LEVEL,
2385 1, ACC_ALL, NULL);
2386 ++sp->root_count;
2387 spin_unlock(&vcpu->kvm->mmu_lock);
2388 vcpu->arch.mmu.root_hpa = __pa(sp->spt);
2389 } else if (vcpu->arch.mmu.shadow_root_level == PT32E_ROOT_LEVEL) {
2390 for (i = 0; i < 4; ++i) {
2391 hpa_t root = vcpu->arch.mmu.pae_root[i];
2392
2393 ASSERT(!VALID_PAGE(root));
2394 spin_lock(&vcpu->kvm->mmu_lock);
2395 kvm_mmu_free_some_pages(vcpu);
2396 sp = kvm_mmu_get_page(vcpu, i << 30, i << 30,
2397 PT32_ROOT_LEVEL, 1, ACC_ALL,
2398 NULL);
2399 root = __pa(sp->spt);
2400 ++sp->root_count;
2401 spin_unlock(&vcpu->kvm->mmu_lock);
2402 vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
2403 }
2404 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
2405 } else
2406 BUG();
2407
2408 return 0;
2409}
2410
2411static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
2412{
2413 struct kvm_mmu_page *sp;
2414 u64 pdptr, pm_mask;
2415 gfn_t root_gfn;
2416 int i;
2417
2418 root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT;
2419
2420 if (mmu_check_root(vcpu, root_gfn))
2421 return 1;
2422
2423 /*
2424 * Do we shadow a long mode page table? If so we need to
2425 * write-protect the guest's page table root.
2426 */
2427 if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
2236 hpa_t root = vcpu->arch.mmu.root_hpa; 2428 hpa_t root = vcpu->arch.mmu.root_hpa;
2237 2429
2238 ASSERT(!VALID_PAGE(root)); 2430 ASSERT(!VALID_PAGE(root));
2239 if (mmu_check_root(vcpu, root_gfn)) 2431
2240 return 1;
2241 if (tdp_enabled) {
2242 direct = 1;
2243 root_gfn = 0;
2244 }
2245 spin_lock(&vcpu->kvm->mmu_lock); 2432 spin_lock(&vcpu->kvm->mmu_lock);
2246 kvm_mmu_free_some_pages(vcpu); 2433 kvm_mmu_free_some_pages(vcpu);
2247 sp = kvm_mmu_get_page(vcpu, root_gfn, 0, 2434 sp = kvm_mmu_get_page(vcpu, root_gfn, 0, PT64_ROOT_LEVEL,
2248 PT64_ROOT_LEVEL, direct, 2435 0, ACC_ALL, NULL);
2249 ACC_ALL, NULL);
2250 root = __pa(sp->spt); 2436 root = __pa(sp->spt);
2251 ++sp->root_count; 2437 ++sp->root_count;
2252 spin_unlock(&vcpu->kvm->mmu_lock); 2438 spin_unlock(&vcpu->kvm->mmu_lock);
2253 vcpu->arch.mmu.root_hpa = root; 2439 vcpu->arch.mmu.root_hpa = root;
2254 return 0; 2440 return 0;
2255 } 2441 }
2256 direct = !is_paging(vcpu); 2442
2443 /*
2444 * We shadow a 32 bit page table. This may be a legacy 2-level
2445 * or a PAE 3-level page table. In either case we need to be aware that
2446 * the shadow page table may be a PAE or a long mode page table.
2447 */
2448 pm_mask = PT_PRESENT_MASK;
2449 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL)
2450 pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
2451
2257 for (i = 0; i < 4; ++i) { 2452 for (i = 0; i < 4; ++i) {
2258 hpa_t root = vcpu->arch.mmu.pae_root[i]; 2453 hpa_t root = vcpu->arch.mmu.pae_root[i];
2259 2454
2260 ASSERT(!VALID_PAGE(root)); 2455 ASSERT(!VALID_PAGE(root));
2261 if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) { 2456 if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
2262 pdptr = kvm_pdptr_read(vcpu, i); 2457 pdptr = kvm_pdptr_read_mmu(vcpu, &vcpu->arch.mmu, i);
2263 if (!is_present_gpte(pdptr)) { 2458 if (!is_present_gpte(pdptr)) {
2264 vcpu->arch.mmu.pae_root[i] = 0; 2459 vcpu->arch.mmu.pae_root[i] = 0;
2265 continue; 2460 continue;
2266 } 2461 }
2267 root_gfn = pdptr >> PAGE_SHIFT; 2462 root_gfn = pdptr >> PAGE_SHIFT;
2268 } else if (vcpu->arch.mmu.root_level == 0) 2463 if (mmu_check_root(vcpu, root_gfn))
2269 root_gfn = 0; 2464 return 1;
2270 if (mmu_check_root(vcpu, root_gfn))
2271 return 1;
2272 if (tdp_enabled) {
2273 direct = 1;
2274 root_gfn = i << 30;
2275 } 2465 }
2276 spin_lock(&vcpu->kvm->mmu_lock); 2466 spin_lock(&vcpu->kvm->mmu_lock);
2277 kvm_mmu_free_some_pages(vcpu); 2467 kvm_mmu_free_some_pages(vcpu);
2278 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, 2468 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
2279 PT32_ROOT_LEVEL, direct, 2469 PT32_ROOT_LEVEL, 0,
2280 ACC_ALL, NULL); 2470 ACC_ALL, NULL);
2281 root = __pa(sp->spt); 2471 root = __pa(sp->spt);
2282 ++sp->root_count; 2472 ++sp->root_count;
2283 spin_unlock(&vcpu->kvm->mmu_lock); 2473 spin_unlock(&vcpu->kvm->mmu_lock);
2284 2474
2285 vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK; 2475 vcpu->arch.mmu.pae_root[i] = root | pm_mask;
2286 } 2476 }
2287 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root); 2477 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
2478
2479 /*
2480 * If we shadow a 32 bit page table with a long mode page
2481 * table we enter this path.
2482 */
2483 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
2484 if (vcpu->arch.mmu.lm_root == NULL) {
2485 /*
2486 * The additional page necessary for this is only
2487 * allocated on demand.
2488 */
2489
2490 u64 *lm_root;
2491
2492 lm_root = (void*)get_zeroed_page(GFP_KERNEL);
2493 if (lm_root == NULL)
2494 return 1;
2495
2496 lm_root[0] = __pa(vcpu->arch.mmu.pae_root) | pm_mask;
2497
2498 vcpu->arch.mmu.lm_root = lm_root;
2499 }
2500
2501 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.lm_root);
2502 }
2503
2288 return 0; 2504 return 0;
2289} 2505}
2290 2506
2507static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
2508{
2509 if (vcpu->arch.mmu.direct_map)
2510 return mmu_alloc_direct_roots(vcpu);
2511 else
2512 return mmu_alloc_shadow_roots(vcpu);
2513}
2514
2291static void mmu_sync_roots(struct kvm_vcpu *vcpu) 2515static void mmu_sync_roots(struct kvm_vcpu *vcpu)
2292{ 2516{
2293 int i; 2517 int i;
2294 struct kvm_mmu_page *sp; 2518 struct kvm_mmu_page *sp;
2295 2519
2520 if (vcpu->arch.mmu.direct_map)
2521 return;
2522
2296 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) 2523 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
2297 return; 2524 return;
2298 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) { 2525
2526 trace_kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
2527 if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
2299 hpa_t root = vcpu->arch.mmu.root_hpa; 2528 hpa_t root = vcpu->arch.mmu.root_hpa;
2300 sp = page_header(root); 2529 sp = page_header(root);
2301 mmu_sync_children(vcpu, sp); 2530 mmu_sync_children(vcpu, sp);
@@ -2310,6 +2539,7 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
2310 mmu_sync_children(vcpu, sp); 2539 mmu_sync_children(vcpu, sp);
2311 } 2540 }
2312 } 2541 }
2542 trace_kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
2313} 2543}
2314 2544
2315void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) 2545void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
@@ -2327,6 +2557,14 @@ static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
2327 return vaddr; 2557 return vaddr;
2328} 2558}
2329 2559
2560static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
2561 u32 access, u32 *error)
2562{
2563 if (error)
2564 *error = 0;
2565 return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access);
2566}
2567
2330static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva, 2568static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
2331 u32 error_code) 2569 u32 error_code)
2332{ 2570{
@@ -2393,10 +2631,9 @@ static void nonpaging_free(struct kvm_vcpu *vcpu)
2393 mmu_free_roots(vcpu); 2631 mmu_free_roots(vcpu);
2394} 2632}
2395 2633
2396static int nonpaging_init_context(struct kvm_vcpu *vcpu) 2634static int nonpaging_init_context(struct kvm_vcpu *vcpu,
2635 struct kvm_mmu *context)
2397{ 2636{
2398 struct kvm_mmu *context = &vcpu->arch.mmu;
2399
2400 context->new_cr3 = nonpaging_new_cr3; 2637 context->new_cr3 = nonpaging_new_cr3;
2401 context->page_fault = nonpaging_page_fault; 2638 context->page_fault = nonpaging_page_fault;
2402 context->gva_to_gpa = nonpaging_gva_to_gpa; 2639 context->gva_to_gpa = nonpaging_gva_to_gpa;
@@ -2407,6 +2644,8 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu)
2407 context->root_level = 0; 2644 context->root_level = 0;
2408 context->shadow_root_level = PT32E_ROOT_LEVEL; 2645 context->shadow_root_level = PT32E_ROOT_LEVEL;
2409 context->root_hpa = INVALID_PAGE; 2646 context->root_hpa = INVALID_PAGE;
2647 context->direct_map = true;
2648 context->nx = false;
2410 return 0; 2649 return 0;
2411} 2650}
2412 2651
@@ -2422,11 +2661,14 @@ static void paging_new_cr3(struct kvm_vcpu *vcpu)
2422 mmu_free_roots(vcpu); 2661 mmu_free_roots(vcpu);
2423} 2662}
2424 2663
2425static void inject_page_fault(struct kvm_vcpu *vcpu, 2664static unsigned long get_cr3(struct kvm_vcpu *vcpu)
2426 u64 addr, 2665{
2427 u32 err_code) 2666 return vcpu->arch.cr3;
2667}
2668
2669static void inject_page_fault(struct kvm_vcpu *vcpu)
2428{ 2670{
2429 kvm_inject_page_fault(vcpu, addr, err_code); 2671 vcpu->arch.mmu.inject_page_fault(vcpu);
2430} 2672}
2431 2673
2432static void paging_free(struct kvm_vcpu *vcpu) 2674static void paging_free(struct kvm_vcpu *vcpu)
@@ -2434,12 +2676,12 @@ static void paging_free(struct kvm_vcpu *vcpu)
2434 nonpaging_free(vcpu); 2676 nonpaging_free(vcpu);
2435} 2677}
2436 2678
2437static bool is_rsvd_bits_set(struct kvm_vcpu *vcpu, u64 gpte, int level) 2679static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
2438{ 2680{
2439 int bit7; 2681 int bit7;
2440 2682
2441 bit7 = (gpte >> 7) & 1; 2683 bit7 = (gpte >> 7) & 1;
2442 return (gpte & vcpu->arch.mmu.rsvd_bits_mask[bit7][level-1]) != 0; 2684 return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0;
2443} 2685}
2444 2686
2445#define PTTYPE 64 2687#define PTTYPE 64
@@ -2450,13 +2692,14 @@ static bool is_rsvd_bits_set(struct kvm_vcpu *vcpu, u64 gpte, int level)
2450#include "paging_tmpl.h" 2692#include "paging_tmpl.h"
2451#undef PTTYPE 2693#undef PTTYPE
2452 2694
2453static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level) 2695static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
2696 struct kvm_mmu *context,
2697 int level)
2454{ 2698{
2455 struct kvm_mmu *context = &vcpu->arch.mmu;
2456 int maxphyaddr = cpuid_maxphyaddr(vcpu); 2699 int maxphyaddr = cpuid_maxphyaddr(vcpu);
2457 u64 exb_bit_rsvd = 0; 2700 u64 exb_bit_rsvd = 0;
2458 2701
2459 if (!is_nx(vcpu)) 2702 if (!context->nx)
2460 exb_bit_rsvd = rsvd_bits(63, 63); 2703 exb_bit_rsvd = rsvd_bits(63, 63);
2461 switch (level) { 2704 switch (level) {
2462 case PT32_ROOT_LEVEL: 2705 case PT32_ROOT_LEVEL:
@@ -2511,9 +2754,13 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
2511 } 2754 }
2512} 2755}
2513 2756
2514static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level) 2757static int paging64_init_context_common(struct kvm_vcpu *vcpu,
2758 struct kvm_mmu *context,
2759 int level)
2515{ 2760{
2516 struct kvm_mmu *context = &vcpu->arch.mmu; 2761 context->nx = is_nx(vcpu);
2762
2763 reset_rsvds_bits_mask(vcpu, context, level);
2517 2764
2518 ASSERT(is_pae(vcpu)); 2765 ASSERT(is_pae(vcpu));
2519 context->new_cr3 = paging_new_cr3; 2766 context->new_cr3 = paging_new_cr3;
@@ -2526,20 +2773,23 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
2526 context->root_level = level; 2773 context->root_level = level;
2527 context->shadow_root_level = level; 2774 context->shadow_root_level = level;
2528 context->root_hpa = INVALID_PAGE; 2775 context->root_hpa = INVALID_PAGE;
2776 context->direct_map = false;
2529 return 0; 2777 return 0;
2530} 2778}
2531 2779
2532static int paging64_init_context(struct kvm_vcpu *vcpu) 2780static int paging64_init_context(struct kvm_vcpu *vcpu,
2781 struct kvm_mmu *context)
2533{ 2782{
2534 reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL); 2783 return paging64_init_context_common(vcpu, context, PT64_ROOT_LEVEL);
2535 return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
2536} 2784}
2537 2785
2538static int paging32_init_context(struct kvm_vcpu *vcpu) 2786static int paging32_init_context(struct kvm_vcpu *vcpu,
2787 struct kvm_mmu *context)
2539{ 2788{
2540 struct kvm_mmu *context = &vcpu->arch.mmu; 2789 context->nx = false;
2790
2791 reset_rsvds_bits_mask(vcpu, context, PT32_ROOT_LEVEL);
2541 2792
2542 reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
2543 context->new_cr3 = paging_new_cr3; 2793 context->new_cr3 = paging_new_cr3;
2544 context->page_fault = paging32_page_fault; 2794 context->page_fault = paging32_page_fault;
2545 context->gva_to_gpa = paging32_gva_to_gpa; 2795 context->gva_to_gpa = paging32_gva_to_gpa;
@@ -2550,18 +2800,19 @@ static int paging32_init_context(struct kvm_vcpu *vcpu)
2550 context->root_level = PT32_ROOT_LEVEL; 2800 context->root_level = PT32_ROOT_LEVEL;
2551 context->shadow_root_level = PT32E_ROOT_LEVEL; 2801 context->shadow_root_level = PT32E_ROOT_LEVEL;
2552 context->root_hpa = INVALID_PAGE; 2802 context->root_hpa = INVALID_PAGE;
2803 context->direct_map = false;
2553 return 0; 2804 return 0;
2554} 2805}
2555 2806
2556static int paging32E_init_context(struct kvm_vcpu *vcpu) 2807static int paging32E_init_context(struct kvm_vcpu *vcpu,
2808 struct kvm_mmu *context)
2557{ 2809{
2558 reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL); 2810 return paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
2559 return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
2560} 2811}
2561 2812
2562static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) 2813static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
2563{ 2814{
2564 struct kvm_mmu *context = &vcpu->arch.mmu; 2815 struct kvm_mmu *context = vcpu->arch.walk_mmu;
2565 2816
2566 context->new_cr3 = nonpaging_new_cr3; 2817 context->new_cr3 = nonpaging_new_cr3;
2567 context->page_fault = tdp_page_fault; 2818 context->page_fault = tdp_page_fault;
@@ -2571,20 +2822,29 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
2571 context->invlpg = nonpaging_invlpg; 2822 context->invlpg = nonpaging_invlpg;
2572 context->shadow_root_level = kvm_x86_ops->get_tdp_level(); 2823 context->shadow_root_level = kvm_x86_ops->get_tdp_level();
2573 context->root_hpa = INVALID_PAGE; 2824 context->root_hpa = INVALID_PAGE;
2825 context->direct_map = true;
2826 context->set_cr3 = kvm_x86_ops->set_tdp_cr3;
2827 context->get_cr3 = get_cr3;
2828 context->inject_page_fault = kvm_inject_page_fault;
2829 context->nx = is_nx(vcpu);
2574 2830
2575 if (!is_paging(vcpu)) { 2831 if (!is_paging(vcpu)) {
2832 context->nx = false;
2576 context->gva_to_gpa = nonpaging_gva_to_gpa; 2833 context->gva_to_gpa = nonpaging_gva_to_gpa;
2577 context->root_level = 0; 2834 context->root_level = 0;
2578 } else if (is_long_mode(vcpu)) { 2835 } else if (is_long_mode(vcpu)) {
2579 reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL); 2836 context->nx = is_nx(vcpu);
2837 reset_rsvds_bits_mask(vcpu, context, PT64_ROOT_LEVEL);
2580 context->gva_to_gpa = paging64_gva_to_gpa; 2838 context->gva_to_gpa = paging64_gva_to_gpa;
2581 context->root_level = PT64_ROOT_LEVEL; 2839 context->root_level = PT64_ROOT_LEVEL;
2582 } else if (is_pae(vcpu)) { 2840 } else if (is_pae(vcpu)) {
2583 reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL); 2841 context->nx = is_nx(vcpu);
2842 reset_rsvds_bits_mask(vcpu, context, PT32E_ROOT_LEVEL);
2584 context->gva_to_gpa = paging64_gva_to_gpa; 2843 context->gva_to_gpa = paging64_gva_to_gpa;
2585 context->root_level = PT32E_ROOT_LEVEL; 2844 context->root_level = PT32E_ROOT_LEVEL;
2586 } else { 2845 } else {
2587 reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL); 2846 context->nx = false;
2847 reset_rsvds_bits_mask(vcpu, context, PT32_ROOT_LEVEL);
2588 context->gva_to_gpa = paging32_gva_to_gpa; 2848 context->gva_to_gpa = paging32_gva_to_gpa;
2589 context->root_level = PT32_ROOT_LEVEL; 2849 context->root_level = PT32_ROOT_LEVEL;
2590 } 2850 }
@@ -2592,33 +2852,83 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
2592 return 0; 2852 return 0;
2593} 2853}
2594 2854
2595static int init_kvm_softmmu(struct kvm_vcpu *vcpu) 2855int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
2596{ 2856{
2597 int r; 2857 int r;
2598
2599 ASSERT(vcpu); 2858 ASSERT(vcpu);
2600 ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); 2859 ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2601 2860
2602 if (!is_paging(vcpu)) 2861 if (!is_paging(vcpu))
2603 r = nonpaging_init_context(vcpu); 2862 r = nonpaging_init_context(vcpu, context);
2604 else if (is_long_mode(vcpu)) 2863 else if (is_long_mode(vcpu))
2605 r = paging64_init_context(vcpu); 2864 r = paging64_init_context(vcpu, context);
2606 else if (is_pae(vcpu)) 2865 else if (is_pae(vcpu))
2607 r = paging32E_init_context(vcpu); 2866 r = paging32E_init_context(vcpu, context);
2608 else 2867 else
2609 r = paging32_init_context(vcpu); 2868 r = paging32_init_context(vcpu, context);
2610 2869
2611 vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu); 2870 vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
2612 vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu); 2871 vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu);
2613 2872
2614 return r; 2873 return r;
2615} 2874}
2875EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
2876
2877static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
2878{
2879 int r = kvm_init_shadow_mmu(vcpu, vcpu->arch.walk_mmu);
2880
2881 vcpu->arch.walk_mmu->set_cr3 = kvm_x86_ops->set_cr3;
2882 vcpu->arch.walk_mmu->get_cr3 = get_cr3;
2883 vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
2884
2885 return r;
2886}
2887
2888static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
2889{
2890 struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
2891
2892 g_context->get_cr3 = get_cr3;
2893 g_context->inject_page_fault = kvm_inject_page_fault;
2894
2895 /*
2896 * Note that arch.mmu.gva_to_gpa translates l2_gva to l1_gpa. The
2897 * translation of l2_gpa to l1_gpa addresses is done using the
2898 * arch.nested_mmu.gva_to_gpa function. Basically the gva_to_gpa
2899 * functions between mmu and nested_mmu are swapped.
2900 */
2901 if (!is_paging(vcpu)) {
2902 g_context->nx = false;
2903 g_context->root_level = 0;
2904 g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
2905 } else if (is_long_mode(vcpu)) {
2906 g_context->nx = is_nx(vcpu);
2907 reset_rsvds_bits_mask(vcpu, g_context, PT64_ROOT_LEVEL);
2908 g_context->root_level = PT64_ROOT_LEVEL;
2909 g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
2910 } else if (is_pae(vcpu)) {
2911 g_context->nx = is_nx(vcpu);
2912 reset_rsvds_bits_mask(vcpu, g_context, PT32E_ROOT_LEVEL);
2913 g_context->root_level = PT32E_ROOT_LEVEL;
2914 g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
2915 } else {
2916 g_context->nx = false;
2917 reset_rsvds_bits_mask(vcpu, g_context, PT32_ROOT_LEVEL);
2918 g_context->root_level = PT32_ROOT_LEVEL;
2919 g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
2920 }
2921
2922 return 0;
2923}
2616 2924
2617static int init_kvm_mmu(struct kvm_vcpu *vcpu) 2925static int init_kvm_mmu(struct kvm_vcpu *vcpu)
2618{ 2926{
2619 vcpu->arch.update_pte.pfn = bad_pfn; 2927 vcpu->arch.update_pte.pfn = bad_pfn;
2620 2928
2621 if (tdp_enabled) 2929 if (mmu_is_nested(vcpu))
2930 return init_kvm_nested_mmu(vcpu);
2931 else if (tdp_enabled)
2622 return init_kvm_tdp_mmu(vcpu); 2932 return init_kvm_tdp_mmu(vcpu);
2623 else 2933 else
2624 return init_kvm_softmmu(vcpu); 2934 return init_kvm_softmmu(vcpu);
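The comment in init_kvm_nested_mmu() above describes composing two translations. The toy sketch below (flat arrays standing in for real page-table walks; none of these helpers exist in KVM) shows the composition the nested walker effectively performs: l2_gva -> l2_gpa through the L2 guest's tables, then l2_gpa -> l1_gpa through the L1 tables that back the nested guest.

/* Two-stage address translation, composed. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_MASK  ((1ull << PAGE_SHIFT) - 1)

/* Toy "page tables": index is a page number, value is the mapped page number. */
static uint64_t l2_guest_table[16]; /* l2_gva page -> l2_gpa page */
static uint64_t l1_table[16];       /* l2_gpa page -> l1_gpa page */

static uint64_t l2_gva_to_l2_gpa(uint64_t gva)
{
	return (l2_guest_table[gva >> PAGE_SHIFT] << PAGE_SHIFT) | (gva & PAGE_MASK);
}

static uint64_t l2_gpa_to_l1_gpa(uint64_t gpa)
{
	return (l1_table[gpa >> PAGE_SHIFT] << PAGE_SHIFT) | (gpa & PAGE_MASK);
}

/* The composition a nested walk effectively performs. */
static uint64_t l2_gva_to_l1_gpa(uint64_t gva)
{
	return l2_gpa_to_l1_gpa(l2_gva_to_l2_gpa(gva));
}

int main(void)
{
	l2_guest_table[1] = 5; /* L2 maps its virtual page 1 at L2-physical page 5 */
	l1_table[5] = 9;       /* L1 backs L2-physical page 5 with its page 9 */

	printf("l2_gva 0x1234 -> l1_gpa 0x%llx\n",
	       (unsigned long long)l2_gva_to_l1_gpa(0x1234)); /* prints 0x9234 */
	return 0;
}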
@@ -2653,7 +2963,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
2653 if (r) 2963 if (r)
2654 goto out; 2964 goto out;
2655 /* set_cr3() should ensure TLB has been flushed */ 2965 /* set_cr3() should ensure TLB has been flushed */
2656 kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa); 2966 vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
2657out: 2967out:
2658 return r; 2968 return r;
2659} 2969}
@@ -2663,6 +2973,7 @@ void kvm_mmu_unload(struct kvm_vcpu *vcpu)
2663{ 2973{
2664 mmu_free_roots(vcpu); 2974 mmu_free_roots(vcpu);
2665} 2975}
2976EXPORT_SYMBOL_GPL(kvm_mmu_unload);
2666 2977
2667static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu, 2978static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
2668 struct kvm_mmu_page *sp, 2979 struct kvm_mmu_page *sp,
@@ -2695,7 +3006,7 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
2695 return; 3006 return;
2696 } 3007 }
2697 3008
2698 if (is_rsvd_bits_set(vcpu, *(u64 *)new, PT_PAGE_TABLE_LEVEL)) 3009 if (is_rsvd_bits_set(&vcpu->arch.mmu, *(u64 *)new, PT_PAGE_TABLE_LEVEL))
2699 return; 3010 return;
2700 3011
2701 ++vcpu->kvm->stat.mmu_pte_updated; 3012 ++vcpu->kvm->stat.mmu_pte_updated;
@@ -2837,7 +3148,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
2837 kvm_mmu_access_page(vcpu, gfn); 3148 kvm_mmu_access_page(vcpu, gfn);
2838 kvm_mmu_free_some_pages(vcpu); 3149 kvm_mmu_free_some_pages(vcpu);
2839 ++vcpu->kvm->stat.mmu_pte_write; 3150 ++vcpu->kvm->stat.mmu_pte_write;
2840 kvm_mmu_audit(vcpu, "pre pte write"); 3151 trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
2841 if (guest_initiated) { 3152 if (guest_initiated) {
2842 if (gfn == vcpu->arch.last_pt_write_gfn 3153 if (gfn == vcpu->arch.last_pt_write_gfn
2843 && !last_updated_pte_accessed(vcpu)) { 3154 && !last_updated_pte_accessed(vcpu)) {
@@ -2910,7 +3221,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
2910 } 3221 }
2911 mmu_pte_write_flush_tlb(vcpu, zap_page, remote_flush, local_flush); 3222 mmu_pte_write_flush_tlb(vcpu, zap_page, remote_flush, local_flush);
2912 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); 3223 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
2913 kvm_mmu_audit(vcpu, "post pte write"); 3224 trace_kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
2914 spin_unlock(&vcpu->kvm->mmu_lock); 3225 spin_unlock(&vcpu->kvm->mmu_lock);
2915 if (!is_error_pfn(vcpu->arch.update_pte.pfn)) { 3226 if (!is_error_pfn(vcpu->arch.update_pte.pfn)) {
2916 kvm_release_pfn_clean(vcpu->arch.update_pte.pfn); 3227 kvm_release_pfn_clean(vcpu->arch.update_pte.pfn);
@@ -2923,7 +3234,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
2923 gpa_t gpa; 3234 gpa_t gpa;
2924 int r; 3235 int r;
2925 3236
2926 if (tdp_enabled) 3237 if (vcpu->arch.mmu.direct_map)
2927 return 0; 3238 return 0;
2928 3239
2929 gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL); 3240 gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
@@ -2937,21 +3248,18 @@ EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
2937 3248
2938void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu) 3249void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
2939{ 3250{
2940 int free_pages;
2941 LIST_HEAD(invalid_list); 3251 LIST_HEAD(invalid_list);
2942 3252
2943 free_pages = vcpu->kvm->arch.n_free_mmu_pages; 3253 while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES &&
2944 while (free_pages < KVM_REFILL_PAGES &&
2945 !list_empty(&vcpu->kvm->arch.active_mmu_pages)) { 3254 !list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
2946 struct kvm_mmu_page *sp; 3255 struct kvm_mmu_page *sp;
2947 3256
2948 sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev, 3257 sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
2949 struct kvm_mmu_page, link); 3258 struct kvm_mmu_page, link);
2950 free_pages += kvm_mmu_prepare_zap_page(vcpu->kvm, sp, 3259 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
2951 &invalid_list); 3260 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
2952 ++vcpu->kvm->stat.mmu_recycled; 3261 ++vcpu->kvm->stat.mmu_recycled;
2953 } 3262 }
2954 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
2955} 3263}
2956 3264
2957int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code) 3265int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
@@ -3013,6 +3321,8 @@ EXPORT_SYMBOL_GPL(kvm_disable_tdp);
3013static void free_mmu_pages(struct kvm_vcpu *vcpu) 3321static void free_mmu_pages(struct kvm_vcpu *vcpu)
3014{ 3322{
3015 free_page((unsigned long)vcpu->arch.mmu.pae_root); 3323 free_page((unsigned long)vcpu->arch.mmu.pae_root);
3324 if (vcpu->arch.mmu.lm_root != NULL)
3325 free_page((unsigned long)vcpu->arch.mmu.lm_root);
3016} 3326}
3017 3327
3018static int alloc_mmu_pages(struct kvm_vcpu *vcpu) 3328static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
@@ -3054,15 +3364,6 @@ int kvm_mmu_setup(struct kvm_vcpu *vcpu)
3054 return init_kvm_mmu(vcpu); 3364 return init_kvm_mmu(vcpu);
3055} 3365}
3056 3366
3057void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
3058{
3059 ASSERT(vcpu);
3060
3061 destroy_kvm_mmu(vcpu);
3062 free_mmu_pages(vcpu);
3063 mmu_free_memory_caches(vcpu);
3064}
3065
3066void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot) 3367void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
3067{ 3368{
3068 struct kvm_mmu_page *sp; 3369 struct kvm_mmu_page *sp;
@@ -3112,23 +3413,22 @@ static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
3112{ 3413{
3113 struct kvm *kvm; 3414 struct kvm *kvm;
3114 struct kvm *kvm_freed = NULL; 3415 struct kvm *kvm_freed = NULL;
3115 int cache_count = 0; 3416
3417 if (nr_to_scan == 0)
3418 goto out;
3116 3419
3117 spin_lock(&kvm_lock); 3420 spin_lock(&kvm_lock);
3118 3421
3119 list_for_each_entry(kvm, &vm_list, vm_list) { 3422 list_for_each_entry(kvm, &vm_list, vm_list) {
3120 int npages, idx, freed_pages; 3423 int idx, freed_pages;
3121 LIST_HEAD(invalid_list); 3424 LIST_HEAD(invalid_list);
3122 3425
3123 idx = srcu_read_lock(&kvm->srcu); 3426 idx = srcu_read_lock(&kvm->srcu);
3124 spin_lock(&kvm->mmu_lock); 3427 spin_lock(&kvm->mmu_lock);
3125 npages = kvm->arch.n_alloc_mmu_pages - 3428 if (!kvm_freed && nr_to_scan > 0 &&
3126 kvm->arch.n_free_mmu_pages; 3429 kvm->arch.n_used_mmu_pages > 0) {
3127 cache_count += npages;
3128 if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
3129 freed_pages = kvm_mmu_remove_some_alloc_mmu_pages(kvm, 3430 freed_pages = kvm_mmu_remove_some_alloc_mmu_pages(kvm,
3130 &invalid_list); 3431 &invalid_list);
3131 cache_count -= freed_pages;
3132 kvm_freed = kvm; 3432 kvm_freed = kvm;
3133 } 3433 }
3134 nr_to_scan--; 3434 nr_to_scan--;
@@ -3142,7 +3442,8 @@ static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
3142 3442
3143 spin_unlock(&kvm_lock); 3443 spin_unlock(&kvm_lock);
3144 3444
3145 return cache_count; 3445out:
3446 return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
3146} 3447}
3147 3448
3148static struct shrinker mmu_shrinker = { 3449static struct shrinker mmu_shrinker = {
@@ -3163,6 +3464,7 @@ static void mmu_destroy_caches(void)
3163void kvm_mmu_module_exit(void) 3464void kvm_mmu_module_exit(void)
3164{ 3465{
3165 mmu_destroy_caches(); 3466 mmu_destroy_caches();
3467 percpu_counter_destroy(&kvm_total_used_mmu_pages);
3166 unregister_shrinker(&mmu_shrinker); 3468 unregister_shrinker(&mmu_shrinker);
3167} 3469}
3168 3470
@@ -3185,6 +3487,9 @@ int kvm_mmu_module_init(void)
3185 if (!mmu_page_header_cache) 3487 if (!mmu_page_header_cache)
3186 goto nomem; 3488 goto nomem;
3187 3489
3490 if (percpu_counter_init(&kvm_total_used_mmu_pages, 0))
3491 goto nomem;
3492
3188 register_shrinker(&mmu_shrinker); 3493 register_shrinker(&mmu_shrinker);
3189 3494
3190 return 0; 3495 return 0;
@@ -3355,271 +3660,18 @@ int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
3355} 3660}
3356EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy); 3661EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy);
3357 3662
3358#ifdef AUDIT 3663#ifdef CONFIG_KVM_MMU_AUDIT
3359 3664#include "mmu_audit.c"
3360static const char *audit_msg; 3665#else
3361 3666static void mmu_audit_disable(void) { }
3362static gva_t canonicalize(gva_t gva)
3363{
3364#ifdef CONFIG_X86_64
3365 gva = (long long)(gva << 16) >> 16;
3366#endif 3667#endif
3367 return gva;
3368}
3369
3370
3371typedef void (*inspect_spte_fn) (struct kvm *kvm, u64 *sptep);
3372
3373static void __mmu_spte_walk(struct kvm *kvm, struct kvm_mmu_page *sp,
3374 inspect_spte_fn fn)
3375{
3376 int i;
3377
3378 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
3379 u64 ent = sp->spt[i];
3380
3381 if (is_shadow_present_pte(ent)) {
3382 if (!is_last_spte(ent, sp->role.level)) {
3383 struct kvm_mmu_page *child;
3384 child = page_header(ent & PT64_BASE_ADDR_MASK);
3385 __mmu_spte_walk(kvm, child, fn);
3386 } else
3387 fn(kvm, &sp->spt[i]);
3388 }
3389 }
3390}
3391
3392static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
3393{
3394 int i;
3395 struct kvm_mmu_page *sp;
3396
3397 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
3398 return;
3399 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
3400 hpa_t root = vcpu->arch.mmu.root_hpa;
3401 sp = page_header(root);
3402 __mmu_spte_walk(vcpu->kvm, sp, fn);
3403 return;
3404 }
3405 for (i = 0; i < 4; ++i) {
3406 hpa_t root = vcpu->arch.mmu.pae_root[i];
3407
3408 if (root && VALID_PAGE(root)) {
3409 root &= PT64_BASE_ADDR_MASK;
3410 sp = page_header(root);
3411 __mmu_spte_walk(vcpu->kvm, sp, fn);
3412 }
3413 }
3414 return;
3415}
3416
3417static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
3418 gva_t va, int level)
3419{
3420 u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
3421 int i;
3422 gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));
3423
3424 for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
3425 u64 ent = pt[i];
3426
3427 if (ent == shadow_trap_nonpresent_pte)
3428 continue;
3429
3430 va = canonicalize(va);
3431 if (is_shadow_present_pte(ent) && !is_last_spte(ent, level))
3432 audit_mappings_page(vcpu, ent, va, level - 1);
3433 else {
3434 gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, va, NULL);
3435 gfn_t gfn = gpa >> PAGE_SHIFT;
3436 pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn);
3437 hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;
3438 3668
3439 if (is_error_pfn(pfn)) { 3669void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
3440 kvm_release_pfn_clean(pfn);
3441 continue;
3442 }
3443
3444 if (is_shadow_present_pte(ent)
3445 && (ent & PT64_BASE_ADDR_MASK) != hpa)
3446 printk(KERN_ERR "xx audit error: (%s) levels %d"
3447 " gva %lx gpa %llx hpa %llx ent %llx %d\n",
3448 audit_msg, vcpu->arch.mmu.root_level,
3449 va, gpa, hpa, ent,
3450 is_shadow_present_pte(ent));
3451 else if (ent == shadow_notrap_nonpresent_pte
3452 && !is_error_hpa(hpa))
3453 printk(KERN_ERR "audit: (%s) notrap shadow,"
3454 " valid guest gva %lx\n", audit_msg, va);
3455 kvm_release_pfn_clean(pfn);
3456
3457 }
3458 }
3459}
3460
3461static void audit_mappings(struct kvm_vcpu *vcpu)
3462{
3463 unsigned i;
3464
3465 if (vcpu->arch.mmu.root_level == 4)
3466 audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
3467 else
3468 for (i = 0; i < 4; ++i)
3469 if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
3470 audit_mappings_page(vcpu,
3471 vcpu->arch.mmu.pae_root[i],
3472 i << 30,
3473 2);
3474}
3475
3476static int count_rmaps(struct kvm_vcpu *vcpu)
3477{
3478 struct kvm *kvm = vcpu->kvm;
3479 struct kvm_memslots *slots;
3480 int nmaps = 0;
3481 int i, j, k, idx;
3482
3483 idx = srcu_read_lock(&kvm->srcu);
3484 slots = kvm_memslots(kvm);
3485 for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
3486 struct kvm_memory_slot *m = &slots->memslots[i];
3487 struct kvm_rmap_desc *d;
3488
3489 for (j = 0; j < m->npages; ++j) {
3490 unsigned long *rmapp = &m->rmap[j];
3491
3492 if (!*rmapp)
3493 continue;
3494 if (!(*rmapp & 1)) {
3495 ++nmaps;
3496 continue;
3497 }
3498 d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
3499 while (d) {
3500 for (k = 0; k < RMAP_EXT; ++k)
3501 if (d->sptes[k])
3502 ++nmaps;
3503 else
3504 break;
3505 d = d->more;
3506 }
3507 }
3508 }
3509 srcu_read_unlock(&kvm->srcu, idx);
3510 return nmaps;
3511}
3512
3513void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
3514{
3515 unsigned long *rmapp;
3516 struct kvm_mmu_page *rev_sp;
3517 gfn_t gfn;
3518
3519 if (is_writable_pte(*sptep)) {
3520 rev_sp = page_header(__pa(sptep));
3521 gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);
3522
3523 if (!gfn_to_memslot(kvm, gfn)) {
3524 if (!printk_ratelimit())
3525 return;
3526 printk(KERN_ERR "%s: no memslot for gfn %ld\n",
3527 audit_msg, gfn);
3528 printk(KERN_ERR "%s: index %ld of sp (gfn=%lx)\n",
3529 audit_msg, (long int)(sptep - rev_sp->spt),
3530 rev_sp->gfn);
3531 dump_stack();
3532 return;
3533 }
3534
3535 rmapp = gfn_to_rmap(kvm, gfn, rev_sp->role.level);
3536 if (!*rmapp) {
3537 if (!printk_ratelimit())
3538 return;
3539 printk(KERN_ERR "%s: no rmap for writable spte %llx\n",
3540 audit_msg, *sptep);
3541 dump_stack();
3542 }
3543 }
3544
3545}
3546
3547void audit_writable_sptes_have_rmaps(struct kvm_vcpu *vcpu)
3548{
3549 mmu_spte_walk(vcpu, inspect_spte_has_rmap);
3550}
3551
3552static void check_writable_mappings_rmap(struct kvm_vcpu *vcpu)
3553{
3554 struct kvm_mmu_page *sp;
3555 int i;
3556
3557 list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
3558 u64 *pt = sp->spt;
3559
3560 if (sp->role.level != PT_PAGE_TABLE_LEVEL)
3561 continue;
3562
3563 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
3564 u64 ent = pt[i];
3565
3566 if (!(ent & PT_PRESENT_MASK))
3567 continue;
3568 if (!is_writable_pte(ent))
3569 continue;
3570 inspect_spte_has_rmap(vcpu->kvm, &pt[i]);
3571 }
3572 }
3573 return;
3574}
3575
3576static void audit_rmap(struct kvm_vcpu *vcpu)
3577{
3578 check_writable_mappings_rmap(vcpu);
3579 count_rmaps(vcpu);
3580}
3581
3582static void audit_write_protection(struct kvm_vcpu *vcpu)
3583{
3584 struct kvm_mmu_page *sp;
3585 struct kvm_memory_slot *slot;
3586 unsigned long *rmapp;
3587 u64 *spte;
3588 gfn_t gfn;
3589
3590 list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
3591 if (sp->role.direct)
3592 continue;
3593 if (sp->unsync)
3594 continue;
3595
3596 slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
3597 rmapp = &slot->rmap[gfn - slot->base_gfn];
3598
3599 spte = rmap_next(vcpu->kvm, rmapp, NULL);
3600 while (spte) {
3601 if (is_writable_pte(*spte))
3602 printk(KERN_ERR "%s: (%s) shadow page has "
3603 "writable mappings: gfn %lx role %x\n",
3604 __func__, audit_msg, sp->gfn,
3605 sp->role.word);
3606 spte = rmap_next(vcpu->kvm, rmapp, spte);
3607 }
3608 }
3609}
3610
3611static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
3612{ 3670{
3613 int olddbg = dbg; 3671 ASSERT(vcpu);
3614 3672
3615 dbg = 0; 3673 destroy_kvm_mmu(vcpu);
3616 audit_msg = msg; 3674 free_mmu_pages(vcpu);
3617 audit_rmap(vcpu); 3675 mmu_free_memory_caches(vcpu);
3618 audit_write_protection(vcpu); 3676 mmu_audit_disable();
3619 if (strcmp("pre pte write", audit_msg) != 0)
3620 audit_mappings(vcpu);
3621 audit_writable_sptes_have_rmaps(vcpu);
3622 dbg = olddbg;
3623} 3677}
3624
3625#endif
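
The mmu.c hunks above turn the per-vcpu MMU into a table of callbacks (set_cr3, get_cr3, inject_page_fault, gva_to_gpa) and have init_kvm_mmu() pick one of three initialisers: the nested MMU when a guest hypervisor is active, the TDP MMU when two-dimensional paging is enabled, and the software shadow MMU otherwise. The following stand-alone C sketch models only that dispatch pattern; the names toy_mmu, pick_mmu and the init_* stubs are illustrative and are not the kernel's API.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for struct kvm_mmu: a table of callbacks. */
struct toy_mmu {
	const char *name;
	unsigned long (*get_cr3)(void);
	void (*set_cr3)(unsigned long root);
};

static unsigned long toy_get_cr3(void) { return 0x1000; }
static void toy_set_cr3(unsigned long root) { printf("set_cr3(%#lx)\n", root); }

/* Each initialiser fills in the callback table, as the init_kvm_*_mmu() paths do. */
static void init_nested(struct toy_mmu *m) { m->name = "nested"; m->get_cr3 = toy_get_cr3; m->set_cr3 = toy_set_cr3; }
static void init_tdp(struct toy_mmu *m)    { m->name = "tdp";    m->get_cr3 = toy_get_cr3; m->set_cr3 = toy_set_cr3; }
static void init_soft(struct toy_mmu *m)   { m->name = "soft";   m->get_cr3 = toy_get_cr3; m->set_cr3 = toy_set_cr3; }

/* Mirrors the order of checks in init_kvm_mmu(): nested first, then TDP, then shadow. */
static void pick_mmu(struct toy_mmu *m, bool nested, bool tdp_enabled)
{
	if (nested)
		init_nested(m);
	else if (tdp_enabled)
		init_tdp(m);
	else
		init_soft(m);
}

int main(void)
{
	struct toy_mmu m;

	pick_mmu(&m, false, true);
	printf("chose %s mmu, cr3=%#lx\n", m.name, m.get_cr3());
	m.set_cr3(0x2000);
	return 0;
}
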
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index be66759321a5..7086ca85d3e7 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -49,10 +49,17 @@
49#define PFERR_FETCH_MASK (1U << 4) 49#define PFERR_FETCH_MASK (1U << 4)
50 50
51int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]); 51int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
52int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
53
54static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
55{
56 return kvm->arch.n_max_mmu_pages -
57 kvm->arch.n_used_mmu_pages;
58}
52 59
53static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu) 60static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
54{ 61{
 55 if (unlikely(vcpu->kvm->arch.n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES)) 62 if (unlikely(kvm_mmu_available_pages(vcpu->kvm) < KVM_MIN_FREE_MMU_PAGES))
56 __kvm_mmu_free_some_pages(vcpu); 63 __kvm_mmu_free_some_pages(vcpu);
57} 64}
58 65
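
The mmu.h hunk above switches the heuristics from a maintained free-page count to used-page accounting: pages available = n_max_mmu_pages - n_used_mmu_pages, and kvm_mmu_free_some_pages() fires once that drops below the minimum. A few lines of stand-alone C, with made-up numbers and a placeholder threshold, just to show the arithmetic both callers rely on:

#include <stdio.h>

#define TOY_MIN_FREE_MMU_PAGES 5	/* placeholder, stands in for KVM_MIN_FREE_MMU_PAGES */

/* Same arithmetic as kvm_mmu_available_pages(): pages left = max - used. */
static unsigned int available_pages(unsigned int n_max, unsigned int n_used)
{
	return n_max - n_used;
}

int main(void)
{
	unsigned int n_max = 40, n_used = 37;

	if (available_pages(n_max, n_used) < TOY_MIN_FREE_MMU_PAGES)
		printf("below threshold: would free some shadow pages\n");
	return 0;
}
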
diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c
new file mode 100644
index 000000000000..ba2bcdde6221
--- /dev/null
+++ b/arch/x86/kvm/mmu_audit.c
@@ -0,0 +1,299 @@
1/*
2 * mmu_audit.c:
3 *
4 * Audit code for KVM MMU
5 *
6 * Copyright (C) 2006 Qumranet, Inc.
7 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
8 *
9 * Authors:
10 * Yaniv Kamay <yaniv@qumranet.com>
11 * Avi Kivity <avi@qumranet.com>
12 * Marcelo Tosatti <mtosatti@redhat.com>
13 * Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
14 *
15 * This work is licensed under the terms of the GNU GPL, version 2. See
16 * the COPYING file in the top-level directory.
17 *
18 */
19
20#include <linux/ratelimit.h>
21
22static int audit_point;
23
24#define audit_printk(fmt, args...) \
25 printk(KERN_ERR "audit: (%s) error: " \
26 fmt, audit_point_name[audit_point], ##args)
27
28typedef void (*inspect_spte_fn) (struct kvm_vcpu *vcpu, u64 *sptep, int level);
29
30static void __mmu_spte_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
31 inspect_spte_fn fn, int level)
32{
33 int i;
34
35 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
36 u64 *ent = sp->spt;
37
38 fn(vcpu, ent + i, level);
39
40 if (is_shadow_present_pte(ent[i]) &&
41 !is_last_spte(ent[i], level)) {
42 struct kvm_mmu_page *child;
43
44 child = page_header(ent[i] & PT64_BASE_ADDR_MASK);
45 __mmu_spte_walk(vcpu, child, fn, level - 1);
46 }
47 }
48}
49
50static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
51{
52 int i;
53 struct kvm_mmu_page *sp;
54
55 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
56 return;
57
58 if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
59 hpa_t root = vcpu->arch.mmu.root_hpa;
60
61 sp = page_header(root);
62 __mmu_spte_walk(vcpu, sp, fn, PT64_ROOT_LEVEL);
63 return;
64 }
65
66 for (i = 0; i < 4; ++i) {
67 hpa_t root = vcpu->arch.mmu.pae_root[i];
68
69 if (root && VALID_PAGE(root)) {
70 root &= PT64_BASE_ADDR_MASK;
71 sp = page_header(root);
72 __mmu_spte_walk(vcpu, sp, fn, 2);
73 }
74 }
75
76 return;
77}
78
79typedef void (*sp_handler) (struct kvm *kvm, struct kvm_mmu_page *sp);
80
81static void walk_all_active_sps(struct kvm *kvm, sp_handler fn)
82{
83 struct kvm_mmu_page *sp;
84
85 list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link)
86 fn(kvm, sp);
87}
88
89static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
90{
91 struct kvm_mmu_page *sp;
92 gfn_t gfn;
93 pfn_t pfn;
94 hpa_t hpa;
95
96 sp = page_header(__pa(sptep));
97
98 if (sp->unsync) {
99 if (level != PT_PAGE_TABLE_LEVEL) {
100 audit_printk("unsync sp: %p level = %d\n", sp, level);
101 return;
102 }
103
104 if (*sptep == shadow_notrap_nonpresent_pte) {
105 audit_printk("notrap spte in unsync sp: %p\n", sp);
106 return;
107 }
108 }
109
110 if (sp->role.direct && *sptep == shadow_notrap_nonpresent_pte) {
111 audit_printk("notrap spte in direct sp: %p\n", sp);
112 return;
113 }
114
115 if (!is_shadow_present_pte(*sptep) || !is_last_spte(*sptep, level))
116 return;
117
118 gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
119 pfn = gfn_to_pfn_atomic(vcpu->kvm, gfn);
120
121 if (is_error_pfn(pfn)) {
122 kvm_release_pfn_clean(pfn);
123 return;
124 }
125
126 hpa = pfn << PAGE_SHIFT;
127 if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)
 128		audit_printk("levels %d pfn %llx hpa %llx ent %llx\n",
129 vcpu->arch.mmu.root_level, pfn, hpa, *sptep);
130}
131
132static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
133{
134 unsigned long *rmapp;
135 struct kvm_mmu_page *rev_sp;
136 gfn_t gfn;
137
138
139 rev_sp = page_header(__pa(sptep));
140 gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);
141
142 if (!gfn_to_memslot(kvm, gfn)) {
143 if (!printk_ratelimit())
144 return;
145 audit_printk("no memslot for gfn %llx\n", gfn);
146 audit_printk("index %ld of sp (gfn=%llx)\n",
147 (long int)(sptep - rev_sp->spt), rev_sp->gfn);
148 dump_stack();
149 return;
150 }
151
152 rmapp = gfn_to_rmap(kvm, gfn, rev_sp->role.level);
153 if (!*rmapp) {
154 if (!printk_ratelimit())
155 return;
156 audit_printk("no rmap for writable spte %llx\n", *sptep);
157 dump_stack();
158 }
159}
160
161static void audit_sptes_have_rmaps(struct kvm_vcpu *vcpu, u64 *sptep, int level)
162{
163 if (is_shadow_present_pte(*sptep) && is_last_spte(*sptep, level))
164 inspect_spte_has_rmap(vcpu->kvm, sptep);
165}
166
167static void audit_spte_after_sync(struct kvm_vcpu *vcpu, u64 *sptep, int level)
168{
169 struct kvm_mmu_page *sp = page_header(__pa(sptep));
170
171 if (audit_point == AUDIT_POST_SYNC && sp->unsync)
172 audit_printk("meet unsync sp(%p) after sync root.\n", sp);
173}
174
175static void check_mappings_rmap(struct kvm *kvm, struct kvm_mmu_page *sp)
176{
177 int i;
178
179 if (sp->role.level != PT_PAGE_TABLE_LEVEL)
180 return;
181
182 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
183 if (!is_rmap_spte(sp->spt[i]))
184 continue;
185
186 inspect_spte_has_rmap(kvm, sp->spt + i);
187 }
188}
189
190static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
191{
192 struct kvm_memory_slot *slot;
193 unsigned long *rmapp;
194 u64 *spte;
195
196 if (sp->role.direct || sp->unsync || sp->role.invalid)
197 return;
198
199 slot = gfn_to_memslot(kvm, sp->gfn);
200 rmapp = &slot->rmap[sp->gfn - slot->base_gfn];
201
202 spte = rmap_next(kvm, rmapp, NULL);
203 while (spte) {
204 if (is_writable_pte(*spte))
205 audit_printk("shadow page has writable mappings: gfn "
206 "%llx role %x\n", sp->gfn, sp->role.word);
207 spte = rmap_next(kvm, rmapp, spte);
208 }
209}
210
211static void audit_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
212{
213 check_mappings_rmap(kvm, sp);
214 audit_write_protection(kvm, sp);
215}
216
217static void audit_all_active_sps(struct kvm *kvm)
218{
219 walk_all_active_sps(kvm, audit_sp);
220}
221
222static void audit_spte(struct kvm_vcpu *vcpu, u64 *sptep, int level)
223{
224 audit_sptes_have_rmaps(vcpu, sptep, level);
225 audit_mappings(vcpu, sptep, level);
226 audit_spte_after_sync(vcpu, sptep, level);
227}
228
229static void audit_vcpu_spte(struct kvm_vcpu *vcpu)
230{
231 mmu_spte_walk(vcpu, audit_spte);
232}
233
234static void kvm_mmu_audit(void *ignore, struct kvm_vcpu *vcpu, int point)
235{
236 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
237
238 if (!__ratelimit(&ratelimit_state))
239 return;
240
241 audit_point = point;
242 audit_all_active_sps(vcpu->kvm);
243 audit_vcpu_spte(vcpu);
244}
245
246static bool mmu_audit;
247
248static void mmu_audit_enable(void)
249{
250 int ret;
251
252 if (mmu_audit)
253 return;
254
255 ret = register_trace_kvm_mmu_audit(kvm_mmu_audit, NULL);
256 WARN_ON(ret);
257
258 mmu_audit = true;
259}
260
261static void mmu_audit_disable(void)
262{
263 if (!mmu_audit)
264 return;
265
266 unregister_trace_kvm_mmu_audit(kvm_mmu_audit, NULL);
267 tracepoint_synchronize_unregister();
268 mmu_audit = false;
269}
270
271static int mmu_audit_set(const char *val, const struct kernel_param *kp)
272{
273 int ret;
274 unsigned long enable;
275
276 ret = strict_strtoul(val, 10, &enable);
277 if (ret < 0)
278 return -EINVAL;
279
280 switch (enable) {
281 case 0:
282 mmu_audit_disable();
283 break;
284 case 1:
285 mmu_audit_enable();
286 break;
287 default:
288 return -EINVAL;
289 }
290
291 return 0;
292}
293
294static struct kernel_param_ops audit_param_ops = {
295 .set = mmu_audit_set,
296 .get = param_get_bool,
297};
298
299module_param_cb(mmu_audit, &audit_param_ops, &mmu_audit, 0644);
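
The new mmu_audit.c above is driven by mmu_spte_walk()/__mmu_spte_walk(): a depth-first walk over the shadow page-table hierarchy that invokes an inspect_spte_fn on every entry and recurses into present, non-leaf entries, while the mmu_audit module parameter merely registers or unregisters the kvm_mmu_audit tracepoint probe at runtime. Below is a stand-alone sketch of that walk over a toy radix tree; the node layout, PRESENT bit and callback signature are invented for illustration and do not match the kernel structures.

#include <stdio.h>

#define ENTRIES 4
#define TOY_PRESENT 0x1UL

/* Toy page-table node: each slot is either empty, a leaf, or points to a child table. */
struct toy_node {
	unsigned long spte[ENTRIES];
	struct toy_node *child[ENTRIES];
};

typedef void (*inspect_fn)(unsigned long *sptep, int level);

/* Depth-first walk, calling fn on every entry, in the spirit of __mmu_spte_walk(). */
static void spte_walk(struct toy_node *node, inspect_fn fn, int level)
{
	int i;

	for (i = 0; i < ENTRIES; i++) {
		fn(&node->spte[i], level);
		if ((node->spte[i] & TOY_PRESENT) && node->child[i])
			spte_walk(node->child[i], fn, level - 1);
	}
}

static void audit_spte(unsigned long *sptep, int level)
{
	if (*sptep & TOY_PRESENT)
		printf("present spte %#lx at level %d\n", *sptep, level);
}

int main(void)
{
	struct toy_node leaf = { .spte = { 0x2001, 0, 0x3001, 0 } };
	struct toy_node root = { .spte = { TOY_PRESENT, 0, 0, 0 }, .child = { &leaf } };

	spte_walk(&root, audit_spte, 2);
	return 0;
}
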
diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h
index 3aab0f0930ef..b60b4fdb3eda 100644
--- a/arch/x86/kvm/mmutrace.h
+++ b/arch/x86/kvm/mmutrace.h
@@ -195,6 +195,25 @@ DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_prepare_zap_page,
195 195
196 TP_ARGS(sp) 196 TP_ARGS(sp)
197); 197);
198
199TRACE_EVENT(
200 kvm_mmu_audit,
201 TP_PROTO(struct kvm_vcpu *vcpu, int audit_point),
202 TP_ARGS(vcpu, audit_point),
203
204 TP_STRUCT__entry(
205 __field(struct kvm_vcpu *, vcpu)
206 __field(int, audit_point)
207 ),
208
209 TP_fast_assign(
210 __entry->vcpu = vcpu;
211 __entry->audit_point = audit_point;
212 ),
213
214 TP_printk("vcpu:%d %s", __entry->vcpu->cpu,
215 audit_point_name[__entry->audit_point])
216);
198#endif /* _TRACE_KVMMMU_H */ 217#endif /* _TRACE_KVMMMU_H */
199 218
200#undef TRACE_INCLUDE_PATH 219#undef TRACE_INCLUDE_PATH
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 51ef9097960d..cd7a833a3b52 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -7,7 +7,7 @@
7 * MMU support 7 * MMU support
8 * 8 *
9 * Copyright (C) 2006 Qumranet, Inc. 9 * Copyright (C) 2006 Qumranet, Inc.
10 * Copyright 2010 Red Hat, Inc. and/or its affilates. 10 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
11 * 11 *
12 * Authors: 12 * Authors:
13 * Yaniv Kamay <yaniv@qumranet.com> 13 * Yaniv Kamay <yaniv@qumranet.com>
@@ -67,6 +67,7 @@ struct guest_walker {
67 int level; 67 int level;
68 gfn_t table_gfn[PT_MAX_FULL_LEVELS]; 68 gfn_t table_gfn[PT_MAX_FULL_LEVELS];
69 pt_element_t ptes[PT_MAX_FULL_LEVELS]; 69 pt_element_t ptes[PT_MAX_FULL_LEVELS];
70 pt_element_t prefetch_ptes[PTE_PREFETCH_NUM];
70 gpa_t pte_gpa[PT_MAX_FULL_LEVELS]; 71 gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
71 unsigned pt_access; 72 unsigned pt_access;
72 unsigned pte_access; 73 unsigned pte_access;
@@ -104,7 +105,7 @@ static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
104 105
105 access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK; 106 access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
106#if PTTYPE == 64 107#if PTTYPE == 64
107 if (is_nx(vcpu)) 108 if (vcpu->arch.mmu.nx)
108 access &= ~(gpte >> PT64_NX_SHIFT); 109 access &= ~(gpte >> PT64_NX_SHIFT);
109#endif 110#endif
110 return access; 111 return access;
@@ -113,26 +114,32 @@ static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
113/* 114/*
114 * Fetch a guest pte for a guest virtual address 115 * Fetch a guest pte for a guest virtual address
115 */ 116 */
116static int FNAME(walk_addr)(struct guest_walker *walker, 117static int FNAME(walk_addr_generic)(struct guest_walker *walker,
117 struct kvm_vcpu *vcpu, gva_t addr, 118 struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
118 int write_fault, int user_fault, int fetch_fault) 119 gva_t addr, u32 access)
119{ 120{
120 pt_element_t pte; 121 pt_element_t pte;
121 gfn_t table_gfn; 122 gfn_t table_gfn;
122 unsigned index, pt_access, uninitialized_var(pte_access); 123 unsigned index, pt_access, uninitialized_var(pte_access);
123 gpa_t pte_gpa; 124 gpa_t pte_gpa;
124 bool eperm, present, rsvd_fault; 125 bool eperm, present, rsvd_fault;
126 int offset, write_fault, user_fault, fetch_fault;
127
128 write_fault = access & PFERR_WRITE_MASK;
129 user_fault = access & PFERR_USER_MASK;
130 fetch_fault = access & PFERR_FETCH_MASK;
125 131
126 trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault, 132 trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault,
127 fetch_fault); 133 fetch_fault);
128walk: 134walk:
129 present = true; 135 present = true;
130 eperm = rsvd_fault = false; 136 eperm = rsvd_fault = false;
131 walker->level = vcpu->arch.mmu.root_level; 137 walker->level = mmu->root_level;
132 pte = vcpu->arch.cr3; 138 pte = mmu->get_cr3(vcpu);
139
133#if PTTYPE == 64 140#if PTTYPE == 64
134 if (!is_long_mode(vcpu)) { 141 if (walker->level == PT32E_ROOT_LEVEL) {
135 pte = kvm_pdptr_read(vcpu, (addr >> 30) & 3); 142 pte = kvm_pdptr_read_mmu(vcpu, mmu, (addr >> 30) & 3);
136 trace_kvm_mmu_paging_element(pte, walker->level); 143 trace_kvm_mmu_paging_element(pte, walker->level);
137 if (!is_present_gpte(pte)) { 144 if (!is_present_gpte(pte)) {
138 present = false; 145 present = false;
@@ -142,7 +149,7 @@ walk:
142 } 149 }
143#endif 150#endif
144 ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) || 151 ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
145 (vcpu->arch.cr3 & CR3_NONPAE_RESERVED_BITS) == 0); 152 (mmu->get_cr3(vcpu) & CR3_NONPAE_RESERVED_BITS) == 0);
146 153
147 pt_access = ACC_ALL; 154 pt_access = ACC_ALL;
148 155
@@ -150,12 +157,14 @@ walk:
150 index = PT_INDEX(addr, walker->level); 157 index = PT_INDEX(addr, walker->level);
151 158
152 table_gfn = gpte_to_gfn(pte); 159 table_gfn = gpte_to_gfn(pte);
153 pte_gpa = gfn_to_gpa(table_gfn); 160 offset = index * sizeof(pt_element_t);
154 pte_gpa += index * sizeof(pt_element_t); 161 pte_gpa = gfn_to_gpa(table_gfn) + offset;
155 walker->table_gfn[walker->level - 1] = table_gfn; 162 walker->table_gfn[walker->level - 1] = table_gfn;
156 walker->pte_gpa[walker->level - 1] = pte_gpa; 163 walker->pte_gpa[walker->level - 1] = pte_gpa;
157 164
158 if (kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte))) { 165 if (kvm_read_guest_page_mmu(vcpu, mmu, table_gfn, &pte,
166 offset, sizeof(pte),
167 PFERR_USER_MASK|PFERR_WRITE_MASK)) {
159 present = false; 168 present = false;
160 break; 169 break;
161 } 170 }
@@ -167,7 +176,7 @@ walk:
167 break; 176 break;
168 } 177 }
169 178
170 if (is_rsvd_bits_set(vcpu, pte, walker->level)) { 179 if (is_rsvd_bits_set(&vcpu->arch.mmu, pte, walker->level)) {
171 rsvd_fault = true; 180 rsvd_fault = true;
172 break; 181 break;
173 } 182 }
@@ -204,17 +213,28 @@ walk:
204 (PTTYPE == 64 || is_pse(vcpu))) || 213 (PTTYPE == 64 || is_pse(vcpu))) ||
205 ((walker->level == PT_PDPE_LEVEL) && 214 ((walker->level == PT_PDPE_LEVEL) &&
206 is_large_pte(pte) && 215 is_large_pte(pte) &&
207 is_long_mode(vcpu))) { 216 mmu->root_level == PT64_ROOT_LEVEL)) {
208 int lvl = walker->level; 217 int lvl = walker->level;
218 gpa_t real_gpa;
219 gfn_t gfn;
220 u32 ac;
209 221
210 walker->gfn = gpte_to_gfn_lvl(pte, lvl); 222 gfn = gpte_to_gfn_lvl(pte, lvl);
211 walker->gfn += (addr & PT_LVL_OFFSET_MASK(lvl)) 223 gfn += (addr & PT_LVL_OFFSET_MASK(lvl)) >> PAGE_SHIFT;
212 >> PAGE_SHIFT;
213 224
214 if (PTTYPE == 32 && 225 if (PTTYPE == 32 &&
215 walker->level == PT_DIRECTORY_LEVEL && 226 walker->level == PT_DIRECTORY_LEVEL &&
216 is_cpuid_PSE36()) 227 is_cpuid_PSE36())
217 walker->gfn += pse36_gfn_delta(pte); 228 gfn += pse36_gfn_delta(pte);
229
230 ac = write_fault | fetch_fault | user_fault;
231
232 real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn),
233 ac);
234 if (real_gpa == UNMAPPED_GVA)
235 return 0;
236
237 walker->gfn = real_gpa >> PAGE_SHIFT;
218 238
219 break; 239 break;
220 } 240 }
@@ -249,18 +269,36 @@ error:
249 walker->error_code = 0; 269 walker->error_code = 0;
250 if (present) 270 if (present)
251 walker->error_code |= PFERR_PRESENT_MASK; 271 walker->error_code |= PFERR_PRESENT_MASK;
252 if (write_fault) 272
253 walker->error_code |= PFERR_WRITE_MASK; 273 walker->error_code |= write_fault | user_fault;
254 if (user_fault) 274
255 walker->error_code |= PFERR_USER_MASK; 275 if (fetch_fault && mmu->nx)
256 if (fetch_fault && is_nx(vcpu))
257 walker->error_code |= PFERR_FETCH_MASK; 276 walker->error_code |= PFERR_FETCH_MASK;
258 if (rsvd_fault) 277 if (rsvd_fault)
259 walker->error_code |= PFERR_RSVD_MASK; 278 walker->error_code |= PFERR_RSVD_MASK;
279
280 vcpu->arch.fault.address = addr;
281 vcpu->arch.fault.error_code = walker->error_code;
282
260 trace_kvm_mmu_walker_error(walker->error_code); 283 trace_kvm_mmu_walker_error(walker->error_code);
261 return 0; 284 return 0;
262} 285}
263 286
287static int FNAME(walk_addr)(struct guest_walker *walker,
288 struct kvm_vcpu *vcpu, gva_t addr, u32 access)
289{
290 return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.mmu, addr,
291 access);
292}
293
294static int FNAME(walk_addr_nested)(struct guest_walker *walker,
295 struct kvm_vcpu *vcpu, gva_t addr,
296 u32 access)
297{
298 return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
299 addr, access);
300}
301
264static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, 302static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
265 u64 *spte, const void *pte) 303 u64 *spte, const void *pte)
266{ 304{
@@ -302,14 +340,87 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
302static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu, 340static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
303 struct guest_walker *gw, int level) 341 struct guest_walker *gw, int level)
304{ 342{
305 int r;
306 pt_element_t curr_pte; 343 pt_element_t curr_pte;
307 344 gpa_t base_gpa, pte_gpa = gw->pte_gpa[level - 1];
308 r = kvm_read_guest_atomic(vcpu->kvm, gw->pte_gpa[level - 1], 345 u64 mask;
346 int r, index;
347
348 if (level == PT_PAGE_TABLE_LEVEL) {
349 mask = PTE_PREFETCH_NUM * sizeof(pt_element_t) - 1;
350 base_gpa = pte_gpa & ~mask;
351 index = (pte_gpa - base_gpa) / sizeof(pt_element_t);
352
353 r = kvm_read_guest_atomic(vcpu->kvm, base_gpa,
354 gw->prefetch_ptes, sizeof(gw->prefetch_ptes));
355 curr_pte = gw->prefetch_ptes[index];
356 } else
357 r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa,
309 &curr_pte, sizeof(curr_pte)); 358 &curr_pte, sizeof(curr_pte));
359
310 return r || curr_pte != gw->ptes[level - 1]; 360 return r || curr_pte != gw->ptes[level - 1];
311} 361}
312 362
363static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
364 u64 *sptep)
365{
366 struct kvm_mmu_page *sp;
367 struct kvm_mmu *mmu = &vcpu->arch.mmu;
368 pt_element_t *gptep = gw->prefetch_ptes;
369 u64 *spte;
370 int i;
371
372 sp = page_header(__pa(sptep));
373
374 if (sp->role.level > PT_PAGE_TABLE_LEVEL)
375 return;
376
377 if (sp->role.direct)
378 return __direct_pte_prefetch(vcpu, sp, sptep);
379
380 i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
381 spte = sp->spt + i;
382
383 for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
384 pt_element_t gpte;
385 unsigned pte_access;
386 gfn_t gfn;
387 pfn_t pfn;
388 bool dirty;
389
390 if (spte == sptep)
391 continue;
392
393 if (*spte != shadow_trap_nonpresent_pte)
394 continue;
395
396 gpte = gptep[i];
397
398 if (!is_present_gpte(gpte) ||
399 is_rsvd_bits_set(mmu, gpte, PT_PAGE_TABLE_LEVEL)) {
400 if (!sp->unsync)
401 __set_spte(spte, shadow_notrap_nonpresent_pte);
402 continue;
403 }
404
405 if (!(gpte & PT_ACCESSED_MASK))
406 continue;
407
408 pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
409 gfn = gpte_to_gfn(gpte);
410 dirty = is_dirty_gpte(gpte);
411 pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
412 (pte_access & ACC_WRITE_MASK) && dirty);
413 if (is_error_pfn(pfn)) {
414 kvm_release_pfn_clean(pfn);
415 break;
416 }
417
418 mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
419 dirty, NULL, PT_PAGE_TABLE_LEVEL, gfn,
420 pfn, true, true);
421 }
422}
423
313/* 424/*
314 * Fetch a shadow pte for a specific level in the paging hierarchy. 425 * Fetch a shadow pte for a specific level in the paging hierarchy.
315 */ 426 */
@@ -391,6 +502,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
391 mmu_set_spte(vcpu, it.sptep, access, gw->pte_access & access, 502 mmu_set_spte(vcpu, it.sptep, access, gw->pte_access & access,
392 user_fault, write_fault, dirty, ptwrite, it.level, 503 user_fault, write_fault, dirty, ptwrite, it.level,
393 gw->gfn, pfn, false, true); 504 gw->gfn, pfn, false, true);
505 FNAME(pte_prefetch)(vcpu, gw, it.sptep);
394 506
395 return it.sptep; 507 return it.sptep;
396 508
@@ -420,7 +532,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
420{ 532{
421 int write_fault = error_code & PFERR_WRITE_MASK; 533 int write_fault = error_code & PFERR_WRITE_MASK;
422 int user_fault = error_code & PFERR_USER_MASK; 534 int user_fault = error_code & PFERR_USER_MASK;
423 int fetch_fault = error_code & PFERR_FETCH_MASK;
424 struct guest_walker walker; 535 struct guest_walker walker;
425 u64 *sptep; 536 u64 *sptep;
426 int write_pt = 0; 537 int write_pt = 0;
@@ -430,7 +541,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
430 unsigned long mmu_seq; 541 unsigned long mmu_seq;
431 542
432 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code); 543 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
433 kvm_mmu_audit(vcpu, "pre page fault");
434 544
435 r = mmu_topup_memory_caches(vcpu); 545 r = mmu_topup_memory_caches(vcpu);
436 if (r) 546 if (r)
@@ -439,15 +549,14 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
439 /* 549 /*
440 * Look up the guest pte for the faulting address. 550 * Look up the guest pte for the faulting address.
441 */ 551 */
442 r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault, 552 r = FNAME(walk_addr)(&walker, vcpu, addr, error_code);
443 fetch_fault);
444 553
445 /* 554 /*
446 * The page is not mapped by the guest. Let the guest handle it. 555 * The page is not mapped by the guest. Let the guest handle it.
447 */ 556 */
448 if (!r) { 557 if (!r) {
449 pgprintk("%s: guest page fault\n", __func__); 558 pgprintk("%s: guest page fault\n", __func__);
450 inject_page_fault(vcpu, addr, walker.error_code); 559 inject_page_fault(vcpu);
451 vcpu->arch.last_pt_write_count = 0; /* reset fork detector */ 560 vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
452 return 0; 561 return 0;
453 } 562 }
@@ -468,6 +577,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
468 spin_lock(&vcpu->kvm->mmu_lock); 577 spin_lock(&vcpu->kvm->mmu_lock);
469 if (mmu_notifier_retry(vcpu, mmu_seq)) 578 if (mmu_notifier_retry(vcpu, mmu_seq))
470 goto out_unlock; 579 goto out_unlock;
580
581 trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
471 kvm_mmu_free_some_pages(vcpu); 582 kvm_mmu_free_some_pages(vcpu);
472 sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault, 583 sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
473 level, &write_pt, pfn); 584 level, &write_pt, pfn);
@@ -479,7 +590,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
479 vcpu->arch.last_pt_write_count = 0; /* reset fork detector */ 590 vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
480 591
481 ++vcpu->stat.pf_fixed; 592 ++vcpu->stat.pf_fixed;
482 kvm_mmu_audit(vcpu, "post page fault (fixed)"); 593 trace_kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
483 spin_unlock(&vcpu->kvm->mmu_lock); 594 spin_unlock(&vcpu->kvm->mmu_lock);
484 595
485 return write_pt; 596 return write_pt;
@@ -556,10 +667,25 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
556 gpa_t gpa = UNMAPPED_GVA; 667 gpa_t gpa = UNMAPPED_GVA;
557 int r; 668 int r;
558 669
559 r = FNAME(walk_addr)(&walker, vcpu, vaddr, 670 r = FNAME(walk_addr)(&walker, vcpu, vaddr, access);
560 !!(access & PFERR_WRITE_MASK), 671
561 !!(access & PFERR_USER_MASK), 672 if (r) {
562 !!(access & PFERR_FETCH_MASK)); 673 gpa = gfn_to_gpa(walker.gfn);
674 gpa |= vaddr & ~PAGE_MASK;
675 } else if (error)
676 *error = walker.error_code;
677
678 return gpa;
679}
680
681static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
682 u32 access, u32 *error)
683{
684 struct guest_walker walker;
685 gpa_t gpa = UNMAPPED_GVA;
686 int r;
687
688 r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access);
563 689
564 if (r) { 690 if (r) {
565 gpa = gfn_to_gpa(walker.gfn); 691 gpa = gfn_to_gpa(walker.gfn);
@@ -638,7 +764,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
638 return -EINVAL; 764 return -EINVAL;
639 765
640 gfn = gpte_to_gfn(gpte); 766 gfn = gpte_to_gfn(gpte);
641 if (is_rsvd_bits_set(vcpu, gpte, PT_PAGE_TABLE_LEVEL) 767 if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL)
642 || gfn != sp->gfns[i] || !is_present_gpte(gpte) 768 || gfn != sp->gfns[i] || !is_present_gpte(gpte)
643 || !(gpte & PT_ACCESSED_MASK)) { 769 || !(gpte & PT_ACCESSED_MASK)) {
644 u64 nonpresent; 770 u64 nonpresent;
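
The paging_tmpl.h changes above split address translation for a nested guest into two steps: walk_addr_nested() resolves the L2 virtual address through the L2 guest's own page tables to an L2-physical address, and that result is then translated to an L1-physical address via the mmu's translate_gpa callback. A compact user-space model of the composition, using a pair of flat lookup tables instead of real page tables; every name here is illustrative.

#include <stdio.h>

#define UNMAPPED (~0UL)
#define PAGES 4
#define PAGE_SHIFT 12

/* Toy one-level "page tables": index by page number, get a page number back. */
static const unsigned long l2_guest_pt[PAGES]  = { 2, UNMAPPED, 0, 1 };        /* L2 gva -> L2 gpa */
static const unsigned long l1_nested_pt[PAGES] = { 7, 5, 9, UNMAPPED };        /* L2 gpa -> L1 gpa */

static unsigned long lookup(const unsigned long *pt, unsigned long addr)
{
	unsigned long pfn = addr >> PAGE_SHIFT;

	if (pfn >= PAGES || pt[pfn] == UNMAPPED)
		return UNMAPPED;
	return (pt[pfn] << PAGE_SHIFT) | (addr & ((1UL << PAGE_SHIFT) - 1));
}

/* Compose the two walks, like gva_to_gpa_nested() followed by translate_gpa(). */
static unsigned long l2_gva_to_l1_gpa(unsigned long gva)
{
	unsigned long l2_gpa = lookup(l2_guest_pt, gva);

	if (l2_gpa == UNMAPPED)
		return UNMAPPED;	/* a guest page fault would be injected here */
	return lookup(l1_nested_pt, l2_gpa);
}

int main(void)
{
	unsigned long gva = (3UL << PAGE_SHIFT) | 0x123;

	printf("L2 gva %#lx -> L1 gpa %#lx\n", gva, l2_gva_to_l1_gpa(gva));
	return 0;
}
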
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 8a3f9f64f86f..82e144a4e514 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -4,7 +4,7 @@
4 * AMD SVM support 4 * AMD SVM support
5 * 5 *
6 * Copyright (C) 2006 Qumranet, Inc. 6 * Copyright (C) 2006 Qumranet, Inc.
7 * Copyright 2010 Red Hat, Inc. and/or its affilates. 7 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
8 * 8 *
9 * Authors: 9 * Authors:
10 * Yaniv Kamay <yaniv@qumranet.com> 10 * Yaniv Kamay <yaniv@qumranet.com>
@@ -88,6 +88,14 @@ struct nested_state {
88 /* A VMEXIT is required but not yet emulated */ 88 /* A VMEXIT is required but not yet emulated */
89 bool exit_required; 89 bool exit_required;
90 90
91 /*
 92 * If we vmexit during an instruction emulation, we need this to restore
 93 * the L1 guest rip after the emulation.
94 */
95 unsigned long vmexit_rip;
96 unsigned long vmexit_rsp;
97 unsigned long vmexit_rax;
98
91 /* cache for intercepts of the guest */ 99 /* cache for intercepts of the guest */
92 u16 intercept_cr_read; 100 u16 intercept_cr_read;
93 u16 intercept_cr_write; 101 u16 intercept_cr_write;
@@ -96,6 +104,8 @@ struct nested_state {
96 u32 intercept_exceptions; 104 u32 intercept_exceptions;
97 u64 intercept; 105 u64 intercept;
98 106
107 /* Nested Paging related state */
108 u64 nested_cr3;
99}; 109};
100 110
101#define MSRPM_OFFSETS 16 111#define MSRPM_OFFSETS 16
@@ -284,6 +294,15 @@ static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
284 force_new_asid(vcpu); 294 force_new_asid(vcpu);
285} 295}
286 296
297static int get_npt_level(void)
298{
299#ifdef CONFIG_X86_64
300 return PT64_ROOT_LEVEL;
301#else
302 return PT32E_ROOT_LEVEL;
303#endif
304}
305
287static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer) 306static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
288{ 307{
289 vcpu->arch.efer = efer; 308 vcpu->arch.efer = efer;
@@ -701,6 +720,29 @@ static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
701 seg->base = 0; 720 seg->base = 0;
702} 721}
703 722
723static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
724{
725 struct vcpu_svm *svm = to_svm(vcpu);
726 u64 g_tsc_offset = 0;
727
728 if (is_nested(svm)) {
729 g_tsc_offset = svm->vmcb->control.tsc_offset -
730 svm->nested.hsave->control.tsc_offset;
731 svm->nested.hsave->control.tsc_offset = offset;
732 }
733
734 svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
735}
736
737static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
738{
739 struct vcpu_svm *svm = to_svm(vcpu);
740
741 svm->vmcb->control.tsc_offset += adjustment;
742 if (is_nested(svm))
743 svm->nested.hsave->control.tsc_offset += adjustment;
744}
745
704static void init_vmcb(struct vcpu_svm *svm) 746static void init_vmcb(struct vcpu_svm *svm)
705{ 747{
706 struct vmcb_control_area *control = &svm->vmcb->control; 748 struct vmcb_control_area *control = &svm->vmcb->control;
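
The svm_write_tsc_offset() hunk above keeps two offsets consistent while an L2 guest runs: the offset programmed into the active VMCB and the L1 offset saved in hsave. It records the delta between them (g_tsc_offset) before overwriting the L1 value, so the L2 guest keeps seeing the same relative offset, and svm_adjust_tsc_offset() shifts both views by the same amount. A small self-contained model of that bookkeeping, using made-up field names rather than the real vmcb layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-ins for the active VMCB and the saved host (L1) state. */
struct toy_state {
	int64_t vmcb_tsc_offset;	/* offset in effect for the running guest */
	int64_t hsave_tsc_offset;	/* L1's offset, saved while L2 runs */
	bool nested;			/* is an L2 guest currently running? */
};

/* Same idea as svm_write_tsc_offset(): 'offset' is the new L1 offset. */
static void write_tsc_offset(struct toy_state *s, int64_t offset)
{
	int64_t g_tsc_offset = 0;

	if (s->nested) {
		/* Preserve L2's offset relative to L1 before replacing L1's value. */
		g_tsc_offset = s->vmcb_tsc_offset - s->hsave_tsc_offset;
		s->hsave_tsc_offset = offset;
	}
	s->vmcb_tsc_offset = offset + g_tsc_offset;
}

/* Same idea as svm_adjust_tsc_offset(): shift both views by the same amount. */
static void adjust_tsc_offset(struct toy_state *s, int64_t adjustment)
{
	s->vmcb_tsc_offset += adjustment;
	if (s->nested)
		s->hsave_tsc_offset += adjustment;
}

int main(void)
{
	struct toy_state s = { .vmcb_tsc_offset = -300, .hsave_tsc_offset = -100, .nested = true };

	write_tsc_offset(&s, -150);	/* L1 offset becomes -150, L2 keeps its -200 delta */
	adjust_tsc_offset(&s, 10);
	printf("vmcb=%lld hsave=%lld\n", (long long)s.vmcb_tsc_offset, (long long)s.hsave_tsc_offset);
	return 0;
}
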
@@ -793,7 +835,7 @@ static void init_vmcb(struct vcpu_svm *svm)
793 init_sys_seg(&save->ldtr, SEG_TYPE_LDT); 835 init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
794 init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16); 836 init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
795 837
796 save->efer = EFER_SVME; 838 svm_set_efer(&svm->vcpu, 0);
797 save->dr6 = 0xffff0ff0; 839 save->dr6 = 0xffff0ff0;
798 save->dr7 = 0x400; 840 save->dr7 = 0x400;
799 save->rflags = 2; 841 save->rflags = 2;
@@ -804,8 +846,8 @@ static void init_vmcb(struct vcpu_svm *svm)
804 * This is the guest-visible cr0 value. 846 * This is the guest-visible cr0 value.
805 * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0. 847 * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
806 */ 848 */
807 svm->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET; 849 svm->vcpu.arch.cr0 = 0;
808 (void)kvm_set_cr0(&svm->vcpu, svm->vcpu.arch.cr0); 850 (void)kvm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
809 851
810 save->cr4 = X86_CR4_PAE; 852 save->cr4 = X86_CR4_PAE;
811 /* rdx = ?? */ 853 /* rdx = ?? */
@@ -901,7 +943,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
901 svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT; 943 svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
902 svm->asid_generation = 0; 944 svm->asid_generation = 0;
903 init_vmcb(svm); 945 init_vmcb(svm);
904 svm->vmcb->control.tsc_offset = 0-native_read_tsc(); 946 kvm_write_tsc(&svm->vcpu, 0);
905 947
906 err = fx_init(&svm->vcpu); 948 err = fx_init(&svm->vcpu);
907 if (err) 949 if (err)
@@ -947,20 +989,6 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
947 int i; 989 int i;
948 990
949 if (unlikely(cpu != vcpu->cpu)) { 991 if (unlikely(cpu != vcpu->cpu)) {
950 u64 delta;
951
952 if (check_tsc_unstable()) {
953 /*
954 * Make sure that the guest sees a monotonically
955 * increasing TSC.
956 */
957 delta = vcpu->arch.host_tsc - native_read_tsc();
958 svm->vmcb->control.tsc_offset += delta;
959 if (is_nested(svm))
960 svm->nested.hsave->control.tsc_offset += delta;
961 }
962 vcpu->cpu = cpu;
963 kvm_migrate_timers(vcpu);
964 svm->asid_generation = 0; 992 svm->asid_generation = 0;
965 } 993 }
966 994
@@ -976,8 +1004,6 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
976 ++vcpu->stat.host_state_reload; 1004 ++vcpu->stat.host_state_reload;
977 for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) 1005 for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
978 wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); 1006 wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
979
980 vcpu->arch.host_tsc = native_read_tsc();
981} 1007}
982 1008
983static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu) 1009static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
@@ -995,7 +1021,7 @@ static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
995 switch (reg) { 1021 switch (reg) {
996 case VCPU_EXREG_PDPTR: 1022 case VCPU_EXREG_PDPTR:
997 BUG_ON(!npt_enabled); 1023 BUG_ON(!npt_enabled);
998 load_pdptrs(vcpu, vcpu->arch.cr3); 1024 load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3);
999 break; 1025 break;
1000 default: 1026 default:
1001 BUG(); 1027 BUG();
@@ -1206,8 +1232,12 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1206 if (old == new) { 1232 if (old == new) {
1207 /* cr0 write with ts and mp unchanged */ 1233 /* cr0 write with ts and mp unchanged */
1208 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE; 1234 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
1209 if (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE) 1235 if (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE) {
1236 svm->nested.vmexit_rip = kvm_rip_read(vcpu);
1237 svm->nested.vmexit_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
1238 svm->nested.vmexit_rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
1210 return; 1239 return;
1240 }
1211 } 1241 }
1212 } 1242 }
1213 1243
@@ -1581,6 +1611,54 @@ static int vmmcall_interception(struct vcpu_svm *svm)
1581 return 1; 1611 return 1;
1582} 1612}
1583 1613
1614static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
1615{
1616 struct vcpu_svm *svm = to_svm(vcpu);
1617
1618 return svm->nested.nested_cr3;
1619}
1620
1621static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
1622 unsigned long root)
1623{
1624 struct vcpu_svm *svm = to_svm(vcpu);
1625
1626 svm->vmcb->control.nested_cr3 = root;
1627 force_new_asid(vcpu);
1628}
1629
1630static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu)
1631{
1632 struct vcpu_svm *svm = to_svm(vcpu);
1633
1634 svm->vmcb->control.exit_code = SVM_EXIT_NPF;
1635 svm->vmcb->control.exit_code_hi = 0;
1636 svm->vmcb->control.exit_info_1 = vcpu->arch.fault.error_code;
1637 svm->vmcb->control.exit_info_2 = vcpu->arch.fault.address;
1638
1639 nested_svm_vmexit(svm);
1640}
1641
1642static int nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
1643{
1644 int r;
1645
1646 r = kvm_init_shadow_mmu(vcpu, &vcpu->arch.mmu);
1647
1648 vcpu->arch.mmu.set_cr3 = nested_svm_set_tdp_cr3;
1649 vcpu->arch.mmu.get_cr3 = nested_svm_get_tdp_cr3;
1650 vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
1651 vcpu->arch.mmu.shadow_root_level = get_npt_level();
1652 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
1653
1654 return r;
1655}
1656
1657static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
1658{
1659 vcpu->arch.walk_mmu = &vcpu->arch.mmu;
1660}
1661
1584static int nested_svm_check_permissions(struct vcpu_svm *svm) 1662static int nested_svm_check_permissions(struct vcpu_svm *svm)
1585{ 1663{
1586 if (!(svm->vcpu.arch.efer & EFER_SVME) 1664 if (!(svm->vcpu.arch.efer & EFER_SVME)
@@ -1629,6 +1707,14 @@ static inline bool nested_svm_intr(struct vcpu_svm *svm)
1629 if (!(svm->vcpu.arch.hflags & HF_HIF_MASK)) 1707 if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
1630 return false; 1708 return false;
1631 1709
1710 /*
1711 * if vmexit was already requested (by intercepted exception
1712 * for instance) do not overwrite it with "external interrupt"
1713 * vmexit.
1714 */
1715 if (svm->nested.exit_required)
1716 return false;
1717
1632 svm->vmcb->control.exit_code = SVM_EXIT_INTR; 1718 svm->vmcb->control.exit_code = SVM_EXIT_INTR;
1633 svm->vmcb->control.exit_info_1 = 0; 1719 svm->vmcb->control.exit_info_1 = 0;
1634 svm->vmcb->control.exit_info_2 = 0; 1720 svm->vmcb->control.exit_info_2 = 0;
@@ -1896,6 +1982,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
1896 nested_vmcb->save.ds = vmcb->save.ds; 1982 nested_vmcb->save.ds = vmcb->save.ds;
1897 nested_vmcb->save.gdtr = vmcb->save.gdtr; 1983 nested_vmcb->save.gdtr = vmcb->save.gdtr;
1898 nested_vmcb->save.idtr = vmcb->save.idtr; 1984 nested_vmcb->save.idtr = vmcb->save.idtr;
1985 nested_vmcb->save.efer = svm->vcpu.arch.efer;
1899 nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu); 1986 nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu);
1900 nested_vmcb->save.cr3 = svm->vcpu.arch.cr3; 1987 nested_vmcb->save.cr3 = svm->vcpu.arch.cr3;
1901 nested_vmcb->save.cr2 = vmcb->save.cr2; 1988 nested_vmcb->save.cr2 = vmcb->save.cr2;
@@ -1917,6 +2004,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
1917 nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2; 2004 nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2;
1918 nested_vmcb->control.exit_int_info = vmcb->control.exit_int_info; 2005 nested_vmcb->control.exit_int_info = vmcb->control.exit_int_info;
1919 nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err; 2006 nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;
2007 nested_vmcb->control.next_rip = vmcb->control.next_rip;
1920 2008
1921 /* 2009 /*
1922 * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have 2010 * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
@@ -1947,6 +2035,8 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
1947 kvm_clear_exception_queue(&svm->vcpu); 2035 kvm_clear_exception_queue(&svm->vcpu);
1948 kvm_clear_interrupt_queue(&svm->vcpu); 2036 kvm_clear_interrupt_queue(&svm->vcpu);
1949 2037
2038 svm->nested.nested_cr3 = 0;
2039
1950 /* Restore selected save entries */ 2040 /* Restore selected save entries */
1951 svm->vmcb->save.es = hsave->save.es; 2041 svm->vmcb->save.es = hsave->save.es;
1952 svm->vmcb->save.cs = hsave->save.cs; 2042 svm->vmcb->save.cs = hsave->save.cs;
@@ -1973,6 +2063,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
1973 2063
1974 nested_svm_unmap(page); 2064 nested_svm_unmap(page);
1975 2065
2066 nested_svm_uninit_mmu_context(&svm->vcpu);
1976 kvm_mmu_reset_context(&svm->vcpu); 2067 kvm_mmu_reset_context(&svm->vcpu);
1977 kvm_mmu_load(&svm->vcpu); 2068 kvm_mmu_load(&svm->vcpu);
1978 2069
@@ -2012,6 +2103,20 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
2012 return true; 2103 return true;
2013} 2104}
2014 2105
2106static bool nested_vmcb_checks(struct vmcb *vmcb)
2107{
2108 if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
2109 return false;
2110
2111 if (vmcb->control.asid == 0)
2112 return false;
2113
2114 if (vmcb->control.nested_ctl && !npt_enabled)
2115 return false;
2116
2117 return true;
2118}
2119
2015static bool nested_svm_vmrun(struct vcpu_svm *svm) 2120static bool nested_svm_vmrun(struct vcpu_svm *svm)
2016{ 2121{
2017 struct vmcb *nested_vmcb; 2122 struct vmcb *nested_vmcb;
@@ -2026,7 +2131,18 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
2026 if (!nested_vmcb) 2131 if (!nested_vmcb)
2027 return false; 2132 return false;
2028 2133
2029 trace_kvm_nested_vmrun(svm->vmcb->save.rip - 3, vmcb_gpa, 2134 if (!nested_vmcb_checks(nested_vmcb)) {
2135 nested_vmcb->control.exit_code = SVM_EXIT_ERR;
2136 nested_vmcb->control.exit_code_hi = 0;
2137 nested_vmcb->control.exit_info_1 = 0;
2138 nested_vmcb->control.exit_info_2 = 0;
2139
2140 nested_svm_unmap(page);
2141
2142 return false;
2143 }
2144
2145 trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
2030 nested_vmcb->save.rip, 2146 nested_vmcb->save.rip,
2031 nested_vmcb->control.int_ctl, 2147 nested_vmcb->control.int_ctl,
2032 nested_vmcb->control.event_inj, 2148 nested_vmcb->control.event_inj,
@@ -2055,7 +2171,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
2055 hsave->save.cr0 = kvm_read_cr0(&svm->vcpu); 2171 hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
2056 hsave->save.cr4 = svm->vcpu.arch.cr4; 2172 hsave->save.cr4 = svm->vcpu.arch.cr4;
2057 hsave->save.rflags = vmcb->save.rflags; 2173 hsave->save.rflags = vmcb->save.rflags;
2058 hsave->save.rip = svm->next_rip; 2174 hsave->save.rip = kvm_rip_read(&svm->vcpu);
2059 hsave->save.rsp = vmcb->save.rsp; 2175 hsave->save.rsp = vmcb->save.rsp;
2060 hsave->save.rax = vmcb->save.rax; 2176 hsave->save.rax = vmcb->save.rax;
2061 if (npt_enabled) 2177 if (npt_enabled)
@@ -2070,6 +2186,12 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
2070 else 2186 else
2071 svm->vcpu.arch.hflags &= ~HF_HIF_MASK; 2187 svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
2072 2188
2189 if (nested_vmcb->control.nested_ctl) {
2190 kvm_mmu_unload(&svm->vcpu);
2191 svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
2192 nested_svm_init_mmu_context(&svm->vcpu);
2193 }
2194
2073 /* Load the nested guest state */ 2195 /* Load the nested guest state */
2074 svm->vmcb->save.es = nested_vmcb->save.es; 2196 svm->vmcb->save.es = nested_vmcb->save.es;
2075 svm->vmcb->save.cs = nested_vmcb->save.cs; 2197 svm->vmcb->save.cs = nested_vmcb->save.cs;
@@ -2227,8 +2349,8 @@ static int vmrun_interception(struct vcpu_svm *svm)
2227 if (nested_svm_check_permissions(svm)) 2349 if (nested_svm_check_permissions(svm))
2228 return 1; 2350 return 1;
2229 2351
2230 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; 2352 /* Save rip after vmrun instruction */
2231 skip_emulated_instruction(&svm->vcpu); 2353 kvm_rip_write(&svm->vcpu, kvm_rip_read(&svm->vcpu) + 3);
2232 2354
2233 if (!nested_svm_vmrun(svm)) 2355 if (!nested_svm_vmrun(svm))
2234 return 1; 2356 return 1;
@@ -2257,6 +2379,7 @@ static int stgi_interception(struct vcpu_svm *svm)
2257 2379
2258 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; 2380 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2259 skip_emulated_instruction(&svm->vcpu); 2381 skip_emulated_instruction(&svm->vcpu);
2382 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
2260 2383
2261 enable_gif(svm); 2384 enable_gif(svm);
2262 2385
@@ -2399,6 +2522,23 @@ static int emulate_on_interception(struct vcpu_svm *svm)
2399 return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE; 2522 return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE;
2400} 2523}
2401 2524
2525static int cr0_write_interception(struct vcpu_svm *svm)
2526{
2527 struct kvm_vcpu *vcpu = &svm->vcpu;
2528 int r;
2529
2530 r = emulate_instruction(&svm->vcpu, 0, 0, 0);
2531
2532 if (svm->nested.vmexit_rip) {
2533 kvm_register_write(vcpu, VCPU_REGS_RIP, svm->nested.vmexit_rip);
2534 kvm_register_write(vcpu, VCPU_REGS_RSP, svm->nested.vmexit_rsp);
2535 kvm_register_write(vcpu, VCPU_REGS_RAX, svm->nested.vmexit_rax);
2536 svm->nested.vmexit_rip = 0;
2537 }
2538
2539 return r == EMULATE_DONE;
2540}
2541
2402static int cr8_write_interception(struct vcpu_svm *svm) 2542static int cr8_write_interception(struct vcpu_svm *svm)
2403{ 2543{
2404 struct kvm_run *kvm_run = svm->vcpu.run; 2544 struct kvm_run *kvm_run = svm->vcpu.run;
@@ -2542,20 +2682,9 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
2542 struct vcpu_svm *svm = to_svm(vcpu); 2682 struct vcpu_svm *svm = to_svm(vcpu);
2543 2683
2544 switch (ecx) { 2684 switch (ecx) {
2545 case MSR_IA32_TSC: { 2685 case MSR_IA32_TSC:
2546 u64 tsc_offset = data - native_read_tsc(); 2686 kvm_write_tsc(vcpu, data);
2547 u64 g_tsc_offset = 0;
2548
2549 if (is_nested(svm)) {
2550 g_tsc_offset = svm->vmcb->control.tsc_offset -
2551 svm->nested.hsave->control.tsc_offset;
2552 svm->nested.hsave->control.tsc_offset = tsc_offset;
2553 }
2554
2555 svm->vmcb->control.tsc_offset = tsc_offset + g_tsc_offset;
2556
2557 break; 2687 break;
2558 }
2559 case MSR_STAR: 2688 case MSR_STAR:
2560 svm->vmcb->save.star = data; 2689 svm->vmcb->save.star = data;
2561 break; 2690 break;
@@ -2643,6 +2772,7 @@ static int interrupt_window_interception(struct vcpu_svm *svm)
2643{ 2772{
2644 struct kvm_run *kvm_run = svm->vcpu.run; 2773 struct kvm_run *kvm_run = svm->vcpu.run;
2645 2774
2775 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
2646 svm_clear_vintr(svm); 2776 svm_clear_vintr(svm);
2647 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK; 2777 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
2648 /* 2778 /*
@@ -2672,7 +2802,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
2672 [SVM_EXIT_READ_CR4] = emulate_on_interception, 2802 [SVM_EXIT_READ_CR4] = emulate_on_interception,
2673 [SVM_EXIT_READ_CR8] = emulate_on_interception, 2803 [SVM_EXIT_READ_CR8] = emulate_on_interception,
2674 [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception, 2804 [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception,
2675 [SVM_EXIT_WRITE_CR0] = emulate_on_interception, 2805 [SVM_EXIT_WRITE_CR0] = cr0_write_interception,
2676 [SVM_EXIT_WRITE_CR3] = emulate_on_interception, 2806 [SVM_EXIT_WRITE_CR3] = emulate_on_interception,
2677 [SVM_EXIT_WRITE_CR4] = emulate_on_interception, 2807 [SVM_EXIT_WRITE_CR4] = emulate_on_interception,
2678 [SVM_EXIT_WRITE_CR8] = cr8_write_interception, 2808 [SVM_EXIT_WRITE_CR8] = cr8_write_interception,
@@ -2871,7 +3001,8 @@ static int handle_exit(struct kvm_vcpu *vcpu)
2871 3001
2872 if (is_external_interrupt(svm->vmcb->control.exit_int_info) && 3002 if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
2873 exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR && 3003 exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
2874 exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH) 3004 exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
3005 exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
2875 printk(KERN_ERR "%s: unexpected exit_ini_info 0x%x " 3006 printk(KERN_ERR "%s: unexpected exit_ini_info 0x%x "
2876 "exit_code 0x%x\n", 3007 "exit_code 0x%x\n",
2877 __func__, svm->vmcb->control.exit_int_info, 3008 __func__, svm->vmcb->control.exit_int_info,
@@ -3088,8 +3219,10 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
3088 3219
3089 svm->int3_injected = 0; 3220 svm->int3_injected = 0;
3090 3221
3091 if (svm->vcpu.arch.hflags & HF_IRET_MASK) 3222 if (svm->vcpu.arch.hflags & HF_IRET_MASK) {
3092 svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK); 3223 svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
3224 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
3225 }
3093 3226
3094 svm->vcpu.arch.nmi_injected = false; 3227 svm->vcpu.arch.nmi_injected = false;
3095 kvm_clear_exception_queue(&svm->vcpu); 3228 kvm_clear_exception_queue(&svm->vcpu);
@@ -3098,6 +3231,8 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
3098 if (!(exitintinfo & SVM_EXITINTINFO_VALID)) 3231 if (!(exitintinfo & SVM_EXITINTINFO_VALID))
3099 return; 3232 return;
3100 3233
3234 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
3235
3101 vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK; 3236 vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
3102 type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK; 3237 type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
3103 3238
@@ -3134,6 +3269,17 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
3134 } 3269 }
3135} 3270}
3136 3271
3272static void svm_cancel_injection(struct kvm_vcpu *vcpu)
3273{
3274 struct vcpu_svm *svm = to_svm(vcpu);
3275 struct vmcb_control_area *control = &svm->vmcb->control;
3276
3277 control->exit_int_info = control->event_inj;
3278 control->exit_int_info_err = control->event_inj_err;
3279 control->event_inj = 0;
3280 svm_complete_interrupts(svm);
3281}
3282
3137#ifdef CONFIG_X86_64 3283#ifdef CONFIG_X86_64
3138#define R "r" 3284#define R "r"
3139#else 3285#else
@@ -3167,9 +3313,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
3167 savesegment(gs, gs_selector); 3313 savesegment(gs, gs_selector);
3168 ldt_selector = kvm_read_ldt(); 3314 ldt_selector = kvm_read_ldt();
3169 svm->vmcb->save.cr2 = vcpu->arch.cr2; 3315 svm->vmcb->save.cr2 = vcpu->arch.cr2;
3170 /* required for live migration with NPT */
3171 if (npt_enabled)
3172 svm->vmcb->save.cr3 = vcpu->arch.cr3;
3173 3316
3174 clgi(); 3317 clgi();
3175 3318
@@ -3291,16 +3434,22 @@ static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
3291{ 3434{
3292 struct vcpu_svm *svm = to_svm(vcpu); 3435 struct vcpu_svm *svm = to_svm(vcpu);
3293 3436
3294 if (npt_enabled) {
3295 svm->vmcb->control.nested_cr3 = root;
3296 force_new_asid(vcpu);
3297 return;
3298 }
3299
3300 svm->vmcb->save.cr3 = root; 3437 svm->vmcb->save.cr3 = root;
3301 force_new_asid(vcpu); 3438 force_new_asid(vcpu);
3302} 3439}
3303 3440
3441static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
3442{
3443 struct vcpu_svm *svm = to_svm(vcpu);
3444
3445 svm->vmcb->control.nested_cr3 = root;
3446
3447 /* Also sync guest cr3 here in case we live migrate */
3448 svm->vmcb->save.cr3 = vcpu->arch.cr3;
3449
3450 force_new_asid(vcpu);
3451}
3452
3304static int is_disabled(void) 3453static int is_disabled(void)
3305{ 3454{
3306 u64 vm_cr; 3455 u64 vm_cr;
@@ -3333,15 +3482,6 @@ static bool svm_cpu_has_accelerated_tpr(void)
3333 return false; 3482 return false;
3334} 3483}
3335 3484
3336static int get_npt_level(void)
3337{
3338#ifdef CONFIG_X86_64
3339 return PT64_ROOT_LEVEL;
3340#else
3341 return PT32E_ROOT_LEVEL;
3342#endif
3343}
3344
3345static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) 3485static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
3346{ 3486{
3347 return 0; 3487 return 0;
@@ -3354,12 +3494,25 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
3354static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry) 3494static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
3355{ 3495{
3356 switch (func) { 3496 switch (func) {
3497 case 0x80000001:
3498 if (nested)
3499 entry->ecx |= (1 << 2); /* Set SVM bit */
3500 break;
3357 case 0x8000000A: 3501 case 0x8000000A:
3358 entry->eax = 1; /* SVM revision 1 */ 3502 entry->eax = 1; /* SVM revision 1 */
2359 entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper 3503 entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
3360 ASID emulation to nested SVM */ 3504 ASID emulation to nested SVM */
3361 entry->ecx = 0; /* Reserved */ 3505 entry->ecx = 0; /* Reserved */
2362 entry->edx = 0; /* Do not support any additional features */ 3506 entry->edx = 0; /* By default do not support any
3507 additional features */
3508
3509 /* Support next_rip if host supports it */
3510 if (svm_has(SVM_FEATURE_NRIP))
3511 entry->edx |= SVM_FEATURE_NRIP;
3512
3513 /* Support NPT for the guest if enabled */
3514 if (npt_enabled)
3515 entry->edx |= SVM_FEATURE_NPT;
3363 3516
3364 break; 3517 break;
3365 } 3518 }
@@ -3497,6 +3650,7 @@ static struct kvm_x86_ops svm_x86_ops = {
3497 .set_irq = svm_set_irq, 3650 .set_irq = svm_set_irq,
3498 .set_nmi = svm_inject_nmi, 3651 .set_nmi = svm_inject_nmi,
3499 .queue_exception = svm_queue_exception, 3652 .queue_exception = svm_queue_exception,
3653 .cancel_injection = svm_cancel_injection,
3500 .interrupt_allowed = svm_interrupt_allowed, 3654 .interrupt_allowed = svm_interrupt_allowed,
3501 .nmi_allowed = svm_nmi_allowed, 3655 .nmi_allowed = svm_nmi_allowed,
3502 .get_nmi_mask = svm_get_nmi_mask, 3656 .get_nmi_mask = svm_get_nmi_mask,
@@ -3519,6 +3673,11 @@ static struct kvm_x86_ops svm_x86_ops = {
3519 .set_supported_cpuid = svm_set_supported_cpuid, 3673 .set_supported_cpuid = svm_set_supported_cpuid,
3520 3674
3521 .has_wbinvd_exit = svm_has_wbinvd_exit, 3675 .has_wbinvd_exit = svm_has_wbinvd_exit,
3676
3677 .write_tsc_offset = svm_write_tsc_offset,
3678 .adjust_tsc_offset = svm_adjust_tsc_offset,
3679
3680 .set_tdp_cr3 = set_tdp_cr3,
3522}; 3681};
3523 3682
3524static int __init svm_init(void) 3683static int __init svm_init(void)
diff --git a/arch/x86/kvm/timer.c b/arch/x86/kvm/timer.c
index e16a0dbe74d8..fc7a101c4a35 100644
--- a/arch/x86/kvm/timer.c
+++ b/arch/x86/kvm/timer.c
@@ -6,7 +6,7 @@
6 * 6 *
7 * timer support 7 * timer support
8 * 8 *
9 * Copyright 2010 Red Hat, Inc. and/or its affilates. 9 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
10 * 10 *
11 * This work is licensed under the terms of the GNU GPL, version 2. See 11 * This work is licensed under the terms of the GNU GPL, version 2. See
12 * the COPYING file in the top-level directory. 12 * the COPYING file in the top-level directory.
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 7bddfab12013..8da0e45ff7c9 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -5,7 +5,7 @@
5 * machines without emulation or binary translation. 5 * machines without emulation or binary translation.
6 * 6 *
7 * Copyright (C) 2006 Qumranet, Inc. 7 * Copyright (C) 2006 Qumranet, Inc.
8 * Copyright 2010 Red Hat, Inc. and/or its affilates. 8 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
9 * 9 *
10 * Authors: 10 * Authors:
11 * Avi Kivity <avi@qumranet.com> 11 * Avi Kivity <avi@qumranet.com>
@@ -125,6 +125,7 @@ struct vcpu_vmx {
125 unsigned long host_rsp; 125 unsigned long host_rsp;
126 int launched; 126 int launched;
127 u8 fail; 127 u8 fail;
128 u32 exit_intr_info;
128 u32 idt_vectoring_info; 129 u32 idt_vectoring_info;
129 struct shared_msr_entry *guest_msrs; 130 struct shared_msr_entry *guest_msrs;
130 int nmsrs; 131 int nmsrs;
@@ -154,11 +155,6 @@ struct vcpu_vmx {
154 u32 limit; 155 u32 limit;
155 u32 ar; 156 u32 ar;
156 } tr, es, ds, fs, gs; 157 } tr, es, ds, fs, gs;
157 struct {
158 bool pending;
159 u8 vector;
160 unsigned rip;
161 } irq;
162 } rmode; 158 } rmode;
163 int vpid; 159 int vpid;
164 bool emulation_required; 160 bool emulation_required;
@@ -505,7 +501,6 @@ static void __vcpu_clear(void *arg)
505 vmcs_clear(vmx->vmcs); 501 vmcs_clear(vmx->vmcs);
506 if (per_cpu(current_vmcs, cpu) == vmx->vmcs) 502 if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
507 per_cpu(current_vmcs, cpu) = NULL; 503 per_cpu(current_vmcs, cpu) = NULL;
508 rdtscll(vmx->vcpu.arch.host_tsc);
509 list_del(&vmx->local_vcpus_link); 504 list_del(&vmx->local_vcpus_link);
510 vmx->vcpu.cpu = -1; 505 vmx->vcpu.cpu = -1;
511 vmx->launched = 0; 506 vmx->launched = 0;
@@ -706,11 +701,10 @@ static void reload_tss(void)
706 /* 701 /*
707 * VT restores TR but not its size. Useless. 702 * VT restores TR but not its size. Useless.
708 */ 703 */
709 struct desc_ptr gdt; 704 struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
710 struct desc_struct *descs; 705 struct desc_struct *descs;
711 706
712 native_store_gdt(&gdt); 707 descs = (void *)gdt->address;
713 descs = (void *)gdt.address;
714 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */ 708 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
715 load_TR_desc(); 709 load_TR_desc();
716} 710}
@@ -753,7 +747,7 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
753 747
754static unsigned long segment_base(u16 selector) 748static unsigned long segment_base(u16 selector)
755{ 749{
756 struct desc_ptr gdt; 750 struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
757 struct desc_struct *d; 751 struct desc_struct *d;
758 unsigned long table_base; 752 unsigned long table_base;
759 unsigned long v; 753 unsigned long v;
@@ -761,8 +755,7 @@ static unsigned long segment_base(u16 selector)
761 if (!(selector & ~3)) 755 if (!(selector & ~3))
762 return 0; 756 return 0;
763 757
764 native_store_gdt(&gdt); 758 table_base = gdt->address;
765 table_base = gdt.address;
766 759
767 if (selector & 4) { /* from ldt */ 760 if (selector & 4) { /* from ldt */
768 u16 ldt_selector = kvm_read_ldt(); 761 u16 ldt_selector = kvm_read_ldt();
@@ -883,7 +876,6 @@ static void vmx_load_host_state(struct vcpu_vmx *vmx)
883static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 876static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
884{ 877{
885 struct vcpu_vmx *vmx = to_vmx(vcpu); 878 struct vcpu_vmx *vmx = to_vmx(vcpu);
886 u64 tsc_this, delta, new_offset;
887 u64 phys_addr = __pa(per_cpu(vmxarea, cpu)); 879 u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
888 880
889 if (!vmm_exclusive) 881 if (!vmm_exclusive)
@@ -897,37 +889,24 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
897 } 889 }
898 890
899 if (vcpu->cpu != cpu) { 891 if (vcpu->cpu != cpu) {
900 struct desc_ptr dt; 892 struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
901 unsigned long sysenter_esp; 893 unsigned long sysenter_esp;
902 894
903 kvm_migrate_timers(vcpu);
904 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 895 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
905 local_irq_disable(); 896 local_irq_disable();
906 list_add(&vmx->local_vcpus_link, 897 list_add(&vmx->local_vcpus_link,
907 &per_cpu(vcpus_on_cpu, cpu)); 898 &per_cpu(vcpus_on_cpu, cpu));
908 local_irq_enable(); 899 local_irq_enable();
909 900
910 vcpu->cpu = cpu;
911 /* 901 /*
912 * Linux uses per-cpu TSS and GDT, so set these when switching 902 * Linux uses per-cpu TSS and GDT, so set these when switching
913 * processors. 903 * processors.
914 */ 904 */
915 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */ 905 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
916 native_store_gdt(&dt); 906 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
917 vmcs_writel(HOST_GDTR_BASE, dt.address); /* 22.2.4 */
918 907
919 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp); 908 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
920 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */ 909 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
921
922 /*
923 * Make sure the time stamp counter is monotonous.
924 */
925 rdtscll(tsc_this);
926 if (tsc_this < vcpu->arch.host_tsc) {
927 delta = vcpu->arch.host_tsc - tsc_this;
928 new_offset = vmcs_read64(TSC_OFFSET) + delta;
929 vmcs_write64(TSC_OFFSET, new_offset);
930 }
931 } 910 }
932} 911}
933 912
@@ -1044,16 +1023,8 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
1044 } 1023 }
1045 1024
1046 if (vmx->rmode.vm86_active) { 1025 if (vmx->rmode.vm86_active) {
1047 vmx->rmode.irq.pending = true; 1026 if (kvm_inject_realmode_interrupt(vcpu, nr) != EMULATE_DONE)
1048 vmx->rmode.irq.vector = nr; 1027 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
1049 vmx->rmode.irq.rip = kvm_rip_read(vcpu);
1050 if (kvm_exception_is_soft(nr))
1051 vmx->rmode.irq.rip +=
1052 vmx->vcpu.arch.event_exit_inst_len;
1053 intr_info |= INTR_TYPE_SOFT_INTR;
1054 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
1055 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
1056 kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1);
1057 return; 1028 return;
1058 } 1029 }
1059 1030
@@ -1149,12 +1120,17 @@ static u64 guest_read_tsc(void)
1149} 1120}
1150 1121
1151/* 1122/*
1152 * writes 'guest_tsc' into guest's timestamp counter "register" 1123 * writes 'offset' into guest's timestamp counter offset register
1153 * guest_tsc = host_tsc + tsc_offset ==> tsc_offset = guest_tsc - host_tsc
1154 */ 1124 */
1155static void guest_write_tsc(u64 guest_tsc, u64 host_tsc) 1125static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
1126{
1127 vmcs_write64(TSC_OFFSET, offset);
1128}
1129
1130static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
1156{ 1131{
1157 vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc); 1132 u64 offset = vmcs_read64(TSC_OFFSET);
1133 vmcs_write64(TSC_OFFSET, offset + adjustment);
1158} 1134}
1159 1135
1160/* 1136/*
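
The two new hooks above only manipulate the VMCS TSC_OFFSET field; with TSC offsetting enabled the CPU adds that field to the host TSC on every guest RDTSC, so guest_tsc = host_tsc + offset. A trivial standalone illustration of that relationship, with purely hypothetical values (not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t host_tsc = 900000001000000ULL;  /* host counter right now (made up) */
        int64_t  offset   = -900000000000000LL;  /* as installed by a write_tsc_offset() call */

        printf("guest reads TSC = %llu\n",
               (unsigned long long)(host_tsc + (uint64_t)offset));

        offset += 5000;  /* an adjust_tsc_offset(vcpu, 5000) just adds to the field */
        printf("after adjustment = %llu\n",
               (unsigned long long)(host_tsc + (uint64_t)offset));
        return 0;
}
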
@@ -1227,7 +1203,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
1227{ 1203{
1228 struct vcpu_vmx *vmx = to_vmx(vcpu); 1204 struct vcpu_vmx *vmx = to_vmx(vcpu);
1229 struct shared_msr_entry *msr; 1205 struct shared_msr_entry *msr;
1230 u64 host_tsc;
1231 int ret = 0; 1206 int ret = 0;
1232 1207
1233 switch (msr_index) { 1208 switch (msr_index) {
@@ -1257,8 +1232,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
1257 vmcs_writel(GUEST_SYSENTER_ESP, data); 1232 vmcs_writel(GUEST_SYSENTER_ESP, data);
1258 break; 1233 break;
1259 case MSR_IA32_TSC: 1234 case MSR_IA32_TSC:
1260 rdtscll(host_tsc); 1235 kvm_write_tsc(vcpu, data);
1261 guest_write_tsc(data, host_tsc);
1262 break; 1236 break;
1263 case MSR_IA32_CR_PAT: 1237 case MSR_IA32_CR_PAT:
1264 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { 1238 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
@@ -1856,20 +1830,20 @@ static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
1856 return; 1830 return;
1857 1831
1858 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { 1832 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
1859 vmcs_write64(GUEST_PDPTR0, vcpu->arch.pdptrs[0]); 1833 vmcs_write64(GUEST_PDPTR0, vcpu->arch.mmu.pdptrs[0]);
1860 vmcs_write64(GUEST_PDPTR1, vcpu->arch.pdptrs[1]); 1834 vmcs_write64(GUEST_PDPTR1, vcpu->arch.mmu.pdptrs[1]);
1861 vmcs_write64(GUEST_PDPTR2, vcpu->arch.pdptrs[2]); 1835 vmcs_write64(GUEST_PDPTR2, vcpu->arch.mmu.pdptrs[2]);
1862 vmcs_write64(GUEST_PDPTR3, vcpu->arch.pdptrs[3]); 1836 vmcs_write64(GUEST_PDPTR3, vcpu->arch.mmu.pdptrs[3]);
1863 } 1837 }
1864} 1838}
1865 1839
1866static void ept_save_pdptrs(struct kvm_vcpu *vcpu) 1840static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
1867{ 1841{
1868 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { 1842 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
1869 vcpu->arch.pdptrs[0] = vmcs_read64(GUEST_PDPTR0); 1843 vcpu->arch.mmu.pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
1870 vcpu->arch.pdptrs[1] = vmcs_read64(GUEST_PDPTR1); 1844 vcpu->arch.mmu.pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
1871 vcpu->arch.pdptrs[2] = vmcs_read64(GUEST_PDPTR2); 1845 vcpu->arch.mmu.pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
1872 vcpu->arch.pdptrs[3] = vmcs_read64(GUEST_PDPTR3); 1846 vcpu->arch.mmu.pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
1873 } 1847 }
1874 1848
1875 __set_bit(VCPU_EXREG_PDPTR, 1849 __set_bit(VCPU_EXREG_PDPTR,
@@ -2515,7 +2489,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
2515{ 2489{
2516 u32 host_sysenter_cs, msr_low, msr_high; 2490 u32 host_sysenter_cs, msr_low, msr_high;
2517 u32 junk; 2491 u32 junk;
2518 u64 host_pat, tsc_this, tsc_base; 2492 u64 host_pat;
2519 unsigned long a; 2493 unsigned long a;
2520 struct desc_ptr dt; 2494 struct desc_ptr dt;
2521 int i; 2495 int i;
@@ -2656,12 +2630,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
2656 vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE; 2630 vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
2657 vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits); 2631 vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);
2658 2632
2659 tsc_base = vmx->vcpu.kvm->arch.vm_init_tsc; 2633 kvm_write_tsc(&vmx->vcpu, 0);
2660 rdtscll(tsc_this);
2661 if (tsc_this < vmx->vcpu.kvm->arch.vm_init_tsc)
2662 tsc_base = tsc_this;
2663
2664 guest_write_tsc(0, tsc_base);
2665 2634
2666 return 0; 2635 return 0;
2667} 2636}
@@ -2834,16 +2803,8 @@ static void vmx_inject_irq(struct kvm_vcpu *vcpu)
2834 2803
2835 ++vcpu->stat.irq_injections; 2804 ++vcpu->stat.irq_injections;
2836 if (vmx->rmode.vm86_active) { 2805 if (vmx->rmode.vm86_active) {
2837 vmx->rmode.irq.pending = true; 2806 if (kvm_inject_realmode_interrupt(vcpu, irq) != EMULATE_DONE)
2838 vmx->rmode.irq.vector = irq; 2807 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
2839 vmx->rmode.irq.rip = kvm_rip_read(vcpu);
2840 if (vcpu->arch.interrupt.soft)
2841 vmx->rmode.irq.rip +=
2842 vmx->vcpu.arch.event_exit_inst_len;
2843 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2844 irq | INTR_TYPE_SOFT_INTR | INTR_INFO_VALID_MASK);
2845 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
2846 kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1);
2847 return; 2808 return;
2848 } 2809 }
2849 intr = irq | INTR_INFO_VALID_MASK; 2810 intr = irq | INTR_INFO_VALID_MASK;
@@ -2875,14 +2836,8 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
2875 2836
2876 ++vcpu->stat.nmi_injections; 2837 ++vcpu->stat.nmi_injections;
2877 if (vmx->rmode.vm86_active) { 2838 if (vmx->rmode.vm86_active) {
2878 vmx->rmode.irq.pending = true; 2839 if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR) != EMULATE_DONE)
2879 vmx->rmode.irq.vector = NMI_VECTOR; 2840 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
2880 vmx->rmode.irq.rip = kvm_rip_read(vcpu);
2881 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2882 NMI_VECTOR | INTR_TYPE_SOFT_INTR |
2883 INTR_INFO_VALID_MASK);
2884 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
2885 kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1);
2886 return; 2841 return;
2887 } 2842 }
2888 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 2843 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
@@ -3346,6 +3301,7 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu)
3346 3301
3347static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu) 3302static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
3348{ 3303{
3304 kvm_make_request(KVM_REQ_EVENT, vcpu);
3349 return 1; 3305 return 1;
3350} 3306}
3351 3307
@@ -3358,6 +3314,8 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu)
3358 cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING; 3314 cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
3359 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); 3315 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
3360 3316
3317 kvm_make_request(KVM_REQ_EVENT, vcpu);
3318
3361 ++vcpu->stat.irq_window_exits; 3319 ++vcpu->stat.irq_window_exits;
3362 3320
3363 /* 3321 /*
@@ -3614,6 +3572,7 @@ static int handle_nmi_window(struct kvm_vcpu *vcpu)
3614 cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING; 3572 cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
3615 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); 3573 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
3616 ++vcpu->stat.nmi_window_exits; 3574 ++vcpu->stat.nmi_window_exits;
3575 kvm_make_request(KVM_REQ_EVENT, vcpu);
3617 3576
3618 return 1; 3577 return 1;
3619} 3578}
@@ -3623,8 +3582,17 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
3623 struct vcpu_vmx *vmx = to_vmx(vcpu); 3582 struct vcpu_vmx *vmx = to_vmx(vcpu);
3624 enum emulation_result err = EMULATE_DONE; 3583 enum emulation_result err = EMULATE_DONE;
3625 int ret = 1; 3584 int ret = 1;
3585 u32 cpu_exec_ctrl;
3586 bool intr_window_requested;
3587
3588 cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
3589 intr_window_requested = cpu_exec_ctrl & CPU_BASED_VIRTUAL_INTR_PENDING;
3626 3590
3627 while (!guest_state_valid(vcpu)) { 3591 while (!guest_state_valid(vcpu)) {
3592 if (intr_window_requested
3593 && (kvm_get_rflags(&vmx->vcpu) & X86_EFLAGS_IF))
3594 return handle_interrupt_window(&vmx->vcpu);
3595
3628 err = emulate_instruction(vcpu, 0, 0, 0); 3596 err = emulate_instruction(vcpu, 0, 0, 0);
3629 3597
3630 if (err == EMULATE_DO_MMIO) { 3598 if (err == EMULATE_DO_MMIO) {
@@ -3790,18 +3758,9 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
3790 vmcs_write32(TPR_THRESHOLD, irr); 3758 vmcs_write32(TPR_THRESHOLD, irr);
3791} 3759}
3792 3760
3793static void vmx_complete_interrupts(struct vcpu_vmx *vmx) 3761static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
3794{ 3762{
3795 u32 exit_intr_info; 3763 u32 exit_intr_info = vmx->exit_intr_info;
3796 u32 idt_vectoring_info = vmx->idt_vectoring_info;
3797 bool unblock_nmi;
3798 u8 vector;
3799 int type;
3800 bool idtv_info_valid;
3801
3802 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
3803
3804 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
3805 3764
3806 /* Handle machine checks before interrupts are enabled */ 3765 /* Handle machine checks before interrupts are enabled */
3807 if ((vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY) 3766 if ((vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY)
@@ -3816,8 +3775,16 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
3816 asm("int $2"); 3775 asm("int $2");
3817 kvm_after_handle_nmi(&vmx->vcpu); 3776 kvm_after_handle_nmi(&vmx->vcpu);
3818 } 3777 }
3778}
3819 3779
3820 idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK; 3780static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
3781{
3782 u32 exit_intr_info = vmx->exit_intr_info;
3783 bool unblock_nmi;
3784 u8 vector;
3785 bool idtv_info_valid;
3786
3787 idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
3821 3788
3822 if (cpu_has_virtual_nmis()) { 3789 if (cpu_has_virtual_nmis()) {
3823 unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0; 3790 unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
@@ -3839,6 +3806,18 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
3839 } else if (unlikely(vmx->soft_vnmi_blocked)) 3806 } else if (unlikely(vmx->soft_vnmi_blocked))
3840 vmx->vnmi_blocked_time += 3807 vmx->vnmi_blocked_time +=
3841 ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time)); 3808 ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time));
3809}
3810
3811static void __vmx_complete_interrupts(struct vcpu_vmx *vmx,
3812 u32 idt_vectoring_info,
3813 int instr_len_field,
3814 int error_code_field)
3815{
3816 u8 vector;
3817 int type;
3818 bool idtv_info_valid;
3819
3820 idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
3842 3821
3843 vmx->vcpu.arch.nmi_injected = false; 3822 vmx->vcpu.arch.nmi_injected = false;
3844 kvm_clear_exception_queue(&vmx->vcpu); 3823 kvm_clear_exception_queue(&vmx->vcpu);
@@ -3847,6 +3826,8 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
3847 if (!idtv_info_valid) 3826 if (!idtv_info_valid)
3848 return; 3827 return;
3849 3828
3829 kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
3830
3850 vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK; 3831 vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
3851 type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK; 3832 type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
3852 3833
@@ -3863,18 +3844,18 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
3863 break; 3844 break;
3864 case INTR_TYPE_SOFT_EXCEPTION: 3845 case INTR_TYPE_SOFT_EXCEPTION:
3865 vmx->vcpu.arch.event_exit_inst_len = 3846 vmx->vcpu.arch.event_exit_inst_len =
3866 vmcs_read32(VM_EXIT_INSTRUCTION_LEN); 3847 vmcs_read32(instr_len_field);
3867 /* fall through */ 3848 /* fall through */
3868 case INTR_TYPE_HARD_EXCEPTION: 3849 case INTR_TYPE_HARD_EXCEPTION:
3869 if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) { 3850 if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
3870 u32 err = vmcs_read32(IDT_VECTORING_ERROR_CODE); 3851 u32 err = vmcs_read32(error_code_field);
3871 kvm_queue_exception_e(&vmx->vcpu, vector, err); 3852 kvm_queue_exception_e(&vmx->vcpu, vector, err);
3872 } else 3853 } else
3873 kvm_queue_exception(&vmx->vcpu, vector); 3854 kvm_queue_exception(&vmx->vcpu, vector);
3874 break; 3855 break;
3875 case INTR_TYPE_SOFT_INTR: 3856 case INTR_TYPE_SOFT_INTR:
3876 vmx->vcpu.arch.event_exit_inst_len = 3857 vmx->vcpu.arch.event_exit_inst_len =
3877 vmcs_read32(VM_EXIT_INSTRUCTION_LEN); 3858 vmcs_read32(instr_len_field);
3878 /* fall through */ 3859 /* fall through */
3879 case INTR_TYPE_EXT_INTR: 3860 case INTR_TYPE_EXT_INTR:
3880 kvm_queue_interrupt(&vmx->vcpu, vector, 3861 kvm_queue_interrupt(&vmx->vcpu, vector,
@@ -3885,27 +3866,21 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
3885 } 3866 }
3886} 3867}
3887 3868
3888/* 3869static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
3889 * Failure to inject an interrupt should give us the information
3890 * in IDT_VECTORING_INFO_FIELD. However, if the failure occurs
3891 * when fetching the interrupt redirection bitmap in the real-mode
3892 * tss, this doesn't happen. So we do it ourselves.
3893 */
3894static void fixup_rmode_irq(struct vcpu_vmx *vmx)
3895{ 3870{
3896 vmx->rmode.irq.pending = 0; 3871 __vmx_complete_interrupts(vmx, vmx->idt_vectoring_info,
3897 if (kvm_rip_read(&vmx->vcpu) + 1 != vmx->rmode.irq.rip) 3872 VM_EXIT_INSTRUCTION_LEN,
3898 return; 3873 IDT_VECTORING_ERROR_CODE);
3899 kvm_rip_write(&vmx->vcpu, vmx->rmode.irq.rip); 3874}
3900 if (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK) { 3875
3901 vmx->idt_vectoring_info &= ~VECTORING_INFO_TYPE_MASK; 3876static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
3902 vmx->idt_vectoring_info |= INTR_TYPE_EXT_INTR; 3877{
3903 return; 3878 __vmx_complete_interrupts(to_vmx(vcpu),
3904 } 3879 vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
3905 vmx->idt_vectoring_info = 3880 VM_ENTRY_INSTRUCTION_LEN,
3906 VECTORING_INFO_VALID_MASK 3881 VM_ENTRY_EXCEPTION_ERROR_CODE);
3907 | INTR_TYPE_EXT_INTR 3882
3908 | vmx->rmode.irq.vector; 3883 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
3909} 3884}
3910 3885
3911#ifdef CONFIG_X86_64 3886#ifdef CONFIG_X86_64
@@ -4032,7 +4007,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
4032#endif 4007#endif
4033 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)) 4008 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
4034 : "cc", "memory" 4009 : "cc", "memory"
4035 , R"bx", R"di", R"si" 4010 , R"ax", R"bx", R"di", R"si"
4036#ifdef CONFIG_X86_64 4011#ifdef CONFIG_X86_64
4037 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" 4012 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
4038#endif 4013#endif
@@ -4043,12 +4018,15 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
4043 vcpu->arch.regs_dirty = 0; 4018 vcpu->arch.regs_dirty = 0;
4044 4019
4045 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); 4020 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
4046 if (vmx->rmode.irq.pending)
4047 fixup_rmode_irq(vmx);
4048 4021
4049 asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS)); 4022 asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
4050 vmx->launched = 1; 4023 vmx->launched = 1;
4051 4024
4025 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
4026 vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
4027
4028 vmx_complete_atomic_exit(vmx);
4029 vmx_recover_nmi_blocking(vmx);
4052 vmx_complete_interrupts(vmx); 4030 vmx_complete_interrupts(vmx);
4053} 4031}
4054 4032
@@ -4119,6 +4097,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
4119 4097
4120 cpu = get_cpu(); 4098 cpu = get_cpu();
4121 vmx_vcpu_load(&vmx->vcpu, cpu); 4099 vmx_vcpu_load(&vmx->vcpu, cpu);
4100 vmx->vcpu.cpu = cpu;
4122 err = vmx_vcpu_setup(vmx); 4101 err = vmx_vcpu_setup(vmx);
4123 vmx_vcpu_put(&vmx->vcpu); 4102 vmx_vcpu_put(&vmx->vcpu);
4124 put_cpu(); 4103 put_cpu();
@@ -4334,6 +4313,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
4334 .set_irq = vmx_inject_irq, 4313 .set_irq = vmx_inject_irq,
4335 .set_nmi = vmx_inject_nmi, 4314 .set_nmi = vmx_inject_nmi,
4336 .queue_exception = vmx_queue_exception, 4315 .queue_exception = vmx_queue_exception,
4316 .cancel_injection = vmx_cancel_injection,
4337 .interrupt_allowed = vmx_interrupt_allowed, 4317 .interrupt_allowed = vmx_interrupt_allowed,
4338 .nmi_allowed = vmx_nmi_allowed, 4318 .nmi_allowed = vmx_nmi_allowed,
4339 .get_nmi_mask = vmx_get_nmi_mask, 4319 .get_nmi_mask = vmx_get_nmi_mask,
@@ -4356,6 +4336,11 @@ static struct kvm_x86_ops vmx_x86_ops = {
4356 .set_supported_cpuid = vmx_set_supported_cpuid, 4336 .set_supported_cpuid = vmx_set_supported_cpuid,
4357 4337
4358 .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit, 4338 .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
4339
4340 .write_tsc_offset = vmx_write_tsc_offset,
4341 .adjust_tsc_offset = vmx_adjust_tsc_offset,
4342
4343 .set_tdp_cr3 = vmx_set_cr3,
4359}; 4344};
4360 4345
4361static int __init vmx_init(void) 4346static int __init vmx_init(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6c2ecf0a806d..2288ad829b32 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6,7 +6,7 @@
6 * Copyright (C) 2006 Qumranet, Inc. 6 * Copyright (C) 2006 Qumranet, Inc.
7 * Copyright (C) 2008 Qumranet, Inc. 7 * Copyright (C) 2008 Qumranet, Inc.
8 * Copyright IBM Corporation, 2008 8 * Copyright IBM Corporation, 2008
9 * Copyright 2010 Red Hat, Inc. and/or its affilates. 9 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
10 * 10 *
11 * Authors: 11 * Authors:
12 * Avi Kivity <avi@qumranet.com> 12 * Avi Kivity <avi@qumranet.com>
@@ -55,6 +55,8 @@
55#include <asm/mce.h> 55#include <asm/mce.h>
56#include <asm/i387.h> 56#include <asm/i387.h>
57#include <asm/xcr.h> 57#include <asm/xcr.h>
58#include <asm/pvclock.h>
59#include <asm/div64.h>
58 60
59#define MAX_IO_MSRS 256 61#define MAX_IO_MSRS 256
60#define CR0_RESERVED_BITS \ 62#define CR0_RESERVED_BITS \
@@ -71,7 +73,7 @@
71#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR) 73#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
72 74
73#define KVM_MAX_MCE_BANKS 32 75#define KVM_MAX_MCE_BANKS 32
74#define KVM_MCE_CAP_SUPPORTED MCG_CTL_P 76#define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P)
75 77
76/* EFER defaults: 78/* EFER defaults:
77 * - enable syscall per default because its emulated by KVM 79 * - enable syscall per default because its emulated by KVM
@@ -282,6 +284,8 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
282 u32 prev_nr; 284 u32 prev_nr;
283 int class1, class2; 285 int class1, class2;
284 286
287 kvm_make_request(KVM_REQ_EVENT, vcpu);
288
285 if (!vcpu->arch.exception.pending) { 289 if (!vcpu->arch.exception.pending) {
286 queue: 290 queue:
287 vcpu->arch.exception.pending = true; 291 vcpu->arch.exception.pending = true;
@@ -327,16 +331,28 @@ void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
327} 331}
328EXPORT_SYMBOL_GPL(kvm_requeue_exception); 332EXPORT_SYMBOL_GPL(kvm_requeue_exception);
329 333
330void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr, 334void kvm_inject_page_fault(struct kvm_vcpu *vcpu)
331 u32 error_code)
332{ 335{
336 unsigned error_code = vcpu->arch.fault.error_code;
337
333 ++vcpu->stat.pf_guest; 338 ++vcpu->stat.pf_guest;
334 vcpu->arch.cr2 = addr; 339 vcpu->arch.cr2 = vcpu->arch.fault.address;
335 kvm_queue_exception_e(vcpu, PF_VECTOR, error_code); 340 kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
336} 341}
337 342
343void kvm_propagate_fault(struct kvm_vcpu *vcpu)
344{
345 if (mmu_is_nested(vcpu) && !vcpu->arch.fault.nested)
346 vcpu->arch.nested_mmu.inject_page_fault(vcpu);
347 else
348 vcpu->arch.mmu.inject_page_fault(vcpu);
349
350 vcpu->arch.fault.nested = false;
351}
352
338void kvm_inject_nmi(struct kvm_vcpu *vcpu) 353void kvm_inject_nmi(struct kvm_vcpu *vcpu)
339{ 354{
355 kvm_make_request(KVM_REQ_EVENT, vcpu);
340 vcpu->arch.nmi_pending = 1; 356 vcpu->arch.nmi_pending = 1;
341} 357}
342EXPORT_SYMBOL_GPL(kvm_inject_nmi); 358EXPORT_SYMBOL_GPL(kvm_inject_nmi);
@@ -367,18 +383,49 @@ bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
367EXPORT_SYMBOL_GPL(kvm_require_cpl); 383EXPORT_SYMBOL_GPL(kvm_require_cpl);
368 384
369/* 385/*
386 * This function will be used to read from the physical memory of the currently
387 * running guest. The difference from kvm_read_guest_page is that this function
388 * can read from guest physical or from the guest's guest physical memory.
389 */
390int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
391 gfn_t ngfn, void *data, int offset, int len,
392 u32 access)
393{
394 gfn_t real_gfn;
395 gpa_t ngpa;
396
397 ngpa = gfn_to_gpa(ngfn);
398 real_gfn = mmu->translate_gpa(vcpu, ngpa, access);
399 if (real_gfn == UNMAPPED_GVA)
400 return -EFAULT;
401
402 real_gfn = gpa_to_gfn(real_gfn);
403
404 return kvm_read_guest_page(vcpu->kvm, real_gfn, data, offset, len);
405}
406EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);
407
408int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
409 void *data, int offset, int len, u32 access)
410{
411 return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
412 data, offset, len, access);
413}
414
415/*
370 * Load the pae pdptrs. Return true if they are all valid. 416
371 */ 417 */
372int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3) 418int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
373{ 419{
374 gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT; 420 gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
375 unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2; 421 unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
376 int i; 422 int i;
377 int ret; 423 int ret;
378 u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)]; 424 u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];
379 425
380 ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte, 426 ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
381 offset * sizeof(u64), sizeof(pdpte)); 427 offset * sizeof(u64), sizeof(pdpte),
428 PFERR_USER_MASK|PFERR_WRITE_MASK);
382 if (ret < 0) { 429 if (ret < 0) {
383 ret = 0; 430 ret = 0;
384 goto out; 431 goto out;
@@ -392,7 +439,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
392 } 439 }
393 ret = 1; 440 ret = 1;
394 441
395 memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs)); 442 memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
396 __set_bit(VCPU_EXREG_PDPTR, 443 __set_bit(VCPU_EXREG_PDPTR,
397 (unsigned long *)&vcpu->arch.regs_avail); 444 (unsigned long *)&vcpu->arch.regs_avail);
398 __set_bit(VCPU_EXREG_PDPTR, 445 __set_bit(VCPU_EXREG_PDPTR,
@@ -405,8 +452,10 @@ EXPORT_SYMBOL_GPL(load_pdptrs);
405 452
406static bool pdptrs_changed(struct kvm_vcpu *vcpu) 453static bool pdptrs_changed(struct kvm_vcpu *vcpu)
407{ 454{
408 u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)]; 455 u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
409 bool changed = true; 456 bool changed = true;
457 int offset;
458 gfn_t gfn;
410 int r; 459 int r;
411 460
412 if (is_long_mode(vcpu) || !is_pae(vcpu)) 461 if (is_long_mode(vcpu) || !is_pae(vcpu))
@@ -416,10 +465,13 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu)
416 (unsigned long *)&vcpu->arch.regs_avail)) 465 (unsigned long *)&vcpu->arch.regs_avail))
417 return true; 466 return true;
418 467
419 r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte)); 468 gfn = (vcpu->arch.cr3 & ~31u) >> PAGE_SHIFT;
469 offset = (vcpu->arch.cr3 & ~31u) & (PAGE_SIZE - 1);
470 r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
471 PFERR_USER_MASK | PFERR_WRITE_MASK);
420 if (r < 0) 472 if (r < 0)
421 goto out; 473 goto out;
422 changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0; 474 changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
423out: 475out:
424 476
425 return changed; 477 return changed;
@@ -458,7 +510,8 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
458 return 1; 510 return 1;
459 } else 511 } else
460#endif 512#endif
461 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) 513 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
514 vcpu->arch.cr3))
462 return 1; 515 return 1;
463 } 516 }
464 517
@@ -547,7 +600,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
547 return 1; 600 return 1;
548 } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE) 601 } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
549 && ((cr4 ^ old_cr4) & pdptr_bits) 602 && ((cr4 ^ old_cr4) & pdptr_bits)
550 && !load_pdptrs(vcpu, vcpu->arch.cr3)) 603 && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3))
551 return 1; 604 return 1;
552 605
553 if (cr4 & X86_CR4_VMXE) 606 if (cr4 & X86_CR4_VMXE)
@@ -580,7 +633,8 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
580 if (is_pae(vcpu)) { 633 if (is_pae(vcpu)) {
581 if (cr3 & CR3_PAE_RESERVED_BITS) 634 if (cr3 & CR3_PAE_RESERVED_BITS)
582 return 1; 635 return 1;
583 if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) 636 if (is_paging(vcpu) &&
637 !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
584 return 1; 638 return 1;
585 } 639 }
586 /* 640 /*
@@ -737,7 +791,7 @@ static u32 msrs_to_save[] = {
737#ifdef CONFIG_X86_64 791#ifdef CONFIG_X86_64
738 MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR, 792 MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
739#endif 793#endif
740 MSR_IA32_TSC, MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA 794 MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
741}; 795};
742 796
743static unsigned num_msrs_to_save; 797static unsigned num_msrs_to_save;
@@ -838,7 +892,7 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
838 892
839 /* 893 /*
840 * The guest calculates current wall clock time by adding 894 * The guest calculates current wall clock time by adding
841 * system time (updated by kvm_write_guest_time below) to the 895 * system time (updated by kvm_guest_time_update below) to the
842 * wall clock specified here. guest system time equals host 896 * wall clock specified here. guest system time equals host
843 * system time for us, thus we must fill in host boot time here. 897 * system time for us, thus we must fill in host boot time here.
844 */ 898 */
@@ -866,65 +920,229 @@ static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
866 return quotient; 920 return quotient;
867} 921}
868 922
869static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *hv_clock) 923static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz,
924 s8 *pshift, u32 *pmultiplier)
870{ 925{
871 uint64_t nsecs = 1000000000LL; 926 uint64_t scaled64;
872 int32_t shift = 0; 927 int32_t shift = 0;
873 uint64_t tps64; 928 uint64_t tps64;
874 uint32_t tps32; 929 uint32_t tps32;
875 930
876 tps64 = tsc_khz * 1000LL; 931 tps64 = base_khz * 1000LL;
877 while (tps64 > nsecs*2) { 932 scaled64 = scaled_khz * 1000LL;
933 while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) {
878 tps64 >>= 1; 934 tps64 >>= 1;
879 shift--; 935 shift--;
880 } 936 }
881 937
882 tps32 = (uint32_t)tps64; 938 tps32 = (uint32_t)tps64;
883 while (tps32 <= (uint32_t)nsecs) { 939 while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
884 tps32 <<= 1; 940 if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
941 scaled64 >>= 1;
942 else
943 tps32 <<= 1;
885 shift++; 944 shift++;
886 } 945 }
887 946
888 hv_clock->tsc_shift = shift; 947 *pshift = shift;
889 hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32); 948 *pmultiplier = div_frac(scaled64, tps32);
890 949
891 pr_debug("%s: tsc_khz %u, tsc_shift %d, tsc_mul %u\n", 950 pr_debug("%s: base_khz %u => %u, shift %d, mul %u\n",
892 __func__, tsc_khz, hv_clock->tsc_shift, 951 __func__, base_khz, scaled_khz, shift, *pmultiplier);
893 hv_clock->tsc_to_system_mul); 952}
953
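
kvm_get_time_scale() reduces a base-rate-to-scaled-rate conversion to an (s8 shift, u32 multiplier) pair that pvclock-style code later applies as a pre-shift followed by a 32.32 fixed-point multiply. The following standalone sketch exercises that round trip with hypothetical rates; div_frac() is approximated with plain 64-bit division and the scaling step mimics pvclock_scale_delta() (assumes a compiler with unsigned __int128):

#include <stdint.h>
#include <stdio.h>

static void get_time_scale(uint32_t scaled_khz, uint32_t base_khz,
                           int8_t *pshift, uint32_t *pmult)
{
        uint64_t scaled64 = scaled_khz * 1000LL;
        uint64_t tps64 = base_khz * 1000LL;
        uint32_t tps32;
        int8_t shift = 0;

        while (tps64 > scaled64 * 2 || tps64 & 0xffffffff00000000ULL) {
                tps64 >>= 1;
                shift--;
        }
        tps32 = (uint32_t)tps64;
        while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
                if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
                        scaled64 >>= 1;
                else
                        tps32 <<= 1;
                shift++;
        }
        *pshift = shift;
        *pmult = (uint32_t)((scaled64 << 32) / tps32);  /* stand-in for div_frac() */
}

/* Mirrors pvclock_scale_delta(): pre-shift, then 32.32 fixed-point multiply */
static uint64_t scale_delta(uint64_t delta, uint32_t mult, int8_t shift)
{
        if (shift < 0)
                delta >>= -shift;
        else
                delta <<= shift;
        return (uint64_t)(((unsigned __int128)delta * mult) >> 32);
}

int main(void)
{
        int8_t shift;
        uint32_t mult;

        /* Convert a 3 GHz TSC (3000000 kHz) into nanoseconds (1000000 kHz) */
        get_time_scale(1000000, 3000000, &shift, &mult);
        /* 3,000,000 cycles at 3 GHz should print roughly 1,000,000 ns */
        printf("shift=%d mult=%u -> %llu ns\n", shift, mult,
               (unsigned long long)scale_delta(3000000, mult, shift));
        return 0;
}
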
954static inline u64 get_kernel_ns(void)
955{
956 struct timespec ts;
957
958 WARN_ON(preemptible());
959 ktime_get_ts(&ts);
960 monotonic_to_bootbased(&ts);
961 return timespec_to_ns(&ts);
894} 962}
895 963
896static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz); 964static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
965unsigned long max_tsc_khz;
897 966
898static void kvm_write_guest_time(struct kvm_vcpu *v) 967static inline int kvm_tsc_changes_freq(void)
968{
969 int cpu = get_cpu();
970 int ret = !boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
971 cpufreq_quick_get(cpu) != 0;
972 put_cpu();
973 return ret;
974}
975
976static inline u64 nsec_to_cycles(u64 nsec)
977{
978 u64 ret;
979
980 WARN_ON(preemptible());
981 if (kvm_tsc_changes_freq())
982 printk_once(KERN_WARNING
983 "kvm: unreliable cycle conversion on adjustable rate TSC\n");
984 ret = nsec * __get_cpu_var(cpu_tsc_khz);
985 do_div(ret, USEC_PER_SEC);
986 return ret;
987}
988
989static void kvm_arch_set_tsc_khz(struct kvm *kvm, u32 this_tsc_khz)
990{
991 /* Compute a scale to convert nanoseconds in TSC cycles */
992 kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000,
993 &kvm->arch.virtual_tsc_shift,
994 &kvm->arch.virtual_tsc_mult);
995 kvm->arch.virtual_tsc_khz = this_tsc_khz;
996}
997
998static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
999{
1000 u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.last_tsc_nsec,
1001 vcpu->kvm->arch.virtual_tsc_mult,
1002 vcpu->kvm->arch.virtual_tsc_shift);
1003 tsc += vcpu->arch.last_tsc_write;
1004 return tsc;
1005}
1006
1007void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
1008{
1009 struct kvm *kvm = vcpu->kvm;
1010 u64 offset, ns, elapsed;
1011 unsigned long flags;
1012 s64 sdiff;
1013
1014 spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
1015 offset = data - native_read_tsc();
1016 ns = get_kernel_ns();
1017 elapsed = ns - kvm->arch.last_tsc_nsec;
1018 sdiff = data - kvm->arch.last_tsc_write;
1019 if (sdiff < 0)
1020 sdiff = -sdiff;
1021
1022 /*
1023 * Special case: close write to TSC within 5 seconds of
1024 * another CPU is interpreted as an attempt to synchronize.
1025 * The 5 seconds is to accommodate host load / swapping as
1026 * well as any reset of TSC during the boot process.
1027 *
1028 * In that case, for a reliable TSC, we can match TSC offsets,
1029 * or make a best guess using the elapsed value.
1030 */
1031 if (sdiff < nsec_to_cycles(5ULL * NSEC_PER_SEC) &&
1032 elapsed < 5ULL * NSEC_PER_SEC) {
1033 if (!check_tsc_unstable()) {
1034 offset = kvm->arch.last_tsc_offset;
1035 pr_debug("kvm: matched tsc offset for %llu\n", data);
1036 } else {
1037 u64 delta = nsec_to_cycles(elapsed);
1038 offset += delta;
1039 pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
1040 }
1041 ns = kvm->arch.last_tsc_nsec;
1042 }
1043 kvm->arch.last_tsc_nsec = ns;
1044 kvm->arch.last_tsc_write = data;
1045 kvm->arch.last_tsc_offset = offset;
1046 kvm_x86_ops->write_tsc_offset(vcpu, offset);
1047 spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
1048
1049 /* Reset of TSC must disable overshoot protection below */
1050 vcpu->arch.hv_clock.tsc_timestamp = 0;
1051 vcpu->arch.last_tsc_write = data;
1052 vcpu->arch.last_tsc_nsec = ns;
1053}
1054EXPORT_SYMBOL_GPL(kvm_write_tsc);
1055
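
The heuristic in kvm_write_tsc() above distinguishes three outcomes when a vCPU writes MSR_IA32_TSC: reuse the previous offset (writes close together on a stable TSC), nudge the offset by the elapsed time (unstable TSC), or start a fresh TSC epoch. A user-space sketch of just that decision, with made-up numbers and a fixed 2.8 GHz rate standing in for the per-CPU cpu_tsc_khz lookup (no locking, not kernel code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

static const uint64_t tsc_khz = 2800000;        /* assumed host TSC rate */

static uint64_t nsec_to_cycles(uint64_t nsec)
{
        return nsec * tsc_khz / 1000000;        /* same arithmetic as the patch */
}

int main(void)
{
        /* Hypothetical state: vCPU0 wrote 0, vCPU1 writes 120000 cycles 30 ms later */
        uint64_t last_write = 0, data = 120000;
        uint64_t elapsed_ns = 30000000;
        bool tsc_stable = true;                 /* i.e. !check_tsc_unstable() */

        uint64_t sdiff = data > last_write ? data - last_write : last_write - data;

        if (sdiff < nsec_to_cycles(5 * NSEC_PER_SEC) && elapsed_ns < 5 * NSEC_PER_SEC) {
                if (tsc_stable)
                        printf("matched: reuse the previous TSC offset\n");
                else
                        printf("unstable TSC: bump offset by ~%llu cycles\n",
                               (unsigned long long)nsec_to_cycles(elapsed_ns));
        } else {
                printf("treat as a new TSC epoch and compute a fresh offset\n");
        }
        return 0;
}
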
1056static int kvm_guest_time_update(struct kvm_vcpu *v)
899{ 1057{
900 struct timespec ts;
901 unsigned long flags; 1058 unsigned long flags;
902 struct kvm_vcpu_arch *vcpu = &v->arch; 1059 struct kvm_vcpu_arch *vcpu = &v->arch;
903 void *shared_kaddr; 1060 void *shared_kaddr;
904 unsigned long this_tsc_khz; 1061 unsigned long this_tsc_khz;
1062 s64 kernel_ns, max_kernel_ns;
1063 u64 tsc_timestamp;
905 1064
906 if ((!vcpu->time_page)) 1065 /* Keep irq disabled to prevent changes to the clock */
907 return; 1066 local_irq_save(flags);
1067 kvm_get_msr(v, MSR_IA32_TSC, &tsc_timestamp);
1068 kernel_ns = get_kernel_ns();
1069 this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
908 1070
909 this_tsc_khz = get_cpu_var(cpu_tsc_khz); 1071 if (unlikely(this_tsc_khz == 0)) {
910 if (unlikely(vcpu->hv_clock_tsc_khz != this_tsc_khz)) { 1072 local_irq_restore(flags);
911 kvm_set_time_scale(this_tsc_khz, &vcpu->hv_clock); 1073 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
912 vcpu->hv_clock_tsc_khz = this_tsc_khz; 1074 return 1;
1075 }
1076
1077 /*
1078 * We may have to catch up the TSC to match elapsed wall clock
1079 * time for two reasons, even if kvmclock is used.
1080 * 1) CPU could have been running below the maximum TSC rate
1081 * 2) Broken TSC compensation resets the base at each VCPU
1082 * entry to avoid unknown leaps of TSC even when running
1083 * again on the same CPU. This may cause apparent elapsed
1084 * time to disappear, and the guest to stand still or run
1085 * very slowly.
1086 */
1087 if (vcpu->tsc_catchup) {
1088 u64 tsc = compute_guest_tsc(v, kernel_ns);
1089 if (tsc > tsc_timestamp) {
1090 kvm_x86_ops->adjust_tsc_offset(v, tsc - tsc_timestamp);
1091 tsc_timestamp = tsc;
1092 }
913 } 1093 }
914 put_cpu_var(cpu_tsc_khz);
915 1094
916 /* Keep irq disabled to prevent changes to the clock */
917 local_irq_save(flags);
918 kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
919 ktime_get_ts(&ts);
920 monotonic_to_bootbased(&ts);
921 local_irq_restore(flags); 1095 local_irq_restore(flags);
922 1096
923 /* With all the info we got, fill in the values */ 1097 if (!vcpu->time_page)
1098 return 0;
924 1099
925 vcpu->hv_clock.system_time = ts.tv_nsec + 1100 /*
926 (NSEC_PER_SEC * (u64)ts.tv_sec) + v->kvm->arch.kvmclock_offset; 1101 * Time as measured by the TSC may go backwards when resetting the base
1102 * tsc_timestamp. The reason for this is that the TSC resolution is
1103 * higher than the resolution of the other clock scales. Thus, many
1104 * possible measurements of the TSC correspond to one measurement of any
1105 * other clock, and so a spread of values is possible. This is not a
1106 * problem for the computation of the nanosecond clock; with TSC rates
1107 * around 1GHZ, there can only be a few cycles which correspond to one
1108 * nanosecond value, and any path through this code will inevitably
1109 * take longer than that. However, with the kernel_ns value itself,
1110 * the precision may be much lower, down to HZ granularity. If the
1111 * first sampling of TSC against kernel_ns ends in the low part of the
1112 * range, and the second in the high end of the range, we can get:
1113 *
1114 * (TSC - offset_low) * S + kns_old > (TSC - offset_high) * S + kns_new
1115 *
1116 * As the sampling errors potentially range in the thousands of cycles,
1117 * it is possible such a time value has already been observed by the
1118 * guest. To protect against this, we must compute the system time as
1119 * observed by the guest and ensure the new system time is greater.
1120 */
1121 max_kernel_ns = 0;
1122 if (vcpu->hv_clock.tsc_timestamp && vcpu->last_guest_tsc) {
1123 max_kernel_ns = vcpu->last_guest_tsc -
1124 vcpu->hv_clock.tsc_timestamp;
1125 max_kernel_ns = pvclock_scale_delta(max_kernel_ns,
1126 vcpu->hv_clock.tsc_to_system_mul,
1127 vcpu->hv_clock.tsc_shift);
1128 max_kernel_ns += vcpu->last_kernel_ns;
1129 }
927 1130
1131 if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
1132 kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz,
1133 &vcpu->hv_clock.tsc_shift,
1134 &vcpu->hv_clock.tsc_to_system_mul);
1135 vcpu->hw_tsc_khz = this_tsc_khz;
1136 }
1137
1138 if (max_kernel_ns > kernel_ns)
1139 kernel_ns = max_kernel_ns;
1140
1141 /* With all the info we got, fill in the values */
1142 vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
1143 vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
1144 vcpu->last_kernel_ns = kernel_ns;
1145 vcpu->last_guest_tsc = tsc_timestamp;
928 vcpu->hv_clock.flags = 0; 1146 vcpu->hv_clock.flags = 0;
929 1147
930 /* 1148 /*
@@ -942,16 +1160,7 @@ static void kvm_write_guest_time(struct kvm_vcpu *v)
942 kunmap_atomic(shared_kaddr, KM_USER0); 1160 kunmap_atomic(shared_kaddr, KM_USER0);
943 1161
944 mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT); 1162 mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
945} 1163 return 0;
946
947static int kvm_request_guest_time_update(struct kvm_vcpu *v)
948{
949 struct kvm_vcpu_arch *vcpu = &v->arch;
950
951 if (!vcpu->time_page)
952 return 0;
953 kvm_make_request(KVM_REQ_KVMCLOCK_UPDATE, v);
954 return 1;
955} 1164}
956 1165
957static bool msr_mtrr_valid(unsigned msr) 1166static bool msr_mtrr_valid(unsigned msr)
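
The long comment in kvm_guest_time_update() argues that the freshly sampled kernel_ns must be clamped so the guest never observes kvmclock running backwards across an epoch change. A toy illustration of that clamp, using hypothetical numbers and a 1 cycle = 1 ns scale so the pvclock scaling factors drop out (not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Previous kvmclock epoch published to the guest (hypothetical) */
        uint64_t old_tsc_timestamp = 1000000;   /* cycles */
        uint64_t last_kernel_ns    = 5000000;   /* ns */
        /* TSC value at the guest's most recent clock read */
        uint64_t last_guest_tsc    = 1003000;

        /* Largest time the guest may already have observed (scale == 1) */
        uint64_t max_kernel_ns = last_kernel_ns +
                                 (last_guest_tsc - old_tsc_timestamp);

        /* Fresh sampling landed at the low end of its granularity window */
        uint64_t kernel_ns = 5002000;

        if (max_kernel_ns > kernel_ns)
                kernel_ns = max_kernel_ns;      /* clamp to 5003000, not 5002000 */

        printf("system_time published with the new epoch: %llu ns\n",
               (unsigned long long)kernel_ns);
        return 0;
}
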
@@ -1277,6 +1486,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1277 } 1486 }
1278 1487
1279 vcpu->arch.time = data; 1488 vcpu->arch.time = data;
1489 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
1280 1490
1281 /* we verify if the enable bit is set... */ 1491 /* we verify if the enable bit is set... */
1282 if (!(data & 1)) 1492 if (!(data & 1))
@@ -1292,8 +1502,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1292 kvm_release_page_clean(vcpu->arch.time_page); 1502 kvm_release_page_clean(vcpu->arch.time_page);
1293 vcpu->arch.time_page = NULL; 1503 vcpu->arch.time_page = NULL;
1294 } 1504 }
1295
1296 kvm_request_guest_time_update(vcpu);
1297 break; 1505 break;
1298 } 1506 }
1299 case MSR_IA32_MCG_CTL: 1507 case MSR_IA32_MCG_CTL:
@@ -1330,6 +1538,16 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1330 pr_unimpl(vcpu, "unimplemented perfctr wrmsr: " 1538 pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
1331 "0x%x data 0x%llx\n", msr, data); 1539 "0x%x data 0x%llx\n", msr, data);
1332 break; 1540 break;
1541 case MSR_K7_CLK_CTL:
1542 /*
1543 * Ignore all writes to this no longer documented MSR.
1544 * Writes are only relevant for old K7 processors,
1545 * all pre-dating SVM, but a recommended workaround from
1546 * AMD for these chips. It is possible to specify the
1547 * affected processor models on the command line, hence
1548 * the need to ignore the workaround.
1549 */
1550 break;
1333 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: 1551 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
1334 if (kvm_hv_msr_partition_wide(msr)) { 1552 if (kvm_hv_msr_partition_wide(msr)) {
1335 int r; 1553 int r;
@@ -1522,6 +1740,20 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1522 case 0xcd: /* fsb frequency */ 1740 case 0xcd: /* fsb frequency */
1523 data = 3; 1741 data = 3;
1524 break; 1742 break;
1743 /*
1744 * MSR_EBC_FREQUENCY_ID
1745 * Conservative value valid for even the basic CPU models.
1746 * Models 0,1: 000 in bits 23:21 indicating a bus speed of
1747 * 100MHz, model 2 000 in bits 18:16 indicating 100MHz,
1748 * and 266MHz for model 3, or 4. Set Core Clock
1749 * Frequency to System Bus Frequency Ratio to 1 (bits
1750 * 31:24) even though these are only valid for CPU
1751 * models > 2, however guests may end up dividing or
1752 * multiplying by zero otherwise.
1753 */
1754 case MSR_EBC_FREQUENCY_ID:
1755 data = 1 << 24;
1756 break;
1525 case MSR_IA32_APICBASE: 1757 case MSR_IA32_APICBASE:
1526 data = kvm_get_apic_base(vcpu); 1758 data = kvm_get_apic_base(vcpu);
1527 break; 1759 break;
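
Decoding the synthetic MSR_EBC_FREQUENCY_ID value against the bit fields named in the comment shows why 1 << 24 is conservative: the core-to-bus ratio (bits 31:24) reads as 1 and the bus-speed codes read as 0. A purely illustrative decode (not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t data = 1ULL << 24;             /* value returned by the patch */

        unsigned ratio   = (data >> 24) & 0xff; /* bits 31:24 */
        unsigned bus_m01 = (data >> 21) & 0x7;  /* bits 23:21, models 0/1 */
        unsigned bus_m2  = (data >> 16) & 0x7;  /* bits 18:16, model 2 */

        printf("core/bus ratio=%u, bus code (models 0/1)=%u, bus code (model 2)=%u\n",
               ratio, bus_m01, bus_m2);         /* 1, 0, 0 -> ratio 1, 100 MHz */
        return 0;
}
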
@@ -1555,6 +1787,18 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1555 case MSR_IA32_MCG_STATUS: 1787 case MSR_IA32_MCG_STATUS:
1556 case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1: 1788 case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
1557 return get_msr_mce(vcpu, msr, pdata); 1789 return get_msr_mce(vcpu, msr, pdata);
1790 case MSR_K7_CLK_CTL:
1791 /*
1792 * Provide expected ramp-up count for K7. All others
1793 * are set to zero, indicating minimum divisors for
1794 * every field.
1795 *
1796 * This prevents guest kernels on AMD host with CPU
1797 * type 6, model 8 and higher from exploding due to
1798 * the rdmsr failing.
1799 */
1800 data = 0x20000000;
1801 break;
1558 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: 1802 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
1559 if (kvm_hv_msr_partition_wide(msr)) { 1803 if (kvm_hv_msr_partition_wide(msr)) {
1560 int r; 1804 int r;
@@ -1808,19 +2052,28 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1808 } 2052 }
1809 2053
1810 kvm_x86_ops->vcpu_load(vcpu, cpu); 2054 kvm_x86_ops->vcpu_load(vcpu, cpu);
1811 if (unlikely(per_cpu(cpu_tsc_khz, cpu) == 0)) { 2055 if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
1812 unsigned long khz = cpufreq_quick_get(cpu); 2056 /* Make sure TSC doesn't go backwards */
1813 if (!khz) 2057 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
1814 khz = tsc_khz; 2058 native_read_tsc() - vcpu->arch.last_host_tsc;
1815 per_cpu(cpu_tsc_khz, cpu) = khz; 2059 if (tsc_delta < 0)
2060 mark_tsc_unstable("KVM discovered backwards TSC");
2061 if (check_tsc_unstable()) {
2062 kvm_x86_ops->adjust_tsc_offset(vcpu, -tsc_delta);
2063 vcpu->arch.tsc_catchup = 1;
2064 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
2065 }
2066 if (vcpu->cpu != cpu)
2067 kvm_migrate_timers(vcpu);
2068 vcpu->cpu = cpu;
1816 } 2069 }
1817 kvm_request_guest_time_update(vcpu);
1818} 2070}
1819 2071
1820void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) 2072void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1821{ 2073{
1822 kvm_x86_ops->vcpu_put(vcpu); 2074 kvm_x86_ops->vcpu_put(vcpu);
1823 kvm_put_guest_fpu(vcpu); 2075 kvm_put_guest_fpu(vcpu);
2076 vcpu->arch.last_host_tsc = native_read_tsc();
1824} 2077}
1825 2078
1826static int is_efer_nx(void) 2079static int is_efer_nx(void)
@@ -1995,7 +2248,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
1995 F(F16C); 2248 F(F16C);
1996 /* cpuid 0x80000001.ecx */ 2249 /* cpuid 0x80000001.ecx */
1997 const u32 kvm_supported_word6_x86_features = 2250 const u32 kvm_supported_word6_x86_features =
1998 F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ | 2251 F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
1999 F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) | 2252 F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
2000 F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) | 2253 F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) |
2001 0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM); 2254 0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);
@@ -2204,6 +2457,7 @@ static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
2204 return -ENXIO; 2457 return -ENXIO;
2205 2458
2206 kvm_queue_interrupt(vcpu, irq->irq, false); 2459 kvm_queue_interrupt(vcpu, irq->irq, false);
2460 kvm_make_request(KVM_REQ_EVENT, vcpu);
2207 2461
2208 return 0; 2462 return 0;
2209} 2463}
@@ -2357,6 +2611,8 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
2357 if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR) 2611 if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
2358 vcpu->arch.sipi_vector = events->sipi_vector; 2612 vcpu->arch.sipi_vector = events->sipi_vector;
2359 2613
2614 kvm_make_request(KVM_REQ_EVENT, vcpu);
2615
2360 return 0; 2616 return 0;
2361} 2617}
2362 2618
@@ -2760,7 +3016,7 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
2760 3016
2761static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm) 3017static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
2762{ 3018{
2763 return kvm->arch.n_alloc_mmu_pages; 3019 return kvm->arch.n_max_mmu_pages;
2764} 3020}
2765 3021
2766static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) 3022static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
@@ -2796,18 +3052,18 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
2796 r = 0; 3052 r = 0;
2797 switch (chip->chip_id) { 3053 switch (chip->chip_id) {
2798 case KVM_IRQCHIP_PIC_MASTER: 3054 case KVM_IRQCHIP_PIC_MASTER:
2799 raw_spin_lock(&pic_irqchip(kvm)->lock); 3055 spin_lock(&pic_irqchip(kvm)->lock);
2800 memcpy(&pic_irqchip(kvm)->pics[0], 3056 memcpy(&pic_irqchip(kvm)->pics[0],
2801 &chip->chip.pic, 3057 &chip->chip.pic,
2802 sizeof(struct kvm_pic_state)); 3058 sizeof(struct kvm_pic_state));
2803 raw_spin_unlock(&pic_irqchip(kvm)->lock); 3059 spin_unlock(&pic_irqchip(kvm)->lock);
2804 break; 3060 break;
2805 case KVM_IRQCHIP_PIC_SLAVE: 3061 case KVM_IRQCHIP_PIC_SLAVE:
2806 raw_spin_lock(&pic_irqchip(kvm)->lock); 3062 spin_lock(&pic_irqchip(kvm)->lock);
2807 memcpy(&pic_irqchip(kvm)->pics[1], 3063 memcpy(&pic_irqchip(kvm)->pics[1],
2808 &chip->chip.pic, 3064 &chip->chip.pic,
2809 sizeof(struct kvm_pic_state)); 3065 sizeof(struct kvm_pic_state));
2810 raw_spin_unlock(&pic_irqchip(kvm)->lock); 3066 spin_unlock(&pic_irqchip(kvm)->lock);
2811 break; 3067 break;
2812 case KVM_IRQCHIP_IOAPIC: 3068 case KVM_IRQCHIP_IOAPIC:
2813 r = kvm_set_ioapic(kvm, &chip->chip.ioapic); 3069 r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
@@ -3201,7 +3457,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
3201 break; 3457 break;
3202 } 3458 }
3203 case KVM_SET_CLOCK: { 3459 case KVM_SET_CLOCK: {
3204 struct timespec now;
3205 struct kvm_clock_data user_ns; 3460 struct kvm_clock_data user_ns;
3206 u64 now_ns; 3461 u64 now_ns;
3207 s64 delta; 3462 s64 delta;
@@ -3215,20 +3470,21 @@ long kvm_arch_vm_ioctl(struct file *filp,
3215 goto out; 3470 goto out;
3216 3471
3217 r = 0; 3472 r = 0;
3218 ktime_get_ts(&now); 3473 local_irq_disable();
3219 now_ns = timespec_to_ns(&now); 3474 now_ns = get_kernel_ns();
3220 delta = user_ns.clock - now_ns; 3475 delta = user_ns.clock - now_ns;
3476 local_irq_enable();
3221 kvm->arch.kvmclock_offset = delta; 3477 kvm->arch.kvmclock_offset = delta;
3222 break; 3478 break;
3223 } 3479 }
3224 case KVM_GET_CLOCK: { 3480 case KVM_GET_CLOCK: {
3225 struct timespec now;
3226 struct kvm_clock_data user_ns; 3481 struct kvm_clock_data user_ns;
3227 u64 now_ns; 3482 u64 now_ns;
3228 3483
3229 ktime_get_ts(&now); 3484 local_irq_disable();
3230 now_ns = timespec_to_ns(&now); 3485 now_ns = get_kernel_ns();
3231 user_ns.clock = kvm->arch.kvmclock_offset + now_ns; 3486 user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
3487 local_irq_enable();
3232 user_ns.flags = 0; 3488 user_ns.flags = 0;
3233 3489
3234 r = -EFAULT; 3490 r = -EFAULT;
@@ -3292,30 +3548,51 @@ void kvm_get_segment(struct kvm_vcpu *vcpu,
3292 kvm_x86_ops->get_segment(vcpu, var, seg); 3548 kvm_x86_ops->get_segment(vcpu, var, seg);
3293} 3549}
3294 3550
3551static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
3552{
3553 return gpa;
3554}
3555
3556static gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
3557{
3558 gpa_t t_gpa;
3559 u32 error;
3560
3561 BUG_ON(!mmu_is_nested(vcpu));
3562
3563 /* NPT walks are always user-walks */
3564 access |= PFERR_USER_MASK;
3565 t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &error);
3566 if (t_gpa == UNMAPPED_GVA)
3567 vcpu->arch.fault.nested = true;
3568
3569 return t_gpa;
3570}
3571
3295gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error) 3572gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
3296{ 3573{
3297 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; 3574 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3298 return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error); 3575 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
3299} 3576}
3300 3577
3301 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error) 3578 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
3302{ 3579{
3303 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; 3580 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3304 access |= PFERR_FETCH_MASK; 3581 access |= PFERR_FETCH_MASK;
3305 return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error); 3582 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
3306} 3583}
3307 3584
3308gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error) 3585gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
3309{ 3586{
3310 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; 3587 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3311 access |= PFERR_WRITE_MASK; 3588 access |= PFERR_WRITE_MASK;
3312 return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error); 3589 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
3313} 3590}
3314 3591
3315/* uses this to access any guest's mapped memory without checking CPL */ 3592/* uses this to access any guest's mapped memory without checking CPL */
3316gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error) 3593gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
3317{ 3594{
3318 return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, 0, error); 3595 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, error);
3319} 3596}
3320 3597
3321static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, 3598static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
@@ -3326,7 +3603,8 @@ static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
3326 int r = X86EMUL_CONTINUE; 3603 int r = X86EMUL_CONTINUE;
3327 3604
3328 while (bytes) { 3605 while (bytes) {
3329 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr, access, error); 3606 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
3607 error);
3330 unsigned offset = addr & (PAGE_SIZE-1); 3608 unsigned offset = addr & (PAGE_SIZE-1);
3331 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset); 3609 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
3332 int ret; 3610 int ret;
@@ -3381,8 +3659,9 @@ static int kvm_write_guest_virt_system(gva_t addr, void *val,
3381 int r = X86EMUL_CONTINUE; 3659 int r = X86EMUL_CONTINUE;
3382 3660
3383 while (bytes) { 3661 while (bytes) {
3384 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr, 3662 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
3385 PFERR_WRITE_MASK, error); 3663 PFERR_WRITE_MASK,
3664 error);
3386 unsigned offset = addr & (PAGE_SIZE-1); 3665 unsigned offset = addr & (PAGE_SIZE-1);
3387 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset); 3666 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
3388 int ret; 3667 int ret;
@@ -3624,7 +3903,7 @@ static int emulator_pio_in_emulated(int size, unsigned short port, void *val,
3624 if (vcpu->arch.pio.count) 3903 if (vcpu->arch.pio.count)
3625 goto data_avail; 3904 goto data_avail;
3626 3905
3627 trace_kvm_pio(1, port, size, 1); 3906 trace_kvm_pio(0, port, size, 1);
3628 3907
3629 vcpu->arch.pio.port = port; 3908 vcpu->arch.pio.port = port;
3630 vcpu->arch.pio.in = 1; 3909 vcpu->arch.pio.in = 1;
@@ -3652,7 +3931,7 @@ static int emulator_pio_out_emulated(int size, unsigned short port,
3652 const void *val, unsigned int count, 3931 const void *val, unsigned int count,
3653 struct kvm_vcpu *vcpu) 3932 struct kvm_vcpu *vcpu)
3654{ 3933{
3655 trace_kvm_pio(0, port, size, 1); 3934 trace_kvm_pio(1, port, size, 1);
3656 3935
3657 vcpu->arch.pio.port = port; 3936 vcpu->arch.pio.port = port;
3658 vcpu->arch.pio.in = 0; 3937 vcpu->arch.pio.in = 0;
@@ -3791,6 +4070,11 @@ static void emulator_get_gdt(struct desc_ptr *dt, struct kvm_vcpu *vcpu)
3791 kvm_x86_ops->get_gdt(vcpu, dt); 4070 kvm_x86_ops->get_gdt(vcpu, dt);
3792} 4071}
3793 4072
4073static void emulator_get_idt(struct desc_ptr *dt, struct kvm_vcpu *vcpu)
4074{
4075 kvm_x86_ops->get_idt(vcpu, dt);
4076}
4077
3794static unsigned long emulator_get_cached_segment_base(int seg, 4078static unsigned long emulator_get_cached_segment_base(int seg,
3795 struct kvm_vcpu *vcpu) 4079 struct kvm_vcpu *vcpu)
3796{ 4080{
@@ -3884,6 +4168,7 @@ static struct x86_emulate_ops emulate_ops = {
3884 .set_segment_selector = emulator_set_segment_selector, 4168 .set_segment_selector = emulator_set_segment_selector,
3885 .get_cached_segment_base = emulator_get_cached_segment_base, 4169 .get_cached_segment_base = emulator_get_cached_segment_base,
3886 .get_gdt = emulator_get_gdt, 4170 .get_gdt = emulator_get_gdt,
4171 .get_idt = emulator_get_idt,
3887 .get_cr = emulator_get_cr, 4172 .get_cr = emulator_get_cr,
3888 .set_cr = emulator_set_cr, 4173 .set_cr = emulator_set_cr,
3889 .cpl = emulator_get_cpl, 4174 .cpl = emulator_get_cpl,
@@ -3919,13 +4204,64 @@ static void inject_emulated_exception(struct kvm_vcpu *vcpu)
3919{ 4204{
3920 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; 4205 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
3921 if (ctxt->exception == PF_VECTOR) 4206 if (ctxt->exception == PF_VECTOR)
3922 kvm_inject_page_fault(vcpu, ctxt->cr2, ctxt->error_code); 4207 kvm_propagate_fault(vcpu);
3923 else if (ctxt->error_code_valid) 4208 else if (ctxt->error_code_valid)
3924 kvm_queue_exception_e(vcpu, ctxt->exception, ctxt->error_code); 4209 kvm_queue_exception_e(vcpu, ctxt->exception, ctxt->error_code);
3925 else 4210 else
3926 kvm_queue_exception(vcpu, ctxt->exception); 4211 kvm_queue_exception(vcpu, ctxt->exception);
3927} 4212}
3928 4213
4214static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
4215{
4216 struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
4217 int cs_db, cs_l;
4218
4219 cache_all_regs(vcpu);
4220
4221 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
4222
4223 vcpu->arch.emulate_ctxt.vcpu = vcpu;
4224 vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
4225 vcpu->arch.emulate_ctxt.eip = kvm_rip_read(vcpu);
4226 vcpu->arch.emulate_ctxt.mode =
4227 (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
4228 (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
4229 ? X86EMUL_MODE_VM86 : cs_l
4230 ? X86EMUL_MODE_PROT64 : cs_db
4231 ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
4232 memset(c, 0, sizeof(struct decode_cache));
4233 memcpy(c->regs, vcpu->arch.regs, sizeof c->regs);
4234}
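
The mode selection in init_emulate_ctxt() is a four-level nested conditional; unpacked, the priority order is CR0.PE, then EFLAGS.VM, then CS.L, then CS.D. The same decision written out flat, as a sketch rather than part of the patch; cs_l and cs_db are the values returned by kvm_x86_ops->get_cs_db_l_bits().

	#include <asm/kvm_emulate.h>		/* X86EMUL_MODE_* */
	#include <asm/processor-flags.h>	/* X86_EFLAGS_VM */

	static int pick_emul_mode(bool protmode, unsigned long eflags,
				  int cs_l, int cs_db)
	{
		if (!protmode)
			return X86EMUL_MODE_REAL;	/* CR0.PE clear */
		if (eflags & X86_EFLAGS_VM)
			return X86EMUL_MODE_VM86;	/* virtual-8086 mode */
		if (cs_l)
			return X86EMUL_MODE_PROT64;	/* 64-bit code segment */
		if (cs_db)
			return X86EMUL_MODE_PROT32;	/* D/B bit set */
		return X86EMUL_MODE_PROT16;
	}
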
4235
4236int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq)
4237{
4238 struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
4239 int ret;
4240
4241 init_emulate_ctxt(vcpu);
4242
4243 vcpu->arch.emulate_ctxt.decode.op_bytes = 2;
4244 vcpu->arch.emulate_ctxt.decode.ad_bytes = 2;
4245 vcpu->arch.emulate_ctxt.decode.eip = vcpu->arch.emulate_ctxt.eip;
4246 ret = emulate_int_real(&vcpu->arch.emulate_ctxt, &emulate_ops, irq);
4247
4248 if (ret != X86EMUL_CONTINUE)
4249 return EMULATE_FAIL;
4250
4251 vcpu->arch.emulate_ctxt.eip = c->eip;
4252 memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
4253 kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
4254 kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
4255
4256 if (irq == NMI_VECTOR)
4257 vcpu->arch.nmi_pending = false;
4258 else
4259 vcpu->arch.interrupt.pending = false;
4260
4261 return EMULATE_DONE;
4262}
4263EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt);
4264
3929static int handle_emulation_failure(struct kvm_vcpu *vcpu) 4265static int handle_emulation_failure(struct kvm_vcpu *vcpu)
3930{ 4266{
3931 ++vcpu->stat.insn_emulation_fail; 4267 ++vcpu->stat.insn_emulation_fail;
@@ -3982,24 +4318,15 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
3982 cache_all_regs(vcpu); 4318 cache_all_regs(vcpu);
3983 4319
3984 if (!(emulation_type & EMULTYPE_NO_DECODE)) { 4320 if (!(emulation_type & EMULTYPE_NO_DECODE)) {
3985 int cs_db, cs_l; 4321 init_emulate_ctxt(vcpu);
3986 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
3987
3988 vcpu->arch.emulate_ctxt.vcpu = vcpu;
3989 vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
3990 vcpu->arch.emulate_ctxt.eip = kvm_rip_read(vcpu);
3991 vcpu->arch.emulate_ctxt.mode =
3992 (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
3993 (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
3994 ? X86EMUL_MODE_VM86 : cs_l
3995 ? X86EMUL_MODE_PROT64 : cs_db
3996 ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
3997 memset(c, 0, sizeof(struct decode_cache));
3998 memcpy(c->regs, vcpu->arch.regs, sizeof c->regs);
3999 vcpu->arch.emulate_ctxt.interruptibility = 0; 4322 vcpu->arch.emulate_ctxt.interruptibility = 0;
4000 vcpu->arch.emulate_ctxt.exception = -1; 4323 vcpu->arch.emulate_ctxt.exception = -1;
4324 vcpu->arch.emulate_ctxt.perm_ok = false;
4325
4326 r = x86_decode_insn(&vcpu->arch.emulate_ctxt);
4327 if (r == X86EMUL_PROPAGATE_FAULT)
4328 goto done;
4001 4329
4002 r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
4003 trace_kvm_emulate_insn_start(vcpu); 4330 trace_kvm_emulate_insn_start(vcpu);
4004 4331
4005 /* Only allow emulation of specific instructions on #UD 4332 /* Only allow emulation of specific instructions on #UD
@@ -4049,41 +4376,39 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
4049 memcpy(c->regs, vcpu->arch.regs, sizeof c->regs); 4376 memcpy(c->regs, vcpu->arch.regs, sizeof c->regs);
4050 4377
4051restart: 4378restart:
4052 r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops); 4379 r = x86_emulate_insn(&vcpu->arch.emulate_ctxt);
4053 4380
4054 if (r) { /* emulation failed */ 4381 if (r == EMULATION_FAILED) {
4055 if (reexecute_instruction(vcpu, cr2)) 4382 if (reexecute_instruction(vcpu, cr2))
4056 return EMULATE_DONE; 4383 return EMULATE_DONE;
4057 4384
4058 return handle_emulation_failure(vcpu); 4385 return handle_emulation_failure(vcpu);
4059 } 4386 }
4060 4387
4061 toggle_interruptibility(vcpu, vcpu->arch.emulate_ctxt.interruptibility); 4388done:
4062 kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
4063 memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
4064 kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
4065
4066 if (vcpu->arch.emulate_ctxt.exception >= 0) { 4389 if (vcpu->arch.emulate_ctxt.exception >= 0) {
4067 inject_emulated_exception(vcpu); 4390 inject_emulated_exception(vcpu);
4068 return EMULATE_DONE; 4391 r = EMULATE_DONE;
4069 } 4392 } else if (vcpu->arch.pio.count) {
4070
4071 if (vcpu->arch.pio.count) {
4072 if (!vcpu->arch.pio.in) 4393 if (!vcpu->arch.pio.in)
4073 vcpu->arch.pio.count = 0; 4394 vcpu->arch.pio.count = 0;
4074 return EMULATE_DO_MMIO; 4395 r = EMULATE_DO_MMIO;
4075 } 4396 } else if (vcpu->mmio_needed) {
4076
4077 if (vcpu->mmio_needed) {
4078 if (vcpu->mmio_is_write) 4397 if (vcpu->mmio_is_write)
4079 vcpu->mmio_needed = 0; 4398 vcpu->mmio_needed = 0;
4080 return EMULATE_DO_MMIO; 4399 r = EMULATE_DO_MMIO;
4081 } 4400 } else if (r == EMULATION_RESTART)
4082
4083 if (vcpu->arch.emulate_ctxt.restart)
4084 goto restart; 4401 goto restart;
4402 else
4403 r = EMULATE_DONE;
4085 4404
4086 return EMULATE_DONE; 4405 toggle_interruptibility(vcpu, vcpu->arch.emulate_ctxt.interruptibility);
4406 kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
4407 kvm_make_request(KVM_REQ_EVENT, vcpu);
4408 memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
4409 kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
4410
4411 return r;
4087} 4412}
4088EXPORT_SYMBOL_GPL(emulate_instruction); 4413EXPORT_SYMBOL_GPL(emulate_instruction);
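
x86_emulate_insn() no longer returns a bare error: the caller above distinguishes EMULATION_FAILED, pending exceptions, outstanding PIO/MMIO, and EMULATION_RESTART, the last meaning the instruction must simply be re-entered. The control flow behind the 'goto restart' reduces to a small driver loop; a sketch, treating the result constants as the ones this series introduces in <asm/kvm_emulate.h>, with emulate_once() standing in for x86_emulate_insn().

	static int drive_emulation(struct x86_emulate_ctxt *ctxt,
				   int (*emulate_once)(struct x86_emulate_ctxt *))
	{
		int rc;

		do {
			rc = emulate_once(ctxt);
		} while (rc == EMULATION_RESTART);	/* e.g. REP string ops */

		return rc;	/* EMULATION_FAILED or completed */
	}
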
4089 4414
@@ -4097,9 +4422,23 @@ int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
4097} 4422}
4098EXPORT_SYMBOL_GPL(kvm_fast_pio_out); 4423EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
4099 4424
4100static void bounce_off(void *info) 4425static void tsc_bad(void *info)
4426{
4427 __get_cpu_var(cpu_tsc_khz) = 0;
4428}
4429
4430static void tsc_khz_changed(void *data)
4101{ 4431{
4102 /* nothing */ 4432 struct cpufreq_freqs *freq = data;
4433 unsigned long khz = 0;
4434
4435 if (data)
4436 khz = freq->new;
4437 else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
4438 khz = cpufreq_quick_get(raw_smp_processor_id());
4439 if (!khz)
4440 khz = tsc_khz;
4441 __get_cpu_var(cpu_tsc_khz) = khz;
4103} 4442}
4104 4443
4105static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val, 4444static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
@@ -4110,21 +4449,60 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
4110 struct kvm_vcpu *vcpu; 4449 struct kvm_vcpu *vcpu;
4111 int i, send_ipi = 0; 4450 int i, send_ipi = 0;
4112 4451
4452 /*
4453 * We allow guests to temporarily run on slowing clocks,
4454 * provided we notify them after, or to run on accelerating
4455 * clocks, provided we notify them before. Thus time never
4456 * goes backwards.
4457 *
4458 * However, we have a problem. We can't atomically update
4459 * the frequency of a given CPU from this function; it is
4460 * merely a notifier, which can be called from any CPU.
4461 * Changing the TSC frequency at arbitrary points in time
4462 * requires a recomputation of local variables related to
4463 * the TSC for each VCPU. We must flag these local variables
4464 * to be updated and be sure the update takes place with the
4465 * new frequency before any guests proceed.
4466 *
4467 * Unfortunately, the combination of hotplug CPU and frequency
4468 * change creates an intractable locking scenario; the order
4469 * of when these callouts happen is undefined with respect to
4470 * CPU hotplug, and they can race with each other. As such,
4471 * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is
4472 * undefined; you can actually have a CPU frequency change take
4473 * place in between the computation of X and the setting of the
4474 * variable. To protect against this problem, all updates of
4475 * the per_cpu tsc_khz variable are done in an interrupt
4476 * protected IPI, and all callers wishing to update the value
4477 * must wait for a synchronous IPI to complete (which is trivial
4478 * if the caller is on the CPU already). This establishes the
4479 * necessary total order on variable updates.
4480 *
4481 * Note that because a guest time update may take place
4482 * anytime after the setting of the VCPU's request bit, the
4483 * correct TSC value must be set before the request. However,
4484 * to ensure the update actually makes it to any guest which
4485 * starts running in hardware virtualization between the set
4486 * and the acquisition of the spinlock, we must also ping the
4487 * CPU after setting the request bit.
4488 *
4489 */
4490
4113 if (val == CPUFREQ_PRECHANGE && freq->old > freq->new) 4491 if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
4114 return 0; 4492 return 0;
4115 if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new) 4493 if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
4116 return 0; 4494 return 0;
4117 per_cpu(cpu_tsc_khz, freq->cpu) = freq->new; 4495
4496 smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
4118 4497
4119 spin_lock(&kvm_lock); 4498 spin_lock(&kvm_lock);
4120 list_for_each_entry(kvm, &vm_list, vm_list) { 4499 list_for_each_entry(kvm, &vm_list, vm_list) {
4121 kvm_for_each_vcpu(i, vcpu, kvm) { 4500 kvm_for_each_vcpu(i, vcpu, kvm) {
4122 if (vcpu->cpu != freq->cpu) 4501 if (vcpu->cpu != freq->cpu)
4123 continue; 4502 continue;
4124 if (!kvm_request_guest_time_update(vcpu)) 4503 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
4125 continue;
4126 if (vcpu->cpu != smp_processor_id()) 4504 if (vcpu->cpu != smp_processor_id())
4127 send_ipi++; 4505 send_ipi = 1;
4128 } 4506 }
4129 } 4507 }
4130 spin_unlock(&kvm_lock); 4508 spin_unlock(&kvm_lock);
@@ -4142,32 +4520,57 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
4142 * guest context is entered kvmclock will be updated, 4520 * guest context is entered kvmclock will be updated,
4143 * so the guest will not see stale values. 4521 * so the guest will not see stale values.
4144 */ 4522 */
4145 smp_call_function_single(freq->cpu, bounce_off, NULL, 1); 4523 smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
4146 } 4524 }
4147 return 0; 4525 return 0;
4148} 4526}
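
The long comment above reduces to one rule: cpu_tsc_khz is written only on its own CPU, from within an IPI handler, and every writer waits for that IPI to finish, so updates are totally ordered against both CPU hotplug and frequency changes. A standalone sketch of that pattern; the per-CPU variable name is hypothetical, the primitives are the ones used in the hunk.

	#include <linux/smp.h>
	#include <linux/percpu.h>

	static DEFINE_PER_CPU(unsigned long, my_tsc_khz);	/* hypothetical */

	/* Runs on the target CPU, in IPI context with interrupts disabled. */
	static void set_tsc_khz_ipi(void *data)
	{
		__get_cpu_var(my_tsc_khz) = *(unsigned long *)data;
	}

	/* Caller side: wait=1 makes the call synchronous, so passing a stack
	 * variable is safe and the new value is visible before we return. */
	static void publish_tsc_khz(int cpu, unsigned long khz)
	{
		smp_call_function_single(cpu, set_tsc_khz_ipi, &khz, 1);
	}
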
4149 4527
4150static struct notifier_block kvmclock_cpufreq_notifier_block = { 4528static struct notifier_block kvmclock_cpufreq_notifier_block = {
4151 .notifier_call = kvmclock_cpufreq_notifier 4529 .notifier_call = kvmclock_cpufreq_notifier
4530};
4531
4532static int kvmclock_cpu_notifier(struct notifier_block *nfb,
4533 unsigned long action, void *hcpu)
4534{
4535 unsigned int cpu = (unsigned long)hcpu;
4536
4537 switch (action) {
4538 case CPU_ONLINE:
4539 case CPU_DOWN_FAILED:
4540 smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
4541 break;
4542 case CPU_DOWN_PREPARE:
4543 smp_call_function_single(cpu, tsc_bad, NULL, 1);
4544 break;
4545 }
4546 return NOTIFY_OK;
4547}
4548
4549static struct notifier_block kvmclock_cpu_notifier_block = {
4550 .notifier_call = kvmclock_cpu_notifier,
4551 .priority = -INT_MAX
4152}; 4552};
4153 4553
4154static void kvm_timer_init(void) 4554static void kvm_timer_init(void)
4155{ 4555{
4156 int cpu; 4556 int cpu;
4157 4557
4558 max_tsc_khz = tsc_khz;
4559 register_hotcpu_notifier(&kvmclock_cpu_notifier_block);
4158 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { 4560 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
4561#ifdef CONFIG_CPU_FREQ
4562 struct cpufreq_policy policy;
4563 memset(&policy, 0, sizeof(policy));
4564 cpufreq_get_policy(&policy, get_cpu());
4565 if (policy.cpuinfo.max_freq)
4566 max_tsc_khz = policy.cpuinfo.max_freq;
4567#endif
4159 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block, 4568 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
4160 CPUFREQ_TRANSITION_NOTIFIER); 4569 CPUFREQ_TRANSITION_NOTIFIER);
4161 for_each_online_cpu(cpu) {
4162 unsigned long khz = cpufreq_get(cpu);
4163 if (!khz)
4164 khz = tsc_khz;
4165 per_cpu(cpu_tsc_khz, cpu) = khz;
4166 }
4167 } else {
4168 for_each_possible_cpu(cpu)
4169 per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
4170 } 4570 }
4571 pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz);
4572 for_each_online_cpu(cpu)
4573 smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
4171} 4574}
4172 4575
4173static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu); 4576static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
@@ -4269,6 +4672,7 @@ void kvm_arch_exit(void)
4269 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) 4672 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
4270 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block, 4673 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
4271 CPUFREQ_TRANSITION_NOTIFIER); 4674 CPUFREQ_TRANSITION_NOTIFIER);
4675 unregister_hotcpu_notifier(&kvmclock_cpu_notifier_block);
4272 kvm_x86_ops = NULL; 4676 kvm_x86_ops = NULL;
4273 kvm_mmu_module_exit(); 4677 kvm_mmu_module_exit();
4274} 4678}
@@ -4684,8 +5088,11 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
4684 kvm_mmu_unload(vcpu); 5088 kvm_mmu_unload(vcpu);
4685 if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu)) 5089 if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
4686 __kvm_migrate_timers(vcpu); 5090 __kvm_migrate_timers(vcpu);
4687 if (kvm_check_request(KVM_REQ_KVMCLOCK_UPDATE, vcpu)) 5091 if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
4688 kvm_write_guest_time(vcpu); 5092 r = kvm_guest_time_update(vcpu);
5093 if (unlikely(r))
5094 goto out;
5095 }
4689 if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu)) 5096 if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
4690 kvm_mmu_sync_roots(vcpu); 5097 kvm_mmu_sync_roots(vcpu);
4691 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) 5098 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
@@ -4710,6 +5117,21 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
4710 if (unlikely(r)) 5117 if (unlikely(r))
4711 goto out; 5118 goto out;
4712 5119
5120 if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
5121 inject_pending_event(vcpu);
5122
5123 /* enable NMI/IRQ window open exits if needed */
5124 if (vcpu->arch.nmi_pending)
5125 kvm_x86_ops->enable_nmi_window(vcpu);
5126 else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
5127 kvm_x86_ops->enable_irq_window(vcpu);
5128
5129 if (kvm_lapic_enabled(vcpu)) {
5130 update_cr8_intercept(vcpu);
5131 kvm_lapic_sync_to_vapic(vcpu);
5132 }
5133 }
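
KVM_REQ_EVENT, used throughout this patch, is just a bit in vcpu->requests: producers set it wherever pending-event state may have changed, and the block above consumes it once per guest entry before reinjecting events and opening NMI/IRQ windows. A sketch of the two halves of that request mechanism, assuming the usual atomic-bitmask layout:

	#include <linux/bitops.h>

	/* Producer side (cf. kvm_make_request()): mark work for the vCPU. */
	static inline void make_request(unsigned long *requests, int req)
	{
		set_bit(req, requests);
	}

	/* Consumer side (cf. kvm_check_request()): test-and-clear, so each
	 * request is serviced exactly once per guest entry. */
	static inline bool check_request(unsigned long *requests, int req)
	{
		if (test_bit(req, requests)) {
			clear_bit(req, requests);
			return true;
		}
		return false;
	}
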
5134
4713 preempt_disable(); 5135 preempt_disable();
4714 5136
4715 kvm_x86_ops->prepare_guest_switch(vcpu); 5137 kvm_x86_ops->prepare_guest_switch(vcpu);
@@ -4728,23 +5150,11 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
4728 smp_wmb(); 5150 smp_wmb();
4729 local_irq_enable(); 5151 local_irq_enable();
4730 preempt_enable(); 5152 preempt_enable();
5153 kvm_x86_ops->cancel_injection(vcpu);
4731 r = 1; 5154 r = 1;
4732 goto out; 5155 goto out;
4733 } 5156 }
4734 5157
4735 inject_pending_event(vcpu);
4736
4737 /* enable NMI/IRQ window open exits if needed */
4738 if (vcpu->arch.nmi_pending)
4739 kvm_x86_ops->enable_nmi_window(vcpu);
4740 else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
4741 kvm_x86_ops->enable_irq_window(vcpu);
4742
4743 if (kvm_lapic_enabled(vcpu)) {
4744 update_cr8_intercept(vcpu);
4745 kvm_lapic_sync_to_vapic(vcpu);
4746 }
4747
4748 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); 5158 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
4749 5159
4750 kvm_guest_enter(); 5160 kvm_guest_enter();
@@ -4770,6 +5180,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
4770 if (hw_breakpoint_active()) 5180 if (hw_breakpoint_active())
4771 hw_breakpoint_restore(); 5181 hw_breakpoint_restore();
4772 5182
5183 kvm_get_msr(vcpu, MSR_IA32_TSC, &vcpu->arch.last_guest_tsc);
5184
4773 atomic_set(&vcpu->guest_mode, 0); 5185 atomic_set(&vcpu->guest_mode, 0);
4774 smp_wmb(); 5186 smp_wmb();
4775 local_irq_enable(); 5187 local_irq_enable();
@@ -4899,8 +5311,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
4899 if (!irqchip_in_kernel(vcpu->kvm)) 5311 if (!irqchip_in_kernel(vcpu->kvm))
4900 kvm_set_cr8(vcpu, kvm_run->cr8); 5312 kvm_set_cr8(vcpu, kvm_run->cr8);
4901 5313
4902 if (vcpu->arch.pio.count || vcpu->mmio_needed || 5314 if (vcpu->arch.pio.count || vcpu->mmio_needed) {
4903 vcpu->arch.emulate_ctxt.restart) {
4904 if (vcpu->mmio_needed) { 5315 if (vcpu->mmio_needed) {
4905 memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8); 5316 memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
4906 vcpu->mmio_read_completed = 1; 5317 vcpu->mmio_read_completed = 1;
@@ -4981,6 +5392,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
4981 5392
4982 vcpu->arch.exception.pending = false; 5393 vcpu->arch.exception.pending = false;
4983 5394
5395 kvm_make_request(KVM_REQ_EVENT, vcpu);
5396
4984 return 0; 5397 return 0;
4985} 5398}
4986 5399
@@ -5044,6 +5457,7 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
5044 struct kvm_mp_state *mp_state) 5457 struct kvm_mp_state *mp_state)
5045{ 5458{
5046 vcpu->arch.mp_state = mp_state->mp_state; 5459 vcpu->arch.mp_state = mp_state->mp_state;
5460 kvm_make_request(KVM_REQ_EVENT, vcpu);
5047 return 0; 5461 return 0;
5048} 5462}
5049 5463
@@ -5051,24 +5465,11 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
5051 bool has_error_code, u32 error_code) 5465 bool has_error_code, u32 error_code)
5052{ 5466{
5053 struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode; 5467 struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
5054 int cs_db, cs_l, ret; 5468 int ret;
5055 cache_all_regs(vcpu);
5056
5057 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
5058 5469
5059 vcpu->arch.emulate_ctxt.vcpu = vcpu; 5470 init_emulate_ctxt(vcpu);
5060 vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
5061 vcpu->arch.emulate_ctxt.eip = kvm_rip_read(vcpu);
5062 vcpu->arch.emulate_ctxt.mode =
5063 (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
5064 (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
5065 ? X86EMUL_MODE_VM86 : cs_l
5066 ? X86EMUL_MODE_PROT64 : cs_db
5067 ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
5068 memset(c, 0, sizeof(struct decode_cache));
5069 memcpy(c->regs, vcpu->arch.regs, sizeof c->regs);
5070 5471
5071 ret = emulator_task_switch(&vcpu->arch.emulate_ctxt, &emulate_ops, 5472 ret = emulator_task_switch(&vcpu->arch.emulate_ctxt,
5072 tss_selector, reason, has_error_code, 5473 tss_selector, reason, has_error_code,
5073 error_code); 5474 error_code);
5074 5475
@@ -5078,6 +5479,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
5078 memcpy(vcpu->arch.regs, c->regs, sizeof c->regs); 5479 memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
5079 kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip); 5480 kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
5080 kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags); 5481 kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
5482 kvm_make_request(KVM_REQ_EVENT, vcpu);
5081 return EMULATE_DONE; 5483 return EMULATE_DONE;
5082} 5484}
5083EXPORT_SYMBOL_GPL(kvm_task_switch); 5485EXPORT_SYMBOL_GPL(kvm_task_switch);
@@ -5113,7 +5515,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
5113 mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; 5515 mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
5114 kvm_x86_ops->set_cr4(vcpu, sregs->cr4); 5516 kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
5115 if (!is_long_mode(vcpu) && is_pae(vcpu)) { 5517 if (!is_long_mode(vcpu) && is_pae(vcpu)) {
5116 load_pdptrs(vcpu, vcpu->arch.cr3); 5518 load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3);
5117 mmu_reset_needed = 1; 5519 mmu_reset_needed = 1;
5118 } 5520 }
5119 5521
@@ -5148,6 +5550,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
5148 !is_protmode(vcpu)) 5550 !is_protmode(vcpu))
5149 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 5551 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
5150 5552
5553 kvm_make_request(KVM_REQ_EVENT, vcpu);
5554
5151 return 0; 5555 return 0;
5152} 5556}
5153 5557
@@ -5334,6 +5738,10 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
5334struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, 5738struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
5335 unsigned int id) 5739 unsigned int id)
5336{ 5740{
5741 if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
5742 printk_once(KERN_WARNING
5743 "kvm: SMP vm created on host with unstable TSC; "
5744 "guest TSC will not be reliable\n");
5337 return kvm_x86_ops->vcpu_create(kvm, id); 5745 return kvm_x86_ops->vcpu_create(kvm, id);
5338} 5746}
5339 5747
@@ -5376,22 +5784,22 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
5376 vcpu->arch.dr6 = DR6_FIXED_1; 5784 vcpu->arch.dr6 = DR6_FIXED_1;
5377 vcpu->arch.dr7 = DR7_FIXED_1; 5785 vcpu->arch.dr7 = DR7_FIXED_1;
5378 5786
5787 kvm_make_request(KVM_REQ_EVENT, vcpu);
5788
5379 return kvm_x86_ops->vcpu_reset(vcpu); 5789 return kvm_x86_ops->vcpu_reset(vcpu);
5380} 5790}
5381 5791
5382int kvm_arch_hardware_enable(void *garbage) 5792int kvm_arch_hardware_enable(void *garbage)
5383{ 5793{
5384 /* 5794 struct kvm *kvm;
5385 * Since this may be called from a hotplug notification, 5795 struct kvm_vcpu *vcpu;
5386 * we can't get the CPU frequency directly. 5796 int i;
5387 */
5388 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
5389 int cpu = raw_smp_processor_id();
5390 per_cpu(cpu_tsc_khz, cpu) = 0;
5391 }
5392 5797
5393 kvm_shared_msr_cpu_online(); 5798 kvm_shared_msr_cpu_online();
5394 5799 list_for_each_entry(kvm, &vm_list, vm_list)
5800 kvm_for_each_vcpu(i, vcpu, kvm)
5801 if (vcpu->cpu == smp_processor_id())
5802 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
5395 return kvm_x86_ops->hardware_enable(garbage); 5803 return kvm_x86_ops->hardware_enable(garbage);
5396} 5804}
5397 5805
@@ -5425,7 +5833,11 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
5425 BUG_ON(vcpu->kvm == NULL); 5833 BUG_ON(vcpu->kvm == NULL);
5426 kvm = vcpu->kvm; 5834 kvm = vcpu->kvm;
5427 5835
5836 vcpu->arch.emulate_ctxt.ops = &emulate_ops;
5837 vcpu->arch.walk_mmu = &vcpu->arch.mmu;
5428 vcpu->arch.mmu.root_hpa = INVALID_PAGE; 5838 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
5839 vcpu->arch.mmu.translate_gpa = translate_gpa;
5840 vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
5429 if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu)) 5841 if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
5430 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 5842 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
5431 else 5843 else
@@ -5438,6 +5850,9 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
5438 } 5850 }
5439 vcpu->arch.pio_data = page_address(page); 5851 vcpu->arch.pio_data = page_address(page);
5440 5852
5853 if (!kvm->arch.virtual_tsc_khz)
5854 kvm_arch_set_tsc_khz(kvm, max_tsc_khz);
5855
5441 r = kvm_mmu_create(vcpu); 5856 r = kvm_mmu_create(vcpu);
5442 if (r < 0) 5857 if (r < 0)
5443 goto fail_free_pio_data; 5858 goto fail_free_pio_data;
@@ -5497,7 +5912,7 @@ struct kvm *kvm_arch_create_vm(void)
5497 /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */ 5912 /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
5498 set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); 5913 set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
5499 5914
5500 rdtscll(kvm->arch.vm_init_tsc); 5915 spin_lock_init(&kvm->arch.tsc_write_lock);
5501 5916
5502 return kvm; 5917 return kvm;
5503} 5918}
@@ -5684,6 +6099,7 @@ void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
5684 kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) 6099 kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
5685 rflags |= X86_EFLAGS_TF; 6100 rflags |= X86_EFLAGS_TF;
5686 kvm_x86_ops->set_rflags(vcpu, rflags); 6101 kvm_x86_ops->set_rflags(vcpu, rflags);
6102 kvm_make_request(KVM_REQ_EVENT, vcpu);
5687} 6103}
5688EXPORT_SYMBOL_GPL(kvm_set_rflags); 6104EXPORT_SYMBOL_GPL(kvm_set_rflags);
5689 6105
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index b7a404722d2b..2cea414489f3 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -50,6 +50,11 @@ static inline int is_long_mode(struct kvm_vcpu *vcpu)
50#endif 50#endif
51} 51}
52 52
53static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
54{
55 return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
56}
57
53static inline int is_pae(struct kvm_vcpu *vcpu) 58static inline int is_pae(struct kvm_vcpu *vcpu)
54{ 59{
55 return kvm_read_cr4_bits(vcpu, X86_CR4_PAE); 60 return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
@@ -67,5 +72,8 @@ static inline int is_paging(struct kvm_vcpu *vcpu)
67 72
68void kvm_before_handle_nmi(struct kvm_vcpu *vcpu); 73void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
69void kvm_after_handle_nmi(struct kvm_vcpu *vcpu); 74void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
75int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq);
76
77void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data);
70 78
71#endif 79#endif
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index b8528426ab1f..5b0c18c1cce1 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -4,7 +4,7 @@
4 * User space memory access functions 4 * User space memory access functions
5 * 5 *
6 * These routines provide basic accessing functions to the user memory 6 * These routines provide basic accessing functions to the user memory
7 * space for the kernel. This header file provides fuctions such as: 7 * space for the kernel. This header file provides functions such as:
8 * 8 *
9 * This file is subject to the terms and conditions of the GNU General Public 9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file "COPYING" in the main directory of this archive 10 * License. See the file "COPYING" in the main directory of this archive
diff --git a/block/blk-core.c b/block/blk-core.c
index f8548876d7ea..f0834e2f5727 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1661,7 +1661,7 @@ EXPORT_SYMBOL(submit_bio);
1661 * the insertion using this generic function. 1661 * the insertion using this generic function.
1662 * 1662 *
1663 * This function should also be useful for request stacking drivers 1663 * This function should also be useful for request stacking drivers
1664 * in some cases below, so export this fuction. 1664 * in some cases below, so export this function.
1665 * Request stacking drivers like request-based dm may change the queue 1665 * Request stacking drivers like request-based dm may change the queue
1666 * limits while requests are in the queue (e.g. dm's table swapping). 1666 * limits while requests are in the queue (e.g. dm's table swapping).
1667 * Such request stacking drivers should check those requests against 1667 * Such request stacking drivers should check those requests against
diff --git a/crypto/Kconfig b/crypto/Kconfig
index e573077f1672..e4bac29a32e7 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -23,13 +23,12 @@ comment "Crypto core or helper"
23 23
24config CRYPTO_FIPS 24config CRYPTO_FIPS
25 bool "FIPS 200 compliance" 25 bool "FIPS 200 compliance"
26 depends on CRYPTO_ANSI_CPRNG 26 depends on CRYPTO_ANSI_CPRNG && !CRYPTO_MANAGER_DISABLE_TESTS
27 help 27 help
28 This option enables the fips boot option which is 28 This option enables the fips boot option which is
29 required if you want the system to operate in a FIPS 200 29 required if you want the system to operate in a FIPS 200
30 certification. You should say no unless you know what 30 certification. You should say no unless you know what
31 this is. Note that CRYPTO_ANSI_CPRNG is required if this 31 this is.
32 option is selected
33 32
34config CRYPTO_ALGAPI 33config CRYPTO_ALGAPI
35 tristate 34 tristate
@@ -365,7 +364,7 @@ config CRYPTO_RMD128
365 RIPEMD-160 should be used. 364 RIPEMD-160 should be used.
366 365
367 Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. 366 Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
368 See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html> 367 See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
369 368
370config CRYPTO_RMD160 369config CRYPTO_RMD160
371 tristate "RIPEMD-160 digest algorithm" 370 tristate "RIPEMD-160 digest algorithm"
@@ -382,7 +381,7 @@ config CRYPTO_RMD160
382 against RIPEMD-160. 381 against RIPEMD-160.
383 382
384 Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. 383 Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
385 See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html> 384 See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
386 385
387config CRYPTO_RMD256 386config CRYPTO_RMD256
388 tristate "RIPEMD-256 digest algorithm" 387 tristate "RIPEMD-256 digest algorithm"
@@ -394,7 +393,7 @@ config CRYPTO_RMD256
394 (than RIPEMD-128). 393 (than RIPEMD-128).
395 394
396 Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. 395 Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
397 See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html> 396 See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
398 397
399config CRYPTO_RMD320 398config CRYPTO_RMD320
400 tristate "RIPEMD-320 digest algorithm" 399 tristate "RIPEMD-320 digest algorithm"
@@ -406,7 +405,7 @@ config CRYPTO_RMD320
406 (than RIPEMD-160). 405 (than RIPEMD-160).
407 406
408 Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. 407 Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
409 See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html> 408 See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
410 409
411config CRYPTO_SHA1 410config CRYPTO_SHA1
412 tristate "SHA1 digest algorithm" 411 tristate "SHA1 digest algorithm"
@@ -461,7 +460,7 @@ config CRYPTO_WP512
461 Whirlpool will be part of the ISO/IEC 10118-3:2003(E) standard 460 Whirlpool will be part of the ISO/IEC 10118-3:2003(E) standard
462 461
463 See also: 462 See also:
464 <http://planeta.terra.com.br/informatica/paulobarreto/WhirlpoolPage.html> 463 <http://www.larc.usp.br/~pbarreto/WhirlpoolPage.html>
465 464
466config CRYPTO_GHASH_CLMUL_NI_INTEL 465config CRYPTO_GHASH_CLMUL_NI_INTEL
467 tristate "GHASH digest algorithm (CLMUL-NI accelerated)" 466 tristate "GHASH digest algorithm (CLMUL-NI accelerated)"
@@ -579,8 +578,8 @@ config CRYPTO_ANUBIS
579 in the NESSIE competition. 578 in the NESSIE competition.
580 579
581 See also: 580 See also:
582 <https://www.cosic.esat.kuleuven.ac.be/nessie/reports/> 581 <https://www.cosic.esat.kuleuven.be/nessie/reports/>
583 <http://planeta.terra.com.br/informatica/paulobarreto/AnubisPage.html> 582 <http://www.larc.usp.br/~pbarreto/AnubisPage.html>
584 583
585config CRYPTO_ARC4 584config CRYPTO_ARC4
586 tristate "ARC4 cipher algorithm" 585 tristate "ARC4 cipher algorithm"
@@ -659,7 +658,7 @@ config CRYPTO_KHAZAD
659 on 32-bit processors. Khazad uses a 128-bit key size. 658 on 32-bit processors. Khazad uses a 128-bit key size.
660 659
661 See also: 660 See also:
662 <http://planeta.terra.com.br/informatica/paulobarreto/KhazadPage.html> 661 <http://www.larc.usp.br/~pbarreto/KhazadPage.html>
663 662
664config CRYPTO_SALSA20 663config CRYPTO_SALSA20
665 tristate "Salsa20 stream cipher algorithm (EXPERIMENTAL)" 664 tristate "Salsa20 stream cipher algorithm (EXPERIMENTAL)"
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index ef71318976c7..e46d21ae26bc 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -3,6 +3,13 @@
3 * 3 *
4 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> 4 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
5 * 5 *
6 * Added AEAD support to cryptd.
7 * Authors: Tadeusz Struk (tadeusz.struk@intel.com)
8 * Adrian Hoban <adrian.hoban@intel.com>
9 * Gabriele Paoloni <gabriele.paoloni@intel.com>
10 * Aidan O'Mahony (aidan.o.mahony@intel.com)
11 * Copyright (c) 2010, Intel Corporation.
12 *
6 * This program is free software; you can redistribute it and/or modify it 13 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free 14 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option) 15 * Software Foundation; either version 2 of the License, or (at your option)
@@ -12,6 +19,7 @@
12 19
13#include <crypto/algapi.h> 20#include <crypto/algapi.h>
14#include <crypto/internal/hash.h> 21#include <crypto/internal/hash.h>
22#include <crypto/internal/aead.h>
15#include <crypto/cryptd.h> 23#include <crypto/cryptd.h>
16#include <crypto/crypto_wq.h> 24#include <crypto/crypto_wq.h>
17#include <linux/err.h> 25#include <linux/err.h>
@@ -44,6 +52,11 @@ struct hashd_instance_ctx {
44 struct cryptd_queue *queue; 52 struct cryptd_queue *queue;
45}; 53};
46 54
55struct aead_instance_ctx {
56 struct crypto_aead_spawn aead_spawn;
57 struct cryptd_queue *queue;
58};
59
47struct cryptd_blkcipher_ctx { 60struct cryptd_blkcipher_ctx {
48 struct crypto_blkcipher *child; 61 struct crypto_blkcipher *child;
49}; 62};
@@ -61,6 +74,14 @@ struct cryptd_hash_request_ctx {
61 struct shash_desc desc; 74 struct shash_desc desc;
62}; 75};
63 76
77struct cryptd_aead_ctx {
78 struct crypto_aead *child;
79};
80
81struct cryptd_aead_request_ctx {
82 crypto_completion_t complete;
83};
84
64static void cryptd_queue_worker(struct work_struct *work); 85static void cryptd_queue_worker(struct work_struct *work);
65 86
66static int cryptd_init_queue(struct cryptd_queue *queue, 87static int cryptd_init_queue(struct cryptd_queue *queue,
@@ -601,6 +622,144 @@ out_put_alg:
601 return err; 622 return err;
602} 623}
603 624
625static void cryptd_aead_crypt(struct aead_request *req,
626 struct crypto_aead *child,
627 int err,
628 int (*crypt)(struct aead_request *req))
629{
630 struct cryptd_aead_request_ctx *rctx;
631 rctx = aead_request_ctx(req);
632
633 if (unlikely(err == -EINPROGRESS))
634 goto out;
635 aead_request_set_tfm(req, child);
636 err = crypt(req);
637 req->base.complete = rctx->complete;
638out:
639 local_bh_disable();
640 rctx->complete(&req->base, err);
641 local_bh_enable();
642}
643
644static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
645{
646 struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
647 struct crypto_aead *child = ctx->child;
648 struct aead_request *req;
649
650 req = container_of(areq, struct aead_request, base);
651 cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->encrypt);
652}
653
654static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
655{
656 struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
657 struct crypto_aead *child = ctx->child;
658 struct aead_request *req;
659
660 req = container_of(areq, struct aead_request, base);
661 cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->decrypt);
662}
663
664static int cryptd_aead_enqueue(struct aead_request *req,
665 crypto_completion_t complete)
666{
667 struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
668 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
669 struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));
670
671 rctx->complete = req->base.complete;
672 req->base.complete = complete;
673 return cryptd_enqueue_request(queue, &req->base);
674}
675
676static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
677{
678 return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
679}
680
681static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
682{
683 return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
684}
685
686static int cryptd_aead_init_tfm(struct crypto_tfm *tfm)
687{
688 struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
689 struct aead_instance_ctx *ictx = crypto_instance_ctx(inst);
690 struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
691 struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm);
692 struct crypto_aead *cipher;
693
694 cipher = crypto_spawn_aead(spawn);
695 if (IS_ERR(cipher))
696 return PTR_ERR(cipher);
697
698 crypto_aead_set_flags(cipher, CRYPTO_TFM_REQ_MAY_SLEEP);
699 ctx->child = cipher;
700 tfm->crt_aead.reqsize = sizeof(struct cryptd_aead_request_ctx);
701 return 0;
702}
703
704static void cryptd_aead_exit_tfm(struct crypto_tfm *tfm)
705{
706 struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm);
707 crypto_free_aead(ctx->child);
708}
709
710static int cryptd_create_aead(struct crypto_template *tmpl,
711 struct rtattr **tb,
712 struct cryptd_queue *queue)
713{
714 struct aead_instance_ctx *ctx;
715 struct crypto_instance *inst;
716 struct crypto_alg *alg;
717 int err;
718
719 alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_AEAD,
720 CRYPTO_ALG_TYPE_MASK);
721 if (IS_ERR(alg))
722 return PTR_ERR(alg);
723
724 inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
725 err = PTR_ERR(inst);
726 if (IS_ERR(inst))
727 goto out_put_alg;
728
729 ctx = crypto_instance_ctx(inst);
730 ctx->queue = queue;
731
732 err = crypto_init_spawn(&ctx->aead_spawn.base, alg, inst,
733 CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
734 if (err)
735 goto out_free_inst;
736
737 inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
738 inst->alg.cra_type = alg->cra_type;
739 inst->alg.cra_ctxsize = sizeof(struct cryptd_aead_ctx);
740 inst->alg.cra_init = cryptd_aead_init_tfm;
741 inst->alg.cra_exit = cryptd_aead_exit_tfm;
742 inst->alg.cra_aead.setkey = alg->cra_aead.setkey;
743 inst->alg.cra_aead.setauthsize = alg->cra_aead.setauthsize;
744 inst->alg.cra_aead.geniv = alg->cra_aead.geniv;
745 inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize;
746 inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;
747 inst->alg.cra_aead.encrypt = cryptd_aead_encrypt_enqueue;
748 inst->alg.cra_aead.decrypt = cryptd_aead_decrypt_enqueue;
749 inst->alg.cra_aead.givencrypt = alg->cra_aead.givencrypt;
750 inst->alg.cra_aead.givdecrypt = alg->cra_aead.givdecrypt;
751
752 err = crypto_register_instance(tmpl, inst);
753 if (err) {
754 crypto_drop_spawn(&ctx->aead_spawn.base);
755out_free_inst:
756 kfree(inst);
757 }
758out_put_alg:
759 crypto_mod_put(alg);
760 return err;
761}
762
604static struct cryptd_queue queue; 763static struct cryptd_queue queue;
605 764
606static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb) 765static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
@@ -616,6 +775,8 @@ static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
616 return cryptd_create_blkcipher(tmpl, tb, &queue); 775 return cryptd_create_blkcipher(tmpl, tb, &queue);
617 case CRYPTO_ALG_TYPE_DIGEST: 776 case CRYPTO_ALG_TYPE_DIGEST:
618 return cryptd_create_hash(tmpl, tb, &queue); 777 return cryptd_create_hash(tmpl, tb, &queue);
778 case CRYPTO_ALG_TYPE_AEAD:
779 return cryptd_create_aead(tmpl, tb, &queue);
619 } 780 }
620 781
621 return -EINVAL; 782 return -EINVAL;
@@ -625,16 +786,21 @@ static void cryptd_free(struct crypto_instance *inst)
625{ 786{
626 struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst); 787 struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
627 struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst); 788 struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
789 struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);
628 790
629 switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) { 791 switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
630 case CRYPTO_ALG_TYPE_AHASH: 792 case CRYPTO_ALG_TYPE_AHASH:
631 crypto_drop_shash(&hctx->spawn); 793 crypto_drop_shash(&hctx->spawn);
632 kfree(ahash_instance(inst)); 794 kfree(ahash_instance(inst));
633 return; 795 return;
796 case CRYPTO_ALG_TYPE_AEAD:
797 crypto_drop_spawn(&aead_ctx->aead_spawn.base);
798 kfree(inst);
799 return;
800 default:
801 crypto_drop_spawn(&ctx->spawn);
802 kfree(inst);
634 } 803 }
635
636 crypto_drop_spawn(&ctx->spawn);
637 kfree(inst);
638} 804}
639 805
640static struct crypto_template cryptd_tmpl = { 806static struct crypto_template cryptd_tmpl = {
@@ -724,6 +890,40 @@ void cryptd_free_ahash(struct cryptd_ahash *tfm)
724} 890}
725EXPORT_SYMBOL_GPL(cryptd_free_ahash); 891EXPORT_SYMBOL_GPL(cryptd_free_ahash);
726 892
893struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
894 u32 type, u32 mask)
895{
896 char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
897 struct crypto_aead *tfm;
898
899 if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
900 "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
901 return ERR_PTR(-EINVAL);
902 tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
903 if (IS_ERR(tfm))
904 return ERR_CAST(tfm);
905 if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
906 crypto_free_aead(tfm);
907 return ERR_PTR(-EINVAL);
908 }
909 return __cryptd_aead_cast(tfm);
910}
911EXPORT_SYMBOL_GPL(cryptd_alloc_aead);
912
913struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
914{
915 struct cryptd_aead_ctx *ctx;
916 ctx = crypto_aead_ctx(&tfm->base);
917 return ctx->child;
918}
919EXPORT_SYMBOL_GPL(cryptd_aead_child);
920
921void cryptd_free_aead(struct cryptd_aead *tfm)
922{
923 crypto_free_aead(&tfm->base);
924}
925EXPORT_SYMBOL_GPL(cryptd_free_aead);
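
The three exports above give users of cryptd the usual wrapper lifecycle for AEADs: allocate the "cryptd(...)" instance, reach the synchronous child transform when work can be done inline, and free the wrapper when finished. A hedged usage sketch, assuming the declarations this patch adds to <crypto/cryptd.h>; the algorithm name is a placeholder, not something this patch defines.

	#include <crypto/cryptd.h>
	#include <linux/err.h>

	static int example_use_cryptd_aead(void)
	{
		struct cryptd_aead *ctfm;
		struct crypto_aead *child;

		/* "some-aead-alg" is a placeholder; real users wrap an
		 * async-capable AEAD implementation here. */
		ctfm = cryptd_alloc_aead("some-aead-alg", 0, 0);
		if (IS_ERR(ctfm))
			return PTR_ERR(ctfm);

		/* The child is the underlying synchronous AEAD, usable
		 * directly from contexts that are allowed to sleep. */
		child = cryptd_aead_child(ctfm);
		(void)child;

		cryptd_free_aead(ctfm);
		return 0;
	}
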
926
727static int __init cryptd_init(void) 927static int __init cryptd_init(void)
728{ 928{
729 int err; 929 int err;
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index ec2c777fcdb0..7aed5c792597 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -1588,7 +1588,7 @@ static int __devinit bfin_atapi_probe(struct platform_device *pdev)
1588 host->ports[0]->ioaddr.ctl_addr = (void *)res->start; 1588 host->ports[0]->ioaddr.ctl_addr = (void *)res->start;
1589 1589
1590 if (peripheral_request_list(atapi_io_port, "atapi-io-port")) { 1590 if (peripheral_request_list(atapi_io_port, "atapi-io-port")) {
1591 dev_err(&pdev->dev, "Requesting Peripherals faild\n"); 1591 dev_err(&pdev->dev, "Requesting Peripherals failed\n");
1592 return -EFAULT; 1592 return -EFAULT;
1593 } 1593 }
1594 1594
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index bf88f71a21f4..aa0e0c51cc08 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -15,8 +15,8 @@
15 * May be copied or modified under the terms of the GNU General Public License 15 * May be copied or modified under the terms of the GNU General Public License
16 * Based in part on the ITE vendor provided SCSI driver. 16 * Based in part on the ITE vendor provided SCSI driver.
17 * 17 *
18 * Documentation available from 18 * Documentation available from IT8212F_V04.pdf
19 * http://www.ite.com.tw/pc/IT8212F_V04.pdf 19 * http://www.ite.com.tw/EN/products_more.aspx?CategoryID=3&ID=5,91
20 * Some other documents are NDA. 20 * Some other documents are NDA.
21 * 21 *
22 * The ITE8212 isn't exactly a standard IDE controller. It has two 22 * The ITE8212 isn't exactly a standard IDE controller. It has two
diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig
index be7461c9a87e..31c60101a69a 100644
--- a/drivers/atm/Kconfig
+++ b/drivers/atm/Kconfig
@@ -301,7 +301,7 @@ config ATM_IA
301 control memory (128K-1KVC, 512K-4KVC), the size of the packet 301 control memory (128K-1KVC, 512K-4KVC), the size of the packet
302 memory (128K, 512K, 1M), and the PHY type (Single/Multi mode OC3, 302 memory (128K, 512K, 1M), and the PHY type (Single/Multi mode OC3,
303 UTP155, UTP25, DS3 and E3). Go to: 303 UTP155, UTP25, DS3 and E3). Go to:
304 <http://www.iphase.com/products/ClassSheet.cfm?ClassID=ATM> 304 <http://www.iphase.com/>
305 for more info about the cards. Say Y (or M to compile as a module 305 for more info about the cards. Say Y (or M to compile as a module
306 named iphase) here if you have one of these cards. 306 named iphase) here if you have one of these cards.
307 307
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 2cb49a93b1e6..6ed645411c40 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -233,7 +233,7 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
233 __func__, retval); 233 __func__, retval);
234 } 234 }
235 235
236 /* have the device type specific fuction add its stuff */ 236 /* have the device type specific function add its stuff */
237 if (dev->type && dev->type->uevent) { 237 if (dev->type && dev->type->uevent) {
238 retval = dev->type->uevent(dev, env); 238 retval = dev->type->uevent(dev, env);
239 if (retval) 239 if (retval)
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
index 5ddf67e76f8b..fcd867d923ba 100644
--- a/drivers/char/agp/Kconfig
+++ b/drivers/char/agp/Kconfig
@@ -34,7 +34,7 @@ config AGP_ALI
34 X on the following ALi chipsets. The supported chipsets 34 X on the following ALi chipsets. The supported chipsets
35 include M1541, M1621, M1631, M1632, M1641,M1647,and M1651. 35 include M1541, M1621, M1631, M1632, M1641,M1647,and M1651.
36 For the ALi-chipset question, ALi suggests you refer to 36 For the ALi-chipset question, ALi suggests you refer to
37 <http://www.ali.com.tw/eng/support/index.shtml>. 37 <http://www.ali.com.tw/>.
38 38
39 The M1541 chipset can do AGP 1x and 2x, but note that there is an 39 The M1541 chipset can do AGP 1x and 2x, but note that there is an
40 acknowledged incompatibility with Matrox G200 cards. Due to 40 acknowledged incompatibility with Matrox G200 cards. Due to
diff --git a/drivers/char/agp/i460-agp.c b/drivers/char/agp/i460-agp.c
index e763d3312ce7..75b763cb3ea1 100644
--- a/drivers/char/agp/i460-agp.c
+++ b/drivers/char/agp/i460-agp.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * For documentation on the i460 AGP interface, see Chapter 7 (AGP Subsystem) of 2 * For documentation on the i460 AGP interface, see Chapter 7 (AGP Subsystem) of
3 * the "Intel 460GTX Chipset Software Developer's Manual": 3 * the "Intel 460GTX Chipset Software Developer's Manual":
4 * http://developer.intel.com/design/itanium/downloads/24870401s.htm 4 * http://www.intel.com/design/archives/itanium/downloads/248704.htm
5 */ 5 */
6/* 6/*
7 * 460GX support by Chris Ahna <christopher.j.ahna@intel.com> 7 * 460GX support by Chris Ahna <christopher.j.ahna@intel.com>
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index 3022801669b1..45b987c9889e 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -7,8 +7,8 @@
7 * Intel Corporation, Microsoft Corporation. Advanced Power Management 7 * Intel Corporation, Microsoft Corporation. Advanced Power Management
8 * (APM) BIOS Interface Specification, Revision 1.2, February 1996. 8 * (APM) BIOS Interface Specification, Revision 1.2, February 1996.
9 * 9 *
10 * [This document is available from Microsoft at: 10 * This document is available from Microsoft at:
11 * http://www.microsoft.com/hwdev/busbios/amp_12.htm] 11 * http://www.microsoft.com/whdc/archive/amp_12.mspx
12 */ 12 */
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/poll.h> 14#include <linux/poll.h>
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
index 7b98c067190a..3ed20e8abc0d 100644
--- a/drivers/char/ipmi/ipmi_bt_sm.c
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -2,7 +2,7 @@
2 * ipmi_bt_sm.c 2 * ipmi_bt_sm.c
3 * 3 *
4 * The state machine for an Open IPMI BT sub-driver under ipmi_si.c, part 4 * The state machine for an Open IPMI BT sub-driver under ipmi_si.c, part
5 * of the driver architecture at http://sourceforge.net/project/openipmi 5 * of the driver architecture at http://sourceforge.net/projects/openipmi
6 * 6 *
7 * Author: Rocky Craig <first.last@hp.com> 7 * Author: Rocky Craig <first.last@hp.com>
8 * 8 *
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 7bd7c45b53ef..e537610d2f09 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -1974,8 +1974,7 @@ static int acpi_gpe_irq_setup(struct smi_info *info)
1974 1974
1975/* 1975/*
1976 * Defined at 1976 * Defined at
1977 * http://h21007.www2.hp.com/portal/download/files 1977 * http://h21007.www2.hp.com/portal/download/files/unprot/hpspmi.pdf
1978 * /unprot/hpspmi.pdf
1979 */ 1978 */
1980struct SPMITable { 1979struct SPMITable {
1981 s8 Signature[4]; 1980 s8 Signature[4];
diff --git a/drivers/char/n_r3964.c b/drivers/char/n_r3964.c
index a98290d7a2c5..88dda0c45ee0 100644
--- a/drivers/char/n_r3964.c
+++ b/drivers/char/n_r3964.c
@@ -4,7 +4,6 @@
4 * Copyright by 4 * Copyright by
5 * Philips Automation Projects 5 * Philips Automation Projects
6 * Kassel (Germany) 6 * Kassel (Germany)
7 * http://www.pap-philips.de
8 * ----------------------------------------------------------- 7 * -----------------------------------------------------------
9 * This software may be used and distributed according to the terms of 8 * This software may be used and distributed according to the terms of
10 * the GNU General Public License, incorporated herein by reference. 9 * the GNU General Public License, incorporated herein by reference.
diff --git a/drivers/char/pcmcia/Kconfig b/drivers/char/pcmcia/Kconfig
index ffa0efce0aed..6614416a8623 100644
--- a/drivers/char/pcmcia/Kconfig
+++ b/drivers/char/pcmcia/Kconfig
@@ -28,7 +28,7 @@ config CARDMAN_4000
28 28
29 This kernel driver requires additional userspace support, either 29 This kernel driver requires additional userspace support, either
30 by the vendor-provided PC/SC ifd_handler (http://www.omnikey.com/), 30 by the vendor-provided PC/SC ifd_handler (http://www.omnikey.com/),
31 or via the cm4000 backend of OpenCT (http://www.opensc.com/). 31 or via the cm4000 backend of OpenCT (http://www.opensc-project.org/opensc).
32 32
33config CARDMAN_4040 33config CARDMAN_4040
34 tristate "Omnikey CardMan 4040 support" 34 tristate "Omnikey CardMan 4040 support"
@@ -41,7 +41,7 @@ config CARDMAN_4040
41 in I/O space. To use the kernel driver, you will need either the 41 in I/O space. To use the kernel driver, you will need either the
42 PC/SC ifdhandler provided from the Omnikey homepage 42 PC/SC ifdhandler provided from the Omnikey homepage
43 (http://www.omnikey.com/), or a current development version of OpenCT 43 (http://www.omnikey.com/), or a current development version of OpenCT
44 (http://www.opensc.org/). 44 (http://www.opensc-project.org/opensc).
45 45
46config IPWIRELESS 46config IPWIRELESS
47 tristate "IPWireless 3G UMTS PCMCIA card support" 47 tristate "IPWireless 3G UMTS PCMCIA card support"
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 6835c23e9a51..d962f25dcc2a 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -1666,7 +1666,7 @@ static int cmm_open(struct inode *inode, struct file *filp)
1666 /* opening will always block since the 1666 /* opening will always block since the
1667 * monitor will be started by open, which 1667 * monitor will be started by open, which
1668 * means we have to wait for ATR becoming 1668 * means we have to wait for ATR becoming
1669 * vaild = block until valid (or card 1669 * valid = block until valid (or card
1670 * inserted) 1670 * inserted)
1671 */ 1671 */
1672 if (filp->f_flags & O_NONBLOCK) { 1672 if (filp->f_flags & O_NONBLOCK) {
diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
index 8ef16490810c..4bef6ab83622 100644
--- a/drivers/char/stallion.c
+++ b/drivers/char/stallion.c
@@ -3181,7 +3181,7 @@ static void stl_cd1400flush(struct stlport *portp)
3181 3181
3182/* 3182/*
3183 * Return the current state of data flow on this port. This is only 3183 * Return the current state of data flow on this port. This is only
3184 * really interresting when determining if data has fully completed 3184 * really interesting when determining if data has fully completed
3185 * transmission or not... This is easy for the cd1400, it accurately 3185 * transmission or not... This is easy for the cd1400, it accurately
3186 * maintains the busy port flag. 3186 * maintains the busy port flag.
3187 */ 3187 */
@@ -4131,7 +4131,7 @@ static void stl_sc26198flush(struct stlport *portp)
4131 4131
4132/* 4132/*
4133 * Return the current state of data flow on this port. This is only 4133 * Return the current state of data flow on this port. This is only
4134 * really interresting when determining if data has fully completed 4134 * really interesting when determining if data has fully completed
4135 * transmission or not... The sc26198 interrupt scheme cannot 4135 * transmission or not... The sc26198 interrupt scheme cannot
4136 * determine when all data has actually drained, so we need to 4136 * determine when all data has actually drained, so we need to
4137 * check the port statusy register to be sure. 4137 * check the port statusy register to be sure.
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 4dc338f3d1aa..f6595aba4f0f 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -58,6 +58,6 @@ config TCG_INFINEON
58 To compile this driver as a module, choose M here; the module 58 To compile this driver as a module, choose M here; the module
59 will be called tpm_infineon. 59 will be called tpm_infineon.
60 Further information on this driver and the supported hardware 60 Further information on this driver and the supported hardware
61 can be found at http://www.prosec.rub.de/tpm 61 can be found at http://www.trust.rub.de/projects/linux-device-driver-infineon-tpm/
62 62
63endif # TCG_TPM 63endif # TCG_TPM
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
index f58440791e65..76da32e11f18 100644
--- a/drivers/char/tpm/tpm_infineon.c
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -7,7 +7,7 @@
7 * Copyright (C) 2005, Marcel Selhorst <m.selhorst@sirrix.com> 7 * Copyright (C) 2005, Marcel Selhorst <m.selhorst@sirrix.com>
8 * Sirrix AG - security technologies, http://www.sirrix.com and 8 * Sirrix AG - security technologies, http://www.sirrix.com and
9 * Applied Data Security Group, Ruhr-University Bochum, Germany 9 * Applied Data Security Group, Ruhr-University Bochum, Germany
10 * Project-Homepage: http://www.prosec.rub.de/tpm 10 * Project-Homepage: http://www.trust.rub.de/projects/linux-device-driver-infineon-tpm/
11 * 11 *
12 * This program is free software; you can redistribute it and/or 12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License as 13 * modify it under the terms of the GNU General Public License as
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index ea0b3863ad0f..eab2cf7a0269 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -172,6 +172,7 @@ config CRYPTO_DEV_MV_CESA
172 172
173config CRYPTO_DEV_NIAGARA2 173config CRYPTO_DEV_NIAGARA2
174 tristate "Niagara2 Stream Processing Unit driver" 174 tristate "Niagara2 Stream Processing Unit driver"
175 select CRYPTO_DES
175 select CRYPTO_ALGAPI 176 select CRYPTO_ALGAPI
176 depends on SPARC64 177 depends on SPARC64
177 help 178 help
@@ -243,4 +244,12 @@ config CRYPTO_DEV_OMAP_SHAM
243 OMAP processors have SHA1/MD5 hw accelerator. Select this if you 244 OMAP processors have SHA1/MD5 hw accelerator. Select this if you
244 want to use the OMAP module for SHA1/MD5 algorithms. 245 want to use the OMAP module for SHA1/MD5 algorithms.
245 246
247config CRYPTO_DEV_OMAP_AES
248 tristate "Support for OMAP AES hw engine"
249 depends on ARCH_OMAP2 || ARCH_OMAP3
250 select CRYPTO_AES
251 help
252 OMAP processors have AES module accelerator. Select this if you
253 want to use the OMAP module for AES algorithms.
254
246endif # CRYPTO_HW 255endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 6dbbe00c4524..256697330a41 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -2,11 +2,12 @@ obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
2obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o 2obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
3obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o 3obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
4obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o 4obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
5n2_crypto-objs := n2_core.o n2_asm.o 5n2_crypto-y := n2_core.o n2_asm.o
6obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o 6obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
7obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o 7obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
8obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o 8obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
9obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o 9obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
10obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/ 10obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
11obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o 11obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
12obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
12 13
diff --git a/drivers/crypto/amcc/Makefile b/drivers/crypto/amcc/Makefile
index aa376e8d5ed5..5c0c62b65d69 100644
--- a/drivers/crypto/amcc/Makefile
+++ b/drivers/crypto/amcc/Makefile
@@ -1,2 +1,2 @@
1obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += crypto4xx.o 1obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += crypto4xx.o
2crypto4xx-objs := crypto4xx_core.o crypto4xx_alg.o crypto4xx_sa.o 2crypto4xx-y := crypto4xx_core.o crypto4xx_alg.o crypto4xx_sa.o
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index e449ac5627a5..0eac3da566ba 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -2700,8 +2700,7 @@ static void __devexit hifn_remove(struct pci_dev *pdev)
2700 dev = pci_get_drvdata(pdev); 2700 dev = pci_get_drvdata(pdev);
2701 2701
2702 if (dev) { 2702 if (dev) {
2703 cancel_delayed_work(&dev->work); 2703 cancel_delayed_work_sync(&dev->work);
2704 flush_scheduled_work();
2705 2704
2706 hifn_unregister_rng(dev); 2705 hifn_unregister_rng(dev);
2707 hifn_unregister_alg(dev); 2706 hifn_unregister_alg(dev);
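The hifn_795x hunk above replaces a cancel_delayed_work() + flush_scheduled_work() pair with cancel_delayed_work_sync(), which cancels a pending timer and also waits for an already-running callback, without flushing the whole shared workqueue. A minimal sketch of that pattern, with hypothetical example_dev/example_work_fn names:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_dev {
	struct delayed_work work;
};

static void example_work_fn(struct work_struct *work)
{
	/* periodic housekeeping; may re-arm itself with schedule_delayed_work() */
}

static void example_dev_start(struct example_dev *dev)
{
	INIT_DELAYED_WORK(&dev->work, example_work_fn);
	schedule_delayed_work(&dev->work, HZ);
}

static void example_dev_remove(struct example_dev *dev)
{
	/* cancels a pending timer and waits for a running callback to finish,
	 * so no separate flush_scheduled_work() is needed afterwards */
	cancel_delayed_work_sync(&dev->work);
}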
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
new file mode 100644
index 000000000000..799ca517c121
--- /dev/null
+++ b/drivers/crypto/omap-aes.c
@@ -0,0 +1,948 @@
1/*
2 * Cryptographic API.
3 *
4 * Support for OMAP AES HW acceleration.
5 *
6 * Copyright (c) 2010 Nokia Corporation
7 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as published
11 * by the Free Software Foundation.
12 *
13 */
14
15#define pr_fmt(fmt) "%s: " fmt, __func__
16
17#include <linux/err.h>
18#include <linux/module.h>
19#include <linux/init.h>
20#include <linux/errno.h>
21#include <linux/kernel.h>
22#include <linux/clk.h>
23#include <linux/platform_device.h>
24#include <linux/scatterlist.h>
25#include <linux/dma-mapping.h>
26#include <linux/io.h>
27#include <linux/crypto.h>
28#include <linux/interrupt.h>
29#include <crypto/scatterwalk.h>
30#include <crypto/aes.h>
31
32#include <plat/cpu.h>
33#include <plat/dma.h>
34
35/* OMAP TRM gives bitfields as start:end, where start is the higher bit
36 number. For example 7:0 */
37#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end))
38#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
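/* Worked example (follows directly from the two macros above):
 *   FLD_MASK(7, 0)   == 0xff
 *   FLD_MASK(4, 3)   == 0x18, i.e. the AES_REG_CTRL_KEY_SIZE field
 *   FLD_VAL(2, 4, 3) == 0x10, the 4:3 encoding of a 192-bit key,
 *   since omap_aes_write_ctrl() below writes (keylen >> 3) - 1 into 4:3.
 */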
39
40#define AES_REG_KEY(x) (0x1C - ((x ^ 0x01) * 0x04))
41#define AES_REG_IV(x) (0x20 + ((x) * 0x04))
42
43#define AES_REG_CTRL 0x30
44#define AES_REG_CTRL_CTR_WIDTH (1 << 7)
45#define AES_REG_CTRL_CTR (1 << 6)
46#define AES_REG_CTRL_CBC (1 << 5)
47#define AES_REG_CTRL_KEY_SIZE (3 << 3)
48#define AES_REG_CTRL_DIRECTION (1 << 2)
49#define AES_REG_CTRL_INPUT_READY (1 << 1)
50#define AES_REG_CTRL_OUTPUT_READY (1 << 0)
51
52#define AES_REG_DATA 0x34
53#define AES_REG_DATA_N(x) (0x34 + ((x) * 0x04))
54
55#define AES_REG_REV 0x44
56#define AES_REG_REV_MAJOR 0xF0
57#define AES_REG_REV_MINOR 0x0F
58
59#define AES_REG_MASK 0x48
60#define AES_REG_MASK_SIDLE (1 << 6)
61#define AES_REG_MASK_START (1 << 5)
62#define AES_REG_MASK_DMA_OUT_EN (1 << 3)
63#define AES_REG_MASK_DMA_IN_EN (1 << 2)
64#define AES_REG_MASK_SOFTRESET (1 << 1)
65#define AES_REG_AUTOIDLE (1 << 0)
66
67#define AES_REG_SYSSTATUS 0x4C
68#define AES_REG_SYSSTATUS_RESETDONE (1 << 0)
69
70#define DEFAULT_TIMEOUT (5*HZ)
71
72#define FLAGS_MODE_MASK 0x000f
73#define FLAGS_ENCRYPT BIT(0)
74#define FLAGS_CBC BIT(1)
75#define FLAGS_GIV BIT(2)
76
77#define FLAGS_NEW_KEY BIT(4)
78#define FLAGS_NEW_IV BIT(5)
79#define FLAGS_INIT BIT(6)
80#define FLAGS_FAST BIT(7)
81#define FLAGS_BUSY 8
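/* Note: unlike the BIT() masks above, FLAGS_BUSY is a bit number; it is
 * only used with test_and_set_bit()/clear_bit() on dd->flags. */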
82
83struct omap_aes_ctx {
84 struct omap_aes_dev *dd;
85
86 int keylen;
87 u32 key[AES_KEYSIZE_256 / sizeof(u32)];
88 unsigned long flags;
89};
90
91struct omap_aes_reqctx {
92 unsigned long mode;
93};
94
95#define OMAP_AES_QUEUE_LENGTH 1
96#define OMAP_AES_CACHE_SIZE 0
97
98struct omap_aes_dev {
99 struct list_head list;
100 unsigned long phys_base;
101 void __iomem *io_base;
102 struct clk *iclk;
103 struct omap_aes_ctx *ctx;
104 struct device *dev;
105 unsigned long flags;
106
107 u32 *iv;
108 u32 ctrl;
109
110 spinlock_t lock;
111 struct crypto_queue queue;
112
113 struct tasklet_struct task;
114
115 struct ablkcipher_request *req;
116 size_t total;
117 struct scatterlist *in_sg;
118 size_t in_offset;
119 struct scatterlist *out_sg;
120 size_t out_offset;
121
122 size_t buflen;
123 void *buf_in;
124 size_t dma_size;
125 int dma_in;
126 int dma_lch_in;
127 dma_addr_t dma_addr_in;
128 void *buf_out;
129 int dma_out;
130 int dma_lch_out;
131 dma_addr_t dma_addr_out;
132};
133
134/* keep registered devices data here */
135static LIST_HEAD(dev_list);
136static DEFINE_SPINLOCK(list_lock);
137
138static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
139{
140 return __raw_readl(dd->io_base + offset);
141}
142
143static inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
144 u32 value)
145{
146 __raw_writel(value, dd->io_base + offset);
147}
148
149static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset,
150 u32 value, u32 mask)
151{
152 u32 val;
153
154 val = omap_aes_read(dd, offset);
155 val &= ~mask;
156 val |= value;
157 omap_aes_write(dd, offset, val);
158}
159
160static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset,
161 u32 *value, int count)
162{
163 for (; count--; value++, offset += 4)
164 omap_aes_write(dd, offset, *value);
165}
166
167static int omap_aes_wait(struct omap_aes_dev *dd, u32 offset, u32 bit)
168{
169 unsigned long timeout = jiffies + DEFAULT_TIMEOUT;
170
171 while (!(omap_aes_read(dd, offset) & bit)) {
172 if (time_is_before_jiffies(timeout)) {
173 dev_err(dd->dev, "omap-aes timeout\n");
174 return -ETIMEDOUT;
175 }
176 }
177 return 0;
178}
179
180static int omap_aes_hw_init(struct omap_aes_dev *dd)
181{
182 int err = 0;
183
184 clk_enable(dd->iclk);
185 if (!(dd->flags & FLAGS_INIT)) {
186 /* is it necessary to reset before every operation? */
187 omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_SOFTRESET,
188 AES_REG_MASK_SOFTRESET);
189 /*
190 * prevent OCP bus error (SRESP) in case an access to the module
191 * is performed while the module is coming out of soft reset
192 */
193 __asm__ __volatile__("nop");
194 __asm__ __volatile__("nop");
195
196 err = omap_aes_wait(dd, AES_REG_SYSSTATUS,
197 AES_REG_SYSSTATUS_RESETDONE);
198 if (!err)
199 dd->flags |= FLAGS_INIT;
200 }
201
202 return err;
203}
204
205static void omap_aes_hw_cleanup(struct omap_aes_dev *dd)
206{
207 clk_disable(dd->iclk);
208}
209
210static void omap_aes_write_ctrl(struct omap_aes_dev *dd)
211{
212 unsigned int key32;
213 int i;
214 u32 val, mask;
215
216 val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
217 if (dd->flags & FLAGS_CBC)
218 val |= AES_REG_CTRL_CBC;
219 if (dd->flags & FLAGS_ENCRYPT)
220 val |= AES_REG_CTRL_DIRECTION;
221
222 if (dd->ctrl == val && !(dd->flags & FLAGS_NEW_IV) &&
223 !(dd->ctx->flags & FLAGS_NEW_KEY))
224 goto out;
225
226 /* only need to write control registers for new settings */
227
228 dd->ctrl = val;
229
230 val = 0;
231 if (dd->dma_lch_out >= 0)
232 val |= AES_REG_MASK_DMA_OUT_EN;
233 if (dd->dma_lch_in >= 0)
234 val |= AES_REG_MASK_DMA_IN_EN;
235
236 mask = AES_REG_MASK_DMA_IN_EN | AES_REG_MASK_DMA_OUT_EN;
237
238 omap_aes_write_mask(dd, AES_REG_MASK, val, mask);
239
240 pr_debug("Set key\n");
241 key32 = dd->ctx->keylen / sizeof(u32);
242 /* set a key */
243 for (i = 0; i < key32; i++) {
244 omap_aes_write(dd, AES_REG_KEY(i),
245 __le32_to_cpu(dd->ctx->key[i]));
246 }
247 dd->ctx->flags &= ~FLAGS_NEW_KEY;
248
249 if (dd->flags & FLAGS_NEW_IV) {
250 pr_debug("Set IV\n");
251 omap_aes_write_n(dd, AES_REG_IV(0), dd->iv, 4);
252 dd->flags &= ~FLAGS_NEW_IV;
253 }
254
255 mask = AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION |
256 AES_REG_CTRL_KEY_SIZE;
257
258 omap_aes_write_mask(dd, AES_REG_CTRL, dd->ctrl, mask);
259
260out:
261 /* start DMA or disable idle mode */
262 omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_START,
263 AES_REG_MASK_START);
264}
265
266static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
267{
268 struct omap_aes_dev *dd = NULL, *tmp;
269
270 spin_lock_bh(&list_lock);
271 if (!ctx->dd) {
272 list_for_each_entry(tmp, &dev_list, list) {
 273 /* FIXME: take first available aes core */
274 dd = tmp;
275 break;
276 }
277 ctx->dd = dd;
278 } else {
279 /* already found before */
280 dd = ctx->dd;
281 }
282 spin_unlock_bh(&list_lock);
283
284 return dd;
285}
286
287static void omap_aes_dma_callback(int lch, u16 ch_status, void *data)
288{
289 struct omap_aes_dev *dd = data;
290
291 if (lch == dd->dma_lch_out)
292 tasklet_schedule(&dd->task);
293}
294
295static int omap_aes_dma_init(struct omap_aes_dev *dd)
296{
297 int err = -ENOMEM;
298
299 dd->dma_lch_out = -1;
300 dd->dma_lch_in = -1;
301
302 dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
303 dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
304 dd->buflen = PAGE_SIZE << OMAP_AES_CACHE_SIZE;
305 dd->buflen &= ~(AES_BLOCK_SIZE - 1);
306
307 if (!dd->buf_in || !dd->buf_out) {
308 dev_err(dd->dev, "unable to alloc pages.\n");
309 goto err_alloc;
310 }
311
312 /* MAP here */
313 dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in, dd->buflen,
314 DMA_TO_DEVICE);
315 if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
316 dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
317 err = -EINVAL;
318 goto err_map_in;
319 }
320
321 dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out, dd->buflen,
322 DMA_FROM_DEVICE);
323 if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
324 dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
325 err = -EINVAL;
326 goto err_map_out;
327 }
328
329 err = omap_request_dma(dd->dma_in, "omap-aes-rx",
330 omap_aes_dma_callback, dd, &dd->dma_lch_in);
331 if (err) {
332 dev_err(dd->dev, "Unable to request DMA channel\n");
333 goto err_dma_in;
334 }
335 err = omap_request_dma(dd->dma_out, "omap-aes-tx",
336 omap_aes_dma_callback, dd, &dd->dma_lch_out);
337 if (err) {
338 dev_err(dd->dev, "Unable to request DMA channel\n");
339 goto err_dma_out;
340 }
341
342 omap_set_dma_dest_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_CONSTANT,
343 dd->phys_base + AES_REG_DATA, 0, 4);
344
345 omap_set_dma_dest_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
346 omap_set_dma_src_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
347
348 omap_set_dma_src_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_CONSTANT,
349 dd->phys_base + AES_REG_DATA, 0, 4);
350
351 omap_set_dma_src_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
352 omap_set_dma_dest_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
353
354 return 0;
355
356err_dma_out:
357 omap_free_dma(dd->dma_lch_in);
358err_dma_in:
359 dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
360 DMA_FROM_DEVICE);
361err_map_out:
362 dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
363err_map_in:
364 free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
365 free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
366err_alloc:
367 if (err)
368 pr_err("error: %d\n", err);
369 return err;
370}
371
372static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
373{
374 omap_free_dma(dd->dma_lch_out);
375 omap_free_dma(dd->dma_lch_in);
376 dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
377 DMA_FROM_DEVICE);
378 dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
379 free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
380 free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
381}
382
383static void sg_copy_buf(void *buf, struct scatterlist *sg,
384 unsigned int start, unsigned int nbytes, int out)
385{
386 struct scatter_walk walk;
387
388 if (!nbytes)
389 return;
390
391 scatterwalk_start(&walk, sg);
392 scatterwalk_advance(&walk, start);
393 scatterwalk_copychunks(buf, &walk, nbytes, out);
394 scatterwalk_done(&walk, out, 0);
395}
396
397static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf,
398 size_t buflen, size_t total, int out)
399{
400 unsigned int count, off = 0;
401
402 while (buflen && total) {
403 count = min((*sg)->length - *offset, total);
404 count = min(count, buflen);
405
406 if (!count)
407 return off;
408
409 sg_copy_buf(buf + off, *sg, *offset, count, out);
410
411 off += count;
412 buflen -= count;
413 *offset += count;
414 total -= count;
415
416 if (*offset == (*sg)->length) {
417 *sg = sg_next(*sg);
418 if (*sg)
419 *offset = 0;
420 else
421 total = 0;
422 }
423 }
424
425 return off;
426}
427
428static int omap_aes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
429 dma_addr_t dma_addr_out, int length)
430{
431 struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
432 struct omap_aes_dev *dd = ctx->dd;
433 int len32;
434
435 pr_debug("len: %d\n", length);
436
437 dd->dma_size = length;
438
439 if (!(dd->flags & FLAGS_FAST))
440 dma_sync_single_for_device(dd->dev, dma_addr_in, length,
441 DMA_TO_DEVICE);
442
443 len32 = DIV_ROUND_UP(length, sizeof(u32));
444
445 /* IN */
446 omap_set_dma_transfer_params(dd->dma_lch_in, OMAP_DMA_DATA_TYPE_S32,
447 len32, 1, OMAP_DMA_SYNC_PACKET, dd->dma_in,
448 OMAP_DMA_DST_SYNC);
449
450 omap_set_dma_src_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_POST_INC,
451 dma_addr_in, 0, 0);
452
453 /* OUT */
454 omap_set_dma_transfer_params(dd->dma_lch_out, OMAP_DMA_DATA_TYPE_S32,
455 len32, 1, OMAP_DMA_SYNC_PACKET,
456 dd->dma_out, OMAP_DMA_SRC_SYNC);
457
458 omap_set_dma_dest_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_POST_INC,
459 dma_addr_out, 0, 0);
460
461 omap_start_dma(dd->dma_lch_in);
462 omap_start_dma(dd->dma_lch_out);
463
464 omap_aes_write_ctrl(dd);
465
466 return 0;
467}
468
469static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
470{
471 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
472 crypto_ablkcipher_reqtfm(dd->req));
473 int err, fast = 0, in, out;
474 size_t count;
475 dma_addr_t addr_in, addr_out;
476
477 pr_debug("total: %d\n", dd->total);
478
479 if (sg_is_last(dd->in_sg) && sg_is_last(dd->out_sg)) {
480 /* check for alignment */
481 in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32));
482 out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32));
483
484 fast = in && out;
485 }
486
487 if (fast) {
488 count = min(dd->total, sg_dma_len(dd->in_sg));
489 count = min(count, sg_dma_len(dd->out_sg));
490
491 if (count != dd->total)
492 return -EINVAL;
493
494 pr_debug("fast\n");
495
496 err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
497 if (!err) {
498 dev_err(dd->dev, "dma_map_sg() error\n");
499 return -EINVAL;
500 }
501
502 err = dma_map_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
503 if (!err) {
504 dev_err(dd->dev, "dma_map_sg() error\n");
505 dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
506 return -EINVAL;
507 }
508
509 addr_in = sg_dma_address(dd->in_sg);
510 addr_out = sg_dma_address(dd->out_sg);
511
512 dd->flags |= FLAGS_FAST;
513
514 } else {
515 /* use cache buffers */
516 count = sg_copy(&dd->in_sg, &dd->in_offset, dd->buf_in,
517 dd->buflen, dd->total, 0);
518
519 addr_in = dd->dma_addr_in;
520 addr_out = dd->dma_addr_out;
521
522 dd->flags &= ~FLAGS_FAST;
523
524 }
525
526 dd->total -= count;
527
528 err = omap_aes_hw_init(dd);
529
530 err = omap_aes_crypt_dma(tfm, addr_in, addr_out, count);
531
532 return err;
533}
534
535static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
536{
537 struct omap_aes_ctx *ctx;
538
539 pr_debug("err: %d\n", err);
540
541 ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(dd->req));
542
543 if (!dd->total)
544 dd->req->base.complete(&dd->req->base, err);
545}
546
547static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
548{
549 int err = 0;
550 size_t count;
551
552 pr_debug("total: %d\n", dd->total);
553
554 omap_aes_write_mask(dd, AES_REG_MASK, 0, AES_REG_MASK_START);
555
556 omap_aes_hw_cleanup(dd);
557
558 omap_stop_dma(dd->dma_lch_in);
559 omap_stop_dma(dd->dma_lch_out);
560
561 if (dd->flags & FLAGS_FAST) {
562 dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
563 dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
564 } else {
565 dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
566 dd->dma_size, DMA_FROM_DEVICE);
567
568 /* copy data */
569 count = sg_copy(&dd->out_sg, &dd->out_offset, dd->buf_out,
570 dd->buflen, dd->dma_size, 1);
571 if (count != dd->dma_size) {
572 err = -EINVAL;
573 pr_err("not all data converted: %u\n", count);
574 }
575 }
576
577 if (err || !dd->total)
578 omap_aes_finish_req(dd, err);
579
580 return err;
581}
582
583static int omap_aes_handle_req(struct omap_aes_dev *dd)
584{
585 struct crypto_async_request *async_req, *backlog;
586 struct omap_aes_ctx *ctx;
587 struct omap_aes_reqctx *rctx;
588 struct ablkcipher_request *req;
589 unsigned long flags;
590
591 if (dd->total)
592 goto start;
593
594 spin_lock_irqsave(&dd->lock, flags);
595 backlog = crypto_get_backlog(&dd->queue);
596 async_req = crypto_dequeue_request(&dd->queue);
597 if (!async_req)
598 clear_bit(FLAGS_BUSY, &dd->flags);
599 spin_unlock_irqrestore(&dd->lock, flags);
600
601 if (!async_req)
602 return 0;
603
604 if (backlog)
605 backlog->complete(backlog, -EINPROGRESS);
606
607 req = ablkcipher_request_cast(async_req);
608
609 pr_debug("get new req\n");
610
611 /* assign new request to device */
612 dd->req = req;
613 dd->total = req->nbytes;
614 dd->in_offset = 0;
615 dd->in_sg = req->src;
616 dd->out_offset = 0;
617 dd->out_sg = req->dst;
618
619 rctx = ablkcipher_request_ctx(req);
620 ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
621 rctx->mode &= FLAGS_MODE_MASK;
622 dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;
623
624 dd->iv = req->info;
625 if ((dd->flags & FLAGS_CBC) && dd->iv)
626 dd->flags |= FLAGS_NEW_IV;
627 else
628 dd->flags &= ~FLAGS_NEW_IV;
629
630 ctx->dd = dd;
631 if (dd->ctx != ctx) {
632 /* assign new context to device */
633 dd->ctx = ctx;
634 ctx->flags |= FLAGS_NEW_KEY;
635 }
636
637 if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE))
 638 pr_err("request size is not an exact number of AES blocks\n");
639
640start:
641 return omap_aes_crypt_dma_start(dd);
642}
643
644static void omap_aes_task(unsigned long data)
645{
646 struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
647 int err;
648
649 pr_debug("enter\n");
650
651 err = omap_aes_crypt_dma_stop(dd);
652
653 err = omap_aes_handle_req(dd);
654
655 pr_debug("exit\n");
656}
657
658static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
659{
660 struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
661 crypto_ablkcipher_reqtfm(req));
662 struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
663 struct omap_aes_dev *dd;
664 unsigned long flags;
665 int err;
666
667 pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
668 !!(mode & FLAGS_ENCRYPT),
669 !!(mode & FLAGS_CBC));
670
671 dd = omap_aes_find_dev(ctx);
672 if (!dd)
673 return -ENODEV;
674
675 rctx->mode = mode;
676
677 spin_lock_irqsave(&dd->lock, flags);
678 err = ablkcipher_enqueue_request(&dd->queue, req);
679 spin_unlock_irqrestore(&dd->lock, flags);
680
681 if (!test_and_set_bit(FLAGS_BUSY, &dd->flags))
682 omap_aes_handle_req(dd);
683
684 pr_debug("exit\n");
685
686 return err;
687}
688
689/* ********************** ALG API ************************************ */
690
691static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
692 unsigned int keylen)
693{
694 struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
695
696 if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
697 keylen != AES_KEYSIZE_256)
698 return -EINVAL;
699
700 pr_debug("enter, keylen: %d\n", keylen);
701
702 memcpy(ctx->key, key, keylen);
703 ctx->keylen = keylen;
704 ctx->flags |= FLAGS_NEW_KEY;
705
706 return 0;
707}
708
709static int omap_aes_ecb_encrypt(struct ablkcipher_request *req)
710{
711 return omap_aes_crypt(req, FLAGS_ENCRYPT);
712}
713
714static int omap_aes_ecb_decrypt(struct ablkcipher_request *req)
715{
716 return omap_aes_crypt(req, 0);
717}
718
719static int omap_aes_cbc_encrypt(struct ablkcipher_request *req)
720{
721 return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
722}
723
724static int omap_aes_cbc_decrypt(struct ablkcipher_request *req)
725{
726 return omap_aes_crypt(req, FLAGS_CBC);
727}
728
729static int omap_aes_cra_init(struct crypto_tfm *tfm)
730{
731 pr_debug("enter\n");
732
733 tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx);
734
735 return 0;
736}
737
738static void omap_aes_cra_exit(struct crypto_tfm *tfm)
739{
740 pr_debug("enter\n");
741}
742
743/* ********************** ALGS ************************************ */
744
745static struct crypto_alg algs[] = {
746{
747 .cra_name = "ecb(aes)",
748 .cra_driver_name = "ecb-aes-omap",
749 .cra_priority = 100,
750 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
751 .cra_blocksize = AES_BLOCK_SIZE,
752 .cra_ctxsize = sizeof(struct omap_aes_ctx),
753 .cra_alignmask = 0,
754 .cra_type = &crypto_ablkcipher_type,
755 .cra_module = THIS_MODULE,
756 .cra_init = omap_aes_cra_init,
757 .cra_exit = omap_aes_cra_exit,
758 .cra_u.ablkcipher = {
759 .min_keysize = AES_MIN_KEY_SIZE,
760 .max_keysize = AES_MAX_KEY_SIZE,
761 .setkey = omap_aes_setkey,
762 .encrypt = omap_aes_ecb_encrypt,
763 .decrypt = omap_aes_ecb_decrypt,
764 }
765},
766{
767 .cra_name = "cbc(aes)",
768 .cra_driver_name = "cbc-aes-omap",
769 .cra_priority = 100,
770 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
771 .cra_blocksize = AES_BLOCK_SIZE,
772 .cra_ctxsize = sizeof(struct omap_aes_ctx),
773 .cra_alignmask = 0,
774 .cra_type = &crypto_ablkcipher_type,
775 .cra_module = THIS_MODULE,
776 .cra_init = omap_aes_cra_init,
777 .cra_exit = omap_aes_cra_exit,
778 .cra_u.ablkcipher = {
779 .min_keysize = AES_MIN_KEY_SIZE,
780 .max_keysize = AES_MAX_KEY_SIZE,
781 .ivsize = AES_BLOCK_SIZE,
782 .setkey = omap_aes_setkey,
783 .encrypt = omap_aes_cbc_encrypt,
784 .decrypt = omap_aes_cbc_decrypt,
785 }
786}
787};
788
789static int omap_aes_probe(struct platform_device *pdev)
790{
791 struct device *dev = &pdev->dev;
792 struct omap_aes_dev *dd;
793 struct resource *res;
794 int err = -ENOMEM, i, j;
795 u32 reg;
796
797 dd = kzalloc(sizeof(struct omap_aes_dev), GFP_KERNEL);
798 if (dd == NULL) {
799 dev_err(dev, "unable to alloc data struct.\n");
800 goto err_data;
801 }
802 dd->dev = dev;
803 platform_set_drvdata(pdev, dd);
804
805 spin_lock_init(&dd->lock);
806 crypto_init_queue(&dd->queue, OMAP_AES_QUEUE_LENGTH);
807
808 /* Get the base address */
809 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
810 if (!res) {
811 dev_err(dev, "invalid resource type\n");
812 err = -ENODEV;
813 goto err_res;
814 }
815 dd->phys_base = res->start;
816
 817 /* Get the DMA out channel */
818 res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
819 if (!res)
820 dev_info(dev, "no DMA info\n");
821 else
822 dd->dma_out = res->start;
823
 824 /* Get the DMA in channel */
825 res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
826 if (!res)
827 dev_info(dev, "no DMA info\n");
828 else
829 dd->dma_in = res->start;
830
831 /* Initializing the clock */
832 dd->iclk = clk_get(dev, "ick");
833 if (!dd->iclk) {
834 dev_err(dev, "clock intialization failed.\n");
835 err = -ENODEV;
836 goto err_res;
837 }
838
839 dd->io_base = ioremap(dd->phys_base, SZ_4K);
840 if (!dd->io_base) {
841 dev_err(dev, "can't ioremap\n");
842 err = -ENOMEM;
843 goto err_io;
844 }
845
846 clk_enable(dd->iclk);
847 reg = omap_aes_read(dd, AES_REG_REV);
848 dev_info(dev, "OMAP AES hw accel rev: %u.%u\n",
849 (reg & AES_REG_REV_MAJOR) >> 4, reg & AES_REG_REV_MINOR);
850 clk_disable(dd->iclk);
851
852 tasklet_init(&dd->task, omap_aes_task, (unsigned long)dd);
853
854 err = omap_aes_dma_init(dd);
855 if (err)
856 goto err_dma;
857
858 INIT_LIST_HEAD(&dd->list);
859 spin_lock(&list_lock);
860 list_add_tail(&dd->list, &dev_list);
861 spin_unlock(&list_lock);
862
863 for (i = 0; i < ARRAY_SIZE(algs); i++) {
864 pr_debug("i: %d\n", i);
865 INIT_LIST_HEAD(&algs[i].cra_list);
866 err = crypto_register_alg(&algs[i]);
867 if (err)
868 goto err_algs;
869 }
870
871 pr_info("probe() done\n");
872
873 return 0;
874err_algs:
875 for (j = 0; j < i; j++)
876 crypto_unregister_alg(&algs[j]);
877 omap_aes_dma_cleanup(dd);
878err_dma:
879 tasklet_kill(&dd->task);
880 iounmap(dd->io_base);
881err_io:
882 clk_put(dd->iclk);
883err_res:
884 kfree(dd);
885 dd = NULL;
886err_data:
887 dev_err(dev, "initialization failed.\n");
888 return err;
889}
890
891static int omap_aes_remove(struct platform_device *pdev)
892{
893 struct omap_aes_dev *dd = platform_get_drvdata(pdev);
894 int i;
895
896 if (!dd)
897 return -ENODEV;
898
899 spin_lock(&list_lock);
900 list_del(&dd->list);
901 spin_unlock(&list_lock);
902
903 for (i = 0; i < ARRAY_SIZE(algs); i++)
904 crypto_unregister_alg(&algs[i]);
905
906 tasklet_kill(&dd->task);
907 omap_aes_dma_cleanup(dd);
908 iounmap(dd->io_base);
909 clk_put(dd->iclk);
910 kfree(dd);
911 dd = NULL;
912
913 return 0;
914}
915
916static struct platform_driver omap_aes_driver = {
917 .probe = omap_aes_probe,
918 .remove = omap_aes_remove,
919 .driver = {
920 .name = "omap-aes",
921 .owner = THIS_MODULE,
922 },
923};
924
925static int __init omap_aes_mod_init(void)
926{
927 pr_info("loading %s driver\n", "omap-aes");
928
929 if (!cpu_class_is_omap2() || omap_type() != OMAP2_DEVICE_TYPE_SEC) {
930 pr_err("Unsupported cpu\n");
931 return -ENODEV;
932 }
933
934 return platform_driver_register(&omap_aes_driver);
935}
936
937static void __exit omap_aes_mod_exit(void)
938{
939 platform_driver_unregister(&omap_aes_driver);
940}
941
942module_init(omap_aes_mod_init);
943module_exit(omap_aes_mod_exit);
944
945MODULE_DESCRIPTION("OMAP AES hw acceleration support.");
946MODULE_LICENSE("GPL v2");
947MODULE_AUTHOR("Dmitry Kasatkin");
948
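For context, a minimal sketch of how a kernel-side caller could exercise the "cbc(aes)" ablkcipher registered above (the hardware backend, e.g. "cbc-aes-omap", may be selected by priority at allocation time); the example_* names are hypothetical and error handling is abbreviated:

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/aes.h>

struct example_result {
	struct completion done;
	int err;
};

static void example_cipher_done(struct crypto_async_request *req, int err)
{
	struct example_result *res = req->data;

	if (err == -EINPROGRESS)
		return;
	res->err = err;
	complete(&res->done);
}

/* Encrypt one AES block in place with cbc(aes). */
static int example_cbc_aes_encrypt(u8 *buf, const u8 *key,
				   unsigned int keylen, u8 iv[AES_BLOCK_SIZE])
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct example_result res;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_ablkcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_tfm;
	}

	init_completion(&res.done);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					example_cipher_done, &res);

	sg_init_one(&sg, buf, AES_BLOCK_SIZE);
	ablkcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, iv);

	err = crypto_ablkcipher_encrypt(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		/* asynchronous completion: wait for the callback */
		wait_for_completion(&res.done);
		err = res.err;
	}

	ablkcipher_request_free(req);
out_tfm:
	crypto_free_ablkcipher(tfm);
	return err;
}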
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 7d1485676886..a081c7c7d03f 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -311,7 +311,8 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
311 len32 = DIV_ROUND_UP(length, sizeof(u32)); 311 len32 = DIV_ROUND_UP(length, sizeof(u32));
312 312
313 omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32, 313 omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32,
314 1, OMAP_DMA_SYNC_PACKET, dd->dma, OMAP_DMA_DST_SYNC); 314 1, OMAP_DMA_SYNC_PACKET, dd->dma,
315 OMAP_DMA_DST_SYNC_PREFETCH);
315 316
316 omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC, 317 omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC,
317 dma_addr, 0, 0); 318 dma_addr, 0, 0);
@@ -1072,6 +1073,9 @@ static int omap_sham_dma_init(struct omap_sham_dev *dd)
1072 omap_set_dma_dest_burst_mode(dd->dma_lch, 1073 omap_set_dma_dest_burst_mode(dd->dma_lch,
1073 OMAP_DMA_DATA_BURST_16); 1074 OMAP_DMA_DATA_BURST_16);
1074 1075
1076 omap_set_dma_src_burst_mode(dd->dma_lch,
1077 OMAP_DMA_DATA_BURST_4);
1078
1075 return 0; 1079 return 0;
1076} 1080}
1077 1081
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 4bcd825b5739..b879c3f5d7c0 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -161,7 +161,7 @@ struct talitos_private {
161static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr) 161static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr)
162{ 162{
163 talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr)); 163 talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
164 talitos_ptr->eptr = cpu_to_be32(upper_32_bits(dma_addr)); 164 talitos_ptr->eptr = upper_32_bits(dma_addr);
165} 165}
166 166
167/* 167/*
@@ -332,10 +332,9 @@ static int talitos_submit(struct device *dev, struct talitos_desc *desc,
332 332
333 /* GO! */ 333 /* GO! */
334 wmb(); 334 wmb();
335 out_be32(priv->reg + TALITOS_FF(ch), 335 out_be32(priv->reg + TALITOS_FF(ch), upper_32_bits(request->dma_desc));
336 cpu_to_be32(upper_32_bits(request->dma_desc)));
337 out_be32(priv->reg + TALITOS_FF_LO(ch), 336 out_be32(priv->reg + TALITOS_FF_LO(ch),
338 cpu_to_be32(lower_32_bits(request->dma_desc))); 337 lower_32_bits(request->dma_desc));
339 338
340 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags); 339 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
341 340
@@ -1751,14 +1750,14 @@ static int ahash_init_sha224_swinit(struct ahash_request *areq)
1751 ahash_init(areq); 1750 ahash_init(areq);
1752 req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/ 1751 req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
1753 1752
1754 req_ctx->hw_context[0] = cpu_to_be32(SHA224_H0); 1753 req_ctx->hw_context[0] = SHA224_H0;
1755 req_ctx->hw_context[1] = cpu_to_be32(SHA224_H1); 1754 req_ctx->hw_context[1] = SHA224_H1;
1756 req_ctx->hw_context[2] = cpu_to_be32(SHA224_H2); 1755 req_ctx->hw_context[2] = SHA224_H2;
1757 req_ctx->hw_context[3] = cpu_to_be32(SHA224_H3); 1756 req_ctx->hw_context[3] = SHA224_H3;
1758 req_ctx->hw_context[4] = cpu_to_be32(SHA224_H4); 1757 req_ctx->hw_context[4] = SHA224_H4;
1759 req_ctx->hw_context[5] = cpu_to_be32(SHA224_H5); 1758 req_ctx->hw_context[5] = SHA224_H5;
1760 req_ctx->hw_context[6] = cpu_to_be32(SHA224_H6); 1759 req_ctx->hw_context[6] = SHA224_H6;
1761 req_ctx->hw_context[7] = cpu_to_be32(SHA224_H7); 1760 req_ctx->hw_context[7] = SHA224_H7;
1762 1761
1763 /* init 64-bit count */ 1762 /* init 64-bit count */
1764 req_ctx->hw_context[8] = 0; 1763 req_ctx->hw_context[8] = 0;
@@ -2333,8 +2332,7 @@ static int talitos_remove(struct platform_device *ofdev)
2333 talitos_unregister_rng(dev); 2332 talitos_unregister_rng(dev);
2334 2333
2335 for (i = 0; i < priv->num_channels; i++) 2334 for (i = 0; i < priv->num_channels; i++)
2336 if (priv->chan[i].fifo) 2335 kfree(priv->chan[i].fifo);
2337 kfree(priv->chan[i].fifo);
2338 2336
2339 kfree(priv->chan); 2337 kfree(priv->chan);
2340 2338
@@ -2389,6 +2387,9 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
2389 DESC_HDR_MODE0_MDEU_SHA256; 2387 DESC_HDR_MODE0_MDEU_SHA256;
2390 } 2388 }
2391 break; 2389 break;
2390 default:
2391 dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
2392 return ERR_PTR(-EINVAL);
2392 } 2393 }
2393 2394
2394 alg->cra_module = THIS_MODULE; 2395 alg->cra_module = THIS_MODULE;
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 9dbb28b9559f..f436a2fa9f38 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -209,6 +209,13 @@ config EDAC_I5100
209 Support for error detection and correction the Intel 209 Support for error detection and correction the Intel
210 San Clemente MCH. 210 San Clemente MCH.
211 211
212config EDAC_I7300
213 tristate "Intel Clarksboro MCH"
214 depends on EDAC_MM_EDAC && X86 && PCI
215 help
216 Support for error detection and correction the Intel
217 Clarksboro MCH (Intel 7300 chipset).
218
212config EDAC_MPC85XX 219config EDAC_MPC85XX
213 tristate "Freescale MPC83xx / MPC85xx" 220 tristate "Freescale MPC83xx / MPC85xx"
214 depends on EDAC_MM_EDAC && FSL_SOC && (PPC_83xx || PPC_85xx) 221 depends on EDAC_MM_EDAC && FSL_SOC && (PPC_83xx || PPC_85xx)
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 32c7bc93c525..b3781399b38a 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_EDAC_CPC925) += cpc925_edac.o
27obj-$(CONFIG_EDAC_I5000) += i5000_edac.o 27obj-$(CONFIG_EDAC_I5000) += i5000_edac.o
28obj-$(CONFIG_EDAC_I5100) += i5100_edac.o 28obj-$(CONFIG_EDAC_I5100) += i5100_edac.o
29obj-$(CONFIG_EDAC_I5400) += i5400_edac.o 29obj-$(CONFIG_EDAC_I5400) += i5400_edac.o
30obj-$(CONFIG_EDAC_I7300) += i7300_edac.o
30obj-$(CONFIG_EDAC_I7CORE) += i7core_edac.o 31obj-$(CONFIG_EDAC_I7CORE) += i7core_edac.o
31obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o 32obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o
32obj-$(CONFIG_EDAC_E752X) += e752x_edac.o 33obj-$(CONFIG_EDAC_E752X) += e752x_edac.o
diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
index 2941dca91aae..400de071cabc 100644
--- a/drivers/edac/edac_device_sysfs.c
+++ b/drivers/edac/edac_device_sysfs.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * file for managing the edac_device class of devices for EDAC 2 * file for managing the edac_device class of devices for EDAC
3 * 3 *
4 * (C) 2007 SoftwareBitMaker (http://www.softwarebitmaker.com) 4 * (C) 2007 SoftwareBitMaker
5 * 5 *
6 * This file may be distributed under the terms of the 6 * This file may be distributed under the terms of the
7 * GNU General Public License. 7 * GNU General Public License.
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
new file mode 100644
index 000000000000..05523b504271
--- /dev/null
+++ b/drivers/edac/i7300_edac.c
@@ -0,0 +1,1247 @@
1/*
2 * Intel 7300 class Memory Controllers kernel module (Clarksboro)
3 *
4 * This file may be distributed under the terms of the
5 * GNU General Public License version 2 only.
6 *
7 * Copyright (c) 2010 by:
8 * Mauro Carvalho Chehab <mchehab@redhat.com>
9 *
10 * Red Hat Inc. http://www.redhat.com
11 *
12 * Intel 7300 Chipset Memory Controller Hub (MCH) - Datasheet
13 * http://www.intel.com/Assets/PDF/datasheet/318082.pdf
14 *
 15 * TODO: The chipset also allows checking for PCI Express errors. Currently,
 16 * the driver covers only memory errors
 17 *
 18 * This driver uses the "csrows" EDAC attribute to represent the DIMM slot#
19 */
20
21#include <linux/module.h>
22#include <linux/init.h>
23#include <linux/pci.h>
24#include <linux/pci_ids.h>
25#include <linux/slab.h>
26#include <linux/edac.h>
27#include <linux/mmzone.h>
28
29#include "edac_core.h"
30
31/*
32 * Alter this version for the I7300 module when modifications are made
33 */
34#define I7300_REVISION " Ver: 1.0.0 " __DATE__
35
36#define EDAC_MOD_STR "i7300_edac"
37
38#define i7300_printk(level, fmt, arg...) \
39 edac_printk(level, "i7300", fmt, ##arg)
40
41#define i7300_mc_printk(mci, level, fmt, arg...) \
42 edac_mc_chipset_printk(mci, level, "i7300", fmt, ##arg)
43
44/***********************************************
45 * i7300 Limit constants Structs and static vars
46 ***********************************************/
47
48/*
49 * Memory topology is organized as:
50 * Branch 0 - 2 channels: channels 0 and 1 (FDB0 PCI dev 21.0)
51 * Branch 1 - 2 channels: channels 2 and 3 (FDB1 PCI dev 22.0)
 52 * Each channel can have up to 8 DIMM sets (called SLOTS)
 53 * Slots should generally be filled in pairs,
 54 * except in Single Channel mode of operation,
 55 * where only slot 0/channel 0 is filled
 56 * In normal operation mode, the two channels on a branch should be
 57 * filled together for the same SLOT#
 58 * When in mirrored mode, Branch 1 replicates the memory at Branch 0, so the
 59 * four channels on both branches should be filled
60 */
61
62/* Limits for i7300 */
63#define MAX_SLOTS 8
64#define MAX_BRANCHES 2
65#define MAX_CH_PER_BRANCH 2
66#define MAX_CHANNELS (MAX_CH_PER_BRANCH * MAX_BRANCHES)
67#define MAX_MIR 3
68
69#define to_channel(ch, branch) ((((branch)) << 1) | (ch))
70
71#define to_csrow(slot, ch, branch) \
72 (to_channel(ch, branch) | ((slot) << 2))
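/* Worked example of the two macros above: slot 2 on channel 1 of branch 1
 * gives to_channel(1, 1) == 3 and to_csrow(2, 1, 1) == (3 | (2 << 2)) == 11,
 * i.e. the csrow number carries branch/channel in bits 1:0 and the slot
 * number in bits 4:2. */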
73
74/* Device name and register DID (Device ID) */
75struct i7300_dev_info {
76 const char *ctl_name; /* name for this device */
77 u16 fsb_mapping_errors; /* DID for the branchmap,control */
78};
79
80/* Table of devices attributes supported by this driver */
81static const struct i7300_dev_info i7300_devs[] = {
82 {
83 .ctl_name = "I7300",
84 .fsb_mapping_errors = PCI_DEVICE_ID_INTEL_I7300_MCH_ERR,
85 },
86};
87
88struct i7300_dimm_info {
89 int megabytes; /* size, 0 means not present */
90};
91
92/* driver private data structure */
93struct i7300_pvt {
94 struct pci_dev *pci_dev_16_0_fsb_ctlr; /* 16.0 */
95 struct pci_dev *pci_dev_16_1_fsb_addr_map; /* 16.1 */
96 struct pci_dev *pci_dev_16_2_fsb_err_regs; /* 16.2 */
97 struct pci_dev *pci_dev_2x_0_fbd_branch[MAX_BRANCHES]; /* 21.0 and 22.0 */
98
99 u16 tolm; /* top of low memory */
100 u64 ambase; /* AMB BAR */
101
102 u32 mc_settings; /* Report several settings */
103 u32 mc_settings_a;
104
105 u16 mir[MAX_MIR]; /* Memory Interleave Reg*/
106
 107 u16 mtr[MAX_SLOTS][MAX_BRANCHES]; /* Memory Technology Reg */
108 u16 ambpresent[MAX_CHANNELS]; /* AMB present regs */
109
110 /* DIMM information matrix, allocating architecture maximums */
111 struct i7300_dimm_info dimm_info[MAX_SLOTS][MAX_CHANNELS];
112
113 /* Temporary buffer for use when preparing error messages */
114 char *tmp_prt_buffer;
115};
116
117/* FIXME: Why do we need to have this static? */
118static struct edac_pci_ctl_info *i7300_pci;
119
120/***************************************************
121 * i7300 Register definitions for memory enumeration
122 ***************************************************/
123
124/*
125 * Device 16,
126 * Function 0: System Address (not documented)
127 * Function 1: Memory Branch Map, Control, Errors Register
128 */
129
130 /* OFFSETS for Function 0 */
131#define AMBASE 0x48 /* AMB Mem Mapped Reg Region Base */
132#define MAXCH 0x56 /* Max Channel Number */
133#define MAXDIMMPERCH 0x57 /* Max DIMM PER Channel Number */
134
135 /* OFFSETS for Function 1 */
136#define MC_SETTINGS 0x40
137 #define IS_MIRRORED(mc) ((mc) & (1 << 16))
138 #define IS_ECC_ENABLED(mc) ((mc) & (1 << 5))
139 #define IS_RETRY_ENABLED(mc) ((mc) & (1 << 31))
140 #define IS_SCRBALGO_ENHANCED(mc) ((mc) & (1 << 8))
141
142#define MC_SETTINGS_A 0x58
143 #define IS_SINGLE_MODE(mca) ((mca) & (1 << 14))
144
145#define TOLM 0x6C
146
147#define MIR0 0x80
148#define MIR1 0x84
149#define MIR2 0x88
150
151/*
 152 * Note: Other Intel EDAC drivers use AMBPRESENT to identify the available
 153 * memory. From datasheet item 7.3.1 (FB-DIMM technology & organization), it
 154 * seems that we cannot use this information directly for the same purpose.
 155 * Each memory slot may have up to 2 AMB interfaces, one for the incoming and
 156 * another for the outgoing interface to the next slot.
 157 * For now, the driver just stores the AMB present registers, but relies only
 158 * on the MTR info to detect memory.
 159 * The datasheet is also not clear about how to map each AMBPRESENT register
 160 * to one of the 4 available channels.
161 */
162#define AMBPRESENT_0 0x64
163#define AMBPRESENT_1 0x66
164
 165static const u16 mtr_regs[MAX_SLOTS] = {
166 0x80, 0x84, 0x88, 0x8c,
167 0x82, 0x86, 0x8a, 0x8e
168};
169
170/*
 171 * Defines to extract the various fields from the
172 * MTRx - Memory Technology Registers
173 */
174#define MTR_DIMMS_PRESENT(mtr) ((mtr) & (1 << 8))
175#define MTR_DIMMS_ETHROTTLE(mtr) ((mtr) & (1 << 7))
176#define MTR_DRAM_WIDTH(mtr) (((mtr) & (1 << 6)) ? 8 : 4)
177#define MTR_DRAM_BANKS(mtr) (((mtr) & (1 << 5)) ? 8 : 4)
178#define MTR_DIMM_RANKS(mtr) (((mtr) & (1 << 4)) ? 1 : 0)
179#define MTR_DIMM_ROWS(mtr) (((mtr) >> 2) & 0x3)
180#define MTR_DRAM_BANKS_ADDR_BITS 2
181#define MTR_DIMM_ROWS_ADDR_BITS(mtr) (MTR_DIMM_ROWS(mtr) + 13)
182#define MTR_DIMM_COLS(mtr) ((mtr) & 0x3)
183#define MTR_DIMM_COLS_ADDR_BITS(mtr) (MTR_DIMM_COLS(mtr) + 10)
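/* Worked example for the extraction macros above: for mtr == 0x0135,
 * MTR_DIMMS_PRESENT() is non-zero, MTR_DRAM_WIDTH() == 4,
 * MTR_DRAM_BANKS() == 8, MTR_DIMM_RANKS() == 1,
 * MTR_DIMM_ROWS() == 1 (14 row address bits) and
 * MTR_DIMM_COLS() == 1 (11 column address bits). */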
184
185#ifdef CONFIG_EDAC_DEBUG
186/* MTR NUMROW */
187static const char *numrow_toString[] = {
188 "8,192 - 13 rows",
189 "16,384 - 14 rows",
190 "32,768 - 15 rows",
191 "65,536 - 16 rows"
192};
193
194/* MTR NUMCOL */
195static const char *numcol_toString[] = {
196 "1,024 - 10 columns",
197 "2,048 - 11 columns",
198 "4,096 - 12 columns",
199 "reserved"
200};
201#endif
202
203/************************************************
204 * i7300 Register definitions for error detection
205 ************************************************/
206
207/*
208 * Device 16.1: FBD Error Registers
209 */
210#define FERR_FAT_FBD 0x98
211static const char *ferr_fat_fbd_name[] = {
212 [22] = "Non-Redundant Fast Reset Timeout",
213 [2] = ">Tmid Thermal event with intelligent throttling disabled",
214 [1] = "Memory or FBD configuration CRC read error",
215 [0] = "Memory Write error on non-redundant retry or "
216 "FBD configuration Write error on retry",
217};
218#define GET_FBD_FAT_IDX(fbderr) (fbderr & (3 << 28))
219#define FERR_FAT_FBD_ERR_MASK ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 3))
220
221#define FERR_NF_FBD 0xa0
222static const char *ferr_nf_fbd_name[] = {
223 [24] = "DIMM-Spare Copy Completed",
224 [23] = "DIMM-Spare Copy Initiated",
225 [22] = "Redundant Fast Reset Timeout",
226 [21] = "Memory Write error on redundant retry",
227 [18] = "SPD protocol Error",
228 [17] = "FBD Northbound parity error on FBD Sync Status",
229 [16] = "Correctable Patrol Data ECC",
230 [15] = "Correctable Resilver- or Spare-Copy Data ECC",
231 [14] = "Correctable Mirrored Demand Data ECC",
232 [13] = "Correctable Non-Mirrored Demand Data ECC",
233 [11] = "Memory or FBD configuration CRC read error",
234 [10] = "FBD Configuration Write error on first attempt",
235 [9] = "Memory Write error on first attempt",
236 [8] = "Non-Aliased Uncorrectable Patrol Data ECC",
237 [7] = "Non-Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
238 [6] = "Non-Aliased Uncorrectable Mirrored Demand Data ECC",
239 [5] = "Non-Aliased Uncorrectable Non-Mirrored Demand Data ECC",
240 [4] = "Aliased Uncorrectable Patrol Data ECC",
241 [3] = "Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
242 [2] = "Aliased Uncorrectable Mirrored Demand Data ECC",
243 [1] = "Aliased Uncorrectable Non-Mirrored Demand Data ECC",
244 [0] = "Uncorrectable Data ECC on Replay",
245};
246#define GET_FBD_NF_IDX(fbderr) (fbderr & (3 << 28))
247#define FERR_NF_FBD_ERR_MASK ((1 << 24) | (1 << 23) | (1 << 22) | (1 << 21) |\
248 (1 << 18) | (1 << 17) | (1 << 16) | (1 << 15) |\
249 (1 << 14) | (1 << 13) | (1 << 11) | (1 << 10) |\
250 (1 << 9) | (1 << 8) | (1 << 7) | (1 << 6) |\
251 (1 << 5) | (1 << 4) | (1 << 3) | (1 << 2) |\
252 (1 << 1) | (1 << 0))
253
254#define EMASK_FBD 0xa8
255#define EMASK_FBD_ERR_MASK ((1 << 27) | (1 << 26) | (1 << 25) | (1 << 24) |\
256 (1 << 22) | (1 << 21) | (1 << 20) | (1 << 19) |\
257 (1 << 18) | (1 << 17) | (1 << 16) | (1 << 14) |\
258 (1 << 13) | (1 << 12) | (1 << 11) | (1 << 10) |\
259 (1 << 9) | (1 << 8) | (1 << 7) | (1 << 6) |\
260 (1 << 5) | (1 << 4) | (1 << 3) | (1 << 2) |\
261 (1 << 1) | (1 << 0))
262
263/*
264 * Device 16.2: Global Error Registers
265 */
266
267#define FERR_GLOBAL_HI 0x48
268static const char *ferr_global_hi_name[] = {
269 [3] = "FSB 3 Fatal Error",
270 [2] = "FSB 2 Fatal Error",
271 [1] = "FSB 1 Fatal Error",
272 [0] = "FSB 0 Fatal Error",
273};
274#define ferr_global_hi_is_fatal(errno) 1
275
276#define FERR_GLOBAL_LO 0x40
277static const char *ferr_global_lo_name[] = {
278 [31] = "Internal MCH Fatal Error",
279 [30] = "Intel QuickData Technology Device Fatal Error",
280 [29] = "FSB1 Fatal Error",
281 [28] = "FSB0 Fatal Error",
282 [27] = "FBD Channel 3 Fatal Error",
283 [26] = "FBD Channel 2 Fatal Error",
284 [25] = "FBD Channel 1 Fatal Error",
285 [24] = "FBD Channel 0 Fatal Error",
 286 [23] = "PCI Express Device 7 Fatal Error",
287 [22] = "PCI Express Device 6 Fatal Error",
288 [21] = "PCI Express Device 5 Fatal Error",
289 [20] = "PCI Express Device 4 Fatal Error",
290 [19] = "PCI Express Device 3 Fatal Error",
291 [18] = "PCI Express Device 2 Fatal Error",
292 [17] = "PCI Express Device 1 Fatal Error",
293 [16] = "ESI Fatal Error",
294 [15] = "Internal MCH Non-Fatal Error",
295 [14] = "Intel QuickData Technology Device Non Fatal Error",
296 [13] = "FSB1 Non-Fatal Error",
297 [12] = "FSB 0 Non-Fatal Error",
298 [11] = "FBD Channel 3 Non-Fatal Error",
299 [10] = "FBD Channel 2 Non-Fatal Error",
300 [9] = "FBD Channel 1 Non-Fatal Error",
301 [8] = "FBD Channel 0 Non-Fatal Error",
302 [7] = "PCI Express Device 7 Non-Fatal Error",
303 [6] = "PCI Express Device 6 Non-Fatal Error",
304 [5] = "PCI Express Device 5 Non-Fatal Error",
305 [4] = "PCI Express Device 4 Non-Fatal Error",
306 [3] = "PCI Express Device 3 Non-Fatal Error",
307 [2] = "PCI Express Device 2 Non-Fatal Error",
308 [1] = "PCI Express Device 1 Non-Fatal Error",
309 [0] = "ESI Non-Fatal Error",
310};
311#define ferr_global_lo_is_fatal(errno) ((errno < 16) ? 0 : 1)
312
313#define NRECMEMA 0xbe
314 #define NRECMEMA_BANK(v) (((v) >> 12) & 7)
315 #define NRECMEMA_RANK(v) (((v) >> 8) & 15)
316
317#define NRECMEMB 0xc0
318 #define NRECMEMB_IS_WR(v) ((v) & (1 << 31))
319 #define NRECMEMB_CAS(v) (((v) >> 16) & 0x1fff)
320 #define NRECMEMB_RAS(v) ((v) & 0xffff)
321
322#define REDMEMA 0xdc
323
324#define REDMEMB 0x7c
 325 #define IS_SECOND_CH(v) ((v) & (1 << 17))
326
327#define RECMEMA 0xe0
328 #define RECMEMA_BANK(v) (((v) >> 12) & 7)
329 #define RECMEMA_RANK(v) (((v) >> 8) & 15)
330
331#define RECMEMB 0xe4
332 #define RECMEMB_IS_WR(v) ((v) & (1 << 31))
333 #define RECMEMB_CAS(v) (((v) >> 16) & 0x1fff)
334 #define RECMEMB_RAS(v) ((v) & 0xffff)
335
336/********************************************
337 * i7300 Functions related to error detection
338 ********************************************/
339
340/**
341 * get_err_from_table() - Gets the error message from a table
342 * @table: table name (array of char *)
343 * @size: number of elements at the table
344 * @pos: position of the element to be returned
345 *
346 * This is a small routine that gets the pos-th element of a table. If the
 347 * element doesn't exist (or it is empty), it returns "Reserved".
 348 * Instead of calling it directly, it is better to call it via the
 349 * GET_ERR_FROM_TABLE() macro, which automatically checks the table size
 350 * via the ARRAY_SIZE() macro.
351 */
352static const char *get_err_from_table(const char *table[], int size, int pos)
353{
354 if (unlikely(pos >= size))
355 return "Reserved";
356
357 if (unlikely(!table[pos]))
358 return "Reserved";
359
360 return table[pos];
361}
362
363#define GET_ERR_FROM_TABLE(table, pos) \
364 get_err_from_table(table, ARRAY_SIZE(table), pos)
365
366/**
 367 * i7300_process_error_global() - Retrieve the hardware error information from
 368 * the global error registers and
 369 * send it to dmesg
370 * @mci: struct mem_ctl_info pointer
371 */
372static void i7300_process_error_global(struct mem_ctl_info *mci)
373{
374 struct i7300_pvt *pvt;
375 u32 errnum, value;
376 unsigned long errors;
377 const char *specific;
378 bool is_fatal;
379
380 pvt = mci->pvt_info;
381
382 /* read in the 1st FATAL error register */
383 pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
384 FERR_GLOBAL_HI, &value);
385 if (unlikely(value)) {
386 errors = value;
387 errnum = find_first_bit(&errors,
388 ARRAY_SIZE(ferr_global_hi_name));
389 specific = GET_ERR_FROM_TABLE(ferr_global_hi_name, errnum);
390 is_fatal = ferr_global_hi_is_fatal(errnum);
391
392 /* Clear the error bit */
393 pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
394 FERR_GLOBAL_HI, value);
395
396 goto error_global;
397 }
398
399 pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
400 FERR_GLOBAL_LO, &value);
401 if (unlikely(value)) {
402 errors = value;
403 errnum = find_first_bit(&errors,
404 ARRAY_SIZE(ferr_global_lo_name));
405 specific = GET_ERR_FROM_TABLE(ferr_global_lo_name, errnum);
406 is_fatal = ferr_global_lo_is_fatal(errnum);
407
408 /* Clear the error bit */
409 pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
410 FERR_GLOBAL_LO, value);
411
412 goto error_global;
413 }
414 return;
415
416error_global:
417 i7300_mc_printk(mci, KERN_EMERG, "%s misc error: %s\n",
418 is_fatal ? "Fatal" : "NOT fatal", specific);
419}
420
421/**
422 * i7300_process_fbd_error() - Retrieve the hardware error information from
 423 * the FBD error registers and send it via
424 * EDAC error API calls
425 * @mci: struct mem_ctl_info pointer
426 */
427static void i7300_process_fbd_error(struct mem_ctl_info *mci)
428{
429 struct i7300_pvt *pvt;
430 u32 errnum, value;
431 u16 val16;
432 unsigned branch, channel, bank, rank, cas, ras;
433 u32 syndrome;
434
435 unsigned long errors;
436 const char *specific;
437 bool is_wr;
438
439 pvt = mci->pvt_info;
440
441 /* read in the 1st FATAL error register */
442 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
443 FERR_FAT_FBD, &value);
444 if (unlikely(value & FERR_FAT_FBD_ERR_MASK)) {
445		errors = value & FERR_FAT_FBD_ERR_MASK;
446 errnum = find_first_bit(&errors,
447 ARRAY_SIZE(ferr_fat_fbd_name));
448 specific = GET_ERR_FROM_TABLE(ferr_fat_fbd_name, errnum);
449
450 branch = (GET_FBD_FAT_IDX(value) == 2) ? 1 : 0;
451 pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map,
452 NRECMEMA, &val16);
453 bank = NRECMEMA_BANK(val16);
454 rank = NRECMEMA_RANK(val16);
455
456 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
457 NRECMEMB, &value);
458
459 is_wr = NRECMEMB_IS_WR(value);
460 cas = NRECMEMB_CAS(value);
461 ras = NRECMEMB_RAS(value);
462
463 snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
464 "FATAL (Branch=%d DRAM-Bank=%d %s "
465 "RAS=%d CAS=%d Err=0x%lx (%s))",
466 branch, bank,
467 is_wr ? "RDWR" : "RD",
468 ras, cas,
469 errors, specific);
470
471 /* Call the helper to output message */
472 edac_mc_handle_fbd_ue(mci, rank, branch << 1,
473 (branch << 1) + 1,
474 pvt->tmp_prt_buffer);
475 }
476
477 /* read in the 1st NON-FATAL error register */
478 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
479 FERR_NF_FBD, &value);
480 if (unlikely(value & FERR_NF_FBD_ERR_MASK)) {
481 errors = value & FERR_NF_FBD_ERR_MASK;
482 errnum = find_first_bit(&errors,
483 ARRAY_SIZE(ferr_nf_fbd_name));
484 specific = GET_ERR_FROM_TABLE(ferr_nf_fbd_name, errnum);
485
486 /* Clear the error bit */
487		pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
488				       FERR_NF_FBD, value);
489
490 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
491 REDMEMA, &syndrome);
492
493 branch = (GET_FBD_FAT_IDX(value) == 2) ? 1 : 0;
494 pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map,
495 RECMEMA, &val16);
496 bank = RECMEMA_BANK(val16);
497 rank = RECMEMA_RANK(val16);
498
499 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
500 RECMEMB, &value);
501
502 is_wr = RECMEMB_IS_WR(value);
503 cas = RECMEMB_CAS(value);
504 ras = RECMEMB_RAS(value);
505
506 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
507 REDMEMB, &value);
508
509 channel = (branch << 1);
510 if (IS_SECOND_CH(value))
511 channel++;
512
513 /* Form out message */
514 snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
515 "Corrected error (Branch=%d, Channel %d), "
516			"DRAM-Bank=%d %s "
517			"RAS=%d CAS=%d, CE Err=0x%lx, Syndrome=0x%08x (%s)",
518 branch, channel,
519 bank,
520 is_wr ? "RDWR" : "RD",
521 ras, cas,
522 errors, syndrome, specific);
523
524 /*
525 * Call the helper to output message
526 * NOTE: Errors are reported per-branch, and not per-channel
527 * Currently, we don't know how to identify the right
528 * channel.
529 */
530 edac_mc_handle_fbd_ce(mci, rank, channel,
531 pvt->tmp_prt_buffer);
532 }
533 return;
534}
535
536/**
537 * i7300_check_error() - Calls the error checking subroutines
538 * @mci: struct mem_ctl_info pointer
539 */
540static void i7300_check_error(struct mem_ctl_info *mci)
541{
542 i7300_process_error_global(mci);
543 i7300_process_fbd_error(mci);
544}
545
546/**
547 * i7300_clear_error() - Clears the error registers
548 * @mci: struct mem_ctl_info pointer
549 */
550static void i7300_clear_error(struct mem_ctl_info *mci)
551{
552 struct i7300_pvt *pvt = mci->pvt_info;
553 u32 value;
554 /*
555	 * All error values are RWC - we need to read the register and write
556	 * 1 back to the bits that we want to clear
557 */
558
559 /* Clear global error registers */
560 pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
561 FERR_GLOBAL_HI, &value);
562 pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
563 FERR_GLOBAL_HI, value);
564
565 pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
566 FERR_GLOBAL_LO, &value);
567 pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
568 FERR_GLOBAL_LO, value);
569
570 /* Clear FBD error registers */
571 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
572 FERR_FAT_FBD, &value);
573 pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
574 FERR_FAT_FBD, value);
575
576 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
577 FERR_NF_FBD, &value);
578 pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
579 FERR_NF_FBD, value);
580}
581
582/**
583 * i7300_enable_error_reporting() - Enable the memory error reporting logic
584 *				      in the hardware
585 * @mci: struct mem_ctl_info pointer
586 */
587static void i7300_enable_error_reporting(struct mem_ctl_info *mci)
588{
589 struct i7300_pvt *pvt = mci->pvt_info;
590 u32 fbd_error_mask;
591
592 /* Read the FBD Error Mask Register */
593 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
594 EMASK_FBD, &fbd_error_mask);
595
596	/* A '0' bit in the mask enables reporting of the corresponding error */
597 fbd_error_mask &= ~(EMASK_FBD_ERR_MASK);
598
599 pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
600 EMASK_FBD, fbd_error_mask);
601}
602
603/************************************************
604 * i7300 Functions related to memory enumeration
605 ************************************************/
606
607/**
608 * decode_mtr() - Decodes the MTR descriptor, filling the edac structs
609 * @pvt: pointer to the private data struct used by i7300 driver
610 * @slot: DIMM slot (0 to 7)
611 * @ch: Channel number within the branch (0 or 1)
612 * @branch: Branch number (0 or 1)
613 * @dinfo: Pointer to DIMM info where dimm size is stored
614 * @p_csrow: Pointer to the struct csrow_info that corresponds to that element
615 */
616static int decode_mtr(struct i7300_pvt *pvt,
617 int slot, int ch, int branch,
618 struct i7300_dimm_info *dinfo,
619 struct csrow_info *p_csrow,
620 u32 *nr_pages)
621{
622 int mtr, ans, addrBits, channel;
623
624 channel = to_channel(ch, branch);
625
626 mtr = pvt->mtr[slot][branch];
627 ans = MTR_DIMMS_PRESENT(mtr) ? 1 : 0;
628
629 debugf2("\tMTR%d CH%d: DIMMs are %s (mtr)\n",
630 slot, channel,
631 ans ? "Present" : "NOT Present");
632
633 /* Determine if there is a DIMM present in this DIMM slot */
634 if (!ans)
635 return 0;
636
637 /* Start with the number of bits for a Bank
638 * on the DRAM */
639 addrBits = MTR_DRAM_BANKS_ADDR_BITS;
640	/* Add the number of ROW bits */
641 addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
642 /* add the number of COLUMN bits */
643 addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
644 /* add the number of RANK bits */
645 addrBits += MTR_DIMM_RANKS(mtr);
646
647 addrBits += 6; /* add 64 bits per DIMM */
648	addrBits -= 20;	/* divide by 2^20 */
649	addrBits -= 3;	/* 8 bits per byte */
650
651 dinfo->megabytes = 1 << addrBits;
652 *nr_pages = dinfo->megabytes << 8;
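	/*
	 * Editorial note, illustrative only (the geometry is hypothetical):
	 * with 2 bank address bits, 14 row bits, 10 column bits and 1 rank
	 * bit, addrBits = 2 + 14 + 10 + 1 + 6 - 20 - 3 = 10, so the DIMM
	 * size is 2^10 = 1024 MB and nr_pages = 1024 << 8 = 262144
	 * 4-KiB pages.
	 */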
653
654 debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
655
656 debugf2("\t\tELECTRICAL THROTTLING is %s\n",
657 MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled");
658
659 debugf2("\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
660 debugf2("\t\tNUMRANK: %s\n", MTR_DIMM_RANKS(mtr) ? "double" : "single");
661 debugf2("\t\tNUMROW: %s\n", numrow_toString[MTR_DIMM_ROWS(mtr)]);
662 debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
663 debugf2("\t\tSIZE: %d MB\n", dinfo->megabytes);
664
665 p_csrow->grain = 8;
666 p_csrow->mtype = MEM_FB_DDR2;
667 p_csrow->csrow_idx = slot;
668 p_csrow->page_mask = 0;
669
670 /*
671	 * The type of error detection actually depends on the
672	 * mode of operation. When there is just a single memory chip, at
673	 * socket 0, channel 0, it uses the 8-byte-over-32-byte SECDED+ code.
674	 * In normal or mirrored mode, it uses Lockstep mode,
675	 * with the possibility of using an extended algorithm for x8 memories.
676	 * See datasheet Sections 7.3.6 to 7.3.8.
677 */
678
679 if (IS_SINGLE_MODE(pvt->mc_settings_a)) {
680 p_csrow->edac_mode = EDAC_SECDED;
681 debugf2("\t\tECC code is 8-byte-over-32-byte SECDED+ code\n");
682 } else {
683 debugf2("\t\tECC code is on Lockstep mode\n");
684 if (MTR_DRAM_WIDTH(mtr) == 8)
685 p_csrow->edac_mode = EDAC_S8ECD8ED;
686 else
687 p_csrow->edac_mode = EDAC_S4ECD4ED;
688 }
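	/*
	 * Editorial note, illustrative only: with the logic above, a
	 * single-channel configuration reports EDAC_SECDED, while a normal
	 * or mirrored configuration reports EDAC_S8ECD8ED for x8 DIMMs and
	 * EDAC_S4ECD4ED for x4 DIMMs.
	 */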
689
690 /* ask what device type on this row */
691 if (MTR_DRAM_WIDTH(mtr) == 8) {
692 debugf2("\t\tScrub algorithm for x8 is on %s mode\n",
693 IS_SCRBALGO_ENHANCED(pvt->mc_settings) ?
694 "enhanced" : "normal");
695
696 p_csrow->dtype = DEV_X8;
697 } else
698 p_csrow->dtype = DEV_X4;
699
700 return mtr;
701}
702
703/**
704 * print_dimm_size() - Prints dump of the memory organization
705 * @pvt: pointer to the private data struct used by i7300 driver
706 *
707 * Useful for debugging. If debug is disabled, this routine does nothing.
708 */
709static void print_dimm_size(struct i7300_pvt *pvt)
710{
711#ifdef CONFIG_EDAC_DEBUG
712 struct i7300_dimm_info *dinfo;
713 char *p;
714 int space, n;
715 int channel, slot;
716
717 space = PAGE_SIZE;
718 p = pvt->tmp_prt_buffer;
719
720 n = snprintf(p, space, " ");
721 p += n;
722 space -= n;
723 for (channel = 0; channel < MAX_CHANNELS; channel++) {
724 n = snprintf(p, space, "channel %d | ", channel);
725 p += n;
726 space -= n;
727 }
728 debugf2("%s\n", pvt->tmp_prt_buffer);
729 p = pvt->tmp_prt_buffer;
730 space = PAGE_SIZE;
731 n = snprintf(p, space, "-------------------------------"
732 "------------------------------");
733 p += n;
734 space -= n;
735 debugf2("%s\n", pvt->tmp_prt_buffer);
736 p = pvt->tmp_prt_buffer;
737 space = PAGE_SIZE;
738
739 for (slot = 0; slot < MAX_SLOTS; slot++) {
740 n = snprintf(p, space, "csrow/SLOT %d ", slot);
741 p += n;
742 space -= n;
743
744 for (channel = 0; channel < MAX_CHANNELS; channel++) {
745 dinfo = &pvt->dimm_info[slot][channel];
746 n = snprintf(p, space, "%4d MB | ", dinfo->megabytes);
747 p += n;
748 space -= n;
749 }
750
751 debugf2("%s\n", pvt->tmp_prt_buffer);
752 p = pvt->tmp_prt_buffer;
753 space = PAGE_SIZE;
754 }
755
756 n = snprintf(p, space, "-------------------------------"
757 "------------------------------");
758 p += n;
759 space -= n;
760 debugf2("%s\n", pvt->tmp_prt_buffer);
761 p = pvt->tmp_prt_buffer;
762 space = PAGE_SIZE;
763#endif
764}
765
766/**
767 * i7300_init_csrows() - Initialize the 'csrows' table within
768 * the mci control structure with the
769 * addressing of memory.
770 * @mci: struct mem_ctl_info pointer
771 */
772static int i7300_init_csrows(struct mem_ctl_info *mci)
773{
774 struct i7300_pvt *pvt;
775 struct i7300_dimm_info *dinfo;
776 struct csrow_info *p_csrow;
777 int rc = -ENODEV;
778 int mtr;
779 int ch, branch, slot, channel;
780 u32 last_page = 0, nr_pages;
781
782 pvt = mci->pvt_info;
783
784 debugf2("Memory Technology Registers:\n");
785
786 /* Get the AMB present registers for the four channels */
787 for (branch = 0; branch < MAX_BRANCHES; branch++) {
788		/* Read and dump the AMB-present registers for this branch */
789 channel = to_channel(0, branch);
790 pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
791 AMBPRESENT_0,
792 &pvt->ambpresent[channel]);
793 debugf2("\t\tAMB-present CH%d = 0x%x:\n",
794 channel, pvt->ambpresent[channel]);
795
796 channel = to_channel(1, branch);
797 pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
798 AMBPRESENT_1,
799 &pvt->ambpresent[channel]);
800 debugf2("\t\tAMB-present CH%d = 0x%x:\n",
801 channel, pvt->ambpresent[channel]);
802 }
803
804 /* Get the set of MTR[0-7] regs by each branch */
805 for (slot = 0; slot < MAX_SLOTS; slot++) {
806 int where = mtr_regs[slot];
807 for (branch = 0; branch < MAX_BRANCHES; branch++) {
808 pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
809 where,
810 &pvt->mtr[slot][branch]);
811 for (ch = 0; ch < MAX_BRANCHES; ch++) {
812 int channel = to_channel(ch, branch);
813
814 dinfo = &pvt->dimm_info[slot][channel];
815 p_csrow = &mci->csrows[slot];
816
817 mtr = decode_mtr(pvt, slot, ch, branch,
818 dinfo, p_csrow, &nr_pages);
819 /* if no DIMMS on this row, continue */
820 if (!MTR_DIMMS_PRESENT(mtr))
821 continue;
822
823 /* Update per_csrow memory count */
824 p_csrow->nr_pages += nr_pages;
825 p_csrow->first_page = last_page;
826 last_page += nr_pages;
827 p_csrow->last_page = last_page;
828
829 rc = 0;
830 }
831 }
832 }
833
834 return rc;
835}
836
837/**
838 * decode_mir() - Decodes Memory Interleave Register (MIR) info
839 * @mir_no: number of the MIR register to decode
840 * @mir: array with the MIR data cached on the driver
841 */
842static void decode_mir(int mir_no, u16 mir[MAX_MIR])
843{
844 if (mir[mir_no] & 3)
845 debugf2("MIR%d: limit= 0x%x Branch(es) that participate:"
846 " %s %s\n",
847 mir_no,
848 (mir[mir_no] >> 4) & 0xfff,
849 (mir[mir_no] & 1) ? "B0" : "",
850 (mir[mir_no] & 2) ? "B1" : "");
851}
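/*
 * Illustrative decode (editorial addition; the MIR value is hypothetical):
 * for mir[0] = 0x0103, the limit field is (0x0103 >> 4) & 0xfff = 0x10 and
 * bits 0 and 1 are both set, so decode_mir() would print
 * "MIR0: limit= 0x10 Branch(es) that participate: B0 B1".
 */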
852
853/**
854 * i7300_get_mc_regs() - Get the contents of the MC enumeration registers
855 * @mci: struct mem_ctl_info pointer
856 *
857 * The data read is cached internally for later use.
858 */
859static int i7300_get_mc_regs(struct mem_ctl_info *mci)
860{
861 struct i7300_pvt *pvt;
862 u32 actual_tolm;
863 int i, rc;
864
865 pvt = mci->pvt_info;
866
867 pci_read_config_dword(pvt->pci_dev_16_0_fsb_ctlr, AMBASE,
868 (u32 *) &pvt->ambase);
869
870 debugf2("AMBASE= 0x%lx\n", (long unsigned int)pvt->ambase);
871
872 /* Get the Branch Map regs */
873 pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, TOLM, &pvt->tolm);
874 pvt->tolm >>= 12;
875 debugf2("TOLM (number of 256M regions) =%u (0x%x)\n", pvt->tolm,
876 pvt->tolm);
877
878 actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28));
879 debugf2("Actual TOLM byte addr=%u.%03u GB (0x%x)\n",
880 actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28);
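	/*
	 * Editorial note, illustrative only (the value is hypothetical): if
	 * pvt->tolm is 8 (i.e. 8 regions of 256 MB), then actual_tolm =
	 * (1000 * 8) >> 2 = 2000, and the message above reads
	 * "Actual TOLM byte addr=2.000 GB (0x80000000)".
	 */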
881
882 /* Get memory controller settings */
883 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS,
884 &pvt->mc_settings);
885 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS_A,
886 &pvt->mc_settings_a);
887
888 if (IS_SINGLE_MODE(pvt->mc_settings_a))
889 debugf0("Memory controller operating on single mode\n");
890 else
891 debugf0("Memory controller operating on %s mode\n",
892 IS_MIRRORED(pvt->mc_settings) ? "mirrored" : "non-mirrored");
893
894 debugf0("Error detection is %s\n",
895 IS_ECC_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
896 debugf0("Retry is %s\n",
897 IS_RETRY_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
898
899 /* Get Memory Interleave Range registers */
900 pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR0,
901 &pvt->mir[0]);
902 pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR1,
903 &pvt->mir[1]);
904 pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR2,
905 &pvt->mir[2]);
906
907 /* Decode the MIR regs */
908 for (i = 0; i < MAX_MIR; i++)
909 decode_mir(i, pvt->mir);
910
911 rc = i7300_init_csrows(mci);
912 if (rc < 0)
913 return rc;
914
915 /* Go and determine the size of each DIMM and place in an
916 * orderly matrix */
917 print_dimm_size(pvt);
918
919 return 0;
920}
921
922/*************************************************
923 * i7300 Functions related to device probe/release
924 *************************************************/
925
926/**
927 * i7300_put_devices() - Release the PCI devices
928 * @mci: struct mem_ctl_info pointer
929 */
930static void i7300_put_devices(struct mem_ctl_info *mci)
931{
932 struct i7300_pvt *pvt;
933 int branch;
934
935 pvt = mci->pvt_info;
936
937 /* Decrement usage count for devices */
938	for (branch = 0; branch < MAX_BRANCHES; branch++)
939 pci_dev_put(pvt->pci_dev_2x_0_fbd_branch[branch]);
940 pci_dev_put(pvt->pci_dev_16_2_fsb_err_regs);
941 pci_dev_put(pvt->pci_dev_16_1_fsb_addr_map);
942}
943
944/**
945 * i7300_get_devices() - Find and perform 'get' operation on the MCH's
946 * device/functions we want to reference for this driver
947 * @mci: struct mem_ctl_info pointer
948 *
949 * Access and prepare the several devices for usage:
950 * I7300 devices used by this driver:
951 * Device 16, functions 0,1 and 2: PCI_DEVICE_ID_INTEL_I7300_MCH_ERR
952 * Device 21 function 0: PCI_DEVICE_ID_INTEL_I7300_MCH_FB0
953 * Device 22 function 0: PCI_DEVICE_ID_INTEL_I7300_MCH_FB1
954 */
955static int __devinit i7300_get_devices(struct mem_ctl_info *mci)
956{
957 struct i7300_pvt *pvt;
958 struct pci_dev *pdev;
959
960 pvt = mci->pvt_info;
961
962 /* Attempt to 'get' the MCH register we want */
963 pdev = NULL;
964 while (!pvt->pci_dev_16_1_fsb_addr_map ||
965 !pvt->pci_dev_16_2_fsb_err_regs) {
966 pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
967 PCI_DEVICE_ID_INTEL_I7300_MCH_ERR, pdev);
968 if (!pdev) {
969 /* End of list, leave */
970 i7300_printk(KERN_ERR,
971			"'system address, Process Bus' "
972			"device not found: "
973 "vendor 0x%x device 0x%x ERR funcs "
974 "(broken BIOS?)\n",
975 PCI_VENDOR_ID_INTEL,
976 PCI_DEVICE_ID_INTEL_I7300_MCH_ERR);
977 goto error;
978 }
979
980 /* Store device 16 funcs 1 and 2 */
981 switch (PCI_FUNC(pdev->devfn)) {
982 case 1:
983 pvt->pci_dev_16_1_fsb_addr_map = pdev;
984 break;
985 case 2:
986 pvt->pci_dev_16_2_fsb_err_regs = pdev;
987 break;
988 }
989 }
990
991 debugf1("System Address, processor bus- PCI Bus ID: %s %x:%x\n",
992 pci_name(pvt->pci_dev_16_0_fsb_ctlr),
993 pvt->pci_dev_16_0_fsb_ctlr->vendor,
994 pvt->pci_dev_16_0_fsb_ctlr->device);
995 debugf1("Branchmap, control and errors - PCI Bus ID: %s %x:%x\n",
996 pci_name(pvt->pci_dev_16_1_fsb_addr_map),
997 pvt->pci_dev_16_1_fsb_addr_map->vendor,
998 pvt->pci_dev_16_1_fsb_addr_map->device);
999 debugf1("FSB Error Regs - PCI Bus ID: %s %x:%x\n",
1000 pci_name(pvt->pci_dev_16_2_fsb_err_regs),
1001 pvt->pci_dev_16_2_fsb_err_regs->vendor,
1002 pvt->pci_dev_16_2_fsb_err_regs->device);
1003
1004 pvt->pci_dev_2x_0_fbd_branch[0] = pci_get_device(PCI_VENDOR_ID_INTEL,
1005 PCI_DEVICE_ID_INTEL_I7300_MCH_FB0,
1006 NULL);
1007 if (!pvt->pci_dev_2x_0_fbd_branch[0]) {
1008 i7300_printk(KERN_ERR,
1009			"MC: 'BRANCH 0' device not found: "
1010 "vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n",
1011 PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_FB0);
1012 goto error;
1013 }
1014
1015 pvt->pci_dev_2x_0_fbd_branch[1] = pci_get_device(PCI_VENDOR_ID_INTEL,
1016 PCI_DEVICE_ID_INTEL_I7300_MCH_FB1,
1017 NULL);
1018 if (!pvt->pci_dev_2x_0_fbd_branch[1]) {
1019 i7300_printk(KERN_ERR,
1020			"MC: 'BRANCH 1' device not found: "
1021 "vendor 0x%x device 0x%x Func 0 "
1022 "(broken BIOS?)\n",
1023 PCI_VENDOR_ID_INTEL,
1024 PCI_DEVICE_ID_INTEL_I7300_MCH_FB1);
1025 goto error;
1026 }
1027
1028 return 0;
1029
1030error:
1031 i7300_put_devices(mci);
1032 return -ENODEV;
1033}
1034
1035/**
1036 * i7300_init_one() - Probe for one instance of the device
1037 * @pdev: struct pci_dev pointer
1038 * @id: struct pci_device_id pointer - currently unused
1039 */
1040static int __devinit i7300_init_one(struct pci_dev *pdev,
1041 const struct pci_device_id *id)
1042{
1043 struct mem_ctl_info *mci;
1044 struct i7300_pvt *pvt;
1045 int num_channels;
1046 int num_dimms_per_channel;
1047 int num_csrows;
1048 int rc;
1049
1050 /* wake up device */
1051 rc = pci_enable_device(pdev);
1052 if (rc == -EIO)
1053 return rc;
1054
1055 debugf0("MC: " __FILE__ ": %s(), pdev bus %u dev=0x%x fn=0x%x\n",
1056 __func__,
1057 pdev->bus->number,
1058 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1059
1060 /* We only are looking for func 0 of the set */
1061 if (PCI_FUNC(pdev->devfn) != 0)
1062 return -ENODEV;
1063
1064	/* As we don't have a motherboard identification routine to determine
1065	 * the actual number of slots/dimms per channel, we use the maximum
1066	 * specified by the chipset. Thus, we might have
1067	 * more DIMMs per channel than are actually on the mobo, but this
1068	 * allows the driver to support up to the chipset max, without
1069	 * some fancy mobo determination.
1070	 */
1071 num_dimms_per_channel = MAX_SLOTS;
1072 num_channels = MAX_CHANNELS;
1073 num_csrows = MAX_SLOTS * MAX_CHANNELS;
1074
1075 debugf0("MC: %s(): Number of - Channels= %d DIMMS= %d CSROWS= %d\n",
1076 __func__, num_channels, num_dimms_per_channel, num_csrows);
1077
1078 /* allocate a new MC control structure */
1079 mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0);
1080
1081 if (mci == NULL)
1082 return -ENOMEM;
1083
1084 debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
1085
1086 mci->dev = &pdev->dev; /* record ptr to the generic device */
1087
1088 pvt = mci->pvt_info;
1089	pvt->pci_dev_16_0_fsb_ctlr = pdev;	/* Record this device in our private data */
1090
1091 pvt->tmp_prt_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
1092 if (!pvt->tmp_prt_buffer) {
1093 edac_mc_free(mci);
1094 return -ENOMEM;
1095 }
1096
1097 /* 'get' the pci devices we want to reserve for our use */
1098 if (i7300_get_devices(mci))
1099 goto fail0;
1100
1101 mci->mc_idx = 0;
1102 mci->mtype_cap = MEM_FLAG_FB_DDR2;
1103 mci->edac_ctl_cap = EDAC_FLAG_NONE;
1104 mci->edac_cap = EDAC_FLAG_NONE;
1105 mci->mod_name = "i7300_edac.c";
1106 mci->mod_ver = I7300_REVISION;
1107 mci->ctl_name = i7300_devs[0].ctl_name;
1108 mci->dev_name = pci_name(pdev);
1109 mci->ctl_page_to_phys = NULL;
1110
1111 /* Set the function pointer to an actual operation function */
1112 mci->edac_check = i7300_check_error;
1113
1114 /* initialize the MC control structure 'csrows' table
1115 * with the mapping and control information */
1116 if (i7300_get_mc_regs(mci)) {
1117 debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n"
1118 " because i7300_init_csrows() returned nonzero "
1119 "value\n");
1120 mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */
1121 } else {
1122 debugf1("MC: Enable error reporting now\n");
1123 i7300_enable_error_reporting(mci);
1124 }
1125
1126 /* add this new MC control structure to EDAC's list of MCs */
1127 if (edac_mc_add_mc(mci)) {
1128 debugf0("MC: " __FILE__
1129 ": %s(): failed edac_mc_add_mc()\n", __func__);
1130 /* FIXME: perhaps some code should go here that disables error
1131 * reporting if we just enabled it
1132 */
1133 goto fail1;
1134 }
1135
1136 i7300_clear_error(mci);
1137
1138 /* allocating generic PCI control info */
1139 i7300_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
1140 if (!i7300_pci) {
1141 printk(KERN_WARNING
1142 "%s(): Unable to create PCI control\n",
1143 __func__);
1144 printk(KERN_WARNING
1145 "%s(): PCI error report via EDAC not setup\n",
1146 __func__);
1147 }
1148
1149 return 0;
1150
1151 /* Error exit unwinding stack */
1152fail1:
1153
1154 i7300_put_devices(mci);
1155
1156fail0:
1157 kfree(pvt->tmp_prt_buffer);
1158 edac_mc_free(mci);
1159 return -ENODEV;
1160}
1161
1162/**
1163 * i7300_remove_one() - Remove the driver
1164 * @pdev: struct pci_dev pointer
1165 */
1166static void __devexit i7300_remove_one(struct pci_dev *pdev)
1167{
1168 struct mem_ctl_info *mci;
1169 char *tmp;
1170
1171 debugf0(__FILE__ ": %s()\n", __func__);
1172
1173 if (i7300_pci)
1174 edac_pci_release_generic_ctl(i7300_pci);
1175
1176 mci = edac_mc_del_mc(&pdev->dev);
1177 if (!mci)
1178 return;
1179
1180 tmp = ((struct i7300_pvt *)mci->pvt_info)->tmp_prt_buffer;
1181
1182 /* retrieve references to resources, and free those resources */
1183 i7300_put_devices(mci);
1184
1185 kfree(tmp);
1186 edac_mc_free(mci);
1187}
1188
1189/*
1190 * pci_device_id: table for which devices we are looking for
1191 *
1192 * Has only 8086:360c PCI ID
1193 */
1194static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
1195 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
1196 {0,} /* 0 terminated list. */
1197};
1198
1199MODULE_DEVICE_TABLE(pci, i7300_pci_tbl);
1200
1201/*
1202 * i7300_driver: pci_driver structure for this module
1203 */
1204static struct pci_driver i7300_driver = {
1205 .name = "i7300_edac",
1206 .probe = i7300_init_one,
1207 .remove = __devexit_p(i7300_remove_one),
1208 .id_table = i7300_pci_tbl,
1209};
1210
1211/**
1212 * i7300_init() - Registers the driver
1213 */
1214static int __init i7300_init(void)
1215{
1216 int pci_rc;
1217
1218 debugf2("MC: " __FILE__ ": %s()\n", __func__);
1219
1220 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
1221 opstate_init();
1222
1223 pci_rc = pci_register_driver(&i7300_driver);
1224
1225 return (pci_rc < 0) ? pci_rc : 0;
1226}
1227
1228/**
1229 * i7300_exit() - Unregisters the driver
1230 */
1231static void __exit i7300_exit(void)
1232{
1233 debugf2("MC: " __FILE__ ": %s()\n", __func__);
1234 pci_unregister_driver(&i7300_driver);
1235}
1236
1237module_init(i7300_init);
1238module_exit(i7300_exit);
1239
1240MODULE_LICENSE("GPL");
1241MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
1242MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
1243MODULE_DESCRIPTION("MC Driver for Intel I7300 memory controllers - "
1244 I7300_REVISION);
1245
1246module_param(edac_op_state, int, 0444);
1247MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
index a2fa1feed724..678405ab04e4 100644
--- a/drivers/edac/i82443bxgx_edac.c
+++ b/drivers/edac/i82443bxgx_edac.c
@@ -12,7 +12,7 @@
12 * 440GX fix by Jason Uhlenkott <juhlenko@akamai.com>. 12 * 440GX fix by Jason Uhlenkott <juhlenko@akamai.com>.
13 * 13 *
14 * Written with reference to 82443BX Host Bridge Datasheet: 14 * Written with reference to 82443BX Host Bridge Datasheet:
15 * http://www.intel.com/design/chipsets/440/documentation.htm 15 * http://download.intel.com/design/chipsets/datashts/29063301.pdf
16 * references to this document given in []. 16 * references to this document given in [].
17 * 17 *
18 * This module doesn't support the 440LX, but it may be possible to 18 * This module doesn't support the 440LX, but it may be possible to
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 88a3ae6cd023..e8b6a13515bd 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -74,7 +74,8 @@ config EFI_PCDP
74 74
75 You must also enable the appropriate drivers (serial, VGA, etc.) 75 You must also enable the appropriate drivers (serial, VGA, etc.)
76 76
77 See <http://www.dig64.org/specifications/DIG64_HCDPv20_042804.pdf> 77 See DIG64_HCDPv20_042804.pdf available from
78 <http://www.dig64.org/specifications/>
78 79
79config DELL_RBU 80config DELL_RBU
80 tristate "BIOS update support for DELL systems via sysfs" 81 tristate "BIOS update support for DELL systems via sysfs"
diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
index f287fe79edc4..96c25d93eed1 100644
--- a/drivers/firmware/edd.c
+++ b/drivers/firmware/edd.c
@@ -15,7 +15,7 @@
15 * made in setup.S, copied to safe structures in setup.c, 15 * made in setup.S, copied to safe structures in setup.c,
16 * and presents it in sysfs. 16 * and presents it in sysfs.
17 * 17 *
18 * Please see http://linux.dell.com/edd30/results.html for 18 * Please see http://linux.dell.com/edd/results.html for
19 * the list of BIOSs which have been reported to implement EDD. 19 * the list of BIOSs which have been reported to implement EDD.
20 * 20 *
21 * This program is free software; you can redistribute it and/or modify 21 * This program is free software; you can redistribute it and/or modify
diff --git a/drivers/firmware/pcdp.h b/drivers/firmware/pcdp.h
index ce910d68bd19..e5530608e00d 100644
--- a/drivers/firmware/pcdp.h
+++ b/drivers/firmware/pcdp.h
@@ -1,8 +1,8 @@
1/* 1/*
2 * Definitions for PCDP-defined console devices 2 * Definitions for PCDP-defined console devices
3 * 3 *
4 * v1.0a: http://www.dig64.org/specifications/DIG64_HCDPv10a_01.pdf 4 * For DIG64_HCDPv10a_01.pdf and DIG64_PCDPv20.pdf (v1.0a and v2.0 resp.),
5 * v2.0: http://www.dig64.org/specifications/DIG64_PCDPv20.pdf 5 * please see <http://www.dig64.org/specifications/>
6 * 6 *
7 * (c) Copyright 2002, 2004 Hewlett-Packard Development Company, L.P. 7 * (c) Copyright 2002, 2004 Hewlett-Packard Development Company, L.P.
8 * Khalid Aziz <khalid.aziz@hp.com> 8 * Khalid Aziz <khalid.aziz@hp.com>
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 949326d2a8e5..58e65f92c232 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -76,7 +76,7 @@ EXPORT_SYMBOL(drm_mode_debug_printmodeline);
76 * according to the hdisplay, vdisplay, vrefresh. 76 * according to the hdisplay, vdisplay, vrefresh.
77 * It is based from the VESA(TM) Coordinated Video Timing Generator by 77 * It is based from the VESA(TM) Coordinated Video Timing Generator by
78 * Graham Loveridge April 9, 2003 available at 78 * Graham Loveridge April 9, 2003 available at
79 * http://www.vesa.org/public/CVT/CVTd6r1.xls 79 * http://www.elo.utfsm.cl/~elo212/docs/CVTd6r1.xls
80 * 80 *
81 * And it is copied from xf86CVTmode in xserver/hw/xfree86/modes/xf86cvt.c. 81 * And it is copied from xf86CVTmode in xserver/hw/xfree86/modes/xf86cvt.c.
82 * What I have done is to translate it by using integer calculation. 82 * What I have done is to translate it by using integer calculation.
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.h b/drivers/gpu/drm/nouveau/nouveau_i2c.h
index f71cb32f7571..cfe7c8426d1d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.h
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.h
@@ -24,7 +24,6 @@
24#define __NOUVEAU_I2C_H__ 24#define __NOUVEAU_I2C_H__
25 25
26#include <linux/i2c.h> 26#include <linux/i2c.h>
27#include <linux/i2c-id.h>
28#include <linux/i2c-algo-bit.h> 27#include <linux/i2c-algo-bit.h>
29#include "drm_dp_helper.h" 28#include "drm_dp_helper.h"
30 29
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index fcc79b5d22d1..6d64a2705f12 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -268,7 +268,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
268 } 268 }
269 r = radeon_ib_schedule(rdev, parser.ib); 269 r = radeon_ib_schedule(rdev, parser.ib);
270 if (r) { 270 if (r) {
271 DRM_ERROR("Faild to schedule IB !\n"); 271 DRM_ERROR("Failed to schedule IB !\n");
272 } 272 }
273 radeon_cs_parser_fini(&parser, r); 273 radeon_cs_parser_fini(&parser, r);
274 mutex_unlock(&rdev->cs_mutex); 274 mutex_unlock(&rdev->cs_mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 17a6602b5885..454c1dc7ea45 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -36,7 +36,6 @@
36#include <drm_dp_helper.h> 36#include <drm_dp_helper.h>
37#include <drm_fixed.h> 37#include <drm_fixed.h>
38#include <linux/i2c.h> 38#include <linux/i2c.h>
39#include <linux/i2c-id.h>
40#include <linux/i2c-algo-bit.h> 39#include <linux/i2c-algo-bit.h>
41 40
42struct radeon_bo; 41struct radeon_bo;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 84c53e41a88f..a823d8fe54c2 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -631,7 +631,7 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
631 return drm_mmap(filp, vma); 631 return drm_mmap(filp, vma);
632 } 632 }
633 633
634 file_priv = (struct drm_file *)filp->private_data; 634 file_priv = filp->private_data;
635 rdev = file_priv->minor->dev->dev_private; 635 rdev = file_priv->minor->dev->dev_private;
636 if (rdev == NULL) { 636 if (rdev == NULL) {
637 return -EINVAL; 637 return -EINVAL;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 0fe31766e4cf..635c0ffee7fe 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -545,7 +545,7 @@ int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma)
545 struct drm_file *file_priv; 545 struct drm_file *file_priv;
546 struct vmw_private *dev_priv; 546 struct vmw_private *dev_priv;
547 547
548 file_priv = (struct drm_file *)filp->private_data; 548 file_priv = filp->private_data;
549 dev_priv = vmw_priv(file_priv->minor->dev); 549 dev_priv = vmw_priv(file_priv->minor->dev);
550 550
551 if (vma->vm_pgoff != (dev_priv->mmio_start >> PAGE_SHIFT) || 551 if (vma->vm_pgoff != (dev_priv->mmio_start >> PAGE_SHIFT) ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index 83123287c60c..1e8eedd901e0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -39,7 +39,7 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
39 return drm_mmap(filp, vma); 39 return drm_mmap(filp, vma);
40 } 40 }
41 41
42 file_priv = (struct drm_file *)filp->private_data; 42 file_priv = filp->private_data;
43 dev_priv = vmw_priv(file_priv->minor->dev); 43 dev_priv = vmw_priv(file_priv->minor->dev);
44 return ttm_bo_mmap(filp, vma, &dev_priv->bdev); 44 return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
45} 45}
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 6369ba7f96f8..3052e2969ad0 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -56,20 +56,20 @@ menu "Special HID drivers"
56 depends on HID 56 depends on HID
57 57
58config HID_3M_PCT 58config HID_3M_PCT
59 tristate "3M PCT" 59 tristate "3M PCT touchscreen"
60 depends on USB_HID 60 depends on USB_HID
61 ---help--- 61 ---help---
62 Support for 3M PCT touch screens. 62 Support for 3M PCT touch screens.
63 63
64config HID_A4TECH 64config HID_A4TECH
65 tristate "A4 tech" if EMBEDDED 65 tristate "A4 tech mice" if EMBEDDED
66 depends on USB_HID 66 depends on USB_HID
67 default !EMBEDDED 67 default !EMBEDDED
68 ---help--- 68 ---help---
69 Support for A4 tech X5 and WOP-35 / Trust 450L mice. 69 Support for A4 tech X5 and WOP-35 / Trust 450L mice.
70 70
71config HID_ACRUX_FF 71config HID_ACRUX_FF
72 tristate "ACRUX force feedback support" 72 tristate "ACRUX force feedback"
73 depends on USB_HID 73 depends on USB_HID
74 select INPUT_FF_MEMLESS 74 select INPUT_FF_MEMLESS
75 ---help--- 75 ---help---
@@ -77,7 +77,7 @@ config HID_ACRUX_FF
77 game controllers. 77 game controllers.
78 78
79config HID_APPLE 79config HID_APPLE
80 tristate "Apple" if EMBEDDED 80 tristate "Apple {i,Power,Mac}Books" if EMBEDDED
81 depends on (USB_HID || BT_HIDP) 81 depends on (USB_HID || BT_HIDP)
82 default !EMBEDDED 82 default !EMBEDDED
83 ---help--- 83 ---help---
@@ -88,7 +88,7 @@ config HID_APPLE
88 MacBooks, MacBook Pros and Apple Aluminum. 88 MacBooks, MacBook Pros and Apple Aluminum.
89 89
90config HID_BELKIN 90config HID_BELKIN
91 tristate "Belkin" if EMBEDDED 91 tristate "Belkin Flip KVM and Wireless keyboard" if EMBEDDED
92 depends on USB_HID 92 depends on USB_HID
93 default !EMBEDDED 93 default !EMBEDDED
94 ---help--- 94 ---help---
@@ -101,14 +101,14 @@ config HID_CANDO
101 Support for Cando dual touch panel. 101 Support for Cando dual touch panel.
102 102
103config HID_CHERRY 103config HID_CHERRY
104 tristate "Cherry" if EMBEDDED 104 tristate "Cherry Cymotion keyboard" if EMBEDDED
105 depends on USB_HID 105 depends on USB_HID
106 default !EMBEDDED 106 default !EMBEDDED
107 ---help--- 107 ---help---
108 Support for Cherry Cymotion keyboard. 108 Support for Cherry Cymotion keyboard.
109 109
110config HID_CHICONY 110config HID_CHICONY
111 tristate "Chicony" if EMBEDDED 111 tristate "Chicony Tactical pad" if EMBEDDED
112 depends on USB_HID 112 depends on USB_HID
113 default !EMBEDDED 113 default !EMBEDDED
114 ---help--- 114 ---help---
@@ -130,20 +130,20 @@ config HID_PRODIKEYS
130 and some additional multimedia keys. 130 and some additional multimedia keys.
131 131
132config HID_CYPRESS 132config HID_CYPRESS
133 tristate "Cypress" if EMBEDDED 133 tristate "Cypress mouse and barcode readers" if EMBEDDED
134 depends on USB_HID 134 depends on USB_HID
135 default !EMBEDDED 135 default !EMBEDDED
136 ---help--- 136 ---help---
137 Support for cypress mouse and barcode readers. 137 Support for cypress mouse and barcode readers.
138 138
139config HID_DRAGONRISE 139config HID_DRAGONRISE
140 tristate "DragonRise Inc. support" 140 tristate "DragonRise Inc. game controller"
141 depends on USB_HID 141 depends on USB_HID
142 ---help--- 142 ---help---
143 Say Y here if you have DragonRise Inc.game controllers. 143 Say Y here if you have DragonRise Inc.game controllers.
144 144
145config DRAGONRISE_FF 145config DRAGONRISE_FF
146 bool "DragonRise Inc. force feedback support" 146 bool "DragonRise Inc. force feedback"
147 depends on HID_DRAGONRISE 147 depends on HID_DRAGONRISE
148 select INPUT_FF_MEMLESS 148 select INPUT_FF_MEMLESS
149 ---help--- 149 ---help---
@@ -157,46 +157,58 @@ config HID_EGALAX
157 Support for the eGalax dual-touch panel. 157 Support for the eGalax dual-touch panel.
158 158
159config HID_ELECOM 159config HID_ELECOM
160 tristate "ELECOM" 160 tristate "ELECOM BM084 bluetooth mouse"
161 depends on BT_HIDP 161 depends on BT_HIDP
162 ---help--- 162 ---help---
163 Support for the ELECOM BM084 (bluetooth mouse). 163 Support for the ELECOM BM084 (bluetooth mouse).
164 164
165config HID_EZKEY 165config HID_EZKEY
166 tristate "Ezkey" if EMBEDDED 166 tristate "Ezkey BTC 8193 keyboard" if EMBEDDED
167 depends on USB_HID 167 depends on USB_HID
168 default !EMBEDDED 168 default !EMBEDDED
169 ---help--- 169 ---help---
170 Support for Ezkey BTC 8193 keyboard. 170 Support for Ezkey BTC 8193 keyboard.
171 171
172config HID_KYE 172config HID_KYE
173 tristate "Kye" if EMBEDDED 173 tristate "Kye/Genius Ergo Mouse" if EMBEDDED
174 depends on USB_HID 174 depends on USB_HID
175 default !EMBEDDED 175 default !EMBEDDED
176 ---help--- 176 ---help---
177 Support for Kye/Genius Ergo Mouse. 177 Support for Kye/Genius Ergo Mouse.
178 178
179config HID_UCLOGIC
180 tristate "UC-Logic"
181 depends on USB_HID
182 ---help---
183 Support for UC-Logic tablets.
184
185config HID_WALTOP
186 tristate "Waltop"
187 depends on USB_HID
188 ---help---
189 Support for Waltop tablets.
190
179config HID_GYRATION 191config HID_GYRATION
180 tristate "Gyration" 192 tristate "Gyration remote control"
181 depends on USB_HID 193 depends on USB_HID
182 ---help--- 194 ---help---
183 Support for Gyration remote control. 195 Support for Gyration remote control.
184 196
185config HID_TWINHAN 197config HID_TWINHAN
186 tristate "Twinhan" 198 tristate "Twinhan IR remote control"
187 depends on USB_HID 199 depends on USB_HID
188 ---help--- 200 ---help---
189 Support for Twinhan IR remote control. 201 Support for Twinhan IR remote control.
190 202
191config HID_KENSINGTON 203config HID_KENSINGTON
192 tristate "Kensington" if EMBEDDED 204 tristate "Kensington Slimblade Trackball" if EMBEDDED
193 depends on USB_HID 205 depends on USB_HID
194 default !EMBEDDED 206 default !EMBEDDED
195 ---help--- 207 ---help---
196 Support for Kensington Slimblade Trackball. 208 Support for Kensington Slimblade Trackball.
197 209
198config HID_LOGITECH 210config HID_LOGITECH
199 tristate "Logitech" if EMBEDDED 211 tristate "Logitech devices" if EMBEDDED
200 depends on USB_HID 212 depends on USB_HID
201 default !EMBEDDED 213 default !EMBEDDED
202 ---help--- 214 ---help---
@@ -220,12 +232,12 @@ config LOGITECH_FF
220 force feedback. 232 force feedback.
221 233
222config LOGIRUMBLEPAD2_FF 234config LOGIRUMBLEPAD2_FF
223 bool "Logitech Rumblepad 2 force feedback support" 235 bool "Logitech RumblePad/Rumblepad 2 force feedback support"
224 depends on HID_LOGITECH 236 depends on HID_LOGITECH
225 select INPUT_FF_MEMLESS 237 select INPUT_FF_MEMLESS
226 help 238 help
227 Say Y here if you want to enable force feedback support for Logitech 239 Say Y here if you want to enable force feedback support for Logitech
228 Rumblepad 2 devices. 240 RumblePad and Rumblepad 2 devices.
229 241
230config LOGIG940_FF 242config LOGIG940_FF
231 bool "Logitech Flight System G940 force feedback support" 243 bool "Logitech Flight System G940 force feedback support"
@@ -235,6 +247,14 @@ config LOGIG940_FF
235 Say Y here if you want to enable force feedback support for Logitech 247 Say Y here if you want to enable force feedback support for Logitech
236 Flight System G940 devices. 248 Flight System G940 devices.
237 249
250config LOGIWII_FF
251 bool "Logitech Speed Force Wireless force feedback support"
252 depends on HID_LOGITECH
253 select INPUT_FF_MEMLESS
254 help
255 Say Y here if you want to enable force feedback support for Logitech
256 Speed Force Wireless (Wii) devices.
257
238config HID_MAGICMOUSE 258config HID_MAGICMOUSE
239 tristate "Apple MagicMouse multi-touch support" 259 tristate "Apple MagicMouse multi-touch support"
240 depends on BT_HIDP 260 depends on BT_HIDP
@@ -245,39 +265,39 @@ config HID_MAGICMOUSE
245 Apple Wireless "Magic" Mouse. 265 Apple Wireless "Magic" Mouse.
246 266
247config HID_MICROSOFT 267config HID_MICROSOFT
248 tristate "Microsoft" if EMBEDDED 268 tristate "Microsoft non-fully HID-compliant devices" if EMBEDDED
249 depends on USB_HID 269 depends on USB_HID
250 default !EMBEDDED 270 default !EMBEDDED
251 ---help--- 271 ---help---
252 Support for Microsoft devices that are not fully compliant with HID standard. 272 Support for Microsoft devices that are not fully compliant with HID standard.
253 273
254config HID_MOSART 274config HID_MOSART
255 tristate "MosArt" 275 tristate "MosArt dual-touch panels"
256 depends on USB_HID 276 depends on USB_HID
257 ---help--- 277 ---help---
258 Support for MosArt dual-touch panels. 278 Support for MosArt dual-touch panels.
259 279
260config HID_MONTEREY 280config HID_MONTEREY
261 tristate "Monterey" if EMBEDDED 281 tristate "Monterey Genius KB29E keyboard" if EMBEDDED
262 depends on USB_HID 282 depends on USB_HID
263 default !EMBEDDED 283 default !EMBEDDED
264 ---help--- 284 ---help---
265 Support for Monterey Genius KB29E. 285 Support for Monterey Genius KB29E.
266 286
267config HID_NTRIG 287config HID_NTRIG
268 tristate "NTrig" 288 tristate "N-Trig touch screen"
269 depends on USB_HID 289 depends on USB_HID
270 ---help--- 290 ---help---
271 Support for N-Trig touch screen. 291 Support for N-Trig touch screen.
272 292
273config HID_ORTEK 293config HID_ORTEK
274 tristate "Ortek" 294 tristate "Ortek WKB-2000 wireless keyboard and mouse trackpad"
275 depends on USB_HID 295 depends on USB_HID
276 ---help--- 296 ---help---
277 Support for Ortek WKB-2000 wireless keyboard + mouse trackpad. 297 Support for Ortek WKB-2000 wireless keyboard + mouse trackpad.
278 298
279config HID_PANTHERLORD 299config HID_PANTHERLORD
280 tristate "Pantherlord support" 300 tristate "Pantherlord/GreenAsia game controller"
281 depends on USB_HID 301 depends on USB_HID
282 ---help--- 302 ---help---
283 Say Y here if you have a PantherLord/GreenAsia based game controller 303 Say Y here if you have a PantherLord/GreenAsia based game controller
@@ -292,7 +312,7 @@ config PANTHERLORD_FF
292 or adapter and want to enable force feedback support for it. 312 or adapter and want to enable force feedback support for it.
293 313
294config HID_PETALYNX 314config HID_PETALYNX
295 tristate "Petalynx" 315 tristate "Petalynx Maxter remote control"
296 depends on USB_HID 316 depends on USB_HID
297 ---help--- 317 ---help---
298 Support for Petalynx Maxter remote control. 318 Support for Petalynx Maxter remote control.
@@ -356,7 +376,7 @@ config HID_PICOLCD_LEDS
356 Provide access to PicoLCD's GPO pins via leds class. 376 Provide access to PicoLCD's GPO pins via leds class.
357 377
358config HID_QUANTA 378config HID_QUANTA
359 tristate "Quanta Optical Touch" 379 tristate "Quanta Optical Touch panels"
360 depends on USB_HID 380 depends on USB_HID
361 ---help--- 381 ---help---
362 Support for Quanta Optical Touch dual-touch panels. 382 Support for Quanta Optical Touch dual-touch panels.
@@ -376,32 +396,39 @@ config HID_ROCCAT_KONE
376 ---help--- 396 ---help---
377 Support for Roccat Kone mouse. 397 Support for Roccat Kone mouse.
378 398
399config HID_ROCCAT_PYRA
400 tristate "Roccat Pyra mouse support"
401 depends on USB_HID
402 select HID_ROCCAT
403 ---help---
404 Support for Roccat Pyra mouse.
405
379config HID_SAMSUNG 406config HID_SAMSUNG
380 tristate "Samsung" 407 tristate "Samsung InfraRed remote control or keyboards"
381 depends on USB_HID 408 depends on USB_HID
382 ---help--- 409 ---help---
383 Support for Samsung InfraRed remote control or keyboards. 410 Support for Samsung InfraRed remote control or keyboards.
384 411
385config HID_SONY 412config HID_SONY
386 tristate "Sony" 413 tristate "Sony PS3 controller"
387 depends on USB_HID 414 depends on USB_HID
388 ---help--- 415 ---help---
389 Support for Sony PS3 controller. 416 Support for Sony PS3 controller.
390 417
391config HID_STANTUM 418config HID_STANTUM
392 tristate "Stantum" 419 tristate "Stantum multitouch panel"
393 depends on USB_HID 420 depends on USB_HID
394 ---help--- 421 ---help---
395 Support for Stantum multitouch panel. 422 Support for Stantum multitouch panel.
396 423
397config HID_SUNPLUS 424config HID_SUNPLUS
398 tristate "Sunplus" 425 tristate "Sunplus wireless desktop"
399 depends on USB_HID 426 depends on USB_HID
400 ---help--- 427 ---help---
401 Support for Sunplus wireless desktop. 428 Support for Sunplus wireless desktop.
402 429
403config HID_GREENASIA 430config HID_GREENASIA
404 tristate "GreenAsia (Product ID 0x12) support" 431 tristate "GreenAsia (Product ID 0x12) game controller support"
405 depends on USB_HID 432 depends on USB_HID
406 ---help--- 433 ---help---
407 Say Y here if you have a GreenAsia (Product ID 0x12) based game 434 Say Y here if you have a GreenAsia (Product ID 0x12) based game
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 46f037f3df80..c335605b9200 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -21,6 +21,9 @@ endif
21ifdef CONFIG_LOGIG940_FF 21ifdef CONFIG_LOGIG940_FF
22 hid-logitech-objs += hid-lg3ff.o 22 hid-logitech-objs += hid-lg3ff.o
23endif 23endif
24ifdef CONFIG_LOGIWII_FF
25 hid-logitech-objs += hid-lg4ff.o
26endif
24 27
25obj-$(CONFIG_HID_3M_PCT) += hid-3m-pct.o 28obj-$(CONFIG_HID_3M_PCT) += hid-3m-pct.o
26obj-$(CONFIG_HID_A4TECH) += hid-a4tech.o 29obj-$(CONFIG_HID_A4TECH) += hid-a4tech.o
@@ -52,6 +55,7 @@ obj-$(CONFIG_HID_PETALYNX) += hid-petalynx.o
52obj-$(CONFIG_HID_PICOLCD) += hid-picolcd.o 55obj-$(CONFIG_HID_PICOLCD) += hid-picolcd.o
53obj-$(CONFIG_HID_ROCCAT) += hid-roccat.o 56obj-$(CONFIG_HID_ROCCAT) += hid-roccat.o
54obj-$(CONFIG_HID_ROCCAT_KONE) += hid-roccat-kone.o 57obj-$(CONFIG_HID_ROCCAT_KONE) += hid-roccat-kone.o
58obj-$(CONFIG_HID_ROCCAT_PYRA) += hid-roccat-pyra.o
55obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o 59obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o
56obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o 60obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o
57obj-$(CONFIG_HID_SONY) += hid-sony.o 61obj-$(CONFIG_HID_SONY) += hid-sony.o
@@ -61,9 +65,11 @@ obj-$(CONFIG_HID_GREENASIA) += hid-gaff.o
61obj-$(CONFIG_HID_THRUSTMASTER) += hid-tmff.o 65obj-$(CONFIG_HID_THRUSTMASTER) += hid-tmff.o
62obj-$(CONFIG_HID_TOPSEED) += hid-topseed.o 66obj-$(CONFIG_HID_TOPSEED) += hid-topseed.o
63obj-$(CONFIG_HID_TWINHAN) += hid-twinhan.o 67obj-$(CONFIG_HID_TWINHAN) += hid-twinhan.o
68obj-$(CONFIG_HID_UCLOGIC) += hid-uclogic.o
64obj-$(CONFIG_HID_ZEROPLUS) += hid-zpff.o 69obj-$(CONFIG_HID_ZEROPLUS) += hid-zpff.o
65obj-$(CONFIG_HID_ZYDACRON) += hid-zydacron.o 70obj-$(CONFIG_HID_ZYDACRON) += hid-zydacron.o
66obj-$(CONFIG_HID_WACOM) += hid-wacom.o 71obj-$(CONFIG_HID_WACOM) += hid-wacom.o
72obj-$(CONFIG_HID_WALTOP) += hid-waltop.o
67 73
68obj-$(CONFIG_USB_HID) += usbhid/ 74obj-$(CONFIG_USB_HID) += usbhid/
69obj-$(CONFIG_USB_MOUSE) += usbhid/ 75obj-$(CONFIG_USB_MOUSE) += usbhid/
diff --git a/drivers/hid/hid-3m-pct.c b/drivers/hid/hid-3m-pct.c
index 2a0d56b7a02b..02d8cd3b1b1b 100644
--- a/drivers/hid/hid-3m-pct.c
+++ b/drivers/hid/hid-3m-pct.c
@@ -2,6 +2,8 @@
2 * HID driver for 3M PCT multitouch panels 2 * HID driver for 3M PCT multitouch panels
3 * 3 *
4 * Copyright (c) 2009-2010 Stephane Chatty <chatty@enac.fr> 4 * Copyright (c) 2009-2010 Stephane Chatty <chatty@enac.fr>
5 * Copyright (c) 2010 Henrik Rydberg <rydberg@euromail.se>
6 * Copyright (c) 2010 Canonical, Ltd.
5 * 7 *
6 */ 8 */
7 9
@@ -24,15 +26,26 @@ MODULE_LICENSE("GPL");
24 26
25#include "hid-ids.h" 27#include "hid-ids.h"
26 28
29#define MAX_SLOTS 60
30#define MAX_TRKID USHRT_MAX
31#define MAX_EVENTS 360
32
33/* estimated signal-to-noise ratios */
34#define SN_MOVE 2048
35#define SN_WIDTH 128
36
27struct mmm_finger { 37struct mmm_finger {
28 __s32 x, y, w, h; 38 __s32 x, y, w, h;
29 __u8 rank; 39 __u16 id;
40 bool prev_touch;
30 bool touch, valid; 41 bool touch, valid;
31}; 42};
32 43
33struct mmm_data { 44struct mmm_data {
34 struct mmm_finger f[10]; 45 struct mmm_finger f[MAX_SLOTS];
35 __u8 curid, num; 46 __u16 id;
47 __u8 curid;
48 __u8 nexp, nreal;
36 bool touch, valid; 49 bool touch, valid;
37}; 50};
38 51
@@ -40,6 +53,10 @@ static int mmm_input_mapping(struct hid_device *hdev, struct hid_input *hi,
40 struct hid_field *field, struct hid_usage *usage, 53 struct hid_field *field, struct hid_usage *usage,
41 unsigned long **bit, int *max) 54 unsigned long **bit, int *max)
42{ 55{
56 int f1 = field->logical_minimum;
57 int f2 = field->logical_maximum;
58 int df = f2 - f1;
59
43 switch (usage->hid & HID_USAGE_PAGE) { 60 switch (usage->hid & HID_USAGE_PAGE) {
44 61
45 case HID_UP_BUTTON: 62 case HID_UP_BUTTON:
@@ -50,18 +67,20 @@ static int mmm_input_mapping(struct hid_device *hdev, struct hid_input *hi,
50 case HID_GD_X: 67 case HID_GD_X:
51 hid_map_usage(hi, usage, bit, max, 68 hid_map_usage(hi, usage, bit, max,
52 EV_ABS, ABS_MT_POSITION_X); 69 EV_ABS, ABS_MT_POSITION_X);
70 input_set_abs_params(hi->input, ABS_MT_POSITION_X,
71 f1, f2, df / SN_MOVE, 0);
53 /* touchscreen emulation */ 72 /* touchscreen emulation */
54 input_set_abs_params(hi->input, ABS_X, 73 input_set_abs_params(hi->input, ABS_X,
55 field->logical_minimum, 74 f1, f2, df / SN_MOVE, 0);
56 field->logical_maximum, 0, 0);
57 return 1; 75 return 1;
58 case HID_GD_Y: 76 case HID_GD_Y:
59 hid_map_usage(hi, usage, bit, max, 77 hid_map_usage(hi, usage, bit, max,
60 EV_ABS, ABS_MT_POSITION_Y); 78 EV_ABS, ABS_MT_POSITION_Y);
79 input_set_abs_params(hi->input, ABS_MT_POSITION_Y,
80 f1, f2, df / SN_MOVE, 0);
61 /* touchscreen emulation */ 81 /* touchscreen emulation */
62 input_set_abs_params(hi->input, ABS_Y, 82 input_set_abs_params(hi->input, ABS_Y,
63 field->logical_minimum, 83 f1, f2, df / SN_MOVE, 0);
64 field->logical_maximum, 0, 0);
65 return 1; 84 return 1;
66 } 85 }
67 return 0; 86 return 0;
@@ -81,21 +100,31 @@ static int mmm_input_mapping(struct hid_device *hdev, struct hid_input *hi,
81 case HID_DG_TIPSWITCH: 100 case HID_DG_TIPSWITCH:
82 /* touchscreen emulation */ 101 /* touchscreen emulation */
83 hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH); 102 hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
103 input_set_capability(hi->input, EV_KEY, BTN_TOUCH);
84 return 1; 104 return 1;
85 case HID_DG_WIDTH: 105 case HID_DG_WIDTH:
86 hid_map_usage(hi, usage, bit, max, 106 hid_map_usage(hi, usage, bit, max,
87 EV_ABS, ABS_MT_TOUCH_MAJOR); 107 EV_ABS, ABS_MT_TOUCH_MAJOR);
108 input_set_abs_params(hi->input, ABS_MT_TOUCH_MAJOR,
109 f1, f2, df / SN_WIDTH, 0);
88 return 1; 110 return 1;
89 case HID_DG_HEIGHT: 111 case HID_DG_HEIGHT:
90 hid_map_usage(hi, usage, bit, max, 112 hid_map_usage(hi, usage, bit, max,
91 EV_ABS, ABS_MT_TOUCH_MINOR); 113 EV_ABS, ABS_MT_TOUCH_MINOR);
114 input_set_abs_params(hi->input, ABS_MT_TOUCH_MINOR,
115 f1, f2, df / SN_WIDTH, 0);
92 input_set_abs_params(hi->input, ABS_MT_ORIENTATION, 116 input_set_abs_params(hi->input, ABS_MT_ORIENTATION,
93 1, 1, 0, 0); 117 0, 1, 0, 0);
94 return 1; 118 return 1;
95 case HID_DG_CONTACTID: 119 case HID_DG_CONTACTID:
96 field->logical_maximum = 59; 120 field->logical_maximum = MAX_TRKID;
97 hid_map_usage(hi, usage, bit, max, 121 hid_map_usage(hi, usage, bit, max,
98 EV_ABS, ABS_MT_TRACKING_ID); 122 EV_ABS, ABS_MT_TRACKING_ID);
123 input_set_abs_params(hi->input, ABS_MT_TRACKING_ID,
124 0, MAX_TRKID, 0, 0);
125 if (!hi->input->mt)
126 input_mt_create_slots(hi->input, MAX_SLOTS);
127 input_set_events_per_packet(hi->input, MAX_EVENTS);
99 return 1; 128 return 1;
100 } 129 }
101 /* let hid-input decide for the others */ 130 /* let hid-input decide for the others */
@@ -113,10 +142,10 @@ static int mmm_input_mapped(struct hid_device *hdev, struct hid_input *hi,
113 struct hid_field *field, struct hid_usage *usage, 142 struct hid_field *field, struct hid_usage *usage,
114 unsigned long **bit, int *max) 143 unsigned long **bit, int *max)
115{ 144{
145 /* tell hid-input to skip setup of these event types */
116 if (usage->type == EV_KEY || usage->type == EV_ABS) 146 if (usage->type == EV_KEY || usage->type == EV_ABS)
117 clear_bit(usage->code, *bit); 147 set_bit(usage->type, hi->input->evbit);
118 148 return -1;
119 return 0;
120} 149}
121 150
122/* 151/*
@@ -126,70 +155,49 @@ static int mmm_input_mapped(struct hid_device *hdev, struct hid_input *hi,
126static void mmm_filter_event(struct mmm_data *md, struct input_dev *input) 155static void mmm_filter_event(struct mmm_data *md, struct input_dev *input)
127{ 156{
128 struct mmm_finger *oldest = 0; 157 struct mmm_finger *oldest = 0;
129 bool pressed = false, released = false;
130 int i; 158 int i;
131 159 for (i = 0; i < MAX_SLOTS; ++i) {
132 /*
133 * we need to iterate on all fingers to decide if we have a press
134 * or a release event in our touchscreen emulation.
135 */
136 for (i = 0; i < 10; ++i) {
137 struct mmm_finger *f = &md->f[i]; 160 struct mmm_finger *f = &md->f[i];
138 if (!f->valid) { 161 if (!f->valid) {
139 /* this finger is just placeholder data, ignore */ 162 /* this finger is just placeholder data, ignore */
140 } else if (f->touch) { 163 continue;
164 }
165 input_mt_slot(input, i);
166 if (f->touch) {
141 /* this finger is on the screen */ 167 /* this finger is on the screen */
142 int wide = (f->w > f->h); 168 int wide = (f->w > f->h);
143 input_event(input, EV_ABS, ABS_MT_TRACKING_ID, i); 169 /* divided by two to match visual scale of touch */
170 int major = max(f->w, f->h) >> 1;
171 int minor = min(f->w, f->h) >> 1;
172
173 if (!f->prev_touch)
174 f->id = md->id++;
175 input_event(input, EV_ABS, ABS_MT_TRACKING_ID, f->id);
144 input_event(input, EV_ABS, ABS_MT_POSITION_X, f->x); 176 input_event(input, EV_ABS, ABS_MT_POSITION_X, f->x);
145 input_event(input, EV_ABS, ABS_MT_POSITION_Y, f->y); 177 input_event(input, EV_ABS, ABS_MT_POSITION_Y, f->y);
146 input_event(input, EV_ABS, ABS_MT_ORIENTATION, wide); 178 input_event(input, EV_ABS, ABS_MT_ORIENTATION, wide);
147 input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, 179 input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, major);
148 wide ? f->w : f->h); 180 input_event(input, EV_ABS, ABS_MT_TOUCH_MINOR, minor);
149 input_event(input, EV_ABS, ABS_MT_TOUCH_MINOR, 181 /* touchscreen emulation: pick the oldest contact */
150 wide ? f->h : f->w); 182 if (!oldest || ((f->id - oldest->id) & (SHRT_MAX + 1)))
151 input_mt_sync(input);
152 /*
153 * touchscreen emulation: maintain the age rank
154 * of this finger, decide if we have a press
155 */
156 if (f->rank == 0) {
157 f->rank = ++(md->num);
158 if (f->rank == 1)
159 pressed = true;
160 }
161 if (f->rank == 1)
162 oldest = f; 183 oldest = f;
163 } else { 184 } else {
164 /* this finger took off the screen */ 185 /* this finger took off the screen */
165 /* touchscreen emulation: maintain age rank of others */ 186 input_event(input, EV_ABS, ABS_MT_TRACKING_ID, -1);
166 int j;
167
168 for (j = 0; j < 10; ++j) {
169 struct mmm_finger *g = &md->f[j];
170 if (g->rank > f->rank) {
171 g->rank--;
172 if (g->rank == 1)
173 oldest = g;
174 }
175 }
176 f->rank = 0;
177 --(md->num);
178 if (md->num == 0)
179 released = true;
180 } 187 }
188 f->prev_touch = f->touch;
181 f->valid = 0; 189 f->valid = 0;
182 } 190 }
183 191
184 /* touchscreen emulation */ 192 /* touchscreen emulation */
185 if (oldest) { 193 if (oldest) {
186 if (pressed) 194 input_event(input, EV_KEY, BTN_TOUCH, 1);
187 input_event(input, EV_KEY, BTN_TOUCH, 1);
188 input_event(input, EV_ABS, ABS_X, oldest->x); 195 input_event(input, EV_ABS, ABS_X, oldest->x);
189 input_event(input, EV_ABS, ABS_Y, oldest->y); 196 input_event(input, EV_ABS, ABS_Y, oldest->y);
190 } else if (released) { 197 } else {
191 input_event(input, EV_KEY, BTN_TOUCH, 0); 198 input_event(input, EV_KEY, BTN_TOUCH, 0);
192 } 199 }
200 input_sync(input);
193} 201}
194 202
195/* 203/*
@@ -223,10 +231,12 @@ static int mmm_event(struct hid_device *hid, struct hid_field *field,
223 md->f[md->curid].h = value; 231 md->f[md->curid].h = value;
224 break; 232 break;
225 case HID_DG_CONTACTID: 233 case HID_DG_CONTACTID:
234 value = clamp_val(value, 0, MAX_SLOTS - 1);
226 if (md->valid) { 235 if (md->valid) {
227 md->curid = value; 236 md->curid = value;
228 md->f[value].touch = md->touch; 237 md->f[value].touch = md->touch;
229 md->f[value].valid = 1; 238 md->f[value].valid = 1;
239 md->nreal++;
230 } 240 }
231 break; 241 break;
232 case HID_GD_X: 242 case HID_GD_X:
@@ -238,7 +248,12 @@ static int mmm_event(struct hid_device *hid, struct hid_field *field,
238 md->f[md->curid].y = value; 248 md->f[md->curid].y = value;
239 break; 249 break;
240 case HID_DG_CONTACTCOUNT: 250 case HID_DG_CONTACTCOUNT:
241 mmm_filter_event(md, input); 251 if (value)
252 md->nexp = value;
253 if (md->nreal >= md->nexp) {
254 mmm_filter_event(md, input);
255 md->nreal = 0;
256 }
242 break; 257 break;
243 } 258 }
244 } 259 }
@@ -255,6 +270,8 @@ static int mmm_probe(struct hid_device *hdev, const struct hid_device_id *id)
255 int ret; 270 int ret;
256 struct mmm_data *md; 271 struct mmm_data *md;
257 272
273 hdev->quirks |= HID_QUIRK_NO_INPUT_SYNC;
274
258 md = kzalloc(sizeof(struct mmm_data), GFP_KERNEL); 275 md = kzalloc(sizeof(struct mmm_data), GFP_KERNEL);
259 if (!md) { 276 if (!md) {
260 dev_err(&hdev->dev, "cannot allocate 3M data\n"); 277 dev_err(&hdev->dev, "cannot allocate 3M data\n");
diff --git a/drivers/hid/hid-a4tech.c b/drivers/hid/hid-a4tech.c
index 3a2b223c1da4..1666c1684e79 100644
--- a/drivers/hid/hid-a4tech.c
+++ b/drivers/hid/hid-a4tech.c
@@ -133,6 +133,8 @@ static const struct hid_device_id a4_devices[] = {
133 .driver_data = A4_2WHEEL_MOUSE_HACK_7 }, 133 .driver_data = A4_2WHEEL_MOUSE_HACK_7 },
134 { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D), 134 { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D),
135 .driver_data = A4_2WHEEL_MOUSE_HACK_B8 }, 135 .driver_data = A4_2WHEEL_MOUSE_HACK_B8 },
136 { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649),
137 .driver_data = A4_2WHEEL_MOUSE_HACK_B8 },
136 { } 138 { }
137}; 139};
138MODULE_DEVICE_TABLE(hid, a4_devices); 140MODULE_DEVICE_TABLE(hid, a4_devices);
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index bba05d0a8980..eaeca564a8d3 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -246,17 +246,18 @@ static int apple_event(struct hid_device *hdev, struct hid_field *field,
246/* 246/*
247 * MacBook JIS keyboard has wrong logical maximum 247 * MacBook JIS keyboard has wrong logical maximum
248 */ 248 */
249static void apple_report_fixup(struct hid_device *hdev, __u8 *rdesc, 249static __u8 *apple_report_fixup(struct hid_device *hdev, __u8 *rdesc,
250 unsigned int rsize) 250 unsigned int *rsize)
251{ 251{
252 struct apple_sc *asc = hid_get_drvdata(hdev); 252 struct apple_sc *asc = hid_get_drvdata(hdev);
253 253
254 if ((asc->quirks & APPLE_RDESC_JIS) && rsize >= 60 && 254 if ((asc->quirks & APPLE_RDESC_JIS) && *rsize >= 60 &&
255 rdesc[53] == 0x65 && rdesc[59] == 0x65) { 255 rdesc[53] == 0x65 && rdesc[59] == 0x65) {
256 dev_info(&hdev->dev, "fixing up MacBook JIS keyboard report " 256 dev_info(&hdev->dev, "fixing up MacBook JIS keyboard report "
257 "descriptor\n"); 257 "descriptor\n");
258 rdesc[53] = rdesc[59] = 0xe7; 258 rdesc[53] = rdesc[59] = 0xe7;
259 } 259 }
260 return rdesc;
260} 261}
261 262
262static void apple_setup_input(struct input_dev *input) 263static void apple_setup_input(struct input_dev *input)
diff --git a/drivers/hid/hid-cherry.c b/drivers/hid/hid-cherry.c
index 24663a8717b1..e880086c2311 100644
--- a/drivers/hid/hid-cherry.c
+++ b/drivers/hid/hid-cherry.c
@@ -26,15 +26,16 @@
26 * Cherry Cymotion keyboard have an invalid HID report descriptor, 26 * Cherry Cymotion keyboard have an invalid HID report descriptor,
27 * that needs fixing before we can parse it. 27 * that needs fixing before we can parse it.
28 */ 28 */
29static void ch_report_fixup(struct hid_device *hdev, __u8 *rdesc, 29static __u8 *ch_report_fixup(struct hid_device *hdev, __u8 *rdesc,
30 unsigned int rsize) 30 unsigned int *rsize)
31{ 31{
32 if (rsize >= 17 && rdesc[11] == 0x3c && rdesc[12] == 0x02) { 32 if (*rsize >= 17 && rdesc[11] == 0x3c && rdesc[12] == 0x02) {
33 dev_info(&hdev->dev, "fixing up Cherry Cymotion report " 33 dev_info(&hdev->dev, "fixing up Cherry Cymotion report "
34 "descriptor\n"); 34 "descriptor\n");
35 rdesc[11] = rdesc[16] = 0xff; 35 rdesc[11] = rdesc[16] = 0xff;
36 rdesc[12] = rdesc[17] = 0x03; 36 rdesc[12] = rdesc[17] = 0x03;
37 } 37 }
38 return rdesc;
38} 39}
39 40
40#define ch_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \ 41#define ch_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 3cb6632d4518..7832b6e2478b 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -388,12 +388,6 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
388 __u32 data; 388 __u32 data;
389 unsigned n; 389 unsigned n;
390 390
391 /* Local delimiter could have value 0, which allows size to be 0 */
392 if (item->size == 0 && item->tag != HID_LOCAL_ITEM_TAG_DELIMITER) {
393 dbg_hid("item data expected for local item\n");
394 return -1;
395 }
396
397 data = item_udata(item); 391 data = item_udata(item);
398 392
399 switch (item->tag) { 393 switch (item->tag) {
@@ -651,7 +645,7 @@ int hid_parse_report(struct hid_device *device, __u8 *start,
651 }; 645 };
652 646
653 if (device->driver->report_fixup) 647 if (device->driver->report_fixup)
654 device->driver->report_fixup(device, start, size); 648 start = device->driver->report_fixup(device, start, &size);
655 649
656 device->rdesc = kmemdup(start, size, GFP_KERNEL); 650 device->rdesc = kmemdup(start, size, GFP_KERNEL);
657 if (device->rdesc == NULL) 651 if (device->rdesc == NULL)
@@ -1241,6 +1235,7 @@ static const struct hid_device_id hid_blacklist[] = {
1241 { HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M2256) }, 1235 { HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M2256) },
1242 { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) }, 1236 { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
1243 { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) }, 1237 { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) },
1238 { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) },
1244#if defined(CONFIG_HID_ACRUX_FF) || defined(CONFIG_HID_ACRUX_FF_MODULE) 1239#if defined(CONFIG_HID_ACRUX_FF) || defined(CONFIG_HID_ACRUX_FF_MODULE)
1245 { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802) }, 1240 { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802) },
1246#endif 1241#endif
@@ -1248,6 +1243,7 @@ static const struct hid_device_id hid_blacklist[] = {
1248 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) }, 1243 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) },
1249 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) }, 1244 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) },
1250 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) }, 1245 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) },
1246 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) },
1251 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) }, 1247 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) },
1252 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) }, 1248 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) },
1253 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) }, 1249 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) },
@@ -1327,6 +1323,7 @@ static const struct hid_device_id hid_blacklist[] = {
1327 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500) }, 1323 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500) },
1328 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D) }, 1324 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D) },
1329 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL) }, 1325 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL) },
1326 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD) },
1330 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) }, 1327 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) },
1331 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2) }, 1328 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2) },
1332 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_F3D) }, 1329 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_F3D) },
@@ -1336,6 +1333,7 @@ static const struct hid_device_id hid_blacklist[] = {
1336 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL) }, 1333 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL) },
1337 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2) }, 1334 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2) },
1338 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) }, 1335 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) },
1336 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL) },
1339 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2) }, 1337 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2) },
1340 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) }, 1338 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) },
1341 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) }, 1339 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) },
@@ -1371,12 +1369,15 @@ static const struct hid_device_id hid_blacklist[] = {
1371 { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) }, 1369 { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) },
1372 { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) }, 1370 { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) },
1373 { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) }, 1371 { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
1372 { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRED) },
1374 { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) }, 1373 { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
1375 { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) }, 1374 { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
1376 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, 1375 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
1377 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, 1376 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
1378 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) }, 1377 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
1379 { HID_USB_DEVICE(USB_VENDOR_ID_STANTUM, USB_DEVICE_ID_MTP) }, 1378 { HID_USB_DEVICE(USB_VENDOR_ID_STANTUM, USB_DEVICE_ID_MTP) },
1379 { HID_USB_DEVICE(USB_VENDOR_ID_STANTUM_STM, USB_DEVICE_ID_MTP_STM) },
1380 { HID_USB_DEVICE(USB_VENDOR_ID_STANTUM_SITRONIX, USB_DEVICE_ID_MTP_SITRONIX) },
1380 { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) }, 1381 { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
1381 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) }, 1382 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) },
1382 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) }, 1383 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) },
@@ -1388,8 +1389,16 @@ static const struct hid_device_id hid_blacklist[] = {
1388 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) }, 1389 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
1389 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) }, 1390 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
1390 { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) }, 1391 { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) },
1392 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209) },
1393 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U) },
1394 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U) },
1395 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U) },
1391 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) }, 1396 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
1392 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH) }, 1397 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH) },
1398 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) },
1399 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) },
1400 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH) },
1401 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH) },
1393 { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) }, 1402 { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
1394 { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) }, 1403 { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
1395 { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) }, 1404 { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
diff --git a/drivers/hid/hid-cypress.c b/drivers/hid/hid-cypress.c
index 998b6f443d7d..4cd0e2345991 100644
--- a/drivers/hid/hid-cypress.c
+++ b/drivers/hid/hid-cypress.c
@@ -31,16 +31,16 @@
31 * Some USB barcode readers from cypress have usage min and usage max in 31 * Some USB barcode readers from cypress have usage min and usage max in
32 * the wrong order 32 * the wrong order
33 */ 33 */
34static void cp_report_fixup(struct hid_device *hdev, __u8 *rdesc, 34static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
35 unsigned int rsize) 35 unsigned int *rsize)
36{ 36{
37 unsigned long quirks = (unsigned long)hid_get_drvdata(hdev); 37 unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
38 unsigned int i; 38 unsigned int i;
39 39
40 if (!(quirks & CP_RDESC_SWAPPED_MIN_MAX)) 40 if (!(quirks & CP_RDESC_SWAPPED_MIN_MAX))
41 return; 41 return rdesc;
42 42
43 for (i = 0; i < rsize - 4; i++) 43 for (i = 0; i < *rsize - 4; i++)
44 if (rdesc[i] == 0x29 && rdesc[i + 2] == 0x19) { 44 if (rdesc[i] == 0x29 && rdesc[i + 2] == 0x19) {
45 __u8 tmp; 45 __u8 tmp;
46 46
@@ -50,6 +50,7 @@ static void cp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
50 rdesc[i + 3] = rdesc[i + 1]; 50 rdesc[i + 3] = rdesc[i + 1];
51 rdesc[i + 1] = tmp; 51 rdesc[i + 1] = tmp;
52 } 52 }
53 return rdesc;
53} 54}
54 55
55static int cp_input_mapped(struct hid_device *hdev, struct hid_input *hi, 56static int cp_input_mapped(struct hid_device *hdev, struct hid_input *hi,
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 61a3e572224a..75c5e23d09d2 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -570,6 +570,8 @@ void hid_debug_event(struct hid_device *hdev, char *buf)
570 buf[i]; 570 buf[i];
571 list->tail = (list->tail + i) % HID_DEBUG_BUFSIZE; 571 list->tail = (list->tail + i) % HID_DEBUG_BUFSIZE;
572 } 572 }
573
574 wake_up_interruptible(&hdev->debug_wait);
573} 575}
574EXPORT_SYMBOL_GPL(hid_debug_event); 576EXPORT_SYMBOL_GPL(hid_debug_event);
575 577
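
The added wake_up_interruptible() is the producer half of the debugfs events stream: hid_debug_event() appends to the ring buffer kept for each reader and now wakes anyone sleeping on hdev->debug_wait, so blocking readers see new data promptly. The matching consumer half, reduced to a sketch (not the actual hid-debug read path):

#include <linux/hid.h>
#include <linux/hid-debug.h>
#include <linux/wait.h>

/*
 * Sketch only: sleep until the producer above has advanced the ring
 * buffer (head != tail) and issued the wakeup on hdev->debug_wait.
 */
static int wait_for_debug_events(struct hid_device *hdev,
                                 struct hid_debug_list *list)
{
        return wait_event_interruptible(hdev->debug_wait,
                                        list->head != list->tail);
}
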
diff --git a/drivers/hid/hid-egalax.c b/drivers/hid/hid-egalax.c
index 8ca7f65cf2f8..54b017ad258d 100644
--- a/drivers/hid/hid-egalax.c
+++ b/drivers/hid/hid-egalax.c
@@ -31,7 +31,7 @@ struct egalax_data {
31 bool first; /* is this the first finger in the frame? */ 31 bool first; /* is this the first finger in the frame? */
32 bool valid; /* valid finger data, or just placeholder? */ 32 bool valid; /* valid finger data, or just placeholder? */
33 bool activity; /* at least one active finger previously? */ 33 bool activity; /* at least one active finger previously? */
34 __u16 lastx, lasty; /* latest valid (x, y) in the frame */ 34 __u16 lastx, lasty, lastz; /* latest valid (x, y, z) in the frame */
35}; 35};
36 36
37static int egalax_input_mapping(struct hid_device *hdev, struct hid_input *hi, 37static int egalax_input_mapping(struct hid_device *hdev, struct hid_input *hi,
@@ -79,6 +79,10 @@ static int egalax_input_mapping(struct hid_device *hdev, struct hid_input *hi,
79 case HID_DG_TIPPRESSURE: 79 case HID_DG_TIPPRESSURE:
80 hid_map_usage(hi, usage, bit, max, 80 hid_map_usage(hi, usage, bit, max,
81 EV_ABS, ABS_MT_PRESSURE); 81 EV_ABS, ABS_MT_PRESSURE);
82 /* touchscreen emulation */
83 input_set_abs_params(hi->input, ABS_PRESSURE,
84 field->logical_minimum,
85 field->logical_maximum, 0, 0);
82 return 1; 86 return 1;
83 } 87 }
84 return 0; 88 return 0;
@@ -109,8 +113,8 @@ static void egalax_filter_event(struct egalax_data *td, struct input_dev *input)
109 if (td->valid) { 113 if (td->valid) {
110 /* emit multitouch events */ 114 /* emit multitouch events */
111 input_event(input, EV_ABS, ABS_MT_TRACKING_ID, td->id); 115 input_event(input, EV_ABS, ABS_MT_TRACKING_ID, td->id);
112 input_event(input, EV_ABS, ABS_MT_POSITION_X, td->x); 116 input_event(input, EV_ABS, ABS_MT_POSITION_X, td->x >> 3);
113 input_event(input, EV_ABS, ABS_MT_POSITION_Y, td->y); 117 input_event(input, EV_ABS, ABS_MT_POSITION_Y, td->y >> 3);
114 input_event(input, EV_ABS, ABS_MT_PRESSURE, td->z); 118 input_event(input, EV_ABS, ABS_MT_PRESSURE, td->z);
115 119
116 input_mt_sync(input); 120 input_mt_sync(input);
@@ -121,6 +125,7 @@ static void egalax_filter_event(struct egalax_data *td, struct input_dev *input)
121 */ 125 */
122 td->lastx = td->x; 126 td->lastx = td->x;
123 td->lasty = td->y; 127 td->lasty = td->y;
128 td->lastz = td->z;
124 } 129 }
125 130
126 /* 131 /*
@@ -129,8 +134,9 @@ static void egalax_filter_event(struct egalax_data *td, struct input_dev *input)
129 * the oldest on the panel, the one we want for single touch 134 * the oldest on the panel, the one we want for single touch
130 */ 135 */
131 if (!td->first && td->activity) { 136 if (!td->first && td->activity) {
132 input_event(input, EV_ABS, ABS_X, td->lastx); 137 input_event(input, EV_ABS, ABS_X, td->lastx >> 3);
133 input_event(input, EV_ABS, ABS_Y, td->lasty); 138 input_event(input, EV_ABS, ABS_Y, td->lasty >> 3);
139 input_event(input, EV_ABS, ABS_PRESSURE, td->lastz);
134 } 140 }
135 141
136 if (!td->valid) { 142 if (!td->valid) {
diff --git a/drivers/hid/hid-elecom.c b/drivers/hid/hid-elecom.c
index 7a40878f46b4..6e31f305397d 100644
--- a/drivers/hid/hid-elecom.c
+++ b/drivers/hid/hid-elecom.c
@@ -20,14 +20,15 @@
20 20
21#include "hid-ids.h" 21#include "hid-ids.h"
22 22
23static void elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc, 23static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
24 unsigned int rsize) 24 unsigned int *rsize)
25{ 25{
26 if (rsize >= 48 && rdesc[46] == 0x05 && rdesc[47] == 0x0c) { 26 if (*rsize >= 48 && rdesc[46] == 0x05 && rdesc[47] == 0x0c) {
27 dev_info(&hdev->dev, "Fixing up Elecom BM084 " 27 dev_info(&hdev->dev, "Fixing up Elecom BM084 "
28 "report descriptor.\n"); 28 "report descriptor.\n");
29 rdesc[47] = 0x00; 29 rdesc[47] = 0x00;
30 } 30 }
31 return rdesc;
31} 32}
32 33
33static const struct hid_device_id elecom_devices[] = { 34static const struct hid_device_id elecom_devices[] = {
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 855aa8e355f4..3ee999d33004 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -25,6 +25,7 @@
25#define USB_VENDOR_ID_A4TECH 0x09da 25#define USB_VENDOR_ID_A4TECH 0x09da
26#define USB_DEVICE_ID_A4TECH_WCP32PU 0x0006 26#define USB_DEVICE_ID_A4TECH_WCP32PU 0x0006
27#define USB_DEVICE_ID_A4TECH_X5_005D 0x000a 27#define USB_DEVICE_ID_A4TECH_X5_005D 0x000a
28#define USB_DEVICE_ID_A4TECH_RP_649 0x001a
28 29
29#define USB_VENDOR_ID_AASHIMA 0x06d6 30#define USB_VENDOR_ID_AASHIMA 0x06d6
30#define USB_DEVICE_ID_AASHIMA_GAMEPAD 0x0025 31#define USB_DEVICE_ID_AASHIMA_GAMEPAD 0x0025
@@ -63,6 +64,7 @@
63#define USB_VENDOR_ID_APPLE 0x05ac 64#define USB_VENDOR_ID_APPLE 0x05ac
64#define USB_DEVICE_ID_APPLE_MIGHTYMOUSE 0x0304 65#define USB_DEVICE_ID_APPLE_MIGHTYMOUSE 0x0304
65#define USB_DEVICE_ID_APPLE_MAGICMOUSE 0x030d 66#define USB_DEVICE_ID_APPLE_MAGICMOUSE 0x030d
67#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD 0x030e
66#define USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI 0x020e 68#define USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI 0x020e
67#define USB_DEVICE_ID_APPLE_FOUNTAIN_ISO 0x020f 69#define USB_DEVICE_ID_APPLE_FOUNTAIN_ISO 0x020f
68#define USB_DEVICE_ID_APPLE_GEYSER_ANSI 0x0214 70#define USB_DEVICE_ID_APPLE_GEYSER_ANSI 0x0214
@@ -142,6 +144,7 @@
142#define USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE 0x0051 144#define USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE 0x0051
143#define USB_DEVICE_ID_CH_FLIGHT_SIM_YOKE 0x00ff 145#define USB_DEVICE_ID_CH_FLIGHT_SIM_YOKE 0x00ff
144#define USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK 0x00d3 146#define USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK 0x00d3
147#define USB_DEVICE_ID_CH_AXIS_295 0x001c
145 148
146#define USB_VENDOR_ID_CHERRY 0x046a 149#define USB_VENDOR_ID_CHERRY 0x046a
147#define USB_DEVICE_ID_CHERRY_CYMOTION 0x0023 150#define USB_DEVICE_ID_CHERRY_CYMOTION 0x0023
@@ -343,6 +346,7 @@
343#define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101 346#define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101
344#define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110 347#define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110
345#define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f 348#define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f
349#define USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD 0xc20a
346#define USB_DEVICE_ID_LOGITECH_RUMBLEPAD 0xc211 350#define USB_DEVICE_ID_LOGITECH_RUMBLEPAD 0xc211
347#define USB_DEVICE_ID_LOGITECH_EXTREME_3D 0xc215 351#define USB_DEVICE_ID_LOGITECH_EXTREME_3D 0xc215
348#define USB_DEVICE_ID_LOGITECH_RUMBLEPAD2 0xc218 352#define USB_DEVICE_ID_LOGITECH_RUMBLEPAD2 0xc218
@@ -354,6 +358,7 @@
354#define USB_DEVICE_ID_LOGITECH_WINGMAN_FFG 0xc293 358#define USB_DEVICE_ID_LOGITECH_WINGMAN_FFG 0xc293
355#define USB_DEVICE_ID_LOGITECH_MOMO_WHEEL 0xc295 359#define USB_DEVICE_ID_LOGITECH_MOMO_WHEEL 0xc295
356#define USB_DEVICE_ID_LOGITECH_G25_WHEEL 0xc299 360#define USB_DEVICE_ID_LOGITECH_G25_WHEEL 0xc299
361#define USB_DEVICE_ID_LOGITECH_WII_WHEEL 0xc29c
357#define USB_DEVICE_ID_LOGITECH_ELITE_KBD 0xc30a 362#define USB_DEVICE_ID_LOGITECH_ELITE_KBD 0xc30a
358#define USB_DEVICE_ID_S510_RECEIVER 0xc50c 363#define USB_DEVICE_ID_S510_RECEIVER 0xc50c
359#define USB_DEVICE_ID_S510_RECEIVER_2 0xc517 364#define USB_DEVICE_ID_S510_RECEIVER_2 0xc517
@@ -466,6 +471,8 @@
466 471
467#define USB_VENDOR_ID_ROCCAT 0x1e7d 472#define USB_VENDOR_ID_ROCCAT 0x1e7d
468#define USB_DEVICE_ID_ROCCAT_KONE 0x2ced 473#define USB_DEVICE_ID_ROCCAT_KONE 0x2ced
474#define USB_DEVICE_ID_ROCCAT_PYRA_WIRED 0x2c24
475#define USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS 0x2cf6
469 476
470#define USB_VENDOR_ID_SAITEK 0x06a3 477#define USB_VENDOR_ID_SAITEK 0x06a3
471#define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17 478#define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17
@@ -485,6 +492,12 @@
485#define USB_VENDOR_ID_STANTUM 0x1f87 492#define USB_VENDOR_ID_STANTUM 0x1f87
486#define USB_DEVICE_ID_MTP 0x0002 493#define USB_DEVICE_ID_MTP 0x0002
487 494
495#define USB_VENDOR_ID_STANTUM_STM 0x0483
496#define USB_DEVICE_ID_MTP_STM 0x3261
497
498#define USB_VENDOR_ID_STANTUM_SITRONIX 0x1403
499#define USB_DEVICE_ID_MTP_SITRONIX 0x5001
500
488#define USB_VENDOR_ID_SUN 0x0430 501#define USB_VENDOR_ID_SUN 0x0430
489#define USB_DEVICE_ID_RARITAN_KVM_DONGLE 0xcdab 502#define USB_DEVICE_ID_RARITAN_KVM_DONGLE 0xcdab
490 503
@@ -514,8 +527,10 @@
514 527
515#define USB_VENDOR_ID_UCLOGIC 0x5543 528#define USB_VENDOR_ID_UCLOGIC 0x5543
516#define USB_DEVICE_ID_UCLOGIC_TABLET_PF1209 0x0042 529#define USB_DEVICE_ID_UCLOGIC_TABLET_PF1209 0x0042
517#define USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U 0x0003
518#define USB_DEVICE_ID_UCLOGIC_TABLET_KNA5 0x6001 530#define USB_DEVICE_ID_UCLOGIC_TABLET_KNA5 0x6001
531#define USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U 0x0003
532#define USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U 0x0004
533#define USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U 0x0005
519 534
520#define USB_VENDOR_ID_VERNIER 0x08f7 535#define USB_VENDOR_ID_VERNIER 0x08f7
521#define USB_DEVICE_ID_VERNIER_LABPRO 0x0001 536#define USB_DEVICE_ID_VERNIER_LABPRO 0x0001
@@ -527,6 +542,12 @@
527#define USB_VENDOR_ID_WACOM 0x056a 542#define USB_VENDOR_ID_WACOM 0x056a
528#define USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH 0x81 543#define USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH 0x81
529 544
545#define USB_VENDOR_ID_WALTOP 0x172f
546#define USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH 0x0032
547#define USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH 0x0034
548#define USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH 0x0501
549#define USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH 0x0500
550
530#define USB_VENDOR_ID_WISEGROUP 0x0925 551#define USB_VENDOR_ID_WISEGROUP 0x0925
531#define USB_DEVICE_ID_SMARTJOY_PLUS 0x0005 552#define USB_DEVICE_ID_SMARTJOY_PLUS 0x0005
532#define USB_DEVICE_ID_1_PHIDGETSERVO_20 0x8101 553#define USB_DEVICE_ID_1_PHIDGETSERVO_20 0x8101
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 6c03dcc5760a..834ef47b76d6 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -149,6 +149,83 @@ static int hidinput_setkeycode(struct input_dev *dev,
149} 149}
150 150
151 151
152/**
153 * hidinput_calc_abs_res - calculate an absolute axis resolution
154 * @field: the HID report field to calculate resolution for
155 * @code: axis code
156 *
157 * The formula is:
158 * (logical_maximum - logical_minimum)
159 * resolution = ----------------------------------------------------------
160 * (physical_maximum - physical_minimum) * 10 ^ unit_exponent
161 *
162 * as seen in the HID specification v1.11 6.2.2.7 Global Items.
163 *
164 * Only exponent 1 length units are processed. Centimeters are converted to
165 * inches. Degrees are converted to radians.
166 */
167static __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code)
168{
169 __s32 unit_exponent = field->unit_exponent;
170 __s32 logical_extents = field->logical_maximum -
171 field->logical_minimum;
172 __s32 physical_extents = field->physical_maximum -
173 field->physical_minimum;
174 __s32 prev;
175
176 /* Check if the extents are sane */
177 if (logical_extents <= 0 || physical_extents <= 0)
178 return 0;
179
180 /*
181 * Verify and convert units.
182 * See HID specification v1.11 6.2.2.7 Global Items for unit decoding
183 */
184 if (code == ABS_X || code == ABS_Y || code == ABS_Z) {
185 if (field->unit == 0x11) { /* If centimeters */
186 /* Convert to inches */
187 prev = logical_extents;
188 logical_extents *= 254;
189 if (logical_extents < prev)
190 return 0;
191 unit_exponent += 2;
192 } else if (field->unit != 0x13) { /* If not inches */
193 return 0;
194 }
195 } else if (code == ABS_RX || code == ABS_RY || code == ABS_RZ) {
196 if (field->unit == 0x14) { /* If degrees */
197 /* Convert to radians */
198 prev = logical_extents;
199 logical_extents *= 573;
200 if (logical_extents < prev)
201 return 0;
202 unit_exponent += 1;
203 } else if (field->unit != 0x12) { /* If not radians */
204 return 0;
205 }
206 } else {
207 return 0;
208 }
209
210 /* Apply negative unit exponent */
211 for (; unit_exponent < 0; unit_exponent++) {
212 prev = logical_extents;
213 logical_extents *= 10;
214 if (logical_extents < prev)
215 return 0;
216 }
217 /* Apply positive unit exponent */
218 for (; unit_exponent > 0; unit_exponent--) {
219 prev = physical_extents;
220 physical_extents *= 10;
221 if (physical_extents < prev)
222 return 0;
223 }
224
225 /* Calculate resolution */
226 return logical_extents / physical_extents;
227}
228
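
For a concrete feel of the helper above, here is a worked pass through it for a hypothetical absolute X axis (numbers invented for illustration, not taken from any device): logical range 0..4095, physical range 0..15, unit 0x11 (centimeters), unit exponent 0.

/*
 * Worked example (hypothetical values):
 *   logical_extents  = 4095 - 0 = 4095
 *   physical_extents = 15 - 0   = 15
 *   cm on ABS_X:   logical_extents *= 254  ->  1040130
 *                  unit_exponent   += 2    ->  2
 *   exponent loop: physical_extents *= 10, twice  ->  1500
 *   resolution = 1040130 / 1500 = 693, i.e. about 693 logical units
 *   per inch, reported via input_abs_set_res() below.
 */
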
152static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_field *field, 229static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_field *field,
153 struct hid_usage *usage) 230 struct hid_usage *usage)
154{ 231{
@@ -336,6 +413,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
336 map_key_clear(BTN_STYLUS); 413 map_key_clear(BTN_STYLUS);
337 break; 414 break;
338 415
416 case 0x46: /* TabletPick */
417 map_key_clear(BTN_STYLUS2);
418 break;
419
339 default: goto unknown; 420 default: goto unknown;
340 } 421 }
341 break; 422 break;
@@ -537,6 +618,9 @@ mapped:
537 input_set_abs_params(input, usage->code, a, b, (b - a) >> 8, (b - a) >> 4); 618 input_set_abs_params(input, usage->code, a, b, (b - a) >> 8, (b - a) >> 4);
538 else input_set_abs_params(input, usage->code, a, b, 0, 0); 619 else input_set_abs_params(input, usage->code, a, b, 0, 0);
539 620
621 input_abs_set_res(input, usage->code,
622 hidinput_calc_abs_res(field, usage->code));
623
540 /* use a larger default input buffer for MT devices */ 624 /* use a larger default input buffer for MT devices */
541 if (usage->code == ABS_MT_POSITION_X && input->hint_events_per_packet == 0) 625 if (usage->code == ABS_MT_POSITION_X && input->hint_events_per_packet == 0)
542 input_set_events_per_packet(input, 60); 626 input_set_events_per_packet(input, 60);
@@ -659,6 +743,9 @@ void hidinput_report_event(struct hid_device *hid, struct hid_report *report)
659{ 743{
660 struct hid_input *hidinput; 744 struct hid_input *hidinput;
661 745
746 if (hid->quirks & HID_QUIRK_NO_INPUT_SYNC)
747 return;
748
662 list_for_each_entry(hidinput, &hid->inputs, list) 749 list_for_each_entry(hidinput, &hid->inputs, list)
663 input_sync(hidinput->input); 750 input_sync(hidinput->input);
664} 751}
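
HID_QUIRK_NO_INPUT_SYNC moves the frame boundary into the driver: with the quirk set, hidinput_report_event() above returns early instead of syncing after every report, so the driver must call input_sync() itself once it has emitted a complete, filtered frame (which is what the reworked 3M driver does at the end of its emulation block). In outline, with hypothetical mydrv_* names:

#include <linux/hid.h>

/* Sketch: a driver that sets the quirk owns the sync point itself. */
static int mydrv_probe(struct hid_device *hdev,
                       const struct hid_device_id *id)
{
        hdev->quirks |= HID_QUIRK_NO_INPUT_SYNC;  /* core stops syncing */
        /* ... hid_parse(hdev), hid_hw_start(hdev, HID_CONNECT_DEFAULT) ... */
        return 0;
}

static void mydrv_emit_frame(struct input_dev *input)
{
        /* ... input_event() calls for every contact in the frame ... */
        input_sync(input);                        /* driver-chosen boundary */
}
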
diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c
index f8871712b7b5..817247ee006c 100644
--- a/drivers/hid/hid-kye.c
+++ b/drivers/hid/hid-kye.c
@@ -23,10 +23,10 @@
23 * - report size 8 count 1 must be size 1 count 8 for button bitfield 23 * - report size 8 count 1 must be size 1 count 8 for button bitfield
24 * - change the button usage range to 4-7 for the extra buttons 24 * - change the button usage range to 4-7 for the extra buttons
25 */ 25 */
26static void kye_report_fixup(struct hid_device *hdev, __u8 *rdesc, 26static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
27 unsigned int rsize) 27 unsigned int *rsize)
28{ 28{
29 if (rsize >= 74 && 29 if (*rsize >= 74 &&
30 rdesc[61] == 0x05 && rdesc[62] == 0x08 && 30 rdesc[61] == 0x05 && rdesc[62] == 0x08 &&
31 rdesc[63] == 0x19 && rdesc[64] == 0x08 && 31 rdesc[63] == 0x19 && rdesc[64] == 0x08 &&
32 rdesc[65] == 0x29 && rdesc[66] == 0x0f && 32 rdesc[65] == 0x29 && rdesc[66] == 0x0f &&
@@ -40,6 +40,7 @@ static void kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
40 rdesc[72] = 0x01; 40 rdesc[72] = 0x01;
41 rdesc[74] = 0x08; 41 rdesc[74] = 0x08;
42 } 42 }
43 return rdesc;
43} 44}
44 45
45static const struct hid_device_id kye_devices[] = { 46static const struct hid_device_id kye_devices[] = {
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index f6433d8050a9..b629fba5a057 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -7,6 +7,7 @@
7 * Copyright (c) 2006-2007 Jiri Kosina 7 * Copyright (c) 2006-2007 Jiri Kosina
8 * Copyright (c) 2007 Paul Walmsley 8 * Copyright (c) 2007 Paul Walmsley
9 * Copyright (c) 2008 Jiri Slaby 9 * Copyright (c) 2008 Jiri Slaby
10 * Copyright (c) 2010 Hendrik Iben
10 */ 11 */
11 12
12/* 13/*
@@ -19,6 +20,9 @@
19#include <linux/device.h> 20#include <linux/device.h>
20#include <linux/hid.h> 21#include <linux/hid.h>
21#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/random.h>
24#include <linux/sched.h>
25#include <linux/wait.h>
22 26
23#include "hid-ids.h" 27#include "hid-ids.h"
24#include "hid-lg.h" 28#include "hid-lg.h"
@@ -35,31 +39,43 @@
35#define LG_FF2 0x400 39#define LG_FF2 0x400
36#define LG_RDESC_REL_ABS 0x800 40#define LG_RDESC_REL_ABS 0x800
37#define LG_FF3 0x1000 41#define LG_FF3 0x1000
42#define LG_FF4 0x2000
38 43
39/* 44/*
40 * Certain Logitech keyboards send in report #3 keys which are far 45 * Certain Logitech keyboards send in report #3 keys which are far
41 * above the logical maximum described in descriptor. This extends 46 * above the logical maximum described in descriptor. This extends
42 * the original value of 0x28c of logical maximum to 0x104d 47 * the original value of 0x28c of logical maximum to 0x104d
43 */ 48 */
44static void lg_report_fixup(struct hid_device *hdev, __u8 *rdesc, 49static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
45 unsigned int rsize) 50 unsigned int *rsize)
46{ 51{
47 unsigned long quirks = (unsigned long)hid_get_drvdata(hdev); 52 unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
48 53
49 if ((quirks & LG_RDESC) && rsize >= 90 && rdesc[83] == 0x26 && 54 if ((quirks & LG_RDESC) && *rsize >= 90 && rdesc[83] == 0x26 &&
50 rdesc[84] == 0x8c && rdesc[85] == 0x02) { 55 rdesc[84] == 0x8c && rdesc[85] == 0x02) {
51 dev_info(&hdev->dev, "fixing up Logitech keyboard report " 56 dev_info(&hdev->dev, "fixing up Logitech keyboard report "
52 "descriptor\n"); 57 "descriptor\n");
53 rdesc[84] = rdesc[89] = 0x4d; 58 rdesc[84] = rdesc[89] = 0x4d;
54 rdesc[85] = rdesc[90] = 0x10; 59 rdesc[85] = rdesc[90] = 0x10;
55 } 60 }
56 if ((quirks & LG_RDESC_REL_ABS) && rsize >= 50 && 61 if ((quirks & LG_RDESC_REL_ABS) && *rsize >= 50 &&
57 rdesc[32] == 0x81 && rdesc[33] == 0x06 && 62 rdesc[32] == 0x81 && rdesc[33] == 0x06 &&
58 rdesc[49] == 0x81 && rdesc[50] == 0x06) { 63 rdesc[49] == 0x81 && rdesc[50] == 0x06) {
59 dev_info(&hdev->dev, "fixing up rel/abs in Logitech " 64 dev_info(&hdev->dev, "fixing up rel/abs in Logitech "
60 "report descriptor\n"); 65 "report descriptor\n");
61 rdesc[33] = rdesc[50] = 0x02; 66 rdesc[33] = rdesc[50] = 0x02;
62 } 67 }
68 if ((quirks & LG_FF4) && *rsize >= 101 &&
69 rdesc[41] == 0x95 && rdesc[42] == 0x0B &&
70 rdesc[47] == 0x05 && rdesc[48] == 0x09) {
71 dev_info(&hdev->dev, "fixing up Logitech Speed Force Wireless "
72 "button descriptor\n");
73 rdesc[41] = 0x05;
74 rdesc[42] = 0x09;
75 rdesc[47] = 0x95;
76 rdesc[48] = 0x0B;
77 }
78 return rdesc;
63} 79}
64 80
65#define lg_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \ 81#define lg_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \
@@ -285,12 +301,33 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
285 goto err_free; 301 goto err_free;
286 } 302 }
287 303
304 if (quirks & LG_FF4) {
305 unsigned char buf[] = { 0x00, 0xAF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
306
307 ret = hdev->hid_output_raw_report(hdev, buf, sizeof(buf), HID_FEATURE_REPORT);
308
309 if (ret >= 0) {
310 /* insert a little delay of 10 jiffies ~ 40ms */
311 wait_queue_head_t wait;
312 init_waitqueue_head (&wait);
313 wait_event_interruptible_timeout(wait, 0, 10);
314
315 /* Select random Address */
316 buf[1] = 0xB2;
317 get_random_bytes(&buf[2], 2);
318
319 ret = hdev->hid_output_raw_report(hdev, buf, sizeof(buf), HID_FEATURE_REPORT);
320 }
321 }
322
288 if (quirks & LG_FF) 323 if (quirks & LG_FF)
289 lgff_init(hdev); 324 lgff_init(hdev);
290 if (quirks & LG_FF2) 325 if (quirks & LG_FF2)
291 lg2ff_init(hdev); 326 lg2ff_init(hdev);
292 if (quirks & LG_FF3) 327 if (quirks & LG_FF3)
293 lg3ff_init(hdev); 328 lg3ff_init(hdev);
329 if (quirks & LG_FF4)
330 lg4ff_init(hdev);
294 331
295 return 0; 332 return 0;
296err_free: 333err_free:
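
The LG_FF4 branch in lg_probe() performs a short handshake before the Speed Force Wireless accepts force-feedback commands: a 0xAF feature report, a delay of about 40 ms (ten jiffies, per the comment), then a 0xB2 report carrying two random address bytes. The open-coded wait queue is only used as a sleep; written with a plain msleep() the same sequence reads as follows (a sketch under that assumption, the mydrv_ prefix is mine):

#include <linux/delay.h>
#include <linux/hid.h>
#include <linux/random.h>

/* Sketch of the LG_FF4 init handshake with an explicit sleep. */
static int mydrv_ff4_handshake(struct hid_device *hdev)
{
        unsigned char buf[] = { 0x00, 0xAF, 0x01, 0x00, 0x00, 0x00,
                                0x00, 0x00, 0x00 };
        int ret;

        ret = hdev->hid_output_raw_report(hdev, buf, sizeof(buf),
                                          HID_FEATURE_REPORT);
        if (ret < 0)
                return ret;

        msleep(40);                     /* let the wheel settle */

        buf[1] = 0xB2;                  /* select a random address */
        get_random_bytes(&buf[2], 2);
        return hdev->hid_output_raw_report(hdev, buf, sizeof(buf),
                                           HID_FEATURE_REPORT);
}
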
@@ -325,6 +362,8 @@ static const struct hid_device_id lg_devices[] = {
325 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL), 362 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL),
326 .driver_data = LG_NOGET | LG_FF }, 363 .driver_data = LG_NOGET | LG_FF },
327 364
365 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD),
366 .driver_data = LG_FF2 },
328 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD), 367 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD),
329 .driver_data = LG_FF }, 368 .driver_data = LG_FF },
330 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2), 369 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2),
@@ -339,6 +378,8 @@ static const struct hid_device_id lg_devices[] = {
339 .driver_data = LG_FF }, 378 .driver_data = LG_FF },
340 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL), 379 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL),
341 .driver_data = LG_FF }, 380 .driver_data = LG_FF },
381 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL),
382 .driver_data = LG_FF4 },
342 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG ), 383 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG ),
343 .driver_data = LG_FF }, 384 .driver_data = LG_FF },
344 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2), 385 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2),
diff --git a/drivers/hid/hid-lg.h b/drivers/hid/hid-lg.h
index ce2ac8672624..b0100ba2ae0b 100644
--- a/drivers/hid/hid-lg.h
+++ b/drivers/hid/hid-lg.h
@@ -19,4 +19,10 @@ int lg3ff_init(struct hid_device *hdev);
19static inline int lg3ff_init(struct hid_device *hdev) { return -1; } 19static inline int lg3ff_init(struct hid_device *hdev) { return -1; }
20#endif 20#endif
21 21
22#ifdef CONFIG_LOGIWII_FF
23int lg4ff_init(struct hid_device *hdev);
24#else
25static inline int lg4ff_init(struct hid_device *hdev) { return -1; }
26#endif
27
22#endif 28#endif
diff --git a/drivers/hid/hid-lg2ff.c b/drivers/hid/hid-lg2ff.c
index d888f1e6794f..4258253c36b3 100644
--- a/drivers/hid/hid-lg2ff.c
+++ b/drivers/hid/hid-lg2ff.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Force feedback support for Logitech Rumblepad 2 2 * Force feedback support for Logitech RumblePad and Rumblepad 2
3 * 3 *
4 * Copyright (c) 2008 Anssi Hannula <anssi.hannula@gmail.com> 4 * Copyright (c) 2008 Anssi Hannula <anssi.hannula@gmail.com>
5 */ 5 */
@@ -110,7 +110,7 @@ int lg2ff_init(struct hid_device *hid)
110 110
111 usbhid_submit_report(hid, report, USB_DIR_OUT); 111 usbhid_submit_report(hid, report, USB_DIR_OUT);
112 112
113 dev_info(&hid->dev, "Force feedback for Logitech Rumblepad 2 by " 113 dev_info(&hid->dev, "Force feedback for Logitech RumblePad/Rumblepad 2 by "
114 "Anssi Hannula <anssi.hannula@gmail.com>\n"); 114 "Anssi Hannula <anssi.hannula@gmail.com>\n");
115 115
116 return 0; 116 return 0;
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
new file mode 100644
index 000000000000..7eef5a2ce948
--- /dev/null
+++ b/drivers/hid/hid-lg4ff.c
@@ -0,0 +1,136 @@
1/*
2 * Force feedback support for Logitech Speed Force Wireless
3 *
4 * http://wiibrew.org/wiki/Logitech_USB_steering_wheel
5 *
6 * Copyright (c) 2010 Simon Wood <simon@mungewell.org>
7 */
8
9/*
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24
25
26#include <linux/input.h>
27#include <linux/usb.h>
28#include <linux/hid.h>
29
30#include "usbhid/usbhid.h"
31#include "hid-lg.h"
32
33struct lg4ff_device {
34 struct hid_report *report;
35};
36
37static const signed short ff4_wheel_ac[] = {
38 FF_CONSTANT,
39 FF_AUTOCENTER,
40 -1
41};
42
43static int hid_lg4ff_play(struct input_dev *dev, void *data,
44 struct ff_effect *effect)
45{
46 struct hid_device *hid = input_get_drvdata(dev);
47 struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
48 struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
49 int x;
50
51#define CLAMP(x) if (x < 0) x = 0; if (x > 0xff) x = 0xff
52
53 switch (effect->type) {
54 case FF_CONSTANT:
55 x = effect->u.ramp.start_level + 0x80; /* 0x80 is no force */
56 CLAMP(x);
57 report->field[0]->value[0] = 0x11; /* Slot 1 */
58 report->field[0]->value[1] = 0x10;
59 report->field[0]->value[2] = x;
60 report->field[0]->value[3] = 0x00;
61 report->field[0]->value[4] = 0x00;
62 report->field[0]->value[5] = 0x08;
63 report->field[0]->value[6] = 0x00;
64 dbg_hid("Autocenter, x=0x%02X\n", x);
65
66 usbhid_submit_report(hid, report, USB_DIR_OUT);
67 break;
68 }
69 return 0;
70}
71
72static void hid_lg4ff_set_autocenter(struct input_dev *dev, u16 magnitude)
73{
74 struct hid_device *hid = input_get_drvdata(dev);
75 struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
76 struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
77 __s32 *value = report->field[0]->value;
78
79 *value++ = 0xfe;
80 *value++ = 0x0d;
81 *value++ = 0x07;
82 *value++ = 0x07;
83 *value++ = (magnitude >> 8) & 0xff;
84 *value++ = 0x00;
85 *value = 0x00;
86
87 usbhid_submit_report(hid, report, USB_DIR_OUT);
88}
89
90
91int lg4ff_init(struct hid_device *hid)
92{
93 struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
94 struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
95 struct input_dev *dev = hidinput->input;
96 struct hid_report *report;
97 struct hid_field *field;
98 const signed short *ff_bits = ff4_wheel_ac;
99 int error;
100 int i;
101
102 /* Find the report to use */
103 if (list_empty(report_list)) {
104 err_hid("No output report found");
105 return -1;
106 }
107
108 /* Check that the report looks ok */
109 report = list_entry(report_list->next, struct hid_report, list);
110 if (!report) {
111 err_hid("NULL output report");
112 return -1;
113 }
114
115 field = report->field[0];
116 if (!field) {
117 err_hid("NULL field");
118 return -1;
119 }
120
121 for (i = 0; ff_bits[i] >= 0; i++)
122 set_bit(ff_bits[i], dev->ffbit);
123
124 error = input_ff_create_memless(dev, NULL, hid_lg4ff_play);
125
126 if (error)
127 return error;
128
129 if (test_bit(FF_AUTOCENTER, dev->ffbit))
130 dev->ff->set_autocenter = hid_lg4ff_set_autocenter;
131
132 dev_info(&hid->dev, "Force feedback for Logitech Speed Force Wireless by "
133 "Simon Wood <simon@mungewell.org>\n");
134 return 0;
135}
136
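
lg4ff_init() registers a memless force-feedback device, so hid_lg4ff_play() above is invoked by the input FF core whenever user space uploads and starts an effect. For reference, driving FF_CONSTANT from the other end of that API looks roughly like this (standard evdev ioctls; the event node path is made up):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/input.h>

int main(void)
{
        struct ff_effect effect;
        struct input_event play;
        int fd = open("/dev/input/event7", O_RDWR);     /* hypothetical node */

        if (fd < 0)
                return 1;

        memset(&effect, 0, sizeof(effect));
        effect.type = FF_CONSTANT;
        effect.id = -1;                         /* kernel assigns a slot */
        effect.u.constant.level = 0x4000;       /* roughly half force */
        if (ioctl(fd, EVIOCSFF, &effect) < 0)   /* upload the effect */
                return 1;

        memset(&play, 0, sizeof(play));
        play.type = EV_FF;
        play.code = effect.id;                  /* which effect to start */
        play.value = 1;                         /* 1 = play, 0 = stop */
        if (write(fd, &play, sizeof(play)) != sizeof(play))
                return 1;

        sleep(2);                               /* let the effect run */
        close(fd);
        return 0;
}
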
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 319b0e57ee41..e6dc15171664 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -2,6 +2,7 @@
2 * Apple "Magic" Wireless Mouse driver 2 * Apple "Magic" Wireless Mouse driver
3 * 3 *
4 * Copyright (c) 2010 Michael Poole <mdpoole@troilus.org> 4 * Copyright (c) 2010 Michael Poole <mdpoole@troilus.org>
5 * Copyright (c) 2010 Chase Douglas <chase.douglas@canonical.com>
5 */ 6 */
6 7
7/* 8/*
@@ -53,7 +54,9 @@ static bool report_undeciphered;
53module_param(report_undeciphered, bool, 0644); 54module_param(report_undeciphered, bool, 0644);
54MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state field using a MSC_RAW event"); 55MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state field using a MSC_RAW event");
55 56
56#define TOUCH_REPORT_ID 0x29 57#define TRACKPAD_REPORT_ID 0x28
58#define MOUSE_REPORT_ID 0x29
59#define DOUBLE_REPORT_ID 0xf7
57/* These definitions are not precise, but they're close enough. (Bits 60/* These definitions are not precise, but they're close enough. (Bits
58 * 0x03 seem to indicate the aspect ratio of the touch, bits 0x70 seem 61 * 0x03 seem to indicate the aspect ratio of the touch, bits 0x70 seem
59 * to be some kind of bit mask -- 0x20 may be a near-field reading, 62 * to be some kind of bit mask -- 0x20 may be a near-field reading,
@@ -67,15 +70,19 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie
67 70
68#define SCROLL_ACCEL_DEFAULT 7 71#define SCROLL_ACCEL_DEFAULT 7
69 72
73/* Single touch emulation should only begin when no touches are currently down.
74 * This is true when single_touch_id is equal to NO_TOUCHES. If multiple touches
75 * are down and the touch providing for single touch emulation is lifted,
76 * single_touch_id is equal to SINGLE_TOUCH_UP. While single touch emulation is
77 * occuring, single_touch_id corresponds with the tracking id of the touch used.
78 */
79#define NO_TOUCHES -1
80#define SINGLE_TOUCH_UP -2
81
70/** 82/**
71 * struct magicmouse_sc - Tracks Magic Mouse-specific data. 83 * struct magicmouse_sc - Tracks Magic Mouse-specific data.
72 * @input: Input device through which we report events. 84 * @input: Input device through which we report events.
73 * @quirks: Currently unused. 85 * @quirks: Currently unused.
74 * @last_timestamp: Timestamp from most recent (18-bit) touch report
75 * (units of milliseconds over short windows, but seems to
76 * increase faster when there are no touches).
77 * @delta_time: 18-bit difference between the two most recent touch
78 * reports from the mouse.
79 * @ntouches: Number of touches in most recent touch report. 86 * @ntouches: Number of touches in most recent touch report.
80 * @scroll_accel: Number of consecutive scroll motions. 87 * @scroll_accel: Number of consecutive scroll motions.
81 * @scroll_jiffies: Time of last scroll motion. 88 * @scroll_jiffies: Time of last scroll motion.
@@ -86,8 +93,6 @@ struct magicmouse_sc {
86 struct input_dev *input; 93 struct input_dev *input;
87 unsigned long quirks; 94 unsigned long quirks;
88 95
89 int last_timestamp;
90 int delta_time;
91 int ntouches; 96 int ntouches;
92 int scroll_accel; 97 int scroll_accel;
93 unsigned long scroll_jiffies; 98 unsigned long scroll_jiffies;
@@ -98,9 +103,9 @@ struct magicmouse_sc {
98 short scroll_x; 103 short scroll_x;
99 short scroll_y; 104 short scroll_y;
100 u8 size; 105 u8 size;
101 u8 down;
102 } touches[16]; 106 } touches[16];
103 int tracking_ids[16]; 107 int tracking_ids[16];
108 int single_touch_id;
104}; 109};
105 110
106static int magicmouse_firm_touch(struct magicmouse_sc *msc) 111static int magicmouse_firm_touch(struct magicmouse_sc *msc)
@@ -166,18 +171,35 @@ static void magicmouse_emit_buttons(struct magicmouse_sc *msc, int state)
166static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tdata) 171static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tdata)
167{ 172{
168 struct input_dev *input = msc->input; 173 struct input_dev *input = msc->input;
169 __s32 x_y = tdata[0] << 8 | tdata[1] << 16 | tdata[2] << 24; 174 int id, x, y, size, orientation, touch_major, touch_minor, state, down;
170 int misc = tdata[5] | tdata[6] << 8; 175
171 int id = (misc >> 6) & 15; 176 if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
172 int x = x_y << 12 >> 20; 177 id = (tdata[6] << 2 | tdata[5] >> 6) & 0xf;
173 int y = -(x_y >> 20); 178 x = (tdata[1] << 28 | tdata[0] << 20) >> 20;
174 int down = (tdata[7] & TOUCH_STATE_MASK) != TOUCH_STATE_NONE; 179 y = -((tdata[2] << 24 | tdata[1] << 16) >> 20);
180 size = tdata[5] & 0x3f;
181 orientation = (tdata[6] >> 2) - 32;
182 touch_major = tdata[3];
183 touch_minor = tdata[4];
184 state = tdata[7] & TOUCH_STATE_MASK;
185 down = state != TOUCH_STATE_NONE;
186 } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
187 id = (tdata[7] << 2 | tdata[6] >> 6) & 0xf;
188 x = (tdata[1] << 27 | tdata[0] << 19) >> 19;
189 y = -((tdata[3] << 30 | tdata[2] << 22 | tdata[1] << 14) >> 19);
190 size = tdata[6] & 0x3f;
191 orientation = (tdata[7] >> 2) - 32;
192 touch_major = tdata[4];
193 touch_minor = tdata[5];
194 state = tdata[8] & TOUCH_STATE_MASK;
195 down = state != TOUCH_STATE_NONE;
196 }
175 197
176 /* Store tracking ID and other fields. */ 198 /* Store tracking ID and other fields. */
177 msc->tracking_ids[raw_id] = id; 199 msc->tracking_ids[raw_id] = id;
178 msc->touches[id].x = x; 200 msc->touches[id].x = x;
179 msc->touches[id].y = y; 201 msc->touches[id].y = y;
180 msc->touches[id].size = misc & 63; 202 msc->touches[id].size = size;
181 203
182 /* If requested, emulate a scroll wheel by detecting small 204 /* If requested, emulate a scroll wheel by detecting small
183 * vertical touch motions. 205 * vertical touch motions.
@@ -188,7 +210,7 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
188 int step_y = msc->touches[id].scroll_y - y; 210 int step_y = msc->touches[id].scroll_y - y;
189 211
190 /* Calculate and apply the scroll motion. */ 212 /* Calculate and apply the scroll motion. */
191 switch (tdata[7] & TOUCH_STATE_MASK) { 213 switch (state) {
192 case TOUCH_STATE_START: 214 case TOUCH_STATE_START:
193 msc->touches[id].scroll_x = x; 215 msc->touches[id].scroll_x = x;
194 msc->touches[id].scroll_y = y; 216 msc->touches[id].scroll_y = y;
@@ -222,21 +244,28 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
222 } 244 }
223 } 245 }
224 246
247 if (down) {
248 msc->ntouches++;
249 if (msc->single_touch_id == NO_TOUCHES)
250 msc->single_touch_id = id;
251 } else if (msc->single_touch_id == id)
252 msc->single_touch_id = SINGLE_TOUCH_UP;
253
225 /* Generate the input events for this touch. */ 254 /* Generate the input events for this touch. */
226 if (report_touches && down) { 255 if (report_touches && down) {
227 int orientation = (misc >> 10) - 32;
228
229 msc->touches[id].down = 1;
230
231 input_report_abs(input, ABS_MT_TRACKING_ID, id); 256 input_report_abs(input, ABS_MT_TRACKING_ID, id);
232 input_report_abs(input, ABS_MT_TOUCH_MAJOR, tdata[3]); 257 input_report_abs(input, ABS_MT_TOUCH_MAJOR, touch_major << 2);
233 input_report_abs(input, ABS_MT_TOUCH_MINOR, tdata[4]); 258 input_report_abs(input, ABS_MT_TOUCH_MINOR, touch_minor << 2);
234 input_report_abs(input, ABS_MT_ORIENTATION, orientation); 259 input_report_abs(input, ABS_MT_ORIENTATION, orientation);
235 input_report_abs(input, ABS_MT_POSITION_X, x); 260 input_report_abs(input, ABS_MT_POSITION_X, x);
236 input_report_abs(input, ABS_MT_POSITION_Y, y); 261 input_report_abs(input, ABS_MT_POSITION_Y, y);
237 262
238 if (report_undeciphered) 263 if (report_undeciphered) {
239 input_event(input, EV_MSC, MSC_RAW, tdata[7]); 264 if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE)
265 input_event(input, EV_MSC, MSC_RAW, tdata[7]);
266 else /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
267 input_event(input, EV_MSC, MSC_RAW, tdata[8]);
268 }
240 269
241 input_mt_sync(input); 270 input_mt_sync(input);
242 } 271 }
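
Both decoders in magicmouse_emit_touch() pull signed, unaligned fields out of the packed touch bytes with the same trick: assemble the field at the top of a 32-bit word with left shifts, then let an arithmetic right shift propagate the sign bit back down. Isolated, the mouse's 12-bit X extraction is equivalent to the sketch below (mouse_touch_x() is a name of mine):

/*
 * Sketch: sign-extend the 12-bit X field whose low 8 bits sit in
 * tdata[0] and whose high 4 bits sit in the low nibble of tdata[1].
 * Shifting the assembled value up to bit 31 and back down relies on
 * arithmetic right shift of signed ints, exactly like the expression
 * (tdata[1] << 28 | tdata[0] << 20) >> 20 used above.
 */
static int mouse_touch_x(const unsigned char *tdata)
{
        int raw = (tdata[1] & 0x0f) << 8 | tdata[0];    /* 12 unsigned bits */

        return (raw << 20) >> 20;                       /* sign-extend */
}
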
@@ -247,39 +276,43 @@ static int magicmouse_raw_event(struct hid_device *hdev,
247{ 276{
248 struct magicmouse_sc *msc = hid_get_drvdata(hdev); 277 struct magicmouse_sc *msc = hid_get_drvdata(hdev);
249 struct input_dev *input = msc->input; 278 struct input_dev *input = msc->input;
250 int x, y, ts, ii, clicks, last_up; 279 int x = 0, y = 0, ii, clicks = 0, npoints;
251 280
252 switch (data[0]) { 281 switch (data[0]) {
253 case 0x10: 282 case TRACKPAD_REPORT_ID:
254 if (size != 6) 283 /* Expect four bytes of prefix, and N*9 bytes of touch data. */
284 if (size < 4 || ((size - 4) % 9) != 0)
255 return 0; 285 return 0;
256 x = (__s16)(data[2] | data[3] << 8); 286 npoints = (size - 4) / 9;
257 y = (__s16)(data[4] | data[5] << 8); 287 msc->ntouches = 0;
288 for (ii = 0; ii < npoints; ii++)
289 magicmouse_emit_touch(msc, ii, data + ii * 9 + 4);
290
291 /* We don't need an MT sync here because trackpad emits a
292 * BTN_TOUCH event in a new frame when all touches are released.
293 */
294 if (msc->ntouches == 0)
295 msc->single_touch_id = NO_TOUCHES;
296
258 clicks = data[1]; 297 clicks = data[1];
298
299 /* The following bits provide a device specific timestamp. They
300 * are unused here.
301 *
302 * ts = data[1] >> 6 | data[2] << 2 | data[3] << 10;
303 */
259 break; 304 break;
260 case TOUCH_REPORT_ID: 305 case MOUSE_REPORT_ID:
261 /* Expect six bytes of prefix, and N*8 bytes of touch data. */ 306 /* Expect six bytes of prefix, and N*8 bytes of touch data. */
262 if (size < 6 || ((size - 6) % 8) != 0) 307 if (size < 6 || ((size - 6) % 8) != 0)
263 return 0; 308 return 0;
264 ts = data[3] >> 6 | data[4] << 2 | data[5] << 10; 309 npoints = (size - 6) / 8;
265 msc->delta_time = (ts - msc->last_timestamp) & 0x3ffff; 310 msc->ntouches = 0;
266 msc->last_timestamp = ts; 311 for (ii = 0; ii < npoints; ii++)
267 msc->ntouches = (size - 6) / 8;
268 for (ii = 0; ii < msc->ntouches; ii++)
269 magicmouse_emit_touch(msc, ii, data + ii * 8 + 6); 312 magicmouse_emit_touch(msc, ii, data + ii * 8 + 6);
270 313
271 if (report_touches) { 314 if (report_touches && msc->ntouches == 0)
272 last_up = 1; 315 input_mt_sync(input);
273 for (ii = 0; ii < ARRAY_SIZE(msc->touches); ii++) {
274 if (msc->touches[ii].down) {
275 last_up = 0;
276 msc->touches[ii].down = 0;
277 }
278 }
279 if (last_up) {
280 input_mt_sync(input);
281 }
282 }
283 316
284 /* When emulating three-button mode, it is important 317 /* When emulating three-button mode, it is important
285 * to have the current touch information before 318 * to have the current touch information before
@@ -288,68 +321,72 @@ static int magicmouse_raw_event(struct hid_device *hdev,
288 x = (int)(((data[3] & 0x0c) << 28) | (data[1] << 22)) >> 22; 321 x = (int)(((data[3] & 0x0c) << 28) | (data[1] << 22)) >> 22;
289 y = (int)(((data[3] & 0x30) << 26) | (data[2] << 22)) >> 22; 322 y = (int)(((data[3] & 0x30) << 26) | (data[2] << 22)) >> 22;
290 clicks = data[3]; 323 clicks = data[3];
324
325 /* The following bits provide a device specific timestamp. They
326 * are unused here.
327 *
328 * ts = data[3] >> 6 | data[4] << 2 | data[5] << 10;
329 */
330 break;
331 case DOUBLE_REPORT_ID:
332 /* Sometimes the trackpad sends two touch reports in one
333 * packet.
334 */
335 magicmouse_raw_event(hdev, report, data + 2, data[1]);
336 magicmouse_raw_event(hdev, report, data + 2 + data[1],
337 size - 2 - data[1]);
291 break; 338 break;
292 case 0x20: /* Theoretically battery status (0-100), but I have
293 * never seen it -- maybe it is only upon request.
294 */
295 case 0x60: /* Unknown, maybe laser on/off. */
296 case 0x61: /* Laser reflection status change.
297 * data[1]: 0 = spotted, 1 = lost
298 */
299 default: 339 default:
300 return 0; 340 return 0;
301 } 341 }
302 342
303 magicmouse_emit_buttons(msc, clicks & 3); 343 if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
304 input_report_rel(input, REL_X, x); 344 magicmouse_emit_buttons(msc, clicks & 3);
305 input_report_rel(input, REL_Y, y); 345 input_report_rel(input, REL_X, x);
346 input_report_rel(input, REL_Y, y);
347 } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
348 input_report_key(input, BTN_MOUSE, clicks & 1);
349 input_report_key(input, BTN_TOUCH, msc->ntouches > 0);
350 input_report_key(input, BTN_TOOL_FINGER, msc->ntouches == 1);
351 input_report_key(input, BTN_TOOL_DOUBLETAP, msc->ntouches == 2);
352 input_report_key(input, BTN_TOOL_TRIPLETAP, msc->ntouches == 3);
353 input_report_key(input, BTN_TOOL_QUADTAP, msc->ntouches == 4);
354 if (msc->single_touch_id >= 0) {
355 input_report_abs(input, ABS_X,
356 msc->touches[msc->single_touch_id].x);
357 input_report_abs(input, ABS_Y,
358 msc->touches[msc->single_touch_id].y);
359 }
360 }
361
306 input_sync(input); 362 input_sync(input);
307 return 1; 363 return 1;
308} 364}
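
The DOUBLE_REPORT_ID case covers the trackpad occasionally concatenating two complete reports into a single transfer: byte 1 holds the length of the first embedded report, which starts at offset 2, and whatever is left is the second. The same framing as a standalone splitter (a sketch with hypothetical names; the real code simply recurses into magicmouse_raw_event()):

/*
 * Sketch of the 0xf7 "double report" framing:
 *   data[0]                     report id (0xf7)
 *   data[1]                     length of the first embedded report
 *   data[2 .. 2+data[1]-1]      first report
 *   data[2+data[1] .. size-1]   second report
 */
static void split_double_report(const unsigned char *data, int size,
                                void (*handle)(const unsigned char *, int))
{
        int first = data[1];

        if (size < 2 + first)           /* malformed packet, ignore */
                return;

        handle(data + 2, first);
        handle(data + 2 + first, size - 2 - first);
}
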
309 365
310static int magicmouse_input_open(struct input_dev *dev)
311{
312 struct hid_device *hid = input_get_drvdata(dev);
313
314 return hid->ll_driver->open(hid);
315}
316
317static void magicmouse_input_close(struct input_dev *dev)
318{
319 struct hid_device *hid = input_get_drvdata(dev);
320
321 hid->ll_driver->close(hid);
322}
323
324static void magicmouse_setup_input(struct input_dev *input, struct hid_device *hdev) 366static void magicmouse_setup_input(struct input_dev *input, struct hid_device *hdev)
325{ 367{
326 input_set_drvdata(input, hdev);
327 input->event = hdev->ll_driver->hidinput_input_event;
328 input->open = magicmouse_input_open;
329 input->close = magicmouse_input_close;
330
331 input->name = hdev->name;
332 input->phys = hdev->phys;
333 input->uniq = hdev->uniq;
334 input->id.bustype = hdev->bus;
335 input->id.vendor = hdev->vendor;
336 input->id.product = hdev->product;
337 input->id.version = hdev->version;
338 input->dev.parent = hdev->dev.parent;
339
340 __set_bit(EV_KEY, input->evbit); 368 __set_bit(EV_KEY, input->evbit);
341 __set_bit(BTN_LEFT, input->keybit); 369
342 __set_bit(BTN_RIGHT, input->keybit); 370 if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
343 if (emulate_3button) 371 __set_bit(BTN_LEFT, input->keybit);
344 __set_bit(BTN_MIDDLE, input->keybit); 372 __set_bit(BTN_RIGHT, input->keybit);
345 __set_bit(BTN_TOOL_FINGER, input->keybit); 373 if (emulate_3button)
346 374 __set_bit(BTN_MIDDLE, input->keybit);
347 __set_bit(EV_REL, input->evbit); 375
348 __set_bit(REL_X, input->relbit); 376 __set_bit(EV_REL, input->evbit);
349 __set_bit(REL_Y, input->relbit); 377 __set_bit(REL_X, input->relbit);
350 if (emulate_scroll_wheel) { 378 __set_bit(REL_Y, input->relbit);
351 __set_bit(REL_WHEEL, input->relbit); 379 if (emulate_scroll_wheel) {
352 __set_bit(REL_HWHEEL, input->relbit); 380 __set_bit(REL_WHEEL, input->relbit);
381 __set_bit(REL_HWHEEL, input->relbit);
382 }
383 } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
384 __set_bit(BTN_MOUSE, input->keybit);
385 __set_bit(BTN_TOOL_FINGER, input->keybit);
386 __set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
387 __set_bit(BTN_TOOL_TRIPLETAP, input->keybit);
388 __set_bit(BTN_TOOL_QUADTAP, input->keybit);
389 __set_bit(BTN_TOUCH, input->keybit);
353 } 390 }
354 391
355 if (report_touches) { 392 if (report_touches) {
@@ -359,16 +396,26 @@ static void magicmouse_setup_input(struct input_dev *input, struct hid_device *h
359 input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255, 4, 0); 396 input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255, 4, 0);
360 input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 255, 4, 0); 397 input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 255, 4, 0);
361 input_set_abs_params(input, ABS_MT_ORIENTATION, -32, 31, 1, 0); 398 input_set_abs_params(input, ABS_MT_ORIENTATION, -32, 31, 1, 0);
362 input_set_abs_params(input, ABS_MT_POSITION_X, -1100, 1358, 399
363 4, 0);
364 /* Note: Touch Y position from the device is inverted relative 400 /* Note: Touch Y position from the device is inverted relative
365 * to how pointer motion is reported (and relative to how USB 401 * to how pointer motion is reported (and relative to how USB
366 * HID recommends the coordinates work). This driver keeps 402 * HID recommends the coordinates work). This driver keeps
367 * the origin at the same position, and just uses the additive 403 * the origin at the same position, and just uses the additive
368 * inverse of the reported Y. 404 * inverse of the reported Y.
369 */ 405 */
370 input_set_abs_params(input, ABS_MT_POSITION_Y, -1589, 2047, 406 if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
371 4, 0); 407 input_set_abs_params(input, ABS_MT_POSITION_X, -1100,
408 1358, 4, 0);
409 input_set_abs_params(input, ABS_MT_POSITION_Y, -1589,
410 2047, 4, 0);
411 } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
412 input_set_abs_params(input, ABS_X, -2909, 3167, 4, 0);
413 input_set_abs_params(input, ABS_Y, -2456, 2565, 4, 0);
414 input_set_abs_params(input, ABS_MT_POSITION_X, -2909,
415 3167, 4, 0);
416 input_set_abs_params(input, ABS_MT_POSITION_Y, -2456,
417 2565, 4, 0);
418 }
372 } 419 }
373 420
374 if (report_undeciphered) { 421 if (report_undeciphered) {
@@ -377,12 +424,22 @@ static void magicmouse_setup_input(struct input_dev *input, struct hid_device *h
377 } 424 }
378} 425}
379 426
427static int magicmouse_input_mapping(struct hid_device *hdev,
428 struct hid_input *hi, struct hid_field *field,
429 struct hid_usage *usage, unsigned long **bit, int *max)
430{
431 struct magicmouse_sc *msc = hid_get_drvdata(hdev);
432
433 if (!msc->input)
434 msc->input = hi->input;
435
436 return 0;
437}
438
380static int magicmouse_probe(struct hid_device *hdev, 439static int magicmouse_probe(struct hid_device *hdev,
381 const struct hid_device_id *id) 440 const struct hid_device_id *id)
382{ 441{
383 __u8 feature_1[] = { 0xd7, 0x01 }; 442 __u8 feature[] = { 0xd7, 0x01 };
384 __u8 feature_2[] = { 0xf8, 0x01, 0x32 };
385 struct input_dev *input;
386 struct magicmouse_sc *msc; 443 struct magicmouse_sc *msc;
387 struct hid_report *report; 444 struct hid_report *report;
388 int ret; 445 int ret;
@@ -398,6 +455,8 @@ static int magicmouse_probe(struct hid_device *hdev,
398 msc->quirks = id->driver_data; 455 msc->quirks = id->driver_data;
399 hid_set_drvdata(hdev, msc); 456 hid_set_drvdata(hdev, msc);
400 457
458 msc->single_touch_id = NO_TOUCHES;
459
401 ret = hid_parse(hdev); 460 ret = hid_parse(hdev);
402 if (ret) { 461 if (ret) {
403 dev_err(&hdev->dev, "magicmouse hid parse failed\n"); 462 dev_err(&hdev->dev, "magicmouse hid parse failed\n");
@@ -410,10 +469,22 @@ static int magicmouse_probe(struct hid_device *hdev,
410 goto err_free; 469 goto err_free;
411 } 470 }
412 471
413 /* we are handling the input ourselves */ 472 /* We do this after hid-input is done parsing reports so that
414 hidinput_disconnect(hdev); 473 * hid-input uses the most natural button and axis IDs.
474 */
475 if (msc->input)
476 magicmouse_setup_input(msc->input, hdev);
477
478 if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE)
479 report = hid_register_report(hdev, HID_INPUT_REPORT,
480 MOUSE_REPORT_ID);
481 else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
482 report = hid_register_report(hdev, HID_INPUT_REPORT,
483 TRACKPAD_REPORT_ID);
484 report = hid_register_report(hdev, HID_INPUT_REPORT,
485 DOUBLE_REPORT_ID);
486 }
415 487
416 report = hid_register_report(hdev, HID_INPUT_REPORT, TOUCH_REPORT_ID);
417 if (!report) { 488 if (!report) {
418 dev_err(&hdev->dev, "unable to register touch report\n"); 489 dev_err(&hdev->dev, "unable to register touch report\n");
419 ret = -ENOMEM; 490 ret = -ENOMEM;
@@ -421,39 +492,15 @@ static int magicmouse_probe(struct hid_device *hdev,
421 } 492 }
422 report->size = 6; 493 report->size = 6;
423 494
424 ret = hdev->hid_output_raw_report(hdev, feature_1, sizeof(feature_1), 495 ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature),
425 HID_FEATURE_REPORT); 496 HID_FEATURE_REPORT);
426 if (ret != sizeof(feature_1)) { 497 if (ret != sizeof(feature)) {
427 dev_err(&hdev->dev, "unable to request touch data (1:%d)\n", 498 dev_err(&hdev->dev, "unable to request touch data (%d)\n",
428 ret);
429 goto err_stop_hw;
430 }
431 ret = hdev->hid_output_raw_report(hdev, feature_2,
432 sizeof(feature_2), HID_FEATURE_REPORT);
433 if (ret != sizeof(feature_2)) {
434 dev_err(&hdev->dev, "unable to request touch data (2:%d)\n",
435 ret); 499 ret);
436 goto err_stop_hw; 500 goto err_stop_hw;
437 } 501 }
438 502
439 input = input_allocate_device();
440 if (!input) {
441 dev_err(&hdev->dev, "can't alloc input device\n");
442 ret = -ENOMEM;
443 goto err_stop_hw;
444 }
445 magicmouse_setup_input(input, hdev);
446
447 ret = input_register_device(input);
448 if (ret) {
449 dev_err(&hdev->dev, "input device registration failed\n");
450 goto err_input;
451 }
452 msc->input = input;
453
454 return 0; 503 return 0;
455err_input:
456 input_free_device(input);
457err_stop_hw: 504err_stop_hw:
458 hid_hw_stop(hdev); 505 hid_hw_stop(hdev);
459err_free: 506err_free:
@@ -466,13 +513,14 @@ static void magicmouse_remove(struct hid_device *hdev)
466 struct magicmouse_sc *msc = hid_get_drvdata(hdev); 513 struct magicmouse_sc *msc = hid_get_drvdata(hdev);
467 514
468 hid_hw_stop(hdev); 515 hid_hw_stop(hdev);
469 input_unregister_device(msc->input);
470 kfree(msc); 516 kfree(msc);
471} 517}
472 518
473static const struct hid_device_id magic_mice[] = { 519static const struct hid_device_id magic_mice[] = {
474 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE), 520 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
475 .driver_data = 0 }, 521 USB_DEVICE_ID_APPLE_MAGICMOUSE), .driver_data = 0 },
522 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
523 USB_DEVICE_ID_APPLE_MAGICTRACKPAD), .driver_data = 0 },
476 { } 524 { }
477}; 525};
478MODULE_DEVICE_TABLE(hid, magic_mice); 526MODULE_DEVICE_TABLE(hid, magic_mice);
@@ -483,6 +531,7 @@ static struct hid_driver magicmouse_driver = {
483 .probe = magicmouse_probe, 531 .probe = magicmouse_probe,
484 .remove = magicmouse_remove, 532 .remove = magicmouse_remove,
485 .raw_event = magicmouse_raw_event, 533 .raw_event = magicmouse_raw_event,
534 .input_mapping = magicmouse_input_mapping,
486}; 535};
487 536
488static int __init magicmouse_init(void) 537static int __init magicmouse_init(void)
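
The X/Y decoding in magicmouse_raw_event() above assembles a 10-bit signed delta: the low 8 bits come from data[1] (data[2] for Y), the top two bits from data[3], and the packed field is parked at the top of a 32-bit int so that an arithmetic right shift sign-extends it. A minimal standalone sketch of that trick, using made-up sample bytes rather than real reports:

#include <stdio.h>
#include <stdint.h>

/*
 * Same expression as the driver: data[1] holds the low 8 bits of the
 * X delta, bits 2-3 of data[3] hold the top two bits.  Shifting the
 * packed field down from the top of the word sign-extends it.
 */
static int decode_x(const uint8_t *data)
{
	return (int)(((data[3] & 0x0c) << 28) | (data[1] << 22)) >> 22;
}

int main(void)
{
	uint8_t fwd[4]  = { 0x00, 0x05, 0x00, 0x00 };	/* delta +5 */
	uint8_t back[4] = { 0x00, 0xfb, 0x00, 0x0c };	/* delta -5 */

	printf("%d %d\n", decode_x(fwd), decode_x(back));	/* prints "5 -5" */
	return 0;
}
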
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index 359cc447c6c6..dc618c33d0a2 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -33,18 +33,19 @@
33 * Microsoft Wireless Desktop Receiver (Model 1028) has 33 * Microsoft Wireless Desktop Receiver (Model 1028) has
34 * 'Usage Min/Max' where it ought to have 'Physical Min/Max' 34 * 'Usage Min/Max' where it ought to have 'Physical Min/Max'
35 */ 35 */
36static void ms_report_fixup(struct hid_device *hdev, __u8 *rdesc, 36static __u8 *ms_report_fixup(struct hid_device *hdev, __u8 *rdesc,
37 unsigned int rsize) 37 unsigned int *rsize)
38{ 38{
39 unsigned long quirks = (unsigned long)hid_get_drvdata(hdev); 39 unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
40 40
41 if ((quirks & MS_RDESC) && rsize == 571 && rdesc[557] == 0x19 && 41 if ((quirks & MS_RDESC) && *rsize == 571 && rdesc[557] == 0x19 &&
42 rdesc[559] == 0x29) { 42 rdesc[559] == 0x29) {
43 dev_info(&hdev->dev, "fixing up Microsoft Wireless Receiver " 43 dev_info(&hdev->dev, "fixing up Microsoft Wireless Receiver "
44 "Model 1028 report descriptor\n"); 44 "Model 1028 report descriptor\n");
45 rdesc[557] = 0x35; 45 rdesc[557] = 0x35;
46 rdesc[559] = 0x45; 46 rdesc[559] = 0x45;
47 } 47 }
48 return rdesc;
48} 49}
49 50
50#define ms_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \ 51#define ms_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \
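
The hid-microsoft hunk above, like the monterey, ortek, petalynx, prodikeys, samsung and sony hunks further down, adapts the driver to a changed report_fixup contract: the callback now receives the descriptor size by pointer and returns the descriptor buffer the core should use, instead of returning void. A sketch of the new callback shape; the byte offsets and values are placeholders, not a real quirk:

#include <linux/device.h>
#include <linux/hid.h>

/*
 * Template only: check *rsize before poking at rdesc, patch bytes in
 * place, and hand the (possibly replaced) buffer back to the core.
 * Offsets 40/41 and the values below are invented for illustration.
 */
static __u8 *example_report_fixup(struct hid_device *hdev, __u8 *rdesc,
		unsigned int *rsize)
{
	if (*rsize >= 42 && rdesc[40] == 0x25 && rdesc[41] == 0x01) {
		dev_info(&hdev->dev, "fixing up example report descriptor\n");
		rdesc[41] = 0x7f;
	}
	return rdesc;
}
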
diff --git a/drivers/hid/hid-monterey.c b/drivers/hid/hid-monterey.c
index 2cd05aa244b9..c95c31e2d869 100644
--- a/drivers/hid/hid-monterey.c
+++ b/drivers/hid/hid-monterey.c
@@ -22,14 +22,15 @@
22 22
23#include "hid-ids.h" 23#include "hid-ids.h"
24 24
25static void mr_report_fixup(struct hid_device *hdev, __u8 *rdesc, 25static __u8 *mr_report_fixup(struct hid_device *hdev, __u8 *rdesc,
26 unsigned int rsize) 26 unsigned int *rsize)
27{ 27{
28 if (rsize >= 30 && rdesc[29] == 0x05 && rdesc[30] == 0x09) { 28 if (*rsize >= 30 && rdesc[29] == 0x05 && rdesc[30] == 0x09) {
29 dev_info(&hdev->dev, "fixing up button/consumer in HID report " 29 dev_info(&hdev->dev, "fixing up button/consumer in HID report "
30 "descriptor\n"); 30 "descriptor\n");
31 rdesc[30] = 0x0c; 31 rdesc[30] = 0x0c;
32 } 32 }
33 return rdesc;
33} 34}
34 35
35#define mr_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \ 36#define mr_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \
diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
index fb69b8c4953f..69169efa1e16 100644
--- a/drivers/hid/hid-ntrig.c
+++ b/drivers/hid/hid-ntrig.c
@@ -90,6 +90,55 @@ struct ntrig_data {
90}; 90};
91 91
92 92
93/*
94 * This function converts the 4 byte raw firmware code into
 95 * a string containing 5 dot-separated numbers.
96 */
97static int ntrig_version_string(unsigned char *raw, char *buf)
98{
99 __u8 a = (raw[1] & 0x0e) >> 1;
100 __u8 b = (raw[0] & 0x3c) >> 2;
101 __u8 c = ((raw[0] & 0x03) << 3) | ((raw[3] & 0xe0) >> 5);
102 __u8 d = ((raw[3] & 0x07) << 3) | ((raw[2] & 0xe0) >> 5);
103 __u8 e = raw[2] & 0x07;
104
105 /*
106 * As yet unmapped bits:
107 * 0b11000000 0b11110001 0b00011000 0b00011000
108 */
109
110 return sprintf(buf, "%u.%u.%u.%u.%u", a, b, c, d, e);
111}
112
113static void ntrig_report_version(struct hid_device *hdev)
114{
115 int ret;
116 char buf[20];
117 struct usb_device *usb_dev = hid_to_usb_dev(hdev);
118 unsigned char *data = kmalloc(8, GFP_KERNEL);
119
120 if (!data)
121 goto err_free;
122
123 ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
124 USB_REQ_CLEAR_FEATURE,
125 USB_TYPE_CLASS | USB_RECIP_INTERFACE |
126 USB_DIR_IN,
127 0x30c, 1, data, 8,
128 USB_CTRL_SET_TIMEOUT);
129
130 if (ret == 8) {
131 ret = ntrig_version_string(&data[2], buf);
132
133 dev_info(&hdev->dev,
134 "Firmware version: %s (%02x%02x %02x%02x)\n",
135 buf, data[2], data[3], data[4], data[5]);
136 }
137
138err_free:
139 kfree(data);
140}
141
93static ssize_t show_phys_width(struct device *dev, 142static ssize_t show_phys_width(struct device *dev,
94 struct device_attribute *attr, 143 struct device_attribute *attr,
95 char *buf) 144 char *buf)
@@ -377,8 +426,8 @@ static struct attribute_group ntrig_attribute_group = {
377 */ 426 */
378 427
379static int ntrig_input_mapping(struct hid_device *hdev, struct hid_input *hi, 428static int ntrig_input_mapping(struct hid_device *hdev, struct hid_input *hi,
380 struct hid_field *field, struct hid_usage *usage, 429 struct hid_field *field, struct hid_usage *usage,
381 unsigned long **bit, int *max) 430 unsigned long **bit, int *max)
382{ 431{
383 struct ntrig_data *nd = hid_get_drvdata(hdev); 432 struct ntrig_data *nd = hid_get_drvdata(hdev);
384 433
@@ -448,13 +497,13 @@ static int ntrig_input_mapping(struct hid_device *hdev, struct hid_input *hi,
448 /* width/height mapped on TouchMajor/TouchMinor/Orientation */ 497 /* width/height mapped on TouchMajor/TouchMinor/Orientation */
449 case HID_DG_WIDTH: 498 case HID_DG_WIDTH:
450 hid_map_usage(hi, usage, bit, max, 499 hid_map_usage(hi, usage, bit, max,
451 EV_ABS, ABS_MT_TOUCH_MAJOR); 500 EV_ABS, ABS_MT_TOUCH_MAJOR);
452 return 1; 501 return 1;
453 case HID_DG_HEIGHT: 502 case HID_DG_HEIGHT:
454 hid_map_usage(hi, usage, bit, max, 503 hid_map_usage(hi, usage, bit, max,
455 EV_ABS, ABS_MT_TOUCH_MINOR); 504 EV_ABS, ABS_MT_TOUCH_MINOR);
456 input_set_abs_params(hi->input, ABS_MT_ORIENTATION, 505 input_set_abs_params(hi->input, ABS_MT_ORIENTATION,
457 0, 1, 0, 0); 506 0, 1, 0, 0);
458 return 1; 507 return 1;
459 } 508 }
460 return 0; 509 return 0;
@@ -468,8 +517,8 @@ static int ntrig_input_mapping(struct hid_device *hdev, struct hid_input *hi,
468} 517}
469 518
470static int ntrig_input_mapped(struct hid_device *hdev, struct hid_input *hi, 519static int ntrig_input_mapped(struct hid_device *hdev, struct hid_input *hi,
471 struct hid_field *field, struct hid_usage *usage, 520 struct hid_field *field, struct hid_usage *usage,
472 unsigned long **bit, int *max) 521 unsigned long **bit, int *max)
473{ 522{
474 /* No special mappings needed for the pen and single touch */ 523 /* No special mappings needed for the pen and single touch */
475 if (field->physical) 524 if (field->physical)
@@ -489,7 +538,7 @@ static int ntrig_input_mapped(struct hid_device *hdev, struct hid_input *hi,
489 * and call input_mt_sync after each point if necessary 538 * and call input_mt_sync after each point if necessary
490 */ 539 */
491static int ntrig_event (struct hid_device *hid, struct hid_field *field, 540static int ntrig_event (struct hid_device *hid, struct hid_field *field,
492 struct hid_usage *usage, __s32 value) 541 struct hid_usage *usage, __s32 value)
493{ 542{
494 struct input_dev *input = field->hidinput->input; 543 struct input_dev *input = field->hidinput->input;
495 struct ntrig_data *nd = hid_get_drvdata(hid); 544 struct ntrig_data *nd = hid_get_drvdata(hid);
@@ -848,6 +897,8 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id)
848 if (report) 897 if (report)
849 usbhid_submit_report(hdev, report, USB_DIR_OUT); 898 usbhid_submit_report(hdev, report, USB_DIR_OUT);
850 899
900 ntrig_report_version(hdev);
901
851 ret = sysfs_create_group(&hdev->dev.kobj, 902 ret = sysfs_create_group(&hdev->dev.kobj,
852 &ntrig_attribute_group); 903 &ntrig_attribute_group);
853 904
@@ -860,7 +911,7 @@ err_free:
860static void ntrig_remove(struct hid_device *hdev) 911static void ntrig_remove(struct hid_device *hdev)
861{ 912{
862 sysfs_remove_group(&hdev->dev.kobj, 913 sysfs_remove_group(&hdev->dev.kobj,
863 &ntrig_attribute_group); 914 &ntrig_attribute_group);
864 hid_hw_stop(hdev); 915 hid_hw_stop(hdev);
865 kfree(hid_get_drvdata(hdev)); 916 kfree(hid_get_drvdata(hdev));
866} 917}
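
The 0x30c in the usb_control_msg() call of ntrig_report_version() above follows the standard HID GET_REPORT wValue layout: report type in the high byte (1 = input, 2 = output, 3 = feature) and report ID in the low byte, so 0x30c reads feature report 0x0c. The sixaxis helper added later in this series builds the same value as ((report_type + 1) << 8) | report_id. A small sketch of that composition; the helper name is local to this example:

#include <stdint.h>
#include <stdio.h>

/* wValue for a HID GET_REPORT/SET_REPORT control request. */
static uint16_t hid_report_wvalue(uint8_t report_type, uint8_t report_id)
{
	return (uint16_t)((report_type << 8) | report_id);
}

int main(void)
{
	/* Feature report 0x0c -> 0x30c, matching the driver's constant. */
	printf("0x%x\n", hid_report_wvalue(3, 0x0c));
	return 0;
}
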
diff --git a/drivers/hid/hid-ortek.c b/drivers/hid/hid-ortek.c
index aa9a960f73a4..2e79716dca31 100644
--- a/drivers/hid/hid-ortek.c
+++ b/drivers/hid/hid-ortek.c
@@ -19,14 +19,15 @@
19 19
20#include "hid-ids.h" 20#include "hid-ids.h"
21 21
22static void ortek_report_fixup(struct hid_device *hdev, __u8 *rdesc, 22static __u8 *ortek_report_fixup(struct hid_device *hdev, __u8 *rdesc,
23 unsigned int rsize) 23 unsigned int *rsize)
24{ 24{
25 if (rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x01) { 25 if (*rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x01) {
26 dev_info(&hdev->dev, "Fixing up Ortek WKB-2000 " 26 dev_info(&hdev->dev, "Fixing up Ortek WKB-2000 "
27 "report descriptor.\n"); 27 "report descriptor.\n");
28 rdesc[55] = 0x92; 28 rdesc[55] = 0x92;
29 } 29 }
30 return rdesc;
30} 31}
31 32
32static const struct hid_device_id ortek_devices[] = { 33static const struct hid_device_id ortek_devices[] = {
diff --git a/drivers/hid/hid-petalynx.c b/drivers/hid/hid-petalynx.c
index 500fbd0652dc..308d6ae48a3e 100644
--- a/drivers/hid/hid-petalynx.c
+++ b/drivers/hid/hid-petalynx.c
@@ -23,10 +23,10 @@
23#include "hid-ids.h" 23#include "hid-ids.h"
24 24
25/* Petalynx Maxter Remote has maximum for consumer page set too low */ 25/* Petalynx Maxter Remote has maximum for consumer page set too low */
26static void pl_report_fixup(struct hid_device *hdev, __u8 *rdesc, 26static __u8 *pl_report_fixup(struct hid_device *hdev, __u8 *rdesc,
27 unsigned int rsize) 27 unsigned int *rsize)
28{ 28{
29 if (rsize >= 60 && rdesc[39] == 0x2a && rdesc[40] == 0xf5 && 29 if (*rsize >= 60 && rdesc[39] == 0x2a && rdesc[40] == 0xf5 &&
30 rdesc[41] == 0x00 && rdesc[59] == 0x26 && 30 rdesc[41] == 0x00 && rdesc[59] == 0x26 &&
31 rdesc[60] == 0xf9 && rdesc[61] == 0x00) { 31 rdesc[60] == 0xf9 && rdesc[61] == 0x00) {
32 dev_info(&hdev->dev, "fixing up Petalynx Maxter Remote report " 32 dev_info(&hdev->dev, "fixing up Petalynx Maxter Remote report "
@@ -34,6 +34,7 @@ static void pl_report_fixup(struct hid_device *hdev, __u8 *rdesc,
34 rdesc[60] = 0xfa; 34 rdesc[60] = 0xfa;
35 rdesc[40] = 0xfa; 35 rdesc[40] = 0xfa;
36 } 36 }
37 return rdesc;
37} 38}
38 39
39#define pl_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \ 40#define pl_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \
diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c
index 845f428b8090..48eab84f53b5 100644
--- a/drivers/hid/hid-prodikeys.c
+++ b/drivers/hid/hid-prodikeys.c
@@ -740,10 +740,10 @@ int pcmidi_snd_terminate(struct pcmidi_snd *pm)
740/* 740/*
741 * PC-MIDI report descriptor for report id is wrong. 741 * PC-MIDI report descriptor for report id is wrong.
742 */ 742 */
743static void pk_report_fixup(struct hid_device *hdev, __u8 *rdesc, 743static __u8 *pk_report_fixup(struct hid_device *hdev, __u8 *rdesc,
744 unsigned int rsize) 744 unsigned int *rsize)
745{ 745{
746 if (rsize == 178 && 746 if (*rsize == 178 &&
747 rdesc[111] == 0x06 && rdesc[112] == 0x00 && 747 rdesc[111] == 0x06 && rdesc[112] == 0x00 &&
748 rdesc[113] == 0xff) { 748 rdesc[113] == 0xff) {
749 dev_info(&hdev->dev, "fixing up pc-midi keyboard report " 749 dev_info(&hdev->dev, "fixing up pc-midi keyboard report "
@@ -751,6 +751,7 @@ static void pk_report_fixup(struct hid_device *hdev, __u8 *rdesc,
751 751
752 rdesc[144] = 0x18; /* report 4: was 0x10 report count */ 752 rdesc[144] = 0x18; /* report 4: was 0x10 report count */
753 } 753 }
754 return rdesc;
754} 755}
755 756
756static int pk_input_mapping(struct hid_device *hdev, struct hid_input *hi, 757static int pk_input_mapping(struct hid_device *hdev, struct hid_input *hi,
diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c
new file mode 100644
index 000000000000..9bf23047892a
--- /dev/null
+++ b/drivers/hid/hid-roccat-pyra.c
@@ -0,0 +1,968 @@
1/*
2 * Roccat Pyra driver for Linux
3 *
4 * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net>
5 */
6
7/*
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
12 */
13
14/*
 15 * Roccat Pyra is a mobile gamer mouse which comes in wired and wireless
 16 * variants. The wireless variant is not tested.
17 * Userland tools can be found at http://sourceforge.net/projects/roccat
18 */
19
20#include <linux/device.h>
21#include <linux/input.h>
22#include <linux/hid.h>
23#include <linux/usb.h>
24#include <linux/module.h>
25#include <linux/slab.h>
26#include "hid-ids.h"
27#include "hid-roccat.h"
28#include "hid-roccat-pyra.h"
29
30static void profile_activated(struct pyra_device *pyra,
31 unsigned int new_profile)
32{
33 pyra->actual_profile = new_profile;
34 pyra->actual_cpi = pyra->profile_settings[pyra->actual_profile].y_cpi;
35}
36
37static int pyra_send_control(struct usb_device *usb_dev, int value,
38 enum pyra_control_requests request)
39{
40 int len;
41 struct pyra_control control;
42
43 if ((request == PYRA_CONTROL_REQUEST_PROFILE_SETTINGS ||
44 request == PYRA_CONTROL_REQUEST_PROFILE_BUTTONS) &&
45 (value < 0 || value > 4))
46 return -EINVAL;
47
48 control.command = PYRA_COMMAND_CONTROL;
49 control.value = value;
50 control.request = request;
51
52 len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
53 USB_REQ_SET_CONFIGURATION,
54 USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
55 PYRA_USB_COMMAND_CONTROL, 0, (char *)&control,
56 sizeof(struct pyra_control),
57 USB_CTRL_SET_TIMEOUT);
58
59 if (len != sizeof(struct pyra_control))
60 return len;
61
62 return 0;
63}
64
65static int pyra_receive_control_status(struct usb_device *usb_dev)
66{
67 int len;
68 struct pyra_control control;
69
70 do {
71 msleep(10);
72
73 len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
74 USB_REQ_CLEAR_FEATURE,
75 USB_TYPE_CLASS | USB_RECIP_INTERFACE |
76 USB_DIR_IN,
77 PYRA_USB_COMMAND_CONTROL, 0, (char *)&control,
78 sizeof(struct pyra_control),
79 USB_CTRL_SET_TIMEOUT);
80
81 /* requested too early, try again */
82 } while (len == -EPROTO);
83
84 if (len == sizeof(struct pyra_control) &&
85 control.command == PYRA_COMMAND_CONTROL &&
86 control.request == PYRA_CONTROL_REQUEST_STATUS &&
87 control.value == 1)
88 return 0;
89 else {
90 dev_err(&usb_dev->dev, "receive control status: "
91 "unknown response 0x%x 0x%x\n",
92 control.request, control.value);
93 return -EINVAL;
94 }
95}
96
97static int pyra_get_profile_settings(struct usb_device *usb_dev,
98 struct pyra_profile_settings *buf, int number)
99{
100 int retval;
101
102 retval = pyra_send_control(usb_dev, number,
103 PYRA_CONTROL_REQUEST_PROFILE_SETTINGS);
104
105 if (retval)
106 return retval;
107
108 retval = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
109 USB_REQ_CLEAR_FEATURE,
110 USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
111 PYRA_USB_COMMAND_PROFILE_SETTINGS, 0, (char *)buf,
112 sizeof(struct pyra_profile_settings),
113 USB_CTRL_SET_TIMEOUT);
114
115 if (retval != sizeof(struct pyra_profile_settings))
116 return retval;
117
118 return 0;
119}
120
121static int pyra_get_profile_buttons(struct usb_device *usb_dev,
122 struct pyra_profile_buttons *buf, int number)
123{
124 int retval;
125
126 retval = pyra_send_control(usb_dev, number,
127 PYRA_CONTROL_REQUEST_PROFILE_BUTTONS);
128
129 if (retval)
130 return retval;
131
132 retval = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
133 USB_REQ_CLEAR_FEATURE,
134 USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
135 PYRA_USB_COMMAND_PROFILE_BUTTONS, 0, (char *)buf,
136 sizeof(struct pyra_profile_buttons),
137 USB_CTRL_SET_TIMEOUT);
138
139 if (retval != sizeof(struct pyra_profile_buttons))
140 return retval;
141
142 return 0;
143}
144
145static int pyra_get_settings(struct usb_device *usb_dev,
146 struct pyra_settings *buf)
147{
148 int len;
149 len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
150 USB_REQ_CLEAR_FEATURE,
151 USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
152 PYRA_USB_COMMAND_SETTINGS, 0, buf,
153 sizeof(struct pyra_settings), USB_CTRL_SET_TIMEOUT);
154 if (len != sizeof(struct pyra_settings))
155 return -EIO;
156 return 0;
157}
158
159static int pyra_get_info(struct usb_device *usb_dev, struct pyra_info *buf)
160{
161 int len;
162 len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
163 USB_REQ_CLEAR_FEATURE,
164 USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
165 PYRA_USB_COMMAND_INFO, 0, buf,
166 sizeof(struct pyra_info), USB_CTRL_SET_TIMEOUT);
167 if (len != sizeof(struct pyra_info))
168 return -EIO;
169 return 0;
170}
171
172static int pyra_set_profile_settings(struct usb_device *usb_dev,
173 struct pyra_profile_settings const *settings)
174{
175 int len;
176 len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
177 USB_REQ_SET_CONFIGURATION,
178 USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
179 PYRA_USB_COMMAND_PROFILE_SETTINGS, 0, (char *)settings,
180 sizeof(struct pyra_profile_settings),
181 USB_CTRL_SET_TIMEOUT);
182 if (len != sizeof(struct pyra_profile_settings))
183 return -EIO;
184 if (pyra_receive_control_status(usb_dev))
185 return -EIO;
186 return 0;
187}
188
189static int pyra_set_profile_buttons(struct usb_device *usb_dev,
190 struct pyra_profile_buttons const *buttons)
191{
192 int len;
193 len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
194 USB_REQ_SET_CONFIGURATION,
195 USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
196 PYRA_USB_COMMAND_PROFILE_BUTTONS, 0, (char *)buttons,
197 sizeof(struct pyra_profile_buttons),
198 USB_CTRL_SET_TIMEOUT);
199 if (len != sizeof(struct pyra_profile_buttons))
200 return -EIO;
201 if (pyra_receive_control_status(usb_dev))
202 return -EIO;
203 return 0;
204}
205
206static int pyra_set_settings(struct usb_device *usb_dev,
207 struct pyra_settings const *settings)
208{
209 int len;
210 len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
211 USB_REQ_SET_CONFIGURATION,
212 USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
213 PYRA_USB_COMMAND_SETTINGS, 0, (char *)settings,
214 sizeof(struct pyra_settings), USB_CTRL_SET_TIMEOUT);
215 if (len != sizeof(struct pyra_settings))
216 return -EIO;
217 if (pyra_receive_control_status(usb_dev))
218 return -EIO;
219 return 0;
220}
221
222static ssize_t pyra_sysfs_read_profilex_settings(struct file *fp,
223 struct kobject *kobj, struct bin_attribute *attr, char *buf,
224 loff_t off, size_t count, int number)
225{
226 struct device *dev = container_of(kobj, struct device, kobj);
227 struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
228
229 if (off >= sizeof(struct pyra_profile_settings))
230 return 0;
231
232 if (off + count > sizeof(struct pyra_profile_settings))
233 count = sizeof(struct pyra_profile_settings) - off;
234
235 mutex_lock(&pyra->pyra_lock);
236 memcpy(buf, ((char const *)&pyra->profile_settings[number]) + off,
237 count);
238 mutex_unlock(&pyra->pyra_lock);
239
240 return count;
241}
242
243static ssize_t pyra_sysfs_read_profile1_settings(struct file *fp,
244 struct kobject *kobj, struct bin_attribute *attr, char *buf,
245 loff_t off, size_t count)
246{
247 return pyra_sysfs_read_profilex_settings(fp, kobj,
248 attr, buf, off, count, 0);
249}
250
251static ssize_t pyra_sysfs_read_profile2_settings(struct file *fp,
252 struct kobject *kobj, struct bin_attribute *attr, char *buf,
253 loff_t off, size_t count)
254{
255 return pyra_sysfs_read_profilex_settings(fp, kobj,
256 attr, buf, off, count, 1);
257}
258
259static ssize_t pyra_sysfs_read_profile3_settings(struct file *fp,
260 struct kobject *kobj, struct bin_attribute *attr, char *buf,
261 loff_t off, size_t count)
262{
263 return pyra_sysfs_read_profilex_settings(fp, kobj,
264 attr, buf, off, count, 2);
265}
266
267static ssize_t pyra_sysfs_read_profile4_settings(struct file *fp,
268 struct kobject *kobj, struct bin_attribute *attr, char *buf,
269 loff_t off, size_t count)
270{
271 return pyra_sysfs_read_profilex_settings(fp, kobj,
272 attr, buf, off, count, 3);
273}
274
275static ssize_t pyra_sysfs_read_profile5_settings(struct file *fp,
276 struct kobject *kobj, struct bin_attribute *attr, char *buf,
277 loff_t off, size_t count)
278{
279 return pyra_sysfs_read_profilex_settings(fp, kobj,
280 attr, buf, off, count, 4);
281}
282
283static ssize_t pyra_sysfs_read_profilex_buttons(struct file *fp,
284 struct kobject *kobj, struct bin_attribute *attr, char *buf,
285 loff_t off, size_t count, int number)
286{
287 struct device *dev = container_of(kobj, struct device, kobj);
288 struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
289
290 if (off >= sizeof(struct pyra_profile_buttons))
291 return 0;
292
293 if (off + count > sizeof(struct pyra_profile_buttons))
294 count = sizeof(struct pyra_profile_buttons) - off;
295
296 mutex_lock(&pyra->pyra_lock);
297 memcpy(buf, ((char const *)&pyra->profile_buttons[number]) + off,
298 count);
299 mutex_unlock(&pyra->pyra_lock);
300
301 return count;
302}
303
304static ssize_t pyra_sysfs_read_profile1_buttons(struct file *fp,
305 struct kobject *kobj, struct bin_attribute *attr, char *buf,
306 loff_t off, size_t count)
307{
308 return pyra_sysfs_read_profilex_buttons(fp, kobj,
309 attr, buf, off, count, 0);
310}
311
312static ssize_t pyra_sysfs_read_profile2_buttons(struct file *fp,
313 struct kobject *kobj, struct bin_attribute *attr, char *buf,
314 loff_t off, size_t count)
315{
316 return pyra_sysfs_read_profilex_buttons(fp, kobj,
317 attr, buf, off, count, 1);
318}
319
320static ssize_t pyra_sysfs_read_profile3_buttons(struct file *fp,
321 struct kobject *kobj, struct bin_attribute *attr, char *buf,
322 loff_t off, size_t count)
323{
324 return pyra_sysfs_read_profilex_buttons(fp, kobj,
325 attr, buf, off, count, 2);
326}
327
328static ssize_t pyra_sysfs_read_profile4_buttons(struct file *fp,
329 struct kobject *kobj, struct bin_attribute *attr, char *buf,
330 loff_t off, size_t count)
331{
332 return pyra_sysfs_read_profilex_buttons(fp, kobj,
333 attr, buf, off, count, 3);
334}
335
336static ssize_t pyra_sysfs_read_profile5_buttons(struct file *fp,
337 struct kobject *kobj, struct bin_attribute *attr, char *buf,
338 loff_t off, size_t count)
339{
340 return pyra_sysfs_read_profilex_buttons(fp, kobj,
341 attr, buf, off, count, 4);
342}
343
344static ssize_t pyra_sysfs_write_profile_settings(struct file *fp,
345 struct kobject *kobj, struct bin_attribute *attr, char *buf,
346 loff_t off, size_t count)
347{
348 struct device *dev = container_of(kobj, struct device, kobj);
349 struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
350 struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
351 int retval = 0;
352 int difference;
353 int profile_number;
354 struct pyra_profile_settings *profile_settings;
355
356 if (off != 0 || count != sizeof(struct pyra_profile_settings))
357 return -EINVAL;
358
359 profile_number = ((struct pyra_profile_settings const *)buf)->number;
360 profile_settings = &pyra->profile_settings[profile_number];
361
362 mutex_lock(&pyra->pyra_lock);
363 difference = memcmp(buf, profile_settings,
364 sizeof(struct pyra_profile_settings));
365 if (difference) {
366 retval = pyra_set_profile_settings(usb_dev,
367 (struct pyra_profile_settings const *)buf);
368 if (!retval)
369 memcpy(profile_settings, buf,
370 sizeof(struct pyra_profile_settings));
371 }
372 mutex_unlock(&pyra->pyra_lock);
373
374 if (retval)
375 return retval;
376
377 return sizeof(struct pyra_profile_settings);
378}
379
380static ssize_t pyra_sysfs_write_profile_buttons(struct file *fp,
381 struct kobject *kobj, struct bin_attribute *attr, char *buf,
382 loff_t off, size_t count)
383{
384 struct device *dev = container_of(kobj, struct device, kobj);
385 struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
386 struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
387 int retval = 0;
388 int difference;
389 int profile_number;
390 struct pyra_profile_buttons *profile_buttons;
391
392 if (off != 0 || count != sizeof(struct pyra_profile_buttons))
393 return -EINVAL;
394
395 profile_number = ((struct pyra_profile_buttons const *)buf)->number;
396 profile_buttons = &pyra->profile_buttons[profile_number];
397
398 mutex_lock(&pyra->pyra_lock);
399 difference = memcmp(buf, profile_buttons,
400 sizeof(struct pyra_profile_buttons));
401 if (difference) {
402 retval = pyra_set_profile_buttons(usb_dev,
403 (struct pyra_profile_buttons const *)buf);
404 if (!retval)
405 memcpy(profile_buttons, buf,
406 sizeof(struct pyra_profile_buttons));
407 }
408 mutex_unlock(&pyra->pyra_lock);
409
410 if (retval)
411 return retval;
412
413 return sizeof(struct pyra_profile_buttons);
414}
415
416static ssize_t pyra_sysfs_read_settings(struct file *fp,
417 struct kobject *kobj, struct bin_attribute *attr, char *buf,
418 loff_t off, size_t count)
419{
420 struct device *dev = container_of(kobj, struct device, kobj);
421 struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
422
423 if (off >= sizeof(struct pyra_settings))
424 return 0;
425
426 if (off + count > sizeof(struct pyra_settings))
427 count = sizeof(struct pyra_settings) - off;
428
429 mutex_lock(&pyra->pyra_lock);
430 memcpy(buf, ((char const *)&pyra->settings) + off, count);
431 mutex_unlock(&pyra->pyra_lock);
432
433 return count;
434}
435
436static ssize_t pyra_sysfs_write_settings(struct file *fp,
437 struct kobject *kobj, struct bin_attribute *attr, char *buf,
438 loff_t off, size_t count)
439{
440 struct device *dev = container_of(kobj, struct device, kobj);
441 struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
442 struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
443 int retval = 0;
444 int difference;
445
446 if (off != 0 || count != sizeof(struct pyra_settings))
447 return -EINVAL;
448
449 mutex_lock(&pyra->pyra_lock);
450 difference = memcmp(buf, &pyra->settings, sizeof(struct pyra_settings));
451 if (difference) {
452 retval = pyra_set_settings(usb_dev,
453 (struct pyra_settings const *)buf);
454 if (!retval)
455 memcpy(&pyra->settings, buf,
456 sizeof(struct pyra_settings));
457 }
458 mutex_unlock(&pyra->pyra_lock);
459
460 if (retval)
461 return retval;
462
463 profile_activated(pyra, pyra->settings.startup_profile);
464
465 return sizeof(struct pyra_settings);
466}
467
468
469static ssize_t pyra_sysfs_show_actual_cpi(struct device *dev,
470 struct device_attribute *attr, char *buf)
471{
472 struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
473 return snprintf(buf, PAGE_SIZE, "%d\n", pyra->actual_cpi);
474}
475
476static ssize_t pyra_sysfs_show_actual_profile(struct device *dev,
477 struct device_attribute *attr, char *buf)
478{
479 struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
480 return snprintf(buf, PAGE_SIZE, "%d\n", pyra->actual_profile);
481}
482
483static ssize_t pyra_sysfs_show_firmware_version(struct device *dev,
484 struct device_attribute *attr, char *buf)
485{
486 struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
487 return snprintf(buf, PAGE_SIZE, "%d\n", pyra->firmware_version);
488}
489
490static ssize_t pyra_sysfs_show_startup_profile(struct device *dev,
491 struct device_attribute *attr, char *buf)
492{
493 struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
494 return snprintf(buf, PAGE_SIZE, "%d\n", pyra->settings.startup_profile);
495}
496
497static DEVICE_ATTR(actual_cpi, 0440, pyra_sysfs_show_actual_cpi, NULL);
498
499static DEVICE_ATTR(actual_profile, 0440, pyra_sysfs_show_actual_profile, NULL);
500
501static DEVICE_ATTR(firmware_version, 0440,
502 pyra_sysfs_show_firmware_version, NULL);
503
504static DEVICE_ATTR(startup_profile, 0440,
505 pyra_sysfs_show_startup_profile, NULL);
506
507static struct attribute *pyra_attributes[] = {
508 &dev_attr_actual_cpi.attr,
509 &dev_attr_actual_profile.attr,
510 &dev_attr_firmware_version.attr,
511 &dev_attr_startup_profile.attr,
512 NULL
513};
514
515static struct attribute_group pyra_attribute_group = {
516 .attrs = pyra_attributes
517};
518
519static struct bin_attribute pyra_profile_settings_attr = {
520 .attr = { .name = "profile_settings", .mode = 0220 },
521 .size = sizeof(struct pyra_profile_settings),
522 .write = pyra_sysfs_write_profile_settings
523};
524
525static struct bin_attribute pyra_profile1_settings_attr = {
526 .attr = { .name = "profile1_settings", .mode = 0440 },
527 .size = sizeof(struct pyra_profile_settings),
528 .read = pyra_sysfs_read_profile1_settings
529};
530
531static struct bin_attribute pyra_profile2_settings_attr = {
532 .attr = { .name = "profile2_settings", .mode = 0440 },
533 .size = sizeof(struct pyra_profile_settings),
534 .read = pyra_sysfs_read_profile2_settings
535};
536
537static struct bin_attribute pyra_profile3_settings_attr = {
538 .attr = { .name = "profile3_settings", .mode = 0440 },
539 .size = sizeof(struct pyra_profile_settings),
540 .read = pyra_sysfs_read_profile3_settings
541};
542
543static struct bin_attribute pyra_profile4_settings_attr = {
544 .attr = { .name = "profile4_settings", .mode = 0440 },
545 .size = sizeof(struct pyra_profile_settings),
546 .read = pyra_sysfs_read_profile4_settings
547};
548
549static struct bin_attribute pyra_profile5_settings_attr = {
550 .attr = { .name = "profile5_settings", .mode = 0440 },
551 .size = sizeof(struct pyra_profile_settings),
552 .read = pyra_sysfs_read_profile5_settings
553};
554
555static struct bin_attribute pyra_profile_buttons_attr = {
556 .attr = { .name = "profile_buttons", .mode = 0220 },
557 .size = sizeof(struct pyra_profile_buttons),
558 .write = pyra_sysfs_write_profile_buttons
559};
560
561static struct bin_attribute pyra_profile1_buttons_attr = {
562 .attr = { .name = "profile1_buttons", .mode = 0440 },
563 .size = sizeof(struct pyra_profile_buttons),
564 .read = pyra_sysfs_read_profile1_buttons
565};
566
567static struct bin_attribute pyra_profile2_buttons_attr = {
568 .attr = { .name = "profile2_buttons", .mode = 0440 },
569 .size = sizeof(struct pyra_profile_buttons),
570 .read = pyra_sysfs_read_profile2_buttons
571};
572
573static struct bin_attribute pyra_profile3_buttons_attr = {
574 .attr = { .name = "profile3_buttons", .mode = 0440 },
575 .size = sizeof(struct pyra_profile_buttons),
576 .read = pyra_sysfs_read_profile3_buttons
577};
578
579static struct bin_attribute pyra_profile4_buttons_attr = {
580 .attr = { .name = "profile4_buttons", .mode = 0440 },
581 .size = sizeof(struct pyra_profile_buttons),
582 .read = pyra_sysfs_read_profile4_buttons
583};
584
585static struct bin_attribute pyra_profile5_buttons_attr = {
586 .attr = { .name = "profile5_buttons", .mode = 0440 },
587 .size = sizeof(struct pyra_profile_buttons),
588 .read = pyra_sysfs_read_profile5_buttons
589};
590
591static struct bin_attribute pyra_settings_attr = {
592 .attr = { .name = "settings", .mode = 0660 },
593 .size = sizeof(struct pyra_settings),
594 .read = pyra_sysfs_read_settings,
595 .write = pyra_sysfs_write_settings
596};
597
598static int pyra_create_sysfs_attributes(struct usb_interface *intf)
599{
600 int retval;
601
602 retval = sysfs_create_group(&intf->dev.kobj, &pyra_attribute_group);
603 if (retval)
604 goto exit_1;
605
606 retval = sysfs_create_bin_file(&intf->dev.kobj,
607 &pyra_profile_settings_attr);
608 if (retval)
609 goto exit_2;
610
611 retval = sysfs_create_bin_file(&intf->dev.kobj,
612 &pyra_profile1_settings_attr);
613 if (retval)
614 goto exit_3;
615
616 retval = sysfs_create_bin_file(&intf->dev.kobj,
617 &pyra_profile2_settings_attr);
618 if (retval)
619 goto exit_4;
620
621 retval = sysfs_create_bin_file(&intf->dev.kobj,
622 &pyra_profile3_settings_attr);
623 if (retval)
624 goto exit_5;
625
626 retval = sysfs_create_bin_file(&intf->dev.kobj,
627 &pyra_profile4_settings_attr);
628 if (retval)
629 goto exit_6;
630
631 retval = sysfs_create_bin_file(&intf->dev.kobj,
632 &pyra_profile5_settings_attr);
633 if (retval)
634 goto exit_7;
635
636 retval = sysfs_create_bin_file(&intf->dev.kobj,
637 &pyra_profile_buttons_attr);
638 if (retval)
639 goto exit_8;
640
641 retval = sysfs_create_bin_file(&intf->dev.kobj,
642 &pyra_profile1_buttons_attr);
643 if (retval)
644 goto exit_9;
645
646 retval = sysfs_create_bin_file(&intf->dev.kobj,
647 &pyra_profile2_buttons_attr);
648 if (retval)
649 goto exit_10;
650
651 retval = sysfs_create_bin_file(&intf->dev.kobj,
652 &pyra_profile3_buttons_attr);
653 if (retval)
654 goto exit_11;
655
656 retval = sysfs_create_bin_file(&intf->dev.kobj,
657 &pyra_profile4_buttons_attr);
658 if (retval)
659 goto exit_12;
660
661 retval = sysfs_create_bin_file(&intf->dev.kobj,
662 &pyra_profile5_buttons_attr);
663 if (retval)
664 goto exit_13;
665
666 retval = sysfs_create_bin_file(&intf->dev.kobj,
667 &pyra_settings_attr);
668 if (retval)
669 goto exit_14;
670
671 return 0;
672
673exit_14:
674 sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile5_buttons_attr);
675exit_13:
676 sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile4_buttons_attr);
677exit_12:
678 sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile3_buttons_attr);
679exit_11:
680 sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile2_buttons_attr);
681exit_10:
682 sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile1_buttons_attr);
683exit_9:
684 sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile_buttons_attr);
685exit_8:
686 sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile5_settings_attr);
687exit_7:
688 sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile4_settings_attr);
689exit_6:
690 sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile3_settings_attr);
691exit_5:
692 sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile2_settings_attr);
693exit_4:
694 sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile1_settings_attr);
695exit_3:
696 sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile_settings_attr);
697exit_2:
698 sysfs_remove_group(&intf->dev.kobj, &pyra_attribute_group);
699exit_1:
700 return retval;
701}
702
703static void pyra_remove_sysfs_attributes(struct usb_interface *intf)
704{
705 sysfs_remove_bin_file(&intf->dev.kobj, &pyra_settings_attr);
706 sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile5_buttons_attr);
707 sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile4_buttons_attr);
708 sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile3_buttons_attr);
709 sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile2_buttons_attr);
710 sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile1_buttons_attr);
711 sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile_buttons_attr);
712 sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile5_settings_attr);
713 sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile4_settings_attr);
714 sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile3_settings_attr);
715 sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile2_settings_attr);
716 sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile1_settings_attr);
717 sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile_settings_attr);
718 sysfs_remove_group(&intf->dev.kobj, &pyra_attribute_group);
719}
720
721static int pyra_init_pyra_device_struct(struct usb_device *usb_dev,
722 struct pyra_device *pyra)
723{
724 struct pyra_info *info;
725 int retval, i;
726
727 mutex_init(&pyra->pyra_lock);
728
729 info = kmalloc(sizeof(struct pyra_info), GFP_KERNEL);
730 if (!info)
731 return -ENOMEM;
732 retval = pyra_get_info(usb_dev, info);
733 if (retval) {
734 kfree(info);
735 return retval;
736 }
737 pyra->firmware_version = info->firmware_version;
738 kfree(info);
739
740 retval = pyra_get_settings(usb_dev, &pyra->settings);
741 if (retval)
742 return retval;
743
744 for (i = 0; i < 5; ++i) {
745 retval = pyra_get_profile_settings(usb_dev,
746 &pyra->profile_settings[i], i);
747 if (retval)
748 return retval;
749
750 retval = pyra_get_profile_buttons(usb_dev,
751 &pyra->profile_buttons[i], i);
752 if (retval)
753 return retval;
754 }
755
756 profile_activated(pyra, pyra->settings.startup_profile);
757
758 return 0;
759}
760
761static int pyra_init_specials(struct hid_device *hdev)
762{
763 struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
764 struct usb_device *usb_dev = interface_to_usbdev(intf);
765 struct pyra_device *pyra;
766 int retval;
767
768 if (intf->cur_altsetting->desc.bInterfaceProtocol
769 == USB_INTERFACE_PROTOCOL_MOUSE) {
770
771 pyra = kzalloc(sizeof(*pyra), GFP_KERNEL);
772 if (!pyra) {
773 dev_err(&hdev->dev, "can't alloc device descriptor\n");
774 return -ENOMEM;
775 }
776 hid_set_drvdata(hdev, pyra);
777
778 retval = pyra_init_pyra_device_struct(usb_dev, pyra);
779 if (retval) {
780 dev_err(&hdev->dev,
781 "couldn't init struct pyra_device\n");
782 goto exit_free;
783 }
784
785 retval = roccat_connect(hdev);
786 if (retval < 0) {
787 dev_err(&hdev->dev, "couldn't init char dev\n");
788 } else {
789 pyra->chrdev_minor = retval;
790 pyra->roccat_claimed = 1;
791 }
792
793 retval = pyra_create_sysfs_attributes(intf);
794 if (retval) {
795 dev_err(&hdev->dev, "cannot create sysfs files\n");
796 goto exit_free;
797 }
798 } else {
799 hid_set_drvdata(hdev, NULL);
800 }
801
802 return 0;
803exit_free:
804 kfree(pyra);
805 return retval;
806}
807
808static void pyra_remove_specials(struct hid_device *hdev)
809{
810 struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
811 struct pyra_device *pyra;
812
813 if (intf->cur_altsetting->desc.bInterfaceProtocol
814 == USB_INTERFACE_PROTOCOL_MOUSE) {
815 pyra_remove_sysfs_attributes(intf);
816 pyra = hid_get_drvdata(hdev);
817 if (pyra->roccat_claimed)
818 roccat_disconnect(pyra->chrdev_minor);
819 kfree(hid_get_drvdata(hdev));
820 }
821}
822
823static int pyra_probe(struct hid_device *hdev, const struct hid_device_id *id)
824{
825 int retval;
826
827 retval = hid_parse(hdev);
828 if (retval) {
829 dev_err(&hdev->dev, "parse failed\n");
830 goto exit;
831 }
832
833 retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
834 if (retval) {
835 dev_err(&hdev->dev, "hw start failed\n");
836 goto exit;
837 }
838
839 retval = pyra_init_specials(hdev);
840 if (retval) {
841 dev_err(&hdev->dev, "couldn't install mouse\n");
842 goto exit_stop;
843 }
844 return 0;
845
846exit_stop:
847 hid_hw_stop(hdev);
848exit:
849 return retval;
850}
851
852static void pyra_remove(struct hid_device *hdev)
853{
854 pyra_remove_specials(hdev);
855 hid_hw_stop(hdev);
856}
857
858static void pyra_keep_values_up_to_date(struct pyra_device *pyra,
859 u8 const *data)
860{
861 struct pyra_mouse_event_button const *button_event;
862
863 switch (data[0]) {
864 case PYRA_MOUSE_REPORT_NUMBER_BUTTON:
865 button_event = (struct pyra_mouse_event_button const *)data;
866 switch (button_event->type) {
867 case PYRA_MOUSE_EVENT_BUTTON_TYPE_PROFILE_2:
868 profile_activated(pyra, button_event->data1 - 1);
869 break;
870 case PYRA_MOUSE_EVENT_BUTTON_TYPE_CPI:
871 pyra->actual_cpi = button_event->data1;
872 break;
873 }
874 break;
875 }
876}
877
878static void pyra_report_to_chrdev(struct pyra_device const *pyra,
879 u8 const *data)
880{
881 struct pyra_roccat_report roccat_report;
882 struct pyra_mouse_event_button const *button_event;
883
884 if (data[0] != PYRA_MOUSE_REPORT_NUMBER_BUTTON)
885 return;
886
887 button_event = (struct pyra_mouse_event_button const *)data;
888
889 switch (button_event->type) {
890 case PYRA_MOUSE_EVENT_BUTTON_TYPE_PROFILE_2:
891 case PYRA_MOUSE_EVENT_BUTTON_TYPE_CPI:
892 roccat_report.type = button_event->type;
893 roccat_report.value = button_event->data1;
894 roccat_report.key = 0;
895 roccat_report_event(pyra->chrdev_minor,
896 (uint8_t const *)&roccat_report,
897 sizeof(struct pyra_roccat_report));
898 break;
899 case PYRA_MOUSE_EVENT_BUTTON_TYPE_MACRO:
900 case PYRA_MOUSE_EVENT_BUTTON_TYPE_SHORTCUT:
901 case PYRA_MOUSE_EVENT_BUTTON_TYPE_QUICKLAUNCH:
902 if (button_event->data2 == PYRA_MOUSE_EVENT_BUTTON_PRESS) {
903 roccat_report.type = button_event->type;
904 roccat_report.key = button_event->data1;
905 /*
 906 * The pyra reports profile numbers in the range 1-5.
 907 * Keep that behaviour here.
908 */
909 roccat_report.value = pyra->actual_profile + 1;
910 roccat_report_event(pyra->chrdev_minor,
911 (uint8_t const *)&roccat_report,
912 sizeof(struct pyra_roccat_report));
913 }
914 break;
915 }
916}
917
918static int pyra_raw_event(struct hid_device *hdev, struct hid_report *report,
919 u8 *data, int size)
920{
921 struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
922 struct pyra_device *pyra = hid_get_drvdata(hdev);
923
924 if (intf->cur_altsetting->desc.bInterfaceProtocol
925 != USB_INTERFACE_PROTOCOL_MOUSE)
926 return 0;
927
928 pyra_keep_values_up_to_date(pyra, data);
929
930 if (pyra->roccat_claimed)
931 pyra_report_to_chrdev(pyra, data);
932
933 return 0;
934}
935
936static const struct hid_device_id pyra_devices[] = {
937 { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT,
938 USB_DEVICE_ID_ROCCAT_PYRA_WIRED) },
939 /* TODO add USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS after testing */
940 { }
941};
942
943MODULE_DEVICE_TABLE(hid, pyra_devices);
944
945static struct hid_driver pyra_driver = {
946 .name = "pyra",
947 .id_table = pyra_devices,
948 .probe = pyra_probe,
949 .remove = pyra_remove,
950 .raw_event = pyra_raw_event
951};
952
953static int __init pyra_init(void)
954{
955 return hid_register_driver(&pyra_driver);
956}
957
958static void __exit pyra_exit(void)
959{
960 hid_unregister_driver(&pyra_driver);
961}
962
963module_init(pyra_init);
964module_exit(pyra_exit);
965
966MODULE_AUTHOR("Stefan Achatz");
967MODULE_DESCRIPTION("USB Roccat Pyra driver");
968MODULE_LICENSE("GPL v2");
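
The pyra sysfs write handlers above reject partial writes (off != 0 || count != sizeof(...) returns -EINVAL), so userspace has to read, modify and write each record as a whole. A hedged userspace sketch of changing the startup profile; the sysfs path is illustrative and depends on where the mouse's USB interface is enumerated, and the struct layout mirrors struct pyra_settings from the header below:

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

/* Mirrors struct pyra_settings: command, size, startup_profile. */
struct pyra_settings {
	uint8_t command;
	uint8_t size;
	uint8_t startup_profile;	/* range 0-4 */
} __attribute__((packed));

int main(void)
{
	/* Illustrative path; the real one depends on USB topology. */
	const char *path = "/sys/bus/usb/devices/1-1:1.0/settings";
	struct pyra_settings s;
	int fd = open(path, O_RDWR);

	if (fd < 0 || pread(fd, &s, sizeof(s), 0) != sizeof(s))
		return 1;

	s.startup_profile = 2;			/* switch to profile #3 */

	/* The driver only accepts the whole struct at offset 0. */
	if (pwrite(fd, &s, sizeof(s), 0) != sizeof(s)) {
		close(fd);
		return 1;
	}

	close(fd);
	return 0;
}
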
diff --git a/drivers/hid/hid-roccat-pyra.h b/drivers/hid/hid-roccat-pyra.h
new file mode 100644
index 000000000000..22f80a8f26f9
--- /dev/null
+++ b/drivers/hid/hid-roccat-pyra.h
@@ -0,0 +1,186 @@
1#ifndef __HID_ROCCAT_PYRA_H
2#define __HID_ROCCAT_PYRA_H
3
4/*
5 * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net>
6 */
7
8/*
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free
11 * Software Foundation; either version 2 of the License, or (at your option)
12 * any later version.
13 */
14
15#include <linux/types.h>
16
17#pragma pack(push)
18#pragma pack(1)
19
20struct pyra_b {
21 uint8_t command; /* PYRA_COMMAND_B */
22 uint8_t size; /* always 3 */
23 uint8_t unknown; /* 1 */
24};
25
26struct pyra_control {
27 uint8_t command; /* PYRA_COMMAND_CONTROL */
28 /*
29 * value is profile number for request_settings and request_buttons
30 * 1 if status ok for request_status
31 */
32 uint8_t value; /* Range 0-4 */
33 uint8_t request;
34};
35
36enum pyra_control_requests {
37 PYRA_CONTROL_REQUEST_STATUS = 0x00,
38 PYRA_CONTROL_REQUEST_PROFILE_SETTINGS = 0x10,
39 PYRA_CONTROL_REQUEST_PROFILE_BUTTONS = 0x20
40};
41
42struct pyra_settings {
43 uint8_t command; /* PYRA_COMMAND_SETTINGS */
44 uint8_t size; /* always 3 */
45 uint8_t startup_profile; /* Range 0-4! */
46};
47
48struct pyra_profile_settings {
49 uint8_t command; /* PYRA_COMMAND_PROFILE_SETTINGS */
50 uint8_t size; /* always 0xd */
51 uint8_t number; /* Range 0-4 */
52 uint8_t xysync;
53 uint8_t x_sensitivity; /* 0x1-0xa */
54 uint8_t y_sensitivity;
55 uint8_t x_cpi; /* unused */
56 uint8_t y_cpi; /* this value is for x and y */
57 uint8_t lightswitch; /* 0 = off, 1 = on */
58 uint8_t light_effect;
59 uint8_t handedness;
60 uint16_t checksum; /* byte sum */
61};
62
63struct pyra_profile_buttons {
64 uint8_t command; /* PYRA_COMMAND_PROFILE_BUTTONS */
65 uint8_t size; /* always 0x13 */
66 uint8_t number; /* Range 0-4 */
67 uint8_t buttons[14];
68 uint16_t checksum; /* byte sum */
69};
70
71struct pyra_info {
72 uint8_t command; /* PYRA_COMMAND_INFO */
73 uint8_t size; /* always 6 */
74 uint8_t firmware_version;
75 uint8_t unknown1; /* always 0 */
76 uint8_t unknown2; /* always 1 */
77 uint8_t unknown3; /* always 0 */
78};
79
80enum pyra_commands {
81 PYRA_COMMAND_CONTROL = 0x4,
82 PYRA_COMMAND_SETTINGS = 0x5,
83 PYRA_COMMAND_PROFILE_SETTINGS = 0x6,
84 PYRA_COMMAND_PROFILE_BUTTONS = 0x7,
85 PYRA_COMMAND_INFO = 0x9,
86 PYRA_COMMAND_B = 0xb
87};
88
89enum pyra_usb_commands {
90 PYRA_USB_COMMAND_CONTROL = 0x304,
91 PYRA_USB_COMMAND_SETTINGS = 0x305,
92 PYRA_USB_COMMAND_PROFILE_SETTINGS = 0x306,
93 PYRA_USB_COMMAND_PROFILE_BUTTONS = 0x307,
94 PYRA_USB_COMMAND_INFO = 0x309,
95 PYRA_USB_COMMAND_B = 0x30b /* writes 3 bytes */
96};
97
98enum pyra_mouse_report_numbers {
99 PYRA_MOUSE_REPORT_NUMBER_HID = 1,
100 PYRA_MOUSE_REPORT_NUMBER_AUDIO = 2,
101 PYRA_MOUSE_REPORT_NUMBER_BUTTON = 3,
102};
103
104struct pyra_mouse_event_button {
105 uint8_t report_number; /* always 3 */
106 uint8_t unknown; /* always 0 */
107 uint8_t type;
108 uint8_t data1;
109 uint8_t data2;
110};
111
112struct pyra_mouse_event_audio {
113 uint8_t report_number; /* always 2 */
114 uint8_t type;
115 uint8_t unused; /* always 0 */
116};
117
118/* hid audio controls */
119enum pyra_mouse_event_audio_types {
120 PYRA_MOUSE_EVENT_AUDIO_TYPE_MUTE = 0xe2,
121 PYRA_MOUSE_EVENT_AUDIO_TYPE_VOLUME_UP = 0xe9,
122 PYRA_MOUSE_EVENT_AUDIO_TYPE_VOLUME_DOWN = 0xea,
123};
124
125enum pyra_mouse_event_button_types {
126 /*
127 * Mouse sends tilt events on report_number 1 and 3
128 * Tilt events are sent repeatedly with 0.94s between first and second
129 * event and 0.22s on subsequent
130 */
131 PYRA_MOUSE_EVENT_BUTTON_TYPE_TILT = 0x10,
132
133 /*
134 * These are sent sequentially
135 * data1 contains new profile number in range 1-5
136 */
137 PYRA_MOUSE_EVENT_BUTTON_TYPE_PROFILE_1 = 0x20,
138 PYRA_MOUSE_EVENT_BUTTON_TYPE_PROFILE_2 = 0x30,
139
140 /*
141 * data1 = button_number (rmp index)
142 * data2 = pressed/released
143 */
144 PYRA_MOUSE_EVENT_BUTTON_TYPE_MACRO = 0x40,
145 PYRA_MOUSE_EVENT_BUTTON_TYPE_SHORTCUT = 0x50,
146
147 /*
148 * data1 = button_number (rmp index)
149 */
150 PYRA_MOUSE_EVENT_BUTTON_TYPE_QUICKLAUNCH = 0x60,
151
152 /* data1 = new cpi */
153 PYRA_MOUSE_EVENT_BUTTON_TYPE_CPI = 0xb0,
154
155 /* data1 and data2 = new sensitivity */
156 PYRA_MOUSE_EVENT_BUTTON_TYPE_SENSITIVITY = 0xc0,
157
158 PYRA_MOUSE_EVENT_BUTTON_TYPE_MULTIMEDIA = 0xf0,
159};
160
161enum {
162 PYRA_MOUSE_EVENT_BUTTON_PRESS = 0,
163 PYRA_MOUSE_EVENT_BUTTON_RELEASE = 1,
164};
165
166struct pyra_roccat_report {
167 uint8_t type;
168 uint8_t value;
169 uint8_t key;
170};
171
172#pragma pack(pop)
173
174struct pyra_device {
175 int actual_profile;
176 int actual_cpi;
177 int firmware_version;
178 int roccat_claimed;
179 int chrdev_minor;
180 struct mutex pyra_lock;
181 struct pyra_settings settings;
182 struct pyra_profile_settings profile_settings[5];
183 struct pyra_profile_buttons profile_buttons[5];
184};
185
186#endif
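
Both profile records above end in a 16-bit field annotated /* byte sum */. A hedged sketch of such a checksum; the assumption, which the comments do not spell out, is that it covers every byte preceding the checksum field and is stored little-endian:

#include <stddef.h>
#include <stdint.h>

/*
 * Assumed semantics: plain sum of all bytes before the trailing
 * 16-bit checksum; store the result into that field little-endian.
 */
static uint16_t pyra_byte_sum(const void *record, size_t size_with_checksum)
{
	const uint8_t *p = record;
	size_t n = size_with_checksum - sizeof(uint16_t);
	uint16_t sum = 0;

	while (n--)
		sum += *p++;
	return sum;
}
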
diff --git a/drivers/hid/hid-samsung.c b/drivers/hid/hid-samsung.c
index bda0fd60c98d..35894444e000 100644
--- a/drivers/hid/hid-samsung.c
+++ b/drivers/hid/hid-samsung.c
@@ -61,10 +61,10 @@ static inline void samsung_irda_dev_trace(struct hid_device *hdev,
61 "descriptor\n", rsize); 61 "descriptor\n", rsize);
62} 62}
63 63
64static void samsung_irda_report_fixup(struct hid_device *hdev, __u8 *rdesc, 64static __u8 *samsung_irda_report_fixup(struct hid_device *hdev, __u8 *rdesc,
65 unsigned int rsize) 65 unsigned int *rsize)
66{ 66{
67 if (rsize == 184 && rdesc[175] == 0x25 && rdesc[176] == 0x40 && 67 if (*rsize == 184 && rdesc[175] == 0x25 && rdesc[176] == 0x40 &&
68 rdesc[177] == 0x75 && rdesc[178] == 0x30 && 68 rdesc[177] == 0x75 && rdesc[178] == 0x30 &&
69 rdesc[179] == 0x95 && rdesc[180] == 0x01 && 69 rdesc[179] == 0x95 && rdesc[180] == 0x01 &&
70 rdesc[182] == 0x40) { 70 rdesc[182] == 0x40) {
@@ -74,24 +74,25 @@ static void samsung_irda_report_fixup(struct hid_device *hdev, __u8 *rdesc,
74 rdesc[180] = 0x06; 74 rdesc[180] = 0x06;
75 rdesc[182] = 0x42; 75 rdesc[182] = 0x42;
76 } else 76 } else
77 if (rsize == 203 && rdesc[192] == 0x15 && rdesc[193] == 0x0 && 77 if (*rsize == 203 && rdesc[192] == 0x15 && rdesc[193] == 0x0 &&
78 rdesc[194] == 0x25 && rdesc[195] == 0x12) { 78 rdesc[194] == 0x25 && rdesc[195] == 0x12) {
79 samsung_irda_dev_trace(hdev, 203); 79 samsung_irda_dev_trace(hdev, 203);
80 rdesc[193] = 0x1; 80 rdesc[193] = 0x1;
81 rdesc[195] = 0xf; 81 rdesc[195] = 0xf;
82 } else 82 } else
83 if (rsize == 135 && rdesc[124] == 0x15 && rdesc[125] == 0x0 && 83 if (*rsize == 135 && rdesc[124] == 0x15 && rdesc[125] == 0x0 &&
84 rdesc[126] == 0x25 && rdesc[127] == 0x11) { 84 rdesc[126] == 0x25 && rdesc[127] == 0x11) {
85 samsung_irda_dev_trace(hdev, 135); 85 samsung_irda_dev_trace(hdev, 135);
86 rdesc[125] = 0x1; 86 rdesc[125] = 0x1;
87 rdesc[127] = 0xe; 87 rdesc[127] = 0xe;
88 } else 88 } else
89 if (rsize == 171 && rdesc[160] == 0x15 && rdesc[161] == 0x0 && 89 if (*rsize == 171 && rdesc[160] == 0x15 && rdesc[161] == 0x0 &&
90 rdesc[162] == 0x25 && rdesc[163] == 0x01) { 90 rdesc[162] == 0x25 && rdesc[163] == 0x01) {
91 samsung_irda_dev_trace(hdev, 171); 91 samsung_irda_dev_trace(hdev, 171);
92 rdesc[161] = 0x1; 92 rdesc[161] = 0x1;
93 rdesc[163] = 0x3; 93 rdesc[163] = 0x3;
94 } 94 }
95 return rdesc;
95} 96}
96 97
97#define samsung_kbd_mouse_map_key_clear(c) \ 98#define samsung_kbd_mouse_map_key_clear(c) \
@@ -130,11 +131,12 @@ static int samsung_kbd_mouse_input_mapping(struct hid_device *hdev,
130 return 1; 131 return 1;
131} 132}
132 133
133static void samsung_report_fixup(struct hid_device *hdev, __u8 *rdesc, 134static __u8 *samsung_report_fixup(struct hid_device *hdev, __u8 *rdesc,
134 unsigned int rsize) 135 unsigned int *rsize)
135{ 136{
136 if (USB_DEVICE_ID_SAMSUNG_IR_REMOTE == hdev->product) 137 if (USB_DEVICE_ID_SAMSUNG_IR_REMOTE == hdev->product)
137 samsung_irda_report_fixup(hdev, rdesc, rsize); 138 rdesc = samsung_irda_report_fixup(hdev, rdesc, rsize);
139 return rdesc;
138} 140}
139 141
140static int samsung_input_mapping(struct hid_device *hdev, struct hid_input *hi, 142static int samsung_input_mapping(struct hid_device *hdev, struct hid_input *hi,
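The hunks above are part of a wider change to the report_fixup callback that runs through this diff: the hook now takes a pointer to the descriptor size and returns the (possibly replaced) descriptor instead of returning void. A minimal sketch of a driver-side fixup written against the new prototype; the names my_report_fixup, my_rdesc_fixed and MY_RDESC_ORIG_SIZE are hypothetical and not part of the patch.

#include <linux/hid.h>

/* Hypothetical replacement descriptor (real fields elided for brevity) */
static __u8 my_rdesc_fixed[] = {
	0x05, 0x01,	/* Usage Page (Desktop),	*/
	0x09, 0x02,	/* Usage (Mouse),		*/
	0xA1, 0x01,	/* Collection (Application),	*/
	0xC0		/* End Collection		*/
};

/* Hypothetical size of the broken descriptor reported by the device */
#define MY_RDESC_ORIG_SIZE	123

static __u8 *my_report_fixup(struct hid_device *hdev, __u8 *rdesc,
		unsigned int *rsize)
{
	/* Swap in the fixed descriptor and hand its size back to hid-core */
	if (*rsize == MY_RDESC_ORIG_SIZE) {
		rdesc = my_rdesc_fixed;
		*rsize = sizeof(my_rdesc_fixed);
	}
	return rdesc;
}

Being able to hand back a different buffer and size is what lets the new hid-uclogic.c and hid-waltop.c drivers below replace their devices' descriptors wholesale instead of patching individual bytes.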
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 402d5574b574..677bb3da10e8 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -24,24 +24,46 @@
24 24
25#include "hid-ids.h" 25#include "hid-ids.h"
26 26
27#define VAIO_RDESC_CONSTANT 0x0001 27#define VAIO_RDESC_CONSTANT (1 << 0)
28#define SIXAXIS_CONTROLLER_USB (1 << 1)
29#define SIXAXIS_CONTROLLER_BT (1 << 2)
28 30
29struct sony_sc { 31struct sony_sc {
30 unsigned long quirks; 32 unsigned long quirks;
31}; 33};
32 34
33/* Sony Vaio VGX has wrongly mouse pointer declared as constant */ 35/* Sony Vaio VGX has wrongly mouse pointer declared as constant */
34static void sony_report_fixup(struct hid_device *hdev, __u8 *rdesc, 36static __u8 *sony_report_fixup(struct hid_device *hdev, __u8 *rdesc,
35 unsigned int rsize) 37 unsigned int *rsize)
36{ 38{
37 struct sony_sc *sc = hid_get_drvdata(hdev); 39 struct sony_sc *sc = hid_get_drvdata(hdev);
38 40
39 if ((sc->quirks & VAIO_RDESC_CONSTANT) && 41 if ((sc->quirks & VAIO_RDESC_CONSTANT) &&
40 rsize >= 56 && rdesc[54] == 0x81 && rdesc[55] == 0x07) { 42 *rsize >= 56 && rdesc[54] == 0x81 && rdesc[55] == 0x07) {
41 dev_info(&hdev->dev, "Fixing up Sony Vaio VGX report " 43 dev_info(&hdev->dev, "Fixing up Sony Vaio VGX report "
42 "descriptor\n"); 44 "descriptor\n");
43 rdesc[55] = 0x06; 45 rdesc[55] = 0x06;
44 } 46 }
47 return rdesc;
48}
49
50static int sixaxis_usb_output_raw_report(struct hid_device *hid, __u8 *buf,
51 size_t count, unsigned char report_type)
52{
53 struct usb_interface *intf = to_usb_interface(hid->dev.parent);
54 struct usb_device *dev = interface_to_usbdev(intf);
55 struct usb_host_interface *interface = intf->cur_altsetting;
56 int report_id = buf[0];
57 int ret;
58
59 ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
60 HID_REQ_SET_REPORT,
61 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
62 ((report_type + 1) << 8) | report_id,
63 interface->desc.bInterfaceNumber, buf, count,
64 USB_CTRL_SET_TIMEOUT);
65
66 return ret;
45} 67}
46 68
47/* 69/*
@@ -49,7 +71,7 @@ static void sony_report_fixup(struct hid_device *hdev, __u8 *rdesc,
49 * to "operational". Without this, the ps3 controller will not report any 71 * to "operational". Without this, the ps3 controller will not report any
50 * events. 72 * events.
51 */ 73 */
52static int sony_set_operational_usb(struct hid_device *hdev) 74static int sixaxis_set_operational_usb(struct hid_device *hdev)
53{ 75{
54 struct usb_interface *intf = to_usb_interface(hdev->dev.parent); 76 struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
55 struct usb_device *dev = interface_to_usbdev(intf); 77 struct usb_device *dev = interface_to_usbdev(intf);
@@ -74,7 +96,7 @@ static int sony_set_operational_usb(struct hid_device *hdev)
74 return ret; 96 return ret;
75} 97}
76 98
77static int sony_set_operational_bt(struct hid_device *hdev) 99static int sixaxis_set_operational_bt(struct hid_device *hdev)
78{ 100{
79 unsigned char buf[] = { 0xf4, 0x42, 0x03, 0x00, 0x00 }; 101 unsigned char buf[] = { 0xf4, 0x42, 0x03, 0x00, 0x00 };
80 return hdev->hid_output_raw_report(hdev, buf, sizeof(buf), HID_FEATURE_REPORT); 102 return hdev->hid_output_raw_report(hdev, buf, sizeof(buf), HID_FEATURE_REPORT);
@@ -108,16 +130,14 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
108 goto err_free; 130 goto err_free;
109 } 131 }
110 132
111 switch (hdev->bus) { 133 if (sc->quirks & SIXAXIS_CONTROLLER_USB) {
112 case BUS_USB: 134 hdev->hid_output_raw_report = sixaxis_usb_output_raw_report;
113 ret = sony_set_operational_usb(hdev); 135 ret = sixaxis_set_operational_usb(hdev);
114 break;
115 case BUS_BLUETOOTH:
116 ret = sony_set_operational_bt(hdev);
117 break;
118 default:
119 ret = 0;
120 } 136 }
137 else if (sc->quirks & SIXAXIS_CONTROLLER_BT)
138 ret = sixaxis_set_operational_bt(hdev);
139 else
140 ret = 0;
121 141
122 if (ret < 0) 142 if (ret < 0)
123 goto err_stop; 143 goto err_stop;
@@ -137,8 +157,10 @@ static void sony_remove(struct hid_device *hdev)
137} 157}
138 158
139static const struct hid_device_id sony_devices[] = { 159static const struct hid_device_id sony_devices[] = {
140 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, 160 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER),
141 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, 161 .driver_data = SIXAXIS_CONTROLLER_USB },
162 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER),
163 .driver_data = SIXAXIS_CONTROLLER_BT },
142 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE), 164 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE),
143 .driver_data = VAIO_RDESC_CONSTANT }, 165 .driver_data = VAIO_RDESC_CONSTANT },
144 { } 166 { }
diff --git a/drivers/hid/hid-stantum.c b/drivers/hid/hid-stantum.c
index 90df886c5e04..3171be28c3d5 100644
--- a/drivers/hid/hid-stantum.c
+++ b/drivers/hid/hid-stantum.c
@@ -249,6 +249,8 @@ static void stantum_remove(struct hid_device *hdev)
249 249
250static const struct hid_device_id stantum_devices[] = { 250static const struct hid_device_id stantum_devices[] = {
251 { HID_USB_DEVICE(USB_VENDOR_ID_STANTUM, USB_DEVICE_ID_MTP) }, 251 { HID_USB_DEVICE(USB_VENDOR_ID_STANTUM, USB_DEVICE_ID_MTP) },
252 { HID_USB_DEVICE(USB_VENDOR_ID_STANTUM_STM, USB_DEVICE_ID_MTP_STM) },
253 { HID_USB_DEVICE(USB_VENDOR_ID_STANTUM_SITRONIX, USB_DEVICE_ID_MTP_SITRONIX) },
252 { } 254 { }
253}; 255};
254MODULE_DEVICE_TABLE(hid, stantum_devices); 256MODULE_DEVICE_TABLE(hid, stantum_devices);
diff --git a/drivers/hid/hid-sunplus.c b/drivers/hid/hid-sunplus.c
index 438107d9f1b2..164ed568f6cf 100644
--- a/drivers/hid/hid-sunplus.c
+++ b/drivers/hid/hid-sunplus.c
@@ -22,16 +22,17 @@
22 22
23#include "hid-ids.h" 23#include "hid-ids.h"
24 24
25static void sp_report_fixup(struct hid_device *hdev, __u8 *rdesc, 25static __u8 *sp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
26 unsigned int rsize) 26 unsigned int *rsize)
27{ 27{
28 if (rsize >= 107 && rdesc[104] == 0x26 && rdesc[105] == 0x80 && 28 if (*rsize >= 107 && rdesc[104] == 0x26 && rdesc[105] == 0x80 &&
29 rdesc[106] == 0x03) { 29 rdesc[106] == 0x03) {
30 dev_info(&hdev->dev, "fixing up Sunplus Wireless Desktop " 30 dev_info(&hdev->dev, "fixing up Sunplus Wireless Desktop "
31 "report descriptor\n"); 31 "report descriptor\n");
32 rdesc[105] = rdesc[110] = 0x03; 32 rdesc[105] = rdesc[110] = 0x03;
33 rdesc[106] = rdesc[111] = 0x21; 33 rdesc[106] = rdesc[111] = 0x21;
34 } 34 }
35 return rdesc;
35} 36}
36 37
37#define sp_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \ 38#define sp_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \
diff --git a/drivers/hid/hid-uclogic.c b/drivers/hid/hid-uclogic.c
new file mode 100644
index 000000000000..05fdc85a76e5
--- /dev/null
+++ b/drivers/hid/hid-uclogic.c
@@ -0,0 +1,623 @@
1/*
2 * HID driver for UC-Logic devices not fully compliant with HID standard
3 *
4 * Copyright (c) 2010 Nikolai Kondrashov
5 */
6
7/*
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
12 */
13
14#include <linux/device.h>
15#include <linux/hid.h>
16#include <linux/module.h>
17
18#include "hid-ids.h"
19
20/*
21 * The original descriptors of WPXXXXU tablets have three report IDs, of
 22 * which only two are used (8 and 9); the remaining one (7) seems to contain
 23 * the originally intended pen description, which was abandoned for some
24 * reason. From this unused description it is possible to extract the
25 * actual physical extents and resolution. All the models use the same
26 * descriptor with different extents for the unused report ID.
27 *
28 * Here it is:
29 *
30 * Usage Page (Digitizer), ; Digitizer (0Dh)
31 * Usage (Pen), ; Pen (02h, application collection)
32 * Collection (Application),
33 * Report ID (7),
34 * Usage (Stylus), ; Stylus (20h, logical collection)
35 * Collection (Physical),
36 * Usage (Tip Switch), ; Tip switch (42h, momentary control)
37 * Usage (Barrel Switch), ; Barrel switch (44h, momentary control)
38 * Usage (Eraser), ; Eraser (45h, momentary control)
39 * Logical Minimum (0),
40 * Logical Maximum (1),
41 * Report Size (1),
42 * Report Count (3),
43 * Input (Variable),
44 * Report Count (3),
45 * Input (Constant, Variable),
46 * Usage (In Range), ; In range (32h, momentary control)
47 * Report Count (1),
48 * Input (Variable),
49 * Report Count (1),
50 * Input (Constant, Variable),
51 * Usage Page (Desktop), ; Generic desktop controls (01h)
52 * Usage (X), ; X (30h, dynamic value)
53 * Report Size (16),
54 * Report Count (1),
55 * Push,
56 * Unit Exponent (13),
57 * Unit (Inch^3),
58 * Physical Minimum (0),
59 * Physical Maximum (Xpm),
60 * Logical Maximum (Xlm),
61 * Input (Variable),
62 * Usage (Y), ; Y (31h, dynamic value)
63 * Physical Maximum (Ypm),
64 * Logical Maximum (Ylm),
65 * Input (Variable),
66 * Pop,
67 * Usage Page (Digitizer), ; Digitizer (0Dh)
68 * Usage (Tip Pressure), ; Tip pressure (30h, dynamic value)
69 * Logical Maximum (1023),
70 * Input (Variable),
71 * Report Size (16),
72 * End Collection,
73 * End Collection,
74 * Usage Page (Desktop), ; Generic desktop controls (01h)
75 * Usage (Mouse), ; Mouse (02h, application collection)
76 * Collection (Application),
77 * Report ID (8),
78 * Usage (Pointer), ; Pointer (01h, physical collection)
79 * Collection (Physical),
80 * Usage Page (Button), ; Button (09h)
81 * Usage Minimum (01h),
82 * Usage Maximum (03h),
83 * Logical Minimum (0),
84 * Logical Maximum (1),
85 * Report Count (3),
86 * Report Size (1),
87 * Input (Variable),
88 * Report Count (5),
89 * Input (Constant),
90 * Usage Page (Desktop), ; Generic desktop controls (01h)
91 * Usage (X), ; X (30h, dynamic value)
92 * Usage (Y), ; Y (31h, dynamic value)
93 * Usage (Wheel), ; Wheel (38h, dynamic value)
94 * Usage (00h),
95 * Logical Minimum (-127),
96 * Logical Maximum (127),
97 * Report Size (8),
98 * Report Count (4),
99 * Input (Variable, Relative),
100 * End Collection,
101 * End Collection,
102 * Usage Page (Desktop), ; Generic desktop controls (01h)
103 * Usage (Mouse), ; Mouse (02h, application collection)
104 * Collection (Application),
105 * Report ID (9),
106 * Usage (Pointer), ; Pointer (01h, physical collection)
107 * Collection (Physical),
108 * Usage Page (Button), ; Button (09h)
109 * Usage Minimum (01h),
110 * Usage Maximum (03h),
111 * Logical Minimum (0),
112 * Logical Maximum (1),
113 * Report Count (3),
114 * Report Size (1),
115 * Input (Variable),
116 * Report Count (5),
117 * Input (Constant),
118 * Usage Page (Desktop), ; Generic desktop controls (01h)
119 * Usage (X), ; X (30h, dynamic value)
120 * Usage (Y), ; Y (31h, dynamic value)
121 * Logical Minimum (0),
122 * Logical Maximum (32767),
123 * Physical Minimum (0),
124 * Physical Maximum (32767),
125 * Report Count (2),
126 * Report Size (16),
127 * Input (Variable),
128 * Usage Page (Digitizer), ; Digitizer (0Dh)
129 * Usage (Tip Pressure), ; Tip pressure (30h, dynamic value)
130 * Logical Maximum (1023),
131 * Report Count (1),
132 * Report Size (16),
133 * Input (Variable),
134 * End Collection,
135 * End Collection
136 *
137 * Here are the extents values for the WPXXXXU models:
138 *
139 * Xpm Xlm Ypm Ylm
140 * WP4030U 4000 8000 3000 6000
141 * WP5540U 5500 11000 4000 8000
142 * WP8060U 8000 16000 6000 12000
143 *
144 * This suggests that all of them have 2000 LPI resolution, as advertised.
145 */
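A quick check of the 2000 LPI figure above: the physical extents in the table are in thousandths of an inch (the fixed descriptors below spell this out as Unit (Inch) with Unit Exponent (-3)), so the resolution is the logical extent divided by the physical extent in inches. Taking the WP8060U X axis as an example:

	Xlm / (Xpm / 1000) = 16000 / (8000 / 1000) = 2000 lines per inch

and the same ratio holds for every axis of every model in the table.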
146
147/* Size of the original descriptor of WPXXXXU tablets */
148#define WPXXXXU_RDESC_ORIG_SIZE 212
149
150/*
151 * Fixed WP4030U report descriptor.
152 * Although the hardware might actually support it, the mouse description
 153 * has been removed, since there seem to be no devices having one and it
154 * wouldn't make much sense because of the working area size.
155 */
156static __u8 wp4030u_rdesc_fixed[] = {
157 0x05, 0x0D, /* Usage Page (Digitizer), */
158 0x09, 0x02, /* Usage (Pen), */
159 0xA1, 0x01, /* Collection (Application), */
160 0x85, 0x09, /* Report ID (9), */
161 0x09, 0x20, /* Usage (Stylus), */
162 0xA0, /* Collection (Physical), */
163 0x75, 0x01, /* Report Size (1), */
164 0x09, 0x42, /* Usage (Tip Switch), */
165 0x09, 0x44, /* Usage (Barrel Switch), */
166 0x09, 0x46, /* Usage (Tablet Pick), */
167 0x14, /* Logical Minimum (0), */
168 0x25, 0x01, /* Logical Maximum (1), */
169 0x95, 0x03, /* Report Count (3), */
170 0x81, 0x02, /* Input (Variable), */
171 0x95, 0x05, /* Report Count (5), */
172 0x81, 0x01, /* Input (Constant), */
173 0x75, 0x10, /* Report Size (16), */
174 0x95, 0x01, /* Report Count (1), */
175 0x14, /* Logical Minimum (0), */
176 0xA4, /* Push, */
177 0x05, 0x01, /* Usage Page (Desktop), */
178 0x55, 0xFD, /* Unit Exponent (-3), */
179 0x65, 0x13, /* Unit (Inch), */
180 0x34, /* Physical Minimum (0), */
181 0x09, 0x30, /* Usage (X), */
182 0x46, 0xA0, 0x0F, /* Physical Maximum (4000), */
183 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */
184 0x81, 0x02, /* Input (Variable), */
185 0x09, 0x31, /* Usage (Y), */
186 0x46, 0xB8, 0x0B, /* Physical Maximum (3000), */
187 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */
188 0x81, 0x02, /* Input (Variable), */
189 0xB4, /* Pop, */
190 0x09, 0x30, /* Usage (Tip Pressure), */
191 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
192 0x81, 0x02, /* Input (Variable), */
193 0xC0, /* End Collection, */
194 0xC0 /* End Collection */
195};
196
197/* Fixed WP5540U report descriptor */
198static __u8 wp5540u_rdesc_fixed[] = {
199 0x05, 0x0D, /* Usage Page (Digitizer), */
200 0x09, 0x02, /* Usage (Pen), */
201 0xA1, 0x01, /* Collection (Application), */
202 0x85, 0x09, /* Report ID (9), */
203 0x09, 0x20, /* Usage (Stylus), */
204 0xA0, /* Collection (Physical), */
205 0x75, 0x01, /* Report Size (1), */
206 0x09, 0x42, /* Usage (Tip Switch), */
207 0x09, 0x44, /* Usage (Barrel Switch), */
208 0x09, 0x46, /* Usage (Tablet Pick), */
209 0x14, /* Logical Minimum (0), */
210 0x25, 0x01, /* Logical Maximum (1), */
211 0x95, 0x03, /* Report Count (3), */
212 0x81, 0x02, /* Input (Variable), */
213 0x95, 0x05, /* Report Count (5), */
214 0x81, 0x01, /* Input (Constant), */
215 0x75, 0x10, /* Report Size (16), */
216 0x95, 0x01, /* Report Count (1), */
217 0x14, /* Logical Minimum (0), */
218 0xA4, /* Push, */
219 0x05, 0x01, /* Usage Page (Desktop), */
220 0x55, 0xFD, /* Unit Exponent (-3), */
221 0x65, 0x13, /* Unit (Inch), */
222 0x34, /* Physical Minimum (0), */
223 0x09, 0x30, /* Usage (X), */
224 0x46, 0x7C, 0x15, /* Physical Maximum (5500), */
225 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */
226 0x81, 0x02, /* Input (Variable), */
227 0x09, 0x31, /* Usage (Y), */
228 0x46, 0xA0, 0x0F, /* Physical Maximum (4000), */
229 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */
230 0x81, 0x02, /* Input (Variable), */
231 0xB4, /* Pop, */
232 0x09, 0x30, /* Usage (Tip Pressure), */
233 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
234 0x81, 0x02, /* Input (Variable), */
235 0xC0, /* End Collection, */
236 0xC0, /* End Collection, */
237 0x05, 0x01, /* Usage Page (Desktop), */
238 0x09, 0x02, /* Usage (Mouse), */
239 0xA1, 0x01, /* Collection (Application), */
240 0x85, 0x08, /* Report ID (8), */
241 0x09, 0x01, /* Usage (Pointer), */
242 0xA0, /* Collection (Physical), */
243 0x75, 0x01, /* Report Size (1), */
244 0x05, 0x09, /* Usage Page (Button), */
245 0x19, 0x01, /* Usage Minimum (01h), */
246 0x29, 0x03, /* Usage Maximum (03h), */
247 0x14, /* Logical Minimum (0), */
248 0x25, 0x01, /* Logical Maximum (1), */
249 0x95, 0x03, /* Report Count (3), */
250 0x81, 0x02, /* Input (Variable), */
251 0x95, 0x05, /* Report Count (5), */
252 0x81, 0x01, /* Input (Constant), */
253 0x05, 0x01, /* Usage Page (Desktop), */
254 0x75, 0x08, /* Report Size (8), */
255 0x09, 0x30, /* Usage (X), */
256 0x09, 0x31, /* Usage (Y), */
257 0x15, 0x81, /* Logical Minimum (-127), */
258 0x25, 0x7F, /* Logical Maximum (127), */
259 0x95, 0x02, /* Report Count (2), */
260 0x81, 0x06, /* Input (Variable, Relative), */
261 0x09, 0x38, /* Usage (Wheel), */
262 0x15, 0xFF, /* Logical Minimum (-1), */
263 0x25, 0x01, /* Logical Maximum (1), */
264 0x95, 0x01, /* Report Count (1), */
265 0x81, 0x06, /* Input (Variable, Relative), */
266 0x81, 0x01, /* Input (Constant), */
267 0xC0, /* End Collection, */
268 0xC0 /* End Collection */
269};
270
271/* Fixed WP8060U report descriptor */
272static __u8 wp8060u_rdesc_fixed[] = {
273 0x05, 0x0D, /* Usage Page (Digitizer), */
274 0x09, 0x02, /* Usage (Pen), */
275 0xA1, 0x01, /* Collection (Application), */
276 0x85, 0x09, /* Report ID (9), */
277 0x09, 0x20, /* Usage (Stylus), */
278 0xA0, /* Collection (Physical), */
279 0x75, 0x01, /* Report Size (1), */
280 0x09, 0x42, /* Usage (Tip Switch), */
281 0x09, 0x44, /* Usage (Barrel Switch), */
282 0x09, 0x46, /* Usage (Tablet Pick), */
283 0x14, /* Logical Minimum (0), */
284 0x25, 0x01, /* Logical Maximum (1), */
285 0x95, 0x03, /* Report Count (3), */
286 0x81, 0x02, /* Input (Variable), */
287 0x95, 0x05, /* Report Count (5), */
288 0x81, 0x01, /* Input (Constant), */
289 0x75, 0x10, /* Report Size (16), */
290 0x95, 0x01, /* Report Count (1), */
291 0x14, /* Logical Minimum (0), */
292 0xA4, /* Push, */
293 0x05, 0x01, /* Usage Page (Desktop), */
294 0x55, 0xFD, /* Unit Exponent (-3), */
295 0x65, 0x13, /* Unit (Inch), */
296 0x34, /* Physical Minimum (0), */
297 0x09, 0x30, /* Usage (X), */
298 0x46, 0x40, 0x1F, /* Physical Maximum (8000), */
299 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */
300 0x81, 0x02, /* Input (Variable), */
301 0x09, 0x31, /* Usage (Y), */
302 0x46, 0x70, 0x17, /* Physical Maximum (6000), */
303 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */
304 0x81, 0x02, /* Input (Variable), */
305 0xB4, /* Pop, */
306 0x09, 0x30, /* Usage (Tip Pressure), */
307 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
308 0x81, 0x02, /* Input (Variable), */
309 0xC0, /* End Collection, */
310 0xC0, /* End Collection, */
311 0x05, 0x01, /* Usage Page (Desktop), */
312 0x09, 0x02, /* Usage (Mouse), */
313 0xA1, 0x01, /* Collection (Application), */
314 0x85, 0x08, /* Report ID (8), */
315 0x09, 0x01, /* Usage (Pointer), */
316 0xA0, /* Collection (Physical), */
317 0x75, 0x01, /* Report Size (1), */
318 0x05, 0x09, /* Usage Page (Button), */
319 0x19, 0x01, /* Usage Minimum (01h), */
320 0x29, 0x03, /* Usage Maximum (03h), */
321 0x14, /* Logical Minimum (0), */
322 0x25, 0x01, /* Logical Maximum (1), */
323 0x95, 0x03, /* Report Count (3), */
324 0x81, 0x02, /* Input (Variable), */
325 0x95, 0x05, /* Report Count (5), */
326 0x81, 0x01, /* Input (Constant), */
327 0x05, 0x01, /* Usage Page (Desktop), */
328 0x75, 0x08, /* Report Size (8), */
329 0x09, 0x30, /* Usage (X), */
330 0x09, 0x31, /* Usage (Y), */
331 0x15, 0x81, /* Logical Minimum (-127), */
332 0x25, 0x7F, /* Logical Maximum (127), */
333 0x95, 0x02, /* Report Count (2), */
334 0x81, 0x06, /* Input (Variable, Relative), */
335 0x09, 0x38, /* Usage (Wheel), */
336 0x15, 0xFF, /* Logical Minimum (-1), */
337 0x25, 0x01, /* Logical Maximum (1), */
338 0x95, 0x01, /* Report Count (1), */
339 0x81, 0x06, /* Input (Variable, Relative), */
340 0x81, 0x01, /* Input (Constant), */
341 0xC0, /* End Collection, */
342 0xC0 /* End Collection */
343};
344
345/*
346 * Original PF1209 report descriptor.
347 *
 348 * The descriptor is similar to the WPXXXXU descriptors, with the addition of a
349 * feature report (ID 4) of unknown purpose.
350 *
351 * Although the advertised resolution is 4000 LPI the unused report ID
352 * (taken from WPXXXXU, it seems) states 2000 LPI, but it is probably
353 * incorrect and is a result of blind copying without understanding. Anyway
354 * the real logical extents are always scaled to 0..32767, which IMHO spoils
355 * the precision.
356 *
357 * Usage Page (Digitizer), ; Digitizer (0Dh)
358 * Usage (Pen), ; Pen (02h, application collection)
359 * Collection (Application),
360 * Report ID (7),
361 * Usage (Stylus), ; Stylus (20h, logical collection)
362 * Collection (Physical),
363 * Usage (Tip Switch), ; Tip switch (42h, momentary control)
364 * Usage (Barrel Switch), ; Barrel switch (44h, momentary control)
365 * Usage (Eraser), ; Eraser (45h, momentary control)
366 * Logical Minimum (0),
367 * Logical Maximum (1),
368 * Report Size (1),
369 * Report Count (3),
370 * Input (Variable),
371 * Report Count (3),
372 * Input (Constant, Variable),
373 * Usage (In Range), ; In range (32h, momentary control)
374 * Report Count (1),
375 * Input (Variable),
376 * Report Count (1),
377 * Input (Constant, Variable),
378 * Usage Page (Desktop), ; Generic desktop controls (01h)
379 * Usage (X), ; X (30h, dynamic value)
380 * Report Size (16),
381 * Report Count (1),
382 * Push,
383 * Unit Exponent (13),
384 * Unit (Inch^3),
385 * Physical Minimum (0),
386 * Physical Maximum (12000),
387 * Logical Maximum (24000),
388 * Input (Variable),
389 * Usage (Y), ; Y (31h, dynamic value)
390 * Physical Maximum (9000),
391 * Logical Maximum (18000),
392 * Input (Variable),
393 * Pop,
394 * Usage Page (Digitizer), ; Digitizer (0Dh)
395 * Usage (Tip Pressure), ; Tip pressure (30h, dynamic value)
396 * Logical Maximum (1023),
397 * Input (Variable),
398 * Report Size (16),
399 * End Collection,
400 * End Collection,
401 * Usage Page (Desktop), ; Generic desktop controls (01h)
402 * Usage (Mouse), ; Mouse (02h, application collection)
403 * Collection (Application),
404 * Report ID (8),
405 * Usage (Pointer), ; Pointer (01h, physical collection)
406 * Collection (Physical),
407 * Usage Page (Button), ; Button (09h)
408 * Usage Minimum (01h),
409 * Usage Maximum (03h),
410 * Logical Minimum (0),
411 * Logical Maximum (1),
412 * Report Count (3),
413 * Report Size (1),
414 * Input (Variable),
415 * Report Count (5),
416 * Input (Constant),
417 * Usage Page (Desktop), ; Generic desktop controls (01h)
418 * Usage (X), ; X (30h, dynamic value)
419 * Usage (Y), ; Y (31h, dynamic value)
420 * Usage (Wheel), ; Wheel (38h, dynamic value)
421 * Usage (00h),
422 * Logical Minimum (-127),
423 * Logical Maximum (127),
424 * Report Size (8),
425 * Report Count (4),
426 * Input (Variable, Relative),
427 * End Collection,
428 * End Collection,
429 * Usage Page (Desktop), ; Generic desktop controls (01h)
430 * Usage (Mouse), ; Mouse (02h, application collection)
431 * Collection (Application),
432 * Report ID (9),
433 * Usage (Pointer), ; Pointer (01h, physical collection)
434 * Collection (Physical),
435 * Usage Page (Button), ; Button (09h)
436 * Usage Minimum (01h),
437 * Usage Maximum (03h),
438 * Logical Minimum (0),
439 * Logical Maximum (1),
440 * Report Count (3),
441 * Report Size (1),
442 * Input (Variable),
443 * Report Count (5),
444 * Input (Constant),
445 * Usage Page (Desktop), ; Generic desktop controls (01h)
446 * Usage (X), ; X (30h, dynamic value)
447 * Usage (Y), ; Y (31h, dynamic value)
448 * Logical Minimum (0),
449 * Logical Maximum (32767),
450 * Physical Minimum (0),
451 * Physical Maximum (32767),
452 * Report Count (2),
453 * Report Size (16),
454 * Input (Variable),
455 * Usage Page (Digitizer), ; Digitizer (0Dh)
456 * Usage (Tip Pressure), ; Tip pressure (30h, dynamic value)
457 * Logical Maximum (1023),
458 * Report Count (1),
459 * Report Size (16),
460 * Input (Variable),
461 * End Collection,
462 * End Collection,
463 * Usage Page (Desktop), ; Generic desktop controls (01h)
464 * Usage (00h),
465 * Collection (Application),
466 * Report ID (4),
467 * Logical Minimum (0),
468 * Logical Maximum (255),
469 * Usage (00h),
470 * Report Size (8),
471 * Report Count (3),
472 * Feature (Variable),
473 * End Collection
474 */
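A rough illustration of the precision remark above: the pen area is 12 x 9 inches (physical maxima of 12000 and 9000 in thousandths of an inch), while the report actually used (ID 9) scales both axes to 0..32767, giving roughly

	32767 / 12 in ≈ 2731 steps per inch on X
	32767 / 9 in  ≈ 3641 steps per inch on Y

both short of the advertised 4000 LPI.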
475
476/* Size of the original descriptor of PF1209 tablet */
477#define PF1209_RDESC_ORIG_SIZE 234
478
479/*
480 * Fixed PF1209 report descriptor
481 *
482 * The descriptor is fixed similarly to WP5540U and WP8060U, plus the
483 * feature report is removed, because its purpose is unknown and it is of no
484 * use to the generic HID driver anyway for now.
485 */
486static __u8 pf1209_rdesc_fixed[] = {
487 0x05, 0x0D, /* Usage Page (Digitizer), */
488 0x09, 0x02, /* Usage (Pen), */
489 0xA1, 0x01, /* Collection (Application), */
490 0x85, 0x09, /* Report ID (9), */
491 0x09, 0x20, /* Usage (Stylus), */
492 0xA0, /* Collection (Physical), */
493 0x75, 0x01, /* Report Size (1), */
494 0x09, 0x42, /* Usage (Tip Switch), */
495 0x09, 0x44, /* Usage (Barrel Switch), */
496 0x09, 0x46, /* Usage (Tablet Pick), */
497 0x14, /* Logical Minimum (0), */
498 0x25, 0x01, /* Logical Maximum (1), */
499 0x95, 0x03, /* Report Count (3), */
500 0x81, 0x02, /* Input (Variable), */
501 0x95, 0x05, /* Report Count (5), */
502 0x81, 0x01, /* Input (Constant), */
503 0x75, 0x10, /* Report Size (16), */
504 0x95, 0x01, /* Report Count (1), */
505 0x14, /* Logical Minimum (0), */
506 0xA4, /* Push, */
507 0x05, 0x01, /* Usage Page (Desktop), */
508 0x55, 0xFD, /* Unit Exponent (-3), */
509 0x65, 0x13, /* Unit (Inch), */
510 0x34, /* Physical Minimum (0), */
511 0x09, 0x30, /* Usage (X), */
512 0x46, 0xE0, 0x2E, /* Physical Maximum (12000), */
513 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */
514 0x81, 0x02, /* Input (Variable), */
515 0x09, 0x31, /* Usage (Y), */
516 0x46, 0x28, 0x23, /* Physical Maximum (9000), */
517 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */
518 0x81, 0x02, /* Input (Variable), */
519 0xB4, /* Pop, */
520 0x09, 0x30, /* Usage (Tip Pressure), */
521 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
522 0x81, 0x02, /* Input (Variable), */
523 0xC0, /* End Collection, */
524 0xC0, /* End Collection, */
525 0x05, 0x01, /* Usage Page (Desktop), */
526 0x09, 0x02, /* Usage (Mouse), */
527 0xA1, 0x01, /* Collection (Application), */
528 0x85, 0x08, /* Report ID (8), */
529 0x09, 0x01, /* Usage (Pointer), */
530 0xA0, /* Collection (Physical), */
531 0x75, 0x01, /* Report Size (1), */
532 0x05, 0x09, /* Usage Page (Button), */
533 0x19, 0x01, /* Usage Minimum (01h), */
534 0x29, 0x03, /* Usage Maximum (03h), */
535 0x14, /* Logical Minimum (0), */
536 0x25, 0x01, /* Logical Maximum (1), */
537 0x95, 0x03, /* Report Count (3), */
538 0x81, 0x02, /* Input (Variable), */
539 0x95, 0x05, /* Report Count (5), */
540 0x81, 0x01, /* Input (Constant), */
541 0x05, 0x01, /* Usage Page (Desktop), */
542 0x75, 0x08, /* Report Size (8), */
543 0x09, 0x30, /* Usage (X), */
544 0x09, 0x31, /* Usage (Y), */
545 0x15, 0x81, /* Logical Minimum (-127), */
546 0x25, 0x7F, /* Logical Maximum (127), */
547 0x95, 0x02, /* Report Count (2), */
548 0x81, 0x06, /* Input (Variable, Relative), */
549 0x09, 0x38, /* Usage (Wheel), */
550 0x15, 0xFF, /* Logical Minimum (-1), */
551 0x25, 0x01, /* Logical Maximum (1), */
552 0x95, 0x01, /* Report Count (1), */
553 0x81, 0x06, /* Input (Variable, Relative), */
554 0x81, 0x01, /* Input (Constant), */
555 0xC0, /* End Collection, */
556 0xC0 /* End Collection */
557};
558
559static __u8 *uclogic_report_fixup(struct hid_device *hdev, __u8 *rdesc,
560 unsigned int *rsize)
561{
562 switch (hdev->product) {
563 case USB_DEVICE_ID_UCLOGIC_TABLET_PF1209:
564 if (*rsize == PF1209_RDESC_ORIG_SIZE) {
565 rdesc = pf1209_rdesc_fixed;
566 *rsize = sizeof(pf1209_rdesc_fixed);
567 }
568 break;
569 case USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U:
570 if (*rsize == WPXXXXU_RDESC_ORIG_SIZE) {
571 rdesc = wp4030u_rdesc_fixed;
572 *rsize = sizeof(wp4030u_rdesc_fixed);
573 }
574 break;
575 case USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U:
576 if (*rsize == WPXXXXU_RDESC_ORIG_SIZE) {
577 rdesc = wp5540u_rdesc_fixed;
578 *rsize = sizeof(wp5540u_rdesc_fixed);
579 }
580 break;
581 case USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U:
582 if (*rsize == WPXXXXU_RDESC_ORIG_SIZE) {
583 rdesc = wp8060u_rdesc_fixed;
584 *rsize = sizeof(wp8060u_rdesc_fixed);
585 }
586 break;
587 }
588
589 return rdesc;
590}
591
592static const struct hid_device_id uclogic_devices[] = {
593 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC,
594 USB_DEVICE_ID_UCLOGIC_TABLET_PF1209) },
595 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC,
596 USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U) },
597 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC,
598 USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U) },
599 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC,
600 USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U) },
601 { }
602};
603MODULE_DEVICE_TABLE(hid, uclogic_devices);
604
605static struct hid_driver uclogic_driver = {
606 .name = "uclogic",
607 .id_table = uclogic_devices,
608 .report_fixup = uclogic_report_fixup,
609};
610
611static int __init uclogic_init(void)
612{
613 return hid_register_driver(&uclogic_driver);
614}
615
616static void __exit uclogic_exit(void)
617{
618 hid_unregister_driver(&uclogic_driver);
619}
620
621module_init(uclogic_init);
622module_exit(uclogic_exit);
623MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-waltop.c b/drivers/hid/hid-waltop.c
new file mode 100644
index 000000000000..b3a4163f2e67
--- /dev/null
+++ b/drivers/hid/hid-waltop.c
@@ -0,0 +1,1099 @@
1/*
2 * HID driver for Waltop devices not fully compliant with HID standard
3 *
4 * Copyright (c) 2010 Nikolai Kondrashov
5 */
6
7/*
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
12 */
13
14#include <linux/device.h>
15#include <linux/hid.h>
16#include <linux/module.h>
17
18#include "hid-ids.h"
19
20/*
 21 * There is an official driver on the manufacturer's website which, for some
 22 * reason, was never submitted to the kernel. The official driver does not
 23 * seem to support extra features of some tablets, such as the wheels.
24 *
 25 * It shows that feature report ID 2 can be used to control the input mode of
 26 * any Waltop tablet, switching it between "default", "tablet" and "ink".
27 *
28 * This driver only uses "default" mode for all the supported tablets. This
29 * mode tries to be HID-compatible (not very successfully), but cripples the
30 * resolution of some tablets.
31 *
32 * The "tablet" mode uses some proprietary, yet decipherable protocol, which
33 * represents the correct resolution, but is possibly HID-incompatible (i.e.
34 * indescribable by a report descriptor).
35 *
36 * The purpose of the "ink" mode is unknown.
37 *
38 * The feature reports needed for switching to each mode are these:
39 *
40 * 02 16 00 default
41 * 02 16 01 tablet
42 * 02 16 02 ink
43 */
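The driver below leaves every supported tablet in "default" mode, but, for illustration, one of the feature reports listed above could be sent through the raw report callback, the same mechanism the Sixaxis Bluetooth path earlier in this diff uses. A minimal sketch, assuming a hypothetical helper (waltop_set_mode) that is not part of the driver:

static int waltop_set_mode(struct hid_device *hdev, __u8 mode)
{
	/* Feature report ID 2: 02 16 <mode>, 0 = default, 1 = tablet, 2 = ink */
	__u8 buf[3] = { 0x02, 0x16, 0x00 };

	buf[2] = mode;
	return hdev->hid_output_raw_report(hdev, buf, sizeof(buf),
					HID_FEATURE_REPORT);
}

Since the "default" mode already works with the fixed descriptors, no such call is made anywhere in the driver.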
44
45/*
46 * Original Slim Tablet 5.8 inch report descriptor.
47 *
48 * All the reports except the report with ID 16 (the stylus) are unused,
 49 * possibly because the tablet is not configured to use them, or because they
 50 * were just copied from a more capable model. The full purpose of the
 51 * features described for report ID 2 is unknown.
52 *
 53 * The stylus buttons are described as three bit fields, whereas they are
 54 * actually an "array", i.e. reported as button numbers (1, 2 and 3).
55 * The "eraser" field is not used. There is also a "push" without a "pop" in
56 * the stylus description.
57 *
58 * Usage Page (Desktop), ; Generic desktop controls (01h)
59 * Usage (Mouse), ; Mouse (02h, application collection)
60 * Collection (Application),
61 * Report ID (1),
62 * Usage (Pointer), ; Pointer (01h, physical collection)
63 * Collection (Physical),
64 * Usage Page (Button), ; Button (09h)
65 * Usage Minimum (01h),
66 * Usage Maximum (05h),
67 * Logical Minimum (0),
68 * Logical Maximum (1),
69 * Report Size (1),
70 * Report Count (5),
71 * Input (Variable),
72 * Report Size (3),
73 * Report Count (1),
74 * Input (Constant, Variable),
75 * Usage Page (Desktop), ; Generic desktop controls (01h)
76 * Usage (X), ; X (30h, dynamic value)
77 * Usage (Y), ; Y (31h, dynamic value)
78 * Usage (Wheel), ; Wheel (38h, dynamic value)
79 * Logical Minimum (-127),
80 * Logical Maximum (127),
81 * Report Size (8),
82 * Report Count (3),
83 * Input (Variable, Relative),
84 * End Collection,
85 * End Collection,
86 * Usage Page (Digitizer), ; Digitizer (0Dh)
87 * Usage (Pen), ; Pen (02h, application collection)
88 * Collection (Application),
89 * Report ID (2),
90 * Usage (Stylus), ; Stylus (20h, logical collection)
91 * Collection (Physical),
92 * Usage (00h),
93 * Logical Minimum (0),
94 * Logical Maximum (255),
95 * Report Size (8),
96 * Report Count (7),
97 * Input (Variable),
98 * Usage (Azimuth), ; Azimuth (3Fh, dynamic value)
99 * Usage (Altitude), ; Altitude (40h, dynamic value)
100 * Logical Minimum (0),
101 * Logical Maximum (255),
102 * Report Size (8),
103 * Report Count (2),
104 * Feature (Variable),
105 * End Collection,
106 * Report ID (5),
107 * Usage Page (Digitizer), ; Digitizer (0Dh)
108 * Usage (Stylus), ; Stylus (20h, logical collection)
109 * Collection (Physical),
110 * Usage (00h),
111 * Logical Minimum (0),
112 * Logical Maximum (255),
113 * Report Size (8),
114 * Report Count (7),
115 * Input (Variable),
116 * End Collection,
117 * Report ID (10),
118 * Usage Page (Digitizer), ; Digitizer (0Dh)
119 * Usage (Stylus), ; Stylus (20h, logical collection)
120 * Collection (Physical),
121 * Usage (00h),
122 * Logical Minimum (0),
123 * Logical Maximum (255),
124 * Report Size (8),
125 * Report Count (3),
126 * Input (Variable),
127 * End Collection,
128 * Report ID (16),
129 * Usage (Stylus), ; Stylus (20h, logical collection)
130 * Collection (Physical),
131 * Usage (Tip Switch), ; Tip switch (42h, momentary control)
132 * Usage (Barrel Switch), ; Barrel switch (44h, momentary control)
133 * Usage (Invert), ; Invert (3Ch, momentary control)
134 * Usage (Eraser), ; Eraser (45h, momentary control)
135 * Usage (In Range), ; In range (32h, momentary control)
136 * Logical Minimum (0),
137 * Logical Maximum (1),
138 * Report Size (1),
139 * Report Count (5),
140 * Input (Variable),
141 * Report Count (3),
142 * Input (Constant, Variable),
143 * Usage Page (Desktop), ; Generic desktop controls (01h)
144 * Usage (X), ; X (30h, dynamic value)
145 * Report Size (16),
146 * Report Count (1),
147 * Push,
148 * Unit Exponent (13),
149 * Unit (Inch^3),
150 * Logical Minimum (0),
151 * Logical Maximum (10000),
152 * Physical Minimum (0),
153 * Physical Maximum (10000),
154 * Input (Variable),
155 * Usage (Y), ; Y (31h, dynamic value)
156 * Logical Maximum (6000),
157 * Physical Maximum (6000),
158 * Input (Variable),
159 * Usage Page (Digitizer), ; Digitizer (0Dh)
160 * Usage (Tip Pressure), ; Tip pressure (30h, dynamic value)
161 * Logical Minimum (0),
162 * Logical Maximum (1023),
163 * Physical Minimum (0),
164 * Physical Maximum (1023),
165 * Input (Variable),
166 * End Collection,
167 * End Collection
168 */
169
170/* Size of the original report descriptor of Slim Tablet 5.8 inch */
171#define SLIM_TABLET_5_8_INCH_RDESC_ORIG_SIZE 222
172
173/*
174 * Fixed Slim Tablet 5.8 inch descriptor.
175 *
176 * All the reports except the stylus report (ID 16) were removed as unused.
177 * The stylus buttons description was fixed.
178 */
179static __u8 slim_tablet_5_8_inch_rdesc_fixed[] = {
180 0x05, 0x0D, /* Usage Page (Digitizer), */
181 0x09, 0x02, /* Usage (Pen), */
182 0xA1, 0x01, /* Collection (Application), */
183 0x85, 0x10, /* Report ID (16), */
184 0x09, 0x20, /* Usage (Stylus), */
185 0xA0, /* Collection (Physical), */
186 0x09, 0x42, /* Usage (Tip Switch), */
187 0x09, 0x44, /* Usage (Barrel Switch), */
188 0x09, 0x46, /* Usage (Tablet Pick), */
189 0x15, 0x01, /* Logical Minimum (1), */
190 0x25, 0x03, /* Logical Maximum (3), */
191 0x75, 0x04, /* Report Size (4), */
192 0x95, 0x01, /* Report Count (1), */
193 0x80, /* Input, */
194 0x09, 0x32, /* Usage (In Range), */
195 0x14, /* Logical Minimum (0), */
196 0x25, 0x01, /* Logical Maximum (1), */
197 0x75, 0x01, /* Report Size (1), */
198 0x95, 0x01, /* Report Count (1), */
199 0x81, 0x02, /* Input (Variable), */
200 0x95, 0x03, /* Report Count (3), */
201 0x81, 0x03, /* Input (Constant, Variable), */
202 0x75, 0x10, /* Report Size (16), */
203 0x95, 0x01, /* Report Count (1), */
204 0x14, /* Logical Minimum (0), */
205 0xA4, /* Push, */
206 0x05, 0x01, /* Usage Page (Desktop), */
207 0x65, 0x13, /* Unit (Inch), */
208 0x55, 0xFD, /* Unit Exponent (-3), */
209 0x34, /* Physical Minimum (0), */
210 0x09, 0x30, /* Usage (X), */
211 0x46, 0x88, 0x13, /* Physical Maximum (5000), */
212 0x26, 0x10, 0x27, /* Logical Maximum (10000), */
213 0x81, 0x02, /* Input (Variable), */
214 0x09, 0x31, /* Usage (Y), */
215 0x46, 0xB8, 0x0B, /* Physical Maximum (3000), */
216 0x26, 0x70, 0x17, /* Logical Maximum (6000), */
217 0x81, 0x02, /* Input (Variable), */
218 0xB4, /* Pop, */
219 0x09, 0x30, /* Usage (Tip Pressure), */
220 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
221 0x81, 0x02, /* Input (Variable), */
222 0xC0, /* End Collection, */
223 0xC0 /* End Collection */
224};
225
226/*
227 * Original Slim Tablet 12.1 inch report descriptor.
228 *
229 * The descriptor is similar to the Slim Tablet 5.8 inch descriptor with the
 230 * addition of a keyboard report, seemingly unused. It may have gotten here
231 * from a Media Tablet - probably an unimplemented feature.
232 *
233 * Usage Page (Desktop), ; Generic desktop controls (01h)
234 * Usage (Mouse), ; Mouse (02h, application collection)
235 * Collection (Application),
236 * Report ID (1),
237 * Usage (Pointer), ; Pointer (01h, physical collection)
238 * Collection (Physical),
239 * Usage Page (Button), ; Button (09h)
240 * Usage Minimum (01h),
241 * Usage Maximum (05h),
242 * Logical Minimum (0),
243 * Logical Maximum (1),
244 * Report Size (1),
245 * Report Count (5),
246 * Input (Variable),
247 * Report Size (3),
248 * Report Count (1),
249 * Input (Constant, Variable),
250 * Usage Page (Desktop), ; Generic desktop controls (01h)
251 * Usage (X), ; X (30h, dynamic value)
252 * Usage (Y), ; Y (31h, dynamic value)
253 * Usage (Wheel), ; Wheel (38h, dynamic value)
254 * Logical Minimum (-127),
255 * Logical Maximum (127),
256 * Report Size (8),
257 * Report Count (3),
258 * Input (Variable, Relative),
259 * End Collection,
260 * End Collection,
261 * Usage Page (Digitizer), ; Digitizer (0Dh)
262 * Usage (Pen), ; Pen (02h, application collection)
263 * Collection (Application),
264 * Report ID (2),
265 * Usage (Stylus), ; Stylus (20h, logical collection)
266 * Collection (Physical),
267 * Usage (00h),
268 * Logical Minimum (0),
269 * Logical Maximum (255),
270 * Report Size (8),
271 * Report Count (7),
272 * Input (Variable),
273 * Usage (Azimuth), ; Azimuth (3Fh, dynamic value)
274 * Usage (Altitude), ; Altitude (40h, dynamic value)
275 * Logical Minimum (0),
276 * Logical Maximum (255),
277 * Report Size (8),
278 * Report Count (2),
279 * Feature (Variable),
280 * End Collection,
281 * Report ID (5),
282 * Usage Page (Digitizer), ; Digitizer (0Dh)
283 * Usage (Stylus), ; Stylus (20h, logical collection)
284 * Collection (Physical),
285 * Usage (00h),
286 * Logical Minimum (0),
287 * Logical Maximum (255),
288 * Report Size (8),
289 * Report Count (7),
290 * Input (Variable),
291 * End Collection,
292 * Report ID (10),
293 * Usage Page (Digitizer), ; Digitizer (0Dh)
294 * Usage (Stylus), ; Stylus (20h, logical collection)
295 * Collection (Physical),
296 * Usage (00h),
297 * Logical Minimum (0),
298 * Logical Maximum (255),
299 * Report Size (8),
300 * Report Count (3),
301 * Input (Variable),
302 * End Collection,
303 * Report ID (16),
304 * Usage (Stylus), ; Stylus (20h, logical collection)
305 * Collection (Physical),
306 * Usage (Tip Switch), ; Tip switch (42h, momentary control)
307 * Usage (Barrel Switch), ; Barrel switch (44h, momentary control)
308 * Usage (Invert), ; Invert (3Ch, momentary control)
309 * Usage (Eraser), ; Eraser (45h, momentary control)
310 * Usage (In Range), ; In range (32h, momentary control)
311 * Logical Minimum (0),
312 * Logical Maximum (1),
313 * Report Size (1),
314 * Report Count (5),
315 * Input (Variable),
316 * Report Count (3),
317 * Input (Constant, Variable),
318 * Usage Page (Desktop), ; Generic desktop controls (01h)
319 * Usage (X), ; X (30h, dynamic value)
320 * Report Size (16),
321 * Report Count (1),
322 * Push,
323 * Unit Exponent (13),
324 * Unit (Inch^3),
325 * Logical Minimum (0),
326 * Logical Maximum (20000),
327 * Physical Minimum (0),
328 * Physical Maximum (20000),
329 * Input (Variable),
330 * Usage (Y), ; Y (31h, dynamic value)
331 * Logical Maximum (12500),
332 * Physical Maximum (12500),
333 * Input (Variable),
334 * Usage Page (Digitizer), ; Digitizer (0Dh)
335 * Usage (Tip Pressure), ; Tip pressure (30h, dynamic value)
336 * Logical Minimum (0),
337 * Logical Maximum (1023),
338 * Physical Minimum (0),
339 * Physical Maximum (1023),
340 * Input (Variable),
341 * End Collection,
342 * End Collection,
343 * Usage Page (Desktop), ; Generic desktop controls (01h)
344 * Usage (Keyboard), ; Keyboard (06h, application collection)
345 * Collection (Application),
346 * Report ID (13),
347 * Usage Page (Keyboard), ; Keyboard/keypad (07h)
348 * Usage Minimum (KB Leftcontrol), ; Keyboard left control
349 * ; (E0h, dynamic value)
350 * Usage Maximum (KB Right GUI), ; Keyboard right GUI (E7h, dynamic value)
351 * Logical Minimum (0),
352 * Logical Maximum (1),
353 * Report Size (1),
354 * Report Count (8),
355 * Input (Variable),
356 * Report Size (8),
357 * Report Count (1),
358 * Input (Constant),
359 * Usage Page (Keyboard), ; Keyboard/keypad (07h)
360 * Usage Minimum (None), ; No event (00h, selector)
361 * Usage Maximum (KB Application), ; Keyboard Application (65h, selector)
362 * Logical Minimum (0),
363 * Logical Maximum (101),
364 * Report Size (8),
365 * Report Count (5),
366 * Input,
367 * End Collection
368 */
369
370/* Size of the original report descriptor of Slim Tablet 12.1 inch */
371#define SLIM_TABLET_12_1_INCH_RDESC_ORIG_SIZE 269
372
373/*
374 * Fixed Slim Tablet 12.1 inch descriptor.
375 *
376 * All the reports except the stylus report (ID 16) were removed as unused.
377 * The stylus buttons description was fixed.
378 */
379static __u8 slim_tablet_12_1_inch_rdesc_fixed[] = {
380 0x05, 0x0D, /* Usage Page (Digitizer), */
381 0x09, 0x02, /* Usage (Pen), */
382 0xA1, 0x01, /* Collection (Application), */
383 0x85, 0x10, /* Report ID (16), */
384 0x09, 0x20, /* Usage (Stylus), */
385 0xA0, /* Collection (Physical), */
386 0x09, 0x42, /* Usage (Tip Switch), */
387 0x09, 0x44, /* Usage (Barrel Switch), */
388 0x09, 0x46, /* Usage (Tablet Pick), */
389 0x15, 0x01, /* Logical Minimum (1), */
390 0x25, 0x03, /* Logical Maximum (3), */
391 0x75, 0x04, /* Report Size (4), */
392 0x95, 0x01, /* Report Count (1), */
393 0x80, /* Input, */
394 0x09, 0x32, /* Usage (In Range), */
395 0x14, /* Logical Minimum (0), */
396 0x25, 0x01, /* Logical Maximum (1), */
397 0x75, 0x01, /* Report Size (1), */
398 0x95, 0x01, /* Report Count (1), */
399 0x81, 0x02, /* Input (Variable), */
400 0x95, 0x03, /* Report Count (3), */
401 0x81, 0x03, /* Input (Constant, Variable), */
402 0x75, 0x10, /* Report Size (16), */
403 0x95, 0x01, /* Report Count (1), */
404 0x14, /* Logical Minimum (0), */
405 0xA4, /* Push, */
406 0x05, 0x01, /* Usage Page (Desktop), */
407 0x65, 0x13, /* Unit (Inch), */
408 0x55, 0xFD, /* Unit Exponent (-3), */
409 0x34, /* Physical Minimum (0), */
410 0x09, 0x30, /* Usage (X), */
411 0x46, 0x10, 0x27, /* Physical Maximum (10000), */
412 0x26, 0x20, 0x4E, /* Logical Maximum (20000), */
413 0x81, 0x02, /* Input (Variable), */
414 0x09, 0x31, /* Usage (Y), */
415 0x46, 0x6A, 0x18, /* Physical Maximum (6250), */
416 0x26, 0xD4, 0x30, /* Logical Maximum (12500), */
417 0x81, 0x02, /* Input (Variable), */
418 0xB4, /* Pop, */
419 0x09, 0x30, /* Usage (Tip Pressure), */
420 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
421 0x81, 0x02, /* Input (Variable), */
422 0xC0, /* End Collection, */
423 0xC0 /* End Collection */
424};
425
426/*
427 * Original Media Tablet 10.6 inch report descriptor.
428 *
429 * There are at least two versions of this model in the wild. They are
430 * represented by Genius G-Pen M609 (older version) and Genius G-Pen M609X
431 * (newer version).
432 *
433 * Both versions have the usual pen with two barrel buttons and two
434 * identical wheels with center buttons in the top corners of the tablet
435 * base. They also have buttons on the top, between the wheels, for
436 * selecting the wheels' functions and wide/standard mode. In the wide mode
 437 * the whole working surface is sensed; in the standard mode a narrower area
438 * is sensed, but the logical report extents remain the same. These modes
439 * correspond roughly to 16:9 and 4:3 aspect ratios respectively.
440 *
441 * The older version has three wheel function buttons ("scroll", "zoom" and
442 * "volume") and two separate buttons for wide and standard mode. The newer
443 * version has four wheel function buttons (plus "brush") and only one
444 * button is used for selecting wide/standard mode. So, the total number of
445 * buttons remains the same, but one of the mode buttons is repurposed as a
446 * wheels' function button in the newer version.
447 *
448 * The wheel functions are:
449 * scroll - the wheels act as scroll wheels, the center buttons switch
450 * between vertical and horizontal scrolling;
451 * zoom - the wheels zoom in/out, the buttons supposedly reset to 100%;
452 * volume - the wheels control the sound volume, the buttons mute;
453 * brush - the wheels are supposed to control brush width in a graphics
454 * editor, the buttons do nothing.
455 *
456 * Below is the newer version's report descriptor. It may very well be that
457 * the older version's descriptor is different and thus it won't be
458 * supported.
459 *
460 * The mouse report (ID 1) only uses the wheel field for reporting the tablet
461 * wheels' scroll mode. The keyboard report (ID 13) is used to report the
 462 * wheels' zoom and brush control functions as key presses. Report ID 12 is
 463 * used to report the wheels' volume control functions. The stylus report
 464 * (ID 16) has the same problems as the Slim Tablet 5.8 inch report.
465 *
466 * The rest of the reports are unused, at least in the default configuration.
467 * The purpose of the features is unknown.
468 *
469 * Usage Page (Desktop),
470 * Usage (Mouse),
471 * Collection (Application),
472 * Report ID (1),
473 * Usage (Pointer),
474 * Collection (Physical),
475 * Usage Page (Button),
476 * Usage Minimum (01h),
477 * Usage Maximum (05h),
478 * Logical Minimum (0),
479 * Logical Maximum (1),
480 * Report Size (1),
481 * Report Count (5),
482 * Input (Variable),
483 * Report Size (3),
484 * Report Count (1),
485 * Input (Constant, Variable),
486 * Usage Page (Desktop),
487 * Usage (X),
488 * Usage (Y),
489 * Usage (Wheel),
490 * Logical Minimum (-127),
491 * Logical Maximum (127),
492 * Report Size (8),
493 * Report Count (3),
494 * Input (Variable, Relative),
495 * End Collection,
496 * End Collection,
497 * Usage Page (Digitizer),
498 * Usage (Pen),
499 * Collection (Application),
500 * Report ID (2),
501 * Usage (Stylus),
502 * Collection (Physical),
503 * Usage (00h),
504 * Logical Minimum (0),
505 * Logical Maximum (255),
506 * Report Size (8),
507 * Report Count (7),
508 * Input (Variable),
509 * Usage (Azimuth),
510 * Usage (Altitude),
511 * Logical Minimum (0),
512 * Logical Maximum (255),
513 * Report Size (8),
514 * Report Count (2),
515 * Feature (Variable),
516 * End Collection,
517 * Report ID (5),
518 * Usage Page (Digitizer),
519 * Usage (Stylus),
520 * Collection (Physical),
521 * Usage (00h),
522 * Logical Minimum (0),
523 * Logical Maximum (255),
524 * Report Size (8),
525 * Report Count (7),
526 * Input (Variable),
527 * End Collection,
528 * Report ID (10),
529 * Usage Page (Digitizer),
530 * Usage (Stylus),
531 * Collection (Physical),
532 * Usage (00h),
533 * Logical Minimum (0),
534 * Logical Maximum (255),
535 * Report Size (8),
536 * Report Count (7),
537 * Input (Variable),
538 * End Collection,
539 * Report ID (16),
540 * Usage (Stylus),
541 * Collection (Physical),
542 * Usage (Tip Switch),
543 * Usage (Barrel Switch),
544 * Usage (Invert),
545 * Usage (Eraser),
546 * Usage (In Range),
547 * Logical Minimum (0),
548 * Logical Maximum (1),
549 * Report Size (1),
550 * Report Count (5),
551 * Input (Variable),
552 * Report Count (3),
553 * Input (Constant, Variable),
554 * Usage Page (Desktop),
555 * Usage (X),
556 * Report Size (16),
557 * Report Count (1),
558 * Push,
559 * Unit Exponent (13),
560 * Unit (Inch^3),
561 * Logical Minimum (0),
562 * Logical Maximum (18000),
563 * Physical Minimum (0),
564 * Physical Maximum (18000),
565 * Input (Variable),
566 * Usage (Y),
567 * Logical Maximum (11000),
568 * Physical Maximum (11000),
569 * Input (Variable),
570 * Usage Page (Digitizer),
571 * Usage (Tip Pressure),
572 * Logical Minimum (0),
573 * Logical Maximum (1023),
574 * Physical Minimum (0),
575 * Physical Maximum (1023),
576 * Input (Variable),
577 * End Collection,
578 * End Collection,
579 * Usage Page (Desktop),
580 * Usage (Keyboard),
581 * Collection (Application),
582 * Report ID (13),
583 * Usage Page (Keyboard),
584 * Usage Minimum (KB Leftcontrol),
585 * Usage Maximum (KB Right GUI),
586 * Logical Minimum (0),
587 * Logical Maximum (1),
588 * Report Size (1),
589 * Report Count (8),
590 * Input (Variable),
591 * Report Size (8),
592 * Report Count (1),
593 * Input (Constant),
594 * Usage Page (Keyboard),
595 * Usage Minimum (None),
596 * Usage Maximum (KB Application),
597 * Logical Minimum (0),
598 * Logical Maximum (101),
599 * Report Size (8),
600 * Report Count (5),
601 * Input,
602 * End Collection,
603 * Usage Page (Consumer),
604 * Usage (Consumer Control),
605 * Collection (Application),
606 * Report ID (12),
607 * Usage (Volume Inc),
608 * Usage (Volume Dec),
609 * Usage (Mute),
610 * Logical Minimum (0),
611 * Logical Maximum (1),
612 * Report Size (1),
613 * Report Count (3),
614 * Input (Variable, Relative),
615 * Report Size (5),
616 * Report Count (1),
617 * Input (Constant, Variable, Relative),
618 * End Collection
619 */
620
621/* Size of the original report descriptor of Media Tablet 10.6 inch */
622#define MEDIA_TABLET_10_6_INCH_RDESC_ORIG_SIZE 300
623
624/*
625 * Fixed Media Tablet 10.6 inch descriptor.
626 *
627 * The descriptions of reports unused in the default configuration are
628 * removed. The stylus report (ID 16) is fixed similarly to Slim Tablet 5.8
629 * inch. The unused mouse report (ID 1) fields are replaced with constant
630 * padding.
631 *
632 * The keyboard report (ID 13) is hacked to instead have an "array" field
633 * reporting consumer page controls, and all the unused bits are masked out
634 * with constant padding. The "brush" wheels' function is represented as "Scan
635 * Previous/Next Track" controls due to the lack of brush controls in the
636 * usage tables specification.
637 */
638static __u8 media_tablet_10_6_inch_rdesc_fixed[] = {
639 0x05, 0x0D, /* Usage Page (Digitizer), */
640 0x09, 0x02, /* Usage (Pen), */
641 0xA1, 0x01, /* Collection (Application), */
642 0x85, 0x10, /* Report ID (16), */
643 0x09, 0x20, /* Usage (Stylus), */
644 0xA0, /* Collection (Physical), */
645 0x09, 0x42, /* Usage (Tip Switch), */
646 0x09, 0x44, /* Usage (Barrel Switch), */
647 0x09, 0x46, /* Usage (Tablet Pick), */
648 0x15, 0x01, /* Logical Minimum (1), */
649 0x25, 0x03, /* Logical Maximum (3), */
650 0x75, 0x04, /* Report Size (4), */
651 0x95, 0x01, /* Report Count (1), */
652 0x80, /* Input, */
653 0x75, 0x01, /* Report Size (1), */
654 0x09, 0x32, /* Usage (In Range), */
655 0x14, /* Logical Minimum (0), */
656 0x25, 0x01, /* Logical Maximum (1), */
657 0x95, 0x01, /* Report Count (1), */
658 0x81, 0x02, /* Input (Variable), */
659 0x95, 0x03, /* Report Count (3), */
660 0x81, 0x03, /* Input (Constant, Variable), */
661 0x75, 0x10, /* Report Size (16), */
662 0x95, 0x01, /* Report Count (1), */
663 0x14, /* Logical Minimum (0), */
664 0xA4, /* Push, */
665 0x05, 0x01, /* Usage Page (Desktop), */
666 0x65, 0x13, /* Unit (Inch), */
667 0x55, 0xFD, /* Unit Exponent (-3), */
668 0x34, /* Physical Minimum (0), */
669 0x09, 0x30, /* Usage (X), */
670 0x46, 0x28, 0x23, /* Physical Maximum (9000), */
671 0x26, 0x50, 0x46, /* Logical Maximum (18000), */
672 0x81, 0x02, /* Input (Variable), */
673 0x09, 0x31, /* Usage (Y), */
674 0x46, 0x7C, 0x15, /* Physical Maximum (5500), */
675 0x26, 0xF8, 0x2A, /* Logical Maximum (11000), */
676 0x81, 0x02, /* Input (Variable), */
677 0xB4, /* Pop, */
678 0x09, 0x30, /* Usage (Tip Pressure), */
679 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
680 0x81, 0x02, /* Input (Variable), */
681 0xC0, /* End Collection, */
682 0xC0, /* End Collection, */
683 0x05, 0x01, /* Usage Page (Desktop), */
684 0x09, 0x02, /* Usage (Mouse), */
685 0xA1, 0x01, /* Collection (Application), */
686 0x85, 0x01, /* Report ID (1), */
687 0x09, 0x01, /* Usage (Pointer), */
688 0xA0, /* Collection (Physical), */
689 0x75, 0x08, /* Report Size (8), */
690 0x95, 0x03, /* Report Count (3), */
691 0x81, 0x03, /* Input (Constant, Variable), */
692 0x95, 0x02, /* Report Count (2), */
693 0x15, 0xFF, /* Logical Minimum (-1), */
694 0x25, 0x01, /* Logical Maximum (1), */
695 0x09, 0x38, /* Usage (Wheel), */
696 0x0B, 0x38, 0x02, /* Usage (Consumer AC Pan), */
697 0x0C, 0x00,
698 0x81, 0x06, /* Input (Variable, Relative), */
699 0x95, 0x02, /* Report Count (2), */
700 0x81, 0x03, /* Input (Constant, Variable), */
701 0xC0, /* End Collection, */
702 0xC0, /* End Collection, */
703 0x05, 0x0C, /* Usage Page (Consumer), */
704 0x09, 0x01, /* Usage (Consumer Control), */
705 0xA1, 0x01, /* Collection (Application), */
706 0x85, 0x0D, /* Report ID (13), */
707 0x95, 0x01, /* Report Count (1), */
708 0x75, 0x10, /* Report Size (16), */
709 0x81, 0x03, /* Input (Constant, Variable), */
710 0x0A, 0x2F, 0x02, /* Usage (AC Zoom), */
711 0x0A, 0x2E, 0x02, /* Usage (AC Zoom Out), */
712 0x0A, 0x2D, 0x02, /* Usage (AC Zoom In), */
713 0x09, 0xB6, /* Usage (Scan Previous Track), */
714 0x09, 0xB5, /* Usage (Scan Next Track), */
715 0x08, /* Usage (00h), */
716 0x08, /* Usage (00h), */
717 0x08, /* Usage (00h), */
718 0x08, /* Usage (00h), */
719 0x08, /* Usage (00h), */
720 0x0A, 0x2E, 0x02, /* Usage (AC Zoom Out), */
721 0x0A, 0x2D, 0x02, /* Usage (AC Zoom In), */
722 0x15, 0x0C, /* Logical Minimum (12), */
723 0x25, 0x17, /* Logical Maximum (23), */
724 0x75, 0x05, /* Report Size (5), */
725 0x80, /* Input, */
726 0x75, 0x03, /* Report Size (3), */
727 0x81, 0x03, /* Input (Constant, Variable), */
728 0x75, 0x20, /* Report Size (32), */
729 0x81, 0x03, /* Input (Constant, Variable), */
730 0xC0, /* End Collection, */
731 0x09, 0x01, /* Usage (Consumer Control), */
732 0xA1, 0x01, /* Collection (Application), */
733 0x85, 0x0C, /* Report ID (12), */
734 0x75, 0x01, /* Report Size (1), */
735 0x09, 0xE9, /* Usage (Volume Inc), */
736 0x09, 0xEA, /* Usage (Volume Dec), */
737 0x09, 0xE2, /* Usage (Mute), */
738 0x14, /* Logical Minimum (0), */
739 0x25, 0x01, /* Logical Maximum (1), */
740 0x95, 0x03, /* Report Count (3), */
741 0x81, 0x06, /* Input (Variable, Relative), */
742 0x95, 0x35, /* Report Count (53), */
743 0x81, 0x03, /* Input (Constant, Variable), */
744 0xC0 /* End Collection */
745};
746
747/*
748 * Original Media Tablet 14.1 inch report descriptor.
749 *
750 * There are at least two versions of this model in the wild. They are
751 * represented by Genius G-Pen M712 (older version) and Genius G-Pen M712X
752 * (newer version). The hardware difference between these versions is the same
753 * as between older and newer versions of Media Tablet 10.6 inch. The report
754 * descriptors are identical for both versions.
755 *
 756 * The function, behavior and report descriptor of this tablet are similar to
 757 * those of the Media Tablet 10.6 inch. However, there is one more field (with
 758 * Consumer AC Pan usage) in the mouse description. Then the tablet X and Y
 759 * logical extents both get scaled to the 0..16383 range (a hardware limit?),
760 * which kind of defeats the advertised 4000 LPI resolution, considering the
761 * physical extents of 12x7.25 inches. Plus, reports 5, 10 and 255 are used
762 * sometimes (while moving the pen) with unknown purpose. Also, the key codes
763 * generated for zoom in/out are different.
764 *
765 * Usage Page (Desktop),
766 * Usage (Mouse),
767 * Collection (Application),
768 * Report ID (1),
769 * Usage (Pointer),
770 * Collection (Physical),
771 * Usage Page (Button),
772 * Usage Minimum (01h),
773 * Usage Maximum (05h),
774 * Logical Minimum (0),
775 * Logical Maximum (1),
776 * Report Size (1),
777 * Report Count (5),
778 * Input (Variable),
779 * Report Size (3),
780 * Report Count (1),
781 * Input (Constant, Variable),
782 * Usage Page (Desktop),
783 * Usage (X),
784 * Usage (Y),
785 * Usage (Wheel),
786 * Logical Minimum (-127),
787 * Logical Maximum (127),
788 * Report Size (8),
789 * Report Count (3),
790 * Input (Variable, Relative),
791 * Usage Page (Consumer),
792 * Logical Minimum (-127),
793 * Logical Maximum (127),
794 * Report Size (8),
795 * Report Count (1),
796 * Usage (AC Pan),
797 * Input (Variable, Relative),
798 * End Collection,
799 * End Collection,
800 * Usage Page (Digitizer),
801 * Usage (Pen),
802 * Collection (Application),
803 * Report ID (2),
804 * Usage (Stylus),
805 * Collection (Physical),
806 * Usage (00h),
807 * Logical Minimum (0),
808 * Logical Maximum (255),
809 * Report Size (8),
810 * Report Count (7),
811 * Input (Variable),
812 * Usage (Azimuth),
813 * Usage (Altitude),
814 * Logical Minimum (0),
815 * Logical Maximum (255),
816 * Report Size (8),
817 * Report Count (2),
818 * Feature (Variable),
819 * End Collection,
820 * Report ID (5),
821 * Usage Page (Digitizer),
822 * Usage (Stylus),
823 * Collection (Physical),
824 * Usage (00h),
825 * Logical Minimum (0),
826 * Logical Maximum (255),
827 * Report Size (8),
828 * Report Count (7),
829 * Input (Variable),
830 * End Collection,
831 * Report ID (10),
832 * Usage Page (Digitizer),
833 * Usage (Stylus),
834 * Collection (Physical),
835 * Usage (00h),
836 * Logical Minimum (0),
837 * Logical Maximum (255),
838 * Report Size (8),
839 * Report Count (7),
840 * Input (Variable),
841 * End Collection,
842 * Report ID (16),
843 * Usage (Stylus),
844 * Collection (Physical),
845 * Usage (Tip Switch),
846 * Usage (Barrel Switch),
847 * Usage (Invert),
848 * Usage (Eraser),
849 * Usage (In Range),
850 * Logical Minimum (0),
851 * Logical Maximum (1),
852 * Report Size (1),
853 * Report Count (5),
854 * Input (Variable),
855 * Report Count (3),
856 * Input (Constant, Variable),
857 * Usage Page (Desktop),
858 * Usage (X),
859 * Report Size (16),
860 * Report Count (1),
861 * Push,
862 * Unit Exponent (13),
863 * Unit (Inch^3),
864 * Logical Minimum (0),
865 * Logical Maximum (16383),
866 * Physical Minimum (0),
867 * Physical Maximum (16383),
868 * Input (Variable),
869 * Usage (Y),
870 * Input (Variable),
871 * Usage Page (Digitizer),
872 * Usage (Tip Pressure),
873 * Logical Minimum (0),
874 * Logical Maximum (1023),
875 * Physical Minimum (0),
876 * Physical Maximum (1023),
877 * Input (Variable),
878 * End Collection,
879 * End Collection,
880 * Usage Page (Desktop),
881 * Usage (Keyboard),
882 * Collection (Application),
883 * Report ID (13),
884 * Usage Page (Keyboard),
885 * Usage Minimum (KB Leftcontrol),
886 * Usage Maximum (KB Right GUI),
887 * Logical Minimum (0),
888 * Logical Maximum (1),
889 * Report Size (1),
890 * Report Count (8),
891 * Input (Variable),
892 * Report Size (8),
893 * Report Count (1),
894 * Input (Constant),
895 * Usage Page (Keyboard),
896 * Usage Minimum (None),
897 * Usage Maximum (KB Application),
898 * Logical Minimum (0),
899 * Logical Maximum (101),
900 * Report Size (8),
901 * Report Count (5),
902 * Input,
903 * End Collection,
904 * Usage Page (Consumer),
905 * Usage (Consumer Control),
906 * Collection (Application),
907 * Report ID (12),
908 * Usage (Volume Inc),
909 * Usage (Volume Dec),
910 * Usage (Mute),
911 * Logical Minimum (0),
912 * Logical Maximum (1),
913 * Report Size (1),
914 * Report Count (3),
915 * Input (Variable, Relative),
916 * Report Size (5),
917 * Report Count (1),
918 * Input (Constant, Variable, Relative),
919 * End Collection
920 */
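To put the resolution remark in the comment above into numbers, here is a minimal standalone sketch (not part of the driver) that divides the 0..16383 logical range by the 12 x 7.25 inch physical extents; the 4000 LPI figure is the advertised value quoted above, and the program itself is purely illustrative.

#include <stdio.h>

int main(void)
{
	const double logical_max = 16383.0;	/* scaled logical range, per descriptor */
	const double width_in = 12.0;		/* physical X extent, inches */
	const double height_in = 7.25;		/* physical Y extent, inches */

	/* Effective resolution cannot exceed logical range / physical size. */
	printf("effective X resolution: ~%.0f LPI\n", logical_max / width_in);
	printf("effective Y resolution: ~%.0f LPI\n", logical_max / height_in);
	printf("advertised resolution:   4000 LPI\n");
	return 0;
}

Running this prints roughly 1365 LPI for X and 2260 LPI for Y, well below the advertised figure.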
921
922/* Size of the original report descriptor of Media Tablet 14.1 inch */
923#define MEDIA_TABLET_14_1_INCH_RDESC_ORIG_SIZE 309
924
925/*
926 * Fixed Media Tablet 14.1 inch descriptor.
927 * It is fixed similarly to the Media Tablet 10.6 inch descriptor.
928 */
929static __u8 media_tablet_14_1_inch_rdesc_fixed[] = {
930 0x05, 0x0D, /* Usage Page (Digitizer), */
931 0x09, 0x02, /* Usage (Pen), */
932 0xA1, 0x01, /* Collection (Application), */
933 0x85, 0x10, /* Report ID (16), */
934 0x09, 0x20, /* Usage (Stylus), */
935 0xA0, /* Collection (Physical), */
936 0x09, 0x42, /* Usage (Tip Switch), */
937 0x09, 0x44, /* Usage (Barrel Switch), */
938 0x09, 0x46, /* Usage (Tablet Pick), */
939 0x15, 0x01, /* Logical Minimum (1), */
940 0x25, 0x03, /* Logical Maximum (3), */
941 0x75, 0x04, /* Report Size (4), */
942 0x95, 0x01, /* Report Count (1), */
943 0x80, /* Input, */
944 0x75, 0x01, /* Report Size (1), */
945 0x09, 0x32, /* Usage (In Range), */
946 0x14, /* Logical Minimum (0), */
947 0x25, 0x01, /* Logical Maximum (1), */
948 0x95, 0x01, /* Report Count (1), */
949 0x81, 0x02, /* Input (Variable), */
950 0x95, 0x03, /* Report Count (3), */
951 0x81, 0x03, /* Input (Constant, Variable), */
952 0x75, 0x10, /* Report Size (16), */
953 0x95, 0x01, /* Report Count (1), */
954 0x14, /* Logical Minimum (0), */
955 0xA4, /* Push, */
956 0x05, 0x01, /* Usage Page (Desktop), */
957 0x65, 0x13, /* Unit (Inch), */
958 0x55, 0xFD, /* Unit Exponent (-3), */
959 0x34, /* Physical Minimum (0), */
960 0x09, 0x30, /* Usage (X), */
961 0x46, 0xE0, 0x2E, /* Physical Maximum (12000), */
962 0x26, 0xFF, 0x3F, /* Logical Maximum (16383), */
963 0x81, 0x02, /* Input (Variable), */
964 0x09, 0x31, /* Usage (Y), */
965 0x46, 0x52, 0x1C, /* Physical Maximum (7250), */
966 0x26, 0xFF, 0x3F, /* Logical Maximum (16383), */
967 0x81, 0x02, /* Input (Variable), */
968 0xB4, /* Pop, */
969 0x09, 0x30, /* Usage (Tip Pressure), */
970 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
971 0x81, 0x02, /* Input (Variable), */
972 0xC0, /* End Collection, */
973 0xC0, /* End Collection, */
974 0x05, 0x01, /* Usage Page (Desktop), */
975 0x09, 0x02, /* Usage (Mouse), */
976 0xA1, 0x01, /* Collection (Application), */
977 0x85, 0x01, /* Report ID (1), */
978 0x09, 0x01, /* Usage (Pointer), */
979 0xA0, /* Collection (Physical), */
980 0x75, 0x08, /* Report Size (8), */
981 0x95, 0x03, /* Report Count (3), */
982 0x81, 0x03, /* Input (Constant, Variable), */
983 0x95, 0x02, /* Report Count (2), */
984 0x15, 0xFF, /* Logical Minimum (-1), */
985 0x25, 0x01, /* Logical Maximum (1), */
986 0x09, 0x38, /* Usage (Wheel), */
987 0x0B, 0x38, 0x02, /* Usage (Consumer AC Pan), */
988 0x0C, 0x00,
989 0x81, 0x06, /* Input (Variable, Relative), */
990 0xC0, /* End Collection, */
991 0xC0, /* End Collection, */
992 0x05, 0x0C, /* Usage Page (Consumer), */
993 0x09, 0x01, /* Usage (Consumer Control), */
994 0xA1, 0x01, /* Collection (Application), */
995 0x85, 0x0D, /* Report ID (13), */
996 0x95, 0x01, /* Report Count (1), */
997 0x75, 0x10, /* Report Size (16), */
998 0x81, 0x03, /* Input (Constant, Variable), */
999 0x0A, 0x2F, 0x02, /* Usage (AC Zoom), */
1000 0x0A, 0x2E, 0x02, /* Usage (AC Zoom Out), */
1001 0x0A, 0x2D, 0x02, /* Usage (AC Zoom In), */
1002 0x09, 0xB6, /* Usage (Scan Previous Track), */
1003 0x09, 0xB5, /* Usage (Scan Next Track), */
1004 0x08, /* Usage (00h), */
1005 0x08, /* Usage (00h), */
1006 0x08, /* Usage (00h), */
1007 0x08, /* Usage (00h), */
1008 0x08, /* Usage (00h), */
1009 0x0A, 0x2E, 0x02, /* Usage (AC Zoom Out), */
1010 0x0A, 0x2D, 0x02, /* Usage (AC Zoom In), */
1011 0x15, 0x0C, /* Logical Minimum (12), */
1012 0x25, 0x17, /* Logical Maximum (23), */
1013 0x75, 0x05, /* Report Size (5), */
1014 0x80, /* Input, */
1015 0x75, 0x03, /* Report Size (3), */
1016 0x81, 0x03, /* Input (Constant, Variable), */
1017 0x75, 0x20, /* Report Size (32), */
1018 0x81, 0x03, /* Input (Constant, Variable), */
1019 0xC0, /* End Collection, */
1020 0x09, 0x01, /* Usage (Consumer Control), */
1021 0xA1, 0x01, /* Collection (Application), */
1022 0x85, 0x0C, /* Report ID (12), */
1023 0x75, 0x01, /* Report Size (1), */
1024 0x09, 0xE9, /* Usage (Volume Inc), */
1025 0x09, 0xEA, /* Usage (Volume Dec), */
1026 0x09, 0xE2, /* Usage (Mute), */
1027 0x14, /* Logical Minimum (0), */
1028 0x25, 0x01, /* Logical Maximum (1), */
1029 0x95, 0x03, /* Report Count (3), */
1030 0x81, 0x06, /* Input (Variable, Relative), */
1031 0x75, 0x05, /* Report Size (5), */
1032 0x81, 0x03, /* Input (Constant, Variable), */
1033 0xC0 /* End Collection */
1034};
1035
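/*
 * Substitute the fixed descriptor for the known-broken original one; the
 * size check ensures the substitution is only applied when the descriptor
 * matches the expected original size.
 */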
1036static __u8 *waltop_report_fixup(struct hid_device *hdev, __u8 *rdesc,
1037 unsigned int *rsize)
1038{
1039 switch (hdev->product) {
1040 case USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH:
1041 if (*rsize == SLIM_TABLET_5_8_INCH_RDESC_ORIG_SIZE) {
1042 rdesc = slim_tablet_5_8_inch_rdesc_fixed;
1043 *rsize = sizeof(slim_tablet_5_8_inch_rdesc_fixed);
1044 }
1045 break;
1046 case USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH:
1047 if (*rsize == SLIM_TABLET_12_1_INCH_RDESC_ORIG_SIZE) {
1048 rdesc = slim_tablet_12_1_inch_rdesc_fixed;
1049 *rsize = sizeof(slim_tablet_12_1_inch_rdesc_fixed);
1050 }
1051 break;
1052 case USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH:
1053 if (*rsize == MEDIA_TABLET_10_6_INCH_RDESC_ORIG_SIZE) {
1054 rdesc = media_tablet_10_6_inch_rdesc_fixed;
1055 *rsize = sizeof(media_tablet_10_6_inch_rdesc_fixed);
1056 }
1057 break;
1058 case USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH:
1059 if (*rsize == MEDIA_TABLET_14_1_INCH_RDESC_ORIG_SIZE) {
1060 rdesc = media_tablet_14_1_inch_rdesc_fixed;
1061 *rsize = sizeof(media_tablet_14_1_inch_rdesc_fixed);
1062 }
1063 break;
1064 }
1065 return rdesc;
1066}
1067
1068static const struct hid_device_id waltop_devices[] = {
1069 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP,
1070 USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) },
1071 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP,
1072 USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) },
1073 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP,
1074 USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH) },
1075 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP,
1076 USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH) },
1077 { }
1078};
1079MODULE_DEVICE_TABLE(hid, waltop_devices);
1080
1081static struct hid_driver waltop_driver = {
1082 .name = "waltop",
1083 .id_table = waltop_devices,
1084 .report_fixup = waltop_report_fixup,
1085};
1086
1087static int __init waltop_init(void)
1088{
1089 return hid_register_driver(&waltop_driver);
1090}
1091
1092static void __exit waltop_exit(void)
1093{
1094 hid_unregister_driver(&waltop_driver);
1095}
1096
1097module_init(waltop_init);
1098module_exit(waltop_exit);
1099MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-zydacron.c b/drivers/hid/hid-zydacron.c
index 9e8d35a203e4..aac1f9273149 100644
--- a/drivers/hid/hid-zydacron.c
+++ b/drivers/hid/hid-zydacron.c
@@ -27,10 +27,10 @@ struct zc_device {
27* Zydacron remote control has an invalid HID report descriptor, 27* Zydacron remote control has an invalid HID report descriptor,
28* that needs fixing before we can parse it. 28* that needs fixing before we can parse it.
29*/ 29*/
30static void zc_report_fixup(struct hid_device *hdev, __u8 *rdesc, 30static __u8 *zc_report_fixup(struct hid_device *hdev, __u8 *rdesc,
31 unsigned int rsize) 31 unsigned int *rsize)
32{ 32{
33 if (rsize >= 253 && 33 if (*rsize >= 253 &&
34 rdesc[0x96] == 0xbc && rdesc[0x97] == 0xff && 34 rdesc[0x96] == 0xbc && rdesc[0x97] == 0xff &&
35 rdesc[0xca] == 0xbc && rdesc[0xcb] == 0xff && 35 rdesc[0xca] == 0xbc && rdesc[0xcb] == 0xff &&
36 rdesc[0xe1] == 0xbc && rdesc[0xe2] == 0xff) { 36 rdesc[0xe1] == 0xbc && rdesc[0xe2] == 0xff) {
@@ -40,6 +40,7 @@ static void zc_report_fixup(struct hid_device *hdev, __u8 *rdesc,
40 rdesc[0x96] = rdesc[0xca] = rdesc[0xe1] = 0x0c; 40 rdesc[0x96] = rdesc[0xca] = rdesc[0xe1] = 0x0c;
41 rdesc[0x97] = rdesc[0xcb] = rdesc[0xe2] = 0x00; 41 rdesc[0x97] = rdesc[0xcb] = rdesc[0xe2] = 0x00;
42 } 42 }
43 return rdesc;
43} 44}
44 45
45#define zc_map_key_clear(c) \ 46#define zc_map_key_clear(c) \
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 925992f549f0..8a4b32dca9f7 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -218,9 +218,13 @@ static int hidraw_release(struct inode * inode, struct file * file)
218 unsigned int minor = iminor(inode); 218 unsigned int minor = iminor(inode);
219 struct hidraw *dev; 219 struct hidraw *dev;
220 struct hidraw_list *list = file->private_data; 220 struct hidraw_list *list = file->private_data;
221 int ret;
221 222
222 if (!hidraw_table[minor]) 223 mutex_lock(&minors_lock);
223 return -ENODEV; 224 if (!hidraw_table[minor]) {
225 ret = -ENODEV;
226 goto unlock;
227 }
224 228
225 list_del(&list->node); 229 list_del(&list->node);
226 dev = hidraw_table[minor]; 230 dev = hidraw_table[minor];
@@ -233,10 +237,12 @@ static int hidraw_release(struct inode * inode, struct file * file)
233 kfree(list->hidraw); 237 kfree(list->hidraw);
234 } 238 }
235 } 239 }
236
237 kfree(list); 240 kfree(list);
241 ret = 0;
242unlock:
243 mutex_unlock(&minors_lock);
238 244
239 return 0; 245 return ret;
240} 246}
241 247
242static long hidraw_ioctl(struct file *file, unsigned int cmd, 248static long hidraw_ioctl(struct file *file, unsigned int cmd,
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 599041a7f670..5489eab3a6bd 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -807,9 +807,10 @@ static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t co
807 struct usb_host_interface *interface = intf->cur_altsetting; 807 struct usb_host_interface *interface = intf->cur_altsetting;
808 int ret; 808 int ret;
809 809
810 if (usbhid->urbout) { 810 if (usbhid->urbout && report_type != HID_FEATURE_REPORT) {
811 int actual_length; 811 int actual_length;
812 int skipped_report_id = 0; 812 int skipped_report_id = 0;
813
813 if (buf[0] == 0x0) { 814 if (buf[0] == 0x0) {
814 /* Don't send the Report ID */ 815 /* Don't send the Report ID */
815 buf++; 816 buf++;
@@ -1469,9 +1470,6 @@ static int __init hid_init(void)
1469 retval = usbhid_quirks_init(quirks_param); 1470 retval = usbhid_quirks_init(quirks_param);
1470 if (retval) 1471 if (retval)
1471 goto usbhid_quirks_init_fail; 1472 goto usbhid_quirks_init_fail;
1472 retval = hiddev_init();
1473 if (retval)
1474 goto hiddev_init_fail;
1475 retval = usb_register(&hid_driver); 1473 retval = usb_register(&hid_driver);
1476 if (retval) 1474 if (retval)
1477 goto usb_register_fail; 1475 goto usb_register_fail;
@@ -1479,8 +1477,6 @@ static int __init hid_init(void)
1479 1477
1480 return 0; 1478 return 0;
1481usb_register_fail: 1479usb_register_fail:
1482 hiddev_exit();
1483hiddev_init_fail:
1484 usbhid_quirks_exit(); 1480 usbhid_quirks_exit();
1485usbhid_quirks_init_fail: 1481usbhid_quirks_init_fail:
1486 hid_unregister_driver(&hid_usb_driver); 1482 hid_unregister_driver(&hid_usb_driver);
@@ -1493,7 +1489,6 @@ no_queue:
1493static void __exit hid_exit(void) 1489static void __exit hid_exit(void)
1494{ 1490{
1495 usb_deregister(&hid_driver); 1491 usb_deregister(&hid_driver);
1496 hiddev_exit();
1497 usbhid_quirks_exit(); 1492 usbhid_quirks_exit();
1498 hid_unregister_driver(&hid_usb_driver); 1493 hid_unregister_driver(&hid_usb_driver);
1499 destroy_workqueue(resumption_waker); 1494 destroy_workqueue(resumption_waker);
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index f0260c699adb..2c185477eeb3 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -34,7 +34,6 @@ static const struct hid_blacklist {
34 { USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD, HID_QUIRK_BADPAD }, 34 { USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD, HID_QUIRK_BADPAD },
35 { USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD }, 35 { USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD },
36 { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER, HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET }, 36 { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER, HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET },
37 { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH, HID_QUIRK_MULTI_INPUT },
38 { USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER, HID_QUIRK_MULTI_INPUT }, 37 { USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER, HID_QUIRK_MULTI_INPUT },
39 { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART, HID_QUIRK_MULTI_INPUT }, 38 { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART, HID_QUIRK_MULTI_INPUT },
40 { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, 39 { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
@@ -63,6 +62,7 @@ static const struct hid_blacklist {
63 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_YOKE, HID_QUIRK_NOGET }, 62 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_YOKE, HID_QUIRK_NOGET },
64 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_PEDALS, HID_QUIRK_NOGET }, 63 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_PEDALS, HID_QUIRK_NOGET },
65 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET }, 64 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET },
65 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
66 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, 66 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
67 { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, 67 { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
68 { USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET }, 68 { USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET },
@@ -72,6 +72,10 @@ static const struct hid_blacklist {
72 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT }, 72 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT },
73 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U, HID_QUIRK_MULTI_INPUT }, 73 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U, HID_QUIRK_MULTI_INPUT },
74 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5, HID_QUIRK_MULTI_INPUT }, 74 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5, HID_QUIRK_MULTI_INPUT },
75 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U, HID_QUIRK_MULTI_INPUT },
76 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U, HID_QUIRK_MULTI_INPUT },
77 { USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH, HID_QUIRK_MULTI_INPUT },
78 { USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH, HID_QUIRK_MULTI_INPUT },
75 { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS }, 79 { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
76 { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, 80 { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
77 81
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index dfcb27613ec5..fedd88df9a18 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -67,8 +67,6 @@ struct hiddev_list {
67 struct mutex thread_lock; 67 struct mutex thread_lock;
68}; 68};
69 69
70static struct usb_driver hiddev_driver;
71
72/* 70/*
73 * Find a report, given the report's type and ID. The ID can be specified 71 * Find a report, given the report's type and ID. The ID can be specified
74 * indirectly by REPORT_ID_FIRST (which returns the first report of the given 72 * indirectly by REPORT_ID_FIRST (which returns the first report of the given
@@ -926,41 +924,3 @@ void hiddev_disconnect(struct hid_device *hid)
926 kfree(hiddev); 924 kfree(hiddev);
927 } 925 }
928} 926}
929
930/* Currently this driver is a USB driver. It's not a conventional one in
931 * the sense that it doesn't probe at the USB level. Instead it waits to
932 * be connected by HID through the hiddev_connect / hiddev_disconnect
933 * routines. The reason to register as a USB device is to gain part of the
934 * minor number space from the USB major.
935 *
936 * In theory, should the HID code be generalized to more than one physical
937 * medium (say, IEEE 1384), this driver will probably need to register its
938 * own major number, and in doing so, no longer need to register with USB.
939 * At that point the probe routine and hiddev_driver struct below will no
940 * longer be useful.
941 */
942
943
944/* We never attach in this manner, and rely on HID to connect us. This
945 * is why there is no disconnect routine defined in the usb_driver either.
946 */
947static int hiddev_usbd_probe(struct usb_interface *intf,
948 const struct usb_device_id *hiddev_info)
949{
950 return -ENODEV;
951}
952
953static /* const */ struct usb_driver hiddev_driver = {
954 .name = "hiddev",
955 .probe = hiddev_usbd_probe,
956};
957
958int __init hiddev_init(void)
959{
960 return usb_register(&hiddev_driver);
961}
962
963void hiddev_exit(void)
964{
965 usb_deregister(&hiddev_driver);
966}
diff --git a/drivers/hwmon/adm1025.c b/drivers/hwmon/adm1025.c
index 251b63165e2a..60befc0ee65f 100644
--- a/drivers/hwmon/adm1025.c
+++ b/drivers/hwmon/adm1025.c
@@ -12,7 +12,7 @@
12 * resolution of about 0.5% of the nominal value). Temperature values are 12 * resolution of about 0.5% of the nominal value). Temperature values are
13 * reported with a 1 deg resolution and a 3 deg accuracy. Complete 13 * reported with a 1 deg resolution and a 3 deg accuracy. Complete
14 * datasheet can be obtained from Analog's website at: 14 * datasheet can be obtained from Analog's website at:
15 * http://www.analog.com/Analog_Root/productPage/productHome/0,2121,ADM1025,00.html 15 * http://www.onsemi.com/PowerSolutions/product.do?id=ADM1025
16 * 16 *
17 * This driver also supports the ADM1025A, which differs from the ADM1025 17 * This driver also supports the ADM1025A, which differs from the ADM1025
18 * only in that it has "open-drain VID inputs while the ADM1025 has 18 * only in that it has "open-drain VID inputs while the ADM1025 has
diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c
index 65335b268fa9..4bf969c0a32b 100644
--- a/drivers/hwmon/adm1026.c
+++ b/drivers/hwmon/adm1026.c
@@ -6,7 +6,7 @@
6 6
7 Chip details at: 7 Chip details at:
8 8
9 <http://www.analog.com/UploadedFiles/Data_Sheets/779263102ADM1026_a.pdf> 9 <http://www.onsemi.com/PowerSolutions/product.do?id=ADM1026>
10 10
11 This program is free software; you can redistribute it and/or modify 11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by 12 it under the terms of the GNU General Public License as published by
diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
index 9638d58f99fd..95cbfb3a7077 100644
--- a/drivers/hwmon/f75375s.c
+++ b/drivers/hwmon/f75375s.c
@@ -6,10 +6,10 @@
6 * Datasheets available at: 6 * Datasheets available at:
7 * 7 *
8 * f75375: 8 * f75375:
9 * http://www.fintek.com.tw/files/productfiles/2005111152950.pdf 9 * http://www.fintek.com.tw/files/productfiles/F75375_V026P.pdf
10 * 10 *
11 * f75373: 11 * f75373:
12 * http://www.fintek.com.tw/files/productfiles/2005111153128.pdf 12 * http://www.fintek.com.tw/files/productfiles/F75373_V025P.pdf
13 * 13 *
14 * This program is free software; you can redistribute it and/or modify 14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by 15 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/hwmon/g760a.c b/drivers/hwmon/g760a.c
index 1f63d1a3af5e..1d6a6fa31fb4 100644
--- a/drivers/hwmon/g760a.c
+++ b/drivers/hwmon/g760a.c
@@ -5,7 +5,7 @@
5 Copyright (C) 2007 Herbert Valerio Riedel <hvr@gnu.org> 5 Copyright (C) 2007 Herbert Valerio Riedel <hvr@gnu.org>
6 6
7 Complete datasheet is available at GMT's website: 7 Complete datasheet is available at GMT's website:
8 http://www.gmt.com.tw/datasheet/g760a.pdf 8 http://www.gmt.com.tw/product/datasheet/EDS-760A.pdf
9 9
10 This program is free software; you can redistribute it and/or modify 10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by 11 it under the terms of the GNU General Public License as published by
diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c
index bf0862a803c0..2b2ca1694f95 100644
--- a/drivers/hwmon/hwmon-vid.c
+++ b/drivers/hwmon/hwmon-vid.c
@@ -38,7 +38,7 @@
38 * available at http://developer.intel.com/. 38 * available at http://developer.intel.com/.
39 * 39 *
40 * AMD Athlon 64 and AMD Opteron Processors, AMD Publication 26094, 40 * AMD Athlon 64 and AMD Opteron Processors, AMD Publication 26094,
41 * http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/26094.PDF 41 * http://support.amd.com/us/Processor_TechDocs/26094.PDF
42 * Table 74. VID Code Voltages 42 * Table 74. VID Code Voltages
43 * This corresponds to an arbitrary VRM code of 24 in the functions below. 43 * This corresponds to an arbitrary VRM code of 24 in the functions below.
44 * These CPU models (K8 revision <= E) have 5 VID pins. See also: 44 * These CPU models (K8 revision <= E) have 5 VID pins. See also:
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index 30f06e956bfb..b923074b2cbe 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -75,7 +75,8 @@ config I2C_HELPER_AUTO
75 In doubt, say Y. 75 In doubt, say Y.
76 76
77config I2C_SMBUS 77config I2C_SMBUS
78 tristate "SMBus-specific protocols" if !I2C_HELPER_AUTO 78 tristate
79 prompt "SMBus-specific protocols" if !I2C_HELPER_AUTO
79 help 80 help
80 Say Y here if you want support for SMBus extensions to the I2C 81 Say Y here if you want support for SMBus extensions to the I2C
81 specification. At the moment, the only supported extension is 82 specification. At the moment, the only supported extension is
diff --git a/drivers/i2c/Makefile b/drivers/i2c/Makefile
index c00fd66388f5..23ac61e2db39 100644
--- a/drivers/i2c/Makefile
+++ b/drivers/i2c/Makefile
@@ -9,6 +9,4 @@ obj-$(CONFIG_I2C_CHARDEV) += i2c-dev.o
9obj-$(CONFIG_I2C_MUX) += i2c-mux.o 9obj-$(CONFIG_I2C_MUX) += i2c-mux.o
10obj-y += algos/ busses/ muxes/ 10obj-y += algos/ busses/ muxes/
11 11
12ifeq ($(CONFIG_I2C_DEBUG_CORE),y) 12ccflags-$(CONFIG_I2C_DEBUG_CORE) := -DDEBUG
13EXTRA_CFLAGS += -DDEBUG
14endif
diff --git a/drivers/i2c/algos/Kconfig b/drivers/i2c/algos/Kconfig
index 7b2ce4a08524..3998dd620a03 100644
--- a/drivers/i2c/algos/Kconfig
+++ b/drivers/i2c/algos/Kconfig
@@ -15,3 +15,15 @@ config I2C_ALGOPCA
15 tristate "I2C PCA 9564 interfaces" 15 tristate "I2C PCA 9564 interfaces"
16 16
17endmenu 17endmenu
18
19# In automatic configuration mode, we still have to define the
20# symbols to avoid unmet dependencies.
21
22if I2C_HELPER_AUTO
23config I2C_ALGOBIT
24 tristate
25config I2C_ALGOPCF
26 tristate
27config I2C_ALGOPCA
28 tristate
29endif
diff --git a/drivers/i2c/algos/Makefile b/drivers/i2c/algos/Makefile
index 18b3e962ec09..215303f60d61 100644
--- a/drivers/i2c/algos/Makefile
+++ b/drivers/i2c/algos/Makefile
@@ -6,6 +6,4 @@ obj-$(CONFIG_I2C_ALGOBIT) += i2c-algo-bit.o
6obj-$(CONFIG_I2C_ALGOPCF) += i2c-algo-pcf.o 6obj-$(CONFIG_I2C_ALGOPCF) += i2c-algo-pcf.o
7obj-$(CONFIG_I2C_ALGOPCA) += i2c-algo-pca.o 7obj-$(CONFIG_I2C_ALGOPCA) += i2c-algo-pca.o
8 8
9ifeq ($(CONFIG_I2C_DEBUG_ALGO),y) 9ccflags-$(CONFIG_I2C_DEBUG_ALGO) := -DDEBUG
10EXTRA_CFLAGS += -DDEBUG
11endif
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index c3ef49230cba..033ad413f328 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -76,6 +76,4 @@ obj-$(CONFIG_I2C_STUB) += i2c-stub.o
76obj-$(CONFIG_SCx200_ACB) += scx200_acb.o 76obj-$(CONFIG_SCx200_ACB) += scx200_acb.o
77obj-$(CONFIG_SCx200_I2C) += scx200_i2c.o 77obj-$(CONFIG_SCx200_I2C) += scx200_i2c.o
78 78
79ifeq ($(CONFIG_I2C_DEBUG_BUS),y) 79ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG
80EXTRA_CFLAGS += -DDEBUG
81endif
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c
index af1e5e254b7b..6b6a6b1d7025 100644
--- a/drivers/i2c/busses/i2c-amd8111.c
+++ b/drivers/i2c/busses/i2c-amd8111.c
@@ -69,7 +69,7 @@ static struct pci_driver amd8111_driver;
69 * ACPI 2.0 chapter 13 access of registers of the EC 69 * ACPI 2.0 chapter 13 access of registers of the EC
70 */ 70 */
71 71
72static unsigned int amd_ec_wait_write(struct amd_smbus *smbus) 72static int amd_ec_wait_write(struct amd_smbus *smbus)
73{ 73{
74 int timeout = 500; 74 int timeout = 500;
75 75
@@ -85,7 +85,7 @@ static unsigned int amd_ec_wait_write(struct amd_smbus *smbus)
85 return 0; 85 return 0;
86} 86}
87 87
88static unsigned int amd_ec_wait_read(struct amd_smbus *smbus) 88static int amd_ec_wait_read(struct amd_smbus *smbus)
89{ 89{
90 int timeout = 500; 90 int timeout = 500;
91 91
@@ -101,7 +101,7 @@ static unsigned int amd_ec_wait_read(struct amd_smbus *smbus)
101 return 0; 101 return 0;
102} 102}
103 103
104static unsigned int amd_ec_read(struct amd_smbus *smbus, unsigned char address, 104static int amd_ec_read(struct amd_smbus *smbus, unsigned char address,
105 unsigned char *data) 105 unsigned char *data)
106{ 106{
107 int status; 107 int status;
@@ -124,7 +124,7 @@ static unsigned int amd_ec_read(struct amd_smbus *smbus, unsigned char address,
124 return 0; 124 return 0;
125} 125}
126 126
127static unsigned int amd_ec_write(struct amd_smbus *smbus, unsigned char address, 127static int amd_ec_write(struct amd_smbus *smbus, unsigned char address,
128 unsigned char data) 128 unsigned char data)
129{ 129{
130 int status; 130 int status;
@@ -196,7 +196,7 @@ static s32 amd8111_access(struct i2c_adapter * adap, u16 addr,
196{ 196{
197 struct amd_smbus *smbus = adap->algo_data; 197 struct amd_smbus *smbus = adap->algo_data;
198 unsigned char protocol, len, pec, temp[2]; 198 unsigned char protocol, len, pec, temp[2];
199 int i; 199 int i, status;
200 200
201 protocol = (read_write == I2C_SMBUS_READ) ? AMD_SMB_PRTCL_READ 201 protocol = (read_write == I2C_SMBUS_READ) ? AMD_SMB_PRTCL_READ
202 : AMD_SMB_PRTCL_WRITE; 202 : AMD_SMB_PRTCL_WRITE;
@@ -209,38 +209,62 @@ static s32 amd8111_access(struct i2c_adapter * adap, u16 addr,
209 break; 209 break;
210 210
211 case I2C_SMBUS_BYTE: 211 case I2C_SMBUS_BYTE:
212 if (read_write == I2C_SMBUS_WRITE) 212 if (read_write == I2C_SMBUS_WRITE) {
213 amd_ec_write(smbus, AMD_SMB_CMD, command); 213 status = amd_ec_write(smbus, AMD_SMB_CMD,
214 command);
215 if (status)
216 return status;
217 }
214 protocol |= AMD_SMB_PRTCL_BYTE; 218 protocol |= AMD_SMB_PRTCL_BYTE;
215 break; 219 break;
216 220
217 case I2C_SMBUS_BYTE_DATA: 221 case I2C_SMBUS_BYTE_DATA:
218 amd_ec_write(smbus, AMD_SMB_CMD, command); 222 status = amd_ec_write(smbus, AMD_SMB_CMD, command);
219 if (read_write == I2C_SMBUS_WRITE) 223 if (status)
220 amd_ec_write(smbus, AMD_SMB_DATA, data->byte); 224 return status;
225 if (read_write == I2C_SMBUS_WRITE) {
226 status = amd_ec_write(smbus, AMD_SMB_DATA,
227 data->byte);
228 if (status)
229 return status;
230 }
221 protocol |= AMD_SMB_PRTCL_BYTE_DATA; 231 protocol |= AMD_SMB_PRTCL_BYTE_DATA;
222 break; 232 break;
223 233
224 case I2C_SMBUS_WORD_DATA: 234 case I2C_SMBUS_WORD_DATA:
225 amd_ec_write(smbus, AMD_SMB_CMD, command); 235 status = amd_ec_write(smbus, AMD_SMB_CMD, command);
236 if (status)
237 return status;
226 if (read_write == I2C_SMBUS_WRITE) { 238 if (read_write == I2C_SMBUS_WRITE) {
227 amd_ec_write(smbus, AMD_SMB_DATA, 239 status = amd_ec_write(smbus, AMD_SMB_DATA,
228 data->word & 0xff); 240 data->word & 0xff);
229 amd_ec_write(smbus, AMD_SMB_DATA + 1, 241 if (status)
230 data->word >> 8); 242 return status;
243 status = amd_ec_write(smbus, AMD_SMB_DATA + 1,
244 data->word >> 8);
245 if (status)
246 return status;
231 } 247 }
232 protocol |= AMD_SMB_PRTCL_WORD_DATA | pec; 248 protocol |= AMD_SMB_PRTCL_WORD_DATA | pec;
233 break; 249 break;
234 250
235 case I2C_SMBUS_BLOCK_DATA: 251 case I2C_SMBUS_BLOCK_DATA:
236 amd_ec_write(smbus, AMD_SMB_CMD, command); 252 status = amd_ec_write(smbus, AMD_SMB_CMD, command);
253 if (status)
254 return status;
237 if (read_write == I2C_SMBUS_WRITE) { 255 if (read_write == I2C_SMBUS_WRITE) {
238 len = min_t(u8, data->block[0], 256 len = min_t(u8, data->block[0],
239 I2C_SMBUS_BLOCK_MAX); 257 I2C_SMBUS_BLOCK_MAX);
240 amd_ec_write(smbus, AMD_SMB_BCNT, len); 258 status = amd_ec_write(smbus, AMD_SMB_BCNT, len);
241 for (i = 0; i < len; i++) 259 if (status)
242 amd_ec_write(smbus, AMD_SMB_DATA + i, 260 return status;
243 data->block[i + 1]); 261 for (i = 0; i < len; i++) {
262 status =
263 amd_ec_write(smbus, AMD_SMB_DATA + i,
264 data->block[i + 1]);
265 if (status)
266 return status;
267 }
244 } 268 }
245 protocol |= AMD_SMB_PRTCL_BLOCK_DATA | pec; 269 protocol |= AMD_SMB_PRTCL_BLOCK_DATA | pec;
246 break; 270 break;
@@ -248,19 +272,35 @@ static s32 amd8111_access(struct i2c_adapter * adap, u16 addr,
248 case I2C_SMBUS_I2C_BLOCK_DATA: 272 case I2C_SMBUS_I2C_BLOCK_DATA:
249 len = min_t(u8, data->block[0], 273 len = min_t(u8, data->block[0],
250 I2C_SMBUS_BLOCK_MAX); 274 I2C_SMBUS_BLOCK_MAX);
251 amd_ec_write(smbus, AMD_SMB_CMD, command); 275 status = amd_ec_write(smbus, AMD_SMB_CMD, command);
252 amd_ec_write(smbus, AMD_SMB_BCNT, len); 276 if (status)
277 return status;
278 status = amd_ec_write(smbus, AMD_SMB_BCNT, len);
279 if (status)
280 return status;
253 if (read_write == I2C_SMBUS_WRITE) 281 if (read_write == I2C_SMBUS_WRITE)
254 for (i = 0; i < len; i++) 282 for (i = 0; i < len; i++) {
255 amd_ec_write(smbus, AMD_SMB_DATA + i, 283 status =
256 data->block[i + 1]); 284 amd_ec_write(smbus, AMD_SMB_DATA + i,
285 data->block[i + 1]);
286 if (status)
287 return status;
288 }
257 protocol |= AMD_SMB_PRTCL_I2C_BLOCK_DATA; 289 protocol |= AMD_SMB_PRTCL_I2C_BLOCK_DATA;
258 break; 290 break;
259 291
260 case I2C_SMBUS_PROC_CALL: 292 case I2C_SMBUS_PROC_CALL:
261 amd_ec_write(smbus, AMD_SMB_CMD, command); 293 status = amd_ec_write(smbus, AMD_SMB_CMD, command);
262 amd_ec_write(smbus, AMD_SMB_DATA, data->word & 0xff); 294 if (status)
263 amd_ec_write(smbus, AMD_SMB_DATA + 1, data->word >> 8); 295 return status;
296 status = amd_ec_write(smbus, AMD_SMB_DATA,
297 data->word & 0xff);
298 if (status)
299 return status;
300 status = amd_ec_write(smbus, AMD_SMB_DATA + 1,
301 data->word >> 8);
302 if (status)
303 return status;
264 protocol = AMD_SMB_PRTCL_PROC_CALL | pec; 304 protocol = AMD_SMB_PRTCL_PROC_CALL | pec;
265 read_write = I2C_SMBUS_READ; 305 read_write = I2C_SMBUS_READ;
266 break; 306 break;
@@ -268,11 +308,18 @@ static s32 amd8111_access(struct i2c_adapter * adap, u16 addr,
268 case I2C_SMBUS_BLOCK_PROC_CALL: 308 case I2C_SMBUS_BLOCK_PROC_CALL:
269 len = min_t(u8, data->block[0], 309 len = min_t(u8, data->block[0],
270 I2C_SMBUS_BLOCK_MAX - 1); 310 I2C_SMBUS_BLOCK_MAX - 1);
271 amd_ec_write(smbus, AMD_SMB_CMD, command); 311 status = amd_ec_write(smbus, AMD_SMB_CMD, command);
272 amd_ec_write(smbus, AMD_SMB_BCNT, len); 312 if (status)
273 for (i = 0; i < len; i++) 313 return status;
274 amd_ec_write(smbus, AMD_SMB_DATA + i, 314 status = amd_ec_write(smbus, AMD_SMB_BCNT, len);
275 data->block[i + 1]); 315 if (status)
316 return status;
317 for (i = 0; i < len; i++) {
318 status = amd_ec_write(smbus, AMD_SMB_DATA + i,
319 data->block[i + 1]);
320 if (status)
321 return status;
322 }
276 protocol = AMD_SMB_PRTCL_BLOCK_PROC_CALL | pec; 323 protocol = AMD_SMB_PRTCL_BLOCK_PROC_CALL | pec;
277 read_write = I2C_SMBUS_READ; 324 read_write = I2C_SMBUS_READ;
278 break; 325 break;
@@ -282,24 +329,29 @@ static s32 amd8111_access(struct i2c_adapter * adap, u16 addr,
282 return -EOPNOTSUPP; 329 return -EOPNOTSUPP;
283 } 330 }
284 331
285 amd_ec_write(smbus, AMD_SMB_ADDR, addr << 1); 332 status = amd_ec_write(smbus, AMD_SMB_ADDR, addr << 1);
286 amd_ec_write(smbus, AMD_SMB_PRTCL, protocol); 333 if (status)
334 return status;
335 status = amd_ec_write(smbus, AMD_SMB_PRTCL, protocol);
336 if (status)
337 return status;
287 338
288 /* FIXME this discards status from ec_read(); so temp[0] will 339 status = amd_ec_read(smbus, AMD_SMB_STS, temp + 0);
289 * hold stack garbage ... the rest of this routine will act 340 if (status)
290 * nonsensically. Ignored ec_write() status might explain 341 return status;
291 * some such failures...
292 */
293 amd_ec_read(smbus, AMD_SMB_STS, temp + 0);
294 342
295 if (~temp[0] & AMD_SMB_STS_DONE) { 343 if (~temp[0] & AMD_SMB_STS_DONE) {
296 udelay(500); 344 udelay(500);
297 amd_ec_read(smbus, AMD_SMB_STS, temp + 0); 345 status = amd_ec_read(smbus, AMD_SMB_STS, temp + 0);
346 if (status)
347 return status;
298 } 348 }
299 349
300 if (~temp[0] & AMD_SMB_STS_DONE) { 350 if (~temp[0] & AMD_SMB_STS_DONE) {
301 msleep(1); 351 msleep(1);
302 amd_ec_read(smbus, AMD_SMB_STS, temp + 0); 352 status = amd_ec_read(smbus, AMD_SMB_STS, temp + 0);
353 if (status)
354 return status;
303 } 355 }
304 356
305 if ((~temp[0] & AMD_SMB_STS_DONE) || (temp[0] & AMD_SMB_STS_STATUS)) 357 if ((~temp[0] & AMD_SMB_STS_DONE) || (temp[0] & AMD_SMB_STS_STATUS))
@@ -311,24 +363,35 @@ static s32 amd8111_access(struct i2c_adapter * adap, u16 addr,
311 switch (size) { 363 switch (size) {
312 case I2C_SMBUS_BYTE: 364 case I2C_SMBUS_BYTE:
313 case I2C_SMBUS_BYTE_DATA: 365 case I2C_SMBUS_BYTE_DATA:
314 amd_ec_read(smbus, AMD_SMB_DATA, &data->byte); 366 status = amd_ec_read(smbus, AMD_SMB_DATA, &data->byte);
367 if (status)
368 return status;
315 break; 369 break;
316 370
317 case I2C_SMBUS_WORD_DATA: 371 case I2C_SMBUS_WORD_DATA:
318 case I2C_SMBUS_PROC_CALL: 372 case I2C_SMBUS_PROC_CALL:
319 amd_ec_read(smbus, AMD_SMB_DATA, temp + 0); 373 status = amd_ec_read(smbus, AMD_SMB_DATA, temp + 0);
320 amd_ec_read(smbus, AMD_SMB_DATA + 1, temp + 1); 374 if (status)
375 return status;
376 status = amd_ec_read(smbus, AMD_SMB_DATA + 1, temp + 1);
377 if (status)
378 return status;
321 data->word = (temp[1] << 8) | temp[0]; 379 data->word = (temp[1] << 8) | temp[0];
322 break; 380 break;
323 381
324 case I2C_SMBUS_BLOCK_DATA: 382 case I2C_SMBUS_BLOCK_DATA:
325 case I2C_SMBUS_BLOCK_PROC_CALL: 383 case I2C_SMBUS_BLOCK_PROC_CALL:
326 amd_ec_read(smbus, AMD_SMB_BCNT, &len); 384 status = amd_ec_read(smbus, AMD_SMB_BCNT, &len);
385 if (status)
386 return status;
327 len = min_t(u8, len, I2C_SMBUS_BLOCK_MAX); 387 len = min_t(u8, len, I2C_SMBUS_BLOCK_MAX);
328 case I2C_SMBUS_I2C_BLOCK_DATA: 388 case I2C_SMBUS_I2C_BLOCK_DATA:
329 for (i = 0; i < len; i++) 389 for (i = 0; i < len; i++) {
330 amd_ec_read(smbus, AMD_SMB_DATA + i, 390 status = amd_ec_read(smbus, AMD_SMB_DATA + i,
331 data->block + i + 1); 391 data->block + i + 1);
392 if (status)
393 return status;
394 }
332 data->block[0] = len; 395 data->block[0] = len;
333 break; 396 break;
334 } 397 }
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index 89eedf45d30e..6e3c38240336 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -41,7 +41,6 @@
41#include <asm/irq.h> 41#include <asm/irq.h>
42#include <linux/io.h> 42#include <linux/io.h>
43#include <linux/i2c.h> 43#include <linux/i2c.h>
44#include <linux/i2c-id.h>
45#include <linux/of_platform.h> 44#include <linux/of_platform.h>
46#include <linux/of_i2c.h> 45#include <linux/of_i2c.h>
47 46
diff --git a/drivers/i2c/busses/i2c-nuc900.c b/drivers/i2c/busses/i2c-nuc900.c
index 92d770d7bbc2..72434263787b 100644
--- a/drivers/i2c/busses/i2c-nuc900.c
+++ b/drivers/i2c/busses/i2c-nuc900.c
@@ -16,7 +16,6 @@
16#include <linux/module.h> 16#include <linux/module.h>
17 17
18#include <linux/i2c.h> 18#include <linux/i2c.h>
19#include <linux/i2c-id.h>
20#include <linux/init.h> 19#include <linux/init.h>
21#include <linux/time.h> 20#include <linux/time.h>
22#include <linux/interrupt.h> 21#include <linux/interrupt.h>
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index 5f6d7f89e225..ace67995d7de 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -224,7 +224,7 @@ static int __devinit i2c_pca_pf_probe(struct platform_device *pdev)
224 224
225 if (irq) { 225 if (irq) {
226 ret = request_irq(irq, i2c_pca_pf_handler, 226 ret = request_irq(irq, i2c_pca_pf_handler,
227 IRQF_TRIGGER_FALLING, i2c->adap.name, i2c); 227 IRQF_TRIGGER_FALLING, pdev->name, i2c);
228 if (ret) 228 if (ret)
229 goto e_reqirq; 229 goto e_reqirq;
230 } 230 }
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index c94e51b2651e..f4c19a97e0b3 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -22,7 +22,6 @@
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/i2c.h> 24#include <linux/i2c.h>
25#include <linux/i2c-id.h>
26#include <linux/init.h> 25#include <linux/init.h>
27#include <linux/time.h> 26#include <linux/time.h>
28#include <linux/sched.h> 27#include <linux/sched.h>
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index bf831bf81587..6a292ea5e35c 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -24,7 +24,6 @@
24#include <linux/module.h> 24#include <linux/module.h>
25 25
26#include <linux/i2c.h> 26#include <linux/i2c.h>
27#include <linux/i2c-id.h>
28#include <linux/init.h> 27#include <linux/init.h>
29#include <linux/time.h> 28#include <linux/time.h>
30#include <linux/interrupt.h> 29#include <linux/interrupt.h>
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index 4c6fff5f330d..0b012f1f8ac5 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -185,14 +185,8 @@ static int vt596_transaction(u8 size)
185 } 185 }
186 186
187 if (temp & 0x04) { 187 if (temp & 0x04) {
188 int read = inb_p(SMBHSTADD) & 0x01;
189 result = -ENXIO; 188 result = -ENXIO;
190 /* The quick and receive byte commands are used to probe 189 dev_dbg(&vt596_adapter.dev, "No response\n");
191 for chips, so errors are expected, and we don't want
192 to frighten the user. */
193 if (!((size == VT596_QUICK && !read) ||
194 (size == VT596_BYTE && read)))
195 dev_err(&vt596_adapter.dev, "Transaction error!\n");
196 } 190 }
197 191
198 /* Resetting status register */ 192 /* Resetting status register */
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index bea4c5021d26..d231f683f576 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -425,14 +425,14 @@ static int __i2c_check_addr_busy(struct device *dev, void *addrp)
425/* walk up mux tree */ 425/* walk up mux tree */
426static int i2c_check_mux_parents(struct i2c_adapter *adapter, int addr) 426static int i2c_check_mux_parents(struct i2c_adapter *adapter, int addr)
427{ 427{
428 struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter);
428 int result; 429 int result;
429 430
430 result = device_for_each_child(&adapter->dev, &addr, 431 result = device_for_each_child(&adapter->dev, &addr,
431 __i2c_check_addr_busy); 432 __i2c_check_addr_busy);
432 433
433 if (!result && i2c_parent_is_i2c_adapter(adapter)) 434 if (!result && parent)
434 result = i2c_check_mux_parents( 435 result = i2c_check_mux_parents(parent, addr);
435 to_i2c_adapter(adapter->dev.parent), addr);
436 436
437 return result; 437 return result;
438} 438}
@@ -453,11 +453,11 @@ static int i2c_check_mux_children(struct device *dev, void *addrp)
453 453
454static int i2c_check_addr_busy(struct i2c_adapter *adapter, int addr) 454static int i2c_check_addr_busy(struct i2c_adapter *adapter, int addr)
455{ 455{
456 struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter);
456 int result = 0; 457 int result = 0;
457 458
458 if (i2c_parent_is_i2c_adapter(adapter)) 459 if (parent)
459 result = i2c_check_mux_parents( 460 result = i2c_check_mux_parents(parent, addr);
460 to_i2c_adapter(adapter->dev.parent), addr);
461 461
462 if (!result) 462 if (!result)
463 result = device_for_each_child(&adapter->dev, &addr, 463 result = device_for_each_child(&adapter->dev, &addr,
@@ -472,8 +472,10 @@ static int i2c_check_addr_busy(struct i2c_adapter *adapter, int addr)
472 */ 472 */
473void i2c_lock_adapter(struct i2c_adapter *adapter) 473void i2c_lock_adapter(struct i2c_adapter *adapter)
474{ 474{
475 if (i2c_parent_is_i2c_adapter(adapter)) 475 struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter);
476 i2c_lock_adapter(to_i2c_adapter(adapter->dev.parent)); 476
477 if (parent)
478 i2c_lock_adapter(parent);
477 else 479 else
478 rt_mutex_lock(&adapter->bus_lock); 480 rt_mutex_lock(&adapter->bus_lock);
479} 481}
@@ -485,8 +487,10 @@ EXPORT_SYMBOL_GPL(i2c_lock_adapter);
485 */ 487 */
486static int i2c_trylock_adapter(struct i2c_adapter *adapter) 488static int i2c_trylock_adapter(struct i2c_adapter *adapter)
487{ 489{
488 if (i2c_parent_is_i2c_adapter(adapter)) 490 struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter);
489 return i2c_trylock_adapter(to_i2c_adapter(adapter->dev.parent)); 491
492 if (parent)
493 return i2c_trylock_adapter(parent);
490 else 494 else
491 return rt_mutex_trylock(&adapter->bus_lock); 495 return rt_mutex_trylock(&adapter->bus_lock);
492} 496}
@@ -497,8 +501,10 @@ static int i2c_trylock_adapter(struct i2c_adapter *adapter)
497 */ 501 */
498void i2c_unlock_adapter(struct i2c_adapter *adapter) 502void i2c_unlock_adapter(struct i2c_adapter *adapter)
499{ 503{
500 if (i2c_parent_is_i2c_adapter(adapter)) 504 struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter);
501 i2c_unlock_adapter(to_i2c_adapter(adapter->dev.parent)); 505
506 if (parent)
507 i2c_unlock_adapter(parent);
502 else 508 else
503 rt_mutex_unlock(&adapter->bus_lock); 509 rt_mutex_unlock(&adapter->bus_lock);
504} 510}
@@ -677,8 +683,6 @@ i2c_sysfs_new_device(struct device *dev, struct device_attribute *attr,
677 char *blank, end; 683 char *blank, end;
678 int res; 684 int res;
679 685
680 dev_warn(dev, "The new_device interface is still experimental "
681 "and may change in a near future\n");
682 memset(&info, 0, sizeof(struct i2c_board_info)); 686 memset(&info, 0, sizeof(struct i2c_board_info));
683 687
684 blank = strchr(buf, ' '); 688 blank = strchr(buf, ' ');
@@ -1504,26 +1508,25 @@ static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver)
1504 if (!driver->detect || !address_list) 1508 if (!driver->detect || !address_list)
1505 return 0; 1509 return 0;
1506 1510
1511 /* Stop here if the classes do not match */
1512 if (!(adapter->class & driver->class))
1513 return 0;
1514
1507 /* Set up a temporary client to help detect callback */ 1515 /* Set up a temporary client to help detect callback */
1508 temp_client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL); 1516 temp_client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
1509 if (!temp_client) 1517 if (!temp_client)
1510 return -ENOMEM; 1518 return -ENOMEM;
1511 temp_client->adapter = adapter; 1519 temp_client->adapter = adapter;
1512 1520
1513 /* Stop here if the classes do not match */
1514 if (!(adapter->class & driver->class))
1515 goto exit_free;
1516
1517 for (i = 0; address_list[i] != I2C_CLIENT_END; i += 1) { 1521 for (i = 0; address_list[i] != I2C_CLIENT_END; i += 1) {
1518 dev_dbg(&adapter->dev, "found normal entry for adapter %d, " 1522 dev_dbg(&adapter->dev, "found normal entry for adapter %d, "
1519 "addr 0x%02x\n", adap_id, address_list[i]); 1523 "addr 0x%02x\n", adap_id, address_list[i]);
1520 temp_client->addr = address_list[i]; 1524 temp_client->addr = address_list[i];
1521 err = i2c_detect_address(temp_client, driver); 1525 err = i2c_detect_address(temp_client, driver);
1522 if (err) 1526 if (unlikely(err))
1523 goto exit_free; 1527 break;
1524 } 1528 }
1525 1529
1526 exit_free:
1527 kfree(temp_client); 1530 kfree(temp_client);
1528 return err; 1531 return err;
1529} 1532}
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 5f3a52d517c3..cec0f3ba97f8 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -192,13 +192,12 @@ static int i2cdev_check(struct device *dev, void *addrp)
192/* walk up mux tree */ 192/* walk up mux tree */
193static int i2cdev_check_mux_parents(struct i2c_adapter *adapter, int addr) 193static int i2cdev_check_mux_parents(struct i2c_adapter *adapter, int addr)
194{ 194{
195 struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter);
195 int result; 196 int result;
196 197
197 result = device_for_each_child(&adapter->dev, &addr, i2cdev_check); 198 result = device_for_each_child(&adapter->dev, &addr, i2cdev_check);
198 199 if (!result && parent)
199 if (!result && i2c_parent_is_i2c_adapter(adapter)) 200 result = i2cdev_check_mux_parents(parent, addr);
200 result = i2cdev_check_mux_parents(
201 to_i2c_adapter(adapter->dev.parent), addr);
202 201
203 return result; 202 return result;
204} 203}
@@ -222,11 +221,11 @@ static int i2cdev_check_mux_children(struct device *dev, void *addrp)
222 driver bound to it, as NOT busy. */ 221 driver bound to it, as NOT busy. */
223static int i2cdev_check_addr(struct i2c_adapter *adapter, unsigned int addr) 222static int i2cdev_check_addr(struct i2c_adapter *adapter, unsigned int addr)
224{ 223{
224 struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter);
225 int result = 0; 225 int result = 0;
226 226
227 if (i2c_parent_is_i2c_adapter(adapter)) 227 if (parent)
228 result = i2cdev_check_mux_parents( 228 result = i2cdev_check_mux_parents(parent, addr);
229 to_i2c_adapter(adapter->dev.parent), addr);
230 229
231 if (!result) 230 if (!result)
232 result = device_for_each_child(&adapter->dev, &addr, 231 result = device_for_each_child(&adapter->dev, &addr,
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index 4c9a99c4fcb0..4d91d80bfd23 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -5,6 +5,16 @@
5menu "Multiplexer I2C Chip support" 5menu "Multiplexer I2C Chip support"
6 depends on I2C_MUX 6 depends on I2C_MUX
7 7
8config I2C_MUX_PCA9541
9 tristate "NXP PCA9541 I2C Master Selector"
10 depends on EXPERIMENTAL
11 help
12 If you say yes here you get support for the NXP PCA9541
13 I2C Master Selector.
14
15 This driver can also be built as a module. If so, the module
16 will be called pca9541.
17
8config I2C_MUX_PCA954x 18config I2C_MUX_PCA954x
9 tristate "Philips PCA954x I2C Mux/switches" 19 tristate "Philips PCA954x I2C Mux/switches"
10 depends on EXPERIMENTAL 20 depends on EXPERIMENTAL
diff --git a/drivers/i2c/muxes/Makefile b/drivers/i2c/muxes/Makefile
index bd83b5274815..d743806d9b42 100644
--- a/drivers/i2c/muxes/Makefile
+++ b/drivers/i2c/muxes/Makefile
@@ -1,8 +1,7 @@
1# 1#
2# Makefile for multiplexer I2C chip drivers. 2# Makefile for multiplexer I2C chip drivers.
3 3
4obj-$(CONFIG_I2C_MUX_PCA9541) += pca9541.o
4obj-$(CONFIG_I2C_MUX_PCA954x) += pca954x.o 5obj-$(CONFIG_I2C_MUX_PCA954x) += pca954x.o
5 6
6ifeq ($(CONFIG_I2C_DEBUG_BUS),y) 7ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG
7EXTRA_CFLAGS += -DDEBUG
8endif
diff --git a/drivers/i2c/muxes/pca9541.c b/drivers/i2c/muxes/pca9541.c
new file mode 100644
index 000000000000..ed699c5aa79d
--- /dev/null
+++ b/drivers/i2c/muxes/pca9541.c
@@ -0,0 +1,411 @@
1/*
2 * I2C multiplexer driver for PCA9541 bus master selector
3 *
4 * Copyright (c) 2010 Ericsson AB.
5 *
6 * Author: Guenter Roeck <guenter.roeck@ericsson.com>
7 *
8 * Derived from:
9 * pca954x.c
10 *
11 * Copyright (c) 2008-2009 Rodolfo Giometti <giometti@linux.it>
12 * Copyright (c) 2008-2009 Eurotech S.p.A. <info@eurotech.it>
13 *
14 * This file is licensed under the terms of the GNU General Public
15 * License version 2. This program is licensed "as is" without any
16 * warranty of any kind, whether express or implied.
17 */
18
19#include <linux/module.h>
20#include <linux/init.h>
21#include <linux/jiffies.h>
22#include <linux/delay.h>
23#include <linux/slab.h>
24#include <linux/device.h>
25#include <linux/i2c.h>
26#include <linux/i2c-mux.h>
27
28#include <linux/i2c/pca954x.h>
29
30/*
31 * The PCA9541 is a bus master selector. It supports two I2C masters connected
32 * to a single slave bus.
33 *
34 * Before each bus transaction, a master has to acquire bus ownership. After the
35 * transaction is complete, bus ownership has to be released. This fits well
36 * into the I2C multiplexer framework, which provides select and release
37 * functions for this purpose. For this reason, this driver is modeled as
 38 * a single-channel I2C bus multiplexer.
39 *
40 * This driver assumes that the two bus masters are controlled by two different
41 * hosts. If a single host controls both masters, platform code has to ensure
42 * that only one of the masters is instantiated at any given time.
43 */
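As a rough illustration of the select/release flow described above, the sketch below (a hypothetical helper, not part of the driver) shows what the i2c-mux core effectively does around each transfer on the multiplexed adapter, using this driver's own callbacks pca9541_select_chan() and pca9541_release_chan(), which are defined further down in this file.

/* Hypothetical helper, for illustration only. */
static int pca9541_xfer_sketch(struct i2c_adapter *parent,
			       struct i2c_client *client,
			       struct i2c_msg *msgs, int num)
{
	int ret;

	/* Acquire ownership of the shared slave bus (single channel, 0). */
	ret = pca9541_select_chan(parent, client, 0);
	if (ret < 0)
		return ret;

	/* Run the transfer on the parent adapter while we own the bus. */
	ret = parent->algo->master_xfer(parent, msgs, num);

	/* Hand the bus back so the other master can take it. */
	pca9541_release_chan(parent, client, 0);

	return ret;
}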
44
45#define PCA9541_CONTROL 0x01
46#define PCA9541_ISTAT 0x02
47
48#define PCA9541_CTL_MYBUS (1 << 0)
49#define PCA9541_CTL_NMYBUS (1 << 1)
50#define PCA9541_CTL_BUSON (1 << 2)
51#define PCA9541_CTL_NBUSON (1 << 3)
52#define PCA9541_CTL_BUSINIT (1 << 4)
53#define PCA9541_CTL_TESTON (1 << 6)
54#define PCA9541_CTL_NTESTON (1 << 7)
55
56#define PCA9541_ISTAT_INTIN (1 << 0)
57#define PCA9541_ISTAT_BUSINIT (1 << 1)
58#define PCA9541_ISTAT_BUSOK (1 << 2)
59#define PCA9541_ISTAT_BUSLOST (1 << 3)
60#define PCA9541_ISTAT_MYTEST (1 << 6)
61#define PCA9541_ISTAT_NMYTEST (1 << 7)
62
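/*
 * mybus() is true when the two MYBUS control bits agree (both clear or both
 * set), which the code below treats as "this master owns the bus"; busoff()
 * is true when the two BUSON bits agree, i.e. the slave bus is switched off.
 */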
63#define BUSON (PCA9541_CTL_BUSON | PCA9541_CTL_NBUSON)
64#define MYBUS (PCA9541_CTL_MYBUS | PCA9541_CTL_NMYBUS)
65#define mybus(x) (!((x) & MYBUS) || ((x) & MYBUS) == MYBUS)
66#define busoff(x) (!((x) & BUSON) || ((x) & BUSON) == BUSON)
67
68/* arbitration timeouts, in jiffies */
69#define ARB_TIMEOUT (HZ / 8) /* 125 ms until forcing bus ownership */
70#define ARB2_TIMEOUT (HZ / 4) /* 250 ms until acquisition failure */
71
72/* arbitration retry delays, in us */
73#define SELECT_DELAY_SHORT 50
74#define SELECT_DELAY_LONG 1000
75
76struct pca9541 {
77 struct i2c_adapter *mux_adap;
78 unsigned long select_timeout;
79 unsigned long arb_timeout;
80};
81
82static const struct i2c_device_id pca9541_id[] = {
83 {"pca9541", 0},
84 {}
85};
86
87MODULE_DEVICE_TABLE(i2c, pca9541_id);
88
89/*
90 * Write to chip register. Don't use i2c_transfer()/i2c_smbus_xfer()
91 * as they will try to lock the adapter a second time.
92 */
93static int pca9541_reg_write(struct i2c_client *client, u8 command, u8 val)
94{
95 struct i2c_adapter *adap = client->adapter;
96 int ret;
97
98 if (adap->algo->master_xfer) {
99 struct i2c_msg msg;
100 char buf[2];
101
102 msg.addr = client->addr;
103 msg.flags = 0;
104 msg.len = 2;
105 buf[0] = command;
106 buf[1] = val;
107 msg.buf = buf;
108 ret = adap->algo->master_xfer(adap, &msg, 1);
109 } else {
110 union i2c_smbus_data data;
111
112 data.byte = val;
113 ret = adap->algo->smbus_xfer(adap, client->addr,
114 client->flags,
115 I2C_SMBUS_WRITE,
116 command,
117 I2C_SMBUS_BYTE_DATA, &data);
118 }
119
120 return ret;
121}
122
123/*
124 * Read from chip register. Don't use i2c_transfer()/i2c_smbus_xfer()
125 * as they will try to lock the adapter a second time.
126 */
127static int pca9541_reg_read(struct i2c_client *client, u8 command)
128{
129 struct i2c_adapter *adap = client->adapter;
130 int ret;
131 u8 val;
132
133 if (adap->algo->master_xfer) {
134 struct i2c_msg msg[2] = {
135 {
136 .addr = client->addr,
137 .flags = 0,
138 .len = 1,
139 .buf = &command
140 },
141 {
142 .addr = client->addr,
143 .flags = I2C_M_RD,
144 .len = 1,
145 .buf = &val
146 }
147 };
148 ret = adap->algo->master_xfer(adap, msg, 2);
149 if (ret == 2)
150 ret = val;
151 else if (ret >= 0)
152 ret = -EIO;
153 } else {
154 union i2c_smbus_data data;
155
156 ret = adap->algo->smbus_xfer(adap, client->addr,
157 client->flags,
158 I2C_SMBUS_READ,
159 command,
160 I2C_SMBUS_BYTE_DATA, &data);
161 if (!ret)
162 ret = data.byte;
163 }
164 return ret;
165}
166
167/*
168 * Arbitration management functions
169 */
170
171/* Release the bus. Also reset NTESTON and BUSINIT if they were set. */
172static void pca9541_release_bus(struct i2c_client *client)
173{
174 int reg;
175
176 reg = pca9541_reg_read(client, PCA9541_CONTROL);
177 if (reg >= 0 && !busoff(reg) && mybus(reg))
178 pca9541_reg_write(client, PCA9541_CONTROL,
179 (reg & PCA9541_CTL_NBUSON) >> 1);
180}
181
182/*
183 * Arbitration is defined as a two-step process. A bus master can only activate
184 * the slave bus if it owns it; otherwise it has to request ownership first.
185 * This multi-step process ensures that access contention is resolved
186 * gracefully.
187 *
188 * Bus     Ownership   Other master   Action
189 * state   requested   access
190 * ------------------------------------------------------------------------
191 * off     -           yes            wait for arbitration timeout or
192 *                                    for other master to drop request
193 * off     no          no             take ownership
194 * off     yes         no             turn on bus
195 * on      yes         -              done
196 * on      no          -              wait for arbitration timeout or
197 *                                    for other master to release bus
198 *
199 * The main contention point occurs if the slave bus is off and both masters
200 * request ownership at the same time. In this case, one master will turn on
201 * the slave bus, believing that it owns it. The other master will request
202 * bus ownership. The result is that the bus is turned on, and the master
203 * which did _not_ own the slave bus before ends up owning it.
204 */
205
206/* Control commands per PCA9541 datasheet */
207static const u8 pca9541_control[16] = {
208 4, 0, 1, 5, 4, 4, 5, 5, 0, 0, 1, 1, 0, 4, 5, 1
209};
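/*
 * Note (not part of the original patch): pca9541_arbitrate() below indexes
 * this table with the low four bits of the control register (reg & 0x0f)
 * and ORs in PCA9541_CTL_NTESTON -- plus PCA9541_CTL_BUSINIT when forcing
 * ownership after the arbitration timeout -- before writing the result back.
 */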
210
211/*
212 * Channel arbitration
213 *
214 * Return values:
215 * <0: error
216 * 0 : bus not acquired
217 * 1 : bus acquired
218 */
219static int pca9541_arbitrate(struct i2c_client *client)
220{
221 struct pca9541 *data = i2c_get_clientdata(client);
222 int reg;
223
224 reg = pca9541_reg_read(client, PCA9541_CONTROL);
225 if (reg < 0)
226 return reg;
227
228 if (busoff(reg)) {
229 int istat;
230 /*
231 * Bus is off. Request ownership or turn it on unless
232 * other master requested ownership.
233 */
234 istat = pca9541_reg_read(client, PCA9541_ISTAT);
235 if (!(istat & PCA9541_ISTAT_NMYTEST)
236 || time_is_before_eq_jiffies(data->arb_timeout)) {
237 /*
238 * Other master did not request ownership,
239 * or arbitration timeout expired. Take the bus.
240 */
241 pca9541_reg_write(client,
242 PCA9541_CONTROL,
243 pca9541_control[reg & 0x0f]
244 | PCA9541_CTL_NTESTON);
245 data->select_timeout = SELECT_DELAY_SHORT;
246 } else {
247 /*
248 * Other master requested ownership.
249 * Set extra long timeout to give it time to acquire it.
250 */
251 data->select_timeout = SELECT_DELAY_LONG * 2;
252 }
253 } else if (mybus(reg)) {
254 /*
255 * Bus is on, and we own it. We are done with acquisition.
256 * Reset NTESTON and BUSINIT, then return success.
257 */
258 if (reg & (PCA9541_CTL_NTESTON | PCA9541_CTL_BUSINIT))
259 pca9541_reg_write(client,
260 PCA9541_CONTROL,
261 reg & ~(PCA9541_CTL_NTESTON
262 | PCA9541_CTL_BUSINIT));
263 return 1;
264 } else {
265 /*
266 * Other master owns the bus.
267 * If arbitration timeout has expired, force ownership.
268 * Otherwise request it.
269 */
270 data->select_timeout = SELECT_DELAY_LONG;
271 if (time_is_before_eq_jiffies(data->arb_timeout)) {
272 /* Time is up, take the bus and reset it. */
273 pca9541_reg_write(client,
274 PCA9541_CONTROL,
275 pca9541_control[reg & 0x0f]
276 | PCA9541_CTL_BUSINIT
277 | PCA9541_CTL_NTESTON);
278 } else {
279 /* Request bus ownership if needed */
280 if (!(reg & PCA9541_CTL_NTESTON))
281 pca9541_reg_write(client,
282 PCA9541_CONTROL,
283 reg | PCA9541_CTL_NTESTON);
284 }
285 }
286 return 0;
287}
288
289static int pca9541_select_chan(struct i2c_adapter *adap, void *client, u32 chan)
290{
291 struct pca9541 *data = i2c_get_clientdata(client);
292 int ret;
293 unsigned long timeout = jiffies + ARB2_TIMEOUT;
294 /* give up after this time */
295
296 data->arb_timeout = jiffies + ARB_TIMEOUT;
297 /* force bus ownership after this time */
298
299 do {
300 ret = pca9541_arbitrate(client);
301 if (ret)
302 return ret < 0 ? ret : 0;
303
304 if (data->select_timeout == SELECT_DELAY_SHORT)
305 udelay(data->select_timeout);
306 else
307 msleep(data->select_timeout / 1000);
308 } while (time_is_after_eq_jiffies(timeout));
309
310 return -ETIMEDOUT;
311}
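/*
 * Note (not part of the original patch): select_timeout is kept in
 * microseconds.  The 50 us retry delay is busy-waited with udelay(), while
 * the 1000 us and 2000 us delays are converted to milliseconds and slept
 * with msleep(); the retry loop as a whole is bounded by ARB2_TIMEOUT
 * (roughly 250 ms) before -ETIMEDOUT is returned.
 */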
312
313static int pca9541_release_chan(struct i2c_adapter *adap,
314 void *client, u32 chan)
315{
316 pca9541_release_bus(client);
317 return 0;
318}
319
320/*
321 * I2C init/probing/exit functions
322 */
323static int pca9541_probe(struct i2c_client *client,
324 const struct i2c_device_id *id)
325{
326 struct i2c_adapter *adap = client->adapter;
327 struct pca954x_platform_data *pdata = client->dev.platform_data;
328 struct pca9541 *data;
329 int force;
330 int ret = -ENODEV;
331
332 if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_BYTE_DATA))
333 goto err;
334
335 data = kzalloc(sizeof(struct pca9541), GFP_KERNEL);
336 if (!data) {
337 ret = -ENOMEM;
338 goto err;
339 }
340
341 i2c_set_clientdata(client, data);
342
343 /*
344 * I2C accesses are unprotected here.
345 * We have to lock the adapter before releasing the bus.
346 */
347 i2c_lock_adapter(adap);
348 pca9541_release_bus(client);
349 i2c_unlock_adapter(adap);
350
351 /* Create mux adapter */
352
353 force = 0;
354 if (pdata)
355 force = pdata->modes[0].adap_id;
356 data->mux_adap = i2c_add_mux_adapter(adap, client, force, 0,
357 pca9541_select_chan,
358 pca9541_release_chan);
359
360 if (data->mux_adap == NULL) {
361 dev_err(&client->dev, "failed to register master selector\n");
362 goto exit_free;
363 }
364
365 dev_info(&client->dev, "registered master selector for I2C %s\n",
366 client->name);
367
368 return 0;
369
370exit_free:
371 kfree(data);
372err:
373 return ret;
374}
375
376static int pca9541_remove(struct i2c_client *client)
377{
378 struct pca9541 *data = i2c_get_clientdata(client);
379
380 i2c_del_mux_adapter(data->mux_adap);
381
382 kfree(data);
383 return 0;
384}
385
386static struct i2c_driver pca9541_driver = {
387 .driver = {
388 .name = "pca9541",
389 .owner = THIS_MODULE,
390 },
391 .probe = pca9541_probe,
392 .remove = pca9541_remove,
393 .id_table = pca9541_id,
394};
395
396static int __init pca9541_init(void)
397{
398 return i2c_add_driver(&pca9541_driver);
399}
400
401static void __exit pca9541_exit(void)
402{
403 i2c_del_driver(&pca9541_driver);
404}
405
406module_init(pca9541_init);
407module_exit(pca9541_exit);
408
409MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
410MODULE_DESCRIPTION("PCA9541 I2C master selector driver");
411MODULE_LICENSE("GPL v2");
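
The new driver takes its downstream adapter number from the same platform data
used by pca954x (pca9541_probe() above reads pdata->modes[0].adap_id). A minimal
board-file sketch of how a platform might instantiate the device follows; it is
not part of this patch, the I2C address 0x74, upstream bus 0 and adapter id 3
are purely illustrative, and the platform-data field names assume the layout of
include/linux/i2c/pca954x.h.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/i2c/pca954x.h>

/* single downstream channel: the driver creates one mux adapter */
static struct pca954x_platform_mode pca9541_modes[] = {
	{ .adap_id = 3 },	/* fixed bus number for the mux adapter */
};

static struct pca954x_platform_data pca9541_pdata = {
	.modes		= pca9541_modes,
	.num_modes	= ARRAY_SIZE(pca9541_modes),
};

static struct i2c_board_info pca9541_info __initdata = {
	I2C_BOARD_INFO("pca9541", 0x74),
	.platform_data	= &pca9541_pdata,
};

/* called from the board's init code, before the upstream adapter (bus 0)
 * is registered */
static void __init board_register_pca9541(void)
{
	i2c_register_board_info(0, &pca9541_info, 1);
}

Once probed, pca9541_select_chan()/pca9541_release_chan() arbitrate for the
shared downstream bus transparently whenever a client behind the mux adapter
is accessed.
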
diff --git a/drivers/i2c/muxes/pca954x.c b/drivers/i2c/muxes/pca954x.c
index 6f9accf3189d..54e1ce73534b 100644
--- a/drivers/i2c/muxes/pca954x.c
+++ b/drivers/i2c/muxes/pca954x.c
@@ -181,8 +181,8 @@ static int pca954x_deselect_mux(struct i2c_adapter *adap,
181/* 181/*
182 * I2C init/probing/exit functions 182 * I2C init/probing/exit functions
183 */ 183 */
184static int __devinit pca954x_probe(struct i2c_client *client, 184static int pca954x_probe(struct i2c_client *client,
185 const struct i2c_device_id *id) 185 const struct i2c_device_id *id)
186{ 186{
187 struct i2c_adapter *adap = to_i2c_adapter(client->dev.parent); 187 struct i2c_adapter *adap = to_i2c_adapter(client->dev.parent);
188 struct pca954x_platform_data *pdata = client->dev.platform_data; 188 struct pca954x_platform_data *pdata = client->dev.platform_data;
@@ -255,7 +255,7 @@ err:
255 return ret; 255 return ret;
256} 256}
257 257
258static int __devexit pca954x_remove(struct i2c_client *client) 258static int pca954x_remove(struct i2c_client *client)
259{ 259{
260 struct pca954x *data = i2c_get_clientdata(client); 260 struct pca954x *data = i2c_get_clientdata(client);
261 const struct chip_desc *chip = &chips[data->type]; 261 const struct chip_desc *chip = &chips[data->type];
@@ -279,7 +279,7 @@ static struct i2c_driver pca954x_driver = {
279 .owner = THIS_MODULE, 279 .owner = THIS_MODULE,
280 }, 280 },
281 .probe = pca954x_probe, 281 .probe = pca954x_probe,
282 .remove = __devexit_p(pca954x_remove), 282 .remove = pca954x_remove,
283 .id_table = pca954x_id, 283 .id_table = pca954x_id,
284}; 284};
285 285
diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
index 45163693f737..97d98fbf5849 100644
--- a/drivers/ide/hpt366.c
+++ b/drivers/ide/hpt366.c
@@ -12,7 +12,7 @@
12 * 12 *
13 * 13 *
14 * HighPoint has its own drivers (open source except for the RAID part) 14 * HighPoint has its own drivers (open source except for the RAID part)
15 * available from http://www.highpoint-tech.com/BIOS%20+%20Driver/. 15 * available from http://www.highpoint-tech.com/USA_new/service_support.htm
16 * This may be useful to anyone wanting to work on this driver, however do not 16 * This may be useful to anyone wanting to work on this driver, however do not
17 * trust them too much since the code tends to become less and less meaningful 17 * trust them too much since the code tends to become less and less meaningful
18 * as the time passes... :-/ 18 * as the time passes... :-/
diff --git a/drivers/ide/ht6560b.c b/drivers/ide/ht6560b.c
index d81e49680c3f..808bcdcbf8e1 100644
--- a/drivers/ide/ht6560b.c
+++ b/drivers/ide/ht6560b.c
@@ -10,7 +10,6 @@
10 * Author: Mikko Ala-Fossi <maf@iki.fi> 10 * Author: Mikko Ala-Fossi <maf@iki.fi>
11 * Jan Evert van Grootheest <j.e.van.grootheest@caiway.nl> 11 * Jan Evert van Grootheest <j.e.van.grootheest@caiway.nl>
12 * 12 *
13 * Try: http://www.maf.iki.fi/~maf/ht6560b/
14 */ 13 */
15 14
16#define DRV_NAME "ht6560b" 15#define DRV_NAME "ht6560b"
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 7c5b01ce51d2..274798068a54 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -435,12 +435,11 @@ static int idedisk_prep_fn(struct request_queue *q, struct request *rq)
435 if (!(rq->cmd_flags & REQ_FLUSH)) 435 if (!(rq->cmd_flags & REQ_FLUSH))
436 return BLKPREP_OK; 436 return BLKPREP_OK;
437 437
438 cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC); 438 cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
439 439
440 /* FIXME: map struct ide_taskfile on rq->cmd[] */ 440 /* FIXME: map struct ide_taskfile on rq->cmd[] */
441 BUG_ON(cmd == NULL); 441 BUG_ON(cmd == NULL);
442 442
443 memset(cmd, 0, sizeof(*cmd));
444 if (ata_id_flush_ext_enabled(drive->id) && 443 if (ata_id_flush_ext_enabled(drive->id) &&
445 (drive->capacity64 >= (1UL << 28))) 444 (drive->capacity64 >= (1UL << 28)))
446 cmd->tf.command = ATA_CMD_FLUSH_EXT; 445 cmd->tf.command = ATA_CMD_FLUSH_EXT;
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 89d70de5e235..6e35eccc9caa 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -16,7 +16,7 @@ config INFINIBAND_USER_MAD
16 Userspace InfiniBand Management Datagram (MAD) support. This 16 Userspace InfiniBand Management Datagram (MAD) support. This
17 is the kernel side of the userspace MAD support, which allows 17 is the kernel side of the userspace MAD support, which allows
18 userspace processes to send and receive MADs. You will also 18 userspace processes to send and receive MADs. You will also
19 need libibumad from <http://www.openib.org>. 19 need libibumad from <http://www.openfabrics.org/downloads/management/>.
20 20
21config INFINIBAND_USER_ACCESS 21config INFINIBAND_USER_ACCESS
22 tristate "InfiniBand userspace access (verbs and CM)" 22 tristate "InfiniBand userspace access (verbs and CM)"
@@ -28,7 +28,7 @@ config INFINIBAND_USER_ACCESS
28 to set up connections and directly access InfiniBand 28 to set up connections and directly access InfiniBand
29 hardware for fast-path operations. You will also need 29 hardware for fast-path operations. You will also need
30 libibverbs, libibcm and a hardware driver library from 30 libibverbs, libibcm and a hardware driver library from
31 <http://www.openib.org>. 31 <http://www.openfabrics.org/git/>.
32 32
33config INFINIBAND_USER_MEM 33config INFINIBAND_USER_MEM
34 bool 34 bool
diff --git a/drivers/infiniband/hw/cxgb3/Kconfig b/drivers/infiniband/hw/cxgb3/Kconfig
index 2acec3fadf69..2b6352b85485 100644
--- a/drivers/infiniband/hw/cxgb3/Kconfig
+++ b/drivers/infiniband/hw/cxgb3/Kconfig
@@ -10,7 +10,7 @@ config INFINIBAND_CXGB3
10 our website at <http://www.chelsio.com>. 10 our website at <http://www.chelsio.com>.
11 11
12 For customer support, please visit our customer support page at 12 For customer support, please visit our customer support page at
13 <http://www.chelsio.com/support.htm>. 13 <http://www.chelsio.com/support.html>.
14 14
15 Please send feedback to <linux-bugs@chelsio.com>. 15 Please send feedback to <linux-bugs@chelsio.com>.
16 16
diff --git a/drivers/infiniband/hw/cxgb4/Kconfig b/drivers/infiniband/hw/cxgb4/Kconfig
index ccb85eaaad75..6b7e6c543534 100644
--- a/drivers/infiniband/hw/cxgb4/Kconfig
+++ b/drivers/infiniband/hw/cxgb4/Kconfig
@@ -10,7 +10,7 @@ config INFINIBAND_CXGB4
10 our website at <http://www.chelsio.com>. 10 our website at <http://www.chelsio.com>.
11 11
12 For customer support, please visit our customer support page at 12 For customer support, please visit our customer support page at
13 <http://www.chelsio.com/support.htm>. 13 <http://www.chelsio.com/support.html>.
14 14
15 Please send feedback to <linux-bugs@chelsio.com>. 15 Please send feedback to <linux-bugs@chelsio.com>.
16 16
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index aa2be214270f..79d9971aff1f 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1723,7 +1723,7 @@ static int qib_close(struct inode *in, struct file *fp)
1723 1723
1724 mutex_lock(&qib_mutex); 1724 mutex_lock(&qib_mutex);
1725 1725
1726 fd = (struct qib_filedata *) fp->private_data; 1726 fd = fp->private_data;
1727 fp->private_data = NULL; 1727 fp->private_data = NULL;
1728 rcd = fd->rcd; 1728 rcd = fd->rcd;
1729 if (!rcd) { 1729 if (!rcd) {
@@ -1809,7 +1809,7 @@ static int qib_ctxt_info(struct file *fp, struct qib_ctxt_info __user *uinfo)
1809 struct qib_ctxtdata *rcd = ctxt_fp(fp); 1809 struct qib_ctxtdata *rcd = ctxt_fp(fp);
1810 struct qib_filedata *fd; 1810 struct qib_filedata *fd;
1811 1811
1812 fd = (struct qib_filedata *) fp->private_data; 1812 fd = fp->private_data;
1813 1813
1814 info.num_active = qib_count_active_units(); 1814 info.num_active = qib_count_active_units();
1815 info.unit = rcd->dd->unit; 1815 info.unit = rcd->dd->unit;
diff --git a/drivers/infiniband/ulp/iser/Kconfig b/drivers/infiniband/ulp/iser/Kconfig
index b411c51842da..d00af71a2cfc 100644
--- a/drivers/infiniband/ulp/iser/Kconfig
+++ b/drivers/infiniband/ulp/iser/Kconfig
@@ -9,4 +9,4 @@ config INFINIBAND_ISER
9 9
10 The iSER protocol is defined by IETF. 10 The iSER protocol is defined by IETF.
11 See <http://www.ietf.org/rfc/rfc5046.txt> 11 See <http://www.ietf.org/rfc/rfc5046.txt>
12 and <http://www.infinibandta.org/members/spec/Annex_iSER.PDF> 12 and <http://members.infinibandta.org/kwspub/spec/Annex_iSER.PDF>
diff --git a/drivers/input/joystick/gamecon.c b/drivers/input/joystick/gamecon.c
index 0ffaf2c77a19..e68e49786483 100644
--- a/drivers/input/joystick/gamecon.c
+++ b/drivers/input/joystick/gamecon.c
@@ -521,9 +521,8 @@ static void gc_multi_process_packet(struct gc *gc)
521 * PSX support 521 * PSX support
522 * 522 *
523 * See documentation at: 523 * See documentation at:
524 * http://www.dim.com/~mackys/psxmemcard/ps-eng2.txt 524 * http://www.geocities.co.jp/Playtown/2004/psx/ps_eng.txt
525 * http://www.gamesx.com/controldata/psxcont/psxcont.htm 525 * http://www.gamesx.com/controldata/psxcont/psxcont.htm
526 * ftp://milano.usal.es/pablo/
527 * 526 *
528 */ 527 */
529 528
diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c
index 2b0eba6619bd..b09c7d127219 100644
--- a/drivers/input/misc/cm109.c
+++ b/drivers/input/misc/cm109.c
@@ -259,7 +259,7 @@ static unsigned short keymap_usbph01(int scancode)
259 259
260/* 260/*
261 * Keymap for ATCom AU-100 261 * Keymap for ATCom AU-100
262 * http://www.atcom.cn/En_products_AU100.html 262 * http://www.atcom.cn/products.html
263 * http://www.packetizer.com/products/au100/ 263 * http://www.packetizer.com/products/au100/
264 * http://www.voip-info.org/wiki/view/AU-100 264 * http://www.voip-info.org/wiki/view/AU-100
265 * 265 *
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index c714ca2407f8..bf5fd7f6a313 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -30,6 +30,7 @@ config MOUSE_PS2
30 <http://w1.894.telia.com/~u89404340/touchpad/index.html> 30 <http://w1.894.telia.com/~u89404340/touchpad/index.html>
31 and a new version of GPM at: 31 and a new version of GPM at:
32 <http://www.geocities.com/dt_or/gpm/gpm.html> 32 <http://www.geocities.com/dt_or/gpm/gpm.html>
33 <http://xorg.freedesktop.org/archive/individual/driver/>
33 to take advantage of the advanced features of the touchpad. 34 to take advantage of the advanced features of the touchpad.
34 35
35 If unsure, say Y. 36 If unsure, say Y.
diff --git a/drivers/input/mouse/touchkit_ps2.c b/drivers/input/mouse/touchkit_ps2.c
index 88121c59c3cc..1fd8f5e192f9 100644
--- a/drivers/input/mouse/touchkit_ps2.c
+++ b/drivers/input/mouse/touchkit_ps2.c
@@ -21,8 +21,8 @@
21 * 21 *
22 * Based upon touchkitusb.c 22 * Based upon touchkitusb.c
23 * 23 *
24 * Vendor documentation is available in support section of: 24 * Vendor documentation is available at:
25 * http://www.egalax.com.tw/ 25 * http://home.eeti.com.tw/web20/drivers/Software%20Programming%20Guide_v2.0.pdf
26 */ 26 */
27 27
28#include <linux/kernel.h> 28#include <linux/kernel.h>
diff --git a/drivers/input/touchscreen/mk712.c b/drivers/input/touchscreen/mk712.c
index efd3aebaba5f..36e57deacd03 100644
--- a/drivers/input/touchscreen/mk712.c
+++ b/drivers/input/touchscreen/mk712.c
@@ -17,7 +17,7 @@
17 * found in Gateway AOL Connected Touchpad computers. 17 * found in Gateway AOL Connected Touchpad computers.
18 * 18 *
19 * Documentation for ICS MK712 can be found at: 19 * Documentation for ICS MK712 can be found at:
20 * http://www.icst.com/pdf/mk712.pdf 20 * http://www.idt.com/products/getDoc.cfm?docID=18713923
21 */ 21 */
22 22
23/* 23/*
diff --git a/drivers/isdn/i4l/isdn_audio.c b/drivers/isdn/i4l/isdn_audio.c
index 861bdf3421f2..d5013935ac62 100644
--- a/drivers/isdn/i4l/isdn_audio.c
+++ b/drivers/isdn/i4l/isdn_audio.c
@@ -439,7 +439,7 @@ isdn_audio_xlaw2adpcm(adpcm_state * s, int fmt, unsigned char *in,
439 439
440/* 440/*
441 * Goertzel algorithm. 441 * Goertzel algorithm.
442 * See http://ptolemy.eecs.berkeley.edu/~pino/Ptolemy/papers/96/dtmf_ict/ 442 * See http://ptolemy.eecs.berkeley.edu/papers/96/dtmf_ict/
443 * for more info. 443 * for more info.
444 * Result is stored into an sk_buff and queued up for later 444 * Result is stored into an sk_buff and queued up for later
445 * evaluation. 445 * evaluation.
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index d0d221332db0..9e3e2c566598 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -3,9 +3,9 @@
3 * 3 *
4 * Copyright (C) 2003, 2004 Colin Leroy, Rasmus Rohde, Benjamin Herrenschmidt 4 * Copyright (C) 2003, 2004 Colin Leroy, Rasmus Rohde, Benjamin Herrenschmidt
5 * 5 *
6 * Documentation from 6 * Documentation from 115254175ADT7467_pra.pdf and 3686221171167ADT7460_b.pdf
7 * http://www.analog.com/UploadedFiles/Data_Sheets/115254175ADT7467_pra.pdf 7 * http://www.onsemi.com/PowerSolutions/product.do?id=ADT7467
8 * http://www.analog.com/UploadedFiles/Data_Sheets/3686221171167ADT7460_b.pdf 8 * http://www.onsemi.com/PowerSolutions/product.do?id=ADT7460
9 * 9 *
10 */ 10 */
11 11
diff --git a/drivers/media/IR/keymaps/rc-manli.c b/drivers/media/IR/keymaps/rc-manli.c
index 1e9fbfa90a1e..0f590b3d01c0 100644
--- a/drivers/media/IR/keymaps/rc-manli.c
+++ b/drivers/media/IR/keymaps/rc-manli.c
@@ -13,7 +13,6 @@
13#include <media/rc-map.h> 13#include <media/rc-map.h>
14 14
15/* Michael Tokarev <mjt@tls.msk.ru> 15/* Michael Tokarev <mjt@tls.msk.ru>
16 http://www.corpit.ru/mjt/beholdTV/remote_control.jpg
17 keytable is used by MANLI MTV00[0x0c] and BeholdTV 40[13] at 16 keytable is used by MANLI MTV00[0x0c] and BeholdTV 40[13] at
18 least, and probably other cards too. 17 least, and probably other cards too.
19 The "ascii-art picture" below (in comments, first row 18 The "ascii-art picture" below (in comments, first row
diff --git a/drivers/media/dvb/ttpci/av7110.c b/drivers/media/dvb/ttpci/av7110.c
index 893fbc57c72f..a12b88f53ed9 100644
--- a/drivers/media/dvb/ttpci/av7110.c
+++ b/drivers/media/dvb/ttpci/av7110.c
@@ -26,7 +26,7 @@
26 * Or, point your browser to http://www.gnu.org/copyleft/gpl.html 26 * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
27 * 27 *
28 * 28 *
29 * the project's page is at http://www.linuxtv.org/dvb/ 29 * the project's page is at http://www.linuxtv.org/
30 */ 30 */
31 31
32 32
@@ -2291,12 +2291,7 @@ static int frontend_init(struct av7110 *av7110)
2291/* Budgetpatch note: 2291/* Budgetpatch note:
2292 * Original hardware design by Roberto Deza: 2292 * Original hardware design by Roberto Deza:
2293 * There is a DVB_Wiki at 2293 * There is a DVB_Wiki at
2294 * http://212.227.36.83/linuxtv/wiki/index.php/Main_Page 2294 * http://www.linuxtv.org/
2295 * where is described this 'DVB TT Budget Patch', on Card Modding:
2296 * http://212.227.36.83/linuxtv/wiki/index.php/DVB_TT_Budget_Patch
2297 * On the short description there is also a link to a external file,
2298 * with more details:
2299 * http://perso.wanadoo.es/jesussolano/Ttf_tsc1.zip
2300 * 2295 *
2301 * New software triggering design by Emard that works on 2296 * New software triggering design by Emard that works on
2302 * original Roberto Deza's hardware: 2297 * original Roberto Deza's hardware:
diff --git a/drivers/media/dvb/ttpci/av7110_av.c b/drivers/media/dvb/ttpci/av7110_av.c
index 6ef3996565ad..244d5d51f5f9 100644
--- a/drivers/media/dvb/ttpci/av7110_av.c
+++ b/drivers/media/dvb/ttpci/av7110_av.c
@@ -25,7 +25,7 @@
25 * Or, point your browser to http://www.gnu.org/copyleft/gpl.html 25 * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
26 * 26 *
27 * 27 *
28 * the project's page is at http://www.linuxtv.org/dvb/ 28 * the project's page is at http://www.linuxtv.org/
29 */ 29 */
30 30
31#include <linux/types.h> 31#include <linux/types.h>
diff --git a/drivers/media/dvb/ttpci/av7110_ca.c b/drivers/media/dvb/ttpci/av7110_ca.c
index 43f61f2eca98..122c72806916 100644
--- a/drivers/media/dvb/ttpci/av7110_ca.c
+++ b/drivers/media/dvb/ttpci/av7110_ca.c
@@ -25,7 +25,7 @@
25 * Or, point your browser to http://www.gnu.org/copyleft/gpl.html 25 * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
26 * 26 *
27 * 27 *
28 * the project's page is at http://www.linuxtv.org/dvb/ 28 * the project's page is at http://www.linuxtv.org/
29 */ 29 */
30 30
31#include <linux/kernel.h> 31#include <linux/kernel.h>
diff --git a/drivers/media/dvb/ttpci/av7110_hw.c b/drivers/media/dvb/ttpci/av7110_hw.c
index e162691b515d..f1cbfe526989 100644
--- a/drivers/media/dvb/ttpci/av7110_hw.c
+++ b/drivers/media/dvb/ttpci/av7110_hw.c
@@ -22,7 +22,7 @@
22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 * Or, point your browser to http://www.gnu.org/copyleft/gpl.html 23 * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
24 * 24 *
25 * the project's page is at http://www.linuxtv.org/dvb/ 25 * the project's page is at http://www.linuxtv.org/
26 */ 26 */
27 27
28/* for debugging ARM communication: */ 28/* for debugging ARM communication: */
diff --git a/drivers/media/dvb/ttpci/av7110_v4l.c b/drivers/media/dvb/ttpci/av7110_v4l.c
index 8986d967d2f4..ac20c5bbfa43 100644
--- a/drivers/media/dvb/ttpci/av7110_v4l.c
+++ b/drivers/media/dvb/ttpci/av7110_v4l.c
@@ -22,7 +22,7 @@
22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 * Or, point your browser to http://www.gnu.org/copyleft/gpl.html 23 * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
24 * 24 *
25 * the project's page is at http://www.linuxtv.org/dvb/ 25 * the project's page is at http://www.linuxtv.org/
26 */ 26 */
27 27
28#include <linux/kernel.h> 28#include <linux/kernel.h>
diff --git a/drivers/media/dvb/ttpci/budget-av.c b/drivers/media/dvb/ttpci/budget-av.c
index 983672aa2450..97afc01f60d0 100644
--- a/drivers/media/dvb/ttpci/budget-av.c
+++ b/drivers/media/dvb/ttpci/budget-av.c
@@ -30,7 +30,7 @@
30 * Or, point your browser to http://www.gnu.org/copyleft/gpl.html 30 * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
31 * 31 *
32 * 32 *
33 * the project's page is at http://www.linuxtv.org/dvb/ 33 * the project's page is at http://www.linuxtv.org/
34 */ 34 */
35 35
36#include "budget.h" 36#include "budget.h"
diff --git a/drivers/media/dvb/ttpci/budget-ci.c b/drivers/media/dvb/ttpci/budget-ci.c
index 13ac9e3ab121..a9c2c326df4b 100644
--- a/drivers/media/dvb/ttpci/budget-ci.c
+++ b/drivers/media/dvb/ttpci/budget-ci.c
@@ -26,7 +26,7 @@
26 * Or, point your browser to http://www.gnu.org/copyleft/gpl.html 26 * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
27 * 27 *
28 * 28 *
29 * the project's page is at http://www.linuxtv.org/dvb/ 29 * the project's page is at http://www.linuxtv.org/
30 */ 30 */
31 31
32#include <linux/module.h> 32#include <linux/module.h>
diff --git a/drivers/media/dvb/ttpci/budget-core.c b/drivers/media/dvb/ttpci/budget-core.c
index ba18e56d5f11..054661315311 100644
--- a/drivers/media/dvb/ttpci/budget-core.c
+++ b/drivers/media/dvb/ttpci/budget-core.c
@@ -31,7 +31,7 @@
31 * Or, point your browser to http://www.gnu.org/copyleft/gpl.html 31 * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
32 * 32 *
33 * 33 *
34 * the project's page is at http://www.linuxtv.org/dvb/ 34 * the project's page is at http://www.linuxtv.org/
35 */ 35 */
36 36
37 37
diff --git a/drivers/media/dvb/ttpci/budget-patch.c b/drivers/media/dvb/ttpci/budget-patch.c
index 9c92f9ddd223..579835590690 100644
--- a/drivers/media/dvb/ttpci/budget-patch.c
+++ b/drivers/media/dvb/ttpci/budget-patch.c
@@ -27,7 +27,7 @@
27 * Or, point your browser to http://www.gnu.org/copyleft/gpl.html 27 * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
28 * 28 *
29 * 29 *
30 * the project's page is at http://www.linuxtv.org/dvb/ 30 * the project's page is at http://www.linuxtv.org/
31 */ 31 */
32 32
33#include "av7110.h" 33#include "av7110.h"
diff --git a/drivers/media/dvb/ttpci/budget.c b/drivers/media/dvb/ttpci/budget.c
index 874a10a9d493..d238fb9371a7 100644
--- a/drivers/media/dvb/ttpci/budget.c
+++ b/drivers/media/dvb/ttpci/budget.c
@@ -31,7 +31,7 @@
31 * Or, point your browser to http://www.gnu.org/copyleft/gpl.html 31 * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
32 * 32 *
33 * 33 *
34 * the project's page is at http://www.linuxtv.org/dvb/ 34 * the project's page is at http://www.linuxtv.org/
35 */ 35 */
36 36
37#include "budget.h" 37#include "budget.h"
diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
index 4349213b403b..255d40df4b46 100644
--- a/drivers/media/radio/radio-maxiradio.c
+++ b/drivers/media/radio/radio-maxiradio.c
@@ -13,7 +13,7 @@
13 * anybody does please mail me. 13 * anybody does please mail me.
14 * 14 *
15 * For the pdf file see: 15 * For the pdf file see:
16 * http://www.semiconductors.philips.com/pip/TEA5757H/V1 16 * http://www.nxp.com/acrobat_download2/expired_datasheets/TEA5757_5759_3.pdf
17 * 17 *
18 * 18 *
19 * CHANGES: 19 * CHANGES:
diff --git a/drivers/media/radio/radio-typhoon.c b/drivers/media/radio/radio-typhoon.c
index 03439282dfce..b1f630527dc1 100644
--- a/drivers/media/radio/radio-typhoon.c
+++ b/drivers/media/radio/radio-typhoon.c
@@ -1,9 +1,6 @@
1/* Typhoon Radio Card driver for radio support 1/* Typhoon Radio Card driver for radio support
2 * (c) 1999 Dr. Henrik Seidel <Henrik.Seidel@gmx.de> 2 * (c) 1999 Dr. Henrik Seidel <Henrik.Seidel@gmx.de>
3 * 3 *
4 * Card manufacturer:
5 * http://194.18.155.92/idc/prod2.idc?nr=50753&lang=e
6 *
7 * Notes on the hardware 4 * Notes on the hardware
8 * 5 *
9 * This card has two output sockets, one for speakers and one for line. 6 * This card has two output sockets, one for speakers and one for line.
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index f6e4d0475351..d000522cb0f4 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -978,7 +978,7 @@ config USB_STKWEBCAM
978 Supported devices are typically found in some Asus laptops, 978 Supported devices are typically found in some Asus laptops,
979 with USB id 174f:a311 and 05e1:0501. Other Syntek cameras 979 with USB id 174f:a311 and 05e1:0501. Other Syntek cameras
980 may be supported by the stk11xx driver, from which this is 980 may be supported by the stk11xx driver, from which this is
981 derived, see http://stk11xx.sourceforge.net 981 derived, see <http://sourceforge.net/projects/syntekdriver/>
982 982
983 To compile this driver as a module, choose M here: the 983 To compile this driver as a module, choose M here: the
984 module will be called stkwebcam. 984 module will be called stkwebcam.
diff --git a/drivers/media/video/cafe_ccic.c b/drivers/media/video/cafe_ccic.c
index be35e6965829..9536f1a40dd2 100644
--- a/drivers/media/video/cafe_ccic.c
+++ b/drivers/media/video/cafe_ccic.c
@@ -4,7 +4,7 @@
4 * sensor. 4 * sensor.
5 * 5 *
6 * The data sheet for this device can be found at: 6 * The data sheet for this device can be found at:
7 * http://www.marvell.com/products/pcconn/88ALP01.jsp 7 * http://www.marvell.com/products/pc_connectivity/88alp01/
8 * 8 *
9 * Copyright 2006 One Laptop Per Child Association, Inc. 9 * Copyright 2006 One Laptop Per Child Association, Inc.
10 * Copyright 2006-7 Jonathan Corbet <corbet@lwn.net> 10 * Copyright 2006-7 Jonathan Corbet <corbet@lwn.net>
diff --git a/drivers/media/video/cx18/cx18-cards.c b/drivers/media/video/cx18/cx18-cards.c
index 6b805afe5d20..fe1090940b01 100644
--- a/drivers/media/video/cx18/cx18-cards.c
+++ b/drivers/media/video/cx18/cx18-cards.c
@@ -39,7 +39,7 @@ static struct cx18_card_tuner_i2c cx18_i2c_std = {
39 .tv = { 0x61, 0x60, I2C_CLIENT_END }, 39 .tv = { 0x61, 0x60, I2C_CLIENT_END },
40}; 40};
41 41
42/* Please add new PCI IDs to: http://pci-ids.ucw.cz/iii 42/* Please add new PCI IDs to: http://pci-ids.ucw.cz/
43 This keeps the PCI ID database up to date. Note that the entries 43 This keeps the PCI ID database up to date. Note that the entries
44 must be added under vendor 0x4444 (Conexant) as subsystem IDs. 44 must be added under vendor 0x4444 (Conexant) as subsystem IDs.
45 New vendor IDs should still be added to the vendor ID list. */ 45 New vendor IDs should still be added to the vendor ID list. */
diff --git a/drivers/media/video/cx23885/cx23885-417.c b/drivers/media/video/cx23885/cx23885-417.c
index abd64e89f60f..53a67824071b 100644
--- a/drivers/media/video/cx23885/cx23885-417.c
+++ b/drivers/media/video/cx23885/cx23885-417.c
@@ -7,7 +7,7 @@
7 * (c) 2008 Steven Toth <stoth@linuxtv.org> 7 * (c) 2008 Steven Toth <stoth@linuxtv.org>
8 * - CX23885/7/8 support 8 * - CX23885/7/8 support
9 * 9 *
10 * Includes parts from the ivtv driver( http://ivtv.sourceforge.net/), 10 * Includes parts from the ivtv driver <http://sourceforge.net/projects/ivtv/>
11 * 11 *
12 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by 13 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c
index e46e1ceef72c..660b2a927feb 100644
--- a/drivers/media/video/cx88/cx88-blackbird.c
+++ b/drivers/media/video/cx88/cx88-blackbird.c
@@ -9,7 +9,7 @@
9 * (c) 2005-2006 Mauro Carvalho Chehab <mchehab@infradead.org> 9 * (c) 2005-2006 Mauro Carvalho Chehab <mchehab@infradead.org>
10 * - video_ioctl2 conversion 10 * - video_ioctl2 conversion
11 * 11 *
12 * Includes parts from the ivtv driver( http://ivtv.sourceforge.net/), 12 * Includes parts from the ivtv driver <http://sourceforge.net/projects/ivtv/>
13 * 13 *
14 * This program is free software; you can redistribute it and/or modify 14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by 15 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/video/ivtv/ivtv-cards.c b/drivers/media/video/ivtv/ivtv-cards.c
index ca1fd3227a93..87afbbee2063 100644
--- a/drivers/media/video/ivtv/ivtv-cards.c
+++ b/drivers/media/video/ivtv/ivtv-cards.c
@@ -65,7 +65,7 @@ static struct ivtv_card_tuner_i2c ivtv_i2c_tda8290 = {
65 65
66/********************** card configuration *******************************/ 66/********************** card configuration *******************************/
67 67
68/* Please add new PCI IDs to: http://pci-ids.ucw.cz/iii 68/* Please add new PCI IDs to: http://pci-ids.ucw.cz/
69 This keeps the PCI ID database up to date. Note that the entries 69 This keeps the PCI ID database up to date. Note that the entries
70 must be added under vendor 0x4444 (Conexant) as subsystem IDs. 70 must be added under vendor 0x4444 (Conexant) as subsystem IDs.
71 New vendor IDs should still be added to the vendor ID list. */ 71 New vendor IDs should still be added to the vendor ID list. */
diff --git a/drivers/media/video/mxb.c b/drivers/media/video/mxb.c
index ef0c8178f255..b1dbcf1d2bcb 100644
--- a/drivers/media/video/mxb.c
+++ b/drivers/media/video/mxb.c
@@ -3,7 +3,7 @@
3 3
4 Copyright (C) 1998-2006 Michael Hunold <michael@mihu.de> 4 Copyright (C) 1998-2006 Michael Hunold <michael@mihu.de>
5 5
6 Visit http://www.mihu.de/linux/saa7146/mxb/ 6 Visit http://www.themm.net/~mihu/linux/saa7146/mxb.html
7 for further details about this card. 7 for further details about this card.
8 8
9 This program is free software; you can redistribute it and/or modify 9 This program is free software; you can redistribute it and/or modify
diff --git a/drivers/media/video/sn9c102/sn9c102_pas202bcb.c b/drivers/media/video/sn9c102/sn9c102_pas202bcb.c
index 2782f94cf6f8..2e86fdc86989 100644
--- a/drivers/media/video/sn9c102/sn9c102_pas202bcb.c
+++ b/drivers/media/video/sn9c102/sn9c102_pas202bcb.c
@@ -4,7 +4,6 @@
4 * * 4 * *
5 * Copyright (C) 2004 by Carlos Eduardo Medaglia Dyonisio * 5 * Copyright (C) 2004 by Carlos Eduardo Medaglia Dyonisio *
6 * <medaglia@undl.org.br> * 6 * <medaglia@undl.org.br> *
7 * http://cadu.homelinux.com:8080/ *
8 * * 7 * *
9 * Support for SN9C103, DAC Magnitude, exposure and green gain controls * 8 * Support for SN9C103, DAC Magnitude, exposure and green gain controls *
10 * added by Luca Risolia <luca.risolia@studio.unibo.it> * 9 * added by Luca Risolia <luca.risolia@studio.unibo.it> *
diff --git a/drivers/media/video/zoran/videocodec.h b/drivers/media/video/zoran/videocodec.h
index 5c27b251354e..b654bfff8740 100644
--- a/drivers/media/video/zoran/videocodec.h
+++ b/drivers/media/video/zoran/videocodec.h
@@ -56,7 +56,7 @@
56 the slave is bound to it). Otherwise it doesn't need this functions and 56 the slave is bound to it). Otherwise it doesn't need this functions and
57 therfor they may not be initialized. 57 therfor they may not be initialized.
58 58
59 The other fuctions are just for convenience, as they are for sure used by 59 The other functions are just for convenience, as they are for sure used by
60 most/all of the codecs. The last ones may be ommited, too. 60 most/all of the codecs. The last ones may be ommited, too.
61 61
62 See the structure declaration below for more information and which data has 62 See the structure declaration below for more information and which data has
diff --git a/drivers/media/video/zoran/zoran_driver.c b/drivers/media/video/zoran/zoran_driver.c
index 6f89d0a096ea..3c471a4e3e4a 100644
--- a/drivers/media/video/zoran/zoran_driver.c
+++ b/drivers/media/video/zoran/zoran_driver.c
@@ -1177,7 +1177,7 @@ static int setup_window(struct zoran_fh *fh, int x, int y, int width, int height
1177 if (height > BUZ_MAX_HEIGHT) 1177 if (height > BUZ_MAX_HEIGHT)
1178 height = BUZ_MAX_HEIGHT; 1178 height = BUZ_MAX_HEIGHT;
1179 1179
1180 /* Check for vaild parameters */ 1180 /* Check for invalid parameters */
1181 if (width < BUZ_MIN_WIDTH || height < BUZ_MIN_HEIGHT || 1181 if (width < BUZ_MIN_WIDTH || height < BUZ_MIN_HEIGHT ||
1182 width > BUZ_MAX_WIDTH || height > BUZ_MAX_HEIGHT) { 1182 width > BUZ_MAX_WIDTH || height > BUZ_MAX_HEIGHT) {
1183 dprintk(1, 1183 dprintk(1,
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index db2fbe2d4146..1f69743b12ec 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -112,8 +112,8 @@ config IBM_ASM
112 112
113 WARNING: This software may not be supported or function 113 WARNING: This software may not be supported or function
114 correctly on your IBM server. Please consult the IBM ServerProven 114 correctly on your IBM server. Please consult the IBM ServerProven
115 website <http://www.pc.ibm.com/ww/eserver/xseries/serverproven> for 115 website <http://www-03.ibm.com/systems/info/x86servers/serverproven/compat/us/>
116 information on the specific driver level and support statement 116 for information on the specific driver level and support statement
117 for your IBM server. 117 for your IBM server.
118 118
119config PHANTOM 119config PHANTOM
@@ -385,7 +385,7 @@ config BMP085
385 depends on I2C && SYSFS 385 depends on I2C && SYSFS
386 help 386 help
387 If you say yes here you get support for the Bosch Sensortec 387 If you say yes here you get support for the Bosch Sensortec
388 BMP086 digital pressure sensor. 388 BMP085 digital pressure sensor.
389 389
390 To compile this driver as a module, choose M here: the 390 To compile this driver as a module, choose M here: the
391 module will be called bmp085. 391 module will be called bmp085.
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 3e6c47bdce53..ba29d2f0ffd7 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -418,8 +418,8 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
418 418
419 /* 419 /*
420 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4 420 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4
421 * see: http://www.amd.com/us-en/assets/content_type/DownloadableAssets/cfi_r20.pdf, page 19 421 * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
422 * http://www.amd.com/us-en/assets/content_type/DownloadableAssets/cfi_100_20011201.pdf 422 * http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
423 * http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf 423 * http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
424 */ 424 */
425 if (extp->MajorVersion != '1' || 425 if (extp->MajorVersion != '1' ||
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
index f4359fe7150f..caf604167f03 100644
--- a/drivers/mtd/devices/lart.c
+++ b/drivers/mtd/devices/lart.c
@@ -17,7 +17,7 @@
17 * - January 2000 17 * - January 2000
18 * 18 *
19 * [2] MTD internal API documentation 19 * [2] MTD internal API documentation
20 * - http://www.linux-mtd.infradead.org/tech/ 20 * - http://www.linux-mtd.infradead.org/
21 * 21 *
22 * Limitations: 22 * Limitations:
23 * 23 *
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
index 4d6a64c387ec..037b399df3f1 100644
--- a/drivers/mtd/ftl.c
+++ b/drivers/mtd/ftl.c
@@ -51,7 +51,7 @@
51 51
52 Use of the FTL format for non-PCMCIA applications may be an 52 Use of the FTL format for non-PCMCIA applications may be an
53 infringement of these patents. For additional information, 53 infringement of these patents. For additional information,
54 contact M-Systems (http://www.m-sys.com) directly. 54 contact M-Systems directly. M-Systems since acquired by Sandisk.
55 55
56======================================================================*/ 56======================================================================*/
57#include <linux/mtd/blktrans.h> 57#include <linux/mtd/blktrans.h>
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 701d942c6795..962212628f6e 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -172,7 +172,7 @@ config MTD_OCTAGON
172 This provides a 'mapping' driver which supports the way in which 172 This provides a 'mapping' driver which supports the way in which
173 the flash chips are connected in the Octagon-5066 Single Board 173 the flash chips are connected in the Octagon-5066 Single Board
174 Computer. More information on the board is available at 174 Computer. More information on the board is available at
175 <http://www.octagonsystems.com/CPUpages/5066.html>. 175 <http://www.octagonsystems.com/products/5066.aspx>.
176 176
177config MTD_VMAX 177config MTD_VMAX
178 tristate "JEDEC Flash device mapped on Tempustech VMAX SBC301" 178 tristate "JEDEC Flash device mapped on Tempustech VMAX SBC301"
@@ -284,7 +284,7 @@ config MTD_TQM8XXL
284 chips, currently uses AMD one. This 'mapping' driver supports 284 chips, currently uses AMD one. This 'mapping' driver supports
285 that arrangement, allowing the CFI probe and command set driver 285 that arrangement, allowing the CFI probe and command set driver
286 code to communicate with the chips on the TQM8xxL board. More at 286 code to communicate with the chips on the TQM8xxL board. More at
287 <http://www.denx.de/embedded-ppc-en.html>. 287 <http://www.denx.de/wiki/PPCEmbedded/>.
288 288
289config MTD_RPXLITE 289config MTD_RPXLITE
290 tristate "CFI Flash device mapped on RPX Lite or CLLF" 290 tristate "CFI Flash device mapped on RPX Lite or CLLF"
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index db1dfc5a1b11..e06c8983978e 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -2,7 +2,7 @@
2 * Driver for One Laptop Per Child ‘CAFÉ’ controller, aka Marvell 88ALP01 2 * Driver for One Laptop Per Child ‘CAFÉ’ controller, aka Marvell 88ALP01
3 * 3 *
4 * The data sheet for this device can be found at: 4 * The data sheet for this device can be found at:
5 * http://www.marvell.com/products/pcconn/88ALP01.jsp 5 * http://wiki.laptop.org/go/Datasheets
6 * 6 *
7 * Copyright © 2006 Red Hat, Inc. 7 * Copyright © 2006 Red Hat, Inc.
8 * Copyright © 2006 David Woodhouse <dwmw2@infradead.org> 8 * Copyright © 2006 David Woodhouse <dwmw2@infradead.org>
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 7ca1fc8a3a76..77c1fab7d774 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -924,7 +924,7 @@ config SMC91X
924 including the SMC91C94 and the SMC91C111. Say Y if you want it 924 including the SMC91C94 and the SMC91C111. Say Y if you want it
925 compiled into the kernel, and read the file 925 compiled into the kernel, and read the file
926 <file:Documentation/networking/smc9.txt> and the Ethernet-HOWTO, 926 <file:Documentation/networking/smc9.txt> and the Ethernet-HOWTO,
927 available from <http://www.linuxdoc.org/docs.html#howto>. 927 available from <http://www.tldp.org/docs.html#howto>.
928 928
929 This driver is also available as a module ( = code which can be 929 This driver is also available as a module ( = code which can be
930 inserted in and removed from the running kernel whenever you want). 930 inserted in and removed from the running kernel whenever you want).
@@ -1034,7 +1034,7 @@ config SMC911X
1034 including the new LAN9115, LAN9116, LAN9117, and LAN9118. 1034 including the new LAN9115, LAN9116, LAN9117, and LAN9118.
1035 Say Y if you want it compiled into the kernel, 1035 Say Y if you want it compiled into the kernel,
1036 and read the Ethernet-HOWTO, available from 1036 and read the Ethernet-HOWTO, available from
1037 <http://www.linuxdoc.org/docs.html#howto>. 1037 <http://www.tldp.org/docs.html#howto>.
1038 1038
1039 This driver is also available as a module. The module will be 1039 This driver is also available as a module. The module will be
1040 called smc911x. If you want to compile it as a module, say M 1040 called smc911x. If you want to compile it as a module, say M
@@ -1516,7 +1516,7 @@ config E100
1516 1516
1517 For the latest Intel PRO/100 network driver for Linux, see: 1517 For the latest Intel PRO/100 network driver for Linux, see:
1518 1518
1519 <http://appsr.intel.com/scripts-df/support_intel.asp> 1519 <http://www.intel.com/p/en_US/support/highlights/network/pro100plus>
1520 1520
1521 More specific information on configuring the driver is in 1521 More specific information on configuring the driver is in
1522 <file:Documentation/networking/e100.txt>. 1522 <file:Documentation/networking/e100.txt>.
@@ -1542,9 +1542,8 @@ config FEALNX
1542 select CRC32 1542 select CRC32
1543 select MII 1543 select MII
1544 help 1544 help
1545 Say Y here to support the Mysom MTD-800 family of PCI-based Ethernet 1545 Say Y here to support the Myson MTD-800 family of PCI-based Ethernet
1546 cards. Specifications and data at 1546 cards. <http://www.myson.com.tw/>
1547 <http://www.myson.com.hk/mtd/datasheet/>.
1548 1547
1549config NATSEMI 1548config NATSEMI
1550 tristate "National Semiconductor DP8381x series PCI Ethernet support" 1549 tristate "National Semiconductor DP8381x series PCI Ethernet support"
@@ -1718,7 +1717,7 @@ config SMSC9420
1718 This is a driver for SMSC's LAN9420 PCI ethernet adapter. 1717 This is a driver for SMSC's LAN9420 PCI ethernet adapter.
1719 Say Y if you want it compiled into the kernel, 1718 Say Y if you want it compiled into the kernel,
1720 and read the Ethernet-HOWTO, available from 1719 and read the Ethernet-HOWTO, available from
1721 <http://www.linuxdoc.org/docs.html#howto>. 1720 <http://www.tldp.org/docs.html#howto>.
1722 1721
1723 This driver is also available as a module. The module will be 1722 This driver is also available as a module. The module will be
1724 called smsc9420. If you want to compile it as a module, say M 1723 called smsc9420. If you want to compile it as a module, say M
@@ -2565,7 +2564,7 @@ config CHELSIO_T1
2565 our website at <http://www.chelsio.com>. 2564 our website at <http://www.chelsio.com>.
2566 2565
2567 For customer support, please visit our customer support page at 2566 For customer support, please visit our customer support page at
2568 <http://www.chelsio.com/support.htm>. 2567 <http://www.chelsio.com/support.html>.
2569 2568
2570 Please send feedback to <linux-bugs@chelsio.com>. 2569 Please send feedback to <linux-bugs@chelsio.com>.
2571 2570
@@ -2597,7 +2596,7 @@ config CHELSIO_T3
2597 our website at <http://www.chelsio.com>. 2596 our website at <http://www.chelsio.com>.
2598 2597
2599 For customer support, please visit our customer support page at 2598 For customer support, please visit our customer support page at
2600 <http://www.chelsio.com/support.htm>. 2599 <http://www.chelsio.com/support.html>.
2601 2600
2602 Please send feedback to <linux-bugs@chelsio.com>. 2601 Please send feedback to <linux-bugs@chelsio.com>.
2603 2602
@@ -2622,7 +2621,7 @@ config CHELSIO_T4
2622 our website at <http://www.chelsio.com>. 2621 our website at <http://www.chelsio.com>.
2623 2622
2624 For customer support, please visit our customer support page at 2623 For customer support, please visit our customer support page at
2625 <http://www.chelsio.com/support.htm>. 2624 <http://www.chelsio.com/support.html>.
2626 2625
2627 Please send feedback to <linux-bugs@chelsio.com>. 2626 Please send feedback to <linux-bugs@chelsio.com>.
2628 2627
@@ -2645,7 +2644,7 @@ config CHELSIO_T4VF
2645 our website at <http://www.chelsio.com>. 2644 our website at <http://www.chelsio.com>.
2646 2645
2647 For customer support, please visit our customer support page at 2646 For customer support, please visit our customer support page at
2648 <http://www.chelsio.com/support.htm>. 2647 <http://www.chelsio.com/support.html>.
2649 2648
2650 Please send feedback to <linux-bugs@chelsio.com>. 2649 Please send feedback to <linux-bugs@chelsio.com>.
2651 2650
diff --git a/drivers/net/appletalk/Kconfig b/drivers/net/appletalk/Kconfig
index 20f97e7017ce..0b376a990972 100644
--- a/drivers/net/appletalk/Kconfig
+++ b/drivers/net/appletalk/Kconfig
@@ -19,7 +19,7 @@ config ATALK
19 19
20 General information about how to connect Linux, Windows machines and 20 General information about how to connect Linux, Windows machines and
21 Macs is on the WWW at <http://www.eats.com/linux_mac_win.html>. The 21 Macs is on the WWW at <http://www.eats.com/linux_mac_win.html>. The
22 NET-3-HOWTO, available from 22 NET3-4-HOWTO, available from
23 <http://www.tldp.org/docs.html#howto>, contains valuable 23 <http://www.tldp.org/docs.html#howto>, contains valuable
24 information as well. 24 information as well.
25 25
diff --git a/drivers/net/atp.c b/drivers/net/atp.c
index dfd96b20547f..f3459798b0e9 100644
--- a/drivers/net/atp.c
+++ b/drivers/net/atp.c
@@ -68,7 +68,7 @@ static int xcvr[NUM_UNITS]; /* The data transfer mode. */
68 68
69 In 1997 Realtek made available the documentation for the second generation 69 In 1997 Realtek made available the documentation for the second generation
70 RTL8012 chip, which has lead to several driver improvements. 70 RTL8012 chip, which has lead to several driver improvements.
71 http://www.realtek.com.tw/cn/cn.html 71 http://www.realtek.com.tw/
72 72
73 Theory of Operation 73 Theory of Operation
74 74
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 44c0694c1f4e..91b3846ffc8a 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -1487,7 +1487,7 @@ static void __init depca_platform_probe (void)
1487 if (!pldev->dev.driver) { 1487 if (!pldev->dev.driver) {
1488 /* The driver was not bound to this device, there was 1488 /* The driver was not bound to this device, there was
1489 * no hardware at this address. Unregister it, as the 1489 * no hardware at this address. Unregister it, as the
1490 * release fuction will take care of freeing the 1490 * release function will take care of freeing the
1491 * allocated structure */ 1491 * allocated structure */
1492 1492
1493 depca_io_ports[i].device = NULL; 1493 depca_io_ports[i].device = NULL;
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 32543a300b81..aa56963ad558 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -131,8 +131,8 @@ IIIa. Ring buffers
131 131
132IVb. References 132IVb. References
133 133
134http://www.smsc.com/main/tools/discontinued/83c171.pdf 134http://www.smsc.com/media/Downloads_Public/discontinued/83c171.pdf
135http://www.smsc.com/main/tools/discontinued/83c175.pdf 135http://www.smsc.com/media/Downloads_Public/discontinued/83c175.pdf
136http://scyld.com/expert/NWay.html 136http://scyld.com/expert/NWay.html
137http://www.national.com/pf/DP/DP83840A.html 137http://www.national.com/pf/DP/DP83840A.html
138 138
diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig
index 62d5d5cfd6a6..95dbcfdf131d 100644
--- a/drivers/net/hamradio/Kconfig
+++ b/drivers/net/hamradio/Kconfig
@@ -73,7 +73,7 @@ config DMASCC
73 certain parameters, such as channel access timing, clock mode, and 73 certain parameters, such as channel access timing, clock mode, and
74 DMA channel. This is accomplished with a small utility program, 74 DMA channel. This is accomplished with a small utility program,
75 dmascc_cfg, available at 75 dmascc_cfg, available at
76 <http://cacofonix.nt.tuwien.ac.at/~oe1kib/Linux/>. Please be sure to 76 <http://www.linux-ax25.org/wiki/Ax25-tools>. Please be sure to
77 get at least version 1.27 of dmascc_cfg, as older versions will not 77 get at least version 1.27 of dmascc_cfg, as older versions will not
78 work with the current driver. 78 work with the current driver.
79 79
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c
index 0037a696cd0a..94d9969ec0bb 100644
--- a/drivers/net/ibmlana.c
+++ b/drivers/net/ibmlana.c
@@ -23,7 +23,7 @@ paper sources:
23 'LAN Technical Reference Ethernet Adapter Interface Version 1 Release 1.0 23 'LAN Technical Reference Ethernet Adapter Interface Version 1 Release 1.0
24 Document Number SC30-3661-00' by IBM for info on the adapter itself 24 Document Number SC30-3661-00' by IBM for info on the adapter itself
25 25
26 Also see http://www.natsemi.com/ 26 Also see http://www.national.com/analog
27 27
28special acknowledgements to: 28special acknowledgements to:
29 - Bob Eager for helping me out with documentation from IBM 29 - Bob Eager for helping me out with documentation from IBM
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 75155a27fdde..14db09e2fa8b 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -3540,7 +3540,7 @@ enum latency_range {
3540 * Stores a new ITR value based on strictly on packet size. This 3540 * Stores a new ITR value based on strictly on packet size. This
3541 * algorithm is less sophisticated than that used in igb_update_itr, 3541 * algorithm is less sophisticated than that used in igb_update_itr,
3542 * due to the difficulty of synchronizing statistics across multiple 3542 * due to the difficulty of synchronizing statistics across multiple
3543 * receive rings. The divisors and thresholds used by this fuction 3543 * receive rings. The divisors and thresholds used by this function
3544 * were determined based on theoretical maximum wire speed and testing 3544 * were determined based on theoretical maximum wire speed and testing
3545 * data, in order to minimize response time while increasing bulk 3545 * data, in order to minimize response time while increasing bulk
3546 * throughput. 3546 * throughput.
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index a3cb109006a5..92631eb6f6a3 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -142,7 +142,7 @@ static void SetCOMInterrupts(struct ali_ircc_cb *self , unsigned char enable);
142 * Function ali_ircc_init () 142 * Function ali_ircc_init ()
143 * 143 *
144 * Initialize chip. Find out whay kinds of chips we are dealing with 144 * Initialize chip. Find out whay kinds of chips we are dealing with
145 * and their configuation registers address 145 * and their configuration registers address
146 */ 146 */
147static int __init ali_ircc_init(void) 147static int __init ali_ircc_init(void)
148{ 148{
diff --git a/drivers/net/irda/donauboe.h b/drivers/net/irda/donauboe.h
index 36c3060411d2..4dc39e5f0156 100644
--- a/drivers/net/irda/donauboe.h
+++ b/drivers/net/irda/donauboe.h
@@ -54,7 +54,7 @@
54/* anyone who has. HOWEVER the chip bears a striking resemblence */ 54/* anyone who has. HOWEVER the chip bears a striking resemblence */
55/* to the IrDA controller in the Toshiba RISC TMPR3922 chip */ 55/* to the IrDA controller in the Toshiba RISC TMPR3922 chip */
56/* the documentation for this is freely available at */ 56/* the documentation for this is freely available at */
57/* http://www.toshiba.com/taec/components/Generic/TMPR3922.shtml */ 57/* http://www.madingley.org/james/resources/toshoboe/TMPR3922.pdf */
58/* The mapping between the registers in that document and the */ 58/* The mapping between the registers in that document and the */
59/* Registers in the 701 oboe chip are as follows */ 59/* Registers in the 701 oboe chip are as follows */
60 60
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index 8dd03439d994..1766dc4f07e1 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -78,7 +78,7 @@ that almost all frames will need to be copied to an alignment buffer.
78 78
79IVb. References 79IVb. References
80 80
81http://www.realtek.com.tw/cn/cn.html 81http://www.realtek.com.tw/
82http://www.scyld.com/expert/NWay.html 82http://www.scyld.com/expert/NWay.html
83 83
84IVc. Errata 84IVc. Errata
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index 2807a0fcadc4..321b12f82645 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -62,7 +62,7 @@ invalid ramWidth is Very Bad.
62V. References 62V. References
63 63
64http://www.scyld.com/expert/NWay.html 64http://www.scyld.com/expert/NWay.html
65http://www.national.com/pf/DP/DP83840.html 65http://www.national.com/opf/DP/DP83840A.html
66 66
67Thanks to Terry Murphy of 3Com for providing development information for 67Thanks to Terry Murphy of 3Com for providing development information for
68earlier 3Com products. 68earlier 3Com products.
diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c
index 5526ab4895e6..5ecfa4b1e758 100644
--- a/drivers/net/ps3_gelic_net.c
+++ b/drivers/net/ps3_gelic_net.c
@@ -642,7 +642,7 @@ static inline void gelic_card_disable_rxdmac(struct gelic_card *card)
642 status = lv1_net_stop_rx_dma(bus_id(card), dev_id(card), 0); 642 status = lv1_net_stop_rx_dma(bus_id(card), dev_id(card), 0);
643 if (status) 643 if (status)
644 dev_err(ctodev(card), 644 dev_err(ctodev(card),
645 "lv1_net_stop_rx_dma faild, %d\n", status); 645 "lv1_net_stop_rx_dma failed, %d\n", status);
646} 646}
647 647
648/** 648/**
@@ -660,7 +660,7 @@ static inline void gelic_card_disable_txdmac(struct gelic_card *card)
660 status = lv1_net_stop_tx_dma(bus_id(card), dev_id(card), 0); 660 status = lv1_net_stop_tx_dma(bus_id(card), dev_id(card), 0);
661 if (status) 661 if (status)
662 dev_err(ctodev(card), 662 dev_err(ctodev(card),
663 "lv1_net_stop_tx_dma faild, status=%d\n", status); 663 "lv1_net_stop_tx_dma failed, status=%d\n", status);
664} 664}
665 665
666/** 666/**
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index d8249d7653c6..d96d2f7a3f14 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -95,7 +95,7 @@ MODULE_PARM_DESC(int_timeout_rx, "RX timeout value");
95#include <asm/sibyte/sb1250_regs.h> 95#include <asm/sibyte/sb1250_regs.h>
96#include <asm/sibyte/sb1250_int.h> 96#include <asm/sibyte/sb1250_int.h>
97#else 97#else
98#error invalid SiByte MAC configuation 98#error invalid SiByte MAC configuration
99#endif 99#endif
100#include <asm/sibyte/sb1250_scd.h> 100#include <asm/sibyte/sb1250_scd.h>
101#include <asm/sibyte/sb1250_mac.h> 101#include <asm/sibyte/sb1250_mac.h>
@@ -106,7 +106,7 @@ MODULE_PARM_DESC(int_timeout_rx, "RX timeout value");
106#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X) 106#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
107#define UNIT_INT(n) (K_INT_MAC_0 + (n)) 107#define UNIT_INT(n) (K_INT_MAC_0 + (n))
108#else 108#else
109#error invalid SiByte MAC configuation 109#error invalid SiByte MAC configuration
110#endif 110#endif
111 111
112#ifdef K_INT_PHY 112#ifdef K_INT_PHY
@@ -1568,7 +1568,7 @@ static void sbmac_channel_start(struct sbmac_softc *s)
1568 M_MAC_RX_ENABLE | 1568 M_MAC_RX_ENABLE |
1569 M_MAC_TX_ENABLE, s->sbm_macenable); 1569 M_MAC_TX_ENABLE, s->sbm_macenable);
1570#else 1570#else
1571#error invalid SiByte MAC configuation 1571#error invalid SiByte MAC configuration
1572#endif 1572#endif
1573 1573
1574#ifdef CONFIG_SBMAC_COALESCE 1574#ifdef CONFIG_SBMAC_COALESCE
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index 31b92f5f32cb..417adf372828 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -15,7 +15,7 @@
15 * Rewritten for 2.6 by Cesar Eduardo Barros 15 * Rewritten for 2.6 by Cesar Eduardo Barros
16 * 16 *
17 * A datasheet for this chip can be found at 17 * A datasheet for this chip can be found at
18 * http://www.silan.com.cn/english/products/pdf/SC92031AY.pdf 18 * http://www.silan.com.cn/english/product/pdf/SC92031AY.pdf
19 */ 19 */
20 20
21/* Note about set_mac_address: I don't know how to change the hardware 21/* Note about set_mac_address: I don't know how to change the hardware
diff --git a/drivers/net/skfp/hwt.c b/drivers/net/skfp/hwt.c
index e6baa53307c7..c0798fd2ca69 100644
--- a/drivers/net/skfp/hwt.c
+++ b/drivers/net/skfp/hwt.c
@@ -221,7 +221,7 @@ u_long hwt_quick_read(struct s_smc *smc)
221 * para start start time 221 * para start start time
222 * duration time to wait 222 * duration time to wait
223 * 223 *
224 * NOTE: The fuction will return immediately, if the timer is not 224 * NOTE: The function will return immediately, if the timer is not
225 * started 225 * started
226 ************************/ 226 ************************/
227void hwt_wait_time(struct s_smc *smc, u_long start, long int duration) 227void hwt_wait_time(struct s_smc *smc, u_long start, long int duration)
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c
index ba2e8339fe90..0a66fed52e8e 100644
--- a/drivers/net/skfp/skfddi.c
+++ b/drivers/net/skfp/skfddi.c
@@ -33,7 +33,7 @@
33 * The driver architecture is based on the DEC FDDI driver by 33 * The driver architecture is based on the DEC FDDI driver by
34 * Lawrence V. Stefani and several ethernet drivers. 34 * Lawrence V. Stefani and several ethernet drivers.
35 * I also used an existing Windows NT miniport driver. 35 * I also used an existing Windows NT miniport driver.
36 * All hardware dependent fuctions are handled by the SysKonnect 36 * All hardware dependent functions are handled by the SysKonnect
37 * Hardware Module. 37 * Hardware Module.
38 * The only headerfiles that are directly related to this source 38 * The only headerfiles that are directly related to this source
39 * are skfddi.c, h/types.h, h/osdef1st.h, h/targetos.h. 39 * are skfddi.c, h/types.h, h/osdef1st.h, h/targetos.h.
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index ec8c804a795d..f8e463cd8ecc 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -78,7 +78,7 @@
78 * - Updated tlan.txt accordingly. 78 * - Updated tlan.txt accordingly.
79 * - Adjusted minimum/maximum frame length. 79 * - Adjusted minimum/maximum frame length.
80 * - There is now a TLAN website up at 80 * - There is now a TLAN website up at
81 * http://tlan.kernel.dk 81 * http://hp.sourceforge.net/
82 * 82 *
83 * v1.7 April 07, 2000 - Started to implement custom ioctls. Driver now 83 * v1.7 April 07, 2000 - Started to implement custom ioctls. Driver now
84 * reports PHY information when used with Donald 84 * reports PHY information when used with Donald
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index c83f4f6e39e1..663b8860a531 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -5,7 +5,7 @@
5 * Originally sktr.c: Written 1997 by Christoph Goos 5 * Originally sktr.c: Written 1997 by Christoph Goos
6 * 6 *
7 * A fine result of the Linux Systems Network Architecture Project. 7 * A fine result of the Linux Systems Network Architecture Project.
8 * http://www.linux-sna.org 8 * http://www.vanheusden.com/sna/
9 * 9 *
10 * This software may be used and distributed according to the terms 10 * This software may be used and distributed according to the terms
11 * of the GNU General Public License, incorporated herein by reference. 11 * of the GNU General Public License, incorporated herein by reference.
diff --git a/drivers/net/tulip/Kconfig b/drivers/net/tulip/Kconfig
index f3035951422f..1f8d4a8d8ea4 100644
--- a/drivers/net/tulip/Kconfig
+++ b/drivers/net/tulip/Kconfig
@@ -151,7 +151,7 @@ config ULI526X
151 select CRC32 151 select CRC32
152 ---help--- 152 ---help---
153 This driver is for ULi M5261/M5263 10/100M Ethernet Controller 153 This driver is for ULi M5261/M5263 10/100M Ethernet Controller
154 (<http://www.uli.com.tw/>). 154 (<http://www.nvidia.com/page/uli_drivers.html>).
155 155
156 To compile this driver as a module, choose M here. The module will 156 To compile this driver as a module, choose M here. The module will
157 be called uli526x. 157 be called uli526x.
diff --git a/drivers/net/tulip/pnic2.c b/drivers/net/tulip/pnic2.c
index b8197666021e..4690c8e69207 100644
--- a/drivers/net/tulip/pnic2.c
+++ b/drivers/net/tulip/pnic2.c
@@ -59,7 +59,7 @@
59 * Bit 14:12 - autonegotiation state (write 001 to start autonegotiate) 59 * Bit 14:12 - autonegotiation state (write 001 to start autonegotiate)
60 * Bit 3 - Autopolarity state 60 * Bit 3 - Autopolarity state
61 * Bit 2 - LS10B - link state of 10baseT 0 - good, 1 - failed 61 * Bit 2 - LS10B - link state of 10baseT 0 - good, 1 - failed
62 * Bit 1 - LS100B - link state of 100baseT 0 - good, 1- faild 62 * Bit 1 - LS100B - link state of 100baseT 0 - good, 1 - failed
63 * 63 *
64 * 64 *
65 * Data Port Selection Info 65 * Data Port Selection Info
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 08555f8b15f4..08ad269f6b4e 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -32,7 +32,7 @@
32 32
33 33
34/* 34/*
35 * Prolific PL-2301/PL-2302 driver ... http://www.prolifictech.com 35 * Prolific PL-2301/PL-2302 driver ... http://www.prolific.com.tw/
36 * 36 *
37 * The protocol and handshaking used here should be bug-compatible 37 * The protocol and handshaking used here should be bug-compatible
38 * with the Linux 2.2 "plusb" driver, by Deti Fliegl. 38 * with the Linux 2.2 "plusb" driver, by Deti Fliegl.
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index d08ce6a264cb..423eb26386c8 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -409,7 +409,7 @@ config CYCLADES_SYNC
409 tristate "Cyclom 2X(tm) cards (EXPERIMENTAL)" 409 tristate "Cyclom 2X(tm) cards (EXPERIMENTAL)"
410 depends on WAN_ROUTER_DRIVERS && (PCI || ISA) 410 depends on WAN_ROUTER_DRIVERS && (PCI || ISA)
411 ---help--- 411 ---help---
412 Cyclom 2X from Cyclades Corporation <http://www.cyclades.com/> is an 412 Cyclom 2X from Cyclades Corporation <http://www.avocent.com/> is an
413 intelligent multiprotocol WAN adapter with data transfer rates up to 413 intelligent multiprotocol WAN adapter with data transfer rates up to
414 512 Kbps. These cards support the X.25 and SNA related protocols. 414 512 Kbps. These cards support the X.25 and SNA related protocols.
415 415
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
index 9937bbab938d..5d4bb615ccce 100644
--- a/drivers/net/wan/hdlc.c
+++ b/drivers/net/wan/hdlc.c
@@ -109,7 +109,7 @@ static int hdlc_device_event(struct notifier_block *this, unsigned long event,
109 return NOTIFY_DONE; /* not an HDLC device */ 109 return NOTIFY_DONE; /* not an HDLC device */
110 110
111 if (event != NETDEV_CHANGE) 111 if (event != NETDEV_CHANGE)
112 return NOTIFY_DONE; /* Only interrested in carrier changes */ 112 return NOTIFY_DONE; /* Only interested in carrier changes */
113 113
114 on = netif_carrier_ok(dev); 114 on = netif_carrier_ok(dev);
115 115
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 4a367cdb3eb9..308b79e1ff08 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -348,7 +348,7 @@ struct ath5k_srev_name {
348/* 348/*
349 * Some of this information is based on Documentation from: 349 * Some of this information is based on Documentation from:
350 * 350 *
351 * http://madwifi.org/wiki/ChipsetFeatures/SuperAG 351 * http://madwifi-project.org/wiki/ChipsetFeatures/SuperAG
352 * 352 *
353 * Modulation for Atheros' eXtended Range - range enhancing extension that is 353 * Modulation for Atheros' eXtended Range - range enhancing extension that is
354 * supposed to double the distance an Atheros client device can keep a 354 * supposed to double the distance an Atheros client device can keep a
diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h
index a34929f06533..ca79ecd832fd 100644
--- a/drivers/net/wireless/ath/ath5k/reg.h
+++ b/drivers/net/wireless/ath/ath5k/reg.h
@@ -26,7 +26,6 @@
26 * Atheros presentations and papers like these: 26 * Atheros presentations and papers like these:
27 * 27 *
28 * 5210 - http://nova.stanford.edu/~bbaas/ps/isscc2002_slides.pdf 28 * 5210 - http://nova.stanford.edu/~bbaas/ps/isscc2002_slides.pdf
29 * http://www.it.iitb.ac.in/~janak/wifire/01222734.pdf
30 * 29 *
31 * 5211 - http://www.hotchips.org/archives/hc14/3_Tue/16_mcfarland.pdf 30 * 5211 - http://www.hotchips.org/archives/hc14/3_Tue/16_mcfarland.pdf
32 * 31 *
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index e9d9d622a9b0..b7cb165d612b 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -2621,7 +2621,7 @@ static irqreturn_t prism2_interrupt(int irq, void *dev_id)
2621 iface = netdev_priv(dev); 2621 iface = netdev_priv(dev);
2622 local = iface->local; 2622 local = iface->local;
2623 2623
2624 /* Detect early interrupt before driver is fully configued */ 2624 /* Detect early interrupt before driver is fully configured */
2625 spin_lock(&local->irq_init_lock); 2625 spin_lock(&local->irq_init_lock);
2626 if (!dev->base_addr) { 2626 if (!dev->base_addr) {
2627 if (net_ratelimit()) { 2627 if (net_ratelimit()) {
diff --git a/drivers/net/wireless/p54/Kconfig b/drivers/net/wireless/p54/Kconfig
index e5f45cb2a7a2..25f965ffc889 100644
--- a/drivers/net/wireless/p54/Kconfig
+++ b/drivers/net/wireless/p54/Kconfig
@@ -9,7 +9,7 @@ config P54_COMMON
9 also need to be enabled in order to support any devices. 9 also need to be enabled in order to support any devices.
10 10
11 These devices require softmac firmware which can be found at 11 These devices require softmac firmware which can be found at
12 http://prism54.org/ 12 <http://wireless.kernel.org/en/users/Drivers/p54>
13 13
14 If you choose to build a module, it'll be called p54common. 14 If you choose to build a module, it'll be called p54common.
15 15
@@ -21,7 +21,7 @@ config P54_USB
21 This driver is for USB isl38xx based wireless cards. 21 This driver is for USB isl38xx based wireless cards.
22 22
23 These devices require softmac firmware which can be found at 23 These devices require softmac firmware which can be found at
24 http://prism54.org/ 24 <http://wireless.kernel.org/en/users/Drivers/p54>
25 25
26 If you choose to build a module, it'll be called p54usb. 26 If you choose to build a module, it'll be called p54usb.
27 27
@@ -35,7 +35,7 @@ config P54_PCI
35 supported by the fullmac driver/firmware. 35 supported by the fullmac driver/firmware.
36 36
37 This driver requires softmac firmware which can be found at 37 This driver requires softmac firmware which can be found at
38 http://prism54.org/ 38 <http://wireless.kernel.org/en/users/Drivers/p54>
39 39
40 If you choose to build a module, it'll be called p54pci. 40 If you choose to build a module, it'll be called p54pci.
41 41
diff --git a/drivers/net/wireless/prism54/islpci_hotplug.c b/drivers/net/wireless/prism54/islpci_hotplug.c
index dc14420a9adc..b5e64d71b7a6 100644
--- a/drivers/net/wireless/prism54/islpci_hotplug.c
+++ b/drivers/net/wireless/prism54/islpci_hotplug.c
@@ -38,7 +38,7 @@ module_param(init_pcitm, int, 0);
38/* In this order: vendor, device, subvendor, subdevice, class, class_mask, 38/* In this order: vendor, device, subvendor, subdevice, class, class_mask,
39 * driver_data 39 * driver_data
40 * If you have an update for this please contact prism54-devel@prism54.org 40 * If you have an update for this please contact prism54-devel@prism54.org
41 * The latest list can be found at http://prism54.org/supported_cards.php */ 41 * The latest list can be found at http://wireless.kernel.org/en/users/Drivers/p54 */
42static DEFINE_PCI_DEVICE_TABLE(prism54_id_tbl) = { 42static DEFINE_PCI_DEVICE_TABLE(prism54_id_tbl) = {
43 /* Intersil PRISM Duette/Prism GT Wireless LAN adapter */ 43 /* Intersil PRISM Duette/Prism GT Wireless LAN adapter */
44 { 44 {
diff --git a/drivers/parisc/README.dino b/drivers/parisc/README.dino
index 097324f34bbe..1627426996c1 100644
--- a/drivers/parisc/README.dino
+++ b/drivers/parisc/README.dino
@@ -10,8 +10,7 @@
10** PCI bus. HP-supplied graphics cards that utilize the PCI bus are 10** PCI bus. HP-supplied graphics cards that utilize the PCI bus are
11** not affected." 11** not affected."
12** 12**
13** REVISIT: "go/pci_defect" link below is stale. 13** http://h20000.www2.hp.com/bizsupport/TechSupport/Home.jsp?locale=en_US&prodTypeId=12454&prodSeriesId=44443
14** HP Internal can use <http://hpfcdma.fc.hp.com:80/Dino/>
15** 14**
16** Product First Good Serial Number 15** Product First Good Serial Number
17** C200/C240 (US) US67350000 16** C200/C240 (US) US67350000
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 0950fa40684f..8d62fb76cd41 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2599,7 +2599,7 @@ static int __devinit sio_ite_8872_probe(struct pci_dev *pdev, int autoirq,
2599 printk(KERN_INFO "parport_pc: ITE8873 found (1S)\n"); 2599 printk(KERN_INFO "parport_pc: ITE8873 found (1S)\n");
2600 return 0; 2600 return 0;
2601 case 0x8: 2601 case 0x8:
2602 DPRINTK(KERN_DEBUG "parport_pc: ITE8874 found (2S)\n"); 2602 printk(KERN_INFO "parport_pc: ITE8874 found (2S)\n");
2603 return 0; 2603 return 0;
2604 default: 2604 default:
2605 printk(KERN_INFO "parport_pc: unknown ITE887x\n"); 2605 printk(KERN_INFO "parport_pc: unknown ITE887x\n");
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 857ae01734a6..cc96c7142dac 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -226,6 +226,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quir
226 * VIA Apollo KT133 needs PCI latency patch 226 * VIA Apollo KT133 needs PCI latency patch
227 * Made according to a windows driver based patch by George E. Breese 227 * Made according to a windows driver based patch by George E. Breese
228 * see PCI Latency Adjust on http://www.viahardware.com/download/viatweak.shtm 228 * see PCI Latency Adjust on http://www.viahardware.com/download/viatweak.shtm
229 * and http://www.georgebreese.com/net/software/#PCI
229 * Also see http://www.au-ja.org/review-kt133a-1-en.phtml for 230 * Also see http://www.au-ja.org/review-kt133a-1-en.phtml for
230 * the info on which Mr Breese based his work. 231 * the info on which Mr Breese based his work.
231 * 232 *
@@ -1016,7 +1017,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA, 0x605, quirk_transparent_bridge)
1016/* 1017/*
1017 * Common misconfiguration of the MediaGX/Geode PCI master that will 1018 * Common misconfiguration of the MediaGX/Geode PCI master that will
1018 * reduce PCI bandwidth from 70MB/s to 25MB/s. See the GXM/GXLV/GX1 1019 * reduce PCI bandwidth from 70MB/s to 25MB/s. See the GXM/GXLV/GX1
1019 * datasheets found at http://www.national.com/ds/GX for info on what 1020 * datasheets found at http://www.national.com/analog for info on what
1020 * these bits do. <christer@weinigel.se> 1021 * these bits do. <christer@weinigel.se>
1021 */ 1022 */
1022static void quirk_mediagx_master(struct pci_dev *dev) 1023static void quirk_mediagx_master(struct pci_dev *dev)
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index 408dbaa080a1..9dc565c615bd 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -1072,7 +1072,7 @@ static void yenta_config_init(struct yenta_socket *socket)
1072 * invisible during PCI scans because of a misconfigured subordinate number 1072 * invisible during PCI scans because of a misconfigured subordinate number
1073 * of the parent brige - some BIOSes seem to be too lazy to set it right. 1073 * of the parent brige - some BIOSes seem to be too lazy to set it right.
1074 * Does the fixup carefully by checking how far it can go without conflicts. 1074 * Does the fixup carefully by checking how far it can go without conflicts.
1075 * See http\://bugzilla.kernel.org/show_bug.cgi?id=2944 for more information. 1075 * See http://bugzilla.kernel.org/show_bug.cgi?id=2944 for more information.
1076 */ 1076 */
1077static void yenta_fixup_parent_bridge(struct pci_bus *cardbus_bridge) 1077static void yenta_fixup_parent_bridge(struct pci_bus *cardbus_bridge)
1078{ 1078{
diff --git a/drivers/pnp/pnpbios/proc.c b/drivers/pnp/pnpbios/proc.c
index 2d8ac43f78e8..bc89f392a629 100644
--- a/drivers/pnp/pnpbios/proc.c
+++ b/drivers/pnp/pnpbios/proc.c
@@ -11,7 +11,6 @@
11 * 11 *
12 * The .../escd file is utilized by the lsescd utility written by 12 * The .../escd file is utilized by the lsescd utility written by
13 * Gunther Mayer. 13 * Gunther Mayer.
14 * http://home.t-online.de/home/gunther.mayer/lsescd
15 * 14 *
16 * The .../legacy_device_resources file is not used yet. 15 * The .../legacy_device_resources file is not used yet.
17 * 16 *
diff --git a/drivers/rtc/rtc-nuc900.c b/drivers/rtc/rtc-nuc900.c
index 62de66af0a68..ddb0857e15a4 100644
--- a/drivers/rtc/rtc-nuc900.c
+++ b/drivers/rtc/rtc-nuc900.c
@@ -274,7 +274,7 @@ static int __devinit nuc900_rtc_probe(struct platform_device *pdev)
274 nuc900_rtc->rtcdev = rtc_device_register(pdev->name, &pdev->dev, 274 nuc900_rtc->rtcdev = rtc_device_register(pdev->name, &pdev->dev,
275 &nuc900_rtc_ops, THIS_MODULE); 275 &nuc900_rtc_ops, THIS_MODULE);
276 if (IS_ERR(nuc900_rtc->rtcdev)) { 276 if (IS_ERR(nuc900_rtc->rtcdev)) {
277 dev_err(&pdev->dev, "rtc device register faild\n"); 277 dev_err(&pdev->dev, "rtc device register failed\n");
278 err = PTR_ERR(nuc900_rtc->rtcdev); 278 err = PTR_ERR(nuc900_rtc->rtcdev);
279 goto fail3; 279 goto fail3;
280 } 280 }
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 85bfd8794856..e82d427ff5eb 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -2197,7 +2197,7 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
2197 2197
2198/* 2198/*
2199 ***************************************************************************** 2199 *****************************************************************************
2200 * main ERP control fuctions (24 and 32 byte sense) 2200 * main ERP control functions (24 and 32 byte sense)
2201 ***************************************************************************** 2201 *****************************************************************************
2202 */ 2202 */
2203 2203
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 66360c24bd48..59b4ecfb967b 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1190,7 +1190,7 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
1190 goto out_err2; 1190 goto out_err2;
1191 } 1191 }
1192 /* 1192 /*
1193 * dasd_eckd_vaildate_server is done on the first device that 1193 * dasd_eckd_validate_server is done on the first device that
1194 * is found for an LCU. All later other devices have to wait 1194 * is found for an LCU. All later other devices have to wait
1195 * for it, so they will read the correct feature codes. 1195 * for it, so they will read the correct feature codes.
1196 */ 1196 */
@@ -1216,7 +1216,7 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
1216 "Read device characteristic failed, rc=%d", rc); 1216 "Read device characteristic failed, rc=%d", rc);
1217 goto out_err3; 1217 goto out_err3;
1218 } 1218 }
1219 /* find the vaild cylinder size */ 1219 /* find the valid cylinder size */
1220 if (private->rdc_data.no_cyl == LV_COMPAT_CYL && 1220 if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
1221 private->rdc_data.long_no_cyl) 1221 private->rdc_data.long_no_cyl)
1222 private->real_cyl = private->rdc_data.long_no_cyl; 1222 private->real_cyl = private->rdc_data.long_no_cyl;
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index fc993acf99b6..deff2c3361e4 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -31,7 +31,7 @@ debug_info_t *TAPE_DBF_AREA = NULL;
31EXPORT_SYMBOL(TAPE_DBF_AREA); 31EXPORT_SYMBOL(TAPE_DBF_AREA);
32 32
33/******************************************************************* 33/*******************************************************************
34 * Error Recovery fuctions: 34 * Error Recovery functions:
35 * - Read Opposite: implemented 35 * - Read Opposite: implemented
36 * - Read Device (buffered) log: BRA 36 * - Read Device (buffered) log: BRA
37 * - Read Library log: BRA 37 * - Read Library log: BRA
@@ -798,7 +798,7 @@ tape_3590_done(struct tape_device *device, struct tape_request *request)
798} 798}
799 799
800/* 800/*
801 * This fuction is called, when error recovery was successfull 801 * This function is called, when error recovery was successful
802 */ 802 */
803static inline int 803static inline int
804tape_3590_erp_succeded(struct tape_device *device, struct tape_request *request) 804tape_3590_erp_succeded(struct tape_device *device, struct tape_request *request)
@@ -809,7 +809,7 @@ tape_3590_erp_succeded(struct tape_device *device, struct tape_request *request)
809} 809}
810 810
811/* 811/*
812 * This fuction is called, when error recovery was not successfull 812 * This function is called, when error recovery was not successful
813 */ 813 */
814static inline int 814static inline int
815tape_3590_erp_failed(struct tape_device *device, struct tape_request *request, 815tape_3590_erp_failed(struct tape_device *device, struct tape_request *request,
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index 0e7cb1a84151..31a3ccbb6495 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -47,7 +47,7 @@ static int vmcp_release(struct inode *inode, struct file *file)
47{ 47{
48 struct vmcp_session *session; 48 struct vmcp_session *session;
49 49
50 session = (struct vmcp_session *)file->private_data; 50 session = file->private_data;
51 file->private_data = NULL; 51 file->private_data = NULL;
52 free_pages((unsigned long)session->response, get_order(session->bufsize)); 52 free_pages((unsigned long)session->response, get_order(session->bufsize));
53 kfree(session); 53 kfree(session);
@@ -94,7 +94,7 @@ vmcp_write(struct file *file, const char __user *buff, size_t count,
94 return -EFAULT; 94 return -EFAULT;
95 } 95 }
96 cmd[count] = '\0'; 96 cmd[count] = '\0';
97 session = (struct vmcp_session *)file->private_data; 97 session = file->private_data;
98 if (mutex_lock_interruptible(&session->mutex)) { 98 if (mutex_lock_interruptible(&session->mutex)) {
99 kfree(cmd); 99 kfree(cmd);
100 return -ERESTARTSYS; 100 return -ERESTARTSYS;
@@ -136,7 +136,7 @@ static long vmcp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
136 int __user *argp; 136 int __user *argp;
137 int temp; 137 int temp;
138 138
139 session = (struct vmcp_session *)file->private_data; 139 session = file->private_data;
140 if (is_compat_task()) 140 if (is_compat_task())
141 argp = compat_ptr(arg); 141 argp = compat_ptr(arg);
142 else 142 else
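The vmcp.c hunks above, like the later sg.c and aacraid/linit.c hunks, drop the explicit cast when reading file->private_data. The field is declared void * in struct file, so C converts it to any object pointer type implicitly and the cast is pure noise. A minimal sketch of the resulting idiom, using a hypothetical my_session type purely for illustration:

	#include <linux/fs.h>
	#include <linux/slab.h>

	struct my_session {			/* hypothetical per-open state */
		void *response;
		size_t bufsize;
	};

	static int my_open(struct inode *inode, struct file *file)
	{
		struct my_session *s = kzalloc(sizeof(*s), GFP_KERNEL);

		if (!s)
			return -ENOMEM;
		file->private_data = s;		/* void * takes any object pointer */
		return 0;
	}

	static int my_release(struct inode *inode, struct file *file)
	{
		struct my_session *s = file->private_data;	/* no cast needed */

		kfree(s);
		file->private_data = NULL;
		return 0;
	}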
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 4e298bc8949d..5a46b8c5d68a 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -32,6 +32,7 @@
32 * The pointer to our (page) of device descriptions. 32 * The pointer to our (page) of device descriptions.
33 */ 33 */
34static void *kvm_devices; 34static void *kvm_devices;
35struct work_struct hotplug_work;
35 36
36struct kvm_device { 37struct kvm_device {
37 struct virtio_device vdev; 38 struct virtio_device vdev;
@@ -328,13 +329,54 @@ static void scan_devices(void)
328} 329}
329 330
330/* 331/*
332 * match for a kvm device with a specific desc pointer
333 */
334static int match_desc(struct device *dev, void *data)
335{
336 if ((ulong)to_kvmdev(dev_to_virtio(dev))->desc == (ulong)data)
337 return 1;
338
339 return 0;
340}
341
342/*
343 * hotplug_device tries to find changes in the device page.
344 */
345static void hotplug_devices(struct work_struct *dummy)
346{
347 unsigned int i;
348 struct kvm_device_desc *d;
349 struct device *dev;
350
351 for (i = 0; i < PAGE_SIZE; i += desc_size(d)) {
352 d = kvm_devices + i;
353
354 /* end of list */
355 if (d->type == 0)
356 break;
357
358 /* device already exists */
359 dev = device_find_child(kvm_root, d, match_desc);
360 if (dev) {
361 /* XXX check for hotplug remove */
362 put_device(dev);
363 continue;
364 }
365
366 /* new device */
367 printk(KERN_INFO "Adding new virtio device %p\n", d);
368 add_kvm_device(d, i);
369 }
370}
371
372/*
331 * we emulate the request_irq behaviour on top of s390 extints 373 * we emulate the request_irq behaviour on top of s390 extints
332 */ 374 */
333static void kvm_extint_handler(u16 code) 375static void kvm_extint_handler(u16 code)
334{ 376{
335 struct virtqueue *vq; 377 struct virtqueue *vq;
336 u16 subcode; 378 u16 subcode;
337 int config_changed; 379 u32 param;
338 380
339 subcode = S390_lowcore.cpu_addr; 381 subcode = S390_lowcore.cpu_addr;
340 if ((subcode & 0xff00) != VIRTIO_SUBCODE_64) 382 if ((subcode & 0xff00) != VIRTIO_SUBCODE_64)
@@ -343,18 +385,28 @@ static void kvm_extint_handler(u16 code)
343 /* The LSB might be overloaded, we have to mask it */ 385 /* The LSB might be overloaded, we have to mask it */
344 vq = (struct virtqueue *)(S390_lowcore.ext_params2 & ~1UL); 386 vq = (struct virtqueue *)(S390_lowcore.ext_params2 & ~1UL);
345 387
346 /* We use the LSB of extparam, to decide, if this interrupt is a config 388 /* We use ext_params to decide what this interrupt means */
347 * change or a "standard" interrupt */ 389 param = S390_lowcore.ext_params & VIRTIO_PARAM_MASK;
348 config_changed = S390_lowcore.ext_params & 1;
349 390
350 if (config_changed) { 391 switch (param) {
392 case VIRTIO_PARAM_CONFIG_CHANGED:
393 {
351 struct virtio_driver *drv; 394 struct virtio_driver *drv;
352 drv = container_of(vq->vdev->dev.driver, 395 drv = container_of(vq->vdev->dev.driver,
353 struct virtio_driver, driver); 396 struct virtio_driver, driver);
354 if (drv->config_changed) 397 if (drv->config_changed)
355 drv->config_changed(vq->vdev); 398 drv->config_changed(vq->vdev);
356 } else 399
400 break;
401 }
402 case VIRTIO_PARAM_DEV_ADD:
403 schedule_work(&hotplug_work);
404 break;
405 case VIRTIO_PARAM_VRING_INTERRUPT:
406 default:
357 vring_interrupt(0, vq); 407 vring_interrupt(0, vq);
408 break;
409 }
358} 410}
359 411
360/* 412/*
@@ -383,6 +435,8 @@ static int __init kvm_devices_init(void)
383 435
384 kvm_devices = (void *) real_memory_size; 436 kvm_devices = (void *) real_memory_size;
385 437
438 INIT_WORK(&hotplug_work, hotplug_devices);
439
386 ctl_set_bit(0, 9); 440 ctl_set_bit(0, 9);
387 register_external_interrupt(0x2603, kvm_extint_handler); 441 register_external_interrupt(0x2603, kvm_extint_handler);
388 442
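The kvm_virtio.c hunk above wires device hotplug into the existing device-page scan: match_desc() compares a descriptor pointer, hotplug_devices() walks the page and uses device_find_child() to skip descriptors that already have a struct device, and the external-interrupt handler schedules that work when the host signals VIRTIO_PARAM_DEV_ADD. The lookup itself is generic driver-core usage; a minimal sketch under the assumption of a driver-private tag stored with dev_set_drvdata() (the tag and helper names here are hypothetical, not part of the patch):

	#include <linux/device.h>
	#include <linux/types.h>

	static int match_tag(struct device *dev, void *data)
	{
		/* a non-zero return tells device_find_child() this is a match */
		return dev_get_drvdata(dev) == data;
	}

	static bool child_exists(struct device *parent, void *tag)
	{
		struct device *dev = device_find_child(parent, tag, match_tag);

		if (!dev)
			return false;
		put_device(dev);	/* device_find_child() returned a reference */
		return true;
	}

As in hotplug_devices(), whoever gets a device back from device_find_child() must drop the reference with put_device(), even when the result is only used as an existence test.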
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 2e9632e2c98b..8616496ffc02 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -116,7 +116,7 @@ config CHR_DEV_OSST
116 <http://www.tldp.org/docs.html#howto> and 116 <http://www.tldp.org/docs.html#howto> and
117 <file:Documentation/scsi/osst.txt> in the kernel source. 117 <file:Documentation/scsi/osst.txt> in the kernel source.
118 More info on the OnStream driver may be found on 118 More info on the OnStream driver may be found on
119 <http://linux1.onstream.nl/test/> 119 <http://sourceforge.net/projects/osst/>
120 Please also have a look at the standard st docu, as most of it 120 Please also have a look at the standard st docu, as most of it
121 applies to osst as well. 121 applies to osst as well.
122 122
@@ -156,9 +156,9 @@ config CHR_DEV_SG
156 directly, so you need some additional software which knows how to 156 directly, so you need some additional software which knows how to
157 talk to these devices using the SCSI protocol: 157 talk to these devices using the SCSI protocol:
158 158
159 For scanners, look at SANE (<http://www.mostang.com/sane/>). For CD 159 For scanners, look at SANE (<http://www.sane-project.org/>). For CD
160 writer software look at Cdrtools 160 writer software look at Cdrtools
161 (<http://www.fokus.gmd.de/research/cc/glone/employees/joerg.schilling/private/cdrecord.html>) 161 (<http://cdrecord.berlios.de/private/cdrecord.html>)
162 and for burning a "disk at once": CDRDAO 162 and for burning a "disk at once": CDRDAO
163 (<http://cdrdao.sourceforge.net/>). Cdparanoia is a high 163 (<http://cdrdao.sourceforge.net/>). Cdparanoia is a high
164 quality digital reader of audio CDs (<http://www.xiph.org/paranoia/>). 164 quality digital reader of audio CDs (<http://www.xiph.org/paranoia/>).
@@ -951,6 +951,7 @@ config SCSI_IPS
951 ---help--- 951 ---help---
952 This is support for the IBM ServeRAID hardware RAID controllers. 952 This is support for the IBM ServeRAID hardware RAID controllers.
953 See <http://www.developer.ibm.com/welcome/netfinity/serveraid.html> 953 See <http://www.developer.ibm.com/welcome/netfinity/serveraid.html>
954 and <http://www-947.ibm.com/support/entry/portal/docdisplay?brand=5000008&lndocid=SERV-RAID>
954 for more information. If this driver does not work correctly 955 for more information. If this driver does not work correctly
955 without modification please contact the author by email at 956 without modification please contact the author by email at
956 <ipslinux@adaptec.com>. 957 <ipslinux@adaptec.com>.
@@ -1610,7 +1611,7 @@ config SCSI_DEBUG
1610 host adapter with one dummy SCSI disk. Each dummy disk uses kernel 1611 host adapter with one dummy SCSI disk. Each dummy disk uses kernel
1611 RAM as storage (i.e. it is a ramdisk). To save space when multiple 1612 RAM as storage (i.e. it is a ramdisk). To save space when multiple
1612 dummy disks are simulated, they share the same kernel RAM for 1613 dummy disks are simulated, they share the same kernel RAM for
1613 their storage. See <http://www.torque.net/sg/sdebug.html> for more 1614 their storage. See <http://sg.danny.cz/sg/sdebug26.html> for more
1614 information. This driver is primarily of use to those testing the 1615 information. This driver is primarily of use to those testing the
1615 SCSI and block subsystems. If unsure, say N. 1616 SCSI and block subsystems. If unsure, say N.
1616 1617
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index dae46d779c7b..29c0ed1cf507 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -771,7 +771,7 @@ static long aac_compat_cfg_ioctl(struct file *file, unsigned cmd, unsigned long
771{ 771{
772 if (!capable(CAP_SYS_RAWIO)) 772 if (!capable(CAP_SYS_RAWIO))
773 return -EPERM; 773 return -EPERM;
774 return aac_compat_do_ioctl((struct aac_dev *)file->private_data, cmd, arg); 774 return aac_compat_do_ioctl(file->private_data, cmd, arg);
775} 775}
776#endif 776#endif
777 777
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index c3d7174e3469..a345dde16c86 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -5509,7 +5509,7 @@ lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
5509 * @buff: Buffer containing config region 23 data. 5509 * @buff: Buffer containing config region 23 data.
5510 * @size: Size of the data buffer. 5510 * @size: Size of the data buffer.
5511 * 5511 *
5512 * This fuction parse the FCoE config parameters in config region 23 and 5512 * This function parses the FCoE config parameters in config region 23 and
5513 * populate driver data structure with the parameters. 5513 * populate driver data structure with the parameters.
5514 */ 5514 */
5515void 5515void
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 5428d53f5a13..909ed9ed24c0 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -212,7 +212,7 @@ static void sg_put_dev(Sg_device *sdp);
212 212
213static int sg_allow_access(struct file *filp, unsigned char *cmd) 213static int sg_allow_access(struct file *filp, unsigned char *cmd)
214{ 214{
215 struct sg_fd *sfp = (struct sg_fd *)filp->private_data; 215 struct sg_fd *sfp = filp->private_data;
216 216
217 if (sfp->parentdp->device->type == TYPE_SCANNER) 217 if (sfp->parentdp->device->type == TYPE_SCANNER)
218 return 0; 218 return 0;
diff --git a/drivers/serial/68328serial.h b/drivers/serial/68328serial.h
index 58aa2154655b..664ceb0a158c 100644
--- a/drivers/serial/68328serial.h
+++ b/drivers/serial/68328serial.h
@@ -181,13 +181,8 @@ struct m68k_serial {
181/* 181/*
182 * Define the number of ports supported and their irqs. 182 * Define the number of ports supported and their irqs.
183 */ 183 */
184#ifndef CONFIG_68328_SERIAL_UART2
185#define NR_PORTS 1 184#define NR_PORTS 1
186#define UART_IRQ_DEFNS {UART_IRQ_NUM} 185#define UART_IRQ_DEFNS {UART_IRQ_NUM}
187#else
188#define NR_PORTS 2
189#define UART_IRQ_DEFNS {UART1_IRQ_NUM, UART2_IRQ_NUM}
190#endif
191 186
192#endif /* __KERNEL__ */ 187#endif /* __KERNEL__ */
193#endif /* !(_MC683XX_SERIAL_H) */ 188#endif /* !(_MC683XX_SERIAL_H) */
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index 167c4a6ccbc3..4d8e14b7aa93 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -919,7 +919,7 @@ static int broken_efr(struct uart_8250_port *up)
919 /* 919 /*
920 * Exar ST16C2550 "A2" devices incorrectly detect as 920 * Exar ST16C2550 "A2" devices incorrectly detect as
921 * having an EFR, and report an ID of 0x0201. See 921 * having an EFR, and report an ID of 0x0201. See
922 * http://www.exar.com/info.php?pdf=dan180_oct2004.pdf 922 * http://linux.derkeiler.com/Mailing-Lists/Kernel/2004-11/4812.html
923 */ 923 */
924 if (autoconfig_read_divisor_id(up) == 0x0201 && size_fifo(up) == 16) 924 if (autoconfig_read_divisor_id(up) == 0x0201 && size_fifo(up) == 16)
925 return 1; 925 return 1;
diff --git a/drivers/serial/bfin_sport_uart.c b/drivers/serial/bfin_sport_uart.c
index 6f1b51e231e4..e95c524d9d18 100644
--- a/drivers/serial/bfin_sport_uart.c
+++ b/drivers/serial/bfin_sport_uart.c
@@ -10,7 +10,7 @@
10 10
11/* 11/*
12 * This driver and the hardware supported are in term of EE-191 of ADI. 12 * This driver and the hardware supported are in term of EE-191 of ADI.
13 * http://www.analog.com/UploadedFiles/Application_Notes/399447663EE191.pdf 13 * http://www.analog.com/static/imported-files/application_notes/EE191.pdf
14 * This application note describe how to implement a UART on a Sharc DSP, 14 * This application note describe how to implement a UART on a Sharc DSP,
15 * but this driver is implemented on Blackfin Processor. 15 * but this driver is implemented on Blackfin Processor.
16 * Transmit Frame Sync is not used by this driver to transfer data out. 16 * Transmit Frame Sync is not used by this driver to transfer data out.
diff --git a/drivers/serial/bfin_sport_uart.h b/drivers/serial/bfin_sport_uart.h
index 9ce253e381d2..6d06ce1d5675 100644
--- a/drivers/serial/bfin_sport_uart.h
+++ b/drivers/serial/bfin_sport_uart.h
@@ -10,7 +10,7 @@
10 10
11/* 11/*
12 * This driver and the hardware supported are in term of EE-191 of ADI. 12 * This driver and the hardware supported are in term of EE-191 of ADI.
13 * http://www.analog.com/UploadedFiles/Application_Notes/399447663EE191.pdf 13 * http://www.analog.com/static/imported-files/application_notes/EE191.pdf
14 * This application note describe how to implement a UART on a Sharc DSP, 14 * This application note describe how to implement a UART on a Sharc DSP,
15 * but this driver is implemented on Blackfin Processor. 15 * but this driver is implemented on Blackfin Processor.
16 * Transmit Frame Sync is not used by this driver to transfer data out. 16 * Transmit Frame Sync is not used by this driver to transfer data out.
diff --git a/drivers/serial/uartlite.c b/drivers/serial/uartlite.c
index c4bf54bb3fc7..d2fce865b731 100644
--- a/drivers/serial/uartlite.c
+++ b/drivers/serial/uartlite.c
@@ -44,7 +44,7 @@ MODULE_DEVICE_TABLE(of, ulite_of_match);
44 * Register definitions 44 * Register definitions
45 * 45 *
46 * For register details see datasheet: 46 * For register details see datasheet:
47 * http://www.xilinx.com/bvdocs/ipcenter/data_sheet/opb_uartlite.pdf 47 * http://www.xilinx.com/support/documentation/ip_documentation/opb_uartlite.pdf
48 */ 48 */
49 49
50#define ULITE_RX 0x00 50#define ULITE_RX 0x00
diff --git a/drivers/staging/asus_oled/README b/drivers/staging/asus_oled/README
index 96b9717f168f..0d82a6d5fa58 100644
--- a/drivers/staging/asus_oled/README
+++ b/drivers/staging/asus_oled/README
@@ -2,7 +2,7 @@
2 Driver for Asus OLED display present in some Asus laptops. 2 Driver for Asus OLED display present in some Asus laptops.
3 3
4 The code of this driver is based on 'asusoled' program taken from 4 The code of this driver is based on 'asusoled' program taken from
5 https://launchpad.net/asusoled/. I just wanted to have a simple 5 <http://lapsus.berlios.de/asus_oled.html>. I just wanted to have a simple
6 kernel driver for controlling this device, but I didn't know how 6 kernel driver for controlling this device, but I didn't know how
7 to do that. Now I know ;) Also, that program can not be used 7 to do that. Now I know ;) Also, that program can not be used
8 with usbhid loaded, which means no USB mouse/keyboard while 8 with usbhid loaded, which means no USB mouse/keyboard while
diff --git a/drivers/staging/asus_oled/asus_oled.c b/drivers/staging/asus_oled/asus_oled.c
index 5b279fb30f3f..8c95d8c2a4f4 100644
--- a/drivers/staging/asus_oled/asus_oled.c
+++ b/drivers/staging/asus_oled/asus_oled.c
@@ -24,7 +24,7 @@
24 * 24 *
25 * 25 *
26 * Asus OLED support is based on asusoled program taken from 26 * Asus OLED support is based on asusoled program taken from
27 * https://launchpad.net/asusoled/. 27 * <http://lapsus.berlios.de/asus_oled.html>.
28 * 28 *
29 * 29 *
30 */ 30 */
diff --git a/drivers/staging/comedi/drivers/cb_pcimdas.c b/drivers/staging/comedi/drivers/cb_pcimdas.c
index ced346a7cae3..78b1410ba4f6 100644
--- a/drivers/staging/comedi/drivers/cb_pcimdas.c
+++ b/drivers/staging/comedi/drivers/cb_pcimdas.c
@@ -37,7 +37,7 @@ Configuration Options:
37Developed from cb_pcidas and skel by Richard Bytheway (mocelet@sucs.org). 37Developed from cb_pcidas and skel by Richard Bytheway (mocelet@sucs.org).
38Only supports DIO, AO and simple AI in it's present form. 38Only supports DIO, AO and simple AI in it's present form.
39No interrupts, multi channel or FIFO AI, although the card looks like it could support this. 39No interrupts, multi channel or FIFO AI, although the card looks like it could support this.
40See http://www.measurementcomputing.com/PDFManuals/pcim-das1602_16.pdf for more details. 40See http://www.mccdaq.com/PDFs/Manuals/pcim-das1602-16.pdf for more details.
41*/ 41*/
42 42
43#include "../comedidev.h" 43#include "../comedidev.h"
diff --git a/drivers/staging/comedi/drivers/daqboard2000.c b/drivers/staging/comedi/drivers/daqboard2000.c
index 6af6c8323d56..82be77daa7d7 100644
--- a/drivers/staging/comedi/drivers/daqboard2000.c
+++ b/drivers/staging/comedi/drivers/daqboard2000.c
@@ -50,8 +50,8 @@ Configuration options:
50 With some help from our swedish distributor, we got the Windows sourcecode 50 With some help from our swedish distributor, we got the Windows sourcecode
51 for the card, and here are the findings so far. 51 for the card, and here are the findings so far.
52 52
53 1. A good document that describes the PCI interface chip is found at: 53 1. A good document that describes the PCI interface chip is 9080db-106.pdf
54 http://plx.plxtech.com/download/9080/databook/9080db-106.pdf 54 available from http://www.plxtech.com/products/io/pci9080
55 55
56 2. The initialization done so far is: 56 2. The initialization done so far is:
57 a. program the FPGA (windows code sans a lot of error messages) 57 a. program the FPGA (windows code sans a lot of error messages)
diff --git a/drivers/staging/comedi/drivers/ni_labpc.c b/drivers/staging/comedi/drivers/ni_labpc.c
index 3acf7e62bec4..1411dd8f4e7c 100644
--- a/drivers/staging/comedi/drivers/ni_labpc.c
+++ b/drivers/staging/comedi/drivers/ni_labpc.c
@@ -37,7 +37,7 @@ boards has not
37yet been added to the driver, mainly due to the fact that 37yet been added to the driver, mainly due to the fact that
38I don't know the device id numbers. If you have one 38I don't know the device id numbers. If you have one
39of these boards, 39of these boards,
40please file a bug report at https://bugs.comedi.org/ 40please file a bug report at http://comedi.org/
41so I can get the necessary information from you. 41so I can get the necessary information from you.
42 42
43The 1200 series boards have onboard calibration dacs for correcting 43The 1200 series boards have onboard calibration dacs for correcting
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index bd16f913af23..986ef6712989 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -34,7 +34,7 @@
34 340747b.pdf AT-MIO E series Register Level Programmer Manual 34 340747b.pdf AT-MIO E series Register Level Programmer Manual
35 341079b.pdf PCI E Series RLPM 35 341079b.pdf PCI E Series RLPM
36 340934b.pdf DAQ-STC reference manual 36 340934b.pdf DAQ-STC reference manual
37 67xx and 611x registers (from http://www.ni.com/pdf/daq/us) 37 67xx and 611x registers (from ftp://ftp.ni.com/support/daq/mhddk/documentation/)
38 release_ni611x.pdf 38 release_ni611x.pdf
39 release_ni67xx.pdf 39 release_ni67xx.pdf
40 Other possibly relevant info: 40 Other possibly relevant info:
diff --git a/drivers/staging/comedi/drivers/plx9080.h b/drivers/staging/comedi/drivers/plx9080.h
index 485d63f99293..0d254a1b78a7 100644
--- a/drivers/staging/comedi/drivers/plx9080.h
+++ b/drivers/staging/comedi/drivers/plx9080.h
@@ -13,7 +13,7 @@
13 * 13 *
14 ******************************************************************** 14 ********************************************************************
15 * 15 *
16 * Copyright (C) 1999 RG Studio s.c., http://www.rgstudio.com.pl/ 16 * Copyright (C) 1999 RG Studio s.c.
17 * Written by Krzysztof Halasa <khc@rgstudio.com.pl> 17 * Written by Krzysztof Halasa <khc@rgstudio.com.pl>
18 * 18 *
19 * Portions (C) SBE Inc., used by permission. 19 * Portions (C) SBE Inc., used by permission.
diff --git a/drivers/staging/comedi/drivers/rtd520.c b/drivers/staging/comedi/drivers/rtd520.c
index 0367d2b9e2fa..a49a7c566d37 100644
--- a/drivers/staging/comedi/drivers/rtd520.c
+++ b/drivers/staging/comedi/drivers/rtd520.c
@@ -59,7 +59,7 @@ Configuration options:
59 Data sheet: http://www.rtdusa.com/pdf/dm7520.pdf 59 Data sheet: http://www.rtdusa.com/pdf/dm7520.pdf
60 Example source: http://www.rtdusa.com/examples/dm/dm7520.zip 60 Example source: http://www.rtdusa.com/examples/dm/dm7520.zip
61 Call them and ask for the register level manual. 61 Call them and ask for the register level manual.
62 PCI chip: http://www.plxtech.com/products/toolbox/9080.htm 62 PCI chip: http://www.plxtech.com/products/io/pci9080
63 63
64 Notes: 64 Notes:
65 This board is memory mapped. There is some IO stuff, but it isn't needed. 65 This board is memory mapped. There is some IO stuff, but it isn't needed.
diff --git a/drivers/staging/quickstart/quickstart.c b/drivers/staging/quickstart/quickstart.c
index 66122479d529..ba8f670ec0a7 100644
--- a/drivers/staging/quickstart/quickstart.c
+++ b/drivers/staging/quickstart/quickstart.c
@@ -5,8 +5,7 @@
5 * Copyright (C) 2007-2010 Angelo Arrifano <miknix@gmail.com> 5 * Copyright (C) 2007-2010 Angelo Arrifano <miknix@gmail.com>
6 * 6 *
7 * Information gathered from disassebled dsdt and from here: 7 * Information gathered from disassebled dsdt and from here:
8 * "http://download.microsoft.com/download/9/c/5/ 8 * <http://www.microsoft.com/whdc/system/platform/firmware/DirAppLaunch.mspx>
9 * 9c5b2167-8017-4bae-9fde-d599bac8184a/DirAppLaunch_Vista.doc"
10 * 9 *
11 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/uio/Kconfig b/drivers/uio/Kconfig
index 1da73ecd9799..bb440792a1b7 100644
--- a/drivers/uio/Kconfig
+++ b/drivers/uio/Kconfig
@@ -17,9 +17,9 @@ config UIO_CIF
17 depends on PCI 17 depends on PCI
18 help 18 help
19 Driver for Hilscher CIF DeviceNet and Profibus cards. This 19 Driver for Hilscher CIF DeviceNet and Profibus cards. This
20 driver requires a userspace component that handles all of the 20 driver requires a userspace component called cif that handles
21 heavy lifting and can be found at: 21 all of the heavy lifting and can be found at:
22 http://www.osadl.org/projects/downloads/UIO/user/cif-* 22 <http://www.osadl.org/projects/downloads/UIO/user/>
23 23
24 To compile this driver as a module, choose M here: the module 24 To compile this driver as a module, choose M here: the module
25 will be called uio_cif. 25 will be called uio_cif.
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 387e503b9d14..bdec36acd0fa 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -1266,7 +1266,6 @@ write_in:
1266 csr |= AT91_UDP_TXPKTRDY; 1266 csr |= AT91_UDP_TXPKTRDY;
1267 __raw_writel(csr, creg); 1267 __raw_writel(csr, creg);
1268 udc->req_pending = 0; 1268 udc->req_pending = 0;
1269 return;
1270} 1269}
1271 1270
1272static void handle_ep0(struct at91_udc *udc) 1271static void handle_ep0(struct at91_udc *udc)
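This at91_udc.c hunk and most of the USB gadget/host hunks that follow apply the same mechanical cleanup: a bare return; as the last statement of a void function does nothing, so it is deleted and control simply falls off the closing brace. A hypothetical sketch of the pattern being removed (function and comment are illustrative only):

	static void fifo_kick(void)
	{
		/* ... poke the hardware ... */
		return;		/* redundant trailing return, dropped by these hunks */
	}

No behavioural change is expected from these deletions; the point is readability.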
diff --git a/drivers/usb/gadget/f_audio.c b/drivers/usb/gadget/f_audio.c
index 1f48ceb55a77..00975ed903d1 100644
--- a/drivers/usb/gadget/f_audio.c
+++ b/drivers/usb/gadget/f_audio.c
@@ -317,8 +317,6 @@ static void f_audio_playback_work(struct work_struct *data)
317 317
318 u_audio_playback(&audio->card, play_buf->buf, play_buf->actual); 318 u_audio_playback(&audio->card, play_buf->buf, play_buf->actual);
319 f_audio_buffer_free(play_buf); 319 f_audio_buffer_free(play_buf);
320
321 return;
322} 320}
323 321
324static int f_audio_out_ep_complete(struct usb_ep *ep, struct usb_request *req) 322static int f_audio_out_ep_complete(struct usb_ep *ep, struct usb_request *req)
diff --git a/drivers/usb/gadget/f_hid.c b/drivers/usb/gadget/f_hid.c
index 2b98bd26364b..4f891eddd060 100644
--- a/drivers/usb/gadget/f_hid.c
+++ b/drivers/usb/gadget/f_hid.c
@@ -318,8 +318,6 @@ static void hidg_set_report_complete(struct usb_ep *ep, struct usb_request *req)
318 spin_unlock(&hidg->spinlock); 318 spin_unlock(&hidg->spinlock);
319 319
320 wake_up(&hidg->read_queue); 320 wake_up(&hidg->read_queue);
321
322 return;
323} 321}
324 322
325static int hidg_setup(struct usb_function *f, 323static int hidg_setup(struct usb_function *f,
@@ -413,8 +411,6 @@ static void hidg_disable(struct usb_function *f)
413 411
414 usb_ep_disable(hidg->in_ep); 412 usb_ep_disable(hidg->in_ep);
415 hidg->in_ep->driver_data = NULL; 413 hidg->in_ep->driver_data = NULL;
416
417 return;
418} 414}
419 415
420static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt) 416static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index c16b402a876b..4c55eda4bd20 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -287,8 +287,6 @@ static void dr_controller_run(struct fsl_udc *udc)
287 temp = fsl_readl(&dr_regs->usbcmd); 287 temp = fsl_readl(&dr_regs->usbcmd);
288 temp |= USB_CMD_RUN_STOP; 288 temp |= USB_CMD_RUN_STOP;
289 fsl_writel(temp, &dr_regs->usbcmd); 289 fsl_writel(temp, &dr_regs->usbcmd);
290
291 return;
292} 290}
293 291
294static void dr_controller_stop(struct fsl_udc *udc) 292static void dr_controller_stop(struct fsl_udc *udc)
@@ -308,8 +306,6 @@ static void dr_controller_stop(struct fsl_udc *udc)
308 tmp = fsl_readl(&dr_regs->usbcmd); 306 tmp = fsl_readl(&dr_regs->usbcmd);
309 tmp &= ~USB_CMD_RUN_STOP; 307 tmp &= ~USB_CMD_RUN_STOP;
310 fsl_writel(tmp, &dr_regs->usbcmd); 308 fsl_writel(tmp, &dr_regs->usbcmd);
311
312 return;
313} 309}
314 310
315static void dr_ep_setup(unsigned char ep_num, unsigned char dir, 311static void dr_ep_setup(unsigned char ep_num, unsigned char dir,
@@ -416,8 +412,6 @@ static void struct_ep_qh_setup(struct fsl_udc *udc, unsigned char ep_num,
416 p_QH->max_pkt_length = cpu_to_le32(tmp); 412 p_QH->max_pkt_length = cpu_to_le32(tmp);
417 p_QH->next_dtd_ptr = 1; 413 p_QH->next_dtd_ptr = 1;
418 p_QH->size_ioc_int_sts = 0; 414 p_QH->size_ioc_int_sts = 0;
419
420 return;
421} 415}
422 416
423/* Setup qh structure and ep register for ep0. */ 417/* Setup qh structure and ep register for ep0. */
diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c
index 027d66f81620..2efd6732d130 100644
--- a/drivers/usb/gadget/pxa27x_udc.c
+++ b/drivers/usb/gadget/pxa27x_udc.c
@@ -1394,8 +1394,6 @@ static void pxa_ep_fifo_flush(struct usb_ep *_ep)
1394 } 1394 }
1395 1395
1396 spin_unlock_irqrestore(&ep->lock, flags); 1396 spin_unlock_irqrestore(&ep->lock, flags);
1397
1398 return;
1399} 1397}
1400 1398
1401/** 1399/**
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index 5b314041dfa9..d3cdffea9c8a 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -918,8 +918,6 @@ void rndis_deregister(int configNr)
918 918
919 if (configNr >= RNDIS_MAX_CONFIGS) return; 919 if (configNr >= RNDIS_MAX_CONFIGS) return;
920 rndis_per_dev_params[configNr].used = 0; 920 rndis_per_dev_params[configNr].used = 0;
921
922 return;
923} 921}
924 922
925int rndis_set_param_dev(u8 configNr, struct net_device *dev, u16 *cdc_filter) 923int rndis_set_param_dev(u8 configNr, struct net_device *dev, u16 *cdc_filter)
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 15fe3ecd203b..2adae8e39bba 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -1095,7 +1095,6 @@ nogood:
1095 ep->hcpriv = NULL; 1095 ep->hcpriv = NULL;
1096done: 1096done:
1097 spin_unlock_irqrestore (&ehci->lock, flags); 1097 spin_unlock_irqrestore (&ehci->lock, flags);
1098 return;
1099} 1098}
1100 1099
1101static void 1100static void
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
index 1dfb2c8f7707..e49b75a78000 100644
--- a/drivers/usb/host/imx21-hcd.c
+++ b/drivers/usb/host/imx21-hcd.c
@@ -27,8 +27,8 @@
27 * * 32 transfer descriptors (called ETDs) 27 * * 32 transfer descriptors (called ETDs)
28 * * 4Kb of Data memory 28 * * 4Kb of Data memory
29 * 29 *
30 * The data memory is shared between the host and fuction controlers 30 * The data memory is shared between the host and function controllers
31 * (but this driver only supports the host controler) 31 * (but this driver only supports the host controller)
32 * 32 *
33 * So setting up a transfer involves: 33 * So setting up a transfer involves:
34 * * Allocating a ETD 34 * * Allocating a ETD
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 3b5785032a10..f3713f43f3fe 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -370,7 +370,6 @@ sanitize:
370 } 370 }
371 ep->hcpriv = NULL; 371 ep->hcpriv = NULL;
372 spin_unlock_irqrestore (&ohci->lock, flags); 372 spin_unlock_irqrestore (&ohci->lock, flags);
373 return;
374} 373}
375 374
376static int ohci_get_frame (struct usb_hcd *hcd) 375static int ohci_get_frame (struct usb_hcd *hcd)
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index d32c3eae99cb..32149be4ad8e 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -544,8 +544,6 @@ static void oxu_buf_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
544 qtd->buffer = NULL; 544 qtd->buffer = NULL;
545 545
546 spin_unlock(&oxu->mem_lock); 546 spin_unlock(&oxu->mem_lock);
547
548 return;
549} 547}
550 548
551static inline void ehci_qtd_init(struct ehci_qtd *qtd, dma_addr_t dma) 549static inline void ehci_qtd_init(struct ehci_qtd *qtd, dma_addr_t dma)
@@ -571,8 +569,6 @@ static inline void oxu_qtd_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
571 oxu->qtd_used[index] = 0; 569 oxu->qtd_used[index] = 0;
572 570
573 spin_unlock(&oxu->mem_lock); 571 spin_unlock(&oxu->mem_lock);
574
575 return;
576} 572}
577 573
578static struct ehci_qtd *ehci_qtd_alloc(struct oxu_hcd *oxu) 574static struct ehci_qtd *ehci_qtd_alloc(struct oxu_hcd *oxu)
@@ -615,8 +611,6 @@ static void oxu_qh_free(struct oxu_hcd *oxu, struct ehci_qh *qh)
615 oxu->qh_used[index] = 0; 611 oxu->qh_used[index] = 0;
616 612
617 spin_unlock(&oxu->mem_lock); 613 spin_unlock(&oxu->mem_lock);
618
619 return;
620} 614}
621 615
622static void qh_destroy(struct kref *kref) 616static void qh_destroy(struct kref *kref)
@@ -693,8 +687,6 @@ static void oxu_murb_free(struct oxu_hcd *oxu, struct oxu_murb *murb)
693 oxu->murb_used[index] = 0; 687 oxu->murb_used[index] = 0;
694 688
695 spin_unlock(&oxu->mem_lock); 689 spin_unlock(&oxu->mem_lock);
696
697 return;
698} 690}
699 691
700static struct oxu_murb *oxu_murb_alloc(struct oxu_hcd *oxu) 692static struct oxu_murb *oxu_murb_alloc(struct oxu_hcd *oxu)
@@ -3070,7 +3062,6 @@ nogood:
3070 ep->hcpriv = NULL; 3062 ep->hcpriv = NULL;
3071done: 3063done:
3072 spin_unlock_irqrestore(&oxu->lock, flags); 3064 spin_unlock_irqrestore(&oxu->lock, flags);
3073 return;
3074} 3065}
3075 3066
3076static int oxu_get_frame(struct usb_hcd *hcd) 3067static int oxu_get_frame(struct usb_hcd *hcd)
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 464ed977b45d..4c502c890ebd 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -342,8 +342,6 @@ static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev)
342 writel(0x3f, op_reg_base + EHCI_USBSTS); 342 writel(0x3f, op_reg_base + EHCI_USBSTS);
343 343
344 iounmap(base); 344 iounmap(base);
345
346 return;
347} 345}
348 346
349/* 347/*
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 5b31bae92dbc..fab764946c74 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -316,7 +316,6 @@ static void u132_ring_requeue_work(struct u132 *u132, struct u132_ring *ring,
316 } else if (queue_delayed_work(workqueue, &ring->scheduler, 0)) 316 } else if (queue_delayed_work(workqueue, &ring->scheduler, 0))
317 return; 317 return;
318 kref_put(&u132->kref, u132_hcd_delete); 318 kref_put(&u132->kref, u132_hcd_delete);
319 return;
320} 319}
321 320
322static void u132_ring_queue_work(struct u132 *u132, struct u132_ring *ring, 321static void u132_ring_queue_work(struct u132 *u132, struct u132_ring *ring,
@@ -324,7 +323,6 @@ static void u132_ring_queue_work(struct u132 *u132, struct u132_ring *ring,
324{ 323{
325 kref_get(&u132->kref); 324 kref_get(&u132->kref);
326 u132_ring_requeue_work(u132, ring, delta); 325 u132_ring_requeue_work(u132, ring, delta);
327 return;
328} 326}
329 327
330static void u132_ring_cancel_work(struct u132 *u132, struct u132_ring *ring) 328static void u132_ring_cancel_work(struct u132 *u132, struct u132_ring *ring)
@@ -543,7 +541,6 @@ static void u132_hcd_giveback_urb(struct u132 *u132, struct u132_endp *endp,
543 mutex_unlock(&u132->scheduler_lock); 541 mutex_unlock(&u132->scheduler_lock);
544 u132_endp_put_kref(u132, endp); 542 u132_endp_put_kref(u132, endp);
545 usb_hcd_giveback_urb(hcd, urb, status); 543 usb_hcd_giveback_urb(hcd, urb, status);
546 return;
547} 544}
548 545
549static void u132_hcd_forget_urb(struct u132 *u132, struct u132_endp *endp, 546static void u132_hcd_forget_urb(struct u132 *u132, struct u132_endp *endp,
@@ -574,8 +571,8 @@ static void u132_hcd_abandon_urb(struct u132 *u132, struct u132_endp *endp,
574 endp->active = 0; 571 endp->active = 0;
575 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); 572 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
576 kfree(urbq); 573 kfree(urbq);
577 } usb_hcd_giveback_urb(hcd, urb, status); 574 }
578 return; 575 usb_hcd_giveback_urb(hcd, urb, status);
579} 576}
580 577
581static inline int edset_input(struct u132 *u132, struct u132_ring *ring, 578static inline int edset_input(struct u132 *u132, struct u132_ring *ring,
@@ -3085,7 +3082,6 @@ static void u132_initialise(struct u132 *u132, struct platform_device *pdev)
3085 u132->endp[endps] = NULL; 3082 u132->endp[endps] = NULL;
3086 3083
3087 mutex_unlock(&u132->sw_lock); 3084 mutex_unlock(&u132->sw_lock);
3088 return;
3089} 3085}
3090 3086
3091static int __devinit u132_probe(struct platform_device *pdev) 3087static int __devinit u132_probe(struct platform_device *pdev)
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index 3a6bcd5fee09..5a47805d9580 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -398,7 +398,6 @@ void mts_int_submit_urb (struct urb* transfer,
398 context->srb->result = DID_ERROR << 16; 398 context->srb->result = DID_ERROR << 16;
399 mts_transfer_cleanup(transfer); 399 mts_transfer_cleanup(transfer);
400 } 400 }
401 return;
402} 401}
403 402
404 403
@@ -409,7 +408,6 @@ static void mts_transfer_cleanup( struct urb *transfer )
409 408
410 if ( likely(context->final_callback != NULL) ) 409 if ( likely(context->final_callback != NULL) )
411 context->final_callback(context->srb); 410 context->final_callback(context->srb);
412
413} 411}
414 412
415static void mts_transfer_done( struct urb *transfer ) 413static void mts_transfer_done( struct urb *transfer )
@@ -420,8 +418,6 @@ static void mts_transfer_done( struct urb *transfer )
420 context->srb->result |= (unsigned)(*context->scsi_status)<<1; 418 context->srb->result |= (unsigned)(*context->scsi_status)<<1;
421 419
422 mts_transfer_cleanup(transfer); 420 mts_transfer_cleanup(transfer);
423
424 return;
425} 421}
426 422
427 423
@@ -452,8 +448,6 @@ static void mts_data_done( struct urb* transfer )
452 } 448 }
453 449
454 mts_get_status(transfer); 450 mts_get_status(transfer);
455
456 return;
457} 451}
458 452
459 453
@@ -496,8 +490,6 @@ static void mts_command_done( struct urb *transfer )
496 mts_get_status(transfer); 490 mts_get_status(transfer);
497 } 491 }
498 } 492 }
499
500 return;
501} 493}
502 494
503static void mts_do_sg (struct urb* transfer) 495static void mts_do_sg (struct urb* transfer)
@@ -522,7 +514,6 @@ static void mts_do_sg (struct urb* transfer)
522 sg[context->fragment].length, 514 sg[context->fragment].length,
523 context->fragment + 1 == scsi_sg_count(context->srb) ? 515 context->fragment + 1 == scsi_sg_count(context->srb) ?
524 mts_data_done : mts_do_sg); 516 mts_data_done : mts_do_sg);
525 return;
526} 517}
527 518
528static const u8 mts_read_image_sig[] = { 0x28, 00, 00, 00 }; 519static const u8 mts_read_image_sig[] = { 0x28, 00, 00, 00 };
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index c8eec9c2d89e..7839c98fa742 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -456,7 +456,6 @@ static void ftdi_elan_cancel_targets(struct usb_ftdi *ftdi)
456static void ftdi_elan_kick_command_queue(struct usb_ftdi *ftdi) 456static void ftdi_elan_kick_command_queue(struct usb_ftdi *ftdi)
457{ 457{
458 ftdi_command_queue_work(ftdi, 0); 458 ftdi_command_queue_work(ftdi, 0);
459 return;
460} 459}
461 460
462static void ftdi_elan_command_work(struct work_struct *work) 461static void ftdi_elan_command_work(struct work_struct *work)
@@ -483,7 +482,6 @@ static void ftdi_elan_command_work(struct work_struct *work)
483static void ftdi_elan_kick_respond_queue(struct usb_ftdi *ftdi) 482static void ftdi_elan_kick_respond_queue(struct usb_ftdi *ftdi)
484{ 483{
485 ftdi_respond_queue_work(ftdi, 0); 484 ftdi_respond_queue_work(ftdi, 0);
486 return;
487} 485}
488 486
489static void ftdi_elan_respond_work(struct work_struct *work) 487static void ftdi_elan_respond_work(struct work_struct *work)
diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c
index 812dc288bb8c..10405119985c 100644
--- a/drivers/usb/mon/mon_main.c
+++ b/drivers/usb/mon/mon_main.c
@@ -90,7 +90,6 @@ static void mon_bus_submit(struct mon_bus *mbus, struct urb *urb)
90 r->rnf_submit(r->r_data, urb); 90 r->rnf_submit(r->r_data, urb);
91 } 91 }
92 spin_unlock_irqrestore(&mbus->lock, flags); 92 spin_unlock_irqrestore(&mbus->lock, flags);
93 return;
94} 93}
95 94
96static void mon_submit(struct usb_bus *ubus, struct urb *urb) 95static void mon_submit(struct usb_bus *ubus, struct urb *urb)
@@ -117,7 +116,6 @@ static void mon_bus_submit_error(struct mon_bus *mbus, struct urb *urb, int erro
117 r->rnf_error(r->r_data, urb, error); 116 r->rnf_error(r->r_data, urb, error);
118 } 117 }
119 spin_unlock_irqrestore(&mbus->lock, flags); 118 spin_unlock_irqrestore(&mbus->lock, flags);
120 return;
121} 119}
122 120
123static void mon_submit_error(struct usb_bus *ubus, struct urb *urb, int error) 121static void mon_submit_error(struct usb_bus *ubus, struct urb *urb, int error)
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index 95058109f9fa..c2b29761fa98 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -176,7 +176,7 @@ config USB_SERIAL_VISOR
176 help 176 help
177 Say Y here if you want to connect to your HandSpring Visor, Palm 177 Say Y here if you want to connect to your HandSpring Visor, Palm
178 m500 or m505 through its USB docking station. See 178 m500 or m505 through its USB docking station. See
179 <http://usbvisor.sourceforge.net/> for more information on using this 179 <http://usbvisor.sourceforge.net/index.php3> for more information on using this
180 driver. 180 driver.
181 181
182 To compile this driver as a module, choose M here: the 182 To compile this driver as a module, choose M here: the
@@ -289,7 +289,7 @@ config USB_SERIAL_KEYSPAN
289 and was developed with their support. You must also include 289 and was developed with their support. You must also include
290 firmware to support your particular device(s). 290 firmware to support your particular device(s).
291 291
292 See <http://misc.nu/hugh/keyspan.html> for more information. 292 See <http://blemings.org/hugh/keyspan.html> for more information.
293 293
294 To compile this driver as a module, choose M here: the 294 To compile this driver as a module, choose M here: the
295 module will be called keyspan. 295 module will be called keyspan.
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index f5d06746cc3b..2edf238b00b9 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -1320,8 +1320,6 @@ continue_read:
1320 cypress_set_dead(port); 1320 cypress_set_dead(port);
1321 } 1321 }
1322 } 1322 }
1323
1324 return;
1325} /* cypress_read_int_callback */ 1323} /* cypress_read_int_callback */
1326 1324
1327 1325
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 37b57c785cc7..89a9a5847803 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -2108,7 +2108,6 @@ static void ftdi_set_termios(struct tty_struct *tty,
2108 } 2108 }
2109 2109
2110 } 2110 }
2111 return;
2112} 2111}
2113 2112
2114static int ftdi_tiocmget(struct tty_struct *tty, struct file *file) 2113static int ftdi_tiocmget(struct tty_struct *tty, struct file *file)
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index cf1aea1b9ee7..7dfe02f1fb6a 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -46,7 +46,7 @@
46#define FTDI_USINT_RS232_PID 0xb812 /* Navigator RS232 and CONFIG lines */ 46#define FTDI_USINT_RS232_PID 0xb812 /* Navigator RS232 and CONFIG lines */
47 47
48/* OOCDlink by Joern Kaipf <joernk@web.de> 48/* OOCDlink by Joern Kaipf <joernk@web.de>
49 * (http://www.joernonline.de/dw/doku.php?id=start&idx=projects:oocdlink) */ 49 * (http://www.joernonline.de/) */
50#define FTDI_OOCDLINK_PID 0xbaf8 /* Amontec JTAGkey */ 50#define FTDI_OOCDLINK_PID 0xbaf8 /* Amontec JTAGkey */
51 51
52/* Luminary Micro Stellaris Boards, VID = FTDI_VID */ 52/* Luminary Micro Stellaris Boards, VID = FTDI_VID */
@@ -336,7 +336,7 @@
336#define FTDI_PIEGROUP_PID 0xF208 /* Product Id */ 336#define FTDI_PIEGROUP_PID 0xF208 /* Product Id */
337 337
338/* ACT Solutions HomePro ZWave interface 338/* ACT Solutions HomePro ZWave interface
339 (http://www.act-solutions.com/HomePro.htm) */ 339 (http://www.act-solutions.com/HomePro-Product-Matrix.html) */
340#define FTDI_ACTZWAVE_PID 0xF2D0 340#define FTDI_ACTZWAVE_PID 0xF2D0
341 341
342/* 342/*
@@ -367,7 +367,7 @@
367#define FTDI_SUUNTO_SPORTS_PID 0xF680 /* Suunto Sports instrument */ 367#define FTDI_SUUNTO_SPORTS_PID 0xF680 /* Suunto Sports instrument */
368 368
369/* USB-UIRT - An infrared receiver and transmitter using the 8U232AM chip */ 369/* USB-UIRT - An infrared receiver and transmitter using the 8U232AM chip */
370/* http://home.earthlink.net/~jrhees/USBUIRT/index.htm */ 370/* http://www.usbuirt.com/ */
371#define FTDI_USB_UIRT_PID 0xF850 /* Product Id */ 371#define FTDI_USB_UIRT_PID 0xF850 /* Product Id */
372 372
373/* CCS Inc. ICDU/ICDU40 product ID - 373/* CCS Inc. ICDU/ICDU40 product ID -
@@ -396,7 +396,7 @@
396 */ 396 */
397#define FTDI_HE_TIRA1_PID 0xFA78 /* Tira-1 IR transceiver */ 397#define FTDI_HE_TIRA1_PID 0xFA78 /* Tira-1 IR transceiver */
398 398
399/* Inside Accesso contactless reader (http://www.insidefr.com) */ 399/* Inside Accesso contactless reader (http://www.insidecontactless.com/) */
400#define INSIDE_ACCESSO 0xFAD0 400#define INSIDE_ACCESSO 0xFAD0
401 401
402/* 402/*
@@ -635,14 +635,14 @@
635 635
636/* 636/*
637 * JETI SPECTROMETER SPECBOS 1201 637 * JETI SPECTROMETER SPECBOS 1201
638 * http://www.jeti.com/products/sys/scb/scb1201.php 638 * http://www.jeti.com/cms/index.php/instruments/other-instruments/specbos-2101
639 */ 639 */
640#define JETI_VID 0x0c6c 640#define JETI_VID 0x0c6c
641#define JETI_SPC1201_PID 0x04b2 641#define JETI_SPC1201_PID 0x04b2
642 642
643/* 643/*
644 * FTDI USB UART chips used in construction projects from the 644 * FTDI USB UART chips used in construction projects from the
645 * Elektor Electronics magazine (http://elektor-electronics.co.uk) 645 * Elektor Electronics magazine (http://www.elektor.com/)
646 */ 646 */
647#define ELEKTOR_VID 0x0C7D 647#define ELEKTOR_VID 0x0C7D
648#define ELEKTOR_FT323R_PID 0x0005 /* RFID-Reader, issue 09-2006 */ 648#define ELEKTOR_FT323R_PID 0x0005 /* RFID-Reader, issue 09-2006 */
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index a42b29a695b2..26710b189918 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -1264,7 +1264,6 @@ static void garmin_read_bulk_callback(struct urb *urb)
1264 garmin_data_p->flags &= ~FLAGS_BULK_IN_ACTIVE; 1264 garmin_data_p->flags &= ~FLAGS_BULK_IN_ACTIVE;
1265 spin_unlock_irqrestore(&garmin_data_p->lock, flags); 1265 spin_unlock_irqrestore(&garmin_data_p->lock, flags);
1266 } 1266 }
1267 return;
1268} 1267}
1269 1268
1270 1269
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index a0ab78ada25e..cd769ef24f8a 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -1467,8 +1467,6 @@ static void edge_throttle(struct tty_struct *tty)
1467 if (status != 0) 1467 if (status != 0)
1468 return; 1468 return;
1469 } 1469 }
1470
1471 return;
1472} 1470}
1473 1471
1474 1472
@@ -1775,8 +1773,6 @@ static void edge_break(struct tty_struct *tty, int break_state)
1775 dbg("%s - error sending break set/clear command.", 1773 dbg("%s - error sending break set/clear command.",
1776 __func__); 1774 __func__);
1777 } 1775 }
1778
1779 return;
1780} 1776}
1781 1777
1782 1778
@@ -2047,7 +2043,6 @@ static void process_rcvd_status(struct edgeport_serial *edge_serial,
2047 dbg("%s - Unrecognized IOSP status code %u", __func__, code); 2043 dbg("%s - Unrecognized IOSP status code %u", __func__, code);
2048 break; 2044 break;
2049 } 2045 }
2050 return;
2051} 2046}
2052 2047
2053 2048
@@ -2100,8 +2095,6 @@ static void handle_new_msr(struct edgeport_port *edge_port, __u8 newMsr)
2100 2095
2101 /* Save the new modem status */ 2096 /* Save the new modem status */
2102 edge_port->shadowMSR = newMsr & 0xf0; 2097 edge_port->shadowMSR = newMsr & 0xf0;
2103
2104 return;
2105} 2098}
2106 2099
2107 2100
@@ -2148,8 +2141,6 @@ static void handle_new_lsr(struct edgeport_port *edge_port, __u8 lsrData,
2148 icount->parity++; 2141 icount->parity++;
2149 if (newLsr & LSR_FRM_ERR) 2142 if (newLsr & LSR_FRM_ERR)
2150 icount->frame++; 2143 icount->frame++;
2151
2152 return;
2153} 2144}
2154 2145
2155 2146
@@ -2725,7 +2716,6 @@ static void change_port_settings(struct tty_struct *tty,
2725 baud = tty_termios_baud_rate(old_termios); 2716 baud = tty_termios_baud_rate(old_termios);
2726 tty_encode_baud_rate(tty, baud, baud); 2717 tty_encode_baud_rate(tty, baud, baud);
2727 } 2718 }
2728 return;
2729} 2719}
2730 2720
2731 2721
@@ -2927,7 +2917,6 @@ static void load_application_firmware(struct edgeport_serial *edge_serial)
2927 0x40, 0x4000, 0x0001, NULL, 0, 3000); 2917 0x40, 0x4000, 0x0001, NULL, 0, 3000);
2928 2918
2929 release_firmware(fw); 2919 release_firmware(fw);
2930 return;
2931} 2920}
2932 2921
2933 2922
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index 4dad27a0f22a..22506b095c4f 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -1571,8 +1571,6 @@ static void handle_new_msr(struct edgeport_port *edge_port, __u8 msr)
1571 } 1571 }
1572 } 1572 }
1573 tty_kref_put(tty); 1573 tty_kref_put(tty);
1574
1575 return;
1576} 1574}
1577 1575
1578static void handle_new_lsr(struct edgeport_port *edge_port, int lsr_data, 1576static void handle_new_lsr(struct edgeport_port *edge_port, int lsr_data,
@@ -2424,7 +2422,6 @@ static void change_port_settings(struct tty_struct *tty,
2424 dbg("%s - error %d when trying to write config to device", 2422 dbg("%s - error %d when trying to write config to device",
2425 __func__, status); 2423 __func__, status);
2426 kfree(config); 2424 kfree(config);
2427 return;
2428} 2425}
2429 2426
2430static void edge_set_termios(struct tty_struct *tty, 2427static void edge_set_termios(struct tty_struct *tty,
@@ -2445,7 +2442,6 @@ static void edge_set_termios(struct tty_struct *tty,
2445 return; 2442 return;
2446 /* change the port settings to the new ones specified */ 2443 /* change the port settings to the new ones specified */
2447 change_port_settings(tty, edge_port, old_termios); 2444 change_port_settings(tty, edge_port, old_termios);
2448 return;
2449} 2445}
2450 2446
2451static int edge_tiocmset(struct tty_struct *tty, struct file *file, 2447static int edge_tiocmset(struct tty_struct *tty, struct file *file,
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index efc72113216b..12ed594f5f80 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -807,7 +807,6 @@ static void read_rxcmd_callback(struct urb *urb)
807 iuu_uart_read_callback, port); 807 iuu_uart_read_callback, port);
808 result = usb_submit_urb(port->read_urb, GFP_ATOMIC); 808 result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
809 dbg("%s - submit result = %d", __func__, result); 809 dbg("%s - submit result = %d", __func__, result);
810 return;
811} 810}
812 811
813static int iuu_uart_on(struct usb_serial_port *port) 812static int iuu_uart_on(struct usb_serial_port *port)
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 297163c3c610..0791778a66f3 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -9,7 +9,7 @@
9 the Free Software Foundation; either version 2 of the License, or 9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version. 10 (at your option) any later version.
11 11
12 See http://misc.nu/hugh/keyspan.html for more information. 12 See http://blemings.org/hugh/keyspan.html for more information.
13 13
14 Code in this driver inspired by and in a number of places taken 14 Code in this driver inspired by and in a number of places taken
15 from Brian Warner's original Keyspan-PDA driver. 15 from Brian Warner's original Keyspan-PDA driver.
diff --git a/drivers/usb/serial/keyspan.h b/drivers/usb/serial/keyspan.h
index bf3297ddd186..2d8baf6ac472 100644
--- a/drivers/usb/serial/keyspan.h
+++ b/drivers/usb/serial/keyspan.h
@@ -9,7 +9,7 @@
9 the Free Software Foundation; either version 2 of the License, or 9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version. 10 (at your option) any later version.
11 11
12 See http://misc.nu/hugh/keyspan.html for more information. 12 See http://blemings.org/hugh/keyspan.html for more information.
13 13
14 Code in this driver inspired by and in a number of places taken 14 Code in this driver inspired by and in a number of places taken
15 from Brian Warner's original Keyspan-PDA driver. 15 from Brian Warner's original Keyspan-PDA driver.
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index 185fe9a7d4e0..a10dd5676ccc 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -292,7 +292,6 @@ static void keyspan_pda_rx_unthrottle(struct tty_struct *tty)
292 port->interrupt_in_urb->dev = port->serial->dev; 292 port->interrupt_in_urb->dev = port->serial->dev;
293 if (usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL)) 293 if (usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL))
294 dbg(" usb_submit_urb(read urb) failed"); 294 dbg(" usb_submit_urb(read urb) failed");
295 return;
296} 295}
297 296
298 297
diff --git a/drivers/usb/serial/mct_u232.h b/drivers/usb/serial/mct_u232.h
index 3a3f5e6b8f96..d325bb8cb583 100644
--- a/drivers/usb/serial/mct_u232.h
+++ b/drivers/usb/serial/mct_u232.h
@@ -10,10 +10,9 @@
10 * 10 *
11 * This driver is for the device MCT USB-RS232 Converter (25 pin, Model No. 11 * This driver is for the device MCT USB-RS232 Converter (25 pin, Model No.
12 * U232-P25) from Magic Control Technology Corp. (there is also a 9 pin 12 * U232-P25) from Magic Control Technology Corp. (there is also a 9 pin
13 * Model No. U232-P9). See http://www.mct.com.tw/p_u232.html for further 13 * Model No. U232-P9). See http://www.mct.com.tw/products/product_us232.html
14 * information. The properties of this device are listed at the end of this 14 * for further information. The properties of this device are listed at the end
15 * file. This device is available from various distributors. I know Hana, 15 * of this file. This device was used in the Dlink DSB-S25.
16 * http://www.hana.de and D-Link, http://www.dlink.com/products/usb/dsbs25.
17 * 16 *
18 * All of the information about the device was acquired by using SniffUSB 17 * All of the information about the device was acquired by using SniffUSB
19 * on Windows98. The technical details of the reverse engineering are 18 * on Windows98. The technical details of the reverse engineering are
@@ -458,7 +457,7 @@ static int mct_u232_calculate_baud_rate(struct usb_serial *serial,
458 * embedded UART. Exhaustive documentation for these is available at: 457 * embedded UART. Exhaustive documentation for these is available at:
459 * 458 *
460 * http://www.semiconductors.philips.com/pip/p87c52ubaa 459 * http://www.semiconductors.philips.com/pip/p87c52ubaa
461 * http://www.semiconductors.philips.com/pip/pdiusbd12 460 * http://www.nxp.com/acrobat_download/various/PDIUSBD12_PROGRAMMING_GUIDE.pdf
462 * 461 *
463 * Thanks to Julian Highfield for the pointer to the Philips database. 462 * Thanks to Julian Highfield for the pointer to the Philips database.
464 * 463 *
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index fd0b6414f459..7d3bc9a3e2b6 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -827,7 +827,6 @@ exit:
827 dev_err(&urb->dev->dev, 827 dev_err(&urb->dev->dev,
828 "%s - Error %d submitting control urb\n", 828 "%s - Error %d submitting control urb\n",
829 __func__, result); 829 __func__, result);
830 return;
831} 830}
832 831
833/* 832/*
@@ -907,7 +906,6 @@ exit:
907 dev_err(&urb->dev->dev, 906 dev_err(&urb->dev->dev,
908 "%s - Error %d submitting control urb\n", 907 "%s - Error %d submitting control urb\n",
909 __func__, result); 908 __func__, result);
910 return;
911} 909}
912 910
913/* 911/*
@@ -1227,8 +1225,6 @@ static void mos7720_break(struct tty_struct *tty, int break_state)
1227 mos7720_port->shadowLCR = data; 1225 mos7720_port->shadowLCR = data;
1228 write_mos_reg(serial, port->number - port->serial->minor, 1226 write_mos_reg(serial, port->number - port->serial->minor,
1229 LCR, mos7720_port->shadowLCR); 1227 LCR, mos7720_port->shadowLCR);
1230
1231 return;
1232} 1228}
1233 1229
1234/* 1230/*
@@ -1746,7 +1742,6 @@ static void change_port_settings(struct tty_struct *tty,
1746 dbg("usb_submit_urb(read bulk) failed, status = %d", 1742 dbg("usb_submit_urb(read bulk) failed, status = %d",
1747 status); 1743 status);
1748 } 1744 }
1749 return;
1750} 1745}
1751 1746
1752/* 1747/*
@@ -1803,7 +1798,6 @@ static void mos7720_set_termios(struct tty_struct *tty,
1803 dbg("usb_submit_urb(read bulk) failed, status = %d", 1798 dbg("usb_submit_urb(read bulk) failed, status = %d",
1804 status); 1799 status);
1805 } 1800 }
1806 return;
1807} 1801}
1808 1802
1809/* 1803/*
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 93dad5853cd5..5627993f9e41 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -1367,8 +1367,6 @@ static void mos7840_break(struct tty_struct *tty, int break_state)
1367 mos7840_port->shadowLCR); 1367 mos7840_port->shadowLCR);
1368 mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, 1368 mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER,
1369 mos7840_port->shadowLCR); 1369 mos7840_port->shadowLCR);
1370
1371 return;
1372} 1370}
1373 1371
1374/***************************************************************************** 1372/*****************************************************************************
@@ -1599,8 +1597,6 @@ static void mos7840_throttle(struct tty_struct *tty)
1599 if (status < 0) 1597 if (status < 0)
1600 return; 1598 return;
1601 } 1599 }
1602
1603 return;
1604} 1600}
1605 1601
1606/***************************************************************************** 1602/*****************************************************************************
@@ -2075,8 +2071,6 @@ static void mos7840_change_port_settings(struct tty_struct *tty,
2075 mos7840_port->delta_msr_cond = 1; 2071 mos7840_port->delta_msr_cond = 1;
2076 dbg("mos7840_change_port_settings mos7840_port->shadowLCR is End %x", 2072 dbg("mos7840_change_port_settings mos7840_port->shadowLCR is End %x",
2077 mos7840_port->shadowLCR); 2073 mos7840_port->shadowLCR);
2078
2079 return;
2080} 2074}
2081 2075
2082/***************************************************************************** 2076/*****************************************************************************
@@ -2145,7 +2139,6 @@ static void mos7840_set_termios(struct tty_struct *tty,
2145 mos7840_port->read_urb_busy = false; 2139 mos7840_port->read_urb_busy = false;
2146 } 2140 }
2147 } 2141 }
2148 return;
2149} 2142}
2150 2143
2151/***************************************************************************** 2144/*****************************************************************************
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index 89c724c0ac0a..60f38d5e64fc 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -246,8 +246,6 @@ static void omninet_read_bulk_callback(struct urb *urb)
246 dev_err(&port->dev, 246 dev_err(&port->dev,
247 "%s - failed resubmitting read urb, error %d\n", 247 "%s - failed resubmitting read urb, error %d\n",
248 __func__, result); 248 __func__, result);
249
250 return;
251} 249}
252 250
253static int omninet_write(struct tty_struct *tty, struct usb_serial_port *port, 251static int omninet_write(struct tty_struct *tty, struct usb_serial_port *port,
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index d47b56e9e8ce..7481ff8a49e4 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -620,8 +620,6 @@ static void sierra_indat_callback(struct urb *urb)
620 dev_err(&port->dev, "resubmit read urb failed." 620 dev_err(&port->dev, "resubmit read urb failed."
621 "(%d)\n", err); 621 "(%d)\n", err);
622 } 622 }
623
624 return;
625} 623}
626 624
627static void sierra_instat_callback(struct urb *urb) 625static void sierra_instat_callback(struct urb *urb)
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index 329d311a35d9..765aa983bf58 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -441,7 +441,6 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
441 spcp8x5_set_workMode(serial->dev, 0x000a, 441 spcp8x5_set_workMode(serial->dev, 0x000a,
442 SET_WORKING_MODE_U2C, priv->type); 442 SET_WORKING_MODE_U2C, priv->type);
443 } 443 }
444 return;
445} 444}
446 445
447/* open the serial port. do some usb system call. set termios and get the line 446/* open the serial port. do some usb system call. set termios and get the line
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index 0c70b4a621bb..fbc946797801 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -234,7 +234,6 @@ static void usb_wwan_indat_callback(struct urb *urb)
234 } 234 }
235 235
236 } 236 }
237 return;
238} 237}
239 238
240static void usb_wwan_outdat_callback(struct urb *urb) 239static void usb_wwan_outdat_callback(struct urb *urb)
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 12ed8209ca72..3f9ac88d588c 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -655,8 +655,6 @@ static void whiteheat_release(struct usb_serial *serial)
655 } 655 }
656 kfree(info); 656 kfree(info);
657 } 657 }
658
659 return;
660} 658}
661 659
662static int whiteheat_open(struct tty_struct *tty, struct usb_serial_port *port) 660static int whiteheat_open(struct tty_struct *tty, struct usb_serial_port *port)
@@ -955,8 +953,6 @@ static void whiteheat_throttle(struct tty_struct *tty)
955 spin_lock_irq(&info->lock); 953 spin_lock_irq(&info->lock);
956 info->flags |= THROTTLED; 954 info->flags |= THROTTLED;
957 spin_unlock_irq(&info->lock); 955 spin_unlock_irq(&info->lock);
958
959 return;
960} 956}
961 957
962 958
@@ -975,8 +971,6 @@ static void whiteheat_unthrottle(struct tty_struct *tty)
975 971
976 if (actually_throttled) 972 if (actually_throttled)
977 rx_data_softint(&info->rx_work); 973 rx_data_softint(&info->rx_work);
978
979 return;
980} 974}
981 975
982 976
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
index f2767cf2e229..49a489e03716 100644
--- a/drivers/usb/storage/Kconfig
+++ b/drivers/usb/storage/Kconfig
@@ -36,7 +36,7 @@ config USB_STORAGE_DATAFAB
36 depends on USB_STORAGE 36 depends on USB_STORAGE
37 help 37 help
38 Support for certain Datafab CompactFlash readers. 38 Support for certain Datafab CompactFlash readers.
39 Datafab has a web page at <http://www.datafabusa.com/>. 39 Datafab has a web page at <http://www.datafab.com/>.
40 40
41 If this driver is compiled as a module, it will be named ums-datafab. 41 If this driver is compiled as a module, it will be named ums-datafab.
42 42
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 8b31fdfefc98..dc06ff134559 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -49,7 +49,7 @@ menuconfig FB
49 You need an utility program called fbset to make full use of frame 49 You need an utility program called fbset to make full use of frame
50 buffer devices. Please read <file:Documentation/fb/framebuffer.txt> 50 buffer devices. Please read <file:Documentation/fb/framebuffer.txt>
51 and the Framebuffer-HOWTO at 51 and the Framebuffer-HOWTO at
52 <http://www.munted.org.uk/programming/Framebuffer-HOWTO-1.2.html> for more 52 <http://www.munted.org.uk/programming/Framebuffer-HOWTO-1.3.html> for more
53 information. 53 information.
54 54
55 Say Y here and to the driver for your graphics board below if you 55 Say Y here and to the driver for your graphics board below if you
@@ -955,7 +955,7 @@ config FB_EPSON1355
955 Build in support for the SED1355 Epson Research Embedded RAMDAC 955 Build in support for the SED1355 Epson Research Embedded RAMDAC
956 LCD/CRT Controller (since redesignated as the S1D13505) as a 956 LCD/CRT Controller (since redesignated as the S1D13505) as a
957 framebuffer. Product specs at 957 framebuffer. Product specs at
958 <http://www.erd.epson.com/vdc/html/products.htm>. 958 <http://vdc.epson.com/>.
959 959
960config FB_S1D13XXX 960config FB_S1D13XXX
961 tristate "Epson S1D13XXX framebuffer support" 961 tristate "Epson S1D13XXX framebuffer support"
@@ -966,7 +966,7 @@ config FB_S1D13XXX
966 help 966 help
967 Support for S1D13XXX framebuffer device family (currently only 967 Support for S1D13XXX framebuffer device family (currently only
968 working with S1D13806). Product specs at 968 working with S1D13806). Product specs at
969 <http://www.erd.epson.com/vdc/html/legacy_13xxx.htm> 969 <http://vdc.epson.com/>
970 970
971config FB_ATMEL 971config FB_ATMEL
972 tristate "AT91/AT32 LCD Controller support" 972 tristate "AT91/AT32 LCD Controller support"
@@ -1323,7 +1323,7 @@ config FB_RADEON
1323 don't need to choose this to run the Radeon in plain VGA mode. 1323 don't need to choose this to run the Radeon in plain VGA mode.
1324 1324
1325 There is a product page at 1325 There is a product page at
1326 http://apps.ati.com/ATIcompare/ 1326 http://products.amd.com/en-us/GraphicCardResult.aspx
1327 1327
1328config FB_RADEON_I2C 1328config FB_RADEON_I2C
1329 bool "DDC/I2C for ATI Radeon support" 1329 bool "DDC/I2C for ATI Radeon support"
@@ -1395,7 +1395,7 @@ config FB_ATY_CT
1395 Say Y here to support use of ATI's 64-bit Rage boards (or other 1395 Say Y here to support use of ATI's 64-bit Rage boards (or other
1396 boards based on the Mach64 CT, VT, GT, and LT chipsets) as a 1396 boards based on the Mach64 CT, VT, GT, and LT chipsets) as a
1397 framebuffer device. The ATI product support page for these boards 1397 framebuffer device. The ATI product support page for these boards
1398 is at <http://support.ati.com/products/pc/mach64/>. 1398 is at <http://support.ati.com/products/pc/mach64/mach64.html>.
1399 1399
1400config FB_ATY_GENERIC_LCD 1400config FB_ATY_GENERIC_LCD
1401 bool "Mach64 generic LCD support (EXPERIMENTAL)" 1401 bool "Mach64 generic LCD support (EXPERIMENTAL)"
diff --git a/drivers/video/arcfb.c b/drivers/video/arcfb.c
index f3d7440f0072..3ec4923c2d84 100644
--- a/drivers/video/arcfb.c
+++ b/drivers/video/arcfb.c
@@ -2,7 +2,6 @@
2 * linux/drivers/video/arcfb.c -- FB driver for Arc monochrome LCD board 2 * linux/drivers/video/arcfb.c -- FB driver for Arc monochrome LCD board
3 * 3 *
4 * Copyright (C) 2005, Jaya Kumar <jayalk@intworks.biz> 4 * Copyright (C) 2005, Jaya Kumar <jayalk@intworks.biz>
5 * http://www.intworks.biz/arclcd
6 * 5 *
7 * This file is subject to the terms and conditions of the GNU General Public 6 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file COPYING in the main directory of this archive for 7 * License. See the file COPYING in the main directory of this archive for
diff --git a/drivers/video/aty/radeon_i2c.c b/drivers/video/aty/radeon_i2c.c
index 359fc64e761a..78d1f4cd1fe0 100644
--- a/drivers/video/aty/radeon_i2c.c
+++ b/drivers/video/aty/radeon_i2c.c
@@ -7,7 +7,6 @@
7 7
8 8
9#include <linux/i2c.h> 9#include <linux/i2c.h>
10#include <linux/i2c-id.h>
11#include <linux/i2c-algo-bit.h> 10#include <linux/i2c-algo-bit.h>
12 11
13#include <asm/io.h> 12#include <asm/io.h>
diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
index b020ba7f1cf2..e7d0f525041e 100644
--- a/drivers/video/bf54x-lq043fb.c
+++ b/drivers/video/bf54x-lq043fb.c
@@ -241,12 +241,12 @@ static int request_ports(struct bfin_bf54xfb_info *fbi)
241 u16 disp = fbi->mach_info->disp; 241 u16 disp = fbi->mach_info->disp;
242 242
243 if (gpio_request(disp, DRIVER_NAME)) { 243 if (gpio_request(disp, DRIVER_NAME)) {
244 printk(KERN_ERR "Requesting GPIO %d faild\n", disp); 244 printk(KERN_ERR "Requesting GPIO %d failed\n", disp);
245 return -EFAULT; 245 return -EFAULT;
246 } 246 }
247 247
248 if (peripheral_request_list(eppi_req_18, DRIVER_NAME)) { 248 if (peripheral_request_list(eppi_req_18, DRIVER_NAME)) {
249 printk(KERN_ERR "Requesting Peripherals faild\n"); 249 printk(KERN_ERR "Requesting Peripherals failed\n");
250 gpio_free(disp); 250 gpio_free(disp);
251 return -EFAULT; 251 return -EFAULT;
252 } 252 }
@@ -256,7 +256,7 @@ static int request_ports(struct bfin_bf54xfb_info *fbi)
256 u16 eppi_req_24[] = EPPI0_24; 256 u16 eppi_req_24[] = EPPI0_24;
257 257
258 if (peripheral_request_list(eppi_req_24, DRIVER_NAME)) { 258 if (peripheral_request_list(eppi_req_24, DRIVER_NAME)) {
259 printk(KERN_ERR "Requesting Peripherals faild\n"); 259 printk(KERN_ERR "Requesting Peripherals failed\n");
260 peripheral_free_list(eppi_req_18); 260 peripheral_free_list(eppi_req_18);
261 gpio_free(disp); 261 gpio_free(disp);
262 return -EFAULT; 262 return -EFAULT;
diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
index 7a50272eaab9..3cf77676947c 100644
--- a/drivers/video/bfin-t350mcqb-fb.c
+++ b/drivers/video/bfin-t350mcqb-fb.c
@@ -192,7 +192,7 @@ static int bfin_t350mcqb_request_ports(int action)
192{ 192{
193 if (action) { 193 if (action) {
194 if (peripheral_request_list(ppi0_req_8, DRIVER_NAME)) { 194 if (peripheral_request_list(ppi0_req_8, DRIVER_NAME)) {
195 printk(KERN_ERR "Requesting Peripherals faild\n"); 195 printk(KERN_ERR "Requesting Peripherals failed\n");
196 return -EFAULT; 196 return -EFAULT;
197 } 197 }
198 } else 198 } else
diff --git a/drivers/video/epson1355fb.c b/drivers/video/epson1355fb.c
index db9713b49ce9..a268cbf1cbea 100644
--- a/drivers/video/epson1355fb.c
+++ b/drivers/video/epson1355fb.c
@@ -4,7 +4,7 @@
4 * Epson Research S1D13505 Embedded RAMDAC LCD/CRT Controller 4 * Epson Research S1D13505 Embedded RAMDAC LCD/CRT Controller
5 * (previously known as SED1355) 5 * (previously known as SED1355)
6 * 6 *
7 * Cf. http://www.erd.epson.com/vdc/html/S1D13505.html 7 * Cf. http://vdc.epson.com/
8 * 8 *
9 * 9 *
10 * Copyright (C) Hewlett-Packard Company. All rights reserved. 10 * Copyright (C) Hewlett-Packard Company. All rights reserved.
diff --git a/drivers/video/fbcvt.c b/drivers/video/fbcvt.c
index 7293eaccd81b..7cb715dfc0e1 100644
--- a/drivers/video/fbcvt.c
+++ b/drivers/video/fbcvt.c
@@ -5,7 +5,7 @@
5 * 5 *
6 * Based from the VESA(TM) Coordinated Video Timing Generator by 6 * Based from the VESA(TM) Coordinated Video Timing Generator by
7 * Graham Loveridge April 9, 2003 available at 7 * Graham Loveridge April 9, 2003 available at
8 * http://www.vesa.org/public/CVT/CVTd6r1.xls 8 * http://www.elo.utfsm.cl/~elo212/docs/CVTd6r1.xls
9 * 9 *
10 * This file is subject to the terms and conditions of the GNU General Public 10 * This file is subject to the terms and conditions of the GNU General Public
11 * License. See the file COPYING in the main directory of this archive 11 * License. See the file COPYING in the main directory of this archive
diff --git a/drivers/video/i810/i810.h b/drivers/video/i810/i810.h
index 328ae6c673ec..f37de60ecc59 100644
--- a/drivers/video/i810/i810.h
+++ b/drivers/video/i810/i810.h
@@ -17,7 +17,6 @@
17#include <linux/agp_backend.h> 17#include <linux/agp_backend.h>
18#include <linux/fb.h> 18#include <linux/fb.h>
19#include <linux/i2c.h> 19#include <linux/i2c.h>
20#include <linux/i2c-id.h>
21#include <linux/i2c-algo-bit.h> 20#include <linux/i2c-algo-bit.h>
22#include <video/vga.h> 21#include <video/vga.h>
23 22
diff --git a/drivers/video/intelfb/intelfb_i2c.c b/drivers/video/intelfb/intelfb_i2c.c
index 487f2be47460..3300bd31d9d7 100644
--- a/drivers/video/intelfb/intelfb_i2c.c
+++ b/drivers/video/intelfb/intelfb_i2c.c
@@ -32,7 +32,6 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
32#include <linux/fb.h> 32#include <linux/fb.h>
33 33
34#include <linux/i2c.h> 34#include <linux/i2c.h>
35#include <linux/i2c-id.h>
36#include <linux/i2c-algo-bit.h> 35#include <linux/i2c-algo-bit.h>
37 36
38#include <asm/io.h> 37#include <asm/io.h>
diff --git a/drivers/video/metronomefb.c b/drivers/video/metronomefb.c
index 9b3d6e4584cc..63ed3b72b01c 100644
--- a/drivers/video/metronomefb.c
+++ b/drivers/video/metronomefb.c
@@ -10,7 +10,7 @@
10 * Layout is based on skeletonfb.c by James Simmons and Geert Uytterhoeven. 10 * Layout is based on skeletonfb.c by James Simmons and Geert Uytterhoeven.
11 * 11 *
12 * This work was made possible by help and equipment support from E-Ink 12 * This work was made possible by help and equipment support from E-Ink
13 * Corporation. http://support.eink.com/community 13 * Corporation. http://www.eink.com/
14 * 14 *
15 * This driver is written to be used with the Metronome display controller. 15 * This driver is written to be used with the Metronome display controller.
16 * It is intended to be architecture independent. A board specific driver 16 * It is intended to be architecture independent. A board specific driver
diff --git a/drivers/video/savage/savagefb.h b/drivers/video/savage/savagefb.h
index 8bfdfc3c5234..e4c3f214eb8e 100644
--- a/drivers/video/savage/savagefb.h
+++ b/drivers/video/savage/savagefb.h
@@ -13,7 +13,6 @@
13#define __SAVAGEFB_H__ 13#define __SAVAGEFB_H__
14 14
15#include <linux/i2c.h> 15#include <linux/i2c.h>
16#include <linux/i2c-id.h>
17#include <linux/i2c-algo-bit.h> 16#include <linux/i2c-algo-bit.h>
18#include <linux/mutex.h> 17#include <linux/mutex.h>
19#include <video/vga.h> 18#include <video/vga.h>
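Note: the radeon_i2c.c, i810.h, intelfb_i2c.c and savagefb.h hunks all drop the obsolete <linux/i2c-id.h> include. The bit-banging framebuffer I2C users need only the headers that remain; an illustrative (not exhaustive) include set for such a driver is:

	#include <linux/i2c.h>           /* core adapter/client definitions */
	#include <linux/i2c-algo-bit.h>  /* struct i2c_algo_bit_data for GPIO bit-banging */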
diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
index 090aa1a9be6e..6a069d047914 100644
--- a/drivers/video/vesafb.c
+++ b/drivers/video/vesafb.c
@@ -253,7 +253,7 @@ static int __init vesafb_probe(struct platform_device *dev)
253 size_vmode = vesafb_defined.yres * vesafb_fix.line_length; 253 size_vmode = vesafb_defined.yres * vesafb_fix.line_length;
254 254
255 /* size_total -- all video memory we have. Used for mtrr 255 /* size_total -- all video memory we have. Used for mtrr
256 * entries, ressource allocation and bounds 256 * entries, resource allocation and bounds
257 * checking. */ 257 * checking. */
258 size_total = screen_info.lfb_size * 65536; 258 size_total = screen_info.lfb_size * 65536;
259 if (vram_total) 259 if (vram_total)
diff --git a/firmware/keyspan_pda/keyspan_pda.S b/firmware/keyspan_pda/keyspan_pda.S
index 418fe69aa5e0..f3acc197a5ef 100644
--- a/firmware/keyspan_pda/keyspan_pda.S
+++ b/firmware/keyspan_pda/keyspan_pda.S
@@ -74,7 +74,7 @@
74 * recognizes the new device ID and glues it to the real serial driver code. 74 * recognizes the new device ID and glues it to the real serial driver code.
75 * 75 *
76 * USEFUL DOCS: 76 * USEFUL DOCS:
77 * EzUSB Technical Reference Manual: <http://www.anchorchips.com> 77 * EzUSB Technical Reference Manual: <http://www.cypress.com/>
78 * 8051 manuals: everywhere, but try www.dalsemi.com because the EzUSB is 78 * 8051 manuals: everywhere, but try www.dalsemi.com because the EzUSB is
79 * basically the Dallas enhanced 8051 code. Remember that the EzUSB IO ports 79 * basically the Dallas enhanced 8051 code. Remember that the EzUSB IO ports
80 * use totally different registers! 80 * use totally different registers!
diff --git a/firmware/keyspan_pda/xircom_pgs.S b/firmware/keyspan_pda/xircom_pgs.S
index 05d99dd63776..0b79bbf0ae15 100644
--- a/firmware/keyspan_pda/xircom_pgs.S
+++ b/firmware/keyspan_pda/xircom_pgs.S
@@ -74,7 +74,7 @@
74 * recognizes the new device ID and glues it to the real serial driver code. 74 * recognizes the new device ID and glues it to the real serial driver code.
75 * 75 *
76 * USEFUL DOCS: 76 * USEFUL DOCS:
77 * EzUSB Technical Reference Manual: <http://www.anchorchips.com> 77 * EzUSB Technical Reference Manual: <http://www.cypress.com/>
78 * 8051 manuals: everywhere, but try www.dalsemi.com because the EzUSB is 78 * 8051 manuals: everywhere, but try www.dalsemi.com because the EzUSB is
79 * basically the Dallas enhanced 8051 code. Remember that the EzUSB IO ports 79 * basically the Dallas enhanced 8051 code. Remember that the EzUSB IO ports
80 * use totally different registers! 80 * use totally different registers!
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index 0032a9f5a3a9..40186b959429 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -477,7 +477,7 @@ ecryptfs_lower_header_size(struct ecryptfs_crypt_stat *crypt_stat)
477static inline struct ecryptfs_file_info * 477static inline struct ecryptfs_file_info *
478ecryptfs_file_to_private(struct file *file) 478ecryptfs_file_to_private(struct file *file)
479{ 479{
480 return (struct ecryptfs_file_info *)file->private_data; 480 return file->private_data;
481} 481}
482 482
483static inline void 483static inline void
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index 3eadd97324b1..44602754f758 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -511,7 +511,7 @@ static int write_exec(struct page_collect *pcol)
511 511
512 pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL); 512 pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
513 if (!pcol_copy) { 513 if (!pcol_copy) {
514 EXOFS_ERR("write_exec: Faild to kmalloc(pcol)\n"); 514 EXOFS_ERR("write_exec: Failed to kmalloc(pcol)\n");
515 ret = -ENOMEM; 515 ret = -ENOMEM;
516 goto err; 516 goto err;
517 } 517 }
@@ -527,7 +527,7 @@ static int write_exec(struct page_collect *pcol)
527 527
528 ret = exofs_oi_write(oi, ios); 528 ret = exofs_oi_write(oi, ios);
529 if (unlikely(ret)) { 529 if (unlikely(ret)) {
530 EXOFS_ERR("write_exec: exofs_oi_write() Faild\n"); 530 EXOFS_ERR("write_exec: exofs_oi_write() Failed\n");
531 goto err; 531 goto err;
532 } 532 }
533 533
@@ -628,7 +628,7 @@ try_again:
628 /* split the request, next loop will start again */ 628 /* split the request, next loop will start again */
629 ret = write_exec(pcol); 629 ret = write_exec(pcol);
630 if (unlikely(ret)) { 630 if (unlikely(ret)) {
631 EXOFS_DBGMSG("write_exec faild => %d", ret); 631 EXOFS_DBGMSG("write_exec failed => %d", ret);
632 goto fail; 632 goto fail;
633 } 633 }
634 634
@@ -719,7 +719,7 @@ int exofs_write_begin(struct file *file, struct address_space *mapping,
719 ret = simple_write_begin(file, mapping, pos, len, flags, pagep, 719 ret = simple_write_begin(file, mapping, pos, len, flags, pagep,
720 fsdata); 720 fsdata);
721 if (ret) { 721 if (ret) {
722 EXOFS_DBGMSG("simple_write_begin faild\n"); 722 EXOFS_DBGMSG("simple_write_begin failed\n");
723 goto out; 723 goto out;
724 } 724 }
725 725
@@ -732,7 +732,7 @@ int exofs_write_begin(struct file *file, struct address_space *mapping,
732 if (ret) { 732 if (ret) {
733 /*SetPageError was done by _readpage. Is it ok?*/ 733 /*SetPageError was done by _readpage. Is it ok?*/
734 unlock_page(page); 734 unlock_page(page);
735 EXOFS_DBGMSG("__readpage_filler faild\n"); 735 EXOFS_DBGMSG("__readpage_filler failed\n");
736 } 736 }
737 } 737 }
738out: 738out:
@@ -1095,7 +1095,7 @@ static void create_done(struct exofs_io_state *ios, void *p)
1095 atomic_dec(&sbi->s_curr_pending); 1095 atomic_dec(&sbi->s_curr_pending);
1096 1096
1097 if (unlikely(ret)) { 1097 if (unlikely(ret)) {
1098 EXOFS_ERR("object=0x%llx creation faild in pid=0x%llx", 1098 EXOFS_ERR("object=0x%llx creation failed in pid=0x%llx",
1099 _LLU(exofs_oi_objno(oi)), _LLU(sbi->layout.s_pid)); 1099 _LLU(exofs_oi_objno(oi)), _LLU(sbi->layout.s_pid));
1100 /*TODO: When FS is corrupted creation can fail, object already 1100 /*TODO: When FS is corrupted creation can fail, object already
1101 * exist. Get rid of this asynchronous creation, if exist 1101 * exist. Get rid of this asynchronous creation, if exist
@@ -1215,7 +1215,7 @@ static int exofs_update_inode(struct inode *inode, int do_sync)
1215 1215
1216 args = kzalloc(sizeof(*args), GFP_KERNEL); 1216 args = kzalloc(sizeof(*args), GFP_KERNEL);
1217 if (!args) { 1217 if (!args) {
1218 EXOFS_DBGMSG("Faild kzalloc of args\n"); 1218 EXOFS_DBGMSG("Failed kzalloc of args\n");
1219 return -ENOMEM; 1219 return -ENOMEM;
1220 } 1220 }
1221 1221
diff --git a/fs/exofs/ios.c b/fs/exofs/ios.c
index 6550bf70e41d..f74a2ec027a6 100644
--- a/fs/exofs/ios.c
+++ b/fs/exofs/ios.c
@@ -55,7 +55,7 @@ int exofs_read_kern(struct osd_dev *od, u8 *cred, struct osd_obj_id *obj,
55 55
56 ret = osd_finalize_request(or, 0, cred, NULL); 56 ret = osd_finalize_request(or, 0, cred, NULL);
57 if (unlikely(ret)) { 57 if (unlikely(ret)) {
58 EXOFS_DBGMSG("Faild to osd_finalize_request() => %d\n", ret); 58 EXOFS_DBGMSG("Failed to osd_finalize_request() => %d\n", ret);
59 goto out; 59 goto out;
60 } 60 }
61 61
@@ -79,7 +79,7 @@ int exofs_get_io_state(struct exofs_layout *layout,
79 */ 79 */
80 ios = kzalloc(exofs_io_state_size(layout->s_numdevs), GFP_KERNEL); 80 ios = kzalloc(exofs_io_state_size(layout->s_numdevs), GFP_KERNEL);
81 if (unlikely(!ios)) { 81 if (unlikely(!ios)) {
82 EXOFS_DBGMSG("Faild kzalloc bytes=%d\n", 82 EXOFS_DBGMSG("Failed kzalloc bytes=%d\n",
83 exofs_io_state_size(layout->s_numdevs)); 83 exofs_io_state_size(layout->s_numdevs));
84 *pios = NULL; 84 *pios = NULL;
85 return -ENOMEM; 85 return -ENOMEM;
@@ -172,7 +172,7 @@ static int exofs_io_execute(struct exofs_io_state *ios)
172 172
173 ret = osd_finalize_request(or, 0, ios->cred, NULL); 173 ret = osd_finalize_request(or, 0, ios->cred, NULL);
174 if (unlikely(ret)) { 174 if (unlikely(ret)) {
175 EXOFS_DBGMSG("Faild to osd_finalize_request() => %d\n", 175 EXOFS_DBGMSG("Failed to osd_finalize_request() => %d\n",
176 ret); 176 ret);
177 return ret; 177 return ret;
178 } 178 }
@@ -361,7 +361,7 @@ static int _add_stripe_unit(struct exofs_io_state *ios, unsigned *cur_pg,
361 361
362 per_dev->bio = bio_kmalloc(GFP_KERNEL, bio_size); 362 per_dev->bio = bio_kmalloc(GFP_KERNEL, bio_size);
363 if (unlikely(!per_dev->bio)) { 363 if (unlikely(!per_dev->bio)) {
364 EXOFS_DBGMSG("Faild to allocate BIO size=%u\n", 364 EXOFS_DBGMSG("Failed to allocate BIO size=%u\n",
365 bio_size); 365 bio_size);
366 return -ENOMEM; 366 return -ENOMEM;
367 } 367 }
@@ -564,7 +564,7 @@ static int _sbi_write_mirror(struct exofs_io_state *ios, int cur_comp)
564 master_dev->bio->bi_max_vecs); 564 master_dev->bio->bi_max_vecs);
565 if (unlikely(!bio)) { 565 if (unlikely(!bio)) {
566 EXOFS_DBGMSG( 566 EXOFS_DBGMSG(
567 "Faild to allocate BIO size=%u\n", 567 "Failed to allocate BIO size=%u\n",
568 master_dev->bio->bi_max_vecs); 568 master_dev->bio->bi_max_vecs);
569 ret = -ENOMEM; 569 ret = -ENOMEM;
570 goto out; 570 goto out;
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 940c96168868..533699c16040 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -458,7 +458,7 @@ failed_out:
458 * the same format as ext2_get_branch() would do. We are calling it after 458 * the same format as ext2_get_branch() would do. We are calling it after
459 * we had read the existing part of chain and partial points to the last 459 * we had read the existing part of chain and partial points to the last
460 * triple of that (one with zero ->key). Upon the exit we have the same 460 * triple of that (one with zero ->key). Upon the exit we have the same
461 * picture as after the successful ext2_get_block(), excpet that in one 461 * picture as after the successful ext2_get_block(), except that in one
462 * place chain is disconnected - *branch->p is still zero (we did not 462 * place chain is disconnected - *branch->p is still zero (we did not
463 * set the last link), but branch->key contains the number that should 463 * set the last link), but branch->key contains the number that should
464 * be placed into *branch->p to fill that gap. 464 * be placed into *branch->p to fill that gap.
@@ -662,7 +662,7 @@ static int ext2_get_blocks(struct inode *inode,
662 mutex_lock(&ei->truncate_mutex); 662 mutex_lock(&ei->truncate_mutex);
663 /* 663 /*
664 * If the indirect block is missing while we are reading 664 * If the indirect block is missing while we are reading
665 * the chain(ext3_get_branch() returns -EAGAIN err), or 665 * the chain(ext2_get_branch() returns -EAGAIN err), or
666 * if the chain has been changed after we grab the semaphore, 666 * if the chain has been changed after we grab the semaphore,
667 * (either because another process truncated this branch, or 667 * (either because another process truncated this branch, or
668 * another get_block allocated this branch) re-grab the chain to see if 668 * another get_block allocated this branch) re-grab the chain to see if
diff --git a/fs/hostfs/hostfs.h b/fs/hostfs/hostfs.h
index 6bbd75c5589b..7c232c1487ee 100644
--- a/fs/hostfs/hostfs.h
+++ b/fs/hostfs/hostfs.h
@@ -28,12 +28,7 @@
28 * #define ATTR_KILL_SUID 2048 28 * #define ATTR_KILL_SUID 2048
29 * #define ATTR_KILL_SGID 4096 29 * #define ATTR_KILL_SGID 4096
30 * 30 *
31 * and this is because they were added in 2.5 development in this patch: 31 * and this is because they were added in 2.5 development.
32 *
33 * http://linux.bkbits.net:8080/linux-2.5/
34 * cset@3caf4a12k4XgDzK7wyK-TGpSZ9u2Ww?nav=index.html
35 * |src/.|src/include|src/include/linux|related/include/linux/fs.h
36 *
37 * Actually, they are not needed by most ->setattr() methods - they are set by 32 * Actually, they are not needed by most ->setattr() methods - they are set by
38 * callers of notify_change() to notify that the setuid/setgid bits must be 33 * callers of notify_change() to notify that the setuid/setgid bits must be
39 * dropped. 34 * dropped.
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index c51af2a14516..e1b8493b9aaa 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -1010,15 +1010,13 @@ static int lmLogSync(struct jfs_log * log, int hard_sync)
1010 * option 2 - shutdown file systems 1010 * option 2 - shutdown file systems
1011 * associated with log ? 1011 * associated with log ?
1012 * option 3 - extend log ? 1012 * option 3 - extend log ?
1013 */
1014 /*
1015 * option 4 - second chance 1013 * option 4 - second chance
1016 * 1014 *
1017 * mark log wrapped, and continue. 1015 * mark log wrapped, and continue.
1018 * when all active transactions are completed, 1016 * when all active transactions are completed,
1019 * mark log vaild for recovery. 1017 * mark log valid for recovery.
1020 * if crashed during invalid state, log state 1018 * if crashed during invalid state, log state
1021 * implies invald log, forcing fsck(). 1019 * implies invalid log, forcing fsck().
1022 */ 1020 */
1023 /* mark log state log wrap in log superblock */ 1021 /* mark log state log wrap in log superblock */
1024 /* log->state = LOGWRAP; */ 1022 /* log->state = LOGWRAP; */
diff --git a/fs/jfs/jfs_mount.c b/fs/jfs/jfs_mount.c
index 7b698f2ec45a..9895595fd2f2 100644
--- a/fs/jfs/jfs_mount.c
+++ b/fs/jfs/jfs_mount.c
@@ -97,7 +97,7 @@ int jfs_mount(struct super_block *sb)
97 97
98 ipaimap = diReadSpecial(sb, AGGREGATE_I, 0); 98 ipaimap = diReadSpecial(sb, AGGREGATE_I, 0);
99 if (ipaimap == NULL) { 99 if (ipaimap == NULL) {
100 jfs_err("jfs_mount: Faild to read AGGREGATE_I"); 100 jfs_err("jfs_mount: Failed to read AGGREGATE_I");
101 rc = -EIO; 101 rc = -EIO;
102 goto errout20; 102 goto errout20;
103 } 103 }
@@ -148,7 +148,7 @@ int jfs_mount(struct super_block *sb)
148 if ((sbi->mntflag & JFS_BAD_SAIT) == 0) { 148 if ((sbi->mntflag & JFS_BAD_SAIT) == 0) {
149 ipaimap2 = diReadSpecial(sb, AGGREGATE_I, 1); 149 ipaimap2 = diReadSpecial(sb, AGGREGATE_I, 1);
150 if (!ipaimap2) { 150 if (!ipaimap2) {
151 jfs_err("jfs_mount: Faild to read AGGREGATE_I"); 151 jfs_err("jfs_mount: Failed to read AGGREGATE_I");
152 rc = -EIO; 152 rc = -EIO;
153 goto errout35; 153 goto errout35;
154 } 154 }
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h
index 96fa7ebc530c..15fdbdf9eb4b 100644
--- a/fs/ocfs2/cluster/tcp_internal.h
+++ b/fs/ocfs2/cluster/tcp_internal.h
@@ -129,7 +129,7 @@ struct o2net_node {
129 129
130struct o2net_sock_container { 130struct o2net_sock_container {
131 struct kref sc_kref; 131 struct kref sc_kref;
132 /* the next two are vaild for the life time of the sc */ 132 /* the next two are valid for the life time of the sc */
133 struct socket *sc_sock; 133 struct socket *sc_sock;
134 struct o2nm_node *sc_node; 134 struct o2nm_node *sc_node;
135 135
diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
index 5bf8a04b5d9b..789c625c7aa5 100644
--- a/fs/partitions/ldm.c
+++ b/fs/partitions/ldm.c
@@ -5,7 +5,7 @@
5 * Copyright (c) 2001-2007 Anton Altaparmakov 5 * Copyright (c) 2001-2007 Anton Altaparmakov
6 * Copyright (C) 2001,2002 Jakob Kemi <jakob.kemi@telia.com> 6 * Copyright (C) 2001,2002 Jakob Kemi <jakob.kemi@telia.com>
7 * 7 *
8 * Documentation is available at http://www.linux-ntfs.org/content/view/19/37/ 8 * Documentation is available at http://www.linux-ntfs.org/doku.php?id=downloads
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify it under 10 * This program is free software; you can redistribute it and/or modify it under
11 * the terms of the GNU General Public License as published by the Free Software 11 * the terms of the GNU General Public License as published by the Free Software
diff --git a/fs/partitions/ldm.h b/fs/partitions/ldm.h
index d1fb50b28d86..374242c0971a 100644
--- a/fs/partitions/ldm.h
+++ b/fs/partitions/ldm.h
@@ -5,7 +5,7 @@
5 * Copyright (c) 2001-2007 Anton Altaparmakov 5 * Copyright (c) 2001-2007 Anton Altaparmakov
6 * Copyright (C) 2001,2002 Jakob Kemi <jakob.kemi@telia.com> 6 * Copyright (C) 2001,2002 Jakob Kemi <jakob.kemi@telia.com>
7 * 7 *
8 * Documentation is available at http://www.linux-ntfs.org/content/view/19/37/ 8 * Documentation is available at http://www.linux-ntfs.org/doku.php?id=downloads
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify it 10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free 11 * under the terms of the GNU General Public License as published by the Free
diff --git a/fs/reiserfs/Kconfig b/fs/reiserfs/Kconfig
index 513f431038f9..7cd46666ba2c 100644
--- a/fs/reiserfs/Kconfig
+++ b/fs/reiserfs/Kconfig
@@ -10,7 +10,8 @@ config REISERFS_FS
10 10
11 In general, ReiserFS is as fast as ext2, but is very efficient with 11 In general, ReiserFS is as fast as ext2, but is very efficient with
12 large directories and small files. Additional patches are needed 12 large directories and small files. Additional patches are needed
13 for NFS and quotas, please see <http://www.namesys.com/> for links. 13 for NFS and quotas, please see
14 <https://reiser4.wiki.kernel.org/index.php/Main_Page> for links.
14 15
15 It is more easily extended to have features currently found in 16 It is more easily extended to have features currently found in
16 database and keyword search systems than block allocation based file 17 database and keyword search systems than block allocation based file
@@ -18,7 +19,8 @@ config REISERFS_FS
18 plugins consistent with our motto ``It takes more than a license to 19 plugins consistent with our motto ``It takes more than a license to
19 make source code open.'' 20 make source code open.''
20 21
21 Read <http://www.namesys.com/> to learn more about reiserfs. 22 Read <https://reiser4.wiki.kernel.org/index.php/Main_Page>
23 to learn more about reiserfs.
22 24
23 Sponsored by Threshold Networks, Emusic.com, and Bigstorage.com. 25 Sponsored by Threshold Networks, Emusic.com, and Bigstorage.com.
24 26
diff --git a/fs/reiserfs/README b/fs/reiserfs/README
index 14e8c9d460e5..e2f7a264e3ff 100644
--- a/fs/reiserfs/README
+++ b/fs/reiserfs/README
@@ -43,7 +43,7 @@ to address the fair crediting issue in the next GPL version.)
43[END LICENSING] 43[END LICENSING]
44 44
45Reiserfs is a file system based on balanced tree algorithms, which is 45Reiserfs is a file system based on balanced tree algorithms, which is
46described at http://devlinux.com/namesys. 46described at https://reiser4.wiki.kernel.org/index.php/Main_Page
47 47
48Stop reading here. Go there, then return. 48Stop reading here. Go there, then return.
49 49
diff --git a/fs/seq_file.c b/fs/seq_file.c
index e1f437be6c3c..0e7cb1395a94 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -131,7 +131,7 @@ Eoverflow:
131 */ 131 */
132ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) 132ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
133{ 133{
134 struct seq_file *m = (struct seq_file *)file->private_data; 134 struct seq_file *m = file->private_data;
135 size_t copied = 0; 135 size_t copied = 0;
136 loff_t pos; 136 loff_t pos;
137 size_t n; 137 size_t n;
@@ -280,7 +280,7 @@ EXPORT_SYMBOL(seq_read);
280 */ 280 */
281loff_t seq_lseek(struct file *file, loff_t offset, int origin) 281loff_t seq_lseek(struct file *file, loff_t offset, int origin)
282{ 282{
283 struct seq_file *m = (struct seq_file *)file->private_data; 283 struct seq_file *m = file->private_data;
284 loff_t retval = -EINVAL; 284 loff_t retval = -EINVAL;
285 285
286 mutex_lock(&m->lock); 286 mutex_lock(&m->lock);
@@ -324,7 +324,7 @@ EXPORT_SYMBOL(seq_lseek);
324 */ 324 */
325int seq_release(struct inode *inode, struct file *file) 325int seq_release(struct inode *inode, struct file *file)
326{ 326{
327 struct seq_file *m = (struct seq_file *)file->private_data; 327 struct seq_file *m = file->private_data;
328 kfree(m->buf); 328 kfree(m->buf);
329 kfree(m); 329 kfree(m);
330 return 0; 330 return 0;
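
Note on the seq_file change above: file->private_data is a void pointer, so C converts it to any object pointer type implicitly and the removed casts were redundant. A minimal sketch of the idiom the patch standardizes on (the function name is hypothetical, not from this patch):

	#include <linux/fs.h>
	#include <linux/seq_file.h>

	static ssize_t example_read(struct file *file, char __user *buf,
				    size_t size, loff_t *ppos)
	{
		/* No cast needed: private_data is a void pointer. */
		struct seq_file *m = file->private_data;

		if (!m)
			return -EINVAL;
		return seq_read(file, buf, size, ppos);
	}
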
diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h
index 1c96b255017c..ba98918bbd9b 100644
--- a/include/crypto/cryptd.h
+++ b/include/crypto/cryptd.h
@@ -1,5 +1,12 @@
1/* 1/*
2 * Software async crypto daemon 2 * Software async crypto daemon
3 *
4 * Added AEAD support to cryptd.
5 * Authors: Tadeusz Struk (tadeusz.struk@intel.com)
6 * Adrian Hoban <adrian.hoban@intel.com>
7 * Gabriele Paoloni <gabriele.paoloni@intel.com>
8 * Aidan O'Mahony (aidan.o.mahony@intel.com)
9 * Copyright (c) 2010, Intel Corporation.
3 */ 10 */
4 11
5#ifndef _CRYPTO_CRYPT_H 12#ifndef _CRYPTO_CRYPT_H
@@ -42,4 +49,21 @@ struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm);
42struct shash_desc *cryptd_shash_desc(struct ahash_request *req); 49struct shash_desc *cryptd_shash_desc(struct ahash_request *req);
43void cryptd_free_ahash(struct cryptd_ahash *tfm); 50void cryptd_free_ahash(struct cryptd_ahash *tfm);
44 51
52struct cryptd_aead {
53 struct crypto_aead base;
54};
55
56static inline struct cryptd_aead *__cryptd_aead_cast(
57 struct crypto_aead *tfm)
58{
59 return (struct cryptd_aead *)tfm;
60}
61
62struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
63 u32 type, u32 mask);
64
65struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm);
66
67void cryptd_free_aead(struct cryptd_aead *tfm);
68
45#endif 69#endif
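
The new AEAD entry points above mirror the existing cryptd ablkcipher/ahash ones. A minimal sketch of how a caller might bounce an AEAD into the cryptd workqueue context; the "gcm(aes)" algorithm name and the function name are placeholders for illustration only, not taken from this patch:

	#include <crypto/cryptd.h>
	#include <linux/err.h>

	static int example_use_cryptd_aead(void)
	{
		struct cryptd_aead *cryptd_tfm;
		struct crypto_aead *child;

		cryptd_tfm = cryptd_alloc_aead("gcm(aes)", 0, 0);
		if (IS_ERR(cryptd_tfm))
			return PTR_ERR(cryptd_tfm);

		/* The child transform is the synchronous algorithm that the
		 * cryptd workqueue invokes on behalf of the caller. */
		child = cryptd_aead_child(cryptd_tfm);
		(void)child;

		cryptd_free_aead(cryptd_tfm);
		return 0;
	}
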
diff --git a/include/crypto/gf128mul.h b/include/crypto/gf128mul.h
index 4086b8ebfafe..da2530e34b26 100644
--- a/include/crypto/gf128mul.h
+++ b/include/crypto/gf128mul.h
@@ -54,8 +54,8 @@
54 54
55/* Comment by Rik: 55/* Comment by Rik:
56 * 56 *
57 * For some background on GF(2^128) see for example: http://- 57 * For some background on GF(2^128) see for example:
58 * csrc.nist.gov/CryptoToolkit/modes/proposedmodes/gcm/gcm-revised-spec.pdf 58 * http://csrc.nist.gov/groups/ST/toolkit/BCM/documents/proposedmodes/gcm/gcm-revised-spec.pdf
59 * 59 *
60 * The elements of GF(2^128) := GF(2)[X]/(X^128-X^7-X^2-X^1-1) can 60 * The elements of GF(2^128) := GF(2)[X]/(X^128-X^7-X^2-X^1-1) can
61 * be mapped to computer memory in a variety of ways. Let's examine 61 * be mapped to computer memory in a variety of ways. Let's examine
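
For readers chasing the GCM reference above: the field is GF(2)[X]/(X^128 + X^7 + X^2 + X + 1) (over GF(2), addition and subtraction coincide, hence the minus signs in the comment). A minimal sketch, assuming the caller wants the "lle" convention discussed further down in this header, of an in-place multiply with the kernel's helper (wrapper name is hypothetical):

	#include <crypto/b128ops.h>
	#include <crypto/gf128mul.h>

	static void example_gf128_multiply(be128 *a, const be128 *b)
	{
		/* a := a * b in GF(2^128), little-little-endian convention. */
		gf128mul_lle(a, b);
	}
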
diff --git a/include/linux/fdreg.h b/include/linux/fdreg.h
index c2eeb63b72db..61ce64169004 100644
--- a/include/linux/fdreg.h
+++ b/include/linux/fdreg.h
@@ -89,7 +89,7 @@
89/* the following commands are new in the 82078. They are not used in the 89/* the following commands are new in the 82078. They are not used in the
90 * floppy driver, except the first three. These commands may be useful for apps 90 * floppy driver, except the first three. These commands may be useful for apps
91 * which use the FDRAWCMD interface. For doc, get the 82078 spec sheets at 91 * which use the FDRAWCMD interface. For doc, get the 82078 spec sheets at
92 * http://www-techdoc.intel.com/docs/periph/fd_contr/datasheets/ */ 92 * http://www.intel.com/design/archives/periphrl/docs/29046803.htm */
93 93
94#define FD_PARTID 0x18 /* part id ("extended" version cmd) */ 94#define FD_PARTID 0x18 /* part id ("extended" version cmd) */
95#define FD_SAVE 0x2e /* save fdc regs for later restore */ 95#define FD_SAVE 0x2e /* save fdc regs for later restore */
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 42a0f1d11365..bb0f56f5c01e 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -316,6 +316,7 @@ struct hid_item {
316#define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000 316#define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000
317#define HID_QUIRK_NO_INIT_REPORTS 0x20000000 317#define HID_QUIRK_NO_INIT_REPORTS 0x20000000
318#define HID_QUIRK_NO_IGNORE 0x40000000 318#define HID_QUIRK_NO_IGNORE 0x40000000
319#define HID_QUIRK_NO_INPUT_SYNC 0x80000000
319 320
320/* 321/*
321 * This is the global environment of the parser. This information is 322 * This is the global environment of the parser. This information is
@@ -626,8 +627,8 @@ struct hid_driver {
626 int (*event)(struct hid_device *hdev, struct hid_field *field, 627 int (*event)(struct hid_device *hdev, struct hid_field *field,
627 struct hid_usage *usage, __s32 value); 628 struct hid_usage *usage, __s32 value);
628 629
629 void (*report_fixup)(struct hid_device *hdev, __u8 *buf, 630 __u8 *(*report_fixup)(struct hid_device *hdev, __u8 *buf,
630 unsigned int size); 631 unsigned int *size);
631 632
632 int (*input_mapping)(struct hid_device *hdev, 633 int (*input_mapping)(struct hid_device *hdev,
633 struct hid_input *hidinput, struct hid_field *field, 634 struct hid_input *hidinput, struct hid_field *field,
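
With the new report_fixup signature above, a driver returns the descriptor to use and may adjust its size through *size, so it can hand back a patched copy instead of only editing the buffer in place. A minimal sketch; the byte offset and values are purely illustrative and the function name is hypothetical:

	#include <linux/hid.h>

	static __u8 *example_report_fixup(struct hid_device *hdev, __u8 *rdesc,
					  unsigned int *rsize)
	{
		/* Illustrative only: patch one byte if the descriptor is long enough. */
		if (*rsize >= 4 && rdesc[3] == 0x81)
			rdesc[3] = 0x91;

		/* A driver could instead return a replacement buffer and update *rsize. */
		return rdesc;
	}

A driver wires this up through the .report_fixup member of its struct hid_driver, as before; only the return type and the size parameter change.
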
diff --git a/include/linux/hiddev.h b/include/linux/hiddev.h
index bb6f58baf319..a3f481a3063b 100644
--- a/include/linux/hiddev.h
+++ b/include/linux/hiddev.h
@@ -226,8 +226,6 @@ void hiddev_disconnect(struct hid_device *);
226void hiddev_hid_event(struct hid_device *hid, struct hid_field *field, 226void hiddev_hid_event(struct hid_device *hid, struct hid_field *field,
227 struct hid_usage *usage, __s32 value); 227 struct hid_usage *usage, __s32 value);
228void hiddev_report_event(struct hid_device *hid, struct hid_report *report); 228void hiddev_report_event(struct hid_device *hid, struct hid_report *report);
229int __init hiddev_init(void);
230void hiddev_exit(void);
231#else 229#else
232static inline int hiddev_connect(struct hid_device *hid, 230static inline int hiddev_connect(struct hid_device *hid,
233 unsigned int force) 231 unsigned int force)
@@ -236,8 +234,6 @@ static inline void hiddev_disconnect(struct hid_device *hid) { }
236static inline void hiddev_hid_event(struct hid_device *hid, struct hid_field *field, 234static inline void hiddev_hid_event(struct hid_device *hid, struct hid_field *field,
237 struct hid_usage *usage, __s32 value) { } 235 struct hid_usage *usage, __s32 value) { }
238static inline void hiddev_report_event(struct hid_device *hid, struct hid_report *report) { } 236static inline void hiddev_report_event(struct hid_device *hid, struct hid_report *report) { }
239static inline int hiddev_init(void) { return 0; }
240static inline void hiddev_exit(void) { }
241#endif 237#endif
242 238
243#endif 239#endif
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 4bae0b72ed3c..1f66fa06a97c 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -384,11 +384,15 @@ static inline void i2c_set_adapdata(struct i2c_adapter *dev, void *data)
384 dev_set_drvdata(&dev->dev, data); 384 dev_set_drvdata(&dev->dev, data);
385} 385}
386 386
387static inline int i2c_parent_is_i2c_adapter(const struct i2c_adapter *adapter) 387static inline struct i2c_adapter *
388i2c_parent_is_i2c_adapter(const struct i2c_adapter *adapter)
388{ 389{
389 return adapter->dev.parent != NULL 390 struct device *parent = adapter->dev.parent;
390 && adapter->dev.parent->bus == &i2c_bus_type 391
391 && adapter->dev.parent->type == &i2c_adapter_type; 392 if (parent != NULL && parent->type == &i2c_adapter_type)
393 return to_i2c_adapter(parent);
394 else
395 return NULL;
392} 396}
393 397
394/* Adapter locking functions, exported for shared pin cases */ 398/* Adapter locking functions, exported for shared pin cases */
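
The reworked helper above now hands back the parent adapter itself (or NULL), so callers that previously tested a boolean and then cast dev.parent can do both in one step. A minimal caller-side sketch (the function name is hypothetical):

	#include <linux/i2c.h>

	/* Walk up to the root adapter of a (possibly multiplexed) bus segment. */
	static struct i2c_adapter *example_root_adapter(struct i2c_adapter *adap)
	{
		struct i2c_adapter *parent;

		while ((parent = i2c_parent_is_i2c_adapter(adap)))
			adap = parent;

		return adap;
	}
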
diff --git a/include/linux/idr.h b/include/linux/idr.h
index cdb715e58e3e..928ae712709f 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -117,10 +117,13 @@ void idr_init(struct idr *idp);
117/* 117/*
118 * IDA - IDR based id allocator, use when translation from id to 118 * IDA - IDR based id allocator, use when translation from id to
119 * pointer isn't necessary. 119 * pointer isn't necessary.
120 *
121 * IDA_BITMAP_LONGS is calculated to be one less to accommodate
122 * ida_bitmap->nr_busy so that the whole struct fits in 128 bytes.
120 */ 123 */
121#define IDA_CHUNK_SIZE 128 /* 128 bytes per chunk */ 124#define IDA_CHUNK_SIZE 128 /* 128 bytes per chunk */
122#define IDA_BITMAP_LONGS (128 / sizeof(long) - 1) 125#define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long) - 1)
123#define IDA_BITMAP_BITS (IDA_BITMAP_LONGS * sizeof(long) * 8) 126#define IDA_BITMAP_BITS (IDA_BITMAP_LONGS * sizeof(long) * 8)
124 127
125struct ida_bitmap { 128struct ida_bitmap {
126 long nr_busy; 129 long nr_busy;
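
The arithmetic behind the new comment: on a 64-bit machine IDA_BITMAP_LONGS is 128/8 - 1 = 15, so struct ida_bitmap is 8 bytes of nr_busy plus 15 * 8 = 120 bytes of bitmap, exactly the 128-byte IDA_CHUNK_SIZE (on 32-bit, 4 + 31 * 4 = 128 as well). A compile-time restatement of that invariant; the check itself is not part of this patch:

	#include <linux/idr.h>
	#include <linux/kernel.h>

	static inline void example_ida_size_check(void)
	{
		BUILD_BUG_ON(sizeof(struct ida_bitmap) != IDA_CHUNK_SIZE);
	}
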
diff --git a/include/linux/if_infiniband.h b/include/linux/if_infiniband.h
index 3e659ec7dfdd..7d958475d4ac 100644
--- a/include/linux/if_infiniband.h
+++ b/include/linux/if_infiniband.h
@@ -5,7 +5,7 @@
5 * <http://www.fsf.org/copyleft/gpl.html>, or the OpenIB.org BSD 5 * <http://www.fsf.org/copyleft/gpl.html>, or the OpenIB.org BSD
6 * license, available in the LICENSE.TXT file accompanying this 6 * license, available in the LICENSE.TXT file accompanying this
7 * software. These details are also available at 7 * software. These details are also available at
8 * <http://openib.org/license.html>. 8 * <http://www.openfabrics.org/software_license.htm>.
9 * 9 *
10 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 10 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
11 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 11 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
diff --git a/include/linux/jhash.h b/include/linux/jhash.h
index 2a2f99fbcb16..ced1159fa4f2 100644
--- a/include/linux/jhash.h
+++ b/include/linux/jhash.h
@@ -116,7 +116,7 @@ static inline u32 jhash2(const u32 *k, u32 length, u32 initval)
116/* A special ultra-optimized versions that knows they are hashing exactly 116/* A special ultra-optimized versions that knows they are hashing exactly
117 * 3, 2 or 1 word(s). 117 * 3, 2 or 1 word(s).
118 * 118 *
119 * NOTE: In partilar the "c += length; __jhash_mix(a,b,c);" normally 119 * NOTE: In particular the "c += length; __jhash_mix(a,b,c);" normally
120 * done at the end is not done here. 120 * done at the end is not done here.
121 */ 121 */
122static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval) 122static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 636fc381c897..919ae53adc5c 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -414,6 +414,14 @@ struct kvm_enable_cap {
414 __u8 pad[64]; 414 __u8 pad[64];
415}; 415};
416 416
417/* for KVM_PPC_GET_PVINFO */
418struct kvm_ppc_pvinfo {
419 /* out */
420 __u32 flags;
421 __u32 hcall[4];
422 __u8 pad[108];
423};
424
417#define KVMIO 0xAE 425#define KVMIO 0xAE
418 426
419/* 427/*
@@ -530,6 +538,8 @@ struct kvm_enable_cap {
530#ifdef __KVM_HAVE_XCRS 538#ifdef __KVM_HAVE_XCRS
531#define KVM_CAP_XCRS 56 539#define KVM_CAP_XCRS 56
532#endif 540#endif
541#define KVM_CAP_PPC_GET_PVINFO 57
542#define KVM_CAP_PPC_IRQ_LEVEL 58
533 543
534#ifdef KVM_CAP_IRQ_ROUTING 544#ifdef KVM_CAP_IRQ_ROUTING
535 545
@@ -664,6 +674,8 @@ struct kvm_clock_data {
664/* Available with KVM_CAP_PIT_STATE2 */ 674/* Available with KVM_CAP_PIT_STATE2 */
665#define KVM_GET_PIT2 _IOR(KVMIO, 0x9f, struct kvm_pit_state2) 675#define KVM_GET_PIT2 _IOR(KVMIO, 0x9f, struct kvm_pit_state2)
666#define KVM_SET_PIT2 _IOW(KVMIO, 0xa0, struct kvm_pit_state2) 676#define KVM_SET_PIT2 _IOW(KVMIO, 0xa0, struct kvm_pit_state2)
677/* Available with KVM_CAP_PPC_GET_PVINFO */
678#define KVM_PPC_GET_PVINFO _IOW(KVMIO, 0xa1, struct kvm_ppc_pvinfo)
667 679
668/* 680/*
669 * ioctls for vcpu fds 681 * ioctls for vcpu fds
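
KVM_PPC_GET_PVINFO added above is a VM ioctl, so userspace issues it on the VM file descriptor. A minimal userspace sketch, assuming a PowerPC host that advertises KVM_CAP_PPC_GET_PVINFO and a vmfd obtained from KVM_CREATE_VM; error handling is trimmed and the function name is hypothetical:

	#include <linux/kvm.h>
	#include <sys/ioctl.h>
	#include <stdio.h>

	static void example_print_hcall_insns(int vmfd)
	{
		struct kvm_ppc_pvinfo pvinfo;
		int i;

		if (ioctl(vmfd, KVM_PPC_GET_PVINFO, &pvinfo) < 0)
			return;

		/* hcall[] holds the instruction sequence the guest should use. */
		for (i = 0; i < 4; i++)
			printf("hcall insn %d: 0x%08x\n", i, pvinfo.hcall[i]);
	}
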
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index ac740b26eb10..a0557422715e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -36,9 +36,10 @@
36#define KVM_REQ_PENDING_TIMER 5 36#define KVM_REQ_PENDING_TIMER 5
37#define KVM_REQ_UNHALT 6 37#define KVM_REQ_UNHALT 6
38#define KVM_REQ_MMU_SYNC 7 38#define KVM_REQ_MMU_SYNC 7
39#define KVM_REQ_KVMCLOCK_UPDATE 8 39#define KVM_REQ_CLOCK_UPDATE 8
40#define KVM_REQ_KICK 9 40#define KVM_REQ_KICK 9
41#define KVM_REQ_DEACTIVATE_FPU 10 41#define KVM_REQ_DEACTIVATE_FPU 10
42#define KVM_REQ_EVENT 11
42 43
43#define KVM_USERSPACE_IRQ_SOURCE_ID 0 44#define KVM_USERSPACE_IRQ_SOURCE_ID 0
44 45
@@ -289,6 +290,9 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
289void kvm_disable_largepages(void); 290void kvm_disable_largepages(void);
290void kvm_arch_flush_shadow(struct kvm *kvm); 291void kvm_arch_flush_shadow(struct kvm *kvm);
291 292
293int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
294 int nr_pages);
295
292struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); 296struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
293unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); 297unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
294void kvm_release_page_clean(struct page *page); 298void kvm_release_page_clean(struct page *page);
@@ -296,6 +300,8 @@ void kvm_release_page_dirty(struct page *page);
296void kvm_set_page_dirty(struct page *page); 300void kvm_set_page_dirty(struct page *page);
297void kvm_set_page_accessed(struct page *page); 301void kvm_set_page_accessed(struct page *page);
298 302
303pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr);
304pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
299pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn); 305pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
300pfn_t gfn_to_pfn_memslot(struct kvm *kvm, 306pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
301 struct kvm_memory_slot *slot, gfn_t gfn); 307 struct kvm_memory_slot *slot, gfn_t gfn);
@@ -477,8 +483,7 @@ int kvm_deassign_device(struct kvm *kvm,
477 struct kvm_assigned_dev_kernel *assigned_dev); 483 struct kvm_assigned_dev_kernel *assigned_dev);
478#else /* CONFIG_IOMMU_API */ 484#else /* CONFIG_IOMMU_API */
479static inline int kvm_iommu_map_pages(struct kvm *kvm, 485static inline int kvm_iommu_map_pages(struct kvm *kvm,
480 gfn_t base_gfn, 486 struct kvm_memory_slot *slot)
481 unsigned long npages)
482{ 487{
483 return 0; 488 return 0;
484} 489}
@@ -518,11 +523,22 @@ static inline void kvm_guest_exit(void)
518 current->flags &= ~PF_VCPU; 523 current->flags &= ~PF_VCPU;
519} 524}
520 525
526static inline unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
527 gfn_t gfn)
528{
529 return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
530}
531
521static inline gpa_t gfn_to_gpa(gfn_t gfn) 532static inline gpa_t gfn_to_gpa(gfn_t gfn)
522{ 533{
523 return (gpa_t)gfn << PAGE_SHIFT; 534 return (gpa_t)gfn << PAGE_SHIFT;
524} 535}
525 536
537static inline gfn_t gpa_to_gfn(gpa_t gpa)
538{
539 return (gfn_t)(gpa >> PAGE_SHIFT);
540}
541
526static inline hpa_t pfn_to_hpa(pfn_t pfn) 542static inline hpa_t pfn_to_hpa(pfn_t pfn)
527{ 543{
528 return (hpa_t)pfn << PAGE_SHIFT; 544 return (hpa_t)pfn << PAGE_SHIFT;
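
The gfn/gpa helpers above are plain shifts by PAGE_SHIFT: with 4 KiB pages, gfn_to_gpa(0x100) is 0x100000 and gpa_to_gfn(0x100fff) is 0x100, while gfn_to_hva_memslot() adds the page offset within the memslot to its userspace base address. A minimal caller sketch (illustrative only, hypothetical function name):

	#include <linux/kvm_host.h>

	/* Round a guest physical address down to its page and recover the
	 * in-page offset using the helpers added above. */
	static gpa_t example_page_base(gpa_t gpa, unsigned long *offset)
	{
		gfn_t gfn = gpa_to_gfn(gpa);

		*offset = gpa & ~PAGE_MASK;	/* offset within the 2^PAGE_SHIFT page */
		return gfn_to_gpa(gfn);		/* gpa with the offset cleared */
	}
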
diff --git a/include/linux/kvm_para.h b/include/linux/kvm_para.h
index d73109243fda..47a070b0520e 100644
--- a/include/linux/kvm_para.h
+++ b/include/linux/kvm_para.h
@@ -17,6 +17,8 @@
17 17
18#define KVM_HC_VAPIC_POLL_IRQ 1 18#define KVM_HC_VAPIC_POLL_IRQ 1
19#define KVM_HC_MMU_OP 2 19#define KVM_HC_MMU_OP 2
20#define KVM_HC_FEATURES 3
21#define KVM_HC_PPC_MAP_MAGIC_PAGE 4
20 22
21/* 23/*
22 * hypercalls use architecture specific 24 * hypercalls use architecture specific
@@ -24,11 +26,6 @@
24#include <asm/kvm_para.h> 26#include <asm/kvm_para.h>
25 27
26#ifdef __KERNEL__ 28#ifdef __KERNEL__
27#ifdef CONFIG_KVM_GUEST
28void __init kvm_guest_init(void);
29#else
30#define kvm_guest_init() do { } while (0)
31#endif
32 29
33static inline int kvm_para_has_feature(unsigned int feature) 30static inline int kvm_para_has_feature(unsigned int feature)
34{ 31{
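
With the generic kvm_guest_init() declaration removed here (it is now left to the architecture headers), the feature-query helper retained above is what guest code uses to gate paravirtual features. A minimal sketch; KVM_FEATURE_CLOCKSOURCE is an existing x86 feature bit used only as an example, and the function name is hypothetical:

	#include <linux/kvm_para.h>
	#include <linux/types.h>

	static bool example_want_pv_clock(void)
	{
		/* kvm_para_available() and the feature bits come from asm/kvm_para.h. */
		return kvm_para_available() &&
		       kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE);
	}
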
diff --git a/include/linux/n_r3964.h b/include/linux/n_r3964.h
index de24af79ebd3..54b8e0d8d916 100644
--- a/include/linux/n_r3964.h
+++ b/include/linux/n_r3964.h
@@ -4,7 +4,6 @@
4 * Copyright by 4 * Copyright by
5 * Philips Automation Projects 5 * Philips Automation Projects
6 * Kassel (Germany) 6 * Kassel (Germany)
7 * http://www.pap-philips.de
8 * ----------------------------------------------------------- 7 * -----------------------------------------------------------
9 * This software may be used and distributed according to the terms of 8 * This software may be used and distributed according to the terms of
10 * the GNU General Public License, incorporated herein by reference. 9 * the GNU General Public License, incorporated herein by reference.
diff --git a/include/linux/padata.h b/include/linux/padata.h
index bdcd1e9eacea..4633b2f726b6 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -127,8 +127,8 @@ struct padata_cpumask {
127 */ 127 */
128struct parallel_data { 128struct parallel_data {
129 struct padata_instance *pinst; 129 struct padata_instance *pinst;
130 struct padata_parallel_queue *pqueue; 130 struct padata_parallel_queue __percpu *pqueue;
131 struct padata_serial_queue *squeue; 131 struct padata_serial_queue __percpu *squeue;
132 atomic_t seq_nr; 132 atomic_t seq_nr;
133 atomic_t reorder_objects; 133 atomic_t reorder_objects;
134 atomic_t refcnt; 134 atomic_t refcnt;
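
The __percpu annotations above tell sparse that pqueue and squeue are per-CPU pointers, which must be dereferenced through the per-CPU accessors rather than directly. A minimal sketch of the access pattern (the helper name is hypothetical):

	#include <linux/padata.h>
	#include <linux/percpu.h>

	/* Look up the parallel queue that belongs to @cpu. */
	static struct padata_parallel_queue *
	example_pqueue_on_cpu(struct parallel_data *pd, int cpu)
	{
		return per_cpu_ptr(pd->pqueue, cpu);
	}
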
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 90c038c0ad96..d278dd9cb765 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -820,7 +820,7 @@
820 820
821#define PCI_VENDOR_ID_ANIGMA 0x1051 821#define PCI_VENDOR_ID_ANIGMA 0x1051
822#define PCI_DEVICE_ID_ANIGMA_MC145575 0x0100 822#define PCI_DEVICE_ID_ANIGMA_MC145575 0x0100
823 823
824#define PCI_VENDOR_ID_EFAR 0x1055 824#define PCI_VENDOR_ID_EFAR 0x1055
825#define PCI_DEVICE_ID_EFAR_SLC90E66_1 0x9130 825#define PCI_DEVICE_ID_EFAR_SLC90E66_1 0x9130
826#define PCI_DEVICE_ID_EFAR_SLC90E66_3 0x9463 826#define PCI_DEVICE_ID_EFAR_SLC90E66_3 0x9463
@@ -1451,7 +1451,7 @@
1451 1451
1452#define PCI_VENDOR_ID_ZIATECH 0x1138 1452#define PCI_VENDOR_ID_ZIATECH 0x1138
1453#define PCI_DEVICE_ID_ZIATECH_5550_HC 0x5550 1453#define PCI_DEVICE_ID_ZIATECH_5550_HC 0x5550
1454 1454
1455 1455
1456#define PCI_VENDOR_ID_SYSKONNECT 0x1148 1456#define PCI_VENDOR_ID_SYSKONNECT 0x1148
1457#define PCI_DEVICE_ID_SYSKONNECT_TR 0x4200 1457#define PCI_DEVICE_ID_SYSKONNECT_TR 0x4200
@@ -1605,8 +1605,8 @@
1605#define PCI_DEVICE_ID_RP8OCTA 0x0005 1605#define PCI_DEVICE_ID_RP8OCTA 0x0005
1606#define PCI_DEVICE_ID_RP8J 0x0006 1606#define PCI_DEVICE_ID_RP8J 0x0006
1607#define PCI_DEVICE_ID_RP4J 0x0007 1607#define PCI_DEVICE_ID_RP4J 0x0007
1608#define PCI_DEVICE_ID_RP8SNI 0x0008 1608#define PCI_DEVICE_ID_RP8SNI 0x0008
1609#define PCI_DEVICE_ID_RP16SNI 0x0009 1609#define PCI_DEVICE_ID_RP16SNI 0x0009
1610#define PCI_DEVICE_ID_RPP4 0x000A 1610#define PCI_DEVICE_ID_RPP4 0x000A
1611#define PCI_DEVICE_ID_RPP8 0x000B 1611#define PCI_DEVICE_ID_RPP8 0x000B
1612#define PCI_DEVICE_ID_RP4M 0x000D 1612#define PCI_DEVICE_ID_RP4M 0x000D
@@ -1616,9 +1616,9 @@
1616#define PCI_DEVICE_ID_URP8INTF 0x0802 1616#define PCI_DEVICE_ID_URP8INTF 0x0802
1617#define PCI_DEVICE_ID_URP16INTF 0x0803 1617#define PCI_DEVICE_ID_URP16INTF 0x0803
1618#define PCI_DEVICE_ID_URP8OCTA 0x0805 1618#define PCI_DEVICE_ID_URP8OCTA 0x0805
1619#define PCI_DEVICE_ID_UPCI_RM3_8PORT 0x080C 1619#define PCI_DEVICE_ID_UPCI_RM3_8PORT 0x080C
1620#define PCI_DEVICE_ID_UPCI_RM3_4PORT 0x080D 1620#define PCI_DEVICE_ID_UPCI_RM3_4PORT 0x080D
1621#define PCI_DEVICE_ID_CRP16INTF 0x0903 1621#define PCI_DEVICE_ID_CRP16INTF 0x0903
1622 1622
1623#define PCI_VENDOR_ID_CYCLADES 0x120e 1623#define PCI_VENDOR_ID_CYCLADES 0x120e
1624#define PCI_DEVICE_ID_CYCLOM_Y_Lo 0x0100 1624#define PCI_DEVICE_ID_CYCLOM_Y_Lo 0x0100
@@ -2144,7 +2144,7 @@
2144#define PCI_DEVICE_ID_RASTEL_2PORT 0x2000 2144#define PCI_DEVICE_ID_RASTEL_2PORT 0x2000
2145 2145
2146#define PCI_VENDOR_ID_ZOLTRIX 0x15b0 2146#define PCI_VENDOR_ID_ZOLTRIX 0x15b0
2147#define PCI_DEVICE_ID_ZOLTRIX_2BD0 0x2bd0 2147#define PCI_DEVICE_ID_ZOLTRIX_2BD0 0x2bd0
2148 2148
2149#define PCI_VENDOR_ID_MELLANOX 0x15b3 2149#define PCI_VENDOR_ID_MELLANOX 0x15b3
2150#define PCI_DEVICE_ID_MELLANOX_TAVOR 0x5a44 2150#define PCI_DEVICE_ID_MELLANOX_TAVOR 0x5a44
@@ -2431,7 +2431,7 @@
2431#define PCI_DEVICE_ID_INTEL_82815_MC 0x1130 2431#define PCI_DEVICE_ID_INTEL_82815_MC 0x1130
2432#define PCI_DEVICE_ID_INTEL_82815_CGC 0x1132 2432#define PCI_DEVICE_ID_INTEL_82815_CGC 0x1132
2433#define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221 2433#define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221
2434#define PCI_DEVICE_ID_INTEL_7505_0 0x2550 2434#define PCI_DEVICE_ID_INTEL_7505_0 0x2550
2435#define PCI_DEVICE_ID_INTEL_7205_0 0x255d 2435#define PCI_DEVICE_ID_INTEL_7205_0 0x255d
2436#define PCI_DEVICE_ID_INTEL_82437 0x122d 2436#define PCI_DEVICE_ID_INTEL_82437 0x122d
2437#define PCI_DEVICE_ID_INTEL_82371FB_0 0x122e 2437#define PCI_DEVICE_ID_INTEL_82371FB_0 0x122e
@@ -2634,6 +2634,9 @@
2634#define PCI_DEVICE_ID_INTEL_MCH_PC 0x3599 2634#define PCI_DEVICE_ID_INTEL_MCH_PC 0x3599
2635#define PCI_DEVICE_ID_INTEL_MCH_PC1 0x359a 2635#define PCI_DEVICE_ID_INTEL_MCH_PC1 0x359a
2636#define PCI_DEVICE_ID_INTEL_E7525_MCH 0x359e 2636#define PCI_DEVICE_ID_INTEL_E7525_MCH 0x359e
2637#define PCI_DEVICE_ID_INTEL_I7300_MCH_ERR 0x360c
2638#define PCI_DEVICE_ID_INTEL_I7300_MCH_FB0 0x360f
2639#define PCI_DEVICE_ID_INTEL_I7300_MCH_FB1 0x3610
2637#define PCI_DEVICE_ID_INTEL_IOAT_CNB 0x360b 2640#define PCI_DEVICE_ID_INTEL_IOAT_CNB 0x360b
2638#define PCI_DEVICE_ID_INTEL_FBD_CNB 0x360c 2641#define PCI_DEVICE_ID_INTEL_FBD_CNB 0x360c
2639#define PCI_DEVICE_ID_INTEL_IOAT_JSF0 0x3710 2642#define PCI_DEVICE_ID_INTEL_IOAT_JSF0 0x3710
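
New IDs in this header are typically consumed by a driver's PCI match table. A minimal sketch showing how the i7300 MCH IDs added above would be matched; the driver and table names are hypothetical:

	#include <linux/module.h>
	#include <linux/pci.h>

	/* Hypothetical match table for a driver interested in the i7300 MCH. */
	static const struct pci_device_id example_i7300_ids[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR) },
		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_FB0) },
		{ 0, }
	};
	MODULE_DEVICE_TABLE(pci, example_i7300_ids);
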
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 9f63538928c0..e4f5ed180b9b 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -87,7 +87,7 @@ struct kmem_cache {
87 unsigned long min_partial; 87 unsigned long min_partial;
88 const char *name; /* Name (only for display!) */ 88 const char *name; /* Name (only for display!) */
89 struct list_head list; /* List of slab caches */ 89 struct list_head list; /* List of slab caches */
90#ifdef CONFIG_SLUB_DEBUG 90#ifdef CONFIG_SYSFS
91 struct kobject kobj; /* For sysfs */ 91 struct kobject kobj; /* For sysfs */
92#endif 92#endif
93 93
@@ -96,11 +96,8 @@ struct kmem_cache {
96 * Defragmentation by allocating from a remote node. 96 * Defragmentation by allocating from a remote node.
97 */ 97 */
98 int remote_node_defrag_ratio; 98 int remote_node_defrag_ratio;
99 struct kmem_cache_node *node[MAX_NUMNODES];
100#else
101 /* Avoid an extra cache line for UP */
102 struct kmem_cache_node local_node;
103#endif 99#endif
100 struct kmem_cache_node *node[MAX_NUMNODES];
104}; 101};
105 102
106/* 103/*
@@ -139,19 +136,16 @@ struct kmem_cache {
139 136
140#ifdef CONFIG_ZONE_DMA 137#ifdef CONFIG_ZONE_DMA
141#define SLUB_DMA __GFP_DMA 138#define SLUB_DMA __GFP_DMA
142/* Reserve extra caches for potential DMA use */
143#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT)
144#else 139#else
145/* Disable DMA functionality */ 140/* Disable DMA functionality */
146#define SLUB_DMA (__force gfp_t)0 141#define SLUB_DMA (__force gfp_t)0
147#define KMALLOC_CACHES SLUB_PAGE_SHIFT
148#endif 142#endif
149 143
150/* 144/*
151 * We keep the general caches in an array of slab caches that are used for 145 * We keep the general caches in an array of slab caches that are used for
152 * 2^x bytes of allocations. 146 * 2^x bytes of allocations.
153 */ 147 */
154extern struct kmem_cache kmalloc_caches[KMALLOC_CACHES]; 148extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
155 149
156/* 150/*
157 * Sorry that the following has to be that ugly but some versions of GCC 151 * Sorry that the following has to be that ugly but some versions of GCC
@@ -216,7 +210,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
216 if (index == 0) 210 if (index == 0)
217 return NULL; 211 return NULL;
218 212
219 return &kmalloc_caches[index]; 213 return kmalloc_caches[index];
220} 214}
221 215
222void *kmem_cache_alloc(struct kmem_cache *, gfp_t); 216void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
diff --git a/include/video/vga.h b/include/video/vga.h
index b49a5120ca2d..2b8691f7d256 100644
--- a/include/video/vga.h
+++ b/include/video/vga.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * Copyright history from vga16fb.c: 6 * Copyright history from vga16fb.c:
7 * Copyright 1999 Ben Pfaff and Petr Vandrovec 7 * Copyright 1999 Ben Pfaff and Petr Vandrovec
8 * Based on VGA info at http://www.goodnet.com/~tinara/FreeVGA/home.htm 8 * Based on VGA info at http://www.osdever.net/FreeVGA/home.htm
9 * Based on VESA framebuffer (c) 1998 Gerd Knorr 9 * Based on VESA framebuffer (c) 1998 Gerd Knorr
10 * 10 *
11 * This file is subject to the terms and conditions of the GNU General 11 * This file is subject to the terms and conditions of the GNU General
diff --git a/init/Kconfig b/init/Kconfig
index fdfd97efe0e0..3ae8ffe738eb 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -186,7 +186,7 @@ config KERNEL_LZO
186 depends on HAVE_KERNEL_LZO 186 depends on HAVE_KERNEL_LZO
187 help 187 help
188 Its compression ratio is the poorest among the 4. The kernel 188 Its compression ratio is the poorest among the 4. The kernel
189 size is about about 10% bigger than gzip; however its speed 189 size is about 10% bigger than gzip; however its speed
190 (both compression and decompression) is the fastest. 190 (both compression and decompression) is the fastest.
191 191
192endchoice 192endchoice
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index a96b850ba08a..c7a8f453919e 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -399,7 +399,7 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
399 } else 399 } else
400 return -EINVAL; 400 return -EINVAL;
401 401
402 pm_qos_req = (struct pm_qos_request_list *)filp->private_data; 402 pm_qos_req = filp->private_data;
403 pm_qos_update_request(pm_qos_req, value); 403 pm_qos_update_request(pm_qos_req, value);
404 404
405 return count; 405 return count;
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 544301d29dee..b8d2852baa4a 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -648,7 +648,7 @@ static int register_trace_probe(struct trace_probe *tp)
648 } 648 }
649 ret = register_probe_event(tp); 649 ret = register_probe_event(tp);
650 if (ret) { 650 if (ret) {
651 pr_warning("Faild to register probe event(%d)\n", ret); 651 pr_warning("Failed to register probe event(%d)\n", ret);
652 goto end; 652 goto end;
653 } 653 }
654 654
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 7b2a8ca97ada..69a32664c289 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -353,7 +353,7 @@ config SLUB_DEBUG_ON
353config SLUB_STATS 353config SLUB_STATS
354 default n 354 default n
355 bool "Enable SLUB performance statistics" 355 bool "Enable SLUB performance statistics"
356 depends on SLUB && SLUB_DEBUG && SYSFS 356 depends on SLUB && SYSFS
357 help 357 help
358 SLUB statistics are useful to debug SLUBs allocation behavior in 358 SLUB statistics are useful to debug SLUBs allocation behavior in
359 order find ways to optimize the allocator. This should never be 359 order find ways to optimize the allocator. This should never be
diff --git a/lib/idr.c b/lib/idr.c
index 7f1a4f0acf50..5e0966be0f7c 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -284,7 +284,7 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
284 * idr_get_new_above - allocate new idr entry above or equal to a start id 284 * idr_get_new_above - allocate new idr entry above or equal to a start id
285 * @idp: idr handle 285 * @idp: idr handle
286 * @ptr: pointer you want associated with the id 286 * @ptr: pointer you want associated with the id
287 * @start_id: id to start search at 287 * @starting_id: id to start search at
288 * @id: pointer to the allocated handle 288 * @id: pointer to the allocated handle
289 * 289 *
290 * This is the allocate id function. It should be called with any 290 * This is the allocate id function. It should be called with any
@@ -479,7 +479,7 @@ EXPORT_SYMBOL(idr_remove_all);
479 479
480/** 480/**
481 * idr_destroy - release all cached layers within an idr tree 481 * idr_destroy - release all cached layers within an idr tree
482 * idp: idr handle 482 * @idp: idr handle
483 */ 483 */
484void idr_destroy(struct idr *idp) 484void idr_destroy(struct idr *idp)
485{ 485{
@@ -586,10 +586,11 @@ EXPORT_SYMBOL(idr_for_each);
586/** 586/**
587 * idr_get_next - lookup next object of id to given id. 587 * idr_get_next - lookup next object of id to given id.
588 * @idp: idr handle 588 * @idp: idr handle
589 * @id: pointer to lookup key 589 * @nextidp: pointer to lookup key
590 * 590 *
591 * Returns pointer to registered object with id, which is next number to 591 * Returns pointer to registered object with id, which is next number to
592 * given id. 592 * given id. After being looked up, *@nextidp will be updated for the next
593 * iteration.
593 */ 594 */
594 595
595void *idr_get_next(struct idr *idp, int *nextidp) 596void *idr_get_next(struct idr *idp, int *nextidp)
@@ -758,7 +759,7 @@ EXPORT_SYMBOL(ida_pre_get);
758/** 759/**
759 * ida_get_new_above - allocate new ID above or equal to a start id 760 * ida_get_new_above - allocate new ID above or equal to a start id
760 * @ida: ida handle 761 * @ida: ida handle
761 * @staring_id: id to start search at 762 * @starting_id: id to start search at
762 * @p_id: pointer to the allocated handle 763 * @p_id: pointer to the allocated handle
763 * 764 *
764 * Allocate new ID above or equal to @ida. It should be called with 765 * Allocate new ID above or equal to @ida. It should be called with
@@ -912,7 +913,7 @@ EXPORT_SYMBOL(ida_remove);
912 913
913/** 914/**
914 * ida_destroy - release all cached layers within an ida tree 915 * ida_destroy - release all cached layers within an ida tree
915 * ida: ida handle 916 * @ida: ida handle
916 */ 917 */
917void ida_destroy(struct ida *ida) 918void ida_destroy(struct ida *ida)
918{ 919{
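
The clarified idr_get_next() kernel-doc above implies the usual iteration idiom: start at id 0, and after each hit bump the id past the value written back through @nextidp. A minimal sketch (the idr, payload type, and function name are hypothetical):

	#include <linux/idr.h>
	#include <linux/kernel.h>

	struct example_obj {
		int id;
	};

	/* Visit every object registered in @idr, lowest id first. */
	static void example_visit_all(struct idr *idr)
	{
		struct example_obj *obj;
		int id = 0;

		while ((obj = idr_get_next(idr, &id)) != NULL) {
			/* id now holds the id that was found. */
			pr_info("found object with id %d\n", obj->id);
			id++;	/* continue the search above the last hit */
		}
	}
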
diff --git a/mm/percpu.c b/mm/percpu.c
index 6fc9015534f8..efe816856a9d 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -31,7 +31,7 @@
31 * as small as 4 bytes. The allocator organizes chunks into lists 31 * as small as 4 bytes. The allocator organizes chunks into lists
32 * according to free size and tries to allocate from the fullest one. 32 * according to free size and tries to allocate from the fullest one.
33 * Each chunk keeps the maximum contiguous area size hint which is 33 * Each chunk keeps the maximum contiguous area size hint which is
34 * guaranteed to be eqaul to or larger than the maximum contiguous 34 * guaranteed to be equal to or larger than the maximum contiguous
35 * area in the chunk. This helps the allocator not to iterate the 35 * area in the chunk. This helps the allocator not to iterate the
36 * chunk maps unnecessarily. 36 * chunk maps unnecessarily.
37 * 37 *
diff --git a/mm/slob.c b/mm/slob.c
index d582171c8101..617b6d6c42c7 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -500,7 +500,9 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
500 } else { 500 } else {
501 unsigned int order = get_order(size); 501 unsigned int order = get_order(size);
502 502
503 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node); 503 if (likely(order))
504 gfp |= __GFP_COMP;
505 ret = slob_new_pages(gfp, order, node);
504 if (ret) { 506 if (ret) {
505 struct page *page; 507 struct page *page;
506 page = virt_to_page(ret); 508 page = virt_to_page(ret);
diff --git a/mm/slub.c b/mm/slub.c
index 13fffe1f0f3d..8fd5401bb071 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -168,7 +168,6 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
168 168
169/* Internal SLUB flags */ 169/* Internal SLUB flags */
170#define __OBJECT_POISON 0x80000000UL /* Poison object */ 170#define __OBJECT_POISON 0x80000000UL /* Poison object */
171#define __SYSFS_ADD_DEFERRED 0x40000000UL /* Not yet visible via sysfs */
172 171
173static int kmem_size = sizeof(struct kmem_cache); 172static int kmem_size = sizeof(struct kmem_cache);
174 173
@@ -178,7 +177,7 @@ static struct notifier_block slab_notifier;
178 177
179static enum { 178static enum {
180 DOWN, /* No slab functionality available */ 179 DOWN, /* No slab functionality available */
181 PARTIAL, /* kmem_cache_open() works but kmalloc does not */ 180 PARTIAL, /* Kmem_cache_node works */
182 UP, /* Everything works but does not show up in sysfs */ 181 UP, /* Everything works but does not show up in sysfs */
183 SYSFS /* Sysfs up */ 182 SYSFS /* Sysfs up */
184} slab_state = DOWN; 183} slab_state = DOWN;
@@ -199,7 +198,7 @@ struct track {
199 198
200enum track_item { TRACK_ALLOC, TRACK_FREE }; 199enum track_item { TRACK_ALLOC, TRACK_FREE };
201 200
202#ifdef CONFIG_SLUB_DEBUG 201#ifdef CONFIG_SYSFS
203static int sysfs_slab_add(struct kmem_cache *); 202static int sysfs_slab_add(struct kmem_cache *);
204static int sysfs_slab_alias(struct kmem_cache *, const char *); 203static int sysfs_slab_alias(struct kmem_cache *, const char *);
205static void sysfs_slab_remove(struct kmem_cache *); 204static void sysfs_slab_remove(struct kmem_cache *);
@@ -210,6 +209,7 @@ static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
210 { return 0; } 209 { return 0; }
211static inline void sysfs_slab_remove(struct kmem_cache *s) 210static inline void sysfs_slab_remove(struct kmem_cache *s)
212{ 211{
212 kfree(s->name);
213 kfree(s); 213 kfree(s);
214} 214}
215 215
@@ -233,11 +233,7 @@ int slab_is_available(void)
233 233
234static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node) 234static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
235{ 235{
236#ifdef CONFIG_NUMA
237 return s->node[node]; 236 return s->node[node];
238#else
239 return &s->local_node;
240#endif
241} 237}
242 238
243/* Verify that a pointer has an address that is valid within a slab page */ 239/* Verify that a pointer has an address that is valid within a slab page */
@@ -494,7 +490,7 @@ static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
494 dump_stack(); 490 dump_stack();
495} 491}
496 492
497static void init_object(struct kmem_cache *s, void *object, int active) 493static void init_object(struct kmem_cache *s, void *object, u8 val)
498{ 494{
499 u8 *p = object; 495 u8 *p = object;
500 496
@@ -504,9 +500,7 @@ static void init_object(struct kmem_cache *s, void *object, int active)
504 } 500 }
505 501
506 if (s->flags & SLAB_RED_ZONE) 502 if (s->flags & SLAB_RED_ZONE)
507 memset(p + s->objsize, 503 memset(p + s->objsize, val, s->inuse - s->objsize);
508 active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE,
509 s->inuse - s->objsize);
510} 504}
511 505
512static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes) 506static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
@@ -641,17 +635,14 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
641} 635}
642 636
643static int check_object(struct kmem_cache *s, struct page *page, 637static int check_object(struct kmem_cache *s, struct page *page,
644 void *object, int active) 638 void *object, u8 val)
645{ 639{
646 u8 *p = object; 640 u8 *p = object;
647 u8 *endobject = object + s->objsize; 641 u8 *endobject = object + s->objsize;
648 642
649 if (s->flags & SLAB_RED_ZONE) { 643 if (s->flags & SLAB_RED_ZONE) {
650 unsigned int red =
651 active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE;
652
653 if (!check_bytes_and_report(s, page, object, "Redzone", 644 if (!check_bytes_and_report(s, page, object, "Redzone",
654 endobject, red, s->inuse - s->objsize)) 645 endobject, val, s->inuse - s->objsize))
655 return 0; 646 return 0;
656 } else { 647 } else {
657 if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) { 648 if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
@@ -661,7 +652,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
661 } 652 }
662 653
663 if (s->flags & SLAB_POISON) { 654 if (s->flags & SLAB_POISON) {
664 if (!active && (s->flags & __OBJECT_POISON) && 655 if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
665 (!check_bytes_and_report(s, page, p, "Poison", p, 656 (!check_bytes_and_report(s, page, p, "Poison", p,
666 POISON_FREE, s->objsize - 1) || 657 POISON_FREE, s->objsize - 1) ||
667 !check_bytes_and_report(s, page, p, "Poison", 658 !check_bytes_and_report(s, page, p, "Poison",
@@ -673,7 +664,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
673 check_pad_bytes(s, page, p); 664 check_pad_bytes(s, page, p);
674 } 665 }
675 666
676 if (!s->offset && active) 667 if (!s->offset && val == SLUB_RED_ACTIVE)
677 /* 668 /*
678 * Object and freepointer overlap. Cannot check 669 * Object and freepointer overlap. Cannot check
679 * freepointer while object is allocated. 670 * freepointer while object is allocated.
@@ -792,6 +783,39 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
792} 783}
793 784
794/* 785/*
786 * Hooks for other subsystems that check memory allocations. In a typical
787 * production configuration these hooks all should produce no code at all.
788 */
789static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
790{
791 flags &= gfp_allowed_mask;
792 lockdep_trace_alloc(flags);
793 might_sleep_if(flags & __GFP_WAIT);
794
795 return should_failslab(s->objsize, flags, s->flags);
796}
797
798static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
799{
800 flags &= gfp_allowed_mask;
801 kmemcheck_slab_alloc(s, flags, object, s->objsize);
802 kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags);
803}
804
805static inline void slab_free_hook(struct kmem_cache *s, void *x)
806{
807 kmemleak_free_recursive(x, s->flags);
808}
809
810static inline void slab_free_hook_irq(struct kmem_cache *s, void *object)
811{
812 kmemcheck_slab_free(s, object, s->objsize);
813 debug_check_no_locks_freed(object, s->objsize);
814 if (!(s->flags & SLAB_DEBUG_OBJECTS))
815 debug_check_no_obj_freed(object, s->objsize);
816}
817
818/*
795 * Tracking of fully allocated slabs for debugging purposes. 819 * Tracking of fully allocated slabs for debugging purposes.
796 */ 820 */
797static void add_full(struct kmem_cache_node *n, struct page *page) 821static void add_full(struct kmem_cache_node *n, struct page *page)
@@ -838,7 +862,7 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
838 * dilemma by deferring the increment of the count during 862 * dilemma by deferring the increment of the count during
839 * bootstrap (see early_kmem_cache_node_alloc). 863 * bootstrap (see early_kmem_cache_node_alloc).
840 */ 864 */
841 if (!NUMA_BUILD || n) { 865 if (n) {
842 atomic_long_inc(&n->nr_slabs); 866 atomic_long_inc(&n->nr_slabs);
843 atomic_long_add(objects, &n->total_objects); 867 atomic_long_add(objects, &n->total_objects);
844 } 868 }
@@ -858,11 +882,11 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
858 if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))) 882 if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
859 return; 883 return;
860 884
861 init_object(s, object, 0); 885 init_object(s, object, SLUB_RED_INACTIVE);
862 init_tracking(s, object); 886 init_tracking(s, object);
863} 887}
864 888
865static int alloc_debug_processing(struct kmem_cache *s, struct page *page, 889static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *page,
866 void *object, unsigned long addr) 890 void *object, unsigned long addr)
867{ 891{
868 if (!check_slab(s, page)) 892 if (!check_slab(s, page))
@@ -878,14 +902,14 @@ static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
878 goto bad; 902 goto bad;
879 } 903 }
880 904
881 if (!check_object(s, page, object, 0)) 905 if (!check_object(s, page, object, SLUB_RED_INACTIVE))
882 goto bad; 906 goto bad;
883 907
884 /* Success perform special debug activities for allocs */ 908 /* Success perform special debug activities for allocs */
885 if (s->flags & SLAB_STORE_USER) 909 if (s->flags & SLAB_STORE_USER)
886 set_track(s, object, TRACK_ALLOC, addr); 910 set_track(s, object, TRACK_ALLOC, addr);
887 trace(s, page, object, 1); 911 trace(s, page, object, 1);
888 init_object(s, object, 1); 912 init_object(s, object, SLUB_RED_ACTIVE);
889 return 1; 913 return 1;
890 914
891bad: 915bad:
@@ -902,8 +926,8 @@ bad:
902 return 0; 926 return 0;
903} 927}
904 928
905static int free_debug_processing(struct kmem_cache *s, struct page *page, 929static noinline int free_debug_processing(struct kmem_cache *s,
906 void *object, unsigned long addr) 930 struct page *page, void *object, unsigned long addr)
907{ 931{
908 if (!check_slab(s, page)) 932 if (!check_slab(s, page))
909 goto fail; 933 goto fail;
@@ -918,7 +942,7 @@ static int free_debug_processing(struct kmem_cache *s, struct page *page,
918 goto fail; 942 goto fail;
919 } 943 }
920 944
921 if (!check_object(s, page, object, 1)) 945 if (!check_object(s, page, object, SLUB_RED_ACTIVE))
922 return 0; 946 return 0;
923 947
924 if (unlikely(s != page->slab)) { 948 if (unlikely(s != page->slab)) {
@@ -942,7 +966,7 @@ static int free_debug_processing(struct kmem_cache *s, struct page *page,
942 if (s->flags & SLAB_STORE_USER) 966 if (s->flags & SLAB_STORE_USER)
943 set_track(s, object, TRACK_FREE, addr); 967 set_track(s, object, TRACK_FREE, addr);
944 trace(s, page, object, 0); 968 trace(s, page, object, 0);
945 init_object(s, object, 0); 969 init_object(s, object, SLUB_RED_INACTIVE);
946 return 1; 970 return 1;
947 971
948fail: 972fail:
@@ -1046,7 +1070,7 @@ static inline int free_debug_processing(struct kmem_cache *s,
1046static inline int slab_pad_check(struct kmem_cache *s, struct page *page) 1070static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
1047 { return 1; } 1071 { return 1; }
1048static inline int check_object(struct kmem_cache *s, struct page *page, 1072static inline int check_object(struct kmem_cache *s, struct page *page,
1049 void *object, int active) { return 1; } 1073 void *object, u8 val) { return 1; }
1050static inline void add_full(struct kmem_cache_node *n, struct page *page) {} 1074static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
1051static inline unsigned long kmem_cache_flags(unsigned long objsize, 1075static inline unsigned long kmem_cache_flags(unsigned long objsize,
1052 unsigned long flags, const char *name, 1076 unsigned long flags, const char *name,
@@ -1066,7 +1090,19 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node,
1066 int objects) {} 1090 int objects) {}
1067static inline void dec_slabs_node(struct kmem_cache *s, int node, 1091static inline void dec_slabs_node(struct kmem_cache *s, int node,
1068 int objects) {} 1092 int objects) {}
1069#endif 1093
1094static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
1095 { return 0; }
1096
1097static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
1098 void *object) {}
1099
1100static inline void slab_free_hook(struct kmem_cache *s, void *x) {}
1101
1102static inline void slab_free_hook_irq(struct kmem_cache *s,
1103 void *object) {}
1104
1105#endif /* CONFIG_SLUB_DEBUG */
1070 1106
1071/* 1107/*
1072 * Slab allocation and freeing 1108 * Slab allocation and freeing
@@ -1194,7 +1230,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
1194 slab_pad_check(s, page); 1230 slab_pad_check(s, page);
1195 for_each_object(p, s, page_address(page), 1231 for_each_object(p, s, page_address(page),
1196 page->objects) 1232 page->objects)
1197 check_object(s, page, p, 0); 1233 check_object(s, page, p, SLUB_RED_INACTIVE);
1198 } 1234 }
1199 1235
1200 kmemcheck_free_shadow(page, compound_order(page)); 1236 kmemcheck_free_shadow(page, compound_order(page));
@@ -1274,13 +1310,19 @@ static void add_partial(struct kmem_cache_node *n,
1274 spin_unlock(&n->list_lock); 1310 spin_unlock(&n->list_lock);
1275} 1311}
1276 1312
1313static inline void __remove_partial(struct kmem_cache_node *n,
1314 struct page *page)
1315{
1316 list_del(&page->lru);
1317 n->nr_partial--;
1318}
1319
1277static void remove_partial(struct kmem_cache *s, struct page *page) 1320static void remove_partial(struct kmem_cache *s, struct page *page)
1278{ 1321{
1279 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); 1322 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1280 1323
1281 spin_lock(&n->list_lock); 1324 spin_lock(&n->list_lock);
1282 list_del(&page->lru); 1325 __remove_partial(n, page);
1283 n->nr_partial--;
1284 spin_unlock(&n->list_lock); 1326 spin_unlock(&n->list_lock);
1285} 1327}
1286 1328
@@ -1293,8 +1335,7 @@ static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
1293 struct page *page) 1335 struct page *page)
1294{ 1336{
1295 if (slab_trylock(page)) { 1337 if (slab_trylock(page)) {
1296 list_del(&page->lru); 1338 __remove_partial(n, page);
1297 n->nr_partial--;
1298 __SetPageSlubFrozen(page); 1339 __SetPageSlubFrozen(page);
1299 return 1; 1340 return 1;
1300 } 1341 }
@@ -1405,6 +1446,7 @@ static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
1405 * On exit the slab lock will have been dropped. 1446 * On exit the slab lock will have been dropped.
1406 */ 1447 */
1407static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail) 1448static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
1449 __releases(bitlock)
1408{ 1450{
1409 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); 1451 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1410 1452
@@ -1447,6 +1489,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
1447 * Remove the cpu slab 1489 * Remove the cpu slab
1448 */ 1490 */
1449static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) 1491static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
1492 __releases(bitlock)
1450{ 1493{
1451 struct page *page = c->page; 1494 struct page *page = c->page;
1452 int tail = 1; 1495 int tail = 1;
@@ -1647,6 +1690,7 @@ new_slab:
1647 goto load_freelist; 1690 goto load_freelist;
1648 } 1691 }
1649 1692
1693 gfpflags &= gfp_allowed_mask;
1650 if (gfpflags & __GFP_WAIT) 1694 if (gfpflags & __GFP_WAIT)
1651 local_irq_enable(); 1695 local_irq_enable();
1652 1696
@@ -1674,7 +1718,7 @@ debug:
1674 1718
1675 c->page->inuse++; 1719 c->page->inuse++;
1676 c->page->freelist = get_freepointer(s, object); 1720 c->page->freelist = get_freepointer(s, object);
1677 c->node = -1; 1721 c->node = NUMA_NO_NODE;
1678 goto unlock_out; 1722 goto unlock_out;
1679} 1723}
1680 1724
@@ -1695,12 +1739,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
1695 struct kmem_cache_cpu *c; 1739 struct kmem_cache_cpu *c;
1696 unsigned long flags; 1740 unsigned long flags;
1697 1741
1698 gfpflags &= gfp_allowed_mask; 1742 if (slab_pre_alloc_hook(s, gfpflags))
1699
1700 lockdep_trace_alloc(gfpflags);
1701 might_sleep_if(gfpflags & __GFP_WAIT);
1702
1703 if (should_failslab(s->objsize, gfpflags, s->flags))
1704 return NULL; 1743 return NULL;
1705 1744
1706 local_irq_save(flags); 1745 local_irq_save(flags);
@@ -1719,8 +1758,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
1719 if (unlikely(gfpflags & __GFP_ZERO) && object) 1758 if (unlikely(gfpflags & __GFP_ZERO) && object)
1720 memset(object, 0, s->objsize); 1759 memset(object, 0, s->objsize);
1721 1760
1722 kmemcheck_slab_alloc(s, gfpflags, object, s->objsize); 1761 slab_post_alloc_hook(s, gfpflags, object);
1723 kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, gfpflags);
1724 1762
1725 return object; 1763 return object;
1726} 1764}
@@ -1754,7 +1792,6 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
1754 return ret; 1792 return ret;
1755} 1793}
1756EXPORT_SYMBOL(kmem_cache_alloc_node); 1794EXPORT_SYMBOL(kmem_cache_alloc_node);
1757#endif
1758 1795
1759#ifdef CONFIG_TRACING 1796#ifdef CONFIG_TRACING
1760void *kmem_cache_alloc_node_notrace(struct kmem_cache *s, 1797void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
@@ -1765,6 +1802,7 @@ void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
1765} 1802}
1766EXPORT_SYMBOL(kmem_cache_alloc_node_notrace); 1803EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
1767#endif 1804#endif
1805#endif
1768 1806
1769/* 1807/*
1770 * Slow path handling. This may still be called frequently since objects 1808 * Slow path handling. This may still be called frequently since objects
@@ -1850,14 +1888,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
1850 struct kmem_cache_cpu *c; 1888 struct kmem_cache_cpu *c;
1851 unsigned long flags; 1889 unsigned long flags;
1852 1890
1853 kmemleak_free_recursive(x, s->flags); 1891 slab_free_hook(s, x);
1892
1854 local_irq_save(flags); 1893 local_irq_save(flags);
1855 c = __this_cpu_ptr(s->cpu_slab); 1894 c = __this_cpu_ptr(s->cpu_slab);
1856 kmemcheck_slab_free(s, object, s->objsize); 1895
1857 debug_check_no_locks_freed(object, s->objsize); 1896 slab_free_hook_irq(s, x);
1858 if (!(s->flags & SLAB_DEBUG_OBJECTS)) 1897
1859 debug_check_no_obj_freed(object, s->objsize); 1898 if (likely(page == c->page && c->node != NUMA_NO_NODE)) {
1860 if (likely(page == c->page && c->node >= 0)) {
1861 set_freepointer(s, object, c->freelist); 1899 set_freepointer(s, object, c->freelist);
1862 c->freelist = object; 1900 c->freelist = object;
1863 stat(s, FREE_FASTPATH); 1901 stat(s, FREE_FASTPATH);
@@ -2062,26 +2100,18 @@ init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
2062#endif 2100#endif
2063} 2101}
2064 2102
2065static DEFINE_PER_CPU(struct kmem_cache_cpu, kmalloc_percpu[KMALLOC_CACHES]); 2103static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
2066
2067static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
2068{ 2104{
2069 if (s < kmalloc_caches + KMALLOC_CACHES && s >= kmalloc_caches) 2105 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
2070 /* 2106 SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
2071 * Boot time creation of the kmalloc array. Use static per cpu data
2072 * since the per cpu allocator is not available yet.
2073 */
2074 s->cpu_slab = kmalloc_percpu + (s - kmalloc_caches);
2075 else
2076 s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);
2077 2107
2078 if (!s->cpu_slab) 2108 s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);
2079 return 0;
2080 2109
2081 return 1; 2110 return s->cpu_slab != NULL;
2082} 2111}
2083 2112
2084#ifdef CONFIG_NUMA 2113static struct kmem_cache *kmem_cache_node;
2114
2085/* 2115/*
2086 * No kmalloc_node yet so do it by hand. We know that this is the first 2116 * No kmalloc_node yet so do it by hand. We know that this is the first
2087 * slab on the node for this slabcache. There are no concurrent accesses 2117 * slab on the node for this slabcache. There are no concurrent accesses
@@ -2091,15 +2121,15 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
2091 * when allocating for the kmalloc_node_cache. This is used for bootstrapping 2121 * when allocating for the kmalloc_node_cache. This is used for bootstrapping
2092 * memory on a fresh node that has no slab structures yet. 2122 * memory on a fresh node that has no slab structures yet.
2093 */ 2123 */
2094static void early_kmem_cache_node_alloc(gfp_t gfpflags, int node) 2124static void early_kmem_cache_node_alloc(int node)
2095{ 2125{
2096 struct page *page; 2126 struct page *page;
2097 struct kmem_cache_node *n; 2127 struct kmem_cache_node *n;
2098 unsigned long flags; 2128 unsigned long flags;
2099 2129
2100 BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node)); 2130 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
2101 2131
2102 page = new_slab(kmalloc_caches, gfpflags, node); 2132 page = new_slab(kmem_cache_node, GFP_NOWAIT, node);
2103 2133
2104 BUG_ON(!page); 2134 BUG_ON(!page);
2105 if (page_to_nid(page) != node) { 2135 if (page_to_nid(page) != node) {
@@ -2111,15 +2141,15 @@ static void early_kmem_cache_node_alloc(gfp_t gfpflags, int node)
2111 2141
2112 n = page->freelist; 2142 n = page->freelist;
2113 BUG_ON(!n); 2143 BUG_ON(!n);
2114 page->freelist = get_freepointer(kmalloc_caches, n); 2144 page->freelist = get_freepointer(kmem_cache_node, n);
2115 page->inuse++; 2145 page->inuse++;
2116 kmalloc_caches->node[node] = n; 2146 kmem_cache_node->node[node] = n;
2117#ifdef CONFIG_SLUB_DEBUG 2147#ifdef CONFIG_SLUB_DEBUG
2118 init_object(kmalloc_caches, n, 1); 2148 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
2119 init_tracking(kmalloc_caches, n); 2149 init_tracking(kmem_cache_node, n);
2120#endif 2150#endif
2121 init_kmem_cache_node(n, kmalloc_caches); 2151 init_kmem_cache_node(n, kmem_cache_node);
2122 inc_slabs_node(kmalloc_caches, node, page->objects); 2152 inc_slabs_node(kmem_cache_node, node, page->objects);
2123 2153
2124 /* 2154 /*
2125 * lockdep requires consistent irq usage for each lock 2155 * lockdep requires consistent irq usage for each lock
@@ -2137,13 +2167,15 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
2137 2167
2138 for_each_node_state(node, N_NORMAL_MEMORY) { 2168 for_each_node_state(node, N_NORMAL_MEMORY) {
2139 struct kmem_cache_node *n = s->node[node]; 2169 struct kmem_cache_node *n = s->node[node];
2170
2140 if (n) 2171 if (n)
2141 kmem_cache_free(kmalloc_caches, n); 2172 kmem_cache_free(kmem_cache_node, n);
2173
2142 s->node[node] = NULL; 2174 s->node[node] = NULL;
2143 } 2175 }
2144} 2176}
2145 2177
2146static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags) 2178static int init_kmem_cache_nodes(struct kmem_cache *s)
2147{ 2179{
2148 int node; 2180 int node;
2149 2181
@@ -2151,11 +2183,11 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
2151 struct kmem_cache_node *n; 2183 struct kmem_cache_node *n;
2152 2184
2153 if (slab_state == DOWN) { 2185 if (slab_state == DOWN) {
2154 early_kmem_cache_node_alloc(gfpflags, node); 2186 early_kmem_cache_node_alloc(node);
2155 continue; 2187 continue;
2156 } 2188 }
2157 n = kmem_cache_alloc_node(kmalloc_caches, 2189 n = kmem_cache_alloc_node(kmem_cache_node,
2158 gfpflags, node); 2190 GFP_KERNEL, node);
2159 2191
2160 if (!n) { 2192 if (!n) {
2161 free_kmem_cache_nodes(s); 2193 free_kmem_cache_nodes(s);
@@ -2167,17 +2199,6 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
2167 } 2199 }
2168 return 1; 2200 return 1;
2169} 2201}
2170#else
2171static void free_kmem_cache_nodes(struct kmem_cache *s)
2172{
2173}
2174
2175static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
2176{
2177 init_kmem_cache_node(&s->local_node, s);
2178 return 1;
2179}
2180#endif
2181 2202
2182static void set_min_partial(struct kmem_cache *s, unsigned long min) 2203static void set_min_partial(struct kmem_cache *s, unsigned long min)
2183{ 2204{
@@ -2312,7 +2333,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
2312 2333
2313} 2334}
2314 2335
2315static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags, 2336static int kmem_cache_open(struct kmem_cache *s,
2316 const char *name, size_t size, 2337 const char *name, size_t size,
2317 size_t align, unsigned long flags, 2338 size_t align, unsigned long flags,
2318 void (*ctor)(void *)) 2339 void (*ctor)(void *))
@@ -2348,10 +2369,10 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
2348#ifdef CONFIG_NUMA 2369#ifdef CONFIG_NUMA
2349 s->remote_node_defrag_ratio = 1000; 2370 s->remote_node_defrag_ratio = 1000;
2350#endif 2371#endif
2351 if (!init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA)) 2372 if (!init_kmem_cache_nodes(s))
2352 goto error; 2373 goto error;
2353 2374
2354 if (alloc_kmem_cache_cpus(s, gfpflags & ~SLUB_DMA)) 2375 if (alloc_kmem_cache_cpus(s))
2355 return 1; 2376 return 1;
2356 2377
2357 free_kmem_cache_nodes(s); 2378 free_kmem_cache_nodes(s);
@@ -2414,9 +2435,8 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
2414#ifdef CONFIG_SLUB_DEBUG 2435#ifdef CONFIG_SLUB_DEBUG
2415 void *addr = page_address(page); 2436 void *addr = page_address(page);
2416 void *p; 2437 void *p;
2417 long *map = kzalloc(BITS_TO_LONGS(page->objects) * sizeof(long), 2438 unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
2418 GFP_ATOMIC); 2439 sizeof(long), GFP_ATOMIC);
2419
2420 if (!map) 2440 if (!map)
2421 return; 2441 return;
2422 slab_err(s, page, "%s", text); 2442 slab_err(s, page, "%s", text);
@@ -2448,9 +2468,8 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
2448 spin_lock_irqsave(&n->list_lock, flags); 2468 spin_lock_irqsave(&n->list_lock, flags);
2449 list_for_each_entry_safe(page, h, &n->partial, lru) { 2469 list_for_each_entry_safe(page, h, &n->partial, lru) {
2450 if (!page->inuse) { 2470 if (!page->inuse) {
2451 list_del(&page->lru); 2471 __remove_partial(n, page);
2452 discard_slab(s, page); 2472 discard_slab(s, page);
2453 n->nr_partial--;
2454 } else { 2473 } else {
2455 list_slab_objects(s, page, 2474 list_slab_objects(s, page,
2456 "Objects remaining on kmem_cache_close()"); 2475 "Objects remaining on kmem_cache_close()");
@@ -2507,9 +2526,15 @@ EXPORT_SYMBOL(kmem_cache_destroy);
2507 * Kmalloc subsystem 2526 * Kmalloc subsystem
2508 *******************************************************************/ 2527 *******************************************************************/
2509 2528
2510struct kmem_cache kmalloc_caches[KMALLOC_CACHES] __cacheline_aligned; 2529struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
2511EXPORT_SYMBOL(kmalloc_caches); 2530EXPORT_SYMBOL(kmalloc_caches);
2512 2531
2532static struct kmem_cache *kmem_cache;
2533
2534#ifdef CONFIG_ZONE_DMA
2535static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
2536#endif
2537
2513static int __init setup_slub_min_order(char *str) 2538static int __init setup_slub_min_order(char *str)
2514{ 2539{
2515 get_option(&str, &slub_min_order); 2540 get_option(&str, &slub_min_order);
@@ -2546,116 +2571,29 @@ static int __init setup_slub_nomerge(char *str)
2546 2571
2547__setup("slub_nomerge", setup_slub_nomerge); 2572__setup("slub_nomerge", setup_slub_nomerge);
2548 2573
2549static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s, 2574static struct kmem_cache *__init create_kmalloc_cache(const char *name,
2550 const char *name, int size, gfp_t gfp_flags) 2575 int size, unsigned int flags)
2551{ 2576{
2552 unsigned int flags = 0; 2577 struct kmem_cache *s;
2553 2578
2554 if (gfp_flags & SLUB_DMA) 2579 s = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
2555 flags = SLAB_CACHE_DMA;
2556 2580
2557 /* 2581 /*
2558 * This function is called with IRQs disabled during early-boot on 2582 * This function is called with IRQs disabled during early-boot on
2559 * single CPU so there's no need to take slub_lock here. 2583 * single CPU so there's no need to take slub_lock here.
2560 */ 2584 */
2561 if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN, 2585 if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN,
2562 flags, NULL)) 2586 flags, NULL))
2563 goto panic; 2587 goto panic;
2564 2588
2565 list_add(&s->list, &slab_caches); 2589 list_add(&s->list, &slab_caches);
2566
2567 if (sysfs_slab_add(s))
2568 goto panic;
2569 return s; 2590 return s;
2570 2591
2571panic: 2592panic:
2572 panic("Creation of kmalloc slab %s size=%d failed.\n", name, size); 2593 panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
2594 return NULL;
2573} 2595}
2574 2596
2575#ifdef CONFIG_ZONE_DMA
2576static struct kmem_cache *kmalloc_caches_dma[SLUB_PAGE_SHIFT];
2577
2578static void sysfs_add_func(struct work_struct *w)
2579{
2580 struct kmem_cache *s;
2581
2582 down_write(&slub_lock);
2583 list_for_each_entry(s, &slab_caches, list) {
2584 if (s->flags & __SYSFS_ADD_DEFERRED) {
2585 s->flags &= ~__SYSFS_ADD_DEFERRED;
2586 sysfs_slab_add(s);
2587 }
2588 }
2589 up_write(&slub_lock);
2590}
2591
2592static DECLARE_WORK(sysfs_add_work, sysfs_add_func);
2593
2594static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
2595{
2596 struct kmem_cache *s;
2597 char *text;
2598 size_t realsize;
2599 unsigned long slabflags;
2600 int i;
2601
2602 s = kmalloc_caches_dma[index];
2603 if (s)
2604 return s;
2605
2606 /* Dynamically create dma cache */
2607 if (flags & __GFP_WAIT)
2608 down_write(&slub_lock);
2609 else {
2610 if (!down_write_trylock(&slub_lock))
2611 goto out;
2612 }
2613
2614 if (kmalloc_caches_dma[index])
2615 goto unlock_out;
2616
2617 realsize = kmalloc_caches[index].objsize;
2618 text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
2619 (unsigned int)realsize);
2620
2621 s = NULL;
2622 for (i = 0; i < KMALLOC_CACHES; i++)
2623 if (!kmalloc_caches[i].size)
2624 break;
2625
2626 BUG_ON(i >= KMALLOC_CACHES);
2627 s = kmalloc_caches + i;
2628
2629 /*
2630 * Must defer sysfs creation to a workqueue because we don't know
2631 * what context we are called from. Before sysfs comes up, we don't
2632 * need to do anything because our sysfs initcall will start by
2633 * adding all existing slabs to sysfs.
2634 */
2635 slabflags = SLAB_CACHE_DMA|SLAB_NOTRACK;
2636 if (slab_state >= SYSFS)
2637 slabflags |= __SYSFS_ADD_DEFERRED;
2638
2639 if (!text || !kmem_cache_open(s, flags, text,
2640 realsize, ARCH_KMALLOC_MINALIGN, slabflags, NULL)) {
2641 s->size = 0;
2642 kfree(text);
2643 goto unlock_out;
2644 }
2645
2646 list_add(&s->list, &slab_caches);
2647 kmalloc_caches_dma[index] = s;
2648
2649 if (slab_state >= SYSFS)
2650 schedule_work(&sysfs_add_work);
2651
2652unlock_out:
2653 up_write(&slub_lock);
2654out:
2655 return kmalloc_caches_dma[index];
2656}
2657#endif
2658
2659/* 2597/*
2660 * Conversion table for small slabs sizes / 8 to the index in the 2598 * Conversion table for small slabs sizes / 8 to the index in the
2661 * kmalloc array. This is necessary for slabs < 192 since we have non power 2599 * kmalloc array. This is necessary for slabs < 192 since we have non power
@@ -2708,10 +2646,10 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
2708 2646
2709#ifdef CONFIG_ZONE_DMA 2647#ifdef CONFIG_ZONE_DMA
2710 if (unlikely((flags & SLUB_DMA))) 2648 if (unlikely((flags & SLUB_DMA)))
2711 return dma_kmalloc_cache(index, flags); 2649 return kmalloc_dma_caches[index];
2712 2650
2713#endif 2651#endif
2714 return &kmalloc_caches[index]; 2652 return kmalloc_caches[index];
2715} 2653}
2716 2654
2717void *__kmalloc(size_t size, gfp_t flags) 2655void *__kmalloc(size_t size, gfp_t flags)
@@ -2735,6 +2673,7 @@ void *__kmalloc(size_t size, gfp_t flags)
2735} 2673}
2736EXPORT_SYMBOL(__kmalloc); 2674EXPORT_SYMBOL(__kmalloc);
2737 2675
2676#ifdef CONFIG_NUMA
2738static void *kmalloc_large_node(size_t size, gfp_t flags, int node) 2677static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
2739{ 2678{
2740 struct page *page; 2679 struct page *page;
@@ -2749,7 +2688,6 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
2749 return ptr; 2688 return ptr;
2750} 2689}
2751 2690
2752#ifdef CONFIG_NUMA
2753void *__kmalloc_node(size_t size, gfp_t flags, int node) 2691void *__kmalloc_node(size_t size, gfp_t flags, int node)
2754{ 2692{
2755 struct kmem_cache *s; 2693 struct kmem_cache *s;
@@ -2889,8 +2827,7 @@ int kmem_cache_shrink(struct kmem_cache *s)
2889 * may have freed the last object and be 2827 * may have freed the last object and be
2890 * waiting to release the slab. 2828 * waiting to release the slab.
2891 */ 2829 */
2892 list_del(&page->lru); 2830 __remove_partial(n, page);
2893 n->nr_partial--;
2894 slab_unlock(page); 2831 slab_unlock(page);
2895 discard_slab(s, page); 2832 discard_slab(s, page);
2896 } else { 2833 } else {
@@ -2914,7 +2851,7 @@ int kmem_cache_shrink(struct kmem_cache *s)
2914} 2851}
2915EXPORT_SYMBOL(kmem_cache_shrink); 2852EXPORT_SYMBOL(kmem_cache_shrink);
2916 2853
2917#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG) 2854#if defined(CONFIG_MEMORY_HOTPLUG)
2918static int slab_mem_going_offline_callback(void *arg) 2855static int slab_mem_going_offline_callback(void *arg)
2919{ 2856{
2920 struct kmem_cache *s; 2857 struct kmem_cache *s;
@@ -2956,7 +2893,7 @@ static void slab_mem_offline_callback(void *arg)
2956 BUG_ON(slabs_node(s, offline_node)); 2893 BUG_ON(slabs_node(s, offline_node));
2957 2894
2958 s->node[offline_node] = NULL; 2895 s->node[offline_node] = NULL;
2959 kmem_cache_free(kmalloc_caches, n); 2896 kmem_cache_free(kmem_cache_node, n);
2960 } 2897 }
2961 } 2898 }
2962 up_read(&slub_lock); 2899 up_read(&slub_lock);
@@ -2989,7 +2926,7 @@ static int slab_mem_going_online_callback(void *arg)
2989 * since memory is not yet available from the node that 2926 * since memory is not yet available from the node that
2990 * is brought up. 2927 * is brought up.
2991 */ 2928 */
2992 n = kmem_cache_alloc(kmalloc_caches, GFP_KERNEL); 2929 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
2993 if (!n) { 2930 if (!n) {
2994 ret = -ENOMEM; 2931 ret = -ENOMEM;
2995 goto out; 2932 goto out;
@@ -3035,46 +2972,92 @@ static int slab_memory_callback(struct notifier_block *self,
3035 * Basic setup of slabs 2972 * Basic setup of slabs
3036 *******************************************************************/ 2973 *******************************************************************/
3037 2974
2975/*
2976 * Used for early kmem_cache structures that were allocated using
2977 * the page allocator
2978 */
2979
2980static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
2981{
2982 int node;
2983
2984 list_add(&s->list, &slab_caches);
2985 s->refcount = -1;
2986
2987 for_each_node_state(node, N_NORMAL_MEMORY) {
2988 struct kmem_cache_node *n = get_node(s, node);
2989 struct page *p;
2990
2991 if (n) {
2992 list_for_each_entry(p, &n->partial, lru)
2993 p->slab = s;
2994
2995#ifdef CONFIG_SLAB_DEBUG
2996 list_for_each_entry(p, &n->full, lru)
2997 p->slab = s;
2998#endif
2999 }
3000 }
3001}
3002
3038void __init kmem_cache_init(void) 3003void __init kmem_cache_init(void)
3039{ 3004{
3040 int i; 3005 int i;
3041 int caches = 0; 3006 int caches = 0;
3007 struct kmem_cache *temp_kmem_cache;
3008 int order;
3009 struct kmem_cache *temp_kmem_cache_node;
3010 unsigned long kmalloc_size;
3011
3012 kmem_size = offsetof(struct kmem_cache, node) +
3013 nr_node_ids * sizeof(struct kmem_cache_node *);
3014
3015 /* Allocate two kmem_caches from the page allocator */
3016 kmalloc_size = ALIGN(kmem_size, cache_line_size());
3017 order = get_order(2 * kmalloc_size);
3018 kmem_cache = (void *)__get_free_pages(GFP_NOWAIT, order);
3042 3019
3043#ifdef CONFIG_NUMA
3044 /* 3020 /*
3045 * Must first have the slab cache available for the allocations of the 3021 * Must first have the slab cache available for the allocations of the
3046 * struct kmem_cache_node's. There is special bootstrap code in 3022 * struct kmem_cache_node's. There is special bootstrap code in
3047 * kmem_cache_open for slab_state == DOWN. 3023 * kmem_cache_open for slab_state == DOWN.
3048 */ 3024 */
3049 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node", 3025 kmem_cache_node = (void *)kmem_cache + kmalloc_size;
3050 sizeof(struct kmem_cache_node), GFP_NOWAIT); 3026
3051 kmalloc_caches[0].refcount = -1; 3027 kmem_cache_open(kmem_cache_node, "kmem_cache_node",
3052 caches++; 3028 sizeof(struct kmem_cache_node),
3029 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
3053 3030
3054 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); 3031 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
3055#endif
3056 3032
3057 /* Able to allocate the per node structures */ 3033 /* Able to allocate the per node structures */
3058 slab_state = PARTIAL; 3034 slab_state = PARTIAL;
3059 3035
3060 /* Caches that are not of the two-to-the-power-of size */ 3036 temp_kmem_cache = kmem_cache;
3061 if (KMALLOC_MIN_SIZE <= 32) { 3037 kmem_cache_open(kmem_cache, "kmem_cache", kmem_size,
3062 create_kmalloc_cache(&kmalloc_caches[1], 3038 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
3063 "kmalloc-96", 96, GFP_NOWAIT); 3039 kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
3064 caches++; 3040 memcpy(kmem_cache, temp_kmem_cache, kmem_size);
3065 }
3066 if (KMALLOC_MIN_SIZE <= 64) {
3067 create_kmalloc_cache(&kmalloc_caches[2],
3068 "kmalloc-192", 192, GFP_NOWAIT);
3069 caches++;
3070 }
3071 3041
3072 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) { 3042 /*
3073 create_kmalloc_cache(&kmalloc_caches[i], 3043 * Allocate kmem_cache_node properly from the kmem_cache slab.
3074 "kmalloc", 1 << i, GFP_NOWAIT); 3044 * kmem_cache_node is separately allocated so no need to
3075 caches++; 3045 * update any list pointers.
3076 } 3046 */
3047 temp_kmem_cache_node = kmem_cache_node;
3048
3049 kmem_cache_node = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
3050 memcpy(kmem_cache_node, temp_kmem_cache_node, kmem_size);
3051
3052 kmem_cache_bootstrap_fixup(kmem_cache_node);
3077 3053
3054 caches++;
3055 kmem_cache_bootstrap_fixup(kmem_cache);
3056 caches++;
3057 /* Free temporary boot structure */
3058 free_pages((unsigned long)temp_kmem_cache, order);
3059
3060 /* Now we can use the kmem_cache to allocate kmalloc slabs */
3078 3061
3079 /* 3062 /*
3080 * Patch up the size_index table if we have strange large alignment 3063 * Patch up the size_index table if we have strange large alignment
@@ -3114,26 +3097,60 @@ void __init kmem_cache_init(void)
3114 size_index[size_index_elem(i)] = 8; 3097 size_index[size_index_elem(i)] = 8;
3115 } 3098 }
3116 3099
3100 /* Caches that are not of the two-to-the-power-of size */
3101 if (KMALLOC_MIN_SIZE <= 32) {
3102 kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
3103 caches++;
3104 }
3105
3106 if (KMALLOC_MIN_SIZE <= 64) {
3107 kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
3108 caches++;
3109 }
3110
3111 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
3112 kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
3113 caches++;
3114 }
3115
3117 slab_state = UP; 3116 slab_state = UP;
3118 3117
3119 /* Provide the correct kmalloc names now that the caches are up */ 3118 /* Provide the correct kmalloc names now that the caches are up */
3119 if (KMALLOC_MIN_SIZE <= 32) {
3120 kmalloc_caches[1]->name = kstrdup(kmalloc_caches[1]->name, GFP_NOWAIT);
3121 BUG_ON(!kmalloc_caches[1]->name);
3122 }
3123
3124 if (KMALLOC_MIN_SIZE <= 64) {
3125 kmalloc_caches[2]->name = kstrdup(kmalloc_caches[2]->name, GFP_NOWAIT);
3126 BUG_ON(!kmalloc_caches[2]->name);
3127 }
3128
3120 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) { 3129 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
3121 char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i); 3130 char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
3122 3131
3123 BUG_ON(!s); 3132 BUG_ON(!s);
3124 kmalloc_caches[i].name = s; 3133 kmalloc_caches[i]->name = s;
3125 } 3134 }
3126 3135
3127#ifdef CONFIG_SMP 3136#ifdef CONFIG_SMP
3128 register_cpu_notifier(&slab_notifier); 3137 register_cpu_notifier(&slab_notifier);
3129#endif 3138#endif
3130#ifdef CONFIG_NUMA
3131 kmem_size = offsetof(struct kmem_cache, node) +
3132 nr_node_ids * sizeof(struct kmem_cache_node *);
3133#else
3134 kmem_size = sizeof(struct kmem_cache);
3135#endif
3136 3139
3140#ifdef CONFIG_ZONE_DMA
3141 for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
3142 struct kmem_cache *s = kmalloc_caches[i];
3143
3144 if (s && s->size) {
3145 char *name = kasprintf(GFP_NOWAIT,
3146 "dma-kmalloc-%d", s->objsize);
3147
3148 BUG_ON(!name);
3149 kmalloc_dma_caches[i] = create_kmalloc_cache(name,
3150 s->objsize, SLAB_CACHE_DMA);
3151 }
3152 }
3153#endif
3137 printk(KERN_INFO 3154 printk(KERN_INFO
3138 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d," 3155 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
3139 " CPUs=%d, Nodes=%d\n", 3156 " CPUs=%d, Nodes=%d\n",
@@ -3211,6 +3228,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
3211 size_t align, unsigned long flags, void (*ctor)(void *)) 3228 size_t align, unsigned long flags, void (*ctor)(void *))
3212{ 3229{
3213 struct kmem_cache *s; 3230 struct kmem_cache *s;
3231 char *n;
3214 3232
3215 if (WARN_ON(!name)) 3233 if (WARN_ON(!name))
3216 return NULL; 3234 return NULL;
@@ -3234,19 +3252,25 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
3234 return s; 3252 return s;
3235 } 3253 }
3236 3254
3255 n = kstrdup(name, GFP_KERNEL);
3256 if (!n)
3257 goto err;
3258
3237 s = kmalloc(kmem_size, GFP_KERNEL); 3259 s = kmalloc(kmem_size, GFP_KERNEL);
3238 if (s) { 3260 if (s) {
3239 if (kmem_cache_open(s, GFP_KERNEL, name, 3261 if (kmem_cache_open(s, n,
3240 size, align, flags, ctor)) { 3262 size, align, flags, ctor)) {
3241 list_add(&s->list, &slab_caches); 3263 list_add(&s->list, &slab_caches);
3242 if (sysfs_slab_add(s)) { 3264 if (sysfs_slab_add(s)) {
3243 list_del(&s->list); 3265 list_del(&s->list);
3266 kfree(n);
3244 kfree(s); 3267 kfree(s);
3245 goto err; 3268 goto err;
3246 } 3269 }
3247 up_write(&slub_lock); 3270 up_write(&slub_lock);
3248 return s; 3271 return s;
3249 } 3272 }
3273 kfree(n);
3250 kfree(s); 3274 kfree(s);
3251 } 3275 }
3252 up_write(&slub_lock); 3276 up_write(&slub_lock);
@@ -3318,6 +3342,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
3318 return ret; 3342 return ret;
3319} 3343}
3320 3344
3345#ifdef CONFIG_NUMA
3321void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, 3346void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
3322 int node, unsigned long caller) 3347 int node, unsigned long caller)
3323{ 3348{
@@ -3346,8 +3371,9 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
3346 3371
3347 return ret; 3372 return ret;
3348} 3373}
3374#endif
3349 3375
3350#ifdef CONFIG_SLUB_DEBUG 3376#ifdef CONFIG_SYSFS
3351static int count_inuse(struct page *page) 3377static int count_inuse(struct page *page)
3352{ 3378{
3353 return page->inuse; 3379 return page->inuse;
@@ -3357,7 +3383,9 @@ static int count_total(struct page *page)
3357{ 3383{
3358 return page->objects; 3384 return page->objects;
3359} 3385}
3386#endif
3360 3387
3388#ifdef CONFIG_SLUB_DEBUG
3361static int validate_slab(struct kmem_cache *s, struct page *page, 3389static int validate_slab(struct kmem_cache *s, struct page *page,
3362 unsigned long *map) 3390 unsigned long *map)
3363{ 3391{
@@ -3448,65 +3476,6 @@ static long validate_slab_cache(struct kmem_cache *s)
3448 kfree(map); 3476 kfree(map);
3449 return count; 3477 return count;
3450} 3478}
3451
3452#ifdef SLUB_RESILIENCY_TEST
3453static void resiliency_test(void)
3454{
3455 u8 *p;
3456
3457 printk(KERN_ERR "SLUB resiliency testing\n");
3458 printk(KERN_ERR "-----------------------\n");
3459 printk(KERN_ERR "A. Corruption after allocation\n");
3460
3461 p = kzalloc(16, GFP_KERNEL);
3462 p[16] = 0x12;
3463 printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
3464 " 0x12->0x%p\n\n", p + 16);
3465
3466 validate_slab_cache(kmalloc_caches + 4);
3467
3468 /* Hmmm... The next two are dangerous */
3469 p = kzalloc(32, GFP_KERNEL);
3470 p[32 + sizeof(void *)] = 0x34;
3471 printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
3472 " 0x34 -> -0x%p\n", p);
3473 printk(KERN_ERR
3474 "If allocated object is overwritten then not detectable\n\n");
3475
3476 validate_slab_cache(kmalloc_caches + 5);
3477 p = kzalloc(64, GFP_KERNEL);
3478 p += 64 + (get_cycles() & 0xff) * sizeof(void *);
3479 *p = 0x56;
3480 printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
3481 p);
3482 printk(KERN_ERR
3483 "If allocated object is overwritten then not detectable\n\n");
3484 validate_slab_cache(kmalloc_caches + 6);
3485
3486 printk(KERN_ERR "\nB. Corruption after free\n");
3487 p = kzalloc(128, GFP_KERNEL);
3488 kfree(p);
3489 *p = 0x78;
3490 printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
3491 validate_slab_cache(kmalloc_caches + 7);
3492
3493 p = kzalloc(256, GFP_KERNEL);
3494 kfree(p);
3495 p[50] = 0x9a;
3496 printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
3497 p);
3498 validate_slab_cache(kmalloc_caches + 8);
3499
3500 p = kzalloc(512, GFP_KERNEL);
3501 kfree(p);
3502 p[512] = 0xab;
3503 printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
3504 validate_slab_cache(kmalloc_caches + 9);
3505}
3506#else
3507static void resiliency_test(void) {};
3508#endif
3509
3510/* 3479/*
3511 * Generate lists of code addresses where slabcache objects are allocated 3480 * Generate lists of code addresses where slabcache objects are allocated
3512 * and freed. 3481 * and freed.
@@ -3635,7 +3604,7 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
3635 3604
3636static void process_slab(struct loc_track *t, struct kmem_cache *s, 3605static void process_slab(struct loc_track *t, struct kmem_cache *s,
3637 struct page *page, enum track_item alloc, 3606 struct page *page, enum track_item alloc,
3638 long *map) 3607 unsigned long *map)
3639{ 3608{
3640 void *addr = page_address(page); 3609 void *addr = page_address(page);
3641 void *p; 3610 void *p;
@@ -3735,7 +3704,71 @@ static int list_locations(struct kmem_cache *s, char *buf,
3735 len += sprintf(buf, "No data\n"); 3704 len += sprintf(buf, "No data\n");
3736 return len; 3705 return len;
3737} 3706}
3707#endif
3708
3709#ifdef SLUB_RESILIENCY_TEST
3710static void resiliency_test(void)
3711{
3712 u8 *p;
3738 3713
3714 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || SLUB_PAGE_SHIFT < 10);
3715
3716 printk(KERN_ERR "SLUB resiliency testing\n");
3717 printk(KERN_ERR "-----------------------\n");
3718 printk(KERN_ERR "A. Corruption after allocation\n");
3719
3720 p = kzalloc(16, GFP_KERNEL);
3721 p[16] = 0x12;
3722 printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
3723 " 0x12->0x%p\n\n", p + 16);
3724
3725 validate_slab_cache(kmalloc_caches[4]);
3726
3727 /* Hmmm... The next two are dangerous */
3728 p = kzalloc(32, GFP_KERNEL);
3729 p[32 + sizeof(void *)] = 0x34;
3730 printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
3731 " 0x34 -> -0x%p\n", p);
3732 printk(KERN_ERR
3733 "If allocated object is overwritten then not detectable\n\n");
3734
3735 validate_slab_cache(kmalloc_caches[5]);
3736 p = kzalloc(64, GFP_KERNEL);
3737 p += 64 + (get_cycles() & 0xff) * sizeof(void *);
3738 *p = 0x56;
3739 printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
3740 p);
3741 printk(KERN_ERR
3742 "If allocated object is overwritten then not detectable\n\n");
3743 validate_slab_cache(kmalloc_caches[6]);
3744
3745 printk(KERN_ERR "\nB. Corruption after free\n");
3746 p = kzalloc(128, GFP_KERNEL);
3747 kfree(p);
3748 *p = 0x78;
3749 printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
3750 validate_slab_cache(kmalloc_caches[7]);
3751
3752 p = kzalloc(256, GFP_KERNEL);
3753 kfree(p);
3754 p[50] = 0x9a;
3755 printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
3756 p);
3757 validate_slab_cache(kmalloc_caches[8]);
3758
3759 p = kzalloc(512, GFP_KERNEL);
3760 kfree(p);
3761 p[512] = 0xab;
3762 printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
3763 validate_slab_cache(kmalloc_caches[9]);
3764}
3765#else
3766#ifdef CONFIG_SYSFS
3767static void resiliency_test(void) {};
3768#endif
3769#endif
3770
3771#ifdef CONFIG_SYSFS
3739enum slab_stat_type { 3772enum slab_stat_type {
3740 SL_ALL, /* All slabs */ 3773 SL_ALL, /* All slabs */
3741 SL_PARTIAL, /* Only partially allocated slabs */ 3774 SL_PARTIAL, /* Only partially allocated slabs */
@@ -3788,6 +3821,8 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
3788 } 3821 }
3789 } 3822 }
3790 3823
3824 down_read(&slub_lock);
3825#ifdef CONFIG_SLUB_DEBUG
3791 if (flags & SO_ALL) { 3826 if (flags & SO_ALL) {
3792 for_each_node_state(node, N_NORMAL_MEMORY) { 3827 for_each_node_state(node, N_NORMAL_MEMORY) {
3793 struct kmem_cache_node *n = get_node(s, node); 3828 struct kmem_cache_node *n = get_node(s, node);
@@ -3804,7 +3839,9 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
3804 nodes[node] += x; 3839 nodes[node] += x;
3805 } 3840 }
3806 3841
3807 } else if (flags & SO_PARTIAL) { 3842 } else
3843#endif
3844 if (flags & SO_PARTIAL) {
3808 for_each_node_state(node, N_NORMAL_MEMORY) { 3845 for_each_node_state(node, N_NORMAL_MEMORY) {
3809 struct kmem_cache_node *n = get_node(s, node); 3846 struct kmem_cache_node *n = get_node(s, node);
3810 3847
@@ -3829,6 +3866,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
3829 return x + sprintf(buf + x, "\n"); 3866 return x + sprintf(buf + x, "\n");
3830} 3867}
3831 3868
3869#ifdef CONFIG_SLUB_DEBUG
3832static int any_slab_objects(struct kmem_cache *s) 3870static int any_slab_objects(struct kmem_cache *s)
3833{ 3871{
3834 int node; 3872 int node;
@@ -3844,6 +3882,7 @@ static int any_slab_objects(struct kmem_cache *s)
3844 } 3882 }
3845 return 0; 3883 return 0;
3846} 3884}
3885#endif
3847 3886
3848#define to_slab_attr(n) container_of(n, struct slab_attribute, attr) 3887#define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
3849#define to_slab(n) container_of(n, struct kmem_cache, kobj); 3888#define to_slab(n) container_of(n, struct kmem_cache, kobj);
@@ -3945,12 +3984,6 @@ static ssize_t aliases_show(struct kmem_cache *s, char *buf)
3945} 3984}
3946SLAB_ATTR_RO(aliases); 3985SLAB_ATTR_RO(aliases);
3947 3986
3948static ssize_t slabs_show(struct kmem_cache *s, char *buf)
3949{
3950 return show_slab_objects(s, buf, SO_ALL);
3951}
3952SLAB_ATTR_RO(slabs);
3953
3954static ssize_t partial_show(struct kmem_cache *s, char *buf) 3987static ssize_t partial_show(struct kmem_cache *s, char *buf)
3955{ 3988{
3956 return show_slab_objects(s, buf, SO_PARTIAL); 3989 return show_slab_objects(s, buf, SO_PARTIAL);
@@ -3975,93 +4008,83 @@ static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
3975} 4008}
3976SLAB_ATTR_RO(objects_partial); 4009SLAB_ATTR_RO(objects_partial);
3977 4010
3978static ssize_t total_objects_show(struct kmem_cache *s, char *buf) 4011static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
3979{
3980 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
3981}
3982SLAB_ATTR_RO(total_objects);
3983
3984static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
3985{ 4012{
3986 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE)); 4013 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
3987} 4014}
3988 4015
3989static ssize_t sanity_checks_store(struct kmem_cache *s, 4016static ssize_t reclaim_account_store(struct kmem_cache *s,
3990 const char *buf, size_t length) 4017 const char *buf, size_t length)
3991{ 4018{
3992 s->flags &= ~SLAB_DEBUG_FREE; 4019 s->flags &= ~SLAB_RECLAIM_ACCOUNT;
3993 if (buf[0] == '1') 4020 if (buf[0] == '1')
3994 s->flags |= SLAB_DEBUG_FREE; 4021 s->flags |= SLAB_RECLAIM_ACCOUNT;
3995 return length; 4022 return length;
3996} 4023}
3997SLAB_ATTR(sanity_checks); 4024SLAB_ATTR(reclaim_account);
3998 4025
3999static ssize_t trace_show(struct kmem_cache *s, char *buf) 4026static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
4000{ 4027{
4001 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE)); 4028 return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
4002} 4029}
4030SLAB_ATTR_RO(hwcache_align);
4003 4031
4004static ssize_t trace_store(struct kmem_cache *s, const char *buf, 4032#ifdef CONFIG_ZONE_DMA
4005 size_t length) 4033static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
4006{ 4034{
4007 s->flags &= ~SLAB_TRACE; 4035 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
4008 if (buf[0] == '1')
4009 s->flags |= SLAB_TRACE;
4010 return length;
4011} 4036}
4012SLAB_ATTR(trace); 4037SLAB_ATTR_RO(cache_dma);
4038#endif
4013 4039
4014#ifdef CONFIG_FAILSLAB 4040static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
4015static ssize_t failslab_show(struct kmem_cache *s, char *buf)
4016{ 4041{
4017 return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB)); 4042 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
4018} 4043}
4044SLAB_ATTR_RO(destroy_by_rcu);
4019 4045
4020static ssize_t failslab_store(struct kmem_cache *s, const char *buf, 4046#ifdef CONFIG_SLUB_DEBUG
4021 size_t length) 4047static ssize_t slabs_show(struct kmem_cache *s, char *buf)
4022{ 4048{
4023 s->flags &= ~SLAB_FAILSLAB; 4049 return show_slab_objects(s, buf, SO_ALL);
4024 if (buf[0] == '1')
4025 s->flags |= SLAB_FAILSLAB;
4026 return length;
4027} 4050}
4028SLAB_ATTR(failslab); 4051SLAB_ATTR_RO(slabs);
4029#endif
4030 4052
4031static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf) 4053static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
4032{ 4054{
4033 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); 4055 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
4034} 4056}
4057SLAB_ATTR_RO(total_objects);
4035 4058
4036static ssize_t reclaim_account_store(struct kmem_cache *s, 4059static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
4037 const char *buf, size_t length)
4038{ 4060{
4039 s->flags &= ~SLAB_RECLAIM_ACCOUNT; 4061 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
4040 if (buf[0] == '1')
4041 s->flags |= SLAB_RECLAIM_ACCOUNT;
4042 return length;
4043} 4062}
4044SLAB_ATTR(reclaim_account);
4045 4063
4046static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf) 4064static ssize_t sanity_checks_store(struct kmem_cache *s,
4065 const char *buf, size_t length)
4047{ 4066{
4048 return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); 4067 s->flags &= ~SLAB_DEBUG_FREE;
4068 if (buf[0] == '1')
4069 s->flags |= SLAB_DEBUG_FREE;
4070 return length;
4049} 4071}
4050SLAB_ATTR_RO(hwcache_align); 4072SLAB_ATTR(sanity_checks);
4051 4073
4052#ifdef CONFIG_ZONE_DMA 4074static ssize_t trace_show(struct kmem_cache *s, char *buf)
4053static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
4054{ 4075{
4055 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); 4076 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
4056} 4077}
4057SLAB_ATTR_RO(cache_dma);
4058#endif
4059 4078
4060static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) 4079static ssize_t trace_store(struct kmem_cache *s, const char *buf,
4080 size_t length)
4061{ 4081{
4062 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU)); 4082 s->flags &= ~SLAB_TRACE;
4083 if (buf[0] == '1')
4084 s->flags |= SLAB_TRACE;
4085 return length;
4063} 4086}
4064SLAB_ATTR_RO(destroy_by_rcu); 4087SLAB_ATTR(trace);
4065 4088
4066static ssize_t red_zone_show(struct kmem_cache *s, char *buf) 4089static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
4067{ 4090{
@@ -4139,6 +4162,40 @@ static ssize_t validate_store(struct kmem_cache *s,
4139} 4162}
4140SLAB_ATTR(validate); 4163SLAB_ATTR(validate);
4141 4164
4165static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
4166{
4167 if (!(s->flags & SLAB_STORE_USER))
4168 return -ENOSYS;
4169 return list_locations(s, buf, TRACK_ALLOC);
4170}
4171SLAB_ATTR_RO(alloc_calls);
4172
4173static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
4174{
4175 if (!(s->flags & SLAB_STORE_USER))
4176 return -ENOSYS;
4177 return list_locations(s, buf, TRACK_FREE);
4178}
4179SLAB_ATTR_RO(free_calls);
4180#endif /* CONFIG_SLUB_DEBUG */
4181
4182#ifdef CONFIG_FAILSLAB
4183static ssize_t failslab_show(struct kmem_cache *s, char *buf)
4184{
4185 return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
4186}
4187
4188static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
4189 size_t length)
4190{
4191 s->flags &= ~SLAB_FAILSLAB;
4192 if (buf[0] == '1')
4193 s->flags |= SLAB_FAILSLAB;
4194 return length;
4195}
4196SLAB_ATTR(failslab);
4197#endif
4198
4142static ssize_t shrink_show(struct kmem_cache *s, char *buf) 4199static ssize_t shrink_show(struct kmem_cache *s, char *buf)
4143{ 4200{
4144 return 0; 4201 return 0;
@@ -4158,22 +4215,6 @@ static ssize_t shrink_store(struct kmem_cache *s,
4158} 4215}
4159SLAB_ATTR(shrink); 4216SLAB_ATTR(shrink);
4160 4217
4161static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
4162{
4163 if (!(s->flags & SLAB_STORE_USER))
4164 return -ENOSYS;
4165 return list_locations(s, buf, TRACK_ALLOC);
4166}
4167SLAB_ATTR_RO(alloc_calls);
4168
4169static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
4170{
4171 if (!(s->flags & SLAB_STORE_USER))
4172 return -ENOSYS;
4173 return list_locations(s, buf, TRACK_FREE);
4174}
4175SLAB_ATTR_RO(free_calls);
4176
4177#ifdef CONFIG_NUMA 4218#ifdef CONFIG_NUMA
4178static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf) 4219static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
4179{ 4220{
@@ -4279,25 +4320,27 @@ static struct attribute *slab_attrs[] = {
4279 &min_partial_attr.attr, 4320 &min_partial_attr.attr,
4280 &objects_attr.attr, 4321 &objects_attr.attr,
4281 &objects_partial_attr.attr, 4322 &objects_partial_attr.attr,
4282 &total_objects_attr.attr,
4283 &slabs_attr.attr,
4284 &partial_attr.attr, 4323 &partial_attr.attr,
4285 &cpu_slabs_attr.attr, 4324 &cpu_slabs_attr.attr,
4286 &ctor_attr.attr, 4325 &ctor_attr.attr,
4287 &aliases_attr.attr, 4326 &aliases_attr.attr,
4288 &align_attr.attr, 4327 &align_attr.attr,
4289 &sanity_checks_attr.attr,
4290 &trace_attr.attr,
4291 &hwcache_align_attr.attr, 4328 &hwcache_align_attr.attr,
4292 &reclaim_account_attr.attr, 4329 &reclaim_account_attr.attr,
4293 &destroy_by_rcu_attr.attr, 4330 &destroy_by_rcu_attr.attr,
4331 &shrink_attr.attr,
4332#ifdef CONFIG_SLUB_DEBUG
4333 &total_objects_attr.attr,
4334 &slabs_attr.attr,
4335 &sanity_checks_attr.attr,
4336 &trace_attr.attr,
4294 &red_zone_attr.attr, 4337 &red_zone_attr.attr,
4295 &poison_attr.attr, 4338 &poison_attr.attr,
4296 &store_user_attr.attr, 4339 &store_user_attr.attr,
4297 &validate_attr.attr, 4340 &validate_attr.attr,
4298 &shrink_attr.attr,
4299 &alloc_calls_attr.attr, 4341 &alloc_calls_attr.attr,
4300 &free_calls_attr.attr, 4342 &free_calls_attr.attr,
4343#endif
4301#ifdef CONFIG_ZONE_DMA 4344#ifdef CONFIG_ZONE_DMA
4302 &cache_dma_attr.attr, 4345 &cache_dma_attr.attr,
4303#endif 4346#endif
@@ -4377,6 +4420,7 @@ static void kmem_cache_release(struct kobject *kobj)
4377{ 4420{
4378 struct kmem_cache *s = to_slab(kobj); 4421 struct kmem_cache *s = to_slab(kobj);
4379 4422
4423 kfree(s->name);
4380 kfree(s); 4424 kfree(s);
4381} 4425}
4382 4426
@@ -4579,7 +4623,7 @@ static int __init slab_sysfs_init(void)
4579} 4623}
4580 4624
4581__initcall(slab_sysfs_init); 4625__initcall(slab_sysfs_init);
4582#endif 4626#endif /* CONFIG_SYSFS */
4583 4627
4584/* 4628/*
4585 * The /proc/slabinfo ABI 4629 * The /proc/slabinfo ABI
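
With kmalloc_caches[] now an array of pointers and the DMA variants created up front into kmalloc_dma_caches[], the get_slab() hunk above reduces to an array lookup. A rough user-space sketch of how a request size maps to an index, following the odd-sized 96/192 caches shown earlier; the cutoffs and helper names are simplifications, not the kernel's exact size_index table:

#include <stdio.h>

/* 1-based index of the most significant set bit, like the kernel's fls() */
static int fls_example(unsigned long x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

/* Map a kmalloc() request size to an index into kmalloc_caches[]. */
static int kmalloc_index_example(size_t size)
{
	if (size <= 8)
		return 3;		/* 2^3 == 8, smallest cache assumed here */
	if (size > 64 && size <= 96)
		return 1;		/* odd-sized kmalloc-96 slot */
	if (size > 128 && size <= 192)
		return 2;		/* odd-sized kmalloc-192 slot */
	return fls_example(size - 1);	/* next power of two: 2^index >= size */
}

int main(void)
{
	size_t sizes[] = { 8, 30, 96, 100, 192, 200, 1024 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("kmalloc(%4zu) -> kmalloc_caches[%d]\n",
		       sizes[i], kmalloc_index_example(sizes[i]));
	return 0;
}

A GFP_DMA request simply picks kmalloc_dma_caches[index] instead; since those caches are now built at the end of kmem_cache_init(), the old lazily-created dma_kmalloc_cache() path and its deferred sysfs work can go away, which is exactly what the removed hunk shows.
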
diff --git a/mm/util.c b/mm/util.c
index 4735ea481816..73dac81e9f78 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -245,6 +245,19 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
245} 245}
246#endif 246#endif
247 247
248/*
 249 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
250 * back to the regular GUP.
 251 * If the architecture does not support this function, simply return with no
252 * page pinned
253 */
254int __attribute__((weak)) __get_user_pages_fast(unsigned long start,
255 int nr_pages, int write, struct page **pages)
256{
257 return 0;
258}
259EXPORT_SYMBOL_GPL(__get_user_pages_fast);
260
248/** 261/**
249 * get_user_pages_fast() - pin user pages in memory 262 * get_user_pages_fast() - pin user pages in memory
250 * @start: starting user address 263 * @start: starting user address
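
The new __get_user_pages_fast() stub above relies on weak linkage: generic code supplies a do-nothing default, and an architecture that can walk page tables locklessly overrides it simply by defining a strong symbol with the same signature. A stand-alone sketch of the pattern (arch_fast_lookup() is an invented name):

#include <stdio.h>

/* generic side: weak default, compiled everywhere */
int __attribute__((weak)) arch_fast_lookup(unsigned long addr)
{
	(void)addr;
	return 0;	/* "no architecture support, nothing pinned" */
}

/*
 * arch side, optionally built on capable architectures:
 *
 *	int arch_fast_lookup(unsigned long addr)
 *	{
 *		... real, IRQ-safe lookup ...
 *		return 1;
 *	}
 *
 * When both objects are linked the strong definition wins; when only the
 * generic file is built, callers transparently get the 0 fallback.
 */

int main(void)
{
	printf("arch_fast_lookup(0x1000) = %d\n", arch_fast_lookup(0x1000));
	return 0;
}

No Kconfig plumbing or #ifdef is needed at the call site; KVM's atomic fast path further down in this series is the intended user.
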
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c5dfabf25f11..b94c9464f262 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -79,8 +79,8 @@ struct scan_control {
79 int order; 79 int order;
80 80
81 /* 81 /*
82 * Intend to reclaim enough contenious memory rather than to reclaim 82 * Intend to reclaim enough continuous memory rather than reclaim
83 * enough amount memory. I.e, it's the mode for high order allocation. 83 * enough amount of memory. i.e, mode for high order allocation.
84 */ 84 */
85 bool lumpy_reclaim_mode; 85 bool lumpy_reclaim_mode;
86 86
diff --git a/net/ax25/Kconfig b/net/ax25/Kconfig
index 2a72aa96a568..705e53ef4af0 100644
--- a/net/ax25/Kconfig
+++ b/net/ax25/Kconfig
@@ -7,7 +7,7 @@ menuconfig HAMRADIO
7 bool "Amateur Radio support" 7 bool "Amateur Radio support"
8 help 8 help
9 If you want to connect your Linux box to an amateur radio, answer Y 9 If you want to connect your Linux box to an amateur radio, answer Y
10 here. You want to read <http://www.tapr.org/tapr/html/pkthome.html> 10 here. You want to read <http://www.tapr.org/>
11 and more specifically about AX.25 on Linux 11 and more specifically about AX.25 on Linux
12 <http://www.linux-ax25.org/>. 12 <http://www.linux-ax25.org/>.
13 13
@@ -42,7 +42,7 @@ config AX25
42 check out the file <file:Documentation/networking/ax25.txt> in the 42 check out the file <file:Documentation/networking/ax25.txt> in the
43 kernel source. More information about digital amateur radio in 43 kernel source. More information about digital amateur radio in
44 general is on the WWW at 44 general is on the WWW at
45 <http://www.tapr.org/tapr/html/pkthome.html>. 45 <http://www.tapr.org/>.
46 46
47 To compile this driver as a module, choose M here: the 47 To compile this driver as a module, choose M here: the
48 module will be called ax25. 48 module will be called ax25.
@@ -89,7 +89,7 @@ config NETROM
89 <http://www.linux-ax25.org>. You also might want to check out the 89 <http://www.linux-ax25.org>. You also might want to check out the
90 file <file:Documentation/networking/ax25.txt>. More information about 90 file <file:Documentation/networking/ax25.txt>. More information about
91 digital amateur radio in general is on the WWW at 91 digital amateur radio in general is on the WWW at
92 <http://www.tapr.org/tapr/html/pkthome.html>. 92 <http://www.tapr.org/>.
93 93
94 To compile this driver as a module, choose M here: the 94 To compile this driver as a module, choose M here: the
95 module will be called netrom. 95 module will be called netrom.
@@ -108,7 +108,7 @@ config ROSE
108 <http://www.linux-ax25.org>. You also might want to check out the 108 <http://www.linux-ax25.org>. You also might want to check out the
109 file <file:Documentation/networking/ax25.txt>. More information about 109 file <file:Documentation/networking/ax25.txt>. More information about
110 digital amateur radio in general is on the WWW at 110 digital amateur radio in general is on the WWW at
111 <http://www.tapr.org/tapr/html/pkthome.html>. 111 <http://www.tapr.org/>.
112 112
113 To compile this driver as a module, choose M here: the 113 To compile this driver as a module, choose M here: the
114 module will be called rose. 114 module will be called rose.
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index e848e6c062cd..9e95d7fb6d5a 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -84,7 +84,7 @@ config IP_FIB_TRIE
84 84
85 An experimental study of compression methods for dynamic tries 85 An experimental study of compression methods for dynamic tries
86 Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002. 86 Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
87 http://www.nada.kth.se/~snilsson/public/papers/dyntrie2/ 87 <http://www.csc.kth.se/~snilsson/software/dyntrie2/>
88 88
89endchoice 89endchoice
90 90
@@ -562,7 +562,7 @@ config TCP_CONG_VENO
562 distinguishing to circumvent the difficult judgment of the packet loss 562 distinguishing to circumvent the difficult judgment of the packet loss
563 type. TCP Veno cuts down less congestion window in response to random 563 type. TCP Veno cuts down less congestion window in response to random
564 loss packets. 564 loss packets.
565 See http://www.ntu.edu.sg/home5/ZHOU0022/papers/CPFu03a.pdf 565 See <http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=1177186>
566 566
567config TCP_CONG_YEAH 567config TCP_CONG_YEAH
568 tristate "YeAH TCP" 568 tristate "YeAH TCP"
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 3a92a76ae41d..094e150c6260 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -9,7 +9,7 @@
9 * 9 *
10 * The CIPSO draft specification can be found in the kernel's Documentation 10 * The CIPSO draft specification can be found in the kernel's Documentation
11 * directory as well as the following URL: 11 * directory as well as the following URL:
12 * http://netlabel.sourceforge.net/files/draft-ietf-cipso-ipsecurity-01.txt 12 * http://tools.ietf.org/id/draft-ietf-cipso-ipsecurity-01.txt
13 * The FIPS-188 specification can be found at the following URL: 13 * The FIPS-188 specification can be found at the following URL:
14 * http://www.itl.nist.gov/fipspubs/fip188.htm 14 * http://www.itl.nist.gov/fipspubs/fip188.htm
15 * 15 *
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index cd5e13aee7d5..b14450895102 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -16,7 +16,7 @@
16 * 16 *
17 * An experimental study of compression methods for dynamic tries 17 * An experimental study of compression methods for dynamic tries
18 * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002. 18 * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
19 * http://www.nada.kth.se/~snilsson/public/papers/dyntrie2/ 19 * http://www.csc.kth.se/~snilsson/software/dyntrie2/
20 * 20 *
21 * 21 *
22 * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson 22 * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 8e3350643b63..babd1a2bae5f 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -147,7 +147,7 @@ config IP_NF_TARGET_ULOG
147 which can only be viewed through syslog. 147 which can only be viewed through syslog.
148 148
149 The appropriate userspace logging daemon (ulogd) may be obtained from 149 The appropriate userspace logging daemon (ulogd) may be obtained from
150 <http://www.gnumonks.org/projects/ulogd/> 150 <http://www.netfilter.org/projects/ulogd/index.html>
151 151
152 To compile it as a module, choose M here. If unsure, say N. 152 To compile it as a module, choose M here. If unsure, say N.
153 153
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
index 1eba160b72dc..00ca688d8964 100644
--- a/net/ipv4/tcp_illinois.c
+++ b/net/ipv4/tcp_illinois.c
@@ -6,7 +6,7 @@
6 * The algorithm is described in: 6 * The algorithm is described in:
7 * "TCP-Illinois: A Loss and Delay-Based Congestion Control Algorithm 7 * "TCP-Illinois: A Loss and Delay-Based Congestion Control Algorithm
8 * for High-Speed Networks" 8 * for High-Speed Networks"
9 * http://www.ews.uiuc.edu/~shaoliu/papersandslides/liubassri06perf.pdf 9 * http://www.ifp.illinois.edu/~srikant/Papers/liubassri06perf.pdf
10 * 10 *
11 * Implemented from description in paper and ns-2 simulation. 11 * Implemented from description in paper and ns-2 simulation.
12 * Copyright (C) 2007 Stephen Hemminger <shemminger@linux-foundation.org> 12 * Copyright (C) 2007 Stephen Hemminger <shemminger@linux-foundation.org>
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index ee0df4817498..3357f69e353d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -428,10 +428,10 @@ EXPORT_SYMBOL(tcp_initialize_rcv_mss);
428 * 428 *
429 * The algorithm for RTT estimation w/o timestamps is based on 429 * The algorithm for RTT estimation w/o timestamps is based on
430 * Dynamic Right-Sizing (DRS) by Wu Feng and Mike Fisk of LANL. 430 * Dynamic Right-Sizing (DRS) by Wu Feng and Mike Fisk of LANL.
431 * <http://www.lanl.gov/radiant/website/pubs/drs/lacsi2001.ps> 431 * <http://public.lanl.gov/radiant/pubs.html#DRS>
432 * 432 *
433 * More detail on this code can be found at 433 * More detail on this code can be found at
434 * <http://www.psc.edu/~jheffner/senior_thesis.ps>, 434 * <http://staff.psc.edu/jheffner/>,
435 * though this reference is out of date. A new paper 435 * though this reference is out of date. A new paper
436 * is pending. 436 * is pending.
437 */ 437 */
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index b612acf76183..38bc0b52d745 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -6,7 +6,7 @@
6 * "TCP Veno: TCP Enhancement for Transmission over Wireless Access Networks." 6 * "TCP Veno: TCP Enhancement for Transmission over Wireless Access Networks."
7 * IEEE Journal on Selected Areas in Communication, 7 * IEEE Journal on Selected Areas in Communication,
8 * Feb. 2003. 8 * Feb. 2003.
9 * See http://www.ntu.edu.sg/home5/ZHOU0022/papers/CPFu03a.pdf 9 * See http://www.ie.cuhk.edu.hk/fileadmin/staff_upload/soung/Journal/J3.pdf
10 */ 10 */
11 11
12#include <linux/mm.h> 12#include <linux/mm.h>
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index 0993bd454ea5..7fa86373de41 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -664,7 +664,7 @@ dev_irnet_ioctl(
664 if((val == N_SYNC_PPP) || (val == N_PPP)) 664 if((val == N_SYNC_PPP) || (val == N_PPP))
665 { 665 {
666 DEBUG(FS_INFO, "Entering PPP discipline.\n"); 666 DEBUG(FS_INFO, "Entering PPP discipline.\n");
667 /* PPP channel setup (ap->chan in configued in dev_irnet_open())*/ 667 /* PPP channel setup (ap->chan in configured in dev_irnet_open())*/
668 if (mutex_lock_interruptible(&ap->lock)) 668 if (mutex_lock_interruptible(&ap->lock))
669 return -EINTR; 669 return -EINTR;
670 670
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index ebd3f1d9d889..58e741128968 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -115,7 +115,7 @@ struct mesh_path {
115 * @hash_rnd: random value used for hash computations 115 * @hash_rnd: random value used for hash computations
116 * @entries: number of entries in the table 116 * @entries: number of entries in the table
117 * @free_node: function to free nodes of the table 117 * @free_node: function to free nodes of the table
118 * @copy_node: fuction to copy nodes of the table 118 * @copy_node: function to copy nodes of the table
119 * @size_order: determines size of the table, there will be 2^size_order hash 119 * @size_order: determines size of the table, there will be 2^size_order hash
120 * buckets 120 * buckets
121 * @mean_chain_len: maximum average length for the hash buckets' list, if it is 121 * @mean_chain_len: maximum average length for the hash buckets' list, if it is
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index c4c885dca3bd..3fb2b73b24dc 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -329,8 +329,8 @@ static unsigned int get_conntrack_index(const struct tcphdr *tcph)
329/* TCP connection tracking based on 'Real Stateful TCP Packet Filtering 329/* TCP connection tracking based on 'Real Stateful TCP Packet Filtering
330 in IP Filter' by Guido van Rooij. 330 in IP Filter' by Guido van Rooij.
331 331
332 http://www.nluug.nl/events/sane2000/papers.html 332 http://www.sane.nl/events/sane2000/papers.html
333 http://www.iae.nl/users/guido/papers/tcp_filtering.ps.gz 333 http://www.darkart.com/mirrors/www.obfuscation.org/ipf/
334 334
335 The boundaries and the conditions are changed according to RFC793: 335 The boundaries and the conditions are changed according to RFC793:
336 the packet must intersect the window (i.e. segments may be 336 the packet must intersect the window (i.e. segments may be
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 28bcd52e3ce9..52f252432144 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -203,7 +203,7 @@ rpc_pipe_release(struct inode *inode, struct file *filp)
203 mutex_lock(&inode->i_mutex); 203 mutex_lock(&inode->i_mutex);
204 if (rpci->ops == NULL) 204 if (rpci->ops == NULL)
205 goto out; 205 goto out;
206 msg = (struct rpc_pipe_msg *)filp->private_data; 206 msg = filp->private_data;
207 if (msg != NULL) { 207 if (msg != NULL) {
208 spin_lock(&inode->i_lock); 208 spin_lock(&inode->i_lock);
209 msg->errno = -EAGAIN; 209 msg->errno = -EAGAIN;
@@ -325,7 +325,7 @@ rpc_pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
325 len = rpci->pipelen; 325 len = rpci->pipelen;
326 if (filp->private_data) { 326 if (filp->private_data) {
327 struct rpc_pipe_msg *msg; 327 struct rpc_pipe_msg *msg;
328 msg = (struct rpc_pipe_msg *)filp->private_data; 328 msg = filp->private_data;
329 len += msg->len - msg->copied; 329 len += msg->len - msg->copied;
330 } 330 }
331 spin_unlock(&inode->i_lock); 331 spin_unlock(&inode->i_lock);
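
The rpc_pipe.c hunks above only drop casts: filp->private_data is a void *, and in C a void * converts implicitly to any object pointer type, so the cast adds noise without adding type safety. A trivial illustration (the struct layout and value are invented):

#include <stdio.h>

struct rpc_pipe_msg {
	int len;
};

int main(void)
{
	struct rpc_pipe_msg m = { .len = 42 };
	void *private_data = &m;			/* how the field is stored */

	struct rpc_pipe_msg *msg = private_data;	/* no cast needed in C */
	printf("len = %d\n", msg->len);
	return 0;
}

The explicit cast would only be required if the file were compiled as C++.
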
diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c
index 2bf23406637a..74944a2dd436 100644
--- a/net/wanrouter/wanmain.c
+++ b/net/wanrouter/wanmain.c
@@ -471,7 +471,7 @@ static int wanrouter_device_setup(struct wan_device *wandev,
471 data = vmalloc(conf->data_size); 471 data = vmalloc(conf->data_size);
472 if (!data) { 472 if (!data) {
473 printk(KERN_INFO 473 printk(KERN_INFO
474 "%s: ERROR, Faild allocate kernel memory !\n", 474 "%s: ERROR, Failed allocate kernel memory !\n",
475 wandev->name); 475 wandev->name);
476 kfree(conf); 476 kfree(conf);
477 return -ENOBUFS; 477 return -ENOBUFS;
@@ -481,7 +481,7 @@ static int wanrouter_device_setup(struct wan_device *wandev,
481 err = wandev->setup(wandev, conf); 481 err = wandev->setup(wandev, conf);
482 } else { 482 } else {
483 printk(KERN_INFO 483 printk(KERN_INFO
484 "%s: ERROR, Faild to copy from user data !\n", 484 "%s: ERROR, Failed to copy from user data !\n",
485 wandev->name); 485 wandev->name);
486 err = -EFAULT; 486 err = -EFAULT;
487 } 487 }
diff --git a/sound/oss/ac97_codec.c b/sound/oss/ac97_codec.c
index 456a1b4d7832..854c303264dc 100644
--- a/sound/oss/ac97_codec.c
+++ b/sound/oss/ac97_codec.c
@@ -21,11 +21,8 @@
21 * 21 *
22 ************************************************************************** 22 **************************************************************************
23 * 23 *
24 * The Intel Audio Codec '97 specification is available at the Intel 24 * The Intel Audio Codec '97 specification is available at:
25 * audio homepage: http://developer.intel.com/ial/scalableplatforms/audio/ 25 * http://download.intel.com/support/motherboards/desktop/sb/ac97_r23.pdf
26 *
27 * The specification itself is currently available at:
28 * ftp://download.intel.com/ial/scalableplatforms/ac97r22.pdf
29 * 26 *
30 ************************************************************************** 27 **************************************************************************
31 * 28 *
diff --git a/sound/pci/ens1370.c b/sound/pci/ens1370.c
index c7fba5379813..537cfba829a5 100644
--- a/sound/pci/ens1370.c
+++ b/sound/pci/ens1370.c
@@ -22,7 +22,7 @@
22/* Power-Management-Code ( CONFIG_PM ) 22/* Power-Management-Code ( CONFIG_PM )
23 * for ens1371 only ( FIXME ) 23 * for ens1371 only ( FIXME )
24 * derived from cs4281.c, atiixp.c and via82xx.c 24 * derived from cs4281.c, atiixp.c and via82xx.c
25 * using http://www.alsa-project.org/~iwai/writing-an-alsa-driver/c1540.htm 25 * using http://www.alsa-project.org/~tiwai/writing-an-alsa-driver/
26 * by Kurt J. Bosch 26 * by Kurt J. Bosch
27 */ 27 */
28 28
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index 467749249576..400f9ebd243e 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -716,7 +716,7 @@ static void snd_intel8x0_setup_periods(struct intel8x0 *chip, struct ichdev *ich
716 * Intel 82443MX running a 100MHz processor system bus has a hardware bug, 716 * Intel 82443MX running a 100MHz processor system bus has a hardware bug,
717 * which aborts PCI busmaster for audio transfer. A workaround is to set 717 * which aborts PCI busmaster for audio transfer. A workaround is to set
718 * the pages as non-cached. For details, see the errata in 718 * the pages as non-cached. For details, see the errata in
719 * http://www.intel.com/design/chipsets/specupdt/245051.htm 719 * http://download.intel.com/design/chipsets/specupdt/24505108.pdf
720 */ 720 */
721static void fill_nocache(void *buf, int size, int nocache) 721static void fill_nocache(void *buf, int size, int nocache)
722{ 722{
diff --git a/sound/soc/s3c24xx/neo1973_gta02_wm8753.c b/sound/soc/s3c24xx/neo1973_gta02_wm8753.c
index 209c25994c7e..4719558289d4 100644
--- a/sound/soc/s3c24xx/neo1973_gta02_wm8753.c
+++ b/sound/soc/s3c24xx/neo1973_gta02_wm8753.c
@@ -182,7 +182,7 @@ static int neo1973_gta02_voice_hw_params(
182 if (ret < 0) 182 if (ret < 0)
183 return ret; 183 return ret;
184 184
185 /* configue and enable PLL for 12.288MHz output */ 185 /* configure and enable PLL for 12.288MHz output */
186 ret = snd_soc_dai_set_pll(codec_dai, WM8753_PLL2, 0, 186 ret = snd_soc_dai_set_pll(codec_dai, WM8753_PLL2, 0,
187 iis_clkrate / 4, 12288000); 187 iis_clkrate / 4, 12288000);
188 if (ret < 0) 188 if (ret < 0)
diff --git a/sound/soc/s3c24xx/neo1973_wm8753.c b/sound/soc/s3c24xx/neo1973_wm8753.c
index 0cb4f86f6d1e..4ac620988e7c 100644
--- a/sound/soc/s3c24xx/neo1973_wm8753.c
+++ b/sound/soc/s3c24xx/neo1973_wm8753.c
@@ -201,7 +201,7 @@ static int neo1973_voice_hw_params(struct snd_pcm_substream *substream,
201 if (ret < 0) 201 if (ret < 0)
202 return ret; 202 return ret;
203 203
204 /* configue and enable PLL for 12.288MHz output */ 204 /* configure and enable PLL for 12.288MHz output */
205 ret = snd_soc_dai_set_pll(codec_dai, WM8753_PLL2, 0, 205 ret = snd_soc_dai_set_pll(codec_dai, WM8753_PLL2, 0,
206 iis_clkrate / 4, 12288000); 206 iis_clkrate / 4, 12288000);
207 if (ret < 0) 207 if (ret < 0)
diff --git a/usr/Kconfig b/usr/Kconfig
index e2721f5a3504..c2c7fe2f717d 100644
--- a/usr/Kconfig
+++ b/usr/Kconfig
@@ -144,7 +144,7 @@ config INITRAMFS_COMPRESSION_LZO
144 depends on RD_LZO 144 depends on RD_LZO
145 help 145 help
146 Its compression ratio is the poorest among the four. The kernel 146 Its compression ratio is the poorest among the four. The kernel
147 size is about about 10% bigger than gzip; however its speed 147 size is about 10% bigger than gzip; however its speed
148 (both compression and decompression) is the fastest. 148 (both compression and decompression) is the fastest.
149 149
150endchoice 150endchoice
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 369e38010ad5..8edca9141b78 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -17,7 +17,7 @@
17 * Authors: 17 * Authors:
18 * Yaozu (Eddie) Dong <Eddie.dong@intel.com> 18 * Yaozu (Eddie) Dong <Eddie.dong@intel.com>
19 * 19 *
20 * Copyright 2010 Red Hat, Inc. and/or its affilates. 20 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
21 */ 21 */
22 22
23#include <linux/kvm_host.h> 23#include <linux/kvm_host.h>
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 60e5e4612b0b..5225052aebc1 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -5,7 +5,7 @@
5 * machines without emulation or binary translation. 5 * machines without emulation or binary translation.
6 * 6 *
7 * Copyright (C) 2006 Qumranet, Inc. 7 * Copyright (C) 2006 Qumranet, Inc.
8 * Copyright 2010 Red Hat, Inc. and/or its affilates. 8 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
9 * 9 *
10 * Authors: 10 * Authors:
11 * Avi Kivity <avi@qumranet.com> 11 * Avi Kivity <avi@qumranet.com>
@@ -705,14 +705,12 @@ skip_lpage:
705 if (r) 705 if (r)
706 goto out_free; 706 goto out_free;
707 707
708#ifdef CONFIG_DMAR
709 /* map the pages in iommu page table */ 708 /* map the pages in iommu page table */
710 if (npages) { 709 if (npages) {
711 r = kvm_iommu_map_pages(kvm, &new); 710 r = kvm_iommu_map_pages(kvm, &new);
712 if (r) 711 if (r)
713 goto out_free; 712 goto out_free;
714 } 713 }
715#endif
716 714
717 r = -ENOMEM; 715 r = -ENOMEM;
718 slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); 716 slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
@@ -927,35 +925,46 @@ int memslot_id(struct kvm *kvm, gfn_t gfn)
 	return memslot - slots->memslots;
 }
 
-static unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
-{
-	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
-}
-
-unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
+static unsigned long gfn_to_hva_many(struct kvm *kvm, gfn_t gfn,
+				     gfn_t *nr_pages)
 {
 	struct kvm_memory_slot *slot;
 
 	slot = gfn_to_memslot(kvm, gfn);
 	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
 		return bad_hva();
+
+	if (nr_pages)
+		*nr_pages = slot->npages - (gfn - slot->base_gfn);
+
 	return gfn_to_hva_memslot(slot, gfn);
 }
+
+unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
+{
+	return gfn_to_hva_many(kvm, gfn, NULL);
+}
 EXPORT_SYMBOL_GPL(gfn_to_hva);
 
-static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr)
+static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic)
 {
 	struct page *page[1];
 	int npages;
 	pfn_t pfn;
 
-	might_sleep();
-
-	npages = get_user_pages_fast(addr, 1, 1, page);
+	if (atomic)
+		npages = __get_user_pages_fast(addr, 1, 1, page);
+	else {
+		might_sleep();
+		npages = get_user_pages_fast(addr, 1, 1, page);
+	}
 
 	if (unlikely(npages != 1)) {
 		struct vm_area_struct *vma;
 
+		if (atomic)
+			goto return_fault_page;
+
 		down_read(&current->mm->mmap_sem);
 		if (is_hwpoison_address(addr)) {
 			up_read(&current->mm->mmap_sem);
@@ -968,6 +977,7 @@ static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr)
 		if (vma == NULL || addr < vma->vm_start ||
 		    !(vma->vm_flags & VM_PFNMAP)) {
 			up_read(&current->mm->mmap_sem);
+return_fault_page:
 			get_page(fault_page);
 			return page_to_pfn(fault_page);
 		}
@@ -981,7 +991,13 @@ static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr)
 	return pfn;
 }
 
-pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
+pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr)
+{
+	return hva_to_pfn(kvm, addr, true);
+}
+EXPORT_SYMBOL_GPL(hva_to_pfn_atomic);
+
+static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic)
 {
 	unsigned long addr;
 
@@ -991,7 +1007,18 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
 		return page_to_pfn(bad_page);
 	}
 
-	return hva_to_pfn(kvm, addr);
+	return hva_to_pfn(kvm, addr, atomic);
+}
+
+pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
+{
+	return __gfn_to_pfn(kvm, gfn, true);
+}
+EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);
+
+pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
+{
+	return __gfn_to_pfn(kvm, gfn, false);
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn);
 
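The two hunks above introduce non-sleeping variants (hva_to_pfn_atomic() and gfn_to_pfn_atomic()) that only try __get_user_pages_fast() and hand back the special fault page instead of blocking. A minimal, hypothetical caller sketch, not part of the patch (the helper name, the lock, and the assumption that is_error_pfn() catches the fault page are illustrative only):

#include <linux/kvm_host.h>

/*
 * Illustrative sketch only: resolve a gfn while holding a spinlock using
 * the non-sleeping variant, then retry with the sleeping path on failure.
 */
static pfn_t example_resolve_pfn(struct kvm *kvm, gfn_t gfn, spinlock_t *lock)
{
	pfn_t pfn;

	spin_lock(lock);
	pfn = gfn_to_pfn_atomic(kvm, gfn);	/* never sleeps */
	spin_unlock(lock);

	if (is_error_pfn(pfn))			/* assumed to cover the fault page */
		pfn = gfn_to_pfn(kvm, gfn);	/* may sleep and fault the page in */

	return pfn;
}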
@@ -999,9 +1026,26 @@ pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
 			 struct kvm_memory_slot *slot, gfn_t gfn)
 {
 	unsigned long addr = gfn_to_hva_memslot(slot, gfn);
-	return hva_to_pfn(kvm, addr);
+	return hva_to_pfn(kvm, addr, false);
 }
 
+int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
+			    int nr_pages)
+{
+	unsigned long addr;
+	gfn_t entry;
+
+	addr = gfn_to_hva_many(kvm, gfn, &entry);
+	if (kvm_is_error_hva(addr))
+		return -1;
+
+	if (entry < nr_pages)
+		return 0;
+
+	return __get_user_pages_fast(addr, nr_pages, 1, pages);
+}
+EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
+
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 {
 	pfn_t pfn;
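The new gfn_to_page_many_atomic() above returns -1 for a bad hva, 0 when fewer than nr_pages pages remain in the memslot, and otherwise the count that __get_user_pages_fast() actually pinned. A hedged usage sketch (hypothetical caller, not taken from the patch):

#include <linux/kvm_host.h>

/* Illustrative sketch only: pin a few consecutive guest pages without sleeping. */
static void example_prefetch(struct kvm *kvm, gfn_t gfn)
{
	struct page *pages[4];
	int i, got;

	got = gfn_to_page_many_atomic(kvm, gfn, pages, ARRAY_SIZE(pages));
	if (got <= 0)
		return;		/* bad hva, slot too small, or nothing pinned */

	for (i = 0; i < got; i++) {
		/* ... consume pages[i] ... */
		kvm_release_page_clean(pages[i]);
	}
}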
@@ -1964,7 +2008,9 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
 	case CPU_STARTING:
 		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
 		       cpu);
+		spin_lock(&kvm_lock);
 		hardware_enable(NULL);
+		spin_unlock(&kvm_lock);
 		break;
 	}
 	return NOTIFY_OK;
@@ -1977,7 +2023,7 @@ asmlinkage void kvm_handle_fault_on_reboot(void)
 		/* spin while reset goes on */
 		local_irq_enable();
 		while (true)
-			;
+			cpu_relax();
 	}
 	/* Fault while not rebooting.  We want the trace. */
 	BUG();
@@ -2171,8 +2217,10 @@ static int kvm_suspend(struct sys_device *dev, pm_message_t state)
 
 static int kvm_resume(struct sys_device *dev)
 {
-	if (kvm_usage_count)
+	if (kvm_usage_count) {
+		WARN_ON(spin_is_locked(&kvm_lock));
 		hardware_enable(NULL);
+	}
 	return 0;
 }
 