author    Arnd Bergmann <arnd@arndb.de>    2012-02-22 09:20:07 -0500
committer Arnd Bergmann <arnd@arndb.de>    2012-02-22 09:20:18 -0500
commit    2daa79ec2126f8e710391e9d8e8f0d31d7c91d5f (patch)
tree      ab89c74c09c2986fa01c25ad8e45989a48a18899
parent    7dae8c5209147ad06d424928a5f1ec45caa87691 (diff)
parent    678a0222edc9da43a22145d68647500ee85e6c04 (diff)
Merge branch 'lpc32xx/drivers' into next/drivers
* lpc32xx/drivers: (566 commits)
  ARM: LPC32xx: ADC support for mach-lpc32xx

Includes an update to Linux 3.3-rc4

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
-rw-r--r--Documentation/DocBook/device-drivers.tmpl12
-rw-r--r--Documentation/input/event-codes.txt72
-rw-r--r--Documentation/sysctl/kernel.txt2
-rw-r--r--MAINTAINERS56
-rw-r--r--Makefile2
-rw-r--r--arch/arm/boot/dts/exynos4210.dtsi1
-rw-r--r--arch/arm/boot/dts/tegra-paz00.dts6
-rw-r--r--arch/arm/include/asm/tlb.h10
-rw-r--r--arch/arm/kernel/entry-armv.S2
-rw-r--r--arch/arm/kernel/perf_event_v7.c28
-rw-r--r--arch/arm/kernel/ptrace.c8
-rw-r--r--arch/arm/kernel/signal.c5
-rw-r--r--arch/arm/kernel/traps.c5
-rw-r--r--arch/arm/kernel/vmlinux.lds.S1
-rw-r--r--arch/arm/mach-at91/at91rm9200_devices.c2
-rw-r--r--arch/arm/mach-at91/at91sam9260_devices.c9
-rw-r--r--arch/arm/mach-at91/at91sam9261_devices.c2
-rw-r--r--arch/arm/mach-at91/at91sam9263_devices.c8
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9_smc.h29
-rw-r--r--arch/arm/mach-at91/sam9_smc.c76
-rw-r--r--arch/arm/mach-at91/sam9_smc.h23
-rw-r--r--arch/arm/mach-bcmring/arch.c2
-rw-r--r--arch/arm/mach-bcmring/dma.c812
-rw-r--r--arch/arm/mach-bcmring/include/mach/dma.h196
-rw-r--r--arch/arm/mach-davinci/board-da850-evm.c2
-rw-r--r--arch/arm/mach-davinci/board-dm365-evm.c2
-rw-r--r--arch/arm/mach-davinci/board-dm644x-evm.c2
-rw-r--r--arch/arm/mach-davinci/board-dm646x-evm.c2
-rw-r--r--arch/arm/mach-davinci/board-neuros-osd2.c2
-rw-r--r--arch/arm/mach-davinci/board-omapl138-hawk.c2
-rw-r--r--arch/arm/mach-davinci/board-sffsdr.c2
-rw-r--r--arch/arm/mach-davinci/da850.c32
-rw-r--r--arch/arm/mach-dove/common.c3
-rw-r--r--arch/arm/mach-ep93xx/vision_ep9307.c4
-rw-r--r--arch/arm/mach-exynos/clock-exynos4210.c2
-rw-r--r--arch/arm/mach-exynos/clock-exynos4212.c2
-rw-r--r--arch/arm/mach-exynos/clock.c2
-rw-r--r--arch/arm/mach-exynos/mach-exynos4-dt.c8
-rw-r--r--arch/arm/mach-exynos/mach-nuri.c8
-rw-r--r--arch/arm/mach-exynos/mach-universal_c210.c2
-rw-r--r--arch/arm/mach-exynos/pm.c4
-rw-r--r--arch/arm/mach-kirkwood/common.c3
-rw-r--r--arch/arm/mach-kirkwood/mpp.h320
-rw-r--r--arch/arm/mach-lpc32xx/clock.c36
-rw-r--r--arch/arm/mach-lpc32xx/common.c22
-rw-r--r--arch/arm/mach-lpc32xx/common.h1
-rw-r--r--arch/arm/mach-lpc32xx/phy3250.c1
-rw-r--r--arch/arm/mach-mv78xx0/common.c3
-rw-r--r--arch/arm/mach-mv78xx0/mpp.h226
-rw-r--r--arch/arm/mach-omap2/Kconfig11
-rw-r--r--arch/arm/mach-omap2/Makefile4
-rw-r--r--arch/arm/mach-omap2/board-4430sdp.c35
-rw-r--r--arch/arm/mach-omap2/board-cm-t35.c2
-rw-r--r--arch/arm/mach-omap2/board-generic.c4
-rw-r--r--arch/arm/mach-omap2/board-omap3evm.c23
-rw-r--r--arch/arm/mach-omap2/board-omap4panda.c24
-rw-r--r--arch/arm/mach-omap2/board-zoom-peripherals.c6
-rw-r--r--arch/arm/mach-omap2/devices.c1
-rw-r--r--arch/arm/mach-omap2/display.c4
-rw-r--r--arch/arm/mach-omap2/gpmc.c6
-rw-r--r--arch/arm/mach-omap2/hsmmc.c24
-rw-r--r--arch/arm/mach-omap2/io.c4
-rw-r--r--arch/arm/mach-omap2/mux.c22
-rw-r--r--arch/arm/mach-omap2/omap-headsmp.S1
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c16
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c21
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c22
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_3xxx_data.c54
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_44xx_data.c2
-rw-r--r--arch/arm/mach-omap2/pm24xx.c8
-rw-r--r--arch/arm/mach-omap2/prm2xxx_3xxx.c1
-rw-r--r--arch/arm/mach-omap2/prm44xx.c1
-rw-r--r--arch/arm/mach-omap2/serial.c8
-rw-r--r--arch/arm/mach-omap2/smartreflex.c2
-rw-r--r--arch/arm/mach-omap2/timer.c2
-rw-r--r--arch/arm/mach-omap2/vc.c10
-rw-r--r--arch/arm/mach-omap2/vp.c5
-rw-r--r--arch/arm/mach-orion5x/common.c4
-rw-r--r--arch/arm/mach-s3c2410/cpu-freq.c8
-rw-r--r--arch/arm/mach-s3c2410/dma.c5
-rw-r--r--arch/arm/mach-s3c2410/pll.c2
-rw-r--r--arch/arm/mach-s3c2410/pm.c2
-rw-r--r--arch/arm/mach-s3c2412/cpu-freq.c3
-rw-r--r--arch/arm/mach-s3c2412/dma.c3
-rw-r--r--arch/arm/mach-s3c2412/irq.c2
-rw-r--r--arch/arm/mach-s3c2412/pm.c2
-rw-r--r--arch/arm/mach-s3c2416/irq.c3
-rw-r--r--arch/arm/mach-s3c2416/pm.c2
-rw-r--r--arch/arm/mach-s3c2440/clock.c2
-rw-r--r--arch/arm/mach-s3c2440/dma.c3
-rw-r--r--arch/arm/mach-s3c2440/irq.c2
-rw-r--r--arch/arm/mach-s3c2440/s3c2440-cpufreq.c3
-rw-r--r--arch/arm/mach-s3c2440/s3c2440-pll-12000000.c2
-rw-r--r--arch/arm/mach-s3c2440/s3c2440-pll-16934400.c3
-rw-r--r--arch/arm/mach-s3c2440/s3c2442.c2
-rw-r--r--arch/arm/mach-s3c2440/s3c244x-clock.c2
-rw-r--r--arch/arm/mach-s3c2440/s3c244x-irq.c2
-rw-r--r--arch/arm/mach-s3c2443/dma.c3
-rw-r--r--arch/arm/mach-s3c2443/irq.c3
-rw-r--r--arch/arm/mach-s3c64xx/clock.c5
-rw-r--r--arch/arm/mach-s3c64xx/common.c2
-rw-r--r--arch/arm/mach-s5p64x0/pm.c2
-rw-r--r--arch/arm/mach-s5pv210/clock.c4
-rw-r--r--arch/arm/mach-s5pv210/pm.c2
-rw-r--r--arch/arm/mach-shmobile/setup-sh7372.c2
-rw-r--r--arch/arm/mach-tegra/board-paz00.c8
-rw-r--r--arch/arm/mach-tegra/board-paz00.h2
-rw-r--r--arch/arm/mach-tegra/include/mach/dma.h10
-rw-r--r--arch/arm/mm/cache-v7.S6
-rw-r--r--arch/arm/mm/ioremap.c3
-rw-r--r--arch/arm/plat-omap/include/plat/omap-secure.h2
-rw-r--r--arch/arm/plat-orion/common.c9
-rw-r--r--arch/arm/plat-orion/include/plat/common.h3
-rw-r--r--arch/arm/plat-orion/mpp.c3
-rw-r--r--arch/arm/plat-samsung/devs.c4
-rw-r--r--arch/avr32/Kconfig1
-rw-r--r--arch/microblaze/kernel/setup.c21
-rw-r--r--arch/mips/Kconfig1
-rw-r--r--arch/mips/lib/iomap-pci.c4
-rw-r--r--arch/powerpc/configs/ppc64_defconfig5
-rw-r--r--arch/powerpc/include/asm/ppc-pci.h5
-rw-r--r--arch/powerpc/include/asm/ptrace.h20
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S2
-rw-r--r--arch/powerpc/kernel/irq.c6
-rw-r--r--arch/powerpc/kernel/perf_event.c8
-rw-r--r--arch/powerpc/kernel/process.c6
-rw-r--r--arch/powerpc/kernel/rtas.c5
-rw-r--r--arch/powerpc/platforms/powernv/pci.c22
-rw-r--r--arch/powerpc/platforms/pseries/eeh.c4
-rw-r--r--arch/powerpc/platforms/pseries/suspend.c6
-rw-r--r--arch/powerpc/platforms/wsp/ics.c2
-rw-r--r--arch/powerpc/platforms/wsp/wsp_pci.c8
-rw-r--r--arch/powerpc/sysdev/fsl_pci.c48
-rw-r--r--arch/sh/Kconfig1
-rw-r--r--arch/sh/drivers/pci/pci.c4
-rw-r--r--arch/sparc/Kconfig1
-rw-r--r--arch/sparc/lib/divdi3.S16
-rw-r--r--arch/x86/include/asm/cmpxchg.h6
-rw-r--r--arch/x86/include/asm/i387.h284
-rw-r--r--arch/x86/include/asm/kvm_emulate.h16
-rw-r--r--arch/x86/include/asm/processor.h1
-rw-r--r--arch/x86/include/asm/thread_info.h2
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_ds.c1
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_lbr.c2
-rw-r--r--arch/x86/kernel/dumpstack.c3
-rw-r--r--arch/x86/kernel/dumpstack_64.c8
-rw-r--r--arch/x86/kernel/process_32.c25
-rw-r--r--arch/x86/kernel/process_64.c29
-rw-r--r--arch/x86/kernel/reboot.c36
-rw-r--r--arch/x86/kernel/traps.c41
-rw-r--r--arch/x86/kernel/xsave.c12
-rw-r--r--arch/x86/kvm/emulate.c51
-rw-r--r--arch/x86/kvm/vmx.c2
-rw-r--r--arch/x86/kvm/x86.c45
-rw-r--r--arch/x86/mm/fault.c4
-rw-r--r--arch/x86/pci/xen.c2
-rw-r--r--arch/x86/xen/smp.c7
-rw-r--r--arch/xtensa/include/asm/string.h3
-rw-r--r--block/blk-cgroup.c2
-rw-r--r--block/blk-core.c33
-rw-r--r--block/blk-ioc.c111
-rw-r--r--block/blk-merge.c37
-rw-r--r--block/blk.h2
-rw-r--r--block/bsg.c3
-rw-r--r--block/cfq-iosched.c24
-rw-r--r--block/elevator.c55
-rw-r--r--crypto/sha512_generic.c83
-rw-r--r--drivers/acpi/processor_driver.c7
-rw-r--r--drivers/ata/pata_at91.c48
-rw-r--r--drivers/base/cpu.c21
-rw-r--r--drivers/base/memory.c31
-rw-r--r--drivers/base/node.c8
-rw-r--r--drivers/base/regmap/regcache.c4
-rw-r--r--drivers/bcma/main.c4
-rw-r--r--drivers/bcma/scan.c19
-rw-r--r--drivers/block/floppy.c17
-rw-r--r--drivers/block/loop.c24
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c11
-rw-r--r--drivers/block/mtip32xx/mtip32xx.h5
-rw-r--r--drivers/block/rbd.c7
-rw-r--r--drivers/cdrom/cdrom.c20
-rw-r--r--drivers/dma/at_hdmac.c4
-rw-r--r--drivers/dma/at_hdmac_regs.h17
-rw-r--r--drivers/dma/dmatest.c2
-rw-r--r--drivers/dma/imx-sdma.c6
-rw-r--r--drivers/dma/shdma.c3
-rw-r--r--drivers/firewire/ohci.c6
-rw-r--r--drivers/gpio/gpio-lpc32xx.c2
-rw-r--r--drivers/gpio/gpio-ml-ioh.c1
-rw-r--r--drivers/gpio/gpio-pch.c1
-rw-r--r--drivers/gpio/gpio-samsung.c23
-rw-r--r--drivers/gpu/drm/drm_ioc32.c3
-rw-r--r--drivers/gpu/drm/i915/intel_display.c8
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c20
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c23
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mxm.c9
-rw-r--r--drivers/gpu/drm/nouveau/nv50_pm.c4
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c4
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c18
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c1
-rw-r--r--drivers/gpu/drm/radeon/r100.c4
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c35
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c17
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c1
-rw-r--r--drivers/gpu/drm/radeon/rs600.c4
-rw-r--r--drivers/hid/hid-hyperv.c1
-rw-r--r--drivers/hid/hid-wacom.c7
-rw-r--r--drivers/hid/hid-wiimote-core.c4
-rw-r--r--drivers/hid/usbhid/hiddev.c4
-rw-r--r--drivers/hwmon/f75375s.c11
-rw-r--r--drivers/hwmon/w83627ehf.c28
-rw-r--r--drivers/i2c/busses/i2c-omap.c2
-rw-r--r--drivers/i2c/busses/i2c-tegra.c2
-rw-r--r--drivers/ide/Makefile1
-rw-r--r--drivers/ide/at91_ide.c366
-rw-r--r--drivers/infiniband/core/ucma.c5
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c1
-rw-r--r--drivers/infiniband/core/verbs.c2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_fs.c2
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c7
-rw-r--r--drivers/infiniband/hw/nes/nes.c2
-rw-r--r--drivers/infiniband/hw/nes/nes.h2
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c10
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.h2
-rw-r--r--drivers/infiniband/hw/nes/nes_context.h2
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.h2
-rw-r--r--drivers/infiniband/hw/nes/nes_mgt.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_mgt.h2
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_user.h2
-rw-r--r--drivers/infiniband/hw/nes/nes_utils.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c6
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.h2
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_pcie.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h6
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c55
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c10
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c17
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.h1
-rw-r--r--drivers/input/evdev.c2
-rw-r--r--drivers/input/keyboard/twl4030_keypad.c4
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h7
-rw-r--r--drivers/input/serio/serio_raw.c15
-rw-r--r--drivers/iommu/amd_iommu.c3
-rw-r--r--drivers/iommu/msm_iommu.c7
-rw-r--r--drivers/isdn/i4l/isdn_net.c2
-rw-r--r--drivers/leds/leds-lm3530.c4
-rw-r--r--drivers/macintosh/adb.c4
-rw-r--r--drivers/md/dm-raid.c12
-rw-r--r--drivers/md/md.c5
-rw-r--r--drivers/mfd/Kconfig2
-rw-r--r--drivers/mfd/twl-core.c6
-rw-r--r--drivers/mfd/twl4030-power.c20
-rw-r--r--drivers/mfd/twl6040-core.c128
-rw-r--r--drivers/misc/Kconfig17
-rw-r--r--drivers/misc/c2port/c2port-duramar2150.c1
-rw-r--r--drivers/misc/cb710/core.c1
-rw-r--r--drivers/misc/cs5535-mfgpt.c2
-rw-r--r--drivers/misc/lkdtm.c6
-rw-r--r--drivers/misc/vmw_balloon.c14
-rw-r--r--drivers/mmc/card/block.c1
-rw-r--r--drivers/mmc/core/core.c49
-rw-r--r--drivers/mmc/core/host.h21
-rw-r--r--drivers/mmc/core/mmc.c26
-rw-r--r--drivers/mmc/core/sd.c22
-rw-r--r--drivers/mmc/core/sdio.c7
-rw-r--r--drivers/mmc/core/sdio_irq.c10
-rw-r--r--drivers/mmc/host/Kconfig1
-rw-r--r--drivers/mmc/host/atmel-mci.c3
-rw-r--r--drivers/mmc/host/dw_mmc.c144
-rw-r--r--drivers/mmc/host/of_mmc_spi.c4
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c32
-rw-r--r--drivers/mmc/host/sdhci-pci.c2
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c10
-rw-r--r--drivers/mmc/host/sh_mmcif.c16
-rw-r--r--drivers/mmc/host/tmio_mmc.h7
-rw-r--r--drivers/mmc/host/tmio_mmc_dma.c12
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c6
-rw-r--r--drivers/mtd/mtdcore.c2
-rw-r--r--drivers/mtd/nand/atmel_nand.c45
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-lib.c18
-rw-r--r--drivers/mtd/nand/nand_base.c2
-rw-r--r--drivers/net/can/cc770/cc770.c4
-rw-r--r--drivers/net/can/cc770/cc770_isa.c16
-rw-r--r--drivers/net/can/flexcan.c7
-rw-r--r--drivers/net/can/pch_can.c3
-rw-r--r--drivers/net/can/sja1000/peak_pci.c23
-rw-r--r--drivers/net/can/ti_hecc.c3
-rw-r--r--drivers/net/can/usb/ems_usb.c9
-rw-r--r--drivers/net/ethernet/3com/3c59x.c2
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c17
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c8
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_ethtool.c10
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c6
-rw-r--r--drivers/net/ethernet/freescale/fec.c4
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c10
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c3
-rw-r--r--drivers/net/ethernet/intel/igbvf/Makefile2
-rw-r--r--drivers/net/ethernet/intel/igbvf/defines.h2
-rw-r--r--drivers/net/ethernet/intel/igbvf/ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/igbvf/igbvf.h2
-rw-r--r--drivers/net/ethernet/intel/igbvf/mbx.c2
-rw-r--r--drivers/net/ethernet/intel/igbvf/mbx.h2
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c4
-rw-r--r--drivers/net/ethernet/intel/igbvf/regs.h2
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.c2
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c13
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c92
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c22
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/Makefile2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/regs.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c21
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.h2
-rw-r--r--drivers/net/ethernet/marvell/skge.c71
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c21
-rw-r--r--drivers/net/ethernet/micrel/Kconfig1
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c2
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c14
-rw-r--r--drivers/net/ethernet/octeon/octeon_mgmt.c4
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c4
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c205
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c23
-rw-r--r--drivers/net/ethernet/ti/cpmac.c4
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c3
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c2
-rw-r--r--drivers/net/ethernet/toshiba/Kconfig2
-rw-r--r--drivers/net/ethernet/via/via-velocity.c3
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c3
-rw-r--r--drivers/net/hyperv/netvsc_drv.c18
-rw-r--r--drivers/net/hyperv/rndis_filter.c32
-rw-r--r--drivers/net/tokenring/Kconfig5
-rw-r--r--drivers/net/usb/ipheth.c5
-rw-r--r--drivers/net/veth.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tx.c14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie.c1
-rw-r--r--drivers/net/wireless/mwifiex/init.c3
-rw-r--r--drivers/net/wireless/mwifiex/main.c12
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c8
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c8
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c4
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c8
-rw-r--r--drivers/pci/iov.c3
-rw-r--r--drivers/pci/probe.c5
-rw-r--r--drivers/pci/remove.c28
-rw-r--r--drivers/pci/xen-pcifront.c10
-rw-r--r--drivers/pcmcia/ds.c4
-rw-r--r--drivers/pinctrl/core.c2
-rw-r--r--drivers/power/bq27x00_battery.c15
-rw-r--r--drivers/power/charger-manager.c4
-rw-r--r--drivers/power/lp8727_charger.c1
-rw-r--r--drivers/regulator/max8649.c2
-rw-r--r--drivers/regulator/mc13xxx-regulator-core.c2
-rw-r--r--drivers/rtc/rtc-at91sam9.c13
-rw-r--r--drivers/spi/Kconfig2
-rw-r--r--drivers/spi/spi-topcliff-pch.c6
-rw-r--r--drivers/ssb/driver_pcicore.c2
-rw-r--r--drivers/staging/Kconfig4
-rw-r--r--drivers/staging/Makefile2
-rw-r--r--drivers/staging/android/Kconfig5
-rw-r--r--drivers/staging/android/Makefile1
-rw-r--r--drivers/staging/android/android_pmem.h93
-rw-r--r--drivers/staging/android/binder.c13
-rw-r--r--drivers/staging/android/lowmemorykiller.c5
-rw-r--r--drivers/staging/android/pmem.c1345
-rw-r--r--drivers/staging/asus_oled/asus_oled.c13
-rw-r--r--drivers/staging/gma500/Kconfig33
-rw-r--r--drivers/staging/gma500/Makefile52
-rw-r--r--drivers/staging/gma500/TODO15
-rw-r--r--drivers/staging/gma500/accel_2d.c414
-rw-r--r--drivers/staging/gma500/backlight.c49
-rw-r--r--drivers/staging/gma500/cdv_device.c350
-rw-r--r--drivers/staging/gma500/cdv_device.h36
-rw-r--r--drivers/staging/gma500/cdv_intel_crt.c326
-rw-r--r--drivers/staging/gma500/cdv_intel_display.c1508
-rw-r--r--drivers/staging/gma500/cdv_intel_hdmi.c376
-rw-r--r--drivers/staging/gma500/cdv_intel_lvds.c721
-rw-r--r--drivers/staging/gma500/displays/hdmi.h33
-rw-r--r--drivers/staging/gma500/displays/pyr_cmd.h34
-rw-r--r--drivers/staging/gma500/displays/pyr_vid.h34
-rw-r--r--drivers/staging/gma500/displays/tmd_cmd.h34
-rw-r--r--drivers/staging/gma500/displays/tmd_vid.h34
-rw-r--r--drivers/staging/gma500/displays/tpo_cmd.h35
-rw-r--r--drivers/staging/gma500/displays/tpo_vid.h33
-rw-r--r--drivers/staging/gma500/framebuffer.c856
-rw-r--r--drivers/staging/gma500/framebuffer.h48
-rw-r--r--drivers/staging/gma500/gem.c292
-rw-r--r--drivers/staging/gma500/gem_glue.c89
-rw-r--r--drivers/staging/gma500/gem_glue.h2
-rw-r--r--drivers/staging/gma500/gtt.c553
-rw-r--r--drivers/staging/gma500/gtt.h64
-rw-r--r--drivers/staging/gma500/intel_bios.c303
-rw-r--r--drivers/staging/gma500/intel_bios.h430
-rw-r--r--drivers/staging/gma500/intel_i2c.c170
-rw-r--r--drivers/staging/gma500/intel_opregion.c81
-rw-r--r--drivers/staging/gma500/mdfld_device.c714
-rw-r--r--drivers/staging/gma500/mdfld_dsi_dbi.c761
-rw-r--r--drivers/staging/gma500/mdfld_dsi_dbi.h173
-rw-r--r--drivers/staging/gma500/mdfld_dsi_dbi_dpu.c778
-rw-r--r--drivers/staging/gma500/mdfld_dsi_dbi_dpu.h154
-rw-r--r--drivers/staging/gma500/mdfld_dsi_dpi.c805
-rw-r--r--drivers/staging/gma500/mdfld_dsi_dpi.h78
-rw-r--r--drivers/staging/gma500/mdfld_dsi_output.c1014
-rw-r--r--drivers/staging/gma500/mdfld_dsi_output.h138
-rw-r--r--drivers/staging/gma500/mdfld_dsi_pkg_sender.c1484
-rw-r--r--drivers/staging/gma500/mdfld_dsi_pkg_sender.h184
-rw-r--r--drivers/staging/gma500/mdfld_intel_display.c1404
-rw-r--r--drivers/staging/gma500/mdfld_msic.h31
-rw-r--r--drivers/staging/gma500/mdfld_output.c171
-rw-r--r--drivers/staging/gma500/mdfld_output.h41
-rw-r--r--drivers/staging/gma500/mdfld_pyr_cmd.c558
-rw-r--r--drivers/staging/gma500/mdfld_tmd_vid.c206
-rw-r--r--drivers/staging/gma500/mdfld_tpo_cmd.c509
-rw-r--r--drivers/staging/gma500/mdfld_tpo_vid.c140
-rw-r--r--drivers/staging/gma500/medfield.h268
-rw-r--r--drivers/staging/gma500/mid_bios.c270
-rw-r--r--drivers/staging/gma500/mid_bios.h21
-rw-r--r--drivers/staging/gma500/mmu.c858
-rw-r--r--drivers/staging/gma500/mrst.h252
-rw-r--r--drivers/staging/gma500/mrst_crtc.c604
-rw-r--r--drivers/staging/gma500/mrst_device.c634
-rw-r--r--drivers/staging/gma500/mrst_hdmi.c852
-rw-r--r--drivers/staging/gma500/mrst_hdmi_i2c.c328
-rw-r--r--drivers/staging/gma500/mrst_lvds.c407
-rw-r--r--drivers/staging/gma500/power.c318
-rw-r--r--drivers/staging/gma500/power.h67
-rw-r--r--drivers/staging/gma500/psb_device.c321
-rw-r--r--drivers/staging/gma500/psb_drm.h219
-rw-r--r--drivers/staging/gma500/psb_drv.c1230
-rw-r--r--drivers/staging/gma500/psb_drv.h952
-rw-r--r--drivers/staging/gma500/psb_intel_display.c1429
-rw-r--r--drivers/staging/gma500/psb_intel_display.h28
-rw-r--r--drivers/staging/gma500/psb_intel_drv.h230
-rw-r--r--drivers/staging/gma500/psb_intel_lvds.c854
-rw-r--r--drivers/staging/gma500/psb_intel_modes.c77
-rw-r--r--drivers/staging/gma500/psb_intel_reg.h1235
-rw-r--r--drivers/staging/gma500/psb_intel_sdvo.c1293
-rw-r--r--drivers/staging/gma500/psb_intel_sdvo_regs.h338
-rw-r--r--drivers/staging/gma500/psb_irq.c627
-rw-r--r--drivers/staging/gma500/psb_irq.h45
-rw-r--r--drivers/staging/gma500/psb_lid.c88
-rw-r--r--drivers/staging/gma500/psb_reg.h582
-rw-r--r--drivers/staging/media/go7007/go7007-usb.c1
-rw-r--r--drivers/staging/omapdrm/Makefile1
-rw-r--r--drivers/staging/omapdrm/omap_crtc.c191
-rw-r--r--drivers/staging/omapdrm/omap_drv.c81
-rw-r--r--drivers/staging/omapdrm/omap_drv.h52
-rw-r--r--drivers/staging/omapdrm/omap_fb.c224
-rw-r--r--drivers/staging/omapdrm/omap_fbdev.c59
-rw-r--r--drivers/staging/omapdrm/omap_gem.c23
-rw-r--r--drivers/staging/omapdrm/omap_plane.c344
-rw-r--r--drivers/staging/omapdrm/omap_priv.h12
-rw-r--r--drivers/staging/pohmelfs/Kconfig20
-rw-r--r--drivers/staging/pohmelfs/Makefile3
-rw-r--r--drivers/staging/pohmelfs/config.c611
-rw-r--r--drivers/staging/pohmelfs/crypto.c878
-rw-r--r--drivers/staging/pohmelfs/dir.c1102
-rw-r--r--drivers/staging/pohmelfs/inode.c2055
-rw-r--r--drivers/staging/pohmelfs/lock.c182
-rw-r--r--drivers/staging/pohmelfs/mcache.c171
-rw-r--r--drivers/staging/pohmelfs/net.c1209
-rw-r--r--drivers/staging/pohmelfs/netfs.h919
-rw-r--r--drivers/staging/pohmelfs/path_entry.c120
-rw-r--r--drivers/staging/pohmelfs/trans.c706
-rw-r--r--drivers/staging/rtl8712/drv_types.h7
-rw-r--r--drivers/staging/rtl8712/hal_init.c62
-rw-r--r--drivers/staging/rtl8712/os_intfs.c14
-rw-r--r--drivers/staging/rtl8712/rtl8712_hal.h1
-rw-r--r--drivers/staging/rtl8712/rtl871x_sta_mgt.c1
-rw-r--r--drivers/staging/rtl8712/usb_intf.c10
-rw-r--r--drivers/staging/tidspbridge/core/tiomap3430.c2
-rw-r--r--drivers/staging/tidspbridge/rmgr/drv_interface.c58
-rw-r--r--drivers/staging/usbip/stub_main.c4
-rw-r--r--drivers/staging/zcache/zcache-main.c25
-rw-r--r--drivers/target/iscsi/iscsi_target.c39
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c1
-rw-r--r--drivers/target/iscsi/iscsi_target_core.h6
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c4
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c39
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c11
-rw-r--r--drivers/target/target_core_alua.c8
-rw-r--r--drivers/target/target_core_cdb.c51
-rw-r--r--drivers/target/target_core_configfs.c12
-rw-r--r--drivers/target/target_core_device.c28
-rw-r--r--drivers/target/target_core_fabric_configfs.c4
-rw-r--r--drivers/target/target_core_iblock.c11
-rw-r--r--drivers/target/target_core_internal.h2
-rw-r--r--drivers/target/target_core_pr.c43
-rw-r--r--drivers/target/target_core_pscsi.c4
-rw-r--r--drivers/target/target_core_tpg.c3
-rw-r--r--drivers/target/target_core_transport.c124
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c9
-rw-r--r--drivers/tty/serial/8250/8250.c2
-rw-r--r--drivers/tty/serial/m32r_sio.c (renamed from drivers/tty/serial/8250/m32r_sio.c)0
-rw-r--r--drivers/tty/serial/m32r_sio.h (renamed from drivers/tty/serial/8250/m32r_sio.h)0
-rw-r--r--drivers/tty/serial/m32r_sio_reg.h (renamed from drivers/tty/serial/8250/m32r_sio_reg.h)0
-rw-r--r--drivers/tty/serial/omap-serial.c30
-rw-r--r--drivers/tty/serial/samsung.c3
-rw-r--r--drivers/tty/vt/vt_ioctl.c1
-rw-r--r--drivers/usb/gadget/f_loopback.c2
-rw-r--r--drivers/usb/host/Kconfig8
-rw-r--r--drivers/usb/host/ehci-fsl.c11
-rw-r--r--drivers/usb/host/ehci-fsl.h1
-rw-r--r--drivers/usb/host/pci-quirks.c6
-rw-r--r--drivers/usb/musb/musb_io.h3
-rw-r--r--drivers/usb/otg/Kconfig2
-rw-r--r--drivers/usb/serial/ftdi_sio.c1
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h7
-rw-r--r--drivers/usb/serial/option.c129
-rw-r--r--drivers/usb/serial/qcserial.c16
-rw-r--r--drivers/video/atmel_lcdfb.c2
-rw-r--r--drivers/video/fsl-diu-fb.c4
-rw-r--r--drivers/video/intelfb/intelfbdrv.c1
-rw-r--r--drivers/video/omap2/dss/dispc.c2
-rw-r--r--drivers/video/omap2/dss/dpi.c5
-rw-r--r--drivers/video/omap2/dss/dsi.c2
-rw-r--r--drivers/video/omap2/dss/dss.c2
-rw-r--r--drivers/video/omap2/dss/hdmi.c5
-rw-r--r--drivers/video/omap2/dss/rfbi.c2
-rw-r--r--drivers/video/omap2/dss/ti_hdmi.h4
-rw-r--r--drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c68
-rw-r--r--drivers/video/omap2/dss/venc.c2
-rw-r--r--drivers/xen/cpu_hotplug.c3
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c8
-rw-r--r--drivers/xen/xen-pciback/xenbus.c5
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c4
-rw-r--r--fs/bio.c10
-rw-r--r--fs/ceph/caps.c4
-rw-r--r--fs/ceph/dir.c4
-rw-r--r--fs/ceph/mds_client.c10
-rw-r--r--fs/ceph/mds_client.h7
-rw-r--r--fs/ceph/xattr.c4
-rw-r--r--fs/cifs/Kconfig4
-rw-r--r--fs/cifs/connect.c23
-rw-r--r--fs/cifs/dir.c2
-rw-r--r--fs/cifs/sess.c11
-rw-r--r--fs/ecryptfs/crypto.c68
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h6
-rw-r--r--fs/ecryptfs/inode.c2
-rw-r--r--fs/ecryptfs/keystore.c9
-rw-r--r--fs/ecryptfs/mmap.c4
-rw-r--r--fs/ecryptfs/read_write.c4
-rw-r--r--fs/ecryptfs/super.c14
-rw-r--r--fs/exec.c33
-rw-r--r--fs/fs-writeback.c16
-rw-r--r--fs/ioprio.c2
-rw-r--r--fs/jffs2/erase.c2
-rw-r--r--fs/logfs/dev_mtd.c6
-rw-r--r--fs/nilfs2/ioctl.c2
-rw-r--r--fs/proc/base.c126
-rw-r--r--fs/xfs/kmem.h6
-rw-r--r--fs/xfs/xfs_dquot.c103
-rw-r--r--fs/xfs/xfs_log_recover.c2
-rw-r--r--fs/xfs/xfs_qm.c291
-rw-r--r--fs/xfs/xfs_qm.h14
-rw-r--r--fs/xfs/xfs_qm_stats.c4
-rw-r--r--fs/xfs/xfs_trace.h5
-rw-r--r--include/asm-generic/pci_iomap.h10
-rw-r--r--include/linux/binfmts.h3
-rw-r--r--include/linux/bitops.h20
-rw-r--r--include/linux/blkdev.h3
-rw-r--r--include/linux/cdrom.h3
-rw-r--r--include/linux/elevator.h9
-rw-r--r--include/linux/gpio_keys.h2
-rw-r--r--include/linux/hyperv.h2
-rw-r--r--include/linux/iocontext.h5
-rw-r--r--[-rwxr-xr-x]include/linux/lp8727.h0
-rw-r--r--include/linux/mfd/twl6040.h2
-rw-r--r--include/linux/mmc/card.h4
-rw-r--r--include/linux/mmc/dw_mmc.h6
-rw-r--r--include/linux/mmc/host.h20
-rw-r--r--include/linux/mpi.h2
-rw-r--r--include/linux/mtd/mtd.h6
-rw-r--r--include/linux/perf_event.h1
-rw-r--r--include/linux/pm_qos.h14
-rw-r--r--include/linux/proportions.h4
-rw-r--r--include/linux/sched.h6
-rw-r--r--include/linux/sh_dma.h1
-rw-r--r--include/linux/usb/ch9.h2
-rw-r--r--include/net/flow.h10
-rw-r--r--include/net/netprio_cgroup.h48
-rw-r--r--include/net/route.h4
-rw-r--r--include/net/sch_generic.h9
-rw-r--r--include/net/tcp.h14
-rw-r--r--include/sound/core.h2
-rw-r--r--include/target/target_core_backend.h4
-rw-r--r--include/target/target_core_base.h1
-rw-r--r--include/target/target_core_fabric.h2
-rw-r--r--include/trace/events/writeback.h7
-rw-r--r--include/video/omapdss.h5
-rw-r--r--kernel/events/core.c123
-rw-r--r--kernel/exit.c16
-rw-r--r--kernel/fork.c22
-rw-r--r--kernel/kprobes.c6
-rw-r--r--kernel/params.c3
-rw-r--r--kernel/power/power.h24
-rw-r--r--kernel/power/process.c7
-rw-r--r--kernel/power/user.c6
-rw-r--r--kernel/relay.c10
-rw-r--r--kernel/sched/core.c19
-rw-r--r--kernel/sched/fair.c34
-rw-r--r--kernel/sched/rt.c5
-rw-r--r--kernel/watchdog.c2
-rw-r--r--lib/Kconfig7
-rw-r--r--lib/Makefile2
-rw-r--r--lib/bug.c2
-rw-r--r--lib/clz_tab.c18
-rw-r--r--lib/digsig.c52
-rw-r--r--lib/kstrtox.c18
-rw-r--r--lib/mpi/longlong.h44
-rw-r--r--lib/mpi/mpi-bit.c19
-rw-r--r--lib/mpi/mpi-div.c5
-rw-r--r--lib/mpi/mpi-pow.c2
-rw-r--r--lib/mpi/mpicoder.c91
-rw-r--r--lib/mpi/mpih-div.c4
-rw-r--r--lib/mpi/mpiutil.c5
-rw-r--r--lib/pci_iomap.c2
-rw-r--r--mm/backing-dev.c23
-rw-r--r--mm/compaction.c24
-rw-r--r--mm/filemap.c8
-rw-r--r--mm/filemap_xip.c7
-rw-r--r--mm/huge_memory.c4
-rw-r--r--mm/kmemleak.c3
-rw-r--r--mm/memcontrol.c3
-rw-r--r--mm/migrate.c2
-rw-r--r--mm/process_vm_access.c23
-rw-r--r--mm/swap.c2
-rw-r--r--net/caif/caif_socket.c10
-rw-r--r--net/caif/cfmuxl.c12
-rw-r--r--net/ceph/ceph_common.c2
-rw-r--r--net/ceph/mon_client.c13
-rw-r--r--net/core/dev.c10
-rw-r--r--net/core/ethtool.c2
-rw-r--r--net/core/netpoll.c2
-rw-r--r--net/core/netprio_cgroup.c15
-rw-r--r--net/core/sock.c7
-rw-r--r--net/ipv4/Kconfig2
-rw-r--r--net/ipv4/arp.c3
-rw-r--r--net/ipv4/ip_options.c2
-rw-r--r--net/ipv4/sysctl_net_ipv4.c6
-rw-r--r--net/ipv4/tcp.c23
-rw-r--r--net/ipv4/tcp_input.c45
-rw-r--r--net/ipv4/tcp_ipv4.c5
-rw-r--r--net/ipv4/tcp_timer.c5
-rw-r--r--net/mac80211/main.c4
-rw-r--r--net/mac80211/rx.c2
-rw-r--r--net/rxrpc/ar-key.c4
-rw-r--r--net/sched/sch_choke.c3
-rw-r--r--net/sched/sch_netem.c3
-rw-r--r--net/sched/sch_sfb.c3
-rw-r--r--net/sched/sch_sfq.c5
-rwxr-xr-xscripts/checkpatch.pl6
-rw-r--r--scripts/mod/file2alias.c2
-rw-r--r--sound/isa/sb/emu8000_patch.c1
-rw-r--r--sound/pci/hda/hda_codec.c2
-rw-r--r--sound/pci/hda/hda_jack.c24
-rw-r--r--sound/pci/hda/patch_ca0132.c33
-rw-r--r--sound/pci/hda/patch_cirrus.c6
-rw-r--r--sound/pci/hda/patch_realtek.c87
-rw-r--r--sound/pci/hda/patch_sigmatel.c4
-rw-r--r--sound/pci/hda/patch_via.c287
-rw-r--r--sound/pci/intel8x0.c6
-rw-r--r--sound/pci/oxygen/oxygen_mixer.c25
-rw-r--r--sound/soc/codecs/cs42l73.c2
-rw-r--r--sound/soc/codecs/wm5100.c12
-rw-r--r--sound/soc/codecs/wm8962.c8
-rw-r--r--sound/soc/codecs/wm8994.c16
-rw-r--r--sound/soc/codecs/wm8996.c2
-rw-r--r--sound/soc/codecs/wm_hubs.c18
-rw-r--r--sound/soc/samsung/neo1973_wm8753.c65
-rw-r--r--sound/soc/sh/fsi.c6
-rw-r--r--sound/soc/soc-core.c11
-rw-r--r--sound/usb/quirks-table.h8
-rw-r--r--tools/perf/Makefile7
-rw-r--r--tools/perf/bench/mem-memcpy-x86-64-asm.S6
-rw-r--r--tools/perf/builtin-probe.c2
-rw-r--r--tools/perf/builtin-top.c13
-rw-r--r--tools/perf/util/event.c2
-rw-r--r--tools/perf/util/evsel.c1
-rw-r--r--tools/perf/util/header.c2
-rw-r--r--tools/perf/util/probe-event.c2
-rw-r--r--tools/perf/util/symbol.c1
-rw-r--r--tools/perf/util/trace-event-parse.c3
-rw-r--r--tools/perf/util/ui/browsers/hists.c2
-rw-r--r--tools/perf/util/ui/helpline.c1
-rw-r--r--tools/perf/util/util.h1
-rw-r--r--virt/kvm/kvm_main.c2
744 files changed, 5823 insertions, 45321 deletions
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index 2f7fd4360848..9c27e5125dd2 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -102,9 +102,12 @@ X!Iinclude/linux/kobject.h
 !Iinclude/linux/device.h
  </sect1>
  <sect1><title>Device Drivers Base</title>
+!Idrivers/base/init.c
 !Edrivers/base/driver.c
 !Edrivers/base/core.c
+!Edrivers/base/syscore.c
 !Edrivers/base/class.c
+!Idrivers/base/node.c
 !Edrivers/base/firmware_class.c
 !Edrivers/base/transport_class.c
 <!-- Cannot be included, because
@@ -113,7 +116,7 @@ X!Iinclude/linux/kobject.h
  exceed allowed 44 characters maximum
 X!Edrivers/base/attribute_container.c
 -->
-!Edrivers/base/sys.c
+!Edrivers/base/dd.c
 <!--
 X!Edrivers/base/interface.c
 -->
@@ -121,6 +124,11 @@ X!Edrivers/base/interface.c
 !Edrivers/base/platform.c
 !Edrivers/base/bus.c
  </sect1>
+ <sect1><title>Device Drivers DMA Management</title>
+!Edrivers/base/dma-buf.c
+!Edrivers/base/dma-coherent.c
+!Edrivers/base/dma-mapping.c
+ </sect1>
  <sect1><title>Device Drivers Power Management</title>
 !Edrivers/base/power/main.c
  </sect1>
@@ -219,7 +227,7 @@ X!Isound/sound_firmware.c
  <chapter id="uart16x50">
  <title>16x50 UART Driver</title>
 !Edrivers/tty/serial/serial_core.c
-!Edrivers/tty/serial/8250.c
+!Edrivers/tty/serial/8250/8250.c
  </chapter>
 
  <chapter id="fbdev">
diff --git a/Documentation/input/event-codes.txt b/Documentation/input/event-codes.txt
index 23fcb05175be..53305bd08182 100644
--- a/Documentation/input/event-codes.txt
+++ b/Documentation/input/event-codes.txt
@@ -17,11 +17,11 @@ reports supported by a device are also provided by sysfs in
 class/input/event*/device/capabilities/, and the properties of a device are
 provided in class/input/event*/device/properties.
 
-Types:
-==========
-Types are groupings of codes under a logical input construct. Each type has a
-set of applicable codes to be used in generating events. See the Codes section
-for details on valid codes for each type.
+Event types:
+===========
+Event types are groupings of codes under a logical input construct. Each
+type has a set of applicable codes to be used in generating events. See the
+Codes section for details on valid codes for each type.
 
 * EV_SYN:
  - Used as markers to separate events. Events may be separated in time or in
@@ -63,9 +63,9 @@ for details on valid codes for each type.
 * EV_FF_STATUS:
  - Used to receive force feedback device status.
 
-Codes:
-==========
-Codes define the precise type of event.
+Event codes:
+===========
+Event codes define the precise type of event.
 
 EV_SYN:
 ----------
@@ -220,6 +220,56 @@ EV_PWR:
 EV_PWR events are a special type of event used specifically for power
 mangement. Its usage is not well defined. To be addressed later.
 
+Device properties:
+=================
+Normally, userspace sets up an input device based on the data it emits,
+i.e., the event types. In the case of two devices emitting the same event
+types, additional information can be provided in the form of device
+properties.
+
+INPUT_PROP_DIRECT + INPUT_PROP_POINTER:
+--------------------------------------
+The INPUT_PROP_DIRECT property indicates that device coordinates should be
+directly mapped to screen coordinates (not taking into account trivial
+transformations, such as scaling, flipping and rotating). Non-direct input
+devices require non-trivial transformation, such as absolute to relative
+transformation for touchpads. Typical direct input devices: touchscreens,
+drawing tablets; non-direct devices: touchpads, mice.
+
+The INPUT_PROP_POINTER property indicates that the device is not transposed
+on the screen and thus requires use of an on-screen pointer to trace user's
+movements. Typical pointer devices: touchpads, tablets, mice; non-pointer
+device: touchscreen.
+
+If neither INPUT_PROP_DIRECT or INPUT_PROP_POINTER are set, the property is
+considered undefined and the device type should be deduced in the
+traditional way, using emitted event types.
+
+INPUT_PROP_BUTTONPAD:
+--------------------
+For touchpads where the button is placed beneath the surface, such that
+pressing down on the pad causes a button click, this property should be
+set. Common in clickpad notebooks and macbooks from 2009 and onwards.
+
+Originally, the buttonpad property was coded into the bcm5974 driver
+version field under the name integrated button. For backwards
+compatibility, both methods need to be checked in userspace.
+
+INPUT_PROP_SEMI_MT:
+------------------
+Some touchpads, most common between 2008 and 2011, can detect the presence
+of multiple contacts without resolving the individual positions; only the
+number of contacts and a rectangular shape is known. For such
+touchpads, the semi-mt property should be set.
+
+Depending on the device, the rectangle may enclose all touches, like a
+bounding box, or just some of them, for instance the two most recent
+touches. The diversity makes the rectangle of limited use, but some
+gestures can normally be extracted from it.
+
+If INPUT_PROP_SEMI_MT is not set, the device is assumed to be a true MT
+device.
+
 Guidelines:
 ==========
 The guidelines below ensure proper single-touch and multi-finger functionality.
@@ -240,6 +290,8 @@ used to report when a touch is active on the screen.
 BTN_{MOUSE,LEFT,MIDDLE,RIGHT} must not be reported as the result of touch
 contact. BTN_TOOL_<name> events should be reported where possible.
 
+For new hardware, INPUT_PROP_DIRECT should be set.
+
 Trackpads:
 ----------
 Legacy trackpads that only provide relative position information must report
@@ -250,6 +302,8 @@ location of the touch. BTN_TOUCH should be used to report when a touch is active
 on the trackpad. Where multi-finger support is available, BTN_TOOL_<name> should
 be used to report the number of touches active on the trackpad.
 
+For new hardware, INPUT_PROP_POINTER should be set.
+
 Tablets:
 ----------
 BTN_TOOL_<name> events must be reported when a stylus or other tool is active on
@@ -260,3 +314,5 @@ button may be used for buttons on the tablet except BTN_{MOUSE,LEFT}.
 BTN_{0,1,2,etc} are good generic codes for unlabeled buttons. Do not use
 meaningful buttons, like BTN_FORWARD, unless the button is labeled for that
 purpose on the device.
+
+For new hardware, both INPUT_PROP_DIRECT and INPUT_PROP_POINTER should be set.
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 8c20fbd8b42d..6d78841fd416 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -601,6 +601,8 @@ can be ORed together:
  instead of using the one provided by the hardware.
  512 - A kernel warning has occurred.
 1024 - A module from drivers/staging was loaded.
+2048 - The system is working around a severe firmware bug.
+4096 - An out-of-tree module has been loaded.
 
 ==============================================================
 
diff --git a/MAINTAINERS b/MAINTAINERS
index a1fce9a3ab20..9a648eb8e213 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -159,7 +159,7 @@ S: Maintained
 F: drivers/net/ethernet/realtek/r8169.c
 
 8250/16?50 (AND CLONE UARTS) SERIAL DRIVER
-M: Greg Kroah-Hartman <gregkh@suse.de>
+M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 L: linux-serial@vger.kernel.org
 W: http://serial.sourceforge.net
 S: Maintained
@@ -789,12 +789,6 @@ F: arch/arm/mach-mx*/
 F: arch/arm/mach-imx/
 F: arch/arm/plat-mxc/
 
-ARM/FREESCALE IMX51
-M: Amit Kucheria <amit.kucheria@canonical.com>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Maintained
-F: arch/arm/mach-mx5/
-
 ARM/FREESCALE IMX6
 M: Shawn Guo <shawn.guo@linaro.org>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1783,9 +1777,9 @@ X: net/wireless/wext*
 
 CHAR and MISC DRIVERS
 M: Arnd Bergmann <arnd@arndb.de>
-M: Greg Kroah-Hartman <greg@kroah.com>
+M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
-S: Maintained
+S: Supported
 F: drivers/char/*
 F: drivers/misc/*
 
@@ -2287,7 +2281,7 @@ F: drivers/acpi/dock.c
 DOCUMENTATION
 M: Randy Dunlap <rdunlap@xenotime.net>
 L: linux-doc@vger.kernel.org
-T: quilt http://userweb.kernel.org/~rdunlap/kernel-doc-patches/current/
+T: quilt http://xenotime.net/kernel-doc-patches/current/
 S: Maintained
 F: Documentation/
 
@@ -2320,7 +2314,7 @@ F: lib/lru_cache.c
 F: Documentation/blockdev/drbd/
 
 DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS
-M: Greg Kroah-Hartman <gregkh@suse.de>
+M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core-2.6.git
 S: Supported
 F: Documentation/kobject.txt
@@ -3324,6 +3318,12 @@ S: Maintained
 F: net/ieee802154/
 F: drivers/ieee802154/
 
+IIO SUBSYSTEM AND DRIVERS
+M: Jonathan Cameron <jic23@cam.ac.uk>
+L: linux-iio@vger.kernel.org
+S: Maintained
+F: drivers/staging/iio/
+
 IKANOS/ADI EAGLE ADSL USB DRIVER
 M: Matthieu Castet <castet.matthieu@free.fr>
 M: Stanislaw Gruszka <stf_xl@wp.pl>
@@ -3992,11 +3992,11 @@ M: Rusty Russell <rusty@rustcorp.com.au>
 L: lguest@lists.ozlabs.org
 W: http://lguest.ozlabs.org/
 S: Odd Fixes
-F: Documentation/virtual/lguest/
+F: arch/x86/include/asm/lguest*.h
 F: arch/x86/lguest/
 F: drivers/lguest/
 F: include/linux/lguest*.h
-F: arch/x86/include/asm/lguest*.h
+F: tools/lguest/
 
 LINUX FOR IBM pSERIES (RS/6000)
 M: Paul Mackerras <paulus@au.ibm.com>
@@ -4136,7 +4136,7 @@ L: linux-ntfs-dev@lists.sourceforge.net
 W: http://www.linux-ntfs.org/content/view/19/37/
 S: Maintained
 F: Documentation/ldm.txt
-F: fs/partitions/ldm.*
+F: block/partitions/ldm.*
 
 LogFS
 M: Joern Engel <joern@logfs.org>
@@ -5633,7 +5633,7 @@ W: http://www.ibm.com/developerworks/linux/linux390/
 S: Supported
 F: arch/s390/
 F: drivers/s390/
-F: fs/partitions/ibm.c
+F: block/partitions/ibm.c
 F: Documentation/s390/
 F: Documentation/DocBook/s390*
 
@@ -6276,15 +6276,15 @@ S: Maintained
 F: arch/alpha/kernel/srm_env.c
 
 STABLE BRANCH
-M: Greg Kroah-Hartman <greg@kroah.com>
+M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 L: stable@vger.kernel.org
-S: Maintained
+S: Supported
 
 STAGING SUBSYSTEM
-M: Greg Kroah-Hartman <gregkh@suse.de>
+M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
 L: devel@driverdev.osuosl.org
-S: Maintained
+S: Supported
 F: drivers/staging/
 
 STAGING - AGERE HERMES II and II.5 WIRELESS DRIVERS
@@ -6396,11 +6396,6 @@ M: Omar Ramirez Luna <omar.ramirez@ti.com>
 S: Odd Fixes
 F: drivers/staging/tidspbridge/
 
-STAGING - TRIDENT TVMASTER TMxxxx USB VIDEO CAPTURE DRIVERS
-L: linux-media@vger.kernel.org
-S: Odd Fixes
-F: drivers/staging/tm6000/
-
 STAGING - USB ENE SM/MS CARD READER DRIVER
 M: Al Cho <acho@novell.com>
 S: Odd Fixes
@@ -6669,8 +6664,8 @@ S: Maintained
 K: ^Subject:.*(?i)trivial
 
 TTY LAYER
-M: Greg Kroah-Hartman <gregkh@suse.de>
-S: Maintained
+M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+S: Supported
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty-2.6.git
 F: drivers/tty/
 F: drivers/tty/serial/serial_core.c
@@ -6958,7 +6953,7 @@ S: Maintained
 F: drivers/usb/serial/digi_acceleport.c
 
 USB SERIAL DRIVER
-M: Greg Kroah-Hartman <gregkh@suse.de>
+M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 L: linux-usb@vger.kernel.org
 S: Supported
 F: Documentation/usb/usb-serial.txt
@@ -6973,9 +6968,8 @@ S: Maintained
 F: drivers/usb/serial/empeg.c
 
 USB SERIAL KEYSPAN DRIVER
-M: Greg Kroah-Hartman <greg@kroah.com>
+M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 L: linux-usb@vger.kernel.org
-W: http://www.kroah.com/linux/
 S: Maintained
 F: drivers/usb/serial/*keyspan*
 
@@ -7003,7 +6997,7 @@ F: Documentation/video4linux/sn9c102.txt
 F: drivers/media/video/sn9c102/
 
 USB SUBSYSTEM
-M: Greg Kroah-Hartman <gregkh@suse.de>
+M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 L: linux-usb@vger.kernel.org
 W: http://www.linux-usb.org
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6.git
@@ -7090,7 +7084,7 @@ F: fs/hppfs/
 
 USERSPACE I/O (UIO)
 M: "Hans J. Koch" <hjk@hansjkoch.de>
-M: Greg Kroah-Hartman <gregkh@suse.de>
+M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 S: Maintained
 F: Documentation/DocBook/uio-howto.tmpl
 F: drivers/uio/
diff --git a/Makefile b/Makefile
index e3b23e864a53..4ddd641ab615 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 3
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc4
 NAME = Saber-toothed Squirrel
 
 # *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/exynos4210.dtsi b/arch/arm/boot/dts/exynos4210.dtsi
index 63d7578856c1..a1dd2ee83753 100644
--- a/arch/arm/boot/dts/exynos4210.dtsi
+++ b/arch/arm/boot/dts/exynos4210.dtsi
@@ -29,6 +29,7 @@
  compatible = "arm,cortex-a9-gic";
  #interrupt-cells = <3>;
  interrupt-controller;
+ cpu-offset = <0x8000>;
  reg = <0x10490000 0x1000>, <0x10480000 0x100>;
  };
 
diff --git a/arch/arm/boot/dts/tegra-paz00.dts b/arch/arm/boot/dts/tegra-paz00.dts
index 1a1d7023b69b..825d2957da0b 100644
--- a/arch/arm/boot/dts/tegra-paz00.dts
+++ b/arch/arm/boot/dts/tegra-paz00.dts
@@ -46,11 +46,11 @@
  };
 
  serial@70006200 {
- status = "disable";
+ clock-frequency = <216000000>;
  };
 
  serial@70006300 {
- clock-frequency = <216000000>;
+ status = "disable";
  };
 
  serial@70006400 {
@@ -60,7 +60,7 @@
  sdhci@c8000000 {
  cd-gpios = <&gpio 173 0>; /* gpio PV5 */
  wp-gpios = <&gpio 57 0>; /* gpio PH1 */
- power-gpios = <&gpio 155 0>; /* gpio PT3 */
+ power-gpios = <&gpio 169 0>; /* gpio PV1 */
  };
 
  sdhci@c8000200 {
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 5d3ed7e38561..314d4664eae7 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -198,7 +198,15 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
  unsigned long addr)
 {
  pgtable_page_dtor(pte);
- tlb_add_flush(tlb, addr);
+
+ /*
+  * With the classic ARM MMU, a pte page has two corresponding pmd
+  * entries, each covering 1MB.
+  */
+ addr &= PMD_MASK;
+ tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
+ tlb_add_flush(tlb, addr + SZ_1M);
+
  tlb_remove_page(tlb, pte);
 }
 
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 3a456c6c7005..be16a48007b4 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -790,7 +790,7 @@ __kuser_cmpxchg64: @ 0xffff0f60
  smp_dmb arm
  rsbs r0, r3, #0 @ set returned val and C flag
  ldmfd sp!, {r4, r5, r6, r7}
- bx lr
+ usr_ret lr
 
 #elif !defined(CONFIG_SMP)
 
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 460bbbb6b885..6933244c68f9 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -469,6 +469,20 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
  [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
  },
  },
+ [C(NODE)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
 };
 
 /*
@@ -579,6 +593,20 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
  [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
  },
  },
+ [C(NODE)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
 };
 
 /*
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index e1d5e1929fbd..e33870ff0ac0 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -699,10 +699,13 @@ static int vfp_set(struct task_struct *target,
 {
  int ret;
  struct thread_info *thread = task_thread_info(target);
- struct vfp_hard_struct new_vfp = thread->vfpstate.hard;
+ struct vfp_hard_struct new_vfp;
  const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs);
  const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);
 
+ vfp_sync_hwstate(thread);
+ new_vfp = thread->vfpstate.hard;
+
  ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
  &new_vfp.fpregs,
  user_fpregs_offset,
@@ -723,9 +726,8 @@ static int vfp_set(struct task_struct *target,
  if (ret)
  return ret;
 
- vfp_sync_hwstate(thread);
- thread->vfpstate.hard = new_vfp;
  vfp_flush_hwstate(thread);
+ thread->vfpstate.hard = new_vfp;
 
  return 0;
 }
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 0340224cf73c..9e617bd4a146 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -227,6 +227,8 @@ static int restore_vfp_context(struct vfp_sigframe __user *frame)
227 if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE) 227 if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
228 return -EINVAL; 228 return -EINVAL;
229 229
230 vfp_flush_hwstate(thread);
231
230 /* 232 /*
231 * Copy the floating point registers. There can be unused 233 * Copy the floating point registers. There can be unused
232 * registers see asm/hwcap.h for details. 234 * registers see asm/hwcap.h for details.
@@ -251,9 +253,6 @@ static int restore_vfp_context(struct vfp_sigframe __user *frame)
251 __get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err); 253 __get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
252 __get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err); 254 __get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
253 255
254 if (!err)
255 vfp_flush_hwstate(thread);
256
257 return err ? -EFAULT : 0; 256 return err ? -EFAULT : 0;
258} 257}
259 258
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 99a572702509..f84dfe67724f 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -266,6 +266,7 @@ void die(const char *str, struct pt_regs *regs, int err)
266{ 266{
267 struct thread_info *thread = current_thread_info(); 267 struct thread_info *thread = current_thread_info();
268 int ret; 268 int ret;
269 enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE;
269 270
270 oops_enter(); 271 oops_enter();
271 272
@@ -273,7 +274,9 @@ void die(const char *str, struct pt_regs *regs, int err)
273 console_verbose(); 274 console_verbose();
274 bust_spinlocks(1); 275 bust_spinlocks(1);
275 if (!user_mode(regs)) 276 if (!user_mode(regs))
276 report_bug(regs->ARM_pc, regs); 277 bug_type = report_bug(regs->ARM_pc, regs);
278 if (bug_type != BUG_TRAP_TYPE_NONE)
279 str = "Oops - BUG";
277 ret = __die(str, err, thread, regs); 280 ret = __die(str, err, thread, regs);
278 281
279 if (regs && kexec_should_crash(thread->task)) 282 if (regs && kexec_should_crash(thread->task))
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 1e19691e0406..43a31fb06318 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -10,6 +10,7 @@
10#include <asm/page.h> 10#include <asm/page.h>
11 11
12#define PROC_INFO \ 12#define PROC_INFO \
13 . = ALIGN(4); \
13 VMLINUX_SYMBOL(__proc_info_begin) = .; \ 14 VMLINUX_SYMBOL(__proc_info_begin) = .; \
14 *(.proc.info.init) \ 15 *(.proc.info.init) \
15 VMLINUX_SYMBOL(__proc_info_end) = .; 16 VMLINUX_SYMBOL(__proc_info_end) = .;
diff --git a/arch/arm/mach-at91/at91rm9200_devices.c b/arch/arm/mach-at91/at91rm9200_devices.c
index 18bacec2b094..97676bdae998 100644
--- a/arch/arm/mach-at91/at91rm9200_devices.c
+++ b/arch/arm/mach-at91/at91rm9200_devices.c
@@ -83,7 +83,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
83 * USB Device (Gadget) 83 * USB Device (Gadget)
84 * -------------------------------------------------------------------- */ 84 * -------------------------------------------------------------------- */
85 85
86#ifdef CONFIG_USB_AT91 86#if defined(CONFIG_USB_AT91) || defined(CONFIG_USB_AT91_MODULE)
87static struct at91_udc_data udc_data; 87static struct at91_udc_data udc_data;
88 88
89static struct resource udc_resources[] = { 89static struct resource udc_resources[] = {
diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c
index 642ccb6d26b2..5a24f0b4554d 100644
--- a/arch/arm/mach-at91/at91sam9260_devices.c
+++ b/arch/arm/mach-at91/at91sam9260_devices.c
@@ -84,7 +84,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
84 * USB Device (Gadget) 84 * USB Device (Gadget)
85 * -------------------------------------------------------------------- */ 85 * -------------------------------------------------------------------- */
86 86
87#ifdef CONFIG_USB_AT91 87#if defined(CONFIG_USB_AT91) || defined(CONFIG_USB_AT91_MODULE)
88static struct at91_udc_data udc_data; 88static struct at91_udc_data udc_data;
89 89
90static struct resource udc_resources[] = { 90static struct resource udc_resources[] = {
@@ -1215,8 +1215,7 @@ void __init at91_add_device_serial(void) {}
1215 * CF/IDE 1215 * CF/IDE
1216 * -------------------------------------------------------------------- */ 1216 * -------------------------------------------------------------------- */
1217 1217
1218#if defined(CONFIG_BLK_DEV_IDE_AT91) || defined(CONFIG_BLK_DEV_IDE_AT91_MODULE) || \ 1218#if defined(CONFIG_PATA_AT91) || defined(CONFIG_PATA_AT91_MODULE) || \
1219 defined(CONFIG_PATA_AT91) || defined(CONFIG_PATA_AT91_MODULE) || \
1220 defined(CONFIG_AT91_CF) || defined(CONFIG_AT91_CF_MODULE) 1219 defined(CONFIG_AT91_CF) || defined(CONFIG_AT91_CF_MODULE)
1221 1220
1222static struct at91_cf_data cf0_data; 1221static struct at91_cf_data cf0_data;
@@ -1313,10 +1312,8 @@ void __init at91_add_device_cf(struct at91_cf_data *data)
1313 if (data->flags & AT91_CF_TRUE_IDE) 1312 if (data->flags & AT91_CF_TRUE_IDE)
1314#if defined(CONFIG_PATA_AT91) || defined(CONFIG_PATA_AT91_MODULE) 1313#if defined(CONFIG_PATA_AT91) || defined(CONFIG_PATA_AT91_MODULE)
1315 pdev->name = "pata_at91"; 1314 pdev->name = "pata_at91";
1316#elif defined(CONFIG_BLK_DEV_IDE_AT91) || defined(CONFIG_BLK_DEV_IDE_AT91_MODULE)
1317 pdev->name = "at91_ide";
1318#else 1315#else
1319#warning "board requires AT91_CF_TRUE_IDE: enable either at91_ide or pata_at91" 1316#warning "board requires AT91_CF_TRUE_IDE: enable pata_at91"
1320#endif 1317#endif
1321 else 1318 else
1322 pdev->name = "at91_cf"; 1319 pdev->name = "at91_cf";
diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c
index fc59cbdb0e3c..1e28bed8f425 100644
--- a/arch/arm/mach-at91/at91sam9261_devices.c
+++ b/arch/arm/mach-at91/at91sam9261_devices.c
@@ -87,7 +87,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
87 * USB Device (Gadget) 87 * USB Device (Gadget)
88 * -------------------------------------------------------------------- */ 88 * -------------------------------------------------------------------- */
89 89
90#ifdef CONFIG_USB_AT91 90#if defined(CONFIG_USB_AT91) || defined(CONFIG_USB_AT91_MODULE)
91static struct at91_udc_data udc_data; 91static struct at91_udc_data udc_data;
92 92
93static struct resource udc_resources[] = { 93static struct resource udc_resources[] = {
diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c
index 49aa6a9f4323..70709ab0102a 100644
--- a/arch/arm/mach-at91/at91sam9263_devices.c
+++ b/arch/arm/mach-at91/at91sam9263_devices.c
@@ -92,7 +92,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
92 * USB Device (Gadget) 92 * USB Device (Gadget)
93 * -------------------------------------------------------------------- */ 93 * -------------------------------------------------------------------- */
94 94
95#ifdef CONFIG_USB_AT91 95#if defined(CONFIG_USB_AT91) || defined(CONFIG_USB_AT91_MODULE)
96static struct at91_udc_data udc_data; 96static struct at91_udc_data udc_data;
97 97
98static struct resource udc_resources[] = { 98static struct resource udc_resources[] = {
@@ -355,8 +355,8 @@ void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data) {}
355 * Compact Flash (PCMCIA or IDE) 355 * Compact Flash (PCMCIA or IDE)
356 * -------------------------------------------------------------------- */ 356 * -------------------------------------------------------------------- */
357 357
358#if defined(CONFIG_AT91_CF) || defined(CONFIG_AT91_CF_MODULE) || \ 358#if defined(CONFIG_PATA_AT91) || defined(CONFIG_PATA_AT91_MODULE) || \
359 defined(CONFIG_BLK_DEV_IDE_AT91) || defined(CONFIG_BLK_DEV_IDE_AT91_MODULE) 359 defined(CONFIG_AT91_CF) || defined(CONFIG_AT91_CF_MODULE)
360 360
361static struct at91_cf_data cf0_data; 361static struct at91_cf_data cf0_data;
362 362
@@ -450,7 +450,7 @@ void __init at91_add_device_cf(struct at91_cf_data *data)
450 at91_set_A_periph(AT91_PIN_PD9, 0); /* CFCE2 */ 450 at91_set_A_periph(AT91_PIN_PD9, 0); /* CFCE2 */
451 at91_set_A_periph(AT91_PIN_PD14, 0); /* CFNRW */ 451 at91_set_A_periph(AT91_PIN_PD14, 0); /* CFNRW */
452 452
453 pdev->name = (data->flags & AT91_CF_TRUE_IDE) ? "at91_ide" : "at91_cf"; 453 pdev->name = (data->flags & AT91_CF_TRUE_IDE) ? "pata_at91" : "at91_cf";
454 platform_device_register(pdev); 454 platform_device_register(pdev);
455} 455}
456#else 456#else
diff --git a/arch/arm/mach-at91/include/mach/at91sam9_smc.h b/arch/arm/mach-at91/include/mach/at91sam9_smc.h
index eb18a70fa647..175e1fdd9fe8 100644
--- a/arch/arm/mach-at91/include/mach/at91sam9_smc.h
+++ b/arch/arm/mach-at91/include/mach/at91sam9_smc.h
@@ -18,6 +18,35 @@
18 18
19#include <mach/cpu.h> 19#include <mach/cpu.h>
20 20
21#ifndef __ASSEMBLY__
22struct sam9_smc_config {
23 /* Setup register */
24 u8 ncs_read_setup;
25 u8 nrd_setup;
26 u8 ncs_write_setup;
27 u8 nwe_setup;
28
29 /* Pulse register */
30 u8 ncs_read_pulse;
31 u8 nrd_pulse;
32 u8 ncs_write_pulse;
33 u8 nwe_pulse;
34
35 /* Cycle register */
36 u16 read_cycle;
37 u16 write_cycle;
38
39 /* Mode register */
40 u32 mode;
41 u8 tdf_cycles:4;
42};
43
44extern void sam9_smc_configure(int id, int cs, struct sam9_smc_config *config);
45extern void sam9_smc_read(int id, int cs, struct sam9_smc_config *config);
46extern void sam9_smc_read_mode(int id, int cs, struct sam9_smc_config *config);
47extern void sam9_smc_write_mode(int id, int cs, struct sam9_smc_config *config);
48#endif
49
21#define AT91_SMC_SETUP 0x00 /* Setup Register for CS n */ 50#define AT91_SMC_SETUP 0x00 /* Setup Register for CS n */
22#define AT91_SMC_NWESETUP (0x3f << 0) /* NWE Setup Length */ 51#define AT91_SMC_NWESETUP (0x3f << 0) /* NWE Setup Length */
23#define AT91_SMC_NWESETUP_(x) ((x) << 0) 52#define AT91_SMC_NWESETUP_(x) ((x) << 0)
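[Editor's note: a minimal sketch of how a board file might use the sam9_smc_config structure and sam9_smc_configure() prototype introduced above; the chip-select index, timing values and mode flags are illustrative only and are not part of this patch.]

	static struct sam9_smc_config __initdata board_nand_smc_config = {
		.ncs_read_setup		= 0,
		.nrd_setup		= 2,
		.ncs_write_setup	= 0,
		.nwe_setup		= 2,

		.ncs_read_pulse		= 4,
		.nrd_pulse		= 4,
		.ncs_write_pulse	= 4,
		.nwe_pulse		= 4,

		.read_cycle		= 7,
		.write_cycle		= 7,

		.mode			= AT91_SMC_READMODE | AT91_SMC_WRITEMODE
					  | AT91_SMC_EXNWMODE_DISABLE | AT91_SMC_DBW_8,
		.tdf_cycles		= 3,
	};

	/* Program SMC controller 0, chip select 3 with the timings above. */
	sam9_smc_configure(0, 3, &board_nand_smc_config);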
diff --git a/arch/arm/mach-at91/sam9_smc.c b/arch/arm/mach-at91/sam9_smc.c
index 8294783b679d..99a0a1d2b7dc 100644
--- a/arch/arm/mach-at91/sam9_smc.c
+++ b/arch/arm/mach-at91/sam9_smc.c
@@ -2,6 +2,7 @@
2 * linux/arch/arm/mach-at91/sam9_smc.c 2 * linux/arch/arm/mach-at91/sam9_smc.c
3 * 3 *
4 * Copyright (C) 2008 Andrew Victor 4 * Copyright (C) 2008 Andrew Victor
5 * Copyright (C) 2011 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
@@ -22,7 +23,22 @@
22 23
23static void __iomem *smc_base_addr[2]; 24static void __iomem *smc_base_addr[2];
24 25
25static void __init sam9_smc_cs_configure(void __iomem *base, struct sam9_smc_config* config) 26static void sam9_smc_cs_write_mode(void __iomem *base,
27 struct sam9_smc_config *config)
28{
29 __raw_writel(config->mode
30 | AT91_SMC_TDF_(config->tdf_cycles),
31 base + AT91_SMC_MODE);
32}
33
34void sam9_smc_write_mode(int id, int cs,
35 struct sam9_smc_config *config)
36{
37 sam9_smc_cs_write_mode(AT91_SMC_CS(id, cs), config);
38}
39
40static void sam9_smc_cs_configure(void __iomem *base,
41 struct sam9_smc_config *config)
26{ 42{
27 43
28 /* Setup register */ 44 /* Setup register */
@@ -45,16 +61,66 @@ static void __init sam9_smc_cs_configure(void __iomem *base, struct sam9_smc_con
45 base + AT91_SMC_CYCLE); 61 base + AT91_SMC_CYCLE);
46 62
47 /* Mode register */ 63 /* Mode register */
48 __raw_writel(config->mode 64 sam9_smc_cs_write_mode(base, config);
49 | AT91_SMC_TDF_(config->tdf_cycles),
50 base + AT91_SMC_MODE);
51} 65}
52 66
53void __init sam9_smc_configure(int id, int cs, struct sam9_smc_config* config) 67void sam9_smc_configure(int id, int cs,
68 struct sam9_smc_config *config)
54{ 69{
55 sam9_smc_cs_configure(AT91_SMC_CS(id, cs), config); 70 sam9_smc_cs_configure(AT91_SMC_CS(id, cs), config);
56} 71}
57 72
73static void sam9_smc_cs_read_mode(void __iomem *base,
74 struct sam9_smc_config *config)
75{
76 u32 val = __raw_readl(base + AT91_SMC_MODE);
77
78 config->mode = (val & ~AT91_SMC_NWECYCLE);
79 config->tdf_cycles = (val & AT91_SMC_NWECYCLE) >> 16 ;
80}
81
82void sam9_smc_read_mode(int id, int cs,
83 struct sam9_smc_config *config)
84{
85 sam9_smc_cs_read_mode(AT91_SMC_CS(id, cs), config);
86}
87
88static void sam9_smc_cs_read(void __iomem *base,
89 struct sam9_smc_config *config)
90{
91 u32 val;
92
93 /* Setup register */
94 val = __raw_readl(base + AT91_SMC_SETUP);
95
96 config->nwe_setup = val & AT91_SMC_NWESETUP;
97 config->ncs_write_setup = (val & AT91_SMC_NCS_WRSETUP) >> 8;
98 config->nrd_setup = (val & AT91_SMC_NRDSETUP) >> 16;
99 config->ncs_read_setup = (val & AT91_SMC_NCS_RDSETUP) >> 24;
100
101 /* Pulse register */
102 val = __raw_readl(base + AT91_SMC_PULSE);
103
104 config->nwe_setup = val & AT91_SMC_NWEPULSE;
105 config->ncs_write_pulse = (val & AT91_SMC_NCS_WRPULSE) >> 8;
106 config->nrd_pulse = (val & AT91_SMC_NRDPULSE) >> 16;
107 config->ncs_read_pulse = (val & AT91_SMC_NCS_RDPULSE) >> 24;
108
109 /* Cycle register */
110 val = __raw_readl(base + AT91_SMC_CYCLE);
111
112 config->write_cycle = val & AT91_SMC_NWECYCLE;
113 config->read_cycle = (val & AT91_SMC_NRDCYCLE) >> 16;
114
115 /* Mode register */
116 sam9_smc_cs_read_mode(base, config);
117}
118
119void sam9_smc_read(int id, int cs, struct sam9_smc_config *config)
120{
121 sam9_smc_cs_read(AT91_SMC_CS(id, cs), config);
122}
123
58void __init at91sam9_ioremap_smc(int id, u32 addr) 124void __init at91sam9_ioremap_smc(int id, u32 addr)
59{ 125{
60 if (id > 1) { 126 if (id > 1) {
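[Editor's note: a hedged sketch of the read-modify-write pattern the new sam9_smc_read_mode()/sam9_smc_write_mode() accessors allow; it assumes the AT91_SMC_DBW* mode-field macros from at91sam9_smc.h, and the id/cs values and bus-width change are illustrative.]

	struct sam9_smc_config config;

	/* Fetch the current mode/TDF settings for controller 0, chip select 3 ... */
	sam9_smc_read_mode(0, 3, &config);

	/* ... widen the data bus to 16 bits, keeping the other mode bits ... */
	config.mode &= ~AT91_SMC_DBW;
	config.mode |= AT91_SMC_DBW_16;

	/* ... and write only the mode register back. */
	sam9_smc_write_mode(0, 3, &config);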
diff --git a/arch/arm/mach-at91/sam9_smc.h b/arch/arm/mach-at91/sam9_smc.h
index 039c5ce17aec..3e52dcd4a59f 100644
--- a/arch/arm/mach-at91/sam9_smc.h
+++ b/arch/arm/mach-at91/sam9_smc.h
@@ -8,27 +8,4 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10 10
11struct sam9_smc_config {
12 /* Setup register */
13 u8 ncs_read_setup;
14 u8 nrd_setup;
15 u8 ncs_write_setup;
16 u8 nwe_setup;
17
18 /* Pulse register */
19 u8 ncs_read_pulse;
20 u8 nrd_pulse;
21 u8 ncs_write_pulse;
22 u8 nwe_pulse;
23
24 /* Cycle register */
25 u16 read_cycle;
26 u16 write_cycle;
27
28 /* Mode register */
29 u32 mode;
30 u8 tdf_cycles:4;
31};
32
33extern void __init sam9_smc_configure(int id, int cs, struct sam9_smc_config* config);
34extern void __init at91sam9_ioremap_smc(int id, u32 addr); 11extern void __init at91sam9_ioremap_smc(int id, u32 addr);
diff --git a/arch/arm/mach-bcmring/arch.c b/arch/arm/mach-bcmring/arch.c
index 9e5e7552498c..45c97b1ee9b1 100644
--- a/arch/arm/mach-bcmring/arch.c
+++ b/arch/arm/mach-bcmring/arch.c
@@ -194,6 +194,6 @@ MACHINE_START(BCMRING, "BCMRING")
194 .init_early = bcmring_init_early, 194 .init_early = bcmring_init_early,
195 .init_irq = bcmring_init_irq, 195 .init_irq = bcmring_init_irq,
196 .timer = &bcmring_timer, 196 .timer = &bcmring_timer,
197 .init_machine = bcmring_init_machine 197 .init_machine = bcmring_init_machine,
198 .restart = bcmring_restart, 198 .restart = bcmring_restart,
199MACHINE_END 199MACHINE_END
diff --git a/arch/arm/mach-bcmring/dma.c b/arch/arm/mach-bcmring/dma.c
index 1a1a27dd5654..1024396797e1 100644
--- a/arch/arm/mach-bcmring/dma.c
+++ b/arch/arm/mach-bcmring/dma.c
@@ -33,17 +33,11 @@
33 33
34#include <mach/timer.h> 34#include <mach/timer.h>
35 35
36#include <linux/mm.h>
37#include <linux/pfn.h> 36#include <linux/pfn.h>
38#include <linux/atomic.h> 37#include <linux/atomic.h>
39#include <linux/sched.h> 38#include <linux/sched.h>
40#include <mach/dma.h> 39#include <mach/dma.h>
41 40
42/* I don't quite understand why dc4 fails when this is set to 1 and DMA is enabled */
43/* especially since dc4 doesn't use kmalloc'd memory. */
44
45#define ALLOW_MAP_OF_KMALLOC_MEMORY 0
46
47/* ---- Public Variables ------------------------------------------------- */ 41/* ---- Public Variables ------------------------------------------------- */
48 42
49/* ---- Private Constants and Types -------------------------------------- */ 43/* ---- Private Constants and Types -------------------------------------- */
@@ -53,24 +47,12 @@
53#define CONTROLLER_FROM_HANDLE(handle) (((handle) >> 4) & 0x0f) 47#define CONTROLLER_FROM_HANDLE(handle) (((handle) >> 4) & 0x0f)
54#define CHANNEL_FROM_HANDLE(handle) ((handle) & 0x0f) 48#define CHANNEL_FROM_HANDLE(handle) ((handle) & 0x0f)
55 49
56#define DMA_MAP_DEBUG 0
57
58#if DMA_MAP_DEBUG
59# define DMA_MAP_PRINT(fmt, args...) printk("%s: " fmt, __func__, ## args)
60#else
61# define DMA_MAP_PRINT(fmt, args...)
62#endif
63 50
64/* ---- Private Variables ------------------------------------------------ */ 51/* ---- Private Variables ------------------------------------------------ */
65 52
66static DMA_Global_t gDMA; 53static DMA_Global_t gDMA;
67static struct proc_dir_entry *gDmaDir; 54static struct proc_dir_entry *gDmaDir;
68 55
69static atomic_t gDmaStatMemTypeKmalloc = ATOMIC_INIT(0);
70static atomic_t gDmaStatMemTypeVmalloc = ATOMIC_INIT(0);
71static atomic_t gDmaStatMemTypeUser = ATOMIC_INIT(0);
72static atomic_t gDmaStatMemTypeCoherent = ATOMIC_INIT(0);
73
74#include "dma_device.c" 56#include "dma_device.c"
75 57
76/* ---- Private Function Prototypes -------------------------------------- */ 58/* ---- Private Function Prototypes -------------------------------------- */
@@ -79,34 +61,6 @@ static atomic_t gDmaStatMemTypeCoherent = ATOMIC_INIT(0);
79 61
80/****************************************************************************/ 62/****************************************************************************/
81/** 63/**
82* Displays information for /proc/dma/mem-type
83*/
84/****************************************************************************/
85
86static int dma_proc_read_mem_type(char *buf, char **start, off_t offset,
87 int count, int *eof, void *data)
88{
89 int len = 0;
90
91 len += sprintf(buf + len, "dma_map_mem statistics\n");
92 len +=
93 sprintf(buf + len, "coherent: %d\n",
94 atomic_read(&gDmaStatMemTypeCoherent));
95 len +=
96 sprintf(buf + len, "kmalloc: %d\n",
97 atomic_read(&gDmaStatMemTypeKmalloc));
98 len +=
99 sprintf(buf + len, "vmalloc: %d\n",
100 atomic_read(&gDmaStatMemTypeVmalloc));
101 len +=
102 sprintf(buf + len, "user: %d\n",
103 atomic_read(&gDmaStatMemTypeUser));
104
105 return len;
106}
107
108/****************************************************************************/
109/**
110* Displays information for /proc/dma/channels 64* Displays information for /proc/dma/channels
111*/ 65*/
112/****************************************************************************/ 66/****************************************************************************/
@@ -846,8 +800,6 @@ int dma_init(void)
846 dma_proc_read_channels, NULL); 800 dma_proc_read_channels, NULL);
847 create_proc_read_entry("devices", 0, gDmaDir, 801 create_proc_read_entry("devices", 0, gDmaDir,
848 dma_proc_read_devices, NULL); 802 dma_proc_read_devices, NULL);
849 create_proc_read_entry("mem-type", 0, gDmaDir,
850 dma_proc_read_mem_type, NULL);
851 } 803 }
852 804
853out: 805out:
@@ -1565,767 +1517,3 @@ int dma_set_device_handler(DMA_Device_t dev, /* Device to set the callback for.
1565} 1517}
1566 1518
1567EXPORT_SYMBOL(dma_set_device_handler); 1519EXPORT_SYMBOL(dma_set_device_handler);
1568
1569/****************************************************************************/
1570/**
1571* Initializes a memory mapping structure
1572*/
1573/****************************************************************************/
1574
1575int dma_init_mem_map(DMA_MemMap_t *memMap)
1576{
1577 memset(memMap, 0, sizeof(*memMap));
1578
1579 sema_init(&memMap->lock, 1);
1580
1581 return 0;
1582}
1583
1584EXPORT_SYMBOL(dma_init_mem_map);
1585
1586/****************************************************************************/
1587/**
1588* Releases any memory currently being held by a memory mapping structure.
1589*/
1590/****************************************************************************/
1591
1592int dma_term_mem_map(DMA_MemMap_t *memMap)
1593{
1594 down(&memMap->lock); /* Just being paranoid */
1595
1596 /* Free up any allocated memory */
1597
1598 up(&memMap->lock);
1599 memset(memMap, 0, sizeof(*memMap));
1600
1601 return 0;
1602}
1603
1604EXPORT_SYMBOL(dma_term_mem_map);
1605
1606/****************************************************************************/
1607/**
1608* Looks at a memory address and categorizes it.
1609*
1610* @return One of the values from the DMA_MemType_t enumeration.
1611*/
1612/****************************************************************************/
1613
1614DMA_MemType_t dma_mem_type(void *addr)
1615{
1616 unsigned long addrVal = (unsigned long)addr;
1617
1618 if (addrVal >= CONSISTENT_BASE) {
1619 /* NOTE: DMA virtual memory space starts at 0xFFxxxxxx */
1620
1621 /* dma_alloc_xxx pages are physically and virtually contiguous */
1622
1623 return DMA_MEM_TYPE_DMA;
1624 }
1625
1626 /* Technically, we could add one more classification. Addresses between VMALLOC_END */
1627 /* and the beginning of the DMA virtual address could be considered to be I/O space. */
1628 /* Right now, nobody cares about this particular classification, so we ignore it. */
1629
1630 if (is_vmalloc_addr(addr)) {
1631 /* Address comes from the vmalloc'd region. Pages are virtually */
1632 /* contiguous but NOT physically contiguous */
1633
1634 return DMA_MEM_TYPE_VMALLOC;
1635 }
1636
1637 if (addrVal >= PAGE_OFFSET) {
1638 /* PAGE_OFFSET is typically 0xC0000000 */
1639
1640 /* kmalloc'd pages are physically contiguous */
1641
1642 return DMA_MEM_TYPE_KMALLOC;
1643 }
1644
1645 return DMA_MEM_TYPE_USER;
1646}
1647
1648EXPORT_SYMBOL(dma_mem_type);
1649
1650/****************************************************************************/
1651/**
1652* Looks at a memory address and determines if we support DMA'ing to/from
1653* that type of memory.
1654*
1655* @return boolean -
1656* return value != 0 means dma supported
1657* return value == 0 means dma not supported
1658*/
1659/****************************************************************************/
1660
1661int dma_mem_supports_dma(void *addr)
1662{
1663 DMA_MemType_t memType = dma_mem_type(addr);
1664
1665 return (memType == DMA_MEM_TYPE_DMA)
1666#if ALLOW_MAP_OF_KMALLOC_MEMORY
1667 || (memType == DMA_MEM_TYPE_KMALLOC)
1668#endif
1669 || (memType == DMA_MEM_TYPE_USER);
1670}
1671
1672EXPORT_SYMBOL(dma_mem_supports_dma);
1673
1674/****************************************************************************/
1675/**
1676* Maps in a memory region such that it can be used for performing a DMA.
1677*
1678* @return
1679*/
1680/****************************************************************************/
1681
1682int dma_map_start(DMA_MemMap_t *memMap, /* Stores state information about the map */
1683 enum dma_data_direction dir /* Direction that the mapping will be going */
1684 ) {
1685 int rc;
1686
1687 down(&memMap->lock);
1688
1689 DMA_MAP_PRINT("memMap: %p\n", memMap);
1690
1691 if (memMap->inUse) {
1692 printk(KERN_ERR "%s: memory map %p is already being used\n",
1693 __func__, memMap);
1694 rc = -EBUSY;
1695 goto out;
1696 }
1697
1698 memMap->inUse = 1;
1699 memMap->dir = dir;
1700 memMap->numRegionsUsed = 0;
1701
1702 rc = 0;
1703
1704out:
1705
1706 DMA_MAP_PRINT("returning %d", rc);
1707
1708 up(&memMap->lock);
1709
1710 return rc;
1711}
1712
1713EXPORT_SYMBOL(dma_map_start);
1714
1715/****************************************************************************/
1716/**
1717* Adds a segment of memory to a memory map. Each segment is both
1718* physically and virtually contiguous.
1719*
1720* @return 0 on success, error code otherwise.
1721*/
1722/****************************************************************************/
1723
1724static int dma_map_add_segment(DMA_MemMap_t *memMap, /* Stores state information about the map */
1725 DMA_Region_t *region, /* Region that the segment belongs to */
1726 void *virtAddr, /* Virtual address of the segment being added */
1727 dma_addr_t physAddr, /* Physical address of the segment being added */
1728 size_t numBytes /* Number of bytes of the segment being added */
1729 ) {
1730 DMA_Segment_t *segment;
1731
1732 DMA_MAP_PRINT("memMap:%p va:%p pa:0x%x #:%d\n", memMap, virtAddr,
1733 physAddr, numBytes);
1734
1735 /* Sanity check */
1736
1737 if (((unsigned long)virtAddr < (unsigned long)region->virtAddr)
1738 || (((unsigned long)virtAddr + numBytes)) >
1739 ((unsigned long)region->virtAddr + region->numBytes)) {
1740 printk(KERN_ERR
1741 "%s: virtAddr %p is outside region @ %p len: %d\n",
1742 __func__, virtAddr, region->virtAddr, region->numBytes);
1743 return -EINVAL;
1744 }
1745
1746 if (region->numSegmentsUsed > 0) {
1747 /* Check to see if this segment is physically contiguous with the previous one */
1748
1749 segment = &region->segment[region->numSegmentsUsed - 1];
1750
1751 if ((segment->physAddr + segment->numBytes) == physAddr) {
1752 /* It is - just add on to the end */
1753
1754 DMA_MAP_PRINT("appending %d bytes to last segment\n",
1755 numBytes);
1756
1757 segment->numBytes += numBytes;
1758
1759 return 0;
1760 }
1761 }
1762
1763 /* Reallocate to hold more segments, if required. */
1764
1765 if (region->numSegmentsUsed >= region->numSegmentsAllocated) {
1766 DMA_Segment_t *newSegment;
1767 size_t oldSize =
1768 region->numSegmentsAllocated * sizeof(*newSegment);
1769 int newAlloc = region->numSegmentsAllocated + 4;
1770 size_t newSize = newAlloc * sizeof(*newSegment);
1771
1772 newSegment = kmalloc(newSize, GFP_KERNEL);
1773 if (newSegment == NULL) {
1774 return -ENOMEM;
1775 }
1776 memcpy(newSegment, region->segment, oldSize);
1777 memset(&((uint8_t *) newSegment)[oldSize], 0,
1778 newSize - oldSize);
1779 kfree(region->segment);
1780
1781 region->numSegmentsAllocated = newAlloc;
1782 region->segment = newSegment;
1783 }
1784
1785 segment = &region->segment[region->numSegmentsUsed];
1786 region->numSegmentsUsed++;
1787
1788 segment->virtAddr = virtAddr;
1789 segment->physAddr = physAddr;
1790 segment->numBytes = numBytes;
1791
1792 DMA_MAP_PRINT("returning success\n");
1793
1794 return 0;
1795}
1796
1797/****************************************************************************/
1798/**
1799* Adds a region of memory to a memory map. Each region is virtually
1800* contiguous, but not necessarily physically contiguous.
1801*
1802* @return 0 on success, error code otherwise.
1803*/
1804/****************************************************************************/
1805
1806int dma_map_add_region(DMA_MemMap_t *memMap, /* Stores state information about the map */
1807 void *mem, /* Virtual address that we want to get a map of */
1808 size_t numBytes /* Number of bytes being mapped */
1809 ) {
1810 unsigned long addr = (unsigned long)mem;
1811 unsigned int offset;
1812 int rc = 0;
1813 DMA_Region_t *region;
1814 dma_addr_t physAddr;
1815
1816 down(&memMap->lock);
1817
1818 DMA_MAP_PRINT("memMap:%p va:%p #:%d\n", memMap, mem, numBytes);
1819
1820 if (!memMap->inUse) {
1821 printk(KERN_ERR "%s: Make sure you call dma_map_start first\n",
1822 __func__);
1823 rc = -EINVAL;
1824 goto out;
1825 }
1826
1827 /* Reallocate to hold more regions. */
1828
1829 if (memMap->numRegionsUsed >= memMap->numRegionsAllocated) {
1830 DMA_Region_t *newRegion;
1831 size_t oldSize =
1832 memMap->numRegionsAllocated * sizeof(*newRegion);
1833 int newAlloc = memMap->numRegionsAllocated + 4;
1834 size_t newSize = newAlloc * sizeof(*newRegion);
1835
1836 newRegion = kmalloc(newSize, GFP_KERNEL);
1837 if (newRegion == NULL) {
1838 rc = -ENOMEM;
1839 goto out;
1840 }
1841 memcpy(newRegion, memMap->region, oldSize);
1842 memset(&((uint8_t *) newRegion)[oldSize], 0, newSize - oldSize);
1843
1844 kfree(memMap->region);
1845
1846 memMap->numRegionsAllocated = newAlloc;
1847 memMap->region = newRegion;
1848 }
1849
1850 region = &memMap->region[memMap->numRegionsUsed];
1851 memMap->numRegionsUsed++;
1852
1853 offset = addr & ~PAGE_MASK;
1854
1855 region->memType = dma_mem_type(mem);
1856 region->virtAddr = mem;
1857 region->numBytes = numBytes;
1858 region->numSegmentsUsed = 0;
1859 region->numLockedPages = 0;
1860 region->lockedPages = NULL;
1861
1862 switch (region->memType) {
1863 case DMA_MEM_TYPE_VMALLOC:
1864 {
1865 atomic_inc(&gDmaStatMemTypeVmalloc);
1866
1867 /* printk(KERN_ERR "%s: vmalloc'd pages are not supported\n", __func__); */
1868
1869 /* vmalloc'd pages are not physically contiguous */
1870
1871 rc = -EINVAL;
1872 break;
1873 }
1874
1875 case DMA_MEM_TYPE_KMALLOC:
1876 {
1877 atomic_inc(&gDmaStatMemTypeKmalloc);
1878
1879 /* kmalloc'd pages are physically contiguous, so they'll have exactly */
1880 /* one segment */
1881
1882#if ALLOW_MAP_OF_KMALLOC_MEMORY
1883 physAddr =
1884 dma_map_single(NULL, mem, numBytes, memMap->dir);
1885 rc = dma_map_add_segment(memMap, region, mem, physAddr,
1886 numBytes);
1887#else
1888 rc = -EINVAL;
1889#endif
1890 break;
1891 }
1892
1893 case DMA_MEM_TYPE_DMA:
1894 {
1895 /* dma_alloc_xxx pages are physically contiguous */
1896
1897 atomic_inc(&gDmaStatMemTypeCoherent);
1898
1899 physAddr = (vmalloc_to_pfn(mem) << PAGE_SHIFT) + offset;
1900
1901 dma_sync_single_for_cpu(NULL, physAddr, numBytes,
1902 memMap->dir);
1903 rc = dma_map_add_segment(memMap, region, mem, physAddr,
1904 numBytes);
1905 break;
1906 }
1907
1908 case DMA_MEM_TYPE_USER:
1909 {
1910 size_t firstPageOffset;
1911 size_t firstPageSize;
1912 struct page **pages;
1913 struct task_struct *userTask;
1914
1915 atomic_inc(&gDmaStatMemTypeUser);
1916
1917#if 1
1918 /* If the pages are user pages, then the dma_mem_map_set_user_task function */
1919 /* must have been previously called. */
1920
1921 if (memMap->userTask == NULL) {
1922 printk(KERN_ERR
1923 "%s: must call dma_mem_map_set_user_task when using user-mode memory\n",
1924 __func__);
1925 return -EINVAL;
1926 }
1927
1928 /* User pages need to be locked. */
1929
1930 firstPageOffset =
1931 (unsigned long)region->virtAddr & (PAGE_SIZE - 1);
1932 firstPageSize = PAGE_SIZE - firstPageOffset;
1933
1934 region->numLockedPages = (firstPageOffset
1935 + region->numBytes +
1936 PAGE_SIZE - 1) / PAGE_SIZE;
1937 pages =
1938 kmalloc(region->numLockedPages *
1939 sizeof(struct page *), GFP_KERNEL);
1940
1941 if (pages == NULL) {
1942 region->numLockedPages = 0;
1943 return -ENOMEM;
1944 }
1945
1946 userTask = memMap->userTask;
1947
1948 down_read(&userTask->mm->mmap_sem);
1949 rc = get_user_pages(userTask, /* task */
1950 userTask->mm, /* mm */
1951 (unsigned long)region->virtAddr, /* start */
1952 region->numLockedPages, /* len */
1953 memMap->dir == DMA_FROM_DEVICE, /* write */
1954 0, /* force */
1955 pages, /* pages (array of pointers to page) */
1956 NULL); /* vmas */
1957 up_read(&userTask->mm->mmap_sem);
1958
1959 if (rc != region->numLockedPages) {
1960 kfree(pages);
1961 region->numLockedPages = 0;
1962
1963 if (rc >= 0) {
1964 rc = -EINVAL;
1965 }
1966 } else {
1967 uint8_t *virtAddr = region->virtAddr;
1968 size_t bytesRemaining;
1969 int pageIdx;
1970
1971 rc = 0; /* Since get_user_pages returns +ve number */
1972
1973 region->lockedPages = pages;
1974
1975 /* We've locked the user pages. Now we need to walk them and figure */
1976 /* out the physical addresses. */
1977
1978 /* The first page may be partial */
1979
1980 dma_map_add_segment(memMap,
1981 region,
1982 virtAddr,
1983 PFN_PHYS(page_to_pfn
1984 (pages[0])) +
1985 firstPageOffset,
1986 firstPageSize);
1987
1988 virtAddr += firstPageSize;
1989 bytesRemaining =
1990 region->numBytes - firstPageSize;
1991
1992 for (pageIdx = 1;
1993 pageIdx < region->numLockedPages;
1994 pageIdx++) {
1995 size_t bytesThisPage =
1996 (bytesRemaining >
1997 PAGE_SIZE ? PAGE_SIZE :
1998 bytesRemaining);
1999
2000 DMA_MAP_PRINT
2001 ("pageIdx:%d pages[pageIdx]=%p pfn=%u phys=%u\n",
2002 pageIdx, pages[pageIdx],
2003 page_to_pfn(pages[pageIdx]),
2004 PFN_PHYS(page_to_pfn
2005 (pages[pageIdx])));
2006
2007 dma_map_add_segment(memMap,
2008 region,
2009 virtAddr,
2010 PFN_PHYS(page_to_pfn
2011 (pages
2012 [pageIdx])),
2013 bytesThisPage);
2014
2015 virtAddr += bytesThisPage;
2016 bytesRemaining -= bytesThisPage;
2017 }
2018 }
2019#else
2020 printk(KERN_ERR
2021 "%s: User mode pages are not yet supported\n",
2022 __func__);
2023
2024 /* user pages are not physically contiguous */
2025
2026 rc = -EINVAL;
2027#endif
2028 break;
2029 }
2030
2031 default:
2032 {
2033 printk(KERN_ERR "%s: Unsupported memory type: %d\n",
2034 __func__, region->memType);
2035
2036 rc = -EINVAL;
2037 break;
2038 }
2039 }
2040
2041 if (rc != 0) {
2042 memMap->numRegionsUsed--;
2043 }
2044
2045out:
2046
2047 DMA_MAP_PRINT("returning %d\n", rc);
2048
2049 up(&memMap->lock);
2050
2051 return rc;
2052}
2053
2054EXPORT_SYMBOL(dma_map_add_segment);
2055
2056/****************************************************************************/
2057/**
2058* Maps in a memory region such that it can be used for performing a DMA.
2059*
2060* @return 0 on success, error code otherwise.
2061*/
2062/****************************************************************************/
2063
2064int dma_map_mem(DMA_MemMap_t *memMap, /* Stores state information about the map */
2065 void *mem, /* Virtual address that we want to get a map of */
2066 size_t numBytes, /* Number of bytes being mapped */
2067 enum dma_data_direction dir /* Direction that the mapping will be going */
2068 ) {
2069 int rc;
2070
2071 rc = dma_map_start(memMap, dir);
2072 if (rc == 0) {
2073 rc = dma_map_add_region(memMap, mem, numBytes);
2074 if (rc < 0) {
2075 /* Since the add fails, this function will fail, and the caller won't */
2076 /* call unmap, so we need to do it here. */
2077
2078 dma_unmap(memMap, 0);
2079 }
2080 }
2081
2082 return rc;
2083}
2084
2085EXPORT_SYMBOL(dma_map_mem);
2086
2087/****************************************************************************/
2088/**
2089* Setup a descriptor ring for a given memory map.
2090*
2091* It is assumed that the descriptor ring has already been initialized, and
2092* this routine will only reallocate a new descriptor ring if the existing
2093* one is too small.
2094*
2095* @return 0 on success, error code otherwise.
2096*/
2097/****************************************************************************/
2098
2099int dma_map_create_descriptor_ring(DMA_Device_t dev, /* DMA device (where the ring is stored) */
2100 DMA_MemMap_t *memMap, /* Memory map that will be used */
2101 dma_addr_t devPhysAddr /* Physical address of device */
2102 ) {
2103 int rc;
2104 int numDescriptors;
2105 DMA_DeviceAttribute_t *devAttr;
2106 DMA_Region_t *region;
2107 DMA_Segment_t *segment;
2108 dma_addr_t srcPhysAddr;
2109 dma_addr_t dstPhysAddr;
2110 int regionIdx;
2111 int segmentIdx;
2112
2113 devAttr = &DMA_gDeviceAttribute[dev];
2114
2115 down(&memMap->lock);
2116
2117 /* Figure out how many descriptors we need */
2118
2119 numDescriptors = 0;
2120 for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) {
2121 region = &memMap->region[regionIdx];
2122
2123 for (segmentIdx = 0; segmentIdx < region->numSegmentsUsed;
2124 segmentIdx++) {
2125 segment = &region->segment[segmentIdx];
2126
2127 if (memMap->dir == DMA_TO_DEVICE) {
2128 srcPhysAddr = segment->physAddr;
2129 dstPhysAddr = devPhysAddr;
2130 } else {
2131 srcPhysAddr = devPhysAddr;
2132 dstPhysAddr = segment->physAddr;
2133 }
2134
2135 rc =
2136 dma_calculate_descriptor_count(dev, srcPhysAddr,
2137 dstPhysAddr,
2138 segment->
2139 numBytes);
2140 if (rc < 0) {
2141 printk(KERN_ERR
2142 "%s: dma_calculate_descriptor_count failed: %d\n",
2143 __func__, rc);
2144 goto out;
2145 }
2146 numDescriptors += rc;
2147 }
2148 }
2149
2150 /* Adjust the size of the ring, if it isn't big enough */
2151
2152 if (numDescriptors > devAttr->ring.descriptorsAllocated) {
2153 dma_free_descriptor_ring(&devAttr->ring);
2154 rc =
2155 dma_alloc_descriptor_ring(&devAttr->ring,
2156 numDescriptors);
2157 if (rc < 0) {
2158 printk(KERN_ERR
2159 "%s: dma_alloc_descriptor_ring failed: %d\n",
2160 __func__, rc);
2161 goto out;
2162 }
2163 } else {
2164 rc =
2165 dma_init_descriptor_ring(&devAttr->ring,
2166 numDescriptors);
2167 if (rc < 0) {
2168 printk(KERN_ERR
2169 "%s: dma_init_descriptor_ring failed: %d\n",
2170 __func__, rc);
2171 goto out;
2172 }
2173 }
2174
2175 /* Populate the descriptors */
2176
2177 for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) {
2178 region = &memMap->region[regionIdx];
2179
2180 for (segmentIdx = 0; segmentIdx < region->numSegmentsUsed;
2181 segmentIdx++) {
2182 segment = &region->segment[segmentIdx];
2183
2184 if (memMap->dir == DMA_TO_DEVICE) {
2185 srcPhysAddr = segment->physAddr;
2186 dstPhysAddr = devPhysAddr;
2187 } else {
2188 srcPhysAddr = devPhysAddr;
2189 dstPhysAddr = segment->physAddr;
2190 }
2191
2192 rc =
2193 dma_add_descriptors(&devAttr->ring, dev,
2194 srcPhysAddr, dstPhysAddr,
2195 segment->numBytes);
2196 if (rc < 0) {
2197 printk(KERN_ERR
2198 "%s: dma_add_descriptors failed: %d\n",
2199 __func__, rc);
2200 goto out;
2201 }
2202 }
2203 }
2204
2205 rc = 0;
2206
2207out:
2208
2209 up(&memMap->lock);
2210 return rc;
2211}
2212
2213EXPORT_SYMBOL(dma_map_create_descriptor_ring);
2214
2215/****************************************************************************/
2216/**
2217* Maps in a memory region such that it can be used for performing a DMA.
2218*
2219* @return
2220*/
2221/****************************************************************************/
2222
2223int dma_unmap(DMA_MemMap_t *memMap, /* Stores state information about the map */
2224 int dirtied /* non-zero if any of the pages were modified */
2225 ) {
2226
2227 int rc = 0;
2228 int regionIdx;
2229 int segmentIdx;
2230 DMA_Region_t *region;
2231 DMA_Segment_t *segment;
2232
2233 down(&memMap->lock);
2234
2235 for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) {
2236 region = &memMap->region[regionIdx];
2237
2238 for (segmentIdx = 0; segmentIdx < region->numSegmentsUsed;
2239 segmentIdx++) {
2240 segment = &region->segment[segmentIdx];
2241
2242 switch (region->memType) {
2243 case DMA_MEM_TYPE_VMALLOC:
2244 {
2245 printk(KERN_ERR
2246 "%s: vmalloc'd pages are not yet supported\n",
2247 __func__);
2248 rc = -EINVAL;
2249 goto out;
2250 }
2251
2252 case DMA_MEM_TYPE_KMALLOC:
2253 {
2254#if ALLOW_MAP_OF_KMALLOC_MEMORY
2255 dma_unmap_single(NULL,
2256 segment->physAddr,
2257 segment->numBytes,
2258 memMap->dir);
2259#endif
2260 break;
2261 }
2262
2263 case DMA_MEM_TYPE_DMA:
2264 {
2265 dma_sync_single_for_cpu(NULL,
2266 segment->
2267 physAddr,
2268 segment->
2269 numBytes,
2270 memMap->dir);
2271 break;
2272 }
2273
2274 case DMA_MEM_TYPE_USER:
2275 {
2276 /* Nothing to do here. */
2277
2278 break;
2279 }
2280
2281 default:
2282 {
2283 printk(KERN_ERR
2284 "%s: Unsupported memory type: %d\n",
2285 __func__, region->memType);
2286 rc = -EINVAL;
2287 goto out;
2288 }
2289 }
2290
2291 segment->virtAddr = NULL;
2292 segment->physAddr = 0;
2293 segment->numBytes = 0;
2294 }
2295
2296 if (region->numLockedPages > 0) {
2297 int pageIdx;
2298
2299 /* Some user pages were locked. We need to go and unlock them now. */
2300
2301 for (pageIdx = 0; pageIdx < region->numLockedPages;
2302 pageIdx++) {
2303 struct page *page =
2304 region->lockedPages[pageIdx];
2305
2306 if (memMap->dir == DMA_FROM_DEVICE) {
2307 SetPageDirty(page);
2308 }
2309 page_cache_release(page);
2310 }
2311 kfree(region->lockedPages);
2312 region->numLockedPages = 0;
2313 region->lockedPages = NULL;
2314 }
2315
2316 region->memType = DMA_MEM_TYPE_NONE;
2317 region->virtAddr = NULL;
2318 region->numBytes = 0;
2319 region->numSegmentsUsed = 0;
2320 }
2321 memMap->userTask = NULL;
2322 memMap->numRegionsUsed = 0;
2323 memMap->inUse = 0;
2324
2325out:
2326 up(&memMap->lock);
2327
2328 return rc;
2329}
2330
2331EXPORT_SYMBOL(dma_unmap);
diff --git a/arch/arm/mach-bcmring/include/mach/dma.h b/arch/arm/mach-bcmring/include/mach/dma.h
index 1f2c5319c056..72543781207b 100644
--- a/arch/arm/mach-bcmring/include/mach/dma.h
+++ b/arch/arm/mach-bcmring/include/mach/dma.h
@@ -26,15 +26,9 @@
26/* ---- Include Files ---------------------------------------------------- */ 26/* ---- Include Files ---------------------------------------------------- */
27 27
28#include <linux/kernel.h> 28#include <linux/kernel.h>
29#include <linux/wait.h>
30#include <linux/semaphore.h> 29#include <linux/semaphore.h>
31#include <csp/dmacHw.h> 30#include <csp/dmacHw.h>
32#include <mach/timer.h> 31#include <mach/timer.h>
33#include <linux/scatterlist.h>
34#include <linux/dma-mapping.h>
35#include <linux/mm.h>
36#include <linux/vmalloc.h>
37#include <linux/pagemap.h>
38 32
39/* ---- Constants and Types ---------------------------------------------- */ 33/* ---- Constants and Types ---------------------------------------------- */
40 34
@@ -113,78 +107,6 @@ typedef struct {
113 107
114/**************************************************************************** 108/****************************************************************************
115* 109*
116* The DMA_MemType_t and DMA_MemMap_t are helper structures used to setup
117* DMA chains from a variety of memory sources.
118*
119*****************************************************************************/
120
121#define DMA_MEM_MAP_MIN_SIZE 4096 /* Pages less than this size are better */
122 /* off not being DMA'd. */
123
124typedef enum {
125 DMA_MEM_TYPE_NONE, /* Not a valid setting */
126 DMA_MEM_TYPE_VMALLOC, /* Memory came from vmalloc call */
127 DMA_MEM_TYPE_KMALLOC, /* Memory came from kmalloc call */
128 DMA_MEM_TYPE_DMA, /* Memory came from dma_alloc_xxx call */
129 DMA_MEM_TYPE_USER, /* Memory came from user space. */
130
131} DMA_MemType_t;
132
133/* A segment represents a physically and virtually contiguous chunk of memory. */
134/* i.e. each segment can be DMA'd */
135/* A user of the DMA code will add memory regions. Each region may need to be */
136/* represented by one or more segments. */
137
138typedef struct {
139 void *virtAddr; /* Virtual address used for this segment */
140 dma_addr_t physAddr; /* Physical address this segment maps to */
141 size_t numBytes; /* Size of the segment, in bytes */
142
143} DMA_Segment_t;
144
145/* A region represents a virtually contiguous chunk of memory, which may be */
146/* made up of multiple segments. */
147
148typedef struct {
149 DMA_MemType_t memType;
150 void *virtAddr;
151 size_t numBytes;
152
153 /* Each region (virtually contiguous) consists of one or more segments. Each */
154 /* segment is virtually and physically contiguous. */
155
156 int numSegmentsUsed;
157 int numSegmentsAllocated;
158 DMA_Segment_t *segment;
159
160 /* When a region corresponds to user memory, we need to lock all of the pages */
161 /* down before we can figure out the physical addresses. The lockedPage array contains */
162 /* the pages that were locked, and which subsequently need to be unlocked once the */
163 /* memory is unmapped. */
164
165 unsigned numLockedPages;
166 struct page **lockedPages;
167
168} DMA_Region_t;
169
170typedef struct {
171 int inUse; /* Is this mapping currently being used? */
172 struct semaphore lock; /* Acquired when using this structure */
173 enum dma_data_direction dir; /* Direction this transfer is intended for */
174
175 /* In the event that we're mapping user memory, we need to know which task */
176 /* the memory is for, so that we can obtain the correct mm locks. */
177
178 struct task_struct *userTask;
179
180 int numRegionsUsed;
181 int numRegionsAllocated;
182 DMA_Region_t *region;
183
184} DMA_MemMap_t;
185
186/****************************************************************************
187*
188* The DMA_DeviceAttribute_t contains information which describes a 110* The DMA_DeviceAttribute_t contains information which describes a
189* particular DMA device (or peripheral). 111* particular DMA device (or peripheral).
190* 112*
@@ -570,124 +492,6 @@ int dma_alloc_double_dst_descriptors(DMA_Handle_t handle, /* DMA Handle */
570 492
571/****************************************************************************/ 493/****************************************************************************/
572/** 494/**
573* Initializes a DMA_MemMap_t data structure
574*/
575/****************************************************************************/
576
577int dma_init_mem_map(DMA_MemMap_t *memMap /* Stores state information about the map */
578 );
579
580/****************************************************************************/
581/**
582* Releases any memory currently being held by a memory mapping structure.
583*/
584/****************************************************************************/
585
586int dma_term_mem_map(DMA_MemMap_t *memMap /* Stores state information about the map */
587 );
588
589/****************************************************************************/
590/**
591* Looks at a memory address and categorizes it.
592*
593* @return One of the values from the DMA_MemType_t enumeration.
594*/
595/****************************************************************************/
596
597DMA_MemType_t dma_mem_type(void *addr);
598
599/****************************************************************************/
600/**
601* Sets the process (aka userTask) associated with a mem map. This is
602* required if user-mode segments will be added to the mapping.
603*/
604/****************************************************************************/
605
606static inline void dma_mem_map_set_user_task(DMA_MemMap_t *memMap,
607 struct task_struct *task)
608{
609 memMap->userTask = task;
610}
611
612/****************************************************************************/
613/**
614* Looks at a memory address and determines if we support DMA'ing to/from
615* that type of memory.
616*
617* @return boolean -
618* return value != 0 means dma supported
619* return value == 0 means dma not supported
620*/
621/****************************************************************************/
622
623int dma_mem_supports_dma(void *addr);
624
625/****************************************************************************/
626/**
627* Initializes a memory map for use. Since this function acquires a
628* sempaphore within the memory map, it is VERY important that dma_unmap
629* be called when you're finished using the map.
630*/
631/****************************************************************************/
632
633int dma_map_start(DMA_MemMap_t *memMap, /* Stores state information about the map */
634 enum dma_data_direction dir /* Direction that the mapping will be going */
635 );
636
637/****************************************************************************/
638/**
639* Adds a segment of memory to a memory map.
640*
641* @return 0 on success, error code otherwise.
642*/
643/****************************************************************************/
644
645int dma_map_add_region(DMA_MemMap_t *memMap, /* Stores state information about the map */
646 void *mem, /* Virtual address that we want to get a map of */
647 size_t numBytes /* Number of bytes being mapped */
648 );
649
650/****************************************************************************/
651/**
652* Creates a descriptor ring from a memory mapping.
653*
654* @return 0 on success, error code otherwise.
655*/
656/****************************************************************************/
657
658int dma_map_create_descriptor_ring(DMA_Device_t dev, /* DMA device (where the ring is stored) */
659 DMA_MemMap_t *memMap, /* Memory map that will be used */
660 dma_addr_t devPhysAddr /* Physical address of device */
661 );
662
663/****************************************************************************/
664/**
665* Maps in a memory region such that it can be used for performing a DMA.
666*
667* @return
668*/
669/****************************************************************************/
670
671int dma_map_mem(DMA_MemMap_t *memMap, /* Stores state information about the map */
672 void *addr, /* Virtual address that we want to get a map of */
673 size_t count, /* Number of bytes being mapped */
674 enum dma_data_direction dir /* Direction that the mapping will be going */
675 );
676
677/****************************************************************************/
678/**
679* Maps in a memory region such that it can be used for performing a DMA.
680*
681* @return
682*/
683/****************************************************************************/
684
685int dma_unmap(DMA_MemMap_t *memMap, /* Stores state information about the map */
686 int dirtied /* non-zero if any of the pages were modified */
687 );
688
689/****************************************************************************/
690/**
691* Initiates a transfer when the descriptors have already been setup. 495* Initiates a transfer when the descriptors have already been setup.
692* 496*
693* This is a special case, and normally, the dma_transfer_xxx functions should 497* This is a special case, and normally, the dma_transfer_xxx functions should
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index 6b22b543a83f..d5088900af6c 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -44,7 +44,7 @@
44#include <mach/aemif.h> 44#include <mach/aemif.h>
45#include <mach/spi.h> 45#include <mach/spi.h>
46 46
47#define DA850_EVM_PHY_ID "0:00" 47#define DA850_EVM_PHY_ID "davinci_mdio-0:00"
48#define DA850_LCD_PWR_PIN GPIO_TO_PIN(2, 8) 48#define DA850_LCD_PWR_PIN GPIO_TO_PIN(2, 8)
49#define DA850_LCD_BL_PIN GPIO_TO_PIN(2, 15) 49#define DA850_LCD_BL_PIN GPIO_TO_PIN(2, 15)
50 50
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index 346e1de2f5a8..849311d3cb7c 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -54,7 +54,7 @@ static inline int have_tvp7002(void)
54 return 0; 54 return 0;
55} 55}
56 56
57#define DM365_EVM_PHY_ID "0:01" 57#define DM365_EVM_PHY_ID "davinci_mdio-0:01"
58/* 58/*
59 * A MAX-II CPLD is used for various board control functions. 59 * A MAX-II CPLD is used for various board control functions.
60 */ 60 */
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index a64b49cfedca..1247ecdcf752 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -40,7 +40,7 @@
40#include <mach/usb.h> 40#include <mach/usb.h>
41#include <mach/aemif.h> 41#include <mach/aemif.h>
42 42
43#define DM644X_EVM_PHY_ID "0:01" 43#define DM644X_EVM_PHY_ID "davinci_mdio-0:01"
44#define LXT971_PHY_ID (0x001378e2) 44#define LXT971_PHY_ID (0x001378e2)
45#define LXT971_PHY_MASK (0xfffffff0) 45#define LXT971_PHY_MASK (0xfffffff0)
46 46
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index 64017558860b..872ac69fa049 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -736,7 +736,7 @@ static struct davinci_uart_config uart_config __initdata = {
736 .enabled_uarts = (1 << 0), 736 .enabled_uarts = (1 << 0),
737}; 737};
738 738
739#define DM646X_EVM_PHY_ID "0:01" 739#define DM646X_EVM_PHY_ID "davinci_mdio-0:01"
740/* 740/*
741 * The following EDMA channels/slots are not being used by drivers (for 741 * The following EDMA channels/slots are not being used by drivers (for
742 * example: Timer, GPIO, UART events etc) on dm646x, hence they are being 742 * example: Timer, GPIO, UART events etc) on dm646x, hence they are being
diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c
index 6c4a16415d47..8d34f513d415 100644
--- a/arch/arm/mach-davinci/board-neuros-osd2.c
+++ b/arch/arm/mach-davinci/board-neuros-osd2.c
@@ -39,7 +39,7 @@
39#include <mach/mmc.h> 39#include <mach/mmc.h>
40#include <mach/usb.h> 40#include <mach/usb.h>
41 41
42#define NEUROS_OSD2_PHY_ID "0:01" 42#define NEUROS_OSD2_PHY_ID "davinci_mdio-0:01"
43#define LXT971_PHY_ID 0x001378e2 43#define LXT971_PHY_ID 0x001378e2
44#define LXT971_PHY_MASK 0xfffffff0 44#define LXT971_PHY_MASK 0xfffffff0
45 45
diff --git a/arch/arm/mach-davinci/board-omapl138-hawk.c b/arch/arm/mach-davinci/board-omapl138-hawk.c
index e7c0c7c53493..45e815760a27 100644
--- a/arch/arm/mach-davinci/board-omapl138-hawk.c
+++ b/arch/arm/mach-davinci/board-omapl138-hawk.c
@@ -21,7 +21,7 @@
21#include <mach/da8xx.h> 21#include <mach/da8xx.h>
22#include <mach/mux.h> 22#include <mach/mux.h>
23 23
24#define HAWKBOARD_PHY_ID "0:07" 24#define HAWKBOARD_PHY_ID "davinci_mdio-0:07"
25#define DA850_HAWK_MMCSD_CD_PIN GPIO_TO_PIN(3, 12) 25#define DA850_HAWK_MMCSD_CD_PIN GPIO_TO_PIN(3, 12)
26#define DA850_HAWK_MMCSD_WP_PIN GPIO_TO_PIN(3, 13) 26#define DA850_HAWK_MMCSD_WP_PIN GPIO_TO_PIN(3, 13)
27 27
diff --git a/arch/arm/mach-davinci/board-sffsdr.c b/arch/arm/mach-davinci/board-sffsdr.c
index 0b136a831c59..31da3c5b2ba3 100644
--- a/arch/arm/mach-davinci/board-sffsdr.c
+++ b/arch/arm/mach-davinci/board-sffsdr.c
@@ -42,7 +42,7 @@
42#include <mach/mux.h> 42#include <mach/mux.h>
43#include <mach/usb.h> 43#include <mach/usb.h>
44 44
45#define SFFSDR_PHY_ID "0:01" 45#define SFFSDR_PHY_ID "davinci_mdio-0:01"
46static struct mtd_partition davinci_sffsdr_nandflash_partition[] = { 46static struct mtd_partition davinci_sffsdr_nandflash_partition[] = {
47 /* U-Boot Environment: Block 0 47 /* U-Boot Environment: Block 0
48 * UBL: Block 1 48 * UBL: Block 1
diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c
index 0ed7fdb64efb..992c4c410185 100644
--- a/arch/arm/mach-davinci/da850.c
+++ b/arch/arm/mach-davinci/da850.c
@@ -153,34 +153,6 @@ static struct clk pll1_sysclk3 = {
153 .div_reg = PLLDIV3, 153 .div_reg = PLLDIV3,
154}; 154};
155 155
156static struct clk pll1_sysclk4 = {
157 .name = "pll1_sysclk4",
158 .parent = &pll1_clk,
159 .flags = CLK_PLL,
160 .div_reg = PLLDIV4,
161};
162
163static struct clk pll1_sysclk5 = {
164 .name = "pll1_sysclk5",
165 .parent = &pll1_clk,
166 .flags = CLK_PLL,
167 .div_reg = PLLDIV5,
168};
169
170static struct clk pll1_sysclk6 = {
171 .name = "pll0_sysclk6",
172 .parent = &pll0_clk,
173 .flags = CLK_PLL,
174 .div_reg = PLLDIV6,
175};
176
177static struct clk pll1_sysclk7 = {
178 .name = "pll1_sysclk7",
179 .parent = &pll1_clk,
180 .flags = CLK_PLL,
181 .div_reg = PLLDIV7,
182};
183
184static struct clk i2c0_clk = { 156static struct clk i2c0_clk = {
185 .name = "i2c0", 157 .name = "i2c0",
186 .parent = &pll0_aux_clk, 158 .parent = &pll0_aux_clk,
@@ -397,10 +369,6 @@ static struct clk_lookup da850_clks[] = {
397 CLK(NULL, "pll1_aux", &pll1_aux_clk), 369 CLK(NULL, "pll1_aux", &pll1_aux_clk),
398 CLK(NULL, "pll1_sysclk2", &pll1_sysclk2), 370 CLK(NULL, "pll1_sysclk2", &pll1_sysclk2),
399 CLK(NULL, "pll1_sysclk3", &pll1_sysclk3), 371 CLK(NULL, "pll1_sysclk3", &pll1_sysclk3),
400 CLK(NULL, "pll1_sysclk4", &pll1_sysclk4),
401 CLK(NULL, "pll1_sysclk5", &pll1_sysclk5),
402 CLK(NULL, "pll1_sysclk6", &pll1_sysclk6),
403 CLK(NULL, "pll1_sysclk7", &pll1_sysclk7),
404 CLK("i2c_davinci.1", NULL, &i2c0_clk), 372 CLK("i2c_davinci.1", NULL, &i2c0_clk),
405 CLK(NULL, "timer0", &timerp64_0_clk), 373 CLK(NULL, "timer0", &timerp64_0_clk),
406 CLK("watchdog", NULL, &timerp64_1_clk), 374 CLK("watchdog", NULL, &timerp64_1_clk),
diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c
index dd1429ae6405..bda7aca04ca0 100644
--- a/arch/arm/mach-dove/common.c
+++ b/arch/arm/mach-dove/common.c
@@ -28,6 +28,7 @@
28#include <asm/mach/arch.h> 28#include <asm/mach/arch.h>
29#include <linux/irq.h> 29#include <linux/irq.h>
30#include <plat/time.h> 30#include <plat/time.h>
31#include <plat/ehci-orion.h>
31#include <plat/common.h> 32#include <plat/common.h>
32#include <plat/addr-map.h> 33#include <plat/addr-map.h>
33#include "common.h" 34#include "common.h"
@@ -71,7 +72,7 @@ void __init dove_map_io(void)
71 ****************************************************************************/ 72 ****************************************************************************/
72void __init dove_ehci0_init(void) 73void __init dove_ehci0_init(void)
73{ 74{
74 orion_ehci_init(DOVE_USB0_PHYS_BASE, IRQ_DOVE_USB0); 75 orion_ehci_init(DOVE_USB0_PHYS_BASE, IRQ_DOVE_USB0, EHCI_PHY_NA);
75} 76}
76 77
77/***************************************************************************** 78/*****************************************************************************
diff --git a/arch/arm/mach-ep93xx/vision_ep9307.c b/arch/arm/mach-ep93xx/vision_ep9307.c
index 03dd4012043e..d5fb44f16d31 100644
--- a/arch/arm/mach-ep93xx/vision_ep9307.c
+++ b/arch/arm/mach-ep93xx/vision_ep9307.c
@@ -32,6 +32,7 @@
32#include <mach/hardware.h> 32#include <mach/hardware.h>
33#include <mach/fb.h> 33#include <mach/fb.h>
34#include <mach/ep93xx_spi.h> 34#include <mach/ep93xx_spi.h>
35#include <mach/gpio-ep93xx.h>
35 36
36#include <asm/mach-types.h> 37#include <asm/mach-types.h>
37#include <asm/mach/map.h> 38#include <asm/mach/map.h>
@@ -153,7 +154,6 @@ static struct i2c_board_info vision_i2c_info[] __initdata = {
153 }, { 154 }, {
154 I2C_BOARD_INFO("pca9539", 0x74), 155 I2C_BOARD_INFO("pca9539", 0x74),
155 .platform_data = &pca953x_74_gpio_data, 156 .platform_data = &pca953x_74_gpio_data,
156 .irq = gpio_to_irq(EP93XX_GPIO_LINE_F(7)),
157 }, { 157 }, {
158 I2C_BOARD_INFO("pca9539", 0x75), 158 I2C_BOARD_INFO("pca9539", 0x75),
159 .platform_data = &pca953x_75_gpio_data, 159 .platform_data = &pca953x_75_gpio_data,
@@ -348,6 +348,8 @@ static void __init vision_init_machine(void)
348 "pca9539:74")) 348 "pca9539:74"))
349 pr_warn("cannot request interrupt gpio for pca9539:74\n"); 349 pr_warn("cannot request interrupt gpio for pca9539:74\n");
350 350
351 vision_i2c_info[1].irq = gpio_to_irq(EP93XX_GPIO_LINE_F(7));
352
351 ep93xx_register_i2c(&vision_i2c_gpio_data, vision_i2c_info, 353 ep93xx_register_i2c(&vision_i2c_gpio_data, vision_i2c_info,
352 ARRAY_SIZE(vision_i2c_info)); 354 ARRAY_SIZE(vision_i2c_info));
353 ep93xx_register_spi(&vision_spi_master, vision_spi_board_info, 355 ep93xx_register_spi(&vision_spi_master, vision_spi_board_info,
diff --git a/arch/arm/mach-exynos/clock-exynos4210.c b/arch/arm/mach-exynos/clock-exynos4210.c
index a5823a7f249e..13312ccb2d93 100644
--- a/arch/arm/mach-exynos/clock-exynos4210.c
+++ b/arch/arm/mach-exynos/clock-exynos4210.c
@@ -32,6 +32,7 @@
32 32
33#include "common.h" 33#include "common.h"
34 34
35#ifdef CONFIG_PM_SLEEP
35static struct sleep_save exynos4210_clock_save[] = { 36static struct sleep_save exynos4210_clock_save[] = {
36 SAVE_ITEM(S5P_CLKSRC_IMAGE), 37 SAVE_ITEM(S5P_CLKSRC_IMAGE),
37 SAVE_ITEM(S5P_CLKSRC_LCD1), 38 SAVE_ITEM(S5P_CLKSRC_LCD1),
@@ -42,6 +43,7 @@ static struct sleep_save exynos4210_clock_save[] = {
42 SAVE_ITEM(S5P_CLKGATE_IP_LCD1), 43 SAVE_ITEM(S5P_CLKGATE_IP_LCD1),
43 SAVE_ITEM(S5P_CLKGATE_IP_PERIR_4210), 44 SAVE_ITEM(S5P_CLKGATE_IP_PERIR_4210),
44}; 45};
46#endif
45 47
46static struct clksrc_clk *sysclks[] = { 48static struct clksrc_clk *sysclks[] = {
47 /* nothing here yet */ 49 /* nothing here yet */
diff --git a/arch/arm/mach-exynos/clock-exynos4212.c b/arch/arm/mach-exynos/clock-exynos4212.c
index 26a668b0d101..48af28566fa1 100644
--- a/arch/arm/mach-exynos/clock-exynos4212.c
+++ b/arch/arm/mach-exynos/clock-exynos4212.c
@@ -32,12 +32,14 @@
32 32
33#include "common.h" 33#include "common.h"
34 34
35#ifdef CONFIG_PM_SLEEP
35static struct sleep_save exynos4212_clock_save[] = { 36static struct sleep_save exynos4212_clock_save[] = {
36 SAVE_ITEM(S5P_CLKSRC_IMAGE), 37 SAVE_ITEM(S5P_CLKSRC_IMAGE),
37 SAVE_ITEM(S5P_CLKDIV_IMAGE), 38 SAVE_ITEM(S5P_CLKDIV_IMAGE),
38 SAVE_ITEM(S5P_CLKGATE_IP_IMAGE_4212), 39 SAVE_ITEM(S5P_CLKGATE_IP_IMAGE_4212),
39 SAVE_ITEM(S5P_CLKGATE_IP_PERIR_4212), 40 SAVE_ITEM(S5P_CLKGATE_IP_PERIR_4212),
40}; 41};
42#endif
41 43
42static struct clk *clk_src_mpll_user_list[] = { 44static struct clk *clk_src_mpll_user_list[] = {
43 [0] = &clk_fin_mpll, 45 [0] = &clk_fin_mpll,
diff --git a/arch/arm/mach-exynos/clock.c b/arch/arm/mach-exynos/clock.c
index 5a8c42e90005..187287aa57ab 100644
--- a/arch/arm/mach-exynos/clock.c
+++ b/arch/arm/mach-exynos/clock.c
@@ -30,6 +30,7 @@
30 30
31#include "common.h" 31#include "common.h"
32 32
33#ifdef CONFIG_PM_SLEEP
33static struct sleep_save exynos4_clock_save[] = { 34static struct sleep_save exynos4_clock_save[] = {
34 SAVE_ITEM(S5P_CLKDIV_LEFTBUS), 35 SAVE_ITEM(S5P_CLKDIV_LEFTBUS),
35 SAVE_ITEM(S5P_CLKGATE_IP_LEFTBUS), 36 SAVE_ITEM(S5P_CLKGATE_IP_LEFTBUS),
@@ -93,6 +94,7 @@ static struct sleep_save exynos4_clock_save[] = {
93 SAVE_ITEM(S5P_CLKGATE_SCLKCPU), 94 SAVE_ITEM(S5P_CLKGATE_SCLKCPU),
94 SAVE_ITEM(S5P_CLKGATE_IP_CPU), 95 SAVE_ITEM(S5P_CLKGATE_IP_CPU),
95}; 96};
97#endif
96 98
97struct clk clk_sclk_hdmi27m = { 99struct clk clk_sclk_hdmi27m = {
98 .name = "sclk_hdmi27m", 100 .name = "sclk_hdmi27m",
diff --git a/arch/arm/mach-exynos/mach-exynos4-dt.c b/arch/arm/mach-exynos/mach-exynos4-dt.c
index 85fa02767d67..e6b02fdf1b09 100644
--- a/arch/arm/mach-exynos/mach-exynos4-dt.c
+++ b/arch/arm/mach-exynos/mach-exynos4-dt.c
@@ -15,11 +15,13 @@
15#include <linux/serial_core.h> 15#include <linux/serial_core.h>
16 16
17#include <asm/mach/arch.h> 17#include <asm/mach/arch.h>
18#include <asm/hardware/gic.h>
18#include <mach/map.h> 19#include <mach/map.h>
19 20
20#include <plat/cpu.h> 21#include <plat/cpu.h>
21#include <plat/regs-serial.h> 22#include <plat/regs-serial.h>
22#include <plat/exynos4.h> 23
24#include "common.h"
23 25
24/* 26/*
25 * The following lookup table is used to override device names when devices 27 * The following lookup table is used to override device names when devices
@@ -60,7 +62,7 @@ static const struct of_dev_auxdata exynos4210_auxdata_lookup[] __initconst = {
60 62
61static void __init exynos4210_dt_map_io(void) 63static void __init exynos4210_dt_map_io(void)
62{ 64{
63 s5p_init_io(NULL, 0, S5P_VA_CHIPID); 65 exynos_init_io(NULL, 0);
64 s3c24xx_init_clocks(24000000); 66 s3c24xx_init_clocks(24000000);
65} 67}
66 68
@@ -79,7 +81,9 @@ DT_MACHINE_START(EXYNOS4210_DT, "Samsung Exynos4 (Flattened Device Tree)")
79 /* Maintainer: Thomas Abraham <thomas.abraham@linaro.org> */ 81 /* Maintainer: Thomas Abraham <thomas.abraham@linaro.org> */
80 .init_irq = exynos4_init_irq, 82 .init_irq = exynos4_init_irq,
81 .map_io = exynos4210_dt_map_io, 83 .map_io = exynos4210_dt_map_io,
84 .handle_irq = gic_handle_irq,
82 .init_machine = exynos4210_dt_machine_init, 85 .init_machine = exynos4210_dt_machine_init,
83 .timer = &exynos4_timer, 86 .timer = &exynos4_timer,
84 .dt_compat = exynos4210_dt_compat, 87 .dt_compat = exynos4210_dt_compat,
88 .restart = exynos4_restart,
85MACHINE_END 89MACHINE_END
diff --git a/arch/arm/mach-exynos/mach-nuri.c b/arch/arm/mach-exynos/mach-nuri.c
index b895ec031105..435261f83f46 100644
--- a/arch/arm/mach-exynos/mach-nuri.c
+++ b/arch/arm/mach-exynos/mach-nuri.c
@@ -220,14 +220,14 @@ static struct s3c_fb_pd_win nuri_fb_win0 = {
220 .lower_margin = 1, 220 .lower_margin = 1,
221 .hsync_len = 48, 221 .hsync_len = 48,
222 .vsync_len = 3, 222 .vsync_len = 3,
223 .xres = 1280, 223 .xres = 1024,
224 .yres = 800, 224 .yres = 600,
225 .refresh = 60, 225 .refresh = 60,
226 }, 226 },
227 .max_bpp = 24, 227 .max_bpp = 24,
228 .default_bpp = 16, 228 .default_bpp = 16,
229 .virtual_x = 1280, 229 .virtual_x = 1024,
230 .virtual_y = 800, 230 .virtual_y = 2 * 600,
231}; 231};
232 232
233static struct s3c_fb_platdata nuri_fb_pdata __initdata = { 233static struct s3c_fb_platdata nuri_fb_pdata __initdata = {
diff --git a/arch/arm/mach-exynos/mach-universal_c210.c b/arch/arm/mach-exynos/mach-universal_c210.c
index 37ac93e8d6d9..0fc65ffde8ff 100644
--- a/arch/arm/mach-exynos/mach-universal_c210.c
+++ b/arch/arm/mach-exynos/mach-universal_c210.c
@@ -910,7 +910,7 @@ static struct s5p_fimc_isp_info universal_camera_sensors[] = {
910 .bus_type = FIMC_MIPI_CSI2, 910 .bus_type = FIMC_MIPI_CSI2,
911 .board_info = &m5mols_board_info, 911 .board_info = &m5mols_board_info,
912 .i2c_bus_num = 0, 912 .i2c_bus_num = 0,
913 .clk_frequency = 21600000UL, 913 .clk_frequency = 24000000UL,
914 .csi_data_align = 32, 914 .csi_data_align = 32,
915 }, 915 },
916}; 916};
diff --git a/arch/arm/mach-exynos/pm.c b/arch/arm/mach-exynos/pm.c
index a4f61a43c7ba..e19013051772 100644
--- a/arch/arm/mach-exynos/pm.c
+++ b/arch/arm/mach-exynos/pm.c
@@ -206,7 +206,7 @@ static void exynos4_pm_prepare(void)
206 206
207} 207}
208 208
209static int exynos4_pm_add(struct device *dev) 209static int exynos4_pm_add(struct device *dev, struct subsys_interface *sif)
210{ 210{
211 pm_cpu_prep = exynos4_pm_prepare; 211 pm_cpu_prep = exynos4_pm_prepare;
212 pm_cpu_sleep = exynos4_cpu_suspend; 212 pm_cpu_sleep = exynos4_cpu_suspend;
@@ -384,7 +384,9 @@ static void exynos4_pm_resume(void)
384 384
385 exynos4_restore_pll(); 385 exynos4_restore_pll();
386 386
387#ifdef CONFIG_SMP
387 scu_enable(S5P_VA_SCU); 388 scu_enable(S5P_VA_SCU);
389#endif
388 390
389#ifdef CONFIG_CACHE_L2X0 391#ifdef CONFIG_CACHE_L2X0
390 s3c_pm_do_restore_core(exynos4_l2cc_save, ARRAY_SIZE(exynos4_l2cc_save)); 392 s3c_pm_do_restore_core(exynos4_l2cc_save, ARRAY_SIZE(exynos4_l2cc_save));
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index cc15426787b1..77d4852e19f2 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -27,6 +27,7 @@
27#include <plat/cache-feroceon-l2.h> 27#include <plat/cache-feroceon-l2.h>
28#include <plat/mvsdio.h> 28#include <plat/mvsdio.h>
29#include <plat/orion_nand.h> 29#include <plat/orion_nand.h>
30#include <plat/ehci-orion.h>
30#include <plat/common.h> 31#include <plat/common.h>
31#include <plat/time.h> 32#include <plat/time.h>
32#include <plat/addr-map.h> 33#include <plat/addr-map.h>
@@ -73,7 +74,7 @@ unsigned int kirkwood_clk_ctrl = CGC_DUNIT | CGC_RESERVED;
73void __init kirkwood_ehci_init(void) 74void __init kirkwood_ehci_init(void)
74{ 75{
75 kirkwood_clk_ctrl |= CGC_USB0; 76 kirkwood_clk_ctrl |= CGC_USB0;
76 orion_ehci_init(USB_PHYS_BASE, IRQ_KIRKWOOD_USB); 77 orion_ehci_init(USB_PHYS_BASE, IRQ_KIRKWOOD_USB, EHCI_PHY_NA);
77} 78}
78 79
79 80
diff --git a/arch/arm/mach-kirkwood/mpp.h b/arch/arm/mach-kirkwood/mpp.h
index e8fda45c0736..d5a0d1da2e0e 100644
--- a/arch/arm/mach-kirkwood/mpp.h
+++ b/arch/arm/mach-kirkwood/mpp.h
@@ -31,314 +31,314 @@
31#define MPP_F6282_MASK MPP( 0, 0x0, 0, 0, 0, 0, 0, 0, 1 ) 31#define MPP_F6282_MASK MPP( 0, 0x0, 0, 0, 0, 0, 0, 0, 1 )
32 32
33#define MPP0_GPIO MPP( 0, 0x0, 1, 1, 1, 1, 1, 1, 1 ) 33#define MPP0_GPIO MPP( 0, 0x0, 1, 1, 1, 1, 1, 1, 1 )
34#define MPP0_NF_IO2 MPP( 0, 0x1, 1, 1, 1, 1, 1, 1, 1 ) 34#define MPP0_NF_IO2 MPP( 0, 0x1, 0, 0, 1, 1, 1, 1, 1 )
35#define MPP0_SPI_SCn MPP( 0, 0x2, 0, 1, 1, 1, 1, 1, 1 ) 35#define MPP0_SPI_SCn MPP( 0, 0x2, 0, 0, 1, 1, 1, 1, 1 )
36 36
37#define MPP1_GPO MPP( 1, 0x0, 0, 1, 1, 1, 1, 1, 1 ) 37#define MPP1_GPO MPP( 1, 0x0, 0, 1, 1, 1, 1, 1, 1 )
38#define MPP1_NF_IO3 MPP( 1, 0x1, 1, 1, 1, 1, 1, 1, 1 ) 38#define MPP1_NF_IO3 MPP( 1, 0x1, 0, 0, 1, 1, 1, 1, 1 )
39#define MPP1_SPI_MOSI MPP( 1, 0x2, 0, 1, 1, 1, 1, 1, 1 ) 39#define MPP1_SPI_MOSI MPP( 1, 0x2, 0, 0, 1, 1, 1, 1, 1 )
40 40
41#define MPP2_GPO MPP( 2, 0x0, 0, 1, 1, 1, 1, 1, 1 ) 41#define MPP2_GPO MPP( 2, 0x0, 0, 1, 1, 1, 1, 1, 1 )
42#define MPP2_NF_IO4 MPP( 2, 0x1, 1, 1, 1, 1, 1, 1, 1 ) 42#define MPP2_NF_IO4 MPP( 2, 0x1, 0, 0, 1, 1, 1, 1, 1 )
43#define MPP2_SPI_SCK MPP( 2, 0x2, 0, 1, 1, 1, 1, 1, 1 ) 43#define MPP2_SPI_SCK MPP( 2, 0x2, 0, 0, 1, 1, 1, 1, 1 )
44 44
45#define MPP3_GPO MPP( 3, 0x0, 0, 1, 1, 1, 1, 1, 1 ) 45#define MPP3_GPO MPP( 3, 0x0, 0, 1, 1, 1, 1, 1, 1 )
46#define MPP3_NF_IO5 MPP( 3, 0x1, 1, 1, 1, 1, 1, 1, 1 ) 46#define MPP3_NF_IO5 MPP( 3, 0x1, 0, 0, 1, 1, 1, 1, 1 )
47#define MPP3_SPI_MISO MPP( 3, 0x2, 1, 0, 1, 1, 1, 1, 1 ) 47#define MPP3_SPI_MISO MPP( 3, 0x2, 0, 0, 1, 1, 1, 1, 1 )
48 48
49#define MPP4_GPIO MPP( 4, 0x0, 1, 1, 1, 1, 1, 1, 1 ) 49#define MPP4_GPIO MPP( 4, 0x0, 1, 1, 1, 1, 1, 1, 1 )
50#define MPP4_NF_IO6 MPP( 4, 0x1, 1, 1, 1, 1, 1, 1, 1 ) 50#define MPP4_NF_IO6 MPP( 4, 0x1, 0, 0, 1, 1, 1, 1, 1 )
51#define MPP4_UART0_RXD MPP( 4, 0x2, 1, 0, 1, 1, 1, 1, 1 ) 51#define MPP4_UART0_RXD MPP( 4, 0x2, 0, 0, 1, 1, 1, 1, 1 )
52#define MPP4_SATA1_ACTn MPP( 4, 0x5, 0, 1, 0, 0, 1, 1, 1 ) 52#define MPP4_SATA1_ACTn MPP( 4, 0x5, 0, 0, 0, 0, 1, 1, 1 )
53#define MPP4_LCD_VGA_HSYNC MPP( 4, 0xb, 0, 0, 0, 0, 0, 0, 1 ) 53#define MPP4_LCD_VGA_HSYNC MPP( 4, 0xb, 0, 0, 0, 0, 0, 0, 1 )
54#define MPP4_PTP_CLK MPP( 4, 0xd, 1, 0, 1, 1, 1, 1, 0 ) 54#define MPP4_PTP_CLK MPP( 4, 0xd, 0, 0, 1, 1, 1, 1, 0 )
55 55
56#define MPP5_GPO MPP( 5, 0x0, 0, 1, 1, 1, 1, 1, 1 ) 56#define MPP5_GPO MPP( 5, 0x0, 0, 1, 1, 1, 1, 1, 1 )
57#define MPP5_NF_IO7 MPP( 5, 0x1, 1, 1, 1, 1, 1, 1, 1 ) 57#define MPP5_NF_IO7 MPP( 5, 0x1, 0, 0, 1, 1, 1, 1, 1 )
58#define MPP5_UART0_TXD MPP( 5, 0x2, 0, 1, 1, 1, 1, 1, 1 ) 58#define MPP5_UART0_TXD MPP( 5, 0x2, 0, 0, 1, 1, 1, 1, 1 )
59#define MPP5_PTP_TRIG_GEN MPP( 5, 0x4, 0, 1, 1, 1, 1, 1, 0 ) 59#define MPP5_PTP_TRIG_GEN MPP( 5, 0x4, 0, 0, 1, 1, 1, 1, 0 )
60#define MPP5_SATA0_ACTn MPP( 5, 0x5, 0, 1, 0, 1, 1, 1, 1 ) 60#define MPP5_SATA0_ACTn MPP( 5, 0x5, 0, 0, 0, 1, 1, 1, 1 )
61#define MPP5_LCD_VGA_VSYNC MPP( 5, 0xb, 0, 0, 0, 0, 0, 0, 1 ) 61#define MPP5_LCD_VGA_VSYNC MPP( 5, 0xb, 0, 0, 0, 0, 0, 0, 1 )
62 62
63#define MPP6_SYSRST_OUTn MPP( 6, 0x1, 0, 1, 1, 1, 1, 1, 1 ) 63#define MPP6_SYSRST_OUTn MPP( 6, 0x1, 0, 0, 1, 1, 1, 1, 1 )
64#define MPP6_SPI_MOSI MPP( 6, 0x2, 0, 1, 1, 1, 1, 1, 1 ) 64#define MPP6_SPI_MOSI MPP( 6, 0x2, 0, 0, 1, 1, 1, 1, 1 )
65#define MPP6_PTP_TRIG_GEN MPP( 6, 0x3, 0, 1, 1, 1, 1, 1, 0 ) 65#define MPP6_PTP_TRIG_GEN MPP( 6, 0x3, 0, 0, 1, 1, 1, 1, 0 )
66 66
67#define MPP7_GPO MPP( 7, 0x0, 0, 1, 1, 1, 1, 1, 1 ) 67#define MPP7_GPO MPP( 7, 0x0, 0, 1, 1, 1, 1, 1, 1 )
68#define MPP7_PEX_RST_OUTn MPP( 7, 0x1, 0, 1, 1, 1, 1, 1, 0 ) 68#define MPP7_PEX_RST_OUTn MPP( 7, 0x1, 0, 0, 1, 1, 1, 1, 0 )
69#define MPP7_SPI_SCn MPP( 7, 0x2, 0, 1, 1, 1, 1, 1, 1 ) 69#define MPP7_SPI_SCn MPP( 7, 0x2, 0, 0, 1, 1, 1, 1, 1 )
70#define MPP7_PTP_TRIG_GEN MPP( 7, 0x3, 0, 1, 1, 1, 1, 1, 0 ) 70#define MPP7_PTP_TRIG_GEN MPP( 7, 0x3, 0, 0, 1, 1, 1, 1, 0 )
71#define MPP7_LCD_PWM MPP( 7, 0xb, 0, 1, 0, 0, 0, 0, 1 ) 71#define MPP7_LCD_PWM MPP( 7, 0xb, 0, 0, 0, 0, 0, 0, 1 )
72 72
73#define MPP8_GPIO MPP( 8, 0x0, 1, 1, 1, 1, 1, 1, 1 ) 73#define MPP8_GPIO MPP( 8, 0x0, 1, 1, 1, 1, 1, 1, 1 )
74#define MPP8_TW0_SDA MPP( 8, 0x1, 1, 1, 1, 1, 1, 1, 1 ) 74#define MPP8_TW0_SDA MPP( 8, 0x1, 0, 0, 1, 1, 1, 1, 1 )
75#define MPP8_UART0_RTS MPP( 8, 0x2, 0, 1, 1, 1, 1, 1, 1 ) 75#define MPP8_UART0_RTS MPP( 8, 0x2, 0, 0, 1, 1, 1, 1, 1 )
76#define MPP8_UART1_RTS MPP( 8, 0x3, 0, 1, 1, 1, 1, 1, 1 ) 76#define MPP8_UART1_RTS MPP( 8, 0x3, 0, 0, 1, 1, 1, 1, 1 )
77#define MPP8_MII0_RXERR MPP( 8, 0x4, 1, 0, 0, 1, 1, 1, 1 ) 77#define MPP8_MII0_RXERR MPP( 8, 0x4, 0, 0, 0, 1, 1, 1, 1 )
78#define MPP8_SATA1_PRESENTn MPP( 8, 0x5, 0, 1, 0, 0, 1, 1, 1 ) 78#define MPP8_SATA1_PRESENTn MPP( 8, 0x5, 0, 0, 0, 0, 1, 1, 1 )
79#define MPP8_PTP_CLK MPP( 8, 0xc, 1, 0, 1, 1, 1, 1, 0 ) 79#define MPP8_PTP_CLK MPP( 8, 0xc, 0, 0, 1, 1, 1, 1, 0 )
80#define MPP8_MII0_COL MPP( 8, 0xd, 1, 0, 1, 1, 1, 1, 1 ) 80#define MPP8_MII0_COL MPP( 8, 0xd, 0, 0, 1, 1, 1, 1, 1 )
81 81
82#define MPP9_GPIO MPP( 9, 0x0, 1, 1, 1, 1, 1, 1, 1 ) 82#define MPP9_GPIO MPP( 9, 0x0, 1, 1, 1, 1, 1, 1, 1 )
83#define MPP9_TW0_SCK MPP( 9, 0x1, 1, 1, 1, 1, 1, 1, 1 ) 83#define MPP9_TW0_SCK MPP( 9, 0x1, 0, 0, 1, 1, 1, 1, 1 )
84#define MPP9_UART0_CTS MPP( 9, 0x2, 1, 0, 1, 1, 1, 1, 1 ) 84#define MPP9_UART0_CTS MPP( 9, 0x2, 0, 0, 1, 1, 1, 1, 1 )
85#define MPP9_UART1_CTS MPP( 9, 0x3, 1, 0, 1, 1, 1, 1, 1 ) 85#define MPP9_UART1_CTS MPP( 9, 0x3, 0, 0, 1, 1, 1, 1, 1 )
86#define MPP9_SATA0_PRESENTn MPP( 9, 0x5, 0, 1, 0, 1, 1, 1, 1 ) 86#define MPP9_SATA0_PRESENTn MPP( 9, 0x5, 0, 0, 0, 1, 1, 1, 1 )
87#define MPP9_PTP_EVENT_REQ MPP( 9, 0xc, 1, 0, 1, 1, 1, 1, 0 ) 87#define MPP9_PTP_EVENT_REQ MPP( 9, 0xc, 0, 0, 1, 1, 1, 1, 0 )
88#define MPP9_MII0_CRS MPP( 9, 0xd, 1, 0, 1, 1, 1, 1, 1 ) 88#define MPP9_MII0_CRS MPP( 9, 0xd, 0, 0, 1, 1, 1, 1, 1 )
89 89
90#define MPP10_GPO MPP( 10, 0x0, 0, 1, 1, 1, 1, 1, 1 ) 90#define MPP10_GPO MPP( 10, 0x0, 0, 1, 1, 1, 1, 1, 1 )
91#define MPP10_SPI_SCK MPP( 10, 0x2, 0, 1, 1, 1, 1, 1, 1 ) 91#define MPP10_SPI_SCK MPP( 10, 0x2, 0, 0, 1, 1, 1, 1, 1 )
92#define MPP10_UART0_TXD MPP( 10, 0X3, 0, 1, 1, 1, 1, 1, 1 ) 92#define MPP10_UART0_TXD MPP( 10, 0X3, 0, 0, 1, 1, 1, 1, 1 )
93#define MPP10_SATA1_ACTn MPP( 10, 0x5, 0, 1, 0, 0, 1, 1, 1 ) 93#define MPP10_SATA1_ACTn MPP( 10, 0x5, 0, 0, 0, 0, 1, 1, 1 )
94#define MPP10_PTP_TRIG_GEN MPP( 10, 0xc, 0, 1, 1, 1, 1, 1, 0 ) 94#define MPP10_PTP_TRIG_GEN MPP( 10, 0xc, 0, 0, 1, 1, 1, 1, 0 )
95 95
96#define MPP11_GPIO MPP( 11, 0x0, 1, 1, 1, 1, 1, 1, 1 ) 96#define MPP11_GPIO MPP( 11, 0x0, 1, 1, 1, 1, 1, 1, 1 )
97#define MPP11_SPI_MISO MPP( 11, 0x2, 1, 0, 1, 1, 1, 1, 1 ) 97#define MPP11_SPI_MISO MPP( 11, 0x2, 0, 0, 1, 1, 1, 1, 1 )
98#define MPP11_UART0_RXD MPP( 11, 0x3, 1, 0, 1, 1, 1, 1, 1 ) 98#define MPP11_UART0_RXD MPP( 11, 0x3, 0, 0, 1, 1, 1, 1, 1 )
99#define MPP11_PTP_EVENT_REQ MPP( 11, 0x4, 1, 0, 1, 1, 1, 1, 0 ) 99#define MPP11_PTP_EVENT_REQ MPP( 11, 0x4, 0, 0, 1, 1, 1, 1, 0 )
100#define MPP11_PTP_TRIG_GEN MPP( 11, 0xc, 0, 1, 1, 1, 1, 1, 0 ) 100#define MPP11_PTP_TRIG_GEN MPP( 11, 0xc, 0, 0, 1, 1, 1, 1, 0 )
101#define MPP11_PTP_CLK MPP( 11, 0xd, 1, 0, 1, 1, 1, 1, 0 ) 101#define MPP11_PTP_CLK MPP( 11, 0xd, 0, 0, 1, 1, 1, 1, 0 )
102#define MPP11_SATA0_ACTn MPP( 11, 0x5, 0, 1, 0, 1, 1, 1, 1 ) 102#define MPP11_SATA0_ACTn MPP( 11, 0x5, 0, 0, 0, 1, 1, 1, 1 )
103 103
104#define MPP12_GPO MPP( 12, 0x0, 0, 1, 1, 1, 1, 1, 1 ) 104#define MPP12_GPO MPP( 12, 0x0, 0, 1, 1, 1, 1, 1, 1 )
105#define MPP12_GPIO MPP( 12, 0x0, 1, 1, 0, 0, 0, 1, 0 ) 105#define MPP12_GPIO MPP( 12, 0x0, 1, 1, 0, 0, 0, 1, 0 )
106#define MPP12_SD_CLK MPP( 12, 0x1, 0, 1, 1, 1, 1, 1, 1 ) 106#define MPP12_SD_CLK MPP( 12, 0x1, 0, 0, 1, 1, 1, 1, 1 )
107#define MPP12_AU_SPDIF0 MPP( 12, 0xa, 0, 1, 0, 0, 0, 0, 1 ) 107#define MPP12_AU_SPDIF0 MPP( 12, 0xa, 0, 0, 0, 0, 0, 0, 1 )
108#define MPP12_SPI_MOSI MPP( 12, 0xb, 0, 1, 0, 0, 0, 0, 1 ) 108#define MPP12_SPI_MOSI MPP( 12, 0xb, 0, 0, 0, 0, 0, 0, 1 )
109#define MPP12_TW1_SDA MPP( 12, 0xd, 1, 0, 0, 0, 0, 0, 1 ) 109#define MPP12_TW1_SDA MPP( 12, 0xd, 0, 0, 0, 0, 0, 0, 1 )
110 110
111#define MPP13_GPIO MPP( 13, 0x0, 1, 1, 1, 1, 1, 1, 1 ) 111#define MPP13_GPIO MPP( 13, 0x0, 1, 1, 1, 1, 1, 1, 1 )
112#define MPP13_SD_CMD MPP( 13, 0x1, 1, 1, 1, 1, 1, 1, 1 ) 112#define MPP13_SD_CMD MPP( 13, 0x1, 0, 0, 1, 1, 1, 1, 1 )
113#define MPP13_UART1_TXD MPP( 13, 0x3, 0, 1, 1, 1, 1, 1, 1 ) 113#define MPP13_UART1_TXD MPP( 13, 0x3, 0, 0, 1, 1, 1, 1, 1 )
114#define MPP13_AU_SPDIFRMCLK MPP( 13, 0xa, 0, 1, 0, 0, 0, 0, 1 ) 114#define MPP13_AU_SPDIFRMCLK MPP( 13, 0xa, 0, 0, 0, 0, 0, 0, 1 )
115#define MPP13_LCDPWM MPP( 13, 0xb, 0, 1, 0, 0, 0, 0, 1 ) 115#define MPP13_LCDPWM MPP( 13, 0xb, 0, 0, 0, 0, 0, 0, 1 )
116 116
117#define MPP14_GPIO MPP( 14, 0x0, 1, 1, 1, 1, 1, 1, 1 ) 117#define MPP14_GPIO MPP( 14, 0x0, 1, 1, 1, 1, 1, 1, 1 )
118#define MPP14_SD_D0 MPP( 14, 0x1, 1, 1, 1, 1, 1, 1, 1 ) 118#define MPP14_SD_D0 MPP( 14, 0x1, 0, 0, 1, 1, 1, 1, 1 )
119#define MPP14_UART1_RXD MPP( 14, 0x3, 1, 0, 1, 1, 1, 1, 1 ) 119#define MPP14_UART1_RXD MPP( 14, 0x3, 0, 0, 1, 1, 1, 1, 1 )
120#define MPP14_SATA1_PRESENTn MPP( 14, 0x4, 0, 1, 0, 0, 1, 1, 1 ) 120#define MPP14_SATA1_PRESENTn MPP( 14, 0x4, 0, 0, 0, 0, 1, 1, 1 )
121#define MPP14_AU_SPDIFI MPP( 14, 0xa, 1, 0, 0, 0, 0, 0, 1 ) 121#define MPP14_AU_SPDIFI MPP( 14, 0xa, 0, 0, 0, 0, 0, 0, 1 )
122#define MPP14_AU_I2SDI MPP( 14, 0xb, 1, 0, 0, 0, 0, 0, 1 ) 122#define MPP14_AU_I2SDI MPP( 14, 0xb, 0, 0, 0, 0, 0, 0, 1 )
123#define MPP14_MII0_COL MPP( 14, 0xd, 1, 0, 1, 1, 1, 1, 1 ) 123#define MPP14_MII0_COL MPP( 14, 0xd, 0, 0, 1, 1, 1, 1, 1 )
124 124
125#define MPP15_GPIO MPP( 15, 0x0, 1, 1, 1, 1, 1, 1, 1 ) 125#define MPP15_GPIO MPP( 15, 0x0, 1, 1, 1, 1, 1, 1, 1 )
126#define MPP15_SD_D1 MPP( 15, 0x1, 1, 1, 1, 1, 1, 1, 1 ) 126#define MPP15_SD_D1 MPP( 15, 0x1, 0, 0, 1, 1, 1, 1, 1 )
127#define MPP15_UART0_RTS MPP( 15, 0x2, 0, 1, 1, 1, 1, 1, 1 ) 127#define MPP15_UART0_RTS MPP( 15, 0x2, 0, 0, 1, 1, 1, 1, 1 )
128#define MPP15_UART1_TXD MPP( 15, 0x3, 0, 1, 1, 1, 1, 1, 1 ) 128#define MPP15_UART1_TXD MPP( 15, 0x3, 0, 0, 1, 1, 1, 1, 1 )
129#define MPP15_SATA0_ACTn MPP( 15, 0x4, 0, 1, 0, 1, 1, 1, 1 ) 129#define MPP15_SATA0_ACTn MPP( 15, 0x4, 0, 0, 0, 1, 1, 1, 1 )
130#define MPP15_SPI_CSn MPP( 15, 0xb, 0, 1, 0, 0, 0, 0, 1 ) 130#define MPP15_SPI_CSn MPP( 15, 0xb, 0, 0, 0, 0, 0, 0, 1 )
131 131
132#define MPP16_GPIO MPP( 16, 0x0, 1, 1, 1, 1, 1, 1, 1 ) 132#define MPP16_GPIO MPP( 16, 0x0, 1, 1, 1, 1, 1, 1, 1 )
133#define MPP16_SD_D2 MPP( 16, 0x1, 1, 1, 1, 1, 1, 1, 1 ) 133#define MPP16_SD_D2 MPP( 16, 0x1, 0, 0, 1, 1, 1, 1, 1 )
134#define MPP16_UART0_CTS MPP( 16, 0x2, 1, 0, 1, 1, 1, 1, 1 ) 134#define MPP16_UART0_CTS MPP( 16, 0x2, 0, 0, 1, 1, 1, 1, 1 )
135#define MPP16_UART1_RXD MPP( 16, 0x3, 1, 0, 1, 1, 1, 1, 1 ) 135#define MPP16_UART1_RXD MPP( 16, 0x3, 0, 0, 1, 1, 1, 1, 1 )
136#define MPP16_SATA1_ACTn MPP( 16, 0x4, 0, 1, 0, 0, 1, 1, 1 ) 136#define MPP16_SATA1_ACTn MPP( 16, 0x4, 0, 0, 0, 0, 1, 1, 1 )
137#define MPP16_LCD_EXT_REF_CLK MPP( 16, 0xb, 1, 0, 0, 0, 0, 0, 1 ) 137#define MPP16_LCD_EXT_REF_CLK MPP( 16, 0xb, 0, 0, 0, 0, 0, 0, 1 )
138#define MPP16_MII0_CRS MPP( 16, 0xd, 1, 0, 1, 1, 1, 1, 1 ) 138#define MPP16_MII0_CRS MPP( 16, 0xd, 0, 0, 1, 1, 1, 1, 1 )
139 139
140#define MPP17_GPIO MPP( 17, 0x0, 1, 1, 1, 1, 1, 1, 1 ) 140#define MPP17_GPIO MPP( 17, 0x0, 1, 1, 1, 1, 1, 1, 1 )
141#define MPP17_SD_D3 MPP( 17, 0x1, 1, 1, 1, 1, 1, 1, 1 ) 141#define MPP17_SD_D3 MPP( 17, 0x1, 0, 0, 1, 1, 1, 1, 1 )
142#define MPP17_SATA0_PRESENTn MPP( 17, 0x4, 0, 1, 0, 1, 1, 1, 1 ) 142#define MPP17_SATA0_PRESENTn MPP( 17, 0x4, 0, 0, 0, 1, 1, 1, 1 )
143#define MPP17_SATA1_ACTn MPP( 17, 0xa, 0, 1, 0, 0, 0, 0, 1 ) 143#define MPP17_SATA1_ACTn MPP( 17, 0xa, 0, 0, 0, 0, 0, 0, 1 )
144#define MPP17_TW1_SCK MPP( 17, 0xd, 1, 1, 0, 0, 0, 0, 1 ) 144#define MPP17_TW1_SCK MPP( 17, 0xd, 0, 0, 0, 0, 0, 0, 1 )
145 145
146#define MPP18_GPO MPP( 18, 0x0, 0, 1, 1, 1, 1, 1, 1 ) 146#define MPP18_GPO MPP( 18, 0x0, 0, 1, 1, 1, 1, 1, 1 )
147#define MPP18_NF_IO0 MPP( 18, 0x1, 1, 1, 1, 1, 1, 1, 1 ) 147#define MPP18_NF_IO0 MPP( 18, 0x1, 0, 0, 1, 1, 1, 1, 1 )
148#define MPP18_PEX0_CLKREQ MPP( 18, 0x2, 0, 1, 0, 0, 0, 0, 1 ) 148#define MPP18_PEX0_CLKREQ MPP( 18, 0x2, 0, 0, 0, 0, 0, 0, 1 )
149 149
150#define MPP19_GPO MPP( 19, 0x0, 0, 1, 1, 1, 1, 1, 1 ) 150#define MPP19_GPO MPP( 19, 0x0, 0, 1, 1, 1, 1, 1, 1 )
151#define MPP19_NF_IO1 MPP( 19, 0x1, 1, 1, 1, 1, 1, 1, 1 ) 151#define MPP19_NF_IO1 MPP( 19, 0x1, 0, 0, 1, 1, 1, 1, 1 )
152 152
153#define MPP20_GPIO MPP( 20, 0x0, 1, 1, 0, 1, 1, 1, 1 ) 153#define MPP20_GPIO MPP( 20, 0x0, 1, 1, 0, 1, 1, 1, 1 )
154#define MPP20_TSMP0 MPP( 20, 0x1, 1, 1, 0, 0, 1, 1, 1 ) 154#define MPP20_TSMP0 MPP( 20, 0x1, 0, 0, 0, 0, 1, 1, 1 )
155#define MPP20_TDM_CH0_TX_QL MPP( 20, 0x2, 0, 1, 0, 0, 1, 1, 1 ) 155#define MPP20_TDM_CH0_TX_QL MPP( 20, 0x2, 0, 0, 0, 0, 1, 1, 1 )
156#define MPP20_GE1_TXD0 MPP( 20, 0x3, 0, 0, 0, 1, 1, 1, 1 ) 156#define MPP20_GE1_TXD0 MPP( 20, 0x3, 0, 0, 0, 1, 1, 1, 1 )
157#define MPP20_AU_SPDIFI MPP( 20, 0x4, 1, 0, 0, 0, 1, 1, 1 ) 157#define MPP20_AU_SPDIFI MPP( 20, 0x4, 0, 0, 0, 0, 1, 1, 1 )
158#define MPP20_SATA1_ACTn MPP( 20, 0x5, 0, 1, 0, 0, 1, 1, 1 ) 158#define MPP20_SATA1_ACTn MPP( 20, 0x5, 0, 0, 0, 0, 1, 1, 1 )
159#define MPP20_LCD_D0 MPP( 20, 0xb, 0, 0, 0, 0, 0, 0, 1 ) 159#define MPP20_LCD_D0 MPP( 20, 0xb, 0, 0, 0, 0, 0, 0, 1 )
160 160
161#define MPP21_GPIO MPP( 21, 0x0, 1, 1, 0, 1, 1, 1, 1 ) 161#define MPP21_GPIO MPP( 21, 0x0, 1, 1, 0, 1, 1, 1, 1 )
162#define MPP21_TSMP1 MPP( 21, 0x1, 1, 1, 0, 0, 1, 1, 1 ) 162#define MPP21_TSMP1 MPP( 21, 0x1, 0, 0, 0, 0, 1, 1, 1 )
163#define MPP21_TDM_CH0_RX_QL MPP( 21, 0x2, 0, 1, 0, 0, 1, 1, 1 ) 163#define MPP21_TDM_CH0_RX_QL MPP( 21, 0x2, 0, 0, 0, 0, 1, 1, 1 )
164#define MPP21_GE1_TXD1 MPP( 21, 0x3, 0, 0, 0, 1, 1, 1, 1 ) 164#define MPP21_GE1_TXD1 MPP( 21, 0x3, 0, 0, 0, 1, 1, 1, 1 )
165#define MPP21_AU_SPDIFO MPP( 21, 0x4, 0, 1, 0, 0, 1, 1, 1 ) 165#define MPP21_AU_SPDIFO MPP( 21, 0x4, 0, 0, 0, 0, 1, 1, 1 )
166#define MPP21_SATA0_ACTn MPP( 21, 0x5, 0, 1, 0, 1, 1, 1, 1 ) 166#define MPP21_SATA0_ACTn MPP( 21, 0x5, 0, 0, 0, 1, 1, 1, 1 )
167#define MPP21_LCD_D1 MPP( 21, 0xb, 0, 0, 0, 0, 0, 0, 1 ) 167#define MPP21_LCD_D1 MPP( 21, 0xb, 0, 0, 0, 0, 0, 0, 1 )
168 168
169#define MPP22_GPIO MPP( 22, 0x0, 1, 1, 0, 1, 1, 1, 1 ) 169#define MPP22_GPIO MPP( 22, 0x0, 1, 1, 0, 1, 1, 1, 1 )
170#define MPP22_TSMP2 MPP( 22, 0x1, 1, 1, 0, 0, 1, 1, 1 ) 170#define MPP22_TSMP2 MPP( 22, 0x1, 0, 0, 0, 0, 1, 1, 1 )
171#define MPP22_TDM_CH2_TX_QL MPP( 22, 0x2, 0, 1, 0, 0, 1, 1, 1 ) 171#define MPP22_TDM_CH2_TX_QL MPP( 22, 0x2, 0, 0, 0, 0, 1, 1, 1 )
172#define MPP22_GE1_TXD2 MPP( 22, 0x3, 0, 0, 0, 1, 1, 1, 1 ) 172#define MPP22_GE1_TXD2 MPP( 22, 0x3, 0, 0, 0, 1, 1, 1, 1 )
173#define MPP22_AU_SPDIFRMKCLK MPP( 22, 0x4, 0, 1, 0, 0, 1, 1, 1 ) 173#define MPP22_AU_SPDIFRMKCLK MPP( 22, 0x4, 0, 0, 0, 0, 1, 1, 1 )
174#define MPP22_SATA1_PRESENTn MPP( 22, 0x5, 0, 1, 0, 0, 1, 1, 1 ) 174#define MPP22_SATA1_PRESENTn MPP( 22, 0x5, 0, 0, 0, 0, 1, 1, 1 )
175#define MPP22_LCD_D2 MPP( 22, 0xb, 0, 0, 0, 0, 0, 0, 1 ) 175#define MPP22_LCD_D2 MPP( 22, 0xb, 0, 0, 0, 0, 0, 0, 1 )
176 176
177#define MPP23_GPIO MPP( 23, 0x0, 1, 1, 0, 1, 1, 1, 1 ) 177#define MPP23_GPIO MPP( 23, 0x0, 1, 1, 0, 1, 1, 1, 1 )
178#define MPP23_TSMP3 MPP( 23, 0x1, 1, 1, 0, 0, 1, 1, 1 ) 178#define MPP23_TSMP3 MPP( 23, 0x1, 0, 0, 0, 0, 1, 1, 1 )
179#define MPP23_TDM_CH2_RX_QL MPP( 23, 0x2, 1, 0, 0, 0, 1, 1, 1 ) 179#define MPP23_TDM_CH2_RX_QL MPP( 23, 0x2, 0, 0, 0, 0, 1, 1, 1 )
180#define MPP23_GE1_TXD3 MPP( 23, 0x3, 0, 0, 0, 1, 1, 1, 1 ) 180#define MPP23_GE1_TXD3 MPP( 23, 0x3, 0, 0, 0, 1, 1, 1, 1 )
181#define MPP23_AU_I2SBCLK MPP( 23, 0x4, 0, 1, 0, 0, 1, 1, 1 ) 181#define MPP23_AU_I2SBCLK MPP( 23, 0x4, 0, 0, 0, 0, 1, 1, 1 )
182#define MPP23_SATA0_PRESENTn MPP( 23, 0x5, 0, 1, 0, 1, 1, 1, 1 ) 182#define MPP23_SATA0_PRESENTn MPP( 23, 0x5, 0, 0, 0, 1, 1, 1, 1 )
183#define MPP23_LCD_D3 MPP( 23, 0xb, 0, 0, 0, 0, 0, 0, 1 ) 183#define MPP23_LCD_D3 MPP( 23, 0xb, 0, 0, 0, 0, 0, 0, 1 )
184 184
185#define MPP24_GPIO MPP( 24, 0x0, 1, 1, 0, 1, 1, 1, 1 ) 185#define MPP24_GPIO MPP( 24, 0x0, 1, 1, 0, 1, 1, 1, 1 )
186#define MPP24_TSMP4 MPP( 24, 0x1, 1, 1, 0, 0, 1, 1, 1 ) 186#define MPP24_TSMP4 MPP( 24, 0x1, 0, 0, 0, 0, 1, 1, 1 )
187#define MPP24_TDM_SPI_CS0 MPP( 24, 0x2, 0, 1, 0, 0, 1, 1, 1 ) 187#define MPP24_TDM_SPI_CS0 MPP( 24, 0x2, 0, 0, 0, 0, 1, 1, 1 )
188#define MPP24_GE1_RXD0 MPP( 24, 0x3, 0, 0, 0, 1, 1, 1, 1 ) 188#define MPP24_GE1_RXD0 MPP( 24, 0x3, 0, 0, 0, 1, 1, 1, 1 )
189#define MPP24_AU_I2SDO MPP( 24, 0x4, 0, 1, 0, 0, 1, 1, 1 ) 189#define MPP24_AU_I2SDO MPP( 24, 0x4, 0, 0, 0, 0, 1, 1, 1 )
190#define MPP24_LCD_D4 MPP( 24, 0xb, 0, 0, 0, 0, 0, 0, 1 ) 190#define MPP24_LCD_D4 MPP( 24, 0xb, 0, 0, 0, 0, 0, 0, 1 )
191 191
192#define MPP25_GPIO MPP( 25, 0x0, 1, 1, 0, 1, 1, 1, 1 ) 192#define MPP25_GPIO MPP( 25, 0x0, 1, 1, 0, 1, 1, 1, 1 )
193#define MPP25_TSMP5 MPP( 25, 0x1, 1, 1, 0, 0, 1, 1, 1 ) 193#define MPP25_TSMP5 MPP( 25, 0x1, 0, 0, 0, 0, 1, 1, 1 )
194#define MPP25_TDM_SPI_SCK MPP( 25, 0x2, 0, 1, 0, 0, 1, 1, 1 ) 194#define MPP25_TDM_SPI_SCK MPP( 25, 0x2, 0, 0, 0, 0, 1, 1, 1 )
195#define MPP25_GE1_RXD1 MPP( 25, 0x3, 0, 0, 0, 1, 1, 1, 1 ) 195#define MPP25_GE1_RXD1 MPP( 25, 0x3, 0, 0, 0, 1, 1, 1, 1 )
196#define MPP25_AU_I2SLRCLK MPP( 25, 0x4, 0, 1, 0, 0, 1, 1, 1 ) 196#define MPP25_AU_I2SLRCLK MPP( 25, 0x4, 0, 0, 0, 0, 1, 1, 1 )
197#define MPP25_LCD_D5 MPP( 25, 0xb, 0, 0, 0, 0, 0, 0, 1 ) 197#define MPP25_LCD_D5 MPP( 25, 0xb, 0, 0, 0, 0, 0, 0, 1 )
198 198
199#define MPP26_GPIO MPP( 26, 0x0, 1, 1, 0, 1, 1, 1, 1 ) 199#define MPP26_GPIO MPP( 26, 0x0, 1, 1, 0, 1, 1, 1, 1 )
200#define MPP26_TSMP6 MPP( 26, 0x1, 1, 1, 0, 0, 1, 1, 1 ) 200#define MPP26_TSMP6 MPP( 26, 0x1, 0, 0, 0, 0, 1, 1, 1 )
201#define MPP26_TDM_SPI_MISO MPP( 26, 0x2, 1, 0, 0, 0, 1, 1, 1 ) 201#define MPP26_TDM_SPI_MISO MPP( 26, 0x2, 0, 0, 0, 0, 1, 1, 1 )
202#define MPP26_GE1_RXD2 MPP( 26, 0x3, 0, 0, 0, 1, 1, 1, 1 ) 202#define MPP26_GE1_RXD2 MPP( 26, 0x3, 0, 0, 0, 1, 1, 1, 1 )
203#define MPP26_AU_I2SMCLK MPP( 26, 0x4, 0, 1, 0, 0, 1, 1, 1 ) 203#define MPP26_AU_I2SMCLK MPP( 26, 0x4, 0, 0, 0, 0, 1, 1, 1 )
204#define MPP26_LCD_D6 MPP( 26, 0xb, 0, 0, 0, 0, 0, 0, 1 ) 204#define MPP26_LCD_D6 MPP( 26, 0xb, 0, 0, 0, 0, 0, 0, 1 )
205 205
206#define MPP27_GPIO MPP( 27, 0x0, 1, 1, 0, 1, 1, 1, 1 ) 206#define MPP27_GPIO MPP( 27, 0x0, 1, 1, 0, 1, 1, 1, 1 )
207#define MPP27_TSMP7 MPP( 27, 0x1, 1, 1, 0, 0, 1, 1, 1 ) 207#define MPP27_TSMP7 MPP( 27, 0x1, 0, 0, 0, 0, 1, 1, 1 )
208#define MPP27_TDM_SPI_MOSI MPP( 27, 0x2, 0, 1, 0, 0, 1, 1, 1 ) 208#define MPP27_TDM_SPI_MOSI MPP( 27, 0x2, 0, 0, 0, 0, 1, 1, 1 )
209#define MPP27_GE1_RXD3 MPP( 27, 0x3, 0, 0, 0, 1, 1, 1, 1 ) 209#define MPP27_GE1_RXD3 MPP( 27, 0x3, 0, 0, 0, 1, 1, 1, 1 )
210#define MPP27_AU_I2SDI MPP( 27, 0x4, 1, 0, 0, 0, 1, 1, 1 ) 210#define MPP27_AU_I2SDI MPP( 27, 0x4, 0, 0, 0, 0, 1, 1, 1 )
211#define MPP27_LCD_D7 MPP( 27, 0xb, 0, 0, 0, 0, 0, 0, 1 ) 211#define MPP27_LCD_D7 MPP( 27, 0xb, 0, 0, 0, 0, 0, 0, 1 )
212 212
213#define MPP28_GPIO MPP( 28, 0x0, 1, 1, 0, 1, 1, 1, 1 ) 213#define MPP28_GPIO MPP( 28, 0x0, 1, 1, 0, 1, 1, 1, 1 )
214#define MPP28_TSMP8 MPP( 28, 0x1, 1, 1, 0, 0, 1, 1, 1 ) 214#define MPP28_TSMP8 MPP( 28, 0x1, 0, 0, 0, 0, 1, 1, 1 )
215#define MPP28_TDM_CODEC_INTn MPP( 28, 0x2, 0, 0, 0, 0, 1, 1, 1 ) 215#define MPP28_TDM_CODEC_INTn MPP( 28, 0x2, 0, 0, 0, 0, 1, 1, 1 )
216#define MPP28_GE1_COL MPP( 28, 0x3, 0, 0, 0, 1, 1, 1, 1 ) 216#define MPP28_GE1_COL MPP( 28, 0x3, 0, 0, 0, 1, 1, 1, 1 )
217#define MPP28_AU_EXTCLK MPP( 28, 0x4, 1, 0, 0, 0, 1, 1, 1 ) 217#define MPP28_AU_EXTCLK MPP( 28, 0x4, 0, 0, 0, 0, 1, 1, 1 )
218#define MPP28_LCD_D8 MPP( 28, 0xb, 0, 0, 0, 0, 0, 0, 1 ) 218#define MPP28_LCD_D8 MPP( 28, 0xb, 0, 0, 0, 0, 0, 0, 1 )
219 219
220#define MPP29_GPIO MPP( 29, 0x0, 1, 1, 0, 1, 1, 1, 1 ) 220#define MPP29_GPIO MPP( 29, 0x0, 1, 1, 0, 1, 1, 1, 1 )
221#define MPP29_TSMP9 MPP( 29, 0x1, 1, 1, 0, 0, 1, 1, 1 ) 221#define MPP29_TSMP9 MPP( 29, 0x1, 0, 0, 0, 0, 1, 1, 1 )
222#define MPP29_TDM_CODEC_RSTn MPP( 29, 0x2, 0, 0, 0, 0, 1, 1, 1 ) 222#define MPP29_TDM_CODEC_RSTn MPP( 29, 0x2, 0, 0, 0, 0, 1, 1, 1 )
223#define MPP29_GE1_TCLK MPP( 29, 0x3, 0, 0, 0, 1, 1, 1, 1 ) 223#define MPP29_GE1_TCLK MPP( 29, 0x3, 0, 0, 0, 1, 1, 1, 1 )
224#define MPP29_LCD_D9 MPP( 29, 0xb, 0, 0, 0, 0, 0, 0, 1 ) 224#define MPP29_LCD_D9 MPP( 29, 0xb, 0, 0, 0, 0, 0, 0, 1 )
225 225
226#define MPP30_GPIO MPP( 30, 0x0, 1, 1, 0, 1, 1, 1, 1 ) 226#define MPP30_GPIO MPP( 30, 0x0, 1, 1, 0, 1, 1, 1, 1 )
227#define MPP30_TSMP10 MPP( 30, 0x1, 1, 1, 0, 0, 1, 1, 1 ) 227#define MPP30_TSMP10 MPP( 30, 0x1, 0, 0, 0, 0, 1, 1, 1 )
228#define MPP30_TDM_PCLK MPP( 30, 0x2, 1, 1, 0, 0, 1, 1, 1 ) 228#define MPP30_TDM_PCLK MPP( 30, 0x2, 0, 0, 0, 0, 1, 1, 1 )
229#define MPP30_GE1_RXCTL MPP( 30, 0x3, 0, 0, 0, 1, 1, 1, 1 ) 229#define MPP30_GE1_RXCTL MPP( 30, 0x3, 0, 0, 0, 1, 1, 1, 1 )
230#define MPP30_LCD_D10 MPP( 30, 0xb, 0, 0, 0, 0, 0, 0, 1 ) 230#define MPP30_LCD_D10 MPP( 30, 0xb, 0, 0, 0, 0, 0, 0, 1 )
231 231
232#define MPP31_GPIO MPP( 31, 0x0, 1, 1, 0, 1, 1, 1, 1 ) 232#define MPP31_GPIO MPP( 31, 0x0, 1, 1, 0, 1, 1, 1, 1 )
233#define MPP31_TSMP11 MPP( 31, 0x1, 1, 1, 0, 0, 1, 1, 1 ) 233#define MPP31_TSMP11 MPP( 31, 0x1, 0, 0, 0, 0, 1, 1, 1 )
234#define MPP31_TDM_FS MPP( 31, 0x2, 1, 1, 0, 0, 1, 1, 1 ) 234#define MPP31_TDM_FS MPP( 31, 0x2, 0, 0, 0, 0, 1, 1, 1 )
235#define MPP31_GE1_RXCLK MPP( 31, 0x3, 0, 0, 0, 1, 1, 1, 1 ) 235#define MPP31_GE1_RXCLK MPP( 31, 0x3, 0, 0, 0, 1, 1, 1, 1 )
236#define MPP31_LCD_D11 MPP( 31, 0xb, 0, 0, 0, 0, 0, 0, 1 ) 236#define MPP31_LCD_D11 MPP( 31, 0xb, 0, 0, 0, 0, 0, 0, 1 )
237 237
238#define MPP32_GPIO MPP( 32, 0x0, 1, 1, 0, 1, 1, 1, 1 ) 238#define MPP32_GPIO MPP( 32, 0x0, 1, 1, 0, 1, 1, 1, 1 )
239#define MPP32_TSMP12 MPP( 32, 0x1, 1, 1, 0, 0, 1, 1, 1 ) 239#define MPP32_TSMP12 MPP( 32, 0x1, 0, 0, 0, 0, 1, 1, 1 )
240#define MPP32_TDM_DRX MPP( 32, 0x2, 1, 0, 0, 0, 1, 1, 1 ) 240#define MPP32_TDM_DRX MPP( 32, 0x2, 0, 0, 0, 0, 1, 1, 1 )
241#define MPP32_GE1_TCLKOUT MPP( 32, 0x3, 0, 0, 0, 1, 1, 1, 1 ) 241#define MPP32_GE1_TCLKOUT MPP( 32, 0x3, 0, 0, 0, 1, 1, 1, 1 )
242#define MPP32_LCD_D12 MPP( 32, 0xb, 0, 0, 0, 0, 0, 0, 1 ) 242#define MPP32_LCD_D12 MPP( 32, 0xb, 0, 0, 0, 0, 0, 0, 1 )
243 243
244#define MPP33_GPO MPP( 33, 0x0, 0, 1, 0, 1, 1, 1, 1 ) 244#define MPP33_GPO MPP( 33, 0x0, 0, 1, 0, 1, 1, 1, 1 )
245#define MPP33_TDM_DTX MPP( 33, 0x2, 0, 1, 0, 0, 1, 1, 1 ) 245#define MPP33_TDM_DTX MPP( 33, 0x2, 0, 0, 0, 0, 1, 1, 1 )
246#define MPP33_GE1_TXCTL MPP( 33, 0x3, 0, 0, 0, 1, 1, 1, 1 ) 246#define MPP33_GE1_TXCTL MPP( 33, 0x3, 0, 0, 0, 1, 1, 1, 1 )
247#define MPP33_LCD_D13 MPP( 33, 0xb, 0, 0, 0, 0, 0, 0, 1 ) 247#define MPP33_LCD_D13 MPP( 33, 0xb, 0, 0, 0, 0, 0, 0, 1 )
248 248
249#define MPP34_GPIO MPP( 34, 0x0, 1, 1, 0, 1, 1, 1, 1 ) 249#define MPP34_GPIO MPP( 34, 0x0, 1, 1, 0, 1, 1, 1, 1 )
250#define MPP34_TDM_SPI_CS1 MPP( 34, 0x2, 0, 1, 0, 0, 1, 1, 1 ) 250#define MPP34_TDM_SPI_CS1 MPP( 34, 0x2, 0, 0, 0, 0, 1, 1, 1 )
251#define MPP34_GE1_TXEN MPP( 34, 0x3, 0, 0, 0, 1, 1, 1, 1 ) 251#define MPP34_GE1_TXEN MPP( 34, 0x3, 0, 0, 0, 1, 1, 1, 1 )
252#define MPP34_SATA1_ACTn MPP( 34, 0x5, 0, 1, 0, 0, 0, 1, 1 ) 252#define MPP34_SATA1_ACTn MPP( 34, 0x5, 0, 0, 0, 0, 0, 1, 1 )
253#define MPP34_LCD_D14 MPP( 34, 0xb, 0, 0, 0, 0, 0, 0, 1 ) 253#define MPP34_LCD_D14 MPP( 34, 0xb, 0, 0, 0, 0, 0, 0, 1 )
254 254
255#define MPP35_GPIO MPP( 35, 0x0, 1, 1, 1, 1, 1, 1, 1 ) 255#define MPP35_GPIO MPP( 35, 0x0, 1, 1, 1, 1, 1, 1, 1 )
256#define MPP35_TDM_CH0_TX_QL MPP( 35, 0x2, 0, 1, 0, 0, 1, 1, 1 ) 256#define MPP35_TDM_CH0_TX_QL MPP( 35, 0x2, 0, 0, 0, 0, 1, 1, 1 )
257#define MPP35_GE1_RXERR MPP( 35, 0x3, 0, 0, 0, 1, 1, 1, 1 ) 257#define MPP35_GE1_RXERR MPP( 35, 0x3, 0, 0, 0, 1, 1, 1, 1 )
258#define MPP35_SATA0_ACTn MPP( 35, 0x5, 0, 1, 0, 1, 1, 1, 1 ) 258#define MPP35_SATA0_ACTn MPP( 35, 0x5, 0, 0, 0, 1, 1, 1, 1 )
259#define MPP35_LCD_D15 MPP( 22, 0xb, 0, 0, 0, 0, 0, 0, 1 ) 259#define MPP35_LCD_D15 MPP( 22, 0xb, 0, 0, 0, 0, 0, 0, 1 )
260#define MPP35_MII0_RXERR MPP( 35, 0xc, 1, 0, 1, 1, 1, 1, 1 ) 260#define MPP35_MII0_RXERR MPP( 35, 0xc, 0, 0, 1, 1, 1, 1, 1 )
261 261
262#define MPP36_GPIO MPP( 36, 0x0, 1, 1, 1, 0, 0, 1, 1 ) 262#define MPP36_GPIO MPP( 36, 0x0, 1, 1, 1, 0, 0, 1, 1 )
263#define MPP36_TSMP0 MPP( 36, 0x1, 1, 1, 0, 0, 0, 1, 1 ) 263#define MPP36_TSMP0 MPP( 36, 0x1, 0, 0, 0, 0, 0, 1, 1 )
264#define MPP36_TDM_SPI_CS1 MPP( 36, 0x2, 0, 1, 0, 0, 0, 1, 1 ) 264#define MPP36_TDM_SPI_CS1 MPP( 36, 0x2, 0, 0, 0, 0, 0, 1, 1 )
265#define MPP36_AU_SPDIFI MPP( 36, 0x4, 1, 0, 1, 0, 0, 1, 1 ) 265#define MPP36_AU_SPDIFI MPP( 36, 0x4, 0, 0, 1, 0, 0, 1, 1 )
266#define MPP36_TW1_SDA MPP( 36, 0xb, 1, 1, 0, 0, 0, 0, 1 ) 266#define MPP36_TW1_SDA MPP( 36, 0xb, 0, 0, 0, 0, 0, 0, 1 )
267 267
268#define MPP37_GPIO MPP( 37, 0x0, 1, 1, 1, 0, 0, 1, 1 ) 268#define MPP37_GPIO MPP( 37, 0x0, 1, 1, 1, 0, 0, 1, 1 )
269#define MPP37_TSMP1 MPP( 37, 0x1, 1, 1, 0, 0, 0, 1, 1 ) 269#define MPP37_TSMP1 MPP( 37, 0x1, 0, 0, 0, 0, 0, 1, 1 )
270#define MPP37_TDM_CH2_TX_QL MPP( 37, 0x2, 0, 1, 0, 0, 0, 1, 1 ) 270#define MPP37_TDM_CH2_TX_QL MPP( 37, 0x2, 0, 0, 0, 0, 0, 1, 1 )
271#define MPP37_AU_SPDIFO MPP( 37, 0x4, 0, 1, 1, 0, 0, 1, 1 ) 271#define MPP37_AU_SPDIFO MPP( 37, 0x4, 0, 0, 1, 0, 0, 1, 1 )
272#define MPP37_TW1_SCK MPP( 37, 0xb, 1, 1, 0, 0, 0, 0, 1 ) 272#define MPP37_TW1_SCK MPP( 37, 0xb, 0, 0, 0, 0, 0, 0, 1 )
273 273
274#define MPP38_GPIO MPP( 38, 0x0, 1, 1, 1, 0, 0, 1, 1 ) 274#define MPP38_GPIO MPP( 38, 0x0, 1, 1, 1, 0, 0, 1, 1 )
275#define MPP38_TSMP2 MPP( 38, 0x1, 1, 1, 0, 0, 0, 1, 1 ) 275#define MPP38_TSMP2 MPP( 38, 0x1, 0, 0, 0, 0, 0, 1, 1 )
276#define MPP38_TDM_CH2_RX_QL MPP( 38, 0x2, 0, 1, 0, 0, 0, 1, 1 ) 276#define MPP38_TDM_CH2_RX_QL MPP( 38, 0x2, 0, 0, 0, 0, 0, 1, 1 )
277#define MPP38_AU_SPDIFRMLCLK MPP( 38, 0x4, 0, 1, 1, 0, 0, 1, 1 ) 277#define MPP38_AU_SPDIFRMLCLK MPP( 38, 0x4, 0, 0, 1, 0, 0, 1, 1 )
278#define MPP38_LCD_D18 MPP( 38, 0xb, 0, 0, 0, 0, 0, 0, 1 ) 278#define MPP38_LCD_D18 MPP( 38, 0xb, 0, 0, 0, 0, 0, 0, 1 )
279 279
280#define MPP39_GPIO MPP( 39, 0x0, 1, 1, 1, 0, 0, 1, 1 ) 280#define MPP39_GPIO MPP( 39, 0x0, 1, 1, 1, 0, 0, 1, 1 )
281#define MPP39_TSMP3 MPP( 39, 0x1, 1, 1, 0, 0, 0, 1, 1 ) 281#define MPP39_TSMP3 MPP( 39, 0x1, 0, 0, 0, 0, 0, 1, 1 )
282#define MPP39_TDM_SPI_CS0 MPP( 39, 0x2, 0, 1, 0, 0, 0, 1, 1 ) 282#define MPP39_TDM_SPI_CS0 MPP( 39, 0x2, 0, 0, 0, 0, 0, 1, 1 )
283#define MPP39_AU_I2SBCLK MPP( 39, 0x4, 0, 1, 1, 0, 0, 1, 1 ) 283#define MPP39_AU_I2SBCLK MPP( 39, 0x4, 0, 0, 1, 0, 0, 1, 1 )
284#define MPP39_LCD_D19 MPP( 39, 0xb, 0, 0, 0, 0, 0, 0, 1 ) 284#define MPP39_LCD_D19 MPP( 39, 0xb, 0, 0, 0, 0, 0, 0, 1 )
285 285
286#define MPP40_GPIO MPP( 40, 0x0, 1, 1, 1, 0, 0, 1, 1 ) 286#define MPP40_GPIO MPP( 40, 0x0, 1, 1, 1, 0, 0, 1, 1 )
287#define MPP40_TSMP4 MPP( 40, 0x1, 1, 1, 0, 0, 0, 1, 1 ) 287#define MPP40_TSMP4 MPP( 40, 0x1, 0, 0, 0, 0, 0, 1, 1 )
288#define MPP40_TDM_SPI_SCK MPP( 40, 0x2, 0, 1, 0, 0, 0, 1, 1 ) 288#define MPP40_TDM_SPI_SCK MPP( 40, 0x2, 0, 0, 0, 0, 0, 1, 1 )
289#define MPP40_AU_I2SDO MPP( 40, 0x4, 0, 1, 1, 0, 0, 1, 1 ) 289#define MPP40_AU_I2SDO MPP( 40, 0x4, 0, 0, 1, 0, 0, 1, 1 )
290#define MPP40_LCD_D20 MPP( 40, 0xb, 0, 0, 0, 0, 0, 0, 1 ) 290#define MPP40_LCD_D20 MPP( 40, 0xb, 0, 0, 0, 0, 0, 0, 1 )
291 291
292#define MPP41_GPIO MPP( 41, 0x0, 1, 1, 1, 0, 0, 1, 1 ) 292#define MPP41_GPIO MPP( 41, 0x0, 1, 1, 1, 0, 0, 1, 1 )
293#define MPP41_TSMP5 MPP( 41, 0x1, 1, 1, 0, 0, 0, 1, 1 ) 293#define MPP41_TSMP5 MPP( 41, 0x1, 0, 0, 0, 0, 0, 1, 1 )
294#define MPP41_TDM_SPI_MISO MPP( 41, 0x2, 1, 0, 0, 0, 0, 1, 1 ) 294#define MPP41_TDM_SPI_MISO MPP( 41, 0x2, 0, 0, 0, 0, 0, 1, 1 )
295#define MPP41_AU_I2SLRCLK MPP( 41, 0x4, 0, 1, 1, 0, 0, 1, 1 ) 295#define MPP41_AU_I2SLRCLK MPP( 41, 0x4, 0, 0, 1, 0, 0, 1, 1 )
296#define MPP41_LCD_D21 MPP( 41, 0xb, 0, 0, 0, 0, 0, 0, 1 ) 296#define MPP41_LCD_D21 MPP( 41, 0xb, 0, 0, 0, 0, 0, 0, 1 )
297 297
298#define MPP42_GPIO MPP( 42, 0x0, 1, 1, 1, 0, 0, 1, 1 ) 298#define MPP42_GPIO MPP( 42, 0x0, 1, 1, 1, 0, 0, 1, 1 )
299#define MPP42_TSMP6 MPP( 42, 0x1, 1, 1, 0, 0, 0, 1, 1 ) 299#define MPP42_TSMP6 MPP( 42, 0x1, 0, 0, 0, 0, 0, 1, 1 )
300#define MPP42_TDM_SPI_MOSI MPP( 42, 0x2, 0, 1, 0, 0, 0, 1, 1 ) 300#define MPP42_TDM_SPI_MOSI MPP( 42, 0x2, 0, 0, 0, 0, 0, 1, 1 )
301#define MPP42_AU_I2SMCLK MPP( 42, 0x4, 0, 1, 1, 0, 0, 1, 1 ) 301#define MPP42_AU_I2SMCLK MPP( 42, 0x4, 0, 0, 1, 0, 0, 1, 1 )
302#define MPP42_LCD_D22 MPP( 42, 0xb, 0, 0, 0, 0, 0, 0, 1 ) 302#define MPP42_LCD_D22 MPP( 42, 0xb, 0, 0, 0, 0, 0, 0, 1 )
303 303
304#define MPP43_GPIO MPP( 43, 0x0, 1, 1, 1, 0, 0, 1, 1 ) 304#define MPP43_GPIO MPP( 43, 0x0, 1, 1, 1, 0, 0, 1, 1 )
305#define MPP43_TSMP7 MPP( 43, 0x1, 1, 1, 0, 0, 0, 1, 1 ) 305#define MPP43_TSMP7 MPP( 43, 0x1, 0, 0, 0, 0, 0, 1, 1 )
306#define MPP43_TDM_CODEC_INTn MPP( 43, 0x2, 0, 0, 0, 0, 0, 1, 1 ) 306#define MPP43_TDM_CODEC_INTn MPP( 43, 0x2, 0, 0, 0, 0, 0, 1, 1 )
307#define MPP43_AU_I2SDI MPP( 43, 0x4, 1, 0, 1, 0, 0, 1, 1 ) 307#define MPP43_AU_I2SDI MPP( 43, 0x4, 0, 0, 1, 0, 0, 1, 1 )
308#define MPP43_LCD_D23 MPP( 22, 0xb, 0, 0, 0, 0, 0, 0, 1 ) 308#define MPP43_LCD_D23 MPP( 22, 0xb, 0, 0, 0, 0, 0, 0, 1 )
309 309
310#define MPP44_GPIO MPP( 44, 0x0, 1, 1, 1, 0, 0, 1, 1 ) 310#define MPP44_GPIO MPP( 44, 0x0, 1, 1, 1, 0, 0, 1, 1 )
311#define MPP44_TSMP8 MPP( 44, 0x1, 1, 1, 0, 0, 0, 1, 1 ) 311#define MPP44_TSMP8 MPP( 44, 0x1, 0, 0, 0, 0, 0, 1, 1 )
312#define MPP44_TDM_CODEC_RSTn MPP( 44, 0x2, 0, 0, 0, 0, 0, 1, 1 ) 312#define MPP44_TDM_CODEC_RSTn MPP( 44, 0x2, 0, 0, 0, 0, 0, 1, 1 )
313#define MPP44_AU_EXTCLK MPP( 44, 0x4, 1, 0, 1, 0, 0, 1, 1 ) 313#define MPP44_AU_EXTCLK MPP( 44, 0x4, 0, 0, 1, 0, 0, 1, 1 )
314#define MPP44_LCD_CLK MPP( 44, 0xb, 0, 0, 0, 0, 0, 0, 1 ) 314#define MPP44_LCD_CLK MPP( 44, 0xb, 0, 0, 0, 0, 0, 0, 1 )
315 315
316#define MPP45_GPIO MPP( 45, 0x0, 1, 1, 0, 0, 0, 1, 1 ) 316#define MPP45_GPIO MPP( 45, 0x0, 1, 1, 0, 0, 0, 1, 1 )
317#define MPP45_TSMP9 MPP( 45, 0x1, 1, 1, 0, 0, 0, 1, 1 ) 317#define MPP45_TSMP9 MPP( 45, 0x1, 0, 0, 0, 0, 0, 1, 1 )
318#define MPP45_TDM_PCLK MPP( 45, 0x2, 1, 1, 0, 0, 0, 1, 1 ) 318#define MPP45_TDM_PCLK MPP( 45, 0x2, 0, 0, 0, 0, 0, 1, 1 )
319#define MPP245_LCD_E MPP( 45, 0xb, 0, 0, 0, 0, 0, 0, 1 ) 319#define MPP245_LCD_E MPP( 45, 0xb, 0, 0, 0, 0, 0, 0, 1 )
320 320
321#define MPP46_GPIO MPP( 46, 0x0, 1, 1, 0, 0, 0, 1, 1 ) 321#define MPP46_GPIO MPP( 46, 0x0, 1, 1, 0, 0, 0, 1, 1 )
322#define MPP46_TSMP10 MPP( 46, 0x1, 1, 1, 0, 0, 0, 1, 1 ) 322#define MPP46_TSMP10 MPP( 46, 0x1, 0, 0, 0, 0, 0, 1, 1 )
323#define MPP46_TDM_FS MPP( 46, 0x2, 1, 1, 0, 0, 0, 1, 1 ) 323#define MPP46_TDM_FS MPP( 46, 0x2, 0, 0, 0, 0, 0, 1, 1 )
324#define MPP46_LCD_HSYNC MPP( 46, 0xb, 0, 0, 0, 0, 0, 0, 1 ) 324#define MPP46_LCD_HSYNC MPP( 46, 0xb, 0, 0, 0, 0, 0, 0, 1 )
325 325
326#define MPP47_GPIO MPP( 47, 0x0, 1, 1, 0, 0, 0, 1, 1 ) 326#define MPP47_GPIO MPP( 47, 0x0, 1, 1, 0, 0, 0, 1, 1 )
327#define MPP47_TSMP11 MPP( 47, 0x1, 1, 1, 0, 0, 0, 1, 1 ) 327#define MPP47_TSMP11 MPP( 47, 0x1, 0, 0, 0, 0, 0, 1, 1 )
328#define MPP47_TDM_DRX MPP( 47, 0x2, 1, 0, 0, 0, 0, 1, 1 ) 328#define MPP47_TDM_DRX MPP( 47, 0x2, 0, 0, 0, 0, 0, 1, 1 )
329#define MPP47_LCD_VSYNC MPP( 47, 0xb, 0, 0, 0, 0, 0, 0, 1 ) 329#define MPP47_LCD_VSYNC MPP( 47, 0xb, 0, 0, 0, 0, 0, 0, 1 )
330 330
331#define MPP48_GPIO MPP( 48, 0x0, 1, 1, 0, 0, 0, 1, 1 ) 331#define MPP48_GPIO MPP( 48, 0x0, 1, 1, 0, 0, 0, 1, 1 )
332#define MPP48_TSMP12 MPP( 48, 0x1, 1, 1, 0, 0, 0, 1, 1 ) 332#define MPP48_TSMP12 MPP( 48, 0x1, 0, 0, 0, 0, 0, 1, 1 )
333#define MPP48_TDM_DTX MPP( 48, 0x2, 0, 1, 0, 0, 0, 1, 1 ) 333#define MPP48_TDM_DTX MPP( 48, 0x2, 0, 0, 0, 0, 0, 1, 1 )
334#define MPP48_LCD_D16 MPP( 22, 0xb, 0, 0, 0, 0, 0, 0, 1 ) 334#define MPP48_LCD_D16 MPP( 22, 0xb, 0, 0, 0, 0, 0, 0, 1 )
335 335
336#define MPP49_GPIO MPP( 49, 0x0, 1, 1, 0, 0, 0, 1, 0 ) 336#define MPP49_GPIO MPP( 49, 0x0, 1, 1, 0, 0, 0, 1, 0 )
337#define MPP49_GPO MPP( 49, 0x0, 0, 1, 0, 0, 0, 0, 1 ) 337#define MPP49_GPO MPP( 49, 0x0, 0, 1, 0, 0, 0, 0, 1 )
338#define MPP49_TSMP9 MPP( 49, 0x1, 1, 1, 0, 0, 0, 1, 0 ) 338#define MPP49_TSMP9 MPP( 49, 0x1, 0, 0, 0, 0, 0, 1, 0 )
339#define MPP49_TDM_CH0_RX_QL MPP( 49, 0x2, 0, 1, 0, 0, 0, 1, 1 ) 339#define MPP49_TDM_CH0_RX_QL MPP( 49, 0x2, 0, 0, 0, 0, 0, 1, 1 )
340#define MPP49_PTP_CLK MPP( 49, 0x5, 1, 0, 0, 0, 0, 1, 0 ) 340#define MPP49_PTP_CLK MPP( 49, 0x5, 0, 0, 0, 0, 0, 1, 0 )
341#define MPP49_PEX0_CLKREQ MPP( 49, 0xa, 0, 1, 0, 0, 0, 0, 1 ) 341#define MPP49_PEX0_CLKREQ MPP( 49, 0xa, 0, 0, 0, 0, 0, 0, 1 )
342#define MPP49_LCD_D17 MPP( 49, 0xb, 0, 0, 0, 0, 0, 0, 1 ) 342#define MPP49_LCD_D17 MPP( 49, 0xb, 0, 0, 0, 0, 0, 0, 1 )
343 343
344#define MPP_MAX 49 344#define MPP_MAX 49
diff --git a/arch/arm/mach-lpc32xx/clock.c b/arch/arm/mach-lpc32xx/clock.c
index 1e027514096d..473015ac07bd 100644
--- a/arch/arm/mach-lpc32xx/clock.c
+++ b/arch/arm/mach-lpc32xx/clock.c
@@ -719,6 +719,41 @@ static struct clk clk_tsc = {
719 .get_rate = local_return_parent_rate, 719 .get_rate = local_return_parent_rate,
720}; 720};
721 721
722static int adc_onoff_enable(struct clk *clk, int enable)
723{
724 u32 tmp;
725 u32 divider;
726
727 /* Use PERIPH_CLOCK */
728 tmp = __raw_readl(LPC32XX_CLKPWR_ADC_CLK_CTRL_1);
729 tmp |= LPC32XX_CLKPWR_ADCCTRL1_PCLK_SEL;
730 /*
731 * Set clock divider so that we have equal to or less than
732 * 4.5MHz clock at ADC
733 */
734 divider = clk->get_rate(clk) / 4500000 + 1;
735 tmp |= divider;
736 __raw_writel(tmp, LPC32XX_CLKPWR_ADC_CLK_CTRL_1);
737
738 /* synchronize rate of this clock w/ actual HW setting */
739 clk->rate = clk->get_rate(clk->parent) / divider;
740
741 if (enable == 0)
742 __raw_writel(0, clk->enable_reg);
743 else
744 __raw_writel(clk->enable_mask, clk->enable_reg);
745
746 return 0;
747}
748
749static struct clk clk_adc = {
750 .parent = &clk_pclk,
751 .enable = adc_onoff_enable,
752 .enable_reg = LPC32XX_CLKPWR_ADC_CLK_CTRL,
753 .enable_mask = LPC32XX_CLKPWR_ADC32CLKCTRL_CLK_EN,
754 .get_rate = local_return_parent_rate,
755};
756
722static int mmc_onoff_enable(struct clk *clk, int enable) 757static int mmc_onoff_enable(struct clk *clk, int enable)
723{ 758{
724 u32 tmp; 759 u32 tmp;
@@ -1075,6 +1110,7 @@ static struct clk_lookup lookups[] = {
1075 _REGISTER_CLOCK("dev:ssp1", NULL, clk_ssp1) 1110 _REGISTER_CLOCK("dev:ssp1", NULL, clk_ssp1)
1076 _REGISTER_CLOCK("lpc32xx_keys.0", NULL, clk_kscan) 1111 _REGISTER_CLOCK("lpc32xx_keys.0", NULL, clk_kscan)
1077 _REGISTER_CLOCK("lpc32xx-nand.0", "nand_ck", clk_nand) 1112 _REGISTER_CLOCK("lpc32xx-nand.0", "nand_ck", clk_nand)
1113 _REGISTER_CLOCK("lpc32xx-adc", NULL, clk_adc)
1078 _REGISTER_CLOCK("tbd", "i2s0_ck", clk_i2s0) 1114 _REGISTER_CLOCK("tbd", "i2s0_ck", clk_i2s0)
1079 _REGISTER_CLOCK("tbd", "i2s1_ck", clk_i2s1) 1115 _REGISTER_CLOCK("tbd", "i2s1_ck", clk_i2s1)
1080 _REGISTER_CLOCK("ts-lpc32xx", NULL, clk_tsc) 1116 _REGISTER_CLOCK("ts-lpc32xx", NULL, clk_tsc)
diff --git a/arch/arm/mach-lpc32xx/common.c b/arch/arm/mach-lpc32xx/common.c
index 369b152896cd..6c76bb36559b 100644
--- a/arch/arm/mach-lpc32xx/common.c
+++ b/arch/arm/mach-lpc32xx/common.c
@@ -138,6 +138,28 @@ struct platform_device lpc32xx_rtc_device = {
138}; 138};
139 139
140/* 140/*
141 * ADC support
142 */
143static struct resource adc_resources[] = {
144 {
145 .start = LPC32XX_ADC_BASE,
146 .end = LPC32XX_ADC_BASE + SZ_4K - 1,
147 .flags = IORESOURCE_MEM,
148 }, {
149 .start = IRQ_LPC32XX_TS_IRQ,
150 .end = IRQ_LPC32XX_TS_IRQ,
151 .flags = IORESOURCE_IRQ,
152 },
153};
154
155struct platform_device lpc32xx_adc_device = {
156 .name = "lpc32xx-adc",
157 .id = -1,
158 .num_resources = ARRAY_SIZE(adc_resources),
159 .resource = adc_resources,
160};
161
162/*
141 * Returns the unique ID for the device 163 * Returns the unique ID for the device
142 */ 164 */
143void lpc32xx_get_uid(u32 devid[4]) 165void lpc32xx_get_uid(u32 devid[4])
diff --git a/arch/arm/mach-lpc32xx/common.h b/arch/arm/mach-lpc32xx/common.h
index 4b4e700343c1..04b72739eb9c 100644
--- a/arch/arm/mach-lpc32xx/common.h
+++ b/arch/arm/mach-lpc32xx/common.h
@@ -29,6 +29,7 @@ extern struct platform_device lpc32xx_i2c0_device;
29extern struct platform_device lpc32xx_i2c1_device; 29extern struct platform_device lpc32xx_i2c1_device;
30extern struct platform_device lpc32xx_i2c2_device; 30extern struct platform_device lpc32xx_i2c2_device;
31extern struct platform_device lpc32xx_tsc_device; 31extern struct platform_device lpc32xx_tsc_device;
32extern struct platform_device lpc32xx_adc_device;
32extern struct platform_device lpc32xx_rtc_device; 33extern struct platform_device lpc32xx_rtc_device;
33 34
34/* 35/*
diff --git a/arch/arm/mach-lpc32xx/phy3250.c b/arch/arm/mach-lpc32xx/phy3250.c
index 5d51c102c255..a539f4f72f28 100644
--- a/arch/arm/mach-lpc32xx/phy3250.c
+++ b/arch/arm/mach-lpc32xx/phy3250.c
@@ -252,6 +252,7 @@ static struct platform_device *phy3250_devs[] __initdata = {
252 &lpc32xx_i2c2_device, 252 &lpc32xx_i2c2_device,
253 &lpc32xx_watchdog_device, 253 &lpc32xx_watchdog_device,
254 &lpc32xx_gpio_led_device, 254 &lpc32xx_gpio_led_device,
255 &lpc32xx_adc_device,
255}; 256};
256 257
257static struct amba_device *amba_devs[] __initdata = { 258static struct amba_device *amba_devs[] __initdata = {
diff --git a/arch/arm/mach-mv78xx0/common.c b/arch/arm/mach-mv78xx0/common.c
index 0cdd41004ad0..a5dcf766a3f9 100644
--- a/arch/arm/mach-mv78xx0/common.c
+++ b/arch/arm/mach-mv78xx0/common.c
@@ -19,6 +19,7 @@
19#include <mach/mv78xx0.h> 19#include <mach/mv78xx0.h>
20#include <mach/bridge-regs.h> 20#include <mach/bridge-regs.h>
21#include <plat/cache-feroceon-l2.h> 21#include <plat/cache-feroceon-l2.h>
22#include <plat/ehci-orion.h>
22#include <plat/orion_nand.h> 23#include <plat/orion_nand.h>
23#include <plat/time.h> 24#include <plat/time.h>
24#include <plat/common.h> 25#include <plat/common.h>
@@ -169,7 +170,7 @@ void __init mv78xx0_map_io(void)
169 ****************************************************************************/ 170 ****************************************************************************/
170void __init mv78xx0_ehci0_init(void) 171void __init mv78xx0_ehci0_init(void)
171{ 172{
172 orion_ehci_init(USB0_PHYS_BASE, IRQ_MV78XX0_USB_0); 173 orion_ehci_init(USB0_PHYS_BASE, IRQ_MV78XX0_USB_0, EHCI_PHY_NA);
173} 174}
174 175
175 176
diff --git a/arch/arm/mach-mv78xx0/mpp.h b/arch/arm/mach-mv78xx0/mpp.h
index b61b50927123..3752302ae2ee 100644
--- a/arch/arm/mach-mv78xx0/mpp.h
+++ b/arch/arm/mach-mv78xx0/mpp.h
@@ -24,296 +24,296 @@
24#define MPP_78100_A0_MASK MPP(0, 0x0, 0, 0, 1) 24#define MPP_78100_A0_MASK MPP(0, 0x0, 0, 0, 1)
25 25
26#define MPP0_GPIO MPP(0, 0x0, 1, 1, 1) 26#define MPP0_GPIO MPP(0, 0x0, 1, 1, 1)
27#define MPP0_GE0_COL MPP(0, 0x1, 1, 0, 1) 27#define MPP0_GE0_COL MPP(0, 0x1, 0, 0, 1)
28#define MPP0_GE1_TXCLK MPP(0, 0x2, 0, 1, 1) 28#define MPP0_GE1_TXCLK MPP(0, 0x2, 0, 0, 1)
29#define MPP0_UNUSED MPP(0, 0x3, 0, 0, 1) 29#define MPP0_UNUSED MPP(0, 0x3, 0, 0, 1)
30 30
31#define MPP1_GPIO MPP(1, 0x0, 1, 1, 1) 31#define MPP1_GPIO MPP(1, 0x0, 1, 1, 1)
32#define MPP1_GE0_RXERR MPP(1, 0x1, 1, 0, 1) 32#define MPP1_GE0_RXERR MPP(1, 0x1, 0, 0, 1)
33#define MPP1_GE1_TXCTL MPP(1, 0x2, 0, 1, 1) 33#define MPP1_GE1_TXCTL MPP(1, 0x2, 0, 0, 1)
34#define MPP1_UNUSED MPP(1, 0x3, 0, 0, 1) 34#define MPP1_UNUSED MPP(1, 0x3, 0, 0, 1)
35 35
36#define MPP2_GPIO MPP(2, 0x0, 1, 1, 1) 36#define MPP2_GPIO MPP(2, 0x0, 1, 1, 1)
37#define MPP2_GE0_CRS MPP(2, 0x1, 1, 0, 1) 37#define MPP2_GE0_CRS MPP(2, 0x1, 0, 0, 1)
38#define MPP2_GE1_RXCTL MPP(2, 0x2, 1, 0, 1) 38#define MPP2_GE1_RXCTL MPP(2, 0x2, 0, 0, 1)
39#define MPP2_UNUSED MPP(2, 0x3, 0, 0, 1) 39#define MPP2_UNUSED MPP(2, 0x3, 0, 0, 1)
40 40
41#define MPP3_GPIO MPP(3, 0x0, 1, 1, 1) 41#define MPP3_GPIO MPP(3, 0x0, 1, 1, 1)
42#define MPP3_GE0_TXERR MPP(3, 0x1, 0, 1, 1) 42#define MPP3_GE0_TXERR MPP(3, 0x1, 0, 0, 1)
43#define MPP3_GE1_RXCLK MPP(3, 0x2, 1, 0, 1) 43#define MPP3_GE1_RXCLK MPP(3, 0x2, 0, 0, 1)
44#define MPP3_UNUSED MPP(3, 0x3, 0, 0, 1) 44#define MPP3_UNUSED MPP(3, 0x3, 0, 0, 1)
45 45
46#define MPP4_GPIO MPP(4, 0x0, 1, 1, 1) 46#define MPP4_GPIO MPP(4, 0x0, 1, 1, 1)
47#define MPP4_GE0_TXD4 MPP(4, 0x1, 0, 1, 1) 47#define MPP4_GE0_TXD4 MPP(4, 0x1, 0, 0, 1)
48#define MPP4_GE1_TXD0 MPP(4, 0x2, 0, 1, 1) 48#define MPP4_GE1_TXD0 MPP(4, 0x2, 0, 0, 1)
49#define MPP4_UNUSED MPP(4, 0x3, 0, 0, 1) 49#define MPP4_UNUSED MPP(4, 0x3, 0, 0, 1)
50 50
51#define MPP5_GPIO MPP(5, 0x0, 1, 1, 1) 51#define MPP5_GPIO MPP(5, 0x0, 1, 1, 1)
52#define MPP5_GE0_TXD5 MPP(5, 0x1, 0, 1, 1) 52#define MPP5_GE0_TXD5 MPP(5, 0x1, 0, 0, 1)
53#define MPP5_GE1_TXD1 MPP(5, 0x2, 0, 1, 1) 53#define MPP5_GE1_TXD1 MPP(5, 0x2, 0, 0, 1)
54#define MPP5_UNUSED MPP(5, 0x3, 0, 0, 1) 54#define MPP5_UNUSED MPP(5, 0x3, 0, 0, 1)
55 55
56#define MPP6_GPIO MPP(6, 0x0, 1, 1, 1) 56#define MPP6_GPIO MPP(6, 0x0, 1, 1, 1)
57#define MPP6_GE0_TXD6 MPP(6, 0x1, 0, 1, 1) 57#define MPP6_GE0_TXD6 MPP(6, 0x1, 0, 0, 1)
58#define MPP6_GE1_TXD2 MPP(6, 0x2, 0, 1, 1) 58#define MPP6_GE1_TXD2 MPP(6, 0x2, 0, 0, 1)
59#define MPP6_UNUSED MPP(6, 0x3, 0, 0, 1) 59#define MPP6_UNUSED MPP(6, 0x3, 0, 0, 1)
60 60
61#define MPP7_GPIO MPP(7, 0x0, 1, 1, 1) 61#define MPP7_GPIO MPP(7, 0x0, 1, 1, 1)
62#define MPP7_GE0_TXD7 MPP(7, 0x1, 0, 1, 1) 62#define MPP7_GE0_TXD7 MPP(7, 0x1, 0, 0, 1)
63#define MPP7_GE1_TXD3 MPP(7, 0x2, 0, 1, 1) 63#define MPP7_GE1_TXD3 MPP(7, 0x2, 0, 0, 1)
64#define MPP7_UNUSED MPP(7, 0x3, 0, 0, 1) 64#define MPP7_UNUSED MPP(7, 0x3, 0, 0, 1)
65 65
66#define MPP8_GPIO MPP(8, 0x0, 1, 1, 1) 66#define MPP8_GPIO MPP(8, 0x0, 1, 1, 1)
67#define MPP8_GE0_RXD4 MPP(8, 0x1, 1, 0, 1) 67#define MPP8_GE0_RXD4 MPP(8, 0x1, 0, 0, 1)
68#define MPP8_GE1_RXD0 MPP(8, 0x2, 1, 0, 1) 68#define MPP8_GE1_RXD0 MPP(8, 0x2, 0, 0, 1)
69#define MPP8_UNUSED MPP(8, 0x3, 0, 0, 1) 69#define MPP8_UNUSED MPP(8, 0x3, 0, 0, 1)
70 70
71#define MPP9_GPIO MPP(9, 0x0, 1, 1, 1) 71#define MPP9_GPIO MPP(9, 0x0, 1, 1, 1)
72#define MPP9_GE0_RXD5 MPP(9, 0x1, 1, 0, 1) 72#define MPP9_GE0_RXD5 MPP(9, 0x1, 0, 0, 1)
73#define MPP9_GE1_RXD1 MPP(9, 0x2, 1, 0, 1) 73#define MPP9_GE1_RXD1 MPP(9, 0x2, 0, 0, 1)
74#define MPP9_UNUSED MPP(9, 0x3, 0, 0, 1) 74#define MPP9_UNUSED MPP(9, 0x3, 0, 0, 1)
75 75
76#define MPP10_GPIO MPP(10, 0x0, 1, 1, 1) 76#define MPP10_GPIO MPP(10, 0x0, 1, 1, 1)
77#define MPP10_GE0_RXD6 MPP(10, 0x1, 1, 0, 1) 77#define MPP10_GE0_RXD6 MPP(10, 0x1, 0, 0, 1)
78#define MPP10_GE1_RXD2 MPP(10, 0x2, 1, 0, 1) 78#define MPP10_GE1_RXD2 MPP(10, 0x2, 0, 0, 1)
79#define MPP10_UNUSED MPP(10, 0x3, 0, 0, 1) 79#define MPP10_UNUSED MPP(10, 0x3, 0, 0, 1)
80 80
81#define MPP11_GPIO MPP(11, 0x0, 1, 1, 1) 81#define MPP11_GPIO MPP(11, 0x0, 1, 1, 1)
82#define MPP11_GE0_RXD7 MPP(11, 0x1, 1, 0, 1) 82#define MPP11_GE0_RXD7 MPP(11, 0x1, 0, 0, 1)
83#define MPP11_GE1_RXD3 MPP(11, 0x2, 1, 0, 1) 83#define MPP11_GE1_RXD3 MPP(11, 0x2, 0, 0, 1)
84#define MPP11_UNUSED MPP(11, 0x3, 0, 0, 1) 84#define MPP11_UNUSED MPP(11, 0x3, 0, 0, 1)
85 85
86#define MPP12_GPIO MPP(12, 0x0, 1, 1, 1) 86#define MPP12_GPIO MPP(12, 0x0, 1, 1, 1)
87#define MPP12_M_BB MPP(12, 0x3, 1, 0, 1) 87#define MPP12_M_BB MPP(12, 0x3, 0, 0, 1)
88#define MPP12_UA0_CTSn MPP(12, 0x4, 1, 0, 1) 88#define MPP12_UA0_CTSn MPP(12, 0x4, 0, 0, 1)
89#define MPP12_NAND_FLASH_REn0 MPP(12, 0x5, 0, 1, 1) 89#define MPP12_NAND_FLASH_REn0 MPP(12, 0x5, 0, 0, 1)
90#define MPP12_TDM0_SCSn MPP(12, 0X6, 0, 1, 1) 90#define MPP12_TDM0_SCSn MPP(12, 0X6, 0, 0, 1)
91#define MPP12_UNUSED MPP(12, 0x1, 0, 0, 1) 91#define MPP12_UNUSED MPP(12, 0x1, 0, 0, 1)
92 92
93#define MPP13_GPIO MPP(13, 0x0, 1, 1, 1) 93#define MPP13_GPIO MPP(13, 0x0, 1, 1, 1)
94#define MPP13_SYSRST_OUTn MPP(13, 0x3, 0, 1, 1) 94#define MPP13_SYSRST_OUTn MPP(13, 0x3, 0, 0, 1)
95#define MPP13_UA0_RTSn MPP(13, 0x4, 0, 1, 1) 95#define MPP13_UA0_RTSn MPP(13, 0x4, 0, 0, 1)
96#define MPP13_NAN_FLASH_WEn0 MPP(13, 0x5, 0, 1, 1) 96#define MPP13_NAN_FLASH_WEn0 MPP(13, 0x5, 0, 0, 1)
97#define MPP13_TDM_SCLK MPP(13, 0x6, 0, 1, 1) 97#define MPP13_TDM_SCLK MPP(13, 0x6, 0, 0, 1)
98#define MPP13_UNUSED MPP(13, 0x1, 0, 0, 1) 98#define MPP13_UNUSED MPP(13, 0x1, 0, 0, 1)
99 99
100#define MPP14_GPIO MPP(14, 0x0, 1, 1, 1) 100#define MPP14_GPIO MPP(14, 0x0, 1, 1, 1)
101#define MPP14_SATA1_ACTn MPP(14, 0x3, 0, 1, 1) 101#define MPP14_SATA1_ACTn MPP(14, 0x3, 0, 0, 1)
102#define MPP14_UA1_CTSn MPP(14, 0x4, 1, 0, 1) 102#define MPP14_UA1_CTSn MPP(14, 0x4, 0, 0, 1)
103#define MPP14_NAND_FLASH_REn1 MPP(14, 0x5, 0, 1, 1) 103#define MPP14_NAND_FLASH_REn1 MPP(14, 0x5, 0, 0, 1)
104#define MPP14_TDM_SMOSI MPP(14, 0x6, 0, 1, 1) 104#define MPP14_TDM_SMOSI MPP(14, 0x6, 0, 0, 1)
105#define MPP14_UNUSED MPP(14, 0x1, 0, 0, 1) 105#define MPP14_UNUSED MPP(14, 0x1, 0, 0, 1)
106 106
107#define MPP15_GPIO MPP(15, 0x0, 1, 1, 1) 107#define MPP15_GPIO MPP(15, 0x0, 1, 1, 1)
108#define MPP15_SATA0_ACTn MPP(15, 0x3, 0, 1, 1) 108#define MPP15_SATA0_ACTn MPP(15, 0x3, 0, 0, 1)
109#define MPP15_UA1_RTSn MPP(15, 0x4, 0, 1, 1) 109#define MPP15_UA1_RTSn MPP(15, 0x4, 0, 0, 1)
110#define MPP15_NAND_FLASH_WEn1 MPP(15, 0x5, 0, 1, 1) 110#define MPP15_NAND_FLASH_WEn1 MPP(15, 0x5, 0, 0, 1)
111#define MPP15_TDM_SMISO MPP(15, 0x6, 1, 0, 1) 111#define MPP15_TDM_SMISO MPP(15, 0x6, 0, 0, 1)
112#define MPP15_UNUSED MPP(15, 0x1, 0, 0, 1) 112#define MPP15_UNUSED MPP(15, 0x1, 0, 0, 1)
113 113
114#define MPP16_GPIO MPP(16, 0x0, 1, 1, 1) 114#define MPP16_GPIO MPP(16, 0x0, 1, 1, 1)
115#define MPP16_SATA1_PRESENTn MPP(16, 0x3, 0, 1, 1) 115#define MPP16_SATA1_PRESENTn MPP(16, 0x3, 0, 0, 1)
116#define MPP16_UA2_TXD MPP(16, 0x4, 0, 1, 1) 116#define MPP16_UA2_TXD MPP(16, 0x4, 0, 0, 1)
117#define MPP16_NAND_FLASH_REn3 MPP(16, 0x5, 0, 1, 1) 117#define MPP16_NAND_FLASH_REn3 MPP(16, 0x5, 0, 0, 1)
118#define MPP16_TDM_INTn MPP(16, 0x6, 1, 0, 1) 118#define MPP16_TDM_INTn MPP(16, 0x6, 0, 0, 1)
119#define MPP16_UNUSED MPP(16, 0x1, 0, 0, 1) 119#define MPP16_UNUSED MPP(16, 0x1, 0, 0, 1)
120 120
121 121
122#define MPP17_GPIO MPP(17, 0x0, 1, 1, 1) 122#define MPP17_GPIO MPP(17, 0x0, 1, 1, 1)
123#define MPP17_SATA0_PRESENTn MPP(17, 0x3, 0, 1, 1) 123#define MPP17_SATA0_PRESENTn MPP(17, 0x3, 0, 0, 1)
124#define MPP17_UA2_RXD MPP(17, 0x4, 1, 0, 1) 124#define MPP17_UA2_RXD MPP(17, 0x4, 0, 0, 1)
125#define MPP17_NAND_FLASH_WEn3 MPP(17, 0x5, 0, 1, 1) 125#define MPP17_NAND_FLASH_WEn3 MPP(17, 0x5, 0, 0, 1)
126#define MPP17_TDM_RSTn MPP(17, 0x6, 0, 1, 1) 126#define MPP17_TDM_RSTn MPP(17, 0x6, 0, 0, 1)
127#define MPP17_UNUSED MPP(17, 0x1, 0, 0, 1) 127#define MPP17_UNUSED MPP(17, 0x1, 0, 0, 1)
128 128
129 129
130#define MPP18_GPIO MPP(18, 0x0, 1, 1, 1) 130#define MPP18_GPIO MPP(18, 0x0, 1, 1, 1)
131#define MPP18_UA0_CTSn MPP(18, 0x4, 1, 0, 1) 131#define MPP18_UA0_CTSn MPP(18, 0x4, 0, 0, 1)
132#define MPP18_BOOT_FLASH_REn MPP(18, 0x5, 0, 1, 1) 132#define MPP18_BOOT_FLASH_REn MPP(18, 0x5, 0, 0, 1)
133#define MPP18_UNUSED MPP(18, 0x1, 0, 0, 1) 133#define MPP18_UNUSED MPP(18, 0x1, 0, 0, 1)
134 134
135 135
136 136
137#define MPP19_GPIO MPP(19, 0x0, 1, 1, 1) 137#define MPP19_GPIO MPP(19, 0x0, 1, 1, 1)
138#define MPP19_UA0_CTSn MPP(19, 0x4, 0, 1, 1) 138#define MPP19_UA0_CTSn MPP(19, 0x4, 0, 0, 1)
139#define MPP19_BOOT_FLASH_WEn MPP(19, 0x5, 0, 1, 1) 139#define MPP19_BOOT_FLASH_WEn MPP(19, 0x5, 0, 0, 1)
140#define MPP19_UNUSED MPP(19, 0x1, 0, 0, 1) 140#define MPP19_UNUSED MPP(19, 0x1, 0, 0, 1)
141 141
142 142
143#define MPP20_GPIO MPP(20, 0x0, 1, 1, 1) 143#define MPP20_GPIO MPP(20, 0x0, 1, 1, 1)
144#define MPP20_UA1_CTSs MPP(20, 0x4, 1, 0, 1) 144#define MPP20_UA1_CTSs MPP(20, 0x4, 0, 0, 1)
145#define MPP20_TDM_PCLK MPP(20, 0x6, 1, 1, 0) 145#define MPP20_TDM_PCLK MPP(20, 0x6, 0, 0, 0)
146#define MPP20_UNUSED MPP(20, 0x1, 0, 0, 1) 146#define MPP20_UNUSED MPP(20, 0x1, 0, 0, 1)
147 147
148 148
149 149
150#define MPP21_GPIO MPP(21, 0x0, 1, 1, 1) 150#define MPP21_GPIO MPP(21, 0x0, 1, 1, 1)
151#define MPP21_UA1_CTSs MPP(21, 0x4, 0, 1, 1) 151#define MPP21_UA1_CTSs MPP(21, 0x4, 0, 0, 1)
152#define MPP21_TDM_FSYNC MPP(21, 0x6, 1, 1, 0) 152#define MPP21_TDM_FSYNC MPP(21, 0x6, 0, 0, 0)
153#define MPP21_UNUSED MPP(21, 0x1, 0, 0, 1) 153#define MPP21_UNUSED MPP(21, 0x1, 0, 0, 1)
154 154
155 155
156 156
157#define MPP22_GPIO MPP(22, 0x0, 1, 1, 1) 157#define MPP22_GPIO MPP(22, 0x0, 1, 1, 1)
158#define MPP22_UA3_TDX MPP(22, 0x4, 0, 1, 1) 158#define MPP22_UA3_TDX MPP(22, 0x4, 0, 0, 1)
159#define MPP22_NAND_FLASH_REn2 MPP(22, 0x5, 0, 1, 1) 159#define MPP22_NAND_FLASH_REn2 MPP(22, 0x5, 0, 0, 1)
160#define MPP22_TDM_DRX MPP(22, 0x6, 1, 0, 1) 160#define MPP22_TDM_DRX MPP(22, 0x6, 0, 0, 1)
161#define MPP22_UNUSED MPP(22, 0x1, 0, 0, 1) 161#define MPP22_UNUSED MPP(22, 0x1, 0, 0, 1)
162 162
163 163
164 164
165#define MPP23_GPIO MPP(23, 0x0, 1, 1, 1) 165#define MPP23_GPIO MPP(23, 0x0, 1, 1, 1)
166#define MPP23_UA3_RDX MPP(23, 0x4, 1, 0, 1) 166#define MPP23_UA3_RDX MPP(23, 0x4, 0, 0, 1)
167#define MPP23_NAND_FLASH_WEn2 MPP(23, 0x5, 0, 1, 1) 167#define MPP23_NAND_FLASH_WEn2 MPP(23, 0x5, 0, 0, 1)
168#define MPP23_TDM_DTX MPP(23, 0x6, 0, 1, 1) 168#define MPP23_TDM_DTX MPP(23, 0x6, 0, 0, 1)
169#define MPP23_UNUSED MPP(23, 0x1, 0, 0, 1) 169#define MPP23_UNUSED MPP(23, 0x1, 0, 0, 1)
170 170
171 171
172#define MPP24_GPIO MPP(24, 0x0, 1, 1, 1) 172#define MPP24_GPIO MPP(24, 0x0, 1, 1, 1)
173#define MPP24_UA2_TXD MPP(24, 0x4, 0, 1, 1) 173#define MPP24_UA2_TXD MPP(24, 0x4, 0, 0, 1)
174#define MPP24_TDM_INTn MPP(24, 0x6, 1, 0, 1) 174#define MPP24_TDM_INTn MPP(24, 0x6, 0, 0, 1)
175#define MPP24_UNUSED MPP(24, 0x1, 0, 0, 1) 175#define MPP24_UNUSED MPP(24, 0x1, 0, 0, 1)
176 176
177 177
178#define MPP25_GPIO MPP(25, 0x0, 1, 1, 1) 178#define MPP25_GPIO MPP(25, 0x0, 1, 1, 1)
179#define MPP25_UA2_RXD MPP(25, 0x4, 1, 0, 1) 179#define MPP25_UA2_RXD MPP(25, 0x4, 0, 0, 1)
180#define MPP25_TDM_RSTn MPP(25, 0x6, 0, 1, 1) 180#define MPP25_TDM_RSTn MPP(25, 0x6, 0, 0, 1)
181#define MPP25_UNUSED MPP(25, 0x1, 0, 0, 1) 181#define MPP25_UNUSED MPP(25, 0x1, 0, 0, 1)
182 182
183 183
184#define MPP26_GPIO MPP(26, 0x0, 1, 1, 1) 184#define MPP26_GPIO MPP(26, 0x0, 1, 1, 1)
185#define MPP26_UA2_CTSn MPP(26, 0x4, 1, 0, 1) 185#define MPP26_UA2_CTSn MPP(26, 0x4, 0, 0, 1)
186#define MPP26_TDM_PCLK MPP(26, 0x6, 1, 1, 1) 186#define MPP26_TDM_PCLK MPP(26, 0x6, 0, 0, 1)
187#define MPP26_UNUSED MPP(26, 0x1, 0, 0, 1) 187#define MPP26_UNUSED MPP(26, 0x1, 0, 0, 1)
188 188
189 189
190#define MPP27_GPIO MPP(27, 0x0, 1, 1, 1) 190#define MPP27_GPIO MPP(27, 0x0, 1, 1, 1)
191#define MPP27_UA2_RTSn MPP(27, 0x4, 0, 1, 1) 191#define MPP27_UA2_RTSn MPP(27, 0x4, 0, 0, 1)
192#define MPP27_TDM_FSYNC MPP(27, 0x6, 1, 1, 1) 192#define MPP27_TDM_FSYNC MPP(27, 0x6, 0, 0, 1)
193#define MPP27_UNUSED MPP(27, 0x1, 0, 0, 1) 193#define MPP27_UNUSED MPP(27, 0x1, 0, 0, 1)
194 194
195 195
196#define MPP28_GPIO MPP(28, 0x0, 1, 1, 1) 196#define MPP28_GPIO MPP(28, 0x0, 1, 1, 1)
197#define MPP28_UA3_TXD MPP(28, 0x4, 0, 1, 1) 197#define MPP28_UA3_TXD MPP(28, 0x4, 0, 0, 1)
198#define MPP28_TDM_DRX MPP(28, 0x6, 1, 0, 1) 198#define MPP28_TDM_DRX MPP(28, 0x6, 0, 0, 1)
199#define MPP28_UNUSED MPP(28, 0x1, 0, 0, 1) 199#define MPP28_UNUSED MPP(28, 0x1, 0, 0, 1)
200 200
201#define MPP29_GPIO MPP(29, 0x0, 1, 1, 1) 201#define MPP29_GPIO MPP(29, 0x0, 1, 1, 1)
202#define MPP29_UA3_RXD MPP(29, 0x4, 1, 0, 1) 202#define MPP29_UA3_RXD MPP(29, 0x4, 0, 0, 1)
203#define MPP29_SYSRST_OUTn MPP(29, 0x5, 0, 1, 1) 203#define MPP29_SYSRST_OUTn MPP(29, 0x5, 0, 0, 1)
204#define MPP29_TDM_DTX MPP(29, 0x6, 0, 1, 1) 204#define MPP29_TDM_DTX MPP(29, 0x6, 0, 0, 1)
205#define MPP29_UNUSED MPP(29, 0x1, 0, 0, 1) 205#define MPP29_UNUSED MPP(29, 0x1, 0, 0, 1)
206 206
207#define MPP30_GPIO MPP(30, 0x0, 1, 1, 1) 207#define MPP30_GPIO MPP(30, 0x0, 1, 1, 1)
208#define MPP30_UA3_CTSn MPP(30, 0x4, 1, 0, 1) 208#define MPP30_UA3_CTSn MPP(30, 0x4, 0, 0, 1)
209#define MPP30_UNUSED MPP(30, 0x1, 0, 0, 1) 209#define MPP30_UNUSED MPP(30, 0x1, 0, 0, 1)
210 210
211#define MPP31_GPIO MPP(31, 0x0, 1, 1, 1) 211#define MPP31_GPIO MPP(31, 0x0, 1, 1, 1)
212#define MPP31_UA3_RTSn MPP(31, 0x4, 0, 1, 1) 212#define MPP31_UA3_RTSn MPP(31, 0x4, 0, 0, 1)
213#define MPP31_TDM1_SCSn MPP(31, 0x6, 0, 1, 1) 213#define MPP31_TDM1_SCSn MPP(31, 0x6, 0, 0, 1)
214#define MPP31_UNUSED MPP(31, 0x1, 0, 0, 1) 214#define MPP31_UNUSED MPP(31, 0x1, 0, 0, 1)
215 215
216 216
217#define MPP32_GPIO MPP(32, 0x1, 1, 1, 1) 217#define MPP32_GPIO MPP(32, 0x1, 1, 1, 1)
218#define MPP32_UA3_TDX MPP(32, 0x4, 0, 1, 1) 218#define MPP32_UA3_TDX MPP(32, 0x4, 0, 0, 1)
219#define MPP32_SYSRST_OUTn MPP(32, 0x5, 0, 1, 1) 219#define MPP32_SYSRST_OUTn MPP(32, 0x5, 0, 0, 1)
220#define MPP32_TDM0_RXQ MPP(32, 0x6, 0, 1, 1) 220#define MPP32_TDM0_RXQ MPP(32, 0x6, 0, 0, 1)
221#define MPP32_UNUSED MPP(32, 0x3, 0, 0, 1) 221#define MPP32_UNUSED MPP(32, 0x3, 0, 0, 1)
222 222
223 223
224#define MPP33_GPIO MPP(33, 0x1, 1, 1, 1) 224#define MPP33_GPIO MPP(33, 0x1, 1, 1, 1)
225#define MPP33_UA3_RDX MPP(33, 0x4, 1, 0, 1) 225#define MPP33_UA3_RDX MPP(33, 0x4, 0, 0, 1)
226#define MPP33_TDM0_TXQ MPP(33, 0x6, 0, 1, 1) 226#define MPP33_TDM0_TXQ MPP(33, 0x6, 0, 0, 1)
227#define MPP33_UNUSED MPP(33, 0x3, 0, 0, 1) 227#define MPP33_UNUSED MPP(33, 0x3, 0, 0, 1)
228 228
229 229
230 230
231#define MPP34_GPIO MPP(34, 0x1, 1, 1, 1) 231#define MPP34_GPIO MPP(34, 0x1, 1, 1, 1)
232#define MPP34_UA2_TDX MPP(34, 0x4, 0, 1, 1) 232#define MPP34_UA2_TDX MPP(34, 0x4, 0, 0, 1)
233#define MPP34_TDM1_RXQ MPP(34, 0x6, 0, 1, 1) 233#define MPP34_TDM1_RXQ MPP(34, 0x6, 0, 0, 1)
234#define MPP34_UNUSED MPP(34, 0x3, 0, 0, 1) 234#define MPP34_UNUSED MPP(34, 0x3, 0, 0, 1)
235 235
236 236
237 237
238#define MPP35_GPIO MPP(35, 0x1, 1, 1, 1) 238#define MPP35_GPIO MPP(35, 0x1, 1, 1, 1)
239#define MPP35_UA2_RDX MPP(35, 0x4, 1, 0, 1) 239#define MPP35_UA2_RDX MPP(35, 0x4, 0, 0, 1)
240#define MPP35_TDM1_TXQ MPP(35, 0x6, 0, 1, 1) 240#define MPP35_TDM1_TXQ MPP(35, 0x6, 0, 0, 1)
241#define MPP35_UNUSED MPP(35, 0x3, 0, 0, 1) 241#define MPP35_UNUSED MPP(35, 0x3, 0, 0, 1)
242 242
243#define MPP36_GPIO MPP(36, 0x1, 1, 1, 1) 243#define MPP36_GPIO MPP(36, 0x1, 1, 1, 1)
244#define MPP36_UA0_CTSn MPP(36, 0x2, 1, 0, 1) 244#define MPP36_UA0_CTSn MPP(36, 0x2, 0, 0, 1)
245#define MPP36_UA2_TDX MPP(36, 0x4, 0, 1, 1) 245#define MPP36_UA2_TDX MPP(36, 0x4, 0, 0, 1)
246#define MPP36_TDM0_SCSn MPP(36, 0x6, 0, 1, 1) 246#define MPP36_TDM0_SCSn MPP(36, 0x6, 0, 0, 1)
247#define MPP36_UNUSED MPP(36, 0x3, 0, 0, 1) 247#define MPP36_UNUSED MPP(36, 0x3, 0, 0, 1)
248 248
249 249
250#define MPP37_GPIO MPP(37, 0x1, 1, 1, 1) 250#define MPP37_GPIO MPP(37, 0x1, 1, 1, 1)
251#define MPP37_UA0_RTSn MPP(37, 0x2, 0, 1, 1) 251#define MPP37_UA0_RTSn MPP(37, 0x2, 0, 0, 1)
252#define MPP37_UA2_RXD MPP(37, 0x4, 1, 0, 1) 252#define MPP37_UA2_RXD MPP(37, 0x4, 0, 0, 1)
253#define MPP37_SYSRST_OUTn MPP(37, 0x5, 0, 1, 1) 253#define MPP37_SYSRST_OUTn MPP(37, 0x5, 0, 0, 1)
254#define MPP37_TDM_SCLK MPP(37, 0x6, 0, 1, 1) 254#define MPP37_TDM_SCLK MPP(37, 0x6, 0, 0, 1)
255#define MPP37_UNUSED MPP(37, 0x3, 0, 0, 1) 255#define MPP37_UNUSED MPP(37, 0x3, 0, 0, 1)
256 256
257 257
258 258
259 259
260#define MPP38_GPIO MPP(38, 0x1, 1, 1, 1) 260#define MPP38_GPIO MPP(38, 0x1, 1, 1, 1)
261#define MPP38_UA1_CTSn MPP(38, 0x2, 1, 0, 1) 261#define MPP38_UA1_CTSn MPP(38, 0x2, 0, 0, 1)
262#define MPP38_UA3_TXD MPP(38, 0x4, 0, 1, 1) 262#define MPP38_UA3_TXD MPP(38, 0x4, 0, 0, 1)
263#define MPP38_SYSRST_OUTn MPP(38, 0x5, 0, 1, 1) 263#define MPP38_SYSRST_OUTn MPP(38, 0x5, 0, 0, 1)
264#define MPP38_TDM_SMOSI MPP(38, 0x6, 0, 1, 1) 264#define MPP38_TDM_SMOSI MPP(38, 0x6, 0, 0, 1)
265#define MPP38_UNUSED MPP(38, 0x3, 0, 0, 1) 265#define MPP38_UNUSED MPP(38, 0x3, 0, 0, 1)
266 266
267 267
268 268
269 269
270#define MPP39_GPIO MPP(39, 0x1, 1, 1, 1) 270#define MPP39_GPIO MPP(39, 0x1, 1, 1, 1)
271#define MPP39_UA1_RTSn MPP(39, 0x2, 0, 1, 1) 271#define MPP39_UA1_RTSn MPP(39, 0x2, 0, 0, 1)
272#define MPP39_UA3_RXD MPP(39, 0x4, 1, 0, 1) 272#define MPP39_UA3_RXD MPP(39, 0x4, 0, 0, 1)
273#define MPP39_SYSRST_OUTn MPP(39, 0x5, 0, 1, 1) 273#define MPP39_SYSRST_OUTn MPP(39, 0x5, 0, 0, 1)
274#define MPP39_TDM_SMISO MPP(39, 0x6, 1, 0, 1) 274#define MPP39_TDM_SMISO MPP(39, 0x6, 0, 0, 1)
275#define MPP39_UNUSED MPP(39, 0x3, 0, 0, 1) 275#define MPP39_UNUSED MPP(39, 0x3, 0, 0, 1)
276 276
277 277
278 278
279#define MPP40_GPIO MPP(40, 0x1, 1, 1, 1) 279#define MPP40_GPIO MPP(40, 0x1, 1, 1, 1)
280#define MPP40_TDM_INTn MPP(40, 0x6, 1, 0, 1) 280#define MPP40_TDM_INTn MPP(40, 0x6, 0, 0, 1)
281#define MPP40_UNUSED MPP(40, 0x0, 0, 0, 1) 281#define MPP40_UNUSED MPP(40, 0x0, 0, 0, 1)
282 282
283 283
284 284
285#define MPP41_GPIO MPP(41, 0x1, 1, 1, 1) 285#define MPP41_GPIO MPP(41, 0x1, 1, 1, 1)
286#define MPP41_TDM_RSTn MPP(41, 0x6, 0, 1, 1) 286#define MPP41_TDM_RSTn MPP(41, 0x6, 0, 0, 1)
287#define MPP41_UNUSED MPP(41, 0x0, 0, 0, 1) 287#define MPP41_UNUSED MPP(41, 0x0, 0, 0, 1)
288 288
289 289
290 290
291#define MPP42_GPIO MPP(42, 0x1, 1, 1, 1) 291#define MPP42_GPIO MPP(42, 0x1, 1, 1, 1)
292#define MPP42_TDM_PCLK MPP(42, 0x6, 1, 1, 1) 292#define MPP42_TDM_PCLK MPP(42, 0x6, 0, 0, 1)
293#define MPP42_UNUSED MPP(42, 0x0, 0, 0, 1) 293#define MPP42_UNUSED MPP(42, 0x0, 0, 0, 1)
294 294
295 295
296 296
297#define MPP43_GPIO MPP(43, 0x1, 1, 1, 1) 297#define MPP43_GPIO MPP(43, 0x1, 1, 1, 1)
298#define MPP43_TDM_FSYNC MPP(43, 0x6, 1, 1, 1) 298#define MPP43_TDM_FSYNC MPP(43, 0x6, 0, 0, 1)
299#define MPP43_UNUSED MPP(43, 0x0, 0, 0, 1) 299#define MPP43_UNUSED MPP(43, 0x0, 0, 0, 1)
300 300
301 301
302 302
303#define MPP44_GPIO MPP(44, 0x1, 1, 1, 1) 303#define MPP44_GPIO MPP(44, 0x1, 1, 1, 1)
304#define MPP44_TDM_DRX MPP(44, 0x6, 1, 0, 1) 304#define MPP44_TDM_DRX MPP(44, 0x6, 0, 0, 1)
305#define MPP44_UNUSED MPP(44, 0x0, 0, 0, 1) 305#define MPP44_UNUSED MPP(44, 0x0, 0, 0, 1)
306 306
307 307
308 308
309#define MPP45_GPIO MPP(45, 0x1, 1, 1, 1) 309#define MPP45_GPIO MPP(45, 0x1, 1, 1, 1)
310#define MPP45_SATA0_ACTn MPP(45, 0x3, 0, 1, 1) 310#define MPP45_SATA0_ACTn MPP(45, 0x3, 0, 0, 1)
311#define MPP45_TDM_DRX MPP(45, 0x6, 0, 1, 1) 311#define MPP45_TDM_DRX MPP(45, 0x6, 0, 0, 1)
312#define MPP45_UNUSED MPP(45, 0x0, 0, 0, 1) 312#define MPP45_UNUSED MPP(45, 0x0, 0, 0, 1)
313 313
314 314
315#define MPP46_GPIO MPP(46, 0x1, 1, 1, 1) 315#define MPP46_GPIO MPP(46, 0x1, 1, 1, 1)
316#define MPP46_TDM_SCSn MPP(46, 0x6, 0, 1, 1) 316#define MPP46_TDM_SCSn MPP(46, 0x6, 0, 0, 1)
317#define MPP46_UNUSED MPP(46, 0x0, 0, 0, 1) 317#define MPP46_UNUSED MPP(46, 0x0, 0, 0, 1)
318 318
319 319
@@ -323,14 +323,14 @@
323 323
324 324
325#define MPP48_GPIO MPP(48, 0x1, 1, 1, 1) 325#define MPP48_GPIO MPP(48, 0x1, 1, 1, 1)
326#define MPP48_SATA1_ACTn MPP(48, 0x3, 0, 1, 1) 326#define MPP48_SATA1_ACTn MPP(48, 0x3, 0, 0, 1)
327#define MPP48_UNUSED MPP(48, 0x2, 0, 0, 1) 327#define MPP48_UNUSED MPP(48, 0x2, 0, 0, 1)
328 328
329 329
330 330
331#define MPP49_GPIO MPP(49, 0x1, 1, 1, 1) 331#define MPP49_GPIO MPP(49, 0x1, 1, 1, 1)
332#define MPP49_SATA0_ACTn MPP(49, 0x3, 0, 1, 1) 332#define MPP49_SATA0_ACTn MPP(49, 0x3, 0, 0, 1)
333#define MPP49_M_BB MPP(49, 0x4, 1, 0, 1) 333#define MPP49_M_BB MPP(49, 0x4, 0, 0, 1)
334#define MPP49_UNUSED MPP(49, 0x2, 0, 0, 1) 334#define MPP49_UNUSED MPP(49, 0x2, 0, 0, 1)
335 335
336 336
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 41e6612ecbaf..d965da45160e 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -213,13 +213,12 @@ config MACH_OMAP3_PANDORA
213 depends on ARCH_OMAP3 213 depends on ARCH_OMAP3
214 default y 214 default y
215 select OMAP_PACKAGE_CBB 215 select OMAP_PACKAGE_CBB
216 select REGULATOR_FIXED_VOLTAGE 216 select REGULATOR_FIXED_VOLTAGE if REGULATOR
217 217
218config MACH_OMAP3_TOUCHBOOK 218config MACH_OMAP3_TOUCHBOOK
219 bool "OMAP3 Touch Book" 219 bool "OMAP3 Touch Book"
220 depends on ARCH_OMAP3 220 depends on ARCH_OMAP3
221 default y 221 default y
222 select BACKLIGHT_CLASS_DEVICE
223 222
224config MACH_OMAP_3430SDP 223config MACH_OMAP_3430SDP
225 bool "OMAP 3430 SDP board" 224 bool "OMAP 3430 SDP board"
@@ -265,7 +264,7 @@ config MACH_OMAP_ZOOM2
265 select SERIAL_8250 264 select SERIAL_8250
266 select SERIAL_CORE_CONSOLE 265 select SERIAL_CORE_CONSOLE
267 select SERIAL_8250_CONSOLE 266 select SERIAL_8250_CONSOLE
268 select REGULATOR_FIXED_VOLTAGE 267 select REGULATOR_FIXED_VOLTAGE if REGULATOR
269 268
270config MACH_OMAP_ZOOM3 269config MACH_OMAP_ZOOM3
271 bool "OMAP3630 Zoom3 board" 270 bool "OMAP3630 Zoom3 board"
@@ -275,7 +274,7 @@ config MACH_OMAP_ZOOM3
275 select SERIAL_8250 274 select SERIAL_8250
276 select SERIAL_CORE_CONSOLE 275 select SERIAL_CORE_CONSOLE
277 select SERIAL_8250_CONSOLE 276 select SERIAL_8250_CONSOLE
278 select REGULATOR_FIXED_VOLTAGE 277 select REGULATOR_FIXED_VOLTAGE if REGULATOR
279 278
280config MACH_CM_T35 279config MACH_CM_T35
281 bool "CompuLab CM-T35/CM-T3730 modules" 280 bool "CompuLab CM-T35/CM-T3730 modules"
@@ -334,7 +333,7 @@ config MACH_OMAP_4430SDP
334 depends on ARCH_OMAP4 333 depends on ARCH_OMAP4
335 select OMAP_PACKAGE_CBL 334 select OMAP_PACKAGE_CBL
336 select OMAP_PACKAGE_CBS 335 select OMAP_PACKAGE_CBS
337 select REGULATOR_FIXED_VOLTAGE 336 select REGULATOR_FIXED_VOLTAGE if REGULATOR
338 337
339config MACH_OMAP4_PANDA 338config MACH_OMAP4_PANDA
340 bool "OMAP4 Panda Board" 339 bool "OMAP4 Panda Board"
@@ -342,7 +341,7 @@ config MACH_OMAP4_PANDA
342 depends on ARCH_OMAP4 341 depends on ARCH_OMAP4
343 select OMAP_PACKAGE_CBL 342 select OMAP_PACKAGE_CBL
344 select OMAP_PACKAGE_CBS 343 select OMAP_PACKAGE_CBS
345 select REGULATOR_FIXED_VOLTAGE 344 select REGULATOR_FIXED_VOLTAGE if REGULATOR
346 345
347config OMAP3_EMU 346config OMAP3_EMU
348 bool "OMAP3 debugging peripherals" 347 bool "OMAP3 debugging peripherals"
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index fc9b238cbc19..bd76394ccaf8 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -11,9 +11,9 @@ hwmod-common = omap_hwmod.o \
11 omap_hwmod_common_data.o 11 omap_hwmod_common_data.o
12clock-common = clock.o clock_common_data.o \ 12clock-common = clock.o clock_common_data.o \
13 clkt_dpll.o clkt_clksel.o 13 clkt_dpll.o clkt_clksel.o
14secure-common = omap-smc.o omap-secure.o 14secure-common = omap-smc.o omap-secure.o
15 15
16obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(hwmod-common) $(secure-common) 16obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(hwmod-common)
17obj-$(CONFIG_ARCH_OMAP3) += $(omap-2-3-common) $(hwmod-common) $(secure-common) 17obj-$(CONFIG_ARCH_OMAP3) += $(omap-2-3-common) $(hwmod-common) $(secure-common)
18obj-$(CONFIG_ARCH_OMAP4) += prm44xx.o $(hwmod-common) $(secure-common) 18obj-$(CONFIG_ARCH_OMAP4) += prm44xx.o $(hwmod-common) $(secure-common)
19 19
diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c
index 39fba9df17fb..4e9071589bfb 100644
--- a/arch/arm/mach-omap2/board-4430sdp.c
+++ b/arch/arm/mach-omap2/board-4430sdp.c
@@ -52,8 +52,9 @@
52#define ETH_KS8851_QUART 138 52#define ETH_KS8851_QUART 138
53#define OMAP4_SFH7741_SENSOR_OUTPUT_GPIO 184 53#define OMAP4_SFH7741_SENSOR_OUTPUT_GPIO 184
54#define OMAP4_SFH7741_ENABLE_GPIO 188 54#define OMAP4_SFH7741_ENABLE_GPIO 188
55#define HDMI_GPIO_HPD 60 /* Hot plug pin for HDMI */ 55#define HDMI_GPIO_CT_CP_HPD 60 /* HPD mode enable/disable */
56#define HDMI_GPIO_LS_OE 41 /* Level shifter for HDMI */ 56#define HDMI_GPIO_LS_OE 41 /* Level shifter for HDMI */
57#define HDMI_GPIO_HPD 63 /* Hotplug detect */
57#define DISPLAY_SEL_GPIO 59 /* LCD2/PicoDLP switch */ 58#define DISPLAY_SEL_GPIO 59 /* LCD2/PicoDLP switch */
58#define DLP_POWER_ON_GPIO 40 59#define DLP_POWER_ON_GPIO 40
59 60
@@ -603,8 +604,9 @@ static void __init omap_sfh7741prox_init(void)
603} 604}
604 605
605static struct gpio sdp4430_hdmi_gpios[] = { 606static struct gpio sdp4430_hdmi_gpios[] = {
606 { HDMI_GPIO_HPD, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_hpd" }, 607 { HDMI_GPIO_CT_CP_HPD, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ct_cp_hpd" },
607 { HDMI_GPIO_LS_OE, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ls_oe" }, 608 { HDMI_GPIO_LS_OE, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ls_oe" },
609 { HDMI_GPIO_HPD, GPIOF_DIR_IN, "hdmi_gpio_hpd" },
608}; 610};
609 611
610static int sdp4430_panel_enable_hdmi(struct omap_dss_device *dssdev) 612static int sdp4430_panel_enable_hdmi(struct omap_dss_device *dssdev)
@@ -621,8 +623,7 @@ static int sdp4430_panel_enable_hdmi(struct omap_dss_device *dssdev)
621 623
622static void sdp4430_panel_disable_hdmi(struct omap_dss_device *dssdev) 624static void sdp4430_panel_disable_hdmi(struct omap_dss_device *dssdev)
623{ 625{
624 gpio_free(HDMI_GPIO_LS_OE); 626 gpio_free_array(sdp4430_hdmi_gpios, ARRAY_SIZE(sdp4430_hdmi_gpios));
625 gpio_free(HDMI_GPIO_HPD);
626} 627}
627 628
628static struct nokia_dsi_panel_data dsi1_panel = { 629static struct nokia_dsi_panel_data dsi1_panel = {
@@ -738,6 +739,10 @@ static void sdp4430_lcd_init(void)
738 pr_err("%s: Could not get lcd2_reset_gpio\n", __func__); 739 pr_err("%s: Could not get lcd2_reset_gpio\n", __func__);
739} 740}
740 741
742static struct omap_dss_hdmi_data sdp4430_hdmi_data = {
743 .hpd_gpio = HDMI_GPIO_HPD,
744};
745
741static struct omap_dss_device sdp4430_hdmi_device = { 746static struct omap_dss_device sdp4430_hdmi_device = {
742 .name = "hdmi", 747 .name = "hdmi",
743 .driver_name = "hdmi_panel", 748 .driver_name = "hdmi_panel",
@@ -745,6 +750,7 @@ static struct omap_dss_device sdp4430_hdmi_device = {
745 .platform_enable = sdp4430_panel_enable_hdmi, 750 .platform_enable = sdp4430_panel_enable_hdmi,
746 .platform_disable = sdp4430_panel_disable_hdmi, 751 .platform_disable = sdp4430_panel_disable_hdmi,
747 .channel = OMAP_DSS_CHANNEL_DIGIT, 752 .channel = OMAP_DSS_CHANNEL_DIGIT,
753 .data = &sdp4430_hdmi_data,
748}; 754};
749 755
750static struct picodlp_panel_data sdp4430_picodlp_pdata = { 756static struct picodlp_panel_data sdp4430_picodlp_pdata = {
@@ -808,7 +814,7 @@ static struct omap_dss_board_info sdp4430_dss_data = {
808 .default_device = &sdp4430_lcd_device, 814 .default_device = &sdp4430_lcd_device,
809}; 815};
810 816
811static void omap_4430sdp_display_init(void) 817static void __init omap_4430sdp_display_init(void)
812{ 818{
813 int r; 819 int r;
814 820
@@ -829,6 +835,10 @@ static void omap_4430sdp_display_init(void)
829 omap_hdmi_init(OMAP_HDMI_SDA_SCL_EXTERNAL_PULLUP); 835 omap_hdmi_init(OMAP_HDMI_SDA_SCL_EXTERNAL_PULLUP);
830 else 836 else
831 omap_hdmi_init(0); 837 omap_hdmi_init(0);
838
839 omap_mux_init_gpio(HDMI_GPIO_LS_OE, OMAP_PIN_OUTPUT);
840 omap_mux_init_gpio(HDMI_GPIO_CT_CP_HPD, OMAP_PIN_OUTPUT);
841 omap_mux_init_gpio(HDMI_GPIO_HPD, OMAP_PIN_INPUT_PULLDOWN);
832} 842}
833 843
834#ifdef CONFIG_OMAP_MUX 844#ifdef CONFIG_OMAP_MUX
@@ -841,7 +851,7 @@ static struct omap_board_mux board_mux[] __initdata = {
841#define board_mux NULL 851#define board_mux NULL
842 #endif 852 #endif
843 853
844static void omap4_sdp4430_wifi_mux_init(void) 854static void __init omap4_sdp4430_wifi_mux_init(void)
845{ 855{
846 omap_mux_init_gpio(GPIO_WIFI_IRQ, OMAP_PIN_INPUT | 856 omap_mux_init_gpio(GPIO_WIFI_IRQ, OMAP_PIN_INPUT |
847 OMAP_PIN_OFF_WAKEUPENABLE); 857 OMAP_PIN_OFF_WAKEUPENABLE);
@@ -868,12 +878,17 @@ static struct wl12xx_platform_data omap4_sdp4430_wlan_data __initdata = {
868 .board_tcxo_clock = WL12XX_TCXOCLOCK_26, 878 .board_tcxo_clock = WL12XX_TCXOCLOCK_26,
869}; 879};
870 880
871static void omap4_sdp4430_wifi_init(void) 881static void __init omap4_sdp4430_wifi_init(void)
872{ 882{
883 int ret;
884
873 omap4_sdp4430_wifi_mux_init(); 885 omap4_sdp4430_wifi_mux_init();
874 if (wl12xx_set_platform_data(&omap4_sdp4430_wlan_data)) 886 ret = wl12xx_set_platform_data(&omap4_sdp4430_wlan_data);
875 pr_err("Error setting wl12xx data\n"); 887 if (ret)
876 platform_device_register(&omap_vwlan_device); 888 pr_err("Error setting wl12xx data: %d\n", ret);
889 ret = platform_device_register(&omap_vwlan_device);
890 if (ret)
891 pr_err("Error registering wl12xx device: %d\n", ret);
877} 892}
878 893
879static void __init omap_4430sdp_init(void) 894static void __init omap_4430sdp_init(void)
diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c
index e921e3be24a4..d73316ed4207 100644
--- a/arch/arm/mach-omap2/board-cm-t35.c
+++ b/arch/arm/mach-omap2/board-cm-t35.c
@@ -437,7 +437,7 @@ static struct usbhs_omap_board_data usbhs_bdata __initdata = {
437 .reset_gpio_port[2] = -EINVAL 437 .reset_gpio_port[2] = -EINVAL
438}; 438};
439 439
440static void cm_t35_init_usbh(void) 440static void __init cm_t35_init_usbh(void)
441{ 441{
442 int err; 442 int err;
443 443
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index d58756060483..ad497620539b 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -17,6 +17,7 @@
17#include <linux/i2c/twl.h> 17#include <linux/i2c/twl.h>
18 18
19#include <mach/hardware.h> 19#include <mach/hardware.h>
20#include <asm/hardware/gic.h>
20#include <asm/mach/arch.h> 21#include <asm/mach/arch.h>
21 22
22#include <plat/board.h> 23#include <plat/board.h>
@@ -102,6 +103,7 @@ DT_MACHINE_START(OMAP242X_DT, "Generic OMAP2420 (Flattened Device Tree)")
102 .map_io = omap242x_map_io, 103 .map_io = omap242x_map_io,
103 .init_early = omap2420_init_early, 104 .init_early = omap2420_init_early,
104 .init_irq = omap2_init_irq, 105 .init_irq = omap2_init_irq,
106 .handle_irq = omap2_intc_handle_irq,
105 .init_machine = omap_generic_init, 107 .init_machine = omap_generic_init,
106 .timer = &omap2_timer, 108 .timer = &omap2_timer,
107 .dt_compat = omap242x_boards_compat, 109 .dt_compat = omap242x_boards_compat,
@@ -141,6 +143,7 @@ DT_MACHINE_START(OMAP3_DT, "Generic OMAP3 (Flattened Device Tree)")
141 .map_io = omap3_map_io, 143 .map_io = omap3_map_io,
142 .init_early = omap3430_init_early, 144 .init_early = omap3430_init_early,
143 .init_irq = omap3_init_irq, 145 .init_irq = omap3_init_irq,
146 .handle_irq = omap3_intc_handle_irq,
144 .init_machine = omap3_init, 147 .init_machine = omap3_init,
145 .timer = &omap3_timer, 148 .timer = &omap3_timer,
146 .dt_compat = omap3_boards_compat, 149 .dt_compat = omap3_boards_compat,
@@ -160,6 +163,7 @@ DT_MACHINE_START(OMAP4_DT, "Generic OMAP4 (Flattened Device Tree)")
160 .map_io = omap4_map_io, 163 .map_io = omap4_map_io,
161 .init_early = omap4430_init_early, 164 .init_early = omap4430_init_early,
162 .init_irq = gic_init_irq, 165 .init_irq = gic_init_irq,
166 .handle_irq = gic_handle_irq,
163 .init_machine = omap4_init, 167 .init_machine = omap4_init,
164 .timer = &omap4_timer, 168 .timer = &omap4_timer,
165 .dt_compat = omap4_boards_compat, 169 .dt_compat = omap4_boards_compat,
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
index 003fe34c9343..c775bead1497 100644
--- a/arch/arm/mach-omap2/board-omap3evm.c
+++ b/arch/arm/mach-omap2/board-omap3evm.c
@@ -617,6 +617,21 @@ static struct gpio omap3_evm_ehci_gpios[] __initdata = {
617 { OMAP3_EVM_EHCI_SELECT, GPIOF_OUT_INIT_LOW, "select EHCI port" }, 617 { OMAP3_EVM_EHCI_SELECT, GPIOF_OUT_INIT_LOW, "select EHCI port" },
618}; 618};
619 619
620static void __init omap3_evm_wl12xx_init(void)
621{
622#ifdef CONFIG_WL12XX_PLATFORM_DATA
623 int ret;
624
625 /* WL12xx WLAN Init */
626 ret = wl12xx_set_platform_data(&omap3evm_wlan_data);
627 if (ret)
628 pr_err("error setting wl12xx data: %d\n", ret);
629 ret = platform_device_register(&omap3evm_wlan_regulator);
630 if (ret)
631 pr_err("error registering wl12xx device: %d\n", ret);
632#endif
633}
634
620static void __init omap3_evm_init(void) 635static void __init omap3_evm_init(void)
621{ 636{
622 omap3_evm_get_revision(); 637 omap3_evm_get_revision();
@@ -665,13 +680,7 @@ static void __init omap3_evm_init(void)
665 omap_ads7846_init(1, OMAP3_EVM_TS_GPIO, 310, NULL); 680 omap_ads7846_init(1, OMAP3_EVM_TS_GPIO, 310, NULL);
666 omap3evm_init_smsc911x(); 681 omap3evm_init_smsc911x();
667 omap3_evm_display_init(); 682 omap3_evm_display_init();
668 683 omap3_evm_wl12xx_init();
669#ifdef CONFIG_WL12XX_PLATFORM_DATA
670 /* WL12xx WLAN Init */
671 if (wl12xx_set_platform_data(&omap3evm_wlan_data))
672 pr_err("error setting wl12xx data\n");
673 platform_device_register(&omap3evm_wlan_regulator);
674#endif
675} 684}
676 685
677MACHINE_START(OMAP3EVM, "OMAP3 EVM") 686MACHINE_START(OMAP3EVM, "OMAP3 EVM")
diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c
index 30ad40db2cf3..28fc271f7031 100644
--- a/arch/arm/mach-omap2/board-omap4panda.c
+++ b/arch/arm/mach-omap2/board-omap4panda.c
@@ -51,8 +51,9 @@
51#define GPIO_HUB_NRESET 62 51#define GPIO_HUB_NRESET 62
52#define GPIO_WIFI_PMENA 43 52#define GPIO_WIFI_PMENA 43
53#define GPIO_WIFI_IRQ 53 53#define GPIO_WIFI_IRQ 53
54#define HDMI_GPIO_HPD 60 /* Hot plug pin for HDMI */ 54#define HDMI_GPIO_CT_CP_HPD 60 /* HPD mode enable/disable */
55#define HDMI_GPIO_LS_OE 41 /* Level shifter for HDMI */ 55#define HDMI_GPIO_LS_OE 41 /* Level shifter for HDMI */
56#define HDMI_GPIO_HPD 63 /* Hotplug detect */
56 57
57/* wl127x BT, FM, GPS connectivity chip */ 58/* wl127x BT, FM, GPS connectivity chip */
58static int wl1271_gpios[] = {46, -1, -1}; 59static int wl1271_gpios[] = {46, -1, -1};
@@ -413,8 +414,9 @@ int __init omap4_panda_dvi_init(void)
413} 414}
414 415
415static struct gpio panda_hdmi_gpios[] = { 416static struct gpio panda_hdmi_gpios[] = {
416 { HDMI_GPIO_HPD, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_hpd" }, 417 { HDMI_GPIO_CT_CP_HPD, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ct_cp_hpd" },
417 { HDMI_GPIO_LS_OE, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ls_oe" }, 418 { HDMI_GPIO_LS_OE, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ls_oe" },
419 { HDMI_GPIO_HPD, GPIOF_DIR_IN, "hdmi_gpio_hpd" },
418}; 420};
419 421
420static int omap4_panda_panel_enable_hdmi(struct omap_dss_device *dssdev) 422static int omap4_panda_panel_enable_hdmi(struct omap_dss_device *dssdev)
@@ -431,10 +433,13 @@ static int omap4_panda_panel_enable_hdmi(struct omap_dss_device *dssdev)
431 433
432static void omap4_panda_panel_disable_hdmi(struct omap_dss_device *dssdev) 434static void omap4_panda_panel_disable_hdmi(struct omap_dss_device *dssdev)
433{ 435{
434 gpio_free(HDMI_GPIO_LS_OE); 436 gpio_free_array(panda_hdmi_gpios, ARRAY_SIZE(panda_hdmi_gpios));
435 gpio_free(HDMI_GPIO_HPD);
436} 437}
437 438
439static struct omap_dss_hdmi_data omap4_panda_hdmi_data = {
440 .hpd_gpio = HDMI_GPIO_HPD,
441};
442
438static struct omap_dss_device omap4_panda_hdmi_device = { 443static struct omap_dss_device omap4_panda_hdmi_device = {
439 .name = "hdmi", 444 .name = "hdmi",
440 .driver_name = "hdmi_panel", 445 .driver_name = "hdmi_panel",
@@ -442,6 +447,7 @@ static struct omap_dss_device omap4_panda_hdmi_device = {
442 .platform_enable = omap4_panda_panel_enable_hdmi, 447 .platform_enable = omap4_panda_panel_enable_hdmi,
443 .platform_disable = omap4_panda_panel_disable_hdmi, 448 .platform_disable = omap4_panda_panel_disable_hdmi,
444 .channel = OMAP_DSS_CHANNEL_DIGIT, 449 .channel = OMAP_DSS_CHANNEL_DIGIT,
450 .data = &omap4_panda_hdmi_data,
445}; 451};
446 452
447static struct omap_dss_device *omap4_panda_dss_devices[] = { 453static struct omap_dss_device *omap4_panda_dss_devices[] = {
@@ -473,18 +479,24 @@ void omap4_panda_display_init(void)
473 omap_hdmi_init(OMAP_HDMI_SDA_SCL_EXTERNAL_PULLUP); 479 omap_hdmi_init(OMAP_HDMI_SDA_SCL_EXTERNAL_PULLUP);
474 else 480 else
475 omap_hdmi_init(0); 481 omap_hdmi_init(0);
482
483 omap_mux_init_gpio(HDMI_GPIO_LS_OE, OMAP_PIN_OUTPUT);
484 omap_mux_init_gpio(HDMI_GPIO_CT_CP_HPD, OMAP_PIN_OUTPUT);
485 omap_mux_init_gpio(HDMI_GPIO_HPD, OMAP_PIN_INPUT_PULLDOWN);
476} 486}
477 487
478static void __init omap4_panda_init(void) 488static void __init omap4_panda_init(void)
479{ 489{
480 int package = OMAP_PACKAGE_CBS; 490 int package = OMAP_PACKAGE_CBS;
491 int ret;
481 492
482 if (omap_rev() == OMAP4430_REV_ES1_0) 493 if (omap_rev() == OMAP4430_REV_ES1_0)
483 package = OMAP_PACKAGE_CBL; 494 package = OMAP_PACKAGE_CBL;
484 omap4_mux_init(board_mux, NULL, package); 495 omap4_mux_init(board_mux, NULL, package);
485 496
486 if (wl12xx_set_platform_data(&omap_panda_wlan_data)) 497 ret = wl12xx_set_platform_data(&omap_panda_wlan_data);
487 pr_err("error setting wl12xx data\n"); 498 if (ret)
499 pr_err("error setting wl12xx data: %d\n", ret);
488 500
489 omap4_panda_i2c_init(); 501 omap4_panda_i2c_init();
490 platform_add_devices(panda_devices, ARRAY_SIZE(panda_devices)); 502 platform_add_devices(panda_devices, ARRAY_SIZE(panda_devices));
diff --git a/arch/arm/mach-omap2/board-zoom-peripherals.c b/arch/arm/mach-omap2/board-zoom-peripherals.c
index 8d7ce11cfeaf..c126461836ac 100644
--- a/arch/arm/mach-omap2/board-zoom-peripherals.c
+++ b/arch/arm/mach-omap2/board-zoom-peripherals.c
@@ -296,8 +296,10 @@ static void enable_board_wakeup_source(void)
296 296
297void __init zoom_peripherals_init(void) 297void __init zoom_peripherals_init(void)
298{ 298{
299 if (wl12xx_set_platform_data(&omap_zoom_wlan_data)) 299 int ret = wl12xx_set_platform_data(&omap_zoom_wlan_data);
300 pr_err("error setting wl12xx data\n"); 300
301 if (ret)
302 pr_err("error setting wl12xx data: %d\n", ret);
301 303
302 omap_i2c_init(); 304 omap_i2c_init();
303 platform_device_register(&omap_vwlan_device); 305 platform_device_register(&omap_vwlan_device);
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 0b510ad01a00..283d11eae693 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -405,6 +405,7 @@ static int omap_mcspi_init(struct omap_hwmod *oh, void *unused)
405 break; 405 break;
406 default: 406 default:
407 pr_err("Invalid McSPI Revision value\n"); 407 pr_err("Invalid McSPI Revision value\n");
408 kfree(pdata);
408 return -EINVAL; 409 return -EINVAL;
409 } 410 }
410 411
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index 3c446d1a1781..3677b1f58b85 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -103,12 +103,8 @@ static void omap4_hdmi_mux_pads(enum omap_hdmi_flags flags)
103 u32 reg; 103 u32 reg;
104 u16 control_i2c_1; 104 u16 control_i2c_1;
105 105
106 /* PAD0_HDMI_HPD_PAD1_HDMI_CEC */
107 omap_mux_init_signal("hdmi_hpd",
108 OMAP_PIN_INPUT_PULLUP);
109 omap_mux_init_signal("hdmi_cec", 106 omap_mux_init_signal("hdmi_cec",
110 OMAP_PIN_INPUT_PULLUP); 107 OMAP_PIN_INPUT_PULLUP);
111 /* PAD0_HDMI_DDC_SCL_PAD1_HDMI_DDC_SDA */
112 omap_mux_init_signal("hdmi_ddc_scl", 108 omap_mux_init_signal("hdmi_ddc_scl",
113 OMAP_PIN_INPUT_PULLUP); 109 OMAP_PIN_INPUT_PULLUP);
114 omap_mux_init_signal("hdmi_ddc_sda", 110 omap_mux_init_signal("hdmi_ddc_sda",
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index 130034bf01d5..dfffbbf4c009 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -528,7 +528,13 @@ int gpmc_cs_configure(int cs, int cmd, int wval)
528 528
529 case GPMC_CONFIG_DEV_SIZE: 529 case GPMC_CONFIG_DEV_SIZE:
530 regval = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1); 530 regval = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
531
532 /* clear 2 target bits */
533 regval &= ~GPMC_CONFIG1_DEVICESIZE(3);
534
535 /* set the proper value */
531 regval |= GPMC_CONFIG1_DEVICESIZE(wval); 536 regval |= GPMC_CONFIG1_DEVICESIZE(wval);
537
532 gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, regval); 538 gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, regval);
533 break; 539 break;
534 540
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
index bd844af13af5..b40c28895298 100644
--- a/arch/arm/mach-omap2/hsmmc.c
+++ b/arch/arm/mach-omap2/hsmmc.c
@@ -175,14 +175,15 @@ static void hsmmc2_select_input_clk_src(struct omap_mmc_platform_data *mmc)
175{ 175{
176 u32 reg; 176 u32 reg;
177 177
178 if (mmc->slots[0].internal_clock) { 178 reg = omap_ctrl_readl(control_devconf1_offset);
179 reg = omap_ctrl_readl(control_devconf1_offset); 179 if (mmc->slots[0].internal_clock)
180 reg |= OMAP2_MMCSDIO2ADPCLKISEL; 180 reg |= OMAP2_MMCSDIO2ADPCLKISEL;
181 omap_ctrl_writel(reg, control_devconf1_offset); 181 else
182 } 182 reg &= ~OMAP2_MMCSDIO2ADPCLKISEL;
183 omap_ctrl_writel(reg, control_devconf1_offset);
183} 184}
184 185
185static void hsmmc23_before_set_reg(struct device *dev, int slot, 186static void hsmmc2_before_set_reg(struct device *dev, int slot,
186 int power_on, int vdd) 187 int power_on, int vdd)
187{ 188{
188 struct omap_mmc_platform_data *mmc = dev->platform_data; 189 struct omap_mmc_platform_data *mmc = dev->platform_data;
@@ -292,8 +293,8 @@ static inline void omap_hsmmc_mux(struct omap_mmc_platform_data *mmc_controller,
292 } 293 }
293} 294}
294 295
295static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c, 296static int omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
296 struct omap_mmc_platform_data *mmc) 297 struct omap_mmc_platform_data *mmc)
297{ 298{
298 char *hc_name; 299 char *hc_name;
299 300
@@ -407,14 +408,13 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
407 c->caps &= ~MMC_CAP_8_BIT_DATA; 408 c->caps &= ~MMC_CAP_8_BIT_DATA;
408 c->caps |= MMC_CAP_4_BIT_DATA; 409 c->caps |= MMC_CAP_4_BIT_DATA;
409 } 410 }
410 /* FALLTHROUGH */
411 case 3:
412 if (mmc->slots[0].features & HSMMC_HAS_PBIAS) { 411 if (mmc->slots[0].features & HSMMC_HAS_PBIAS) {
413 /* off-chip level shifting, or none */ 412 /* off-chip level shifting, or none */
414 mmc->slots[0].before_set_reg = hsmmc23_before_set_reg; 413 mmc->slots[0].before_set_reg = hsmmc2_before_set_reg;
415 mmc->slots[0].after_set_reg = NULL; 414 mmc->slots[0].after_set_reg = NULL;
416 } 415 }
417 break; 416 break;
417 case 3:
418 case 4: 418 case 4:
419 case 5: 419 case 5:
420 mmc->slots[0].before_set_reg = NULL; 420 mmc->slots[0].before_set_reg = NULL;
@@ -430,7 +430,7 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
430 430
431#define MAX_OMAP_MMC_HWMOD_NAME_LEN 16 431#define MAX_OMAP_MMC_HWMOD_NAME_LEN 16
432 432
433void __init omap_init_hsmmc(struct omap2_hsmmc_info *hsmmcinfo, int ctrl_nr) 433void omap_init_hsmmc(struct omap2_hsmmc_info *hsmmcinfo, int ctrl_nr)
434{ 434{
435 struct omap_hwmod *oh; 435 struct omap_hwmod *oh;
436 struct platform_device *pdev; 436 struct platform_device *pdev;
@@ -487,7 +487,7 @@ done:
487 kfree(mmc_data); 487 kfree(mmc_data);
488} 488}
489 489
490void __init omap2_hsmmc_init(struct omap2_hsmmc_info *controllers) 490void omap2_hsmmc_init(struct omap2_hsmmc_info *controllers)
491{ 491{
492 u32 reg; 492 u32 reg;
493 493
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 3f174d51f67f..eb50c29fb644 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -388,7 +388,7 @@ static void __init omap_hwmod_init_postsetup(void)
388 omap_pm_if_early_init(); 388 omap_pm_if_early_init();
389} 389}
390 390
391#ifdef CONFIG_ARCH_OMAP2 391#ifdef CONFIG_SOC_OMAP2420
392void __init omap2420_init_early(void) 392void __init omap2420_init_early(void)
393{ 393{
394 omap2_set_globals_242x(); 394 omap2_set_globals_242x();
@@ -400,7 +400,9 @@ void __init omap2420_init_early(void)
400 omap_hwmod_init_postsetup(); 400 omap_hwmod_init_postsetup();
401 omap2420_clk_init(); 401 omap2420_clk_init();
402} 402}
403#endif
403 404
405#ifdef CONFIG_SOC_OMAP2430
404void __init omap2430_init_early(void) 406void __init omap2430_init_early(void)
405{ 407{
406 omap2_set_globals_243x(); 408 omap2_set_globals_243x();
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index e1cc75d1a57a..fb8bc9fa43b1 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -100,8 +100,8 @@ void omap_mux_write_array(struct omap_mux_partition *partition,
100 100
101static char *omap_mux_options; 101static char *omap_mux_options;
102 102
103static int __init _omap_mux_init_gpio(struct omap_mux_partition *partition, 103static int _omap_mux_init_gpio(struct omap_mux_partition *partition,
104 int gpio, int val) 104 int gpio, int val)
105{ 105{
106 struct omap_mux_entry *e; 106 struct omap_mux_entry *e;
107 struct omap_mux *gpio_mux = NULL; 107 struct omap_mux *gpio_mux = NULL;
@@ -145,7 +145,7 @@ static int __init _omap_mux_init_gpio(struct omap_mux_partition *partition,
145 return 0; 145 return 0;
146} 146}
147 147
148int __init omap_mux_init_gpio(int gpio, int val) 148int omap_mux_init_gpio(int gpio, int val)
149{ 149{
150 struct omap_mux_partition *partition; 150 struct omap_mux_partition *partition;
151 int ret; 151 int ret;
@@ -159,9 +159,9 @@ int __init omap_mux_init_gpio(int gpio, int val)
159 return -ENODEV; 159 return -ENODEV;
160} 160}
161 161
162static int __init _omap_mux_get_by_name(struct omap_mux_partition *partition, 162static int _omap_mux_get_by_name(struct omap_mux_partition *partition,
163 const char *muxname, 163 const char *muxname,
164 struct omap_mux **found_mux) 164 struct omap_mux **found_mux)
165{ 165{
166 struct omap_mux *mux = NULL; 166 struct omap_mux *mux = NULL;
167 struct omap_mux_entry *e; 167 struct omap_mux_entry *e;
@@ -240,7 +240,7 @@ omap_mux_get_by_name(const char *muxname,
240 return -ENODEV; 240 return -ENODEV;
241} 241}
242 242
243int __init omap_mux_init_signal(const char *muxname, int val) 243int omap_mux_init_signal(const char *muxname, int val)
244{ 244{
245 struct omap_mux_partition *partition = NULL; 245 struct omap_mux_partition *partition = NULL;
246 struct omap_mux *mux = NULL; 246 struct omap_mux *mux = NULL;
@@ -1094,8 +1094,8 @@ static void omap_mux_init_package(struct omap_mux *superset,
1094 omap_mux_package_init_balls(package_balls, superset); 1094 omap_mux_package_init_balls(package_balls, superset);
1095} 1095}
1096 1096
1097static void omap_mux_init_signals(struct omap_mux_partition *partition, 1097static void __init omap_mux_init_signals(struct omap_mux_partition *partition,
1098 struct omap_board_mux *board_mux) 1098 struct omap_board_mux *board_mux)
1099{ 1099{
1100 omap_mux_set_cmdline_signals(); 1100 omap_mux_set_cmdline_signals();
1101 omap_mux_write_array(partition, board_mux); 1101 omap_mux_write_array(partition, board_mux);
@@ -1109,8 +1109,8 @@ static void omap_mux_init_package(struct omap_mux *superset,
1109{ 1109{
1110} 1110}
1111 1111
1112static void omap_mux_init_signals(struct omap_mux_partition *partition, 1112static void __init omap_mux_init_signals(struct omap_mux_partition *partition,
1113 struct omap_board_mux *board_mux) 1113 struct omap_board_mux *board_mux)
1114{ 1114{
1115} 1115}
1116 1116
diff --git a/arch/arm/mach-omap2/omap-headsmp.S b/arch/arm/mach-omap2/omap-headsmp.S
index b13ef7ef5ef4..503ac777a2ba 100644
--- a/arch/arm/mach-omap2/omap-headsmp.S
+++ b/arch/arm/mach-omap2/omap-headsmp.S
@@ -18,6 +18,7 @@
18#include <linux/linkage.h> 18#include <linux/linkage.h>
19#include <linux/init.h> 19#include <linux/init.h>
20 20
21 __CPUINIT
21/* 22/*
22 * OMAP4 specific entry point for secondary CPU to jump from ROM 23 * OMAP4 specific entry point for secondary CPU to jump from ROM
23 * code. This routine also provides a holding flag into which 24 * code. This routine also provides a holding flag into which
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 5192cabb40ed..eba6cd3816f5 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1517,8 +1517,8 @@ static int _enable(struct omap_hwmod *oh)
1517 if (oh->_state != _HWMOD_STATE_INITIALIZED && 1517 if (oh->_state != _HWMOD_STATE_INITIALIZED &&
1518 oh->_state != _HWMOD_STATE_IDLE && 1518 oh->_state != _HWMOD_STATE_IDLE &&
1519 oh->_state != _HWMOD_STATE_DISABLED) { 1519 oh->_state != _HWMOD_STATE_DISABLED) {
1520 WARN(1, "omap_hwmod: %s: enabled state can only be entered " 1520 WARN(1, "omap_hwmod: %s: enabled state can only be entered from initialized, idle, or disabled state\n",
1521 "from initialized, idle, or disabled state\n", oh->name); 1521 oh->name);
1522 return -EINVAL; 1522 return -EINVAL;
1523 } 1523 }
1524 1524
@@ -1600,8 +1600,8 @@ static int _idle(struct omap_hwmod *oh)
1600 pr_debug("omap_hwmod: %s: idling\n", oh->name); 1600 pr_debug("omap_hwmod: %s: idling\n", oh->name);
1601 1601
1602 if (oh->_state != _HWMOD_STATE_ENABLED) { 1602 if (oh->_state != _HWMOD_STATE_ENABLED) {
1603 WARN(1, "omap_hwmod: %s: idle state can only be entered from " 1603 WARN(1, "omap_hwmod: %s: idle state can only be entered from enabled state\n",
1604 "enabled state\n", oh->name); 1604 oh->name);
1605 return -EINVAL; 1605 return -EINVAL;
1606 } 1606 }
1607 1607
@@ -1682,8 +1682,8 @@ static int _shutdown(struct omap_hwmod *oh)
1682 1682
1683 if (oh->_state != _HWMOD_STATE_IDLE && 1683 if (oh->_state != _HWMOD_STATE_IDLE &&
1684 oh->_state != _HWMOD_STATE_ENABLED) { 1684 oh->_state != _HWMOD_STATE_ENABLED) {
1685 WARN(1, "omap_hwmod: %s: disabled state can only be entered " 1685 WARN(1, "omap_hwmod: %s: disabled state can only be entered from idle, or enabled state\n",
1686 "from idle, or enabled state\n", oh->name); 1686 oh->name);
1687 return -EINVAL; 1687 return -EINVAL;
1688 } 1688 }
1689 1689
@@ -2240,8 +2240,8 @@ void omap_hwmod_ocp_barrier(struct omap_hwmod *oh)
2240 BUG_ON(!oh); 2240 BUG_ON(!oh);
2241 2241
2242 if (!oh->class->sysc || !oh->class->sysc->sysc_flags) { 2242 if (!oh->class->sysc || !oh->class->sysc->sysc_flags) {
2243 WARN(1, "omap_device: %s: OCP barrier impossible due to " 2243 WARN(1, "omap_device: %s: OCP barrier impossible due to device configuration\n",
2244 "device configuration\n", oh->name); 2244 oh->name);
2245 return; 2245 return;
2246 } 2246 }
2247 2247
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c
index c11273da5dcc..f08e442af397 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c
@@ -56,27 +56,6 @@ struct omap_hwmod_class omap2_dss_hwmod_class = {
56}; 56};
57 57
58/* 58/*
59 * 'dispc' class
60 * display controller
61 */
62
63static struct omap_hwmod_class_sysconfig omap2_dispc_sysc = {
64 .rev_offs = 0x0000,
65 .sysc_offs = 0x0010,
66 .syss_offs = 0x0014,
67 .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
68 SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
69 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
70 MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
71 .sysc_fields = &omap_hwmod_sysc_type1,
72};
73
74struct omap_hwmod_class omap2_dispc_hwmod_class = {
75 .name = "dispc",
76 .sysc = &omap2_dispc_sysc,
77};
78
79/*
80 * 'rfbi' class 59 * 'rfbi' class
81 * remote frame buffer interface 60 * remote frame buffer interface
82 */ 61 */
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
index 177dee20faef..2a6729741b06 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
@@ -28,6 +28,28 @@ struct omap_hwmod_dma_info omap2xxx_dss_sdma_chs[] = {
28 { .name = "dispc", .dma_req = 5 }, 28 { .name = "dispc", .dma_req = 5 },
29 { .dma_req = -1 } 29 { .dma_req = -1 }
30}; 30};
31
32/*
33 * 'dispc' class
34 * display controller
35 */
36
37static struct omap_hwmod_class_sysconfig omap2_dispc_sysc = {
38 .rev_offs = 0x0000,
39 .sysc_offs = 0x0010,
40 .syss_offs = 0x0014,
41 .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
42 SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
43 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
44 MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
45 .sysc_fields = &omap_hwmod_sysc_type1,
46};
47
48struct omap_hwmod_class omap2_dispc_hwmod_class = {
49 .name = "dispc",
50 .sysc = &omap2_dispc_sysc,
51};
52
31/* OMAP2xxx Timer Common */ 53/* OMAP2xxx Timer Common */
32static struct omap_hwmod_class_sysconfig omap2xxx_timer_sysc = { 54static struct omap_hwmod_class_sysconfig omap2xxx_timer_sysc = {
33 .rev_offs = 0x0000, 55 .rev_offs = 0x0000,
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 5324e8d93bc0..3c8dd928628e 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -1480,6 +1480,28 @@ static struct omap_hwmod omap3xxx_dss_core_hwmod = {
1480 .masters_cnt = ARRAY_SIZE(omap3xxx_dss_masters), 1480 .masters_cnt = ARRAY_SIZE(omap3xxx_dss_masters),
1481}; 1481};
1482 1482
1483/*
1484 * 'dispc' class
1485 * display controller
1486 */
1487
1488static struct omap_hwmod_class_sysconfig omap3_dispc_sysc = {
1489 .rev_offs = 0x0000,
1490 .sysc_offs = 0x0010,
1491 .syss_offs = 0x0014,
1492 .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
1493 SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
1494 SYSC_HAS_ENAWAKEUP),
1495 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
1496 MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
1497 .sysc_fields = &omap_hwmod_sysc_type1,
1498};
1499
1500static struct omap_hwmod_class omap3_dispc_hwmod_class = {
1501 .name = "dispc",
1502 .sysc = &omap3_dispc_sysc,
1503};
1504
1483/* l4_core -> dss_dispc */ 1505/* l4_core -> dss_dispc */
1484static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_dispc = { 1506static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_dispc = {
1485 .master = &omap3xxx_l4_core_hwmod, 1507 .master = &omap3xxx_l4_core_hwmod,
@@ -1503,7 +1525,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_dispc_slaves[] = {
1503 1525
1504static struct omap_hwmod omap3xxx_dss_dispc_hwmod = { 1526static struct omap_hwmod omap3xxx_dss_dispc_hwmod = {
1505 .name = "dss_dispc", 1527 .name = "dss_dispc",
1506 .class = &omap2_dispc_hwmod_class, 1528 .class = &omap3_dispc_hwmod_class,
1507 .mpu_irqs = omap2_dispc_irqs, 1529 .mpu_irqs = omap2_dispc_irqs,
1508 .main_clk = "dss1_alwon_fck", 1530 .main_clk = "dss1_alwon_fck",
1509 .prcm = { 1531 .prcm = {
@@ -3523,12 +3545,6 @@ static __initdata struct omap_hwmod *omap3xxx_hwmods[] = {
3523 &omap3xxx_uart2_hwmod, 3545 &omap3xxx_uart2_hwmod,
3524 &omap3xxx_uart3_hwmod, 3546 &omap3xxx_uart3_hwmod,
3525 3547
3526 /* dss class */
3527 &omap3xxx_dss_dispc_hwmod,
3528 &omap3xxx_dss_dsi1_hwmod,
3529 &omap3xxx_dss_rfbi_hwmod,
3530 &omap3xxx_dss_venc_hwmod,
3531
3532 /* i2c class */ 3548 /* i2c class */
3533 &omap3xxx_i2c1_hwmod, 3549 &omap3xxx_i2c1_hwmod,
3534 &omap3xxx_i2c2_hwmod, 3550 &omap3xxx_i2c2_hwmod,
@@ -3635,6 +3651,15 @@ static __initdata struct omap_hwmod *am35xx_hwmods[] = {
3635 NULL 3651 NULL
3636}; 3652};
3637 3653
3654static __initdata struct omap_hwmod *omap3xxx_dss_hwmods[] = {
3655 /* dss class */
3656 &omap3xxx_dss_dispc_hwmod,
3657 &omap3xxx_dss_dsi1_hwmod,
3658 &omap3xxx_dss_rfbi_hwmod,
3659 &omap3xxx_dss_venc_hwmod,
3660 NULL
3661};
3662
3638int __init omap3xxx_hwmod_init(void) 3663int __init omap3xxx_hwmod_init(void)
3639{ 3664{
3640 int r; 3665 int r;
@@ -3708,6 +3733,21 @@ int __init omap3xxx_hwmod_init(void)
3708 3733
3709 if (h) 3734 if (h)
3710 r = omap_hwmod_register(h); 3735 r = omap_hwmod_register(h);
3736 if (r < 0)
3737 return r;
3738
3739 /*
3740 * DSS code presumes that dss_core hwmod is handled first,
3741 * _before_ any other DSS related hwmods so register common
3742 * DSS hwmods last to ensure that dss_core is already registered.
 3743	 * Otherwise some strange things may happen, for ex. if dispc
 3744	 * is handled before dss_core and DSS is enabled in the bootloader,
 3745	 * DISPC will be reset with outputs enabled, which sometimes leads
3746 * to unrecoverable L3 error.
3747 * XXX The long-term fix to this is to ensure modules are set up
3748 * in dependency order in the hwmod core code.
3749 */
3750 r = omap_hwmod_register(omap3xxx_dss_hwmods);
3711 3751
3712 return r; 3752 return r;
3713} 3753}
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index f9f151081760..ef0524c10a84 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -1031,6 +1031,7 @@ static struct omap_hwmod_dma_info omap44xx_dmic_sdma_reqs[] = {
1031 1031
1032static struct omap_hwmod_addr_space omap44xx_dmic_addrs[] = { 1032static struct omap_hwmod_addr_space omap44xx_dmic_addrs[] = {
1033 { 1033 {
1034 .name = "mpu",
1034 .pa_start = 0x4012e000, 1035 .pa_start = 0x4012e000,
1035 .pa_end = 0x4012e07f, 1036 .pa_end = 0x4012e07f,
1036 .flags = ADDR_TYPE_RT 1037 .flags = ADDR_TYPE_RT
@@ -1049,6 +1050,7 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__dmic = {
1049 1050
1050static struct omap_hwmod_addr_space omap44xx_dmic_dma_addrs[] = { 1051static struct omap_hwmod_addr_space omap44xx_dmic_dma_addrs[] = {
1051 { 1052 {
1053 .name = "dma",
1052 .pa_start = 0x4902e000, 1054 .pa_start = 0x4902e000,
1053 .pa_end = 0x4902e07f, 1055 .pa_end = 0x4902e07f,
1054 .flags = ADDR_TYPE_RT 1056 .flags = ADDR_TYPE_RT
diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
index 1f736222a629..a4eb5c280435 100644
--- a/arch/arm/mach-omap2/pm24xx.c
+++ b/arch/arm/mach-omap2/pm24xx.c
@@ -82,13 +82,7 @@ static int omap2_fclks_active(void)
82 f1 = omap2_cm_read_mod_reg(CORE_MOD, CM_FCLKEN1); 82 f1 = omap2_cm_read_mod_reg(CORE_MOD, CM_FCLKEN1);
83 f2 = omap2_cm_read_mod_reg(CORE_MOD, OMAP24XX_CM_FCLKEN2); 83 f2 = omap2_cm_read_mod_reg(CORE_MOD, OMAP24XX_CM_FCLKEN2);
84 84
85 /* Ignore UART clocks. These are handled by UART core (serial.c) */ 85 return (f1 | f2) ? 1 : 0;
86 f1 &= ~(OMAP24XX_EN_UART1_MASK | OMAP24XX_EN_UART2_MASK);
87 f2 &= ~OMAP24XX_EN_UART3_MASK;
88
89 if (f1 | f2)
90 return 1;
91 return 0;
92} 86}
93 87
94static void omap2_enter_full_retention(void) 88static void omap2_enter_full_retention(void)
diff --git a/arch/arm/mach-omap2/prm2xxx_3xxx.c b/arch/arm/mach-omap2/prm2xxx_3xxx.c
index c1c4d86a79a8..9ce765407ad5 100644
--- a/arch/arm/mach-omap2/prm2xxx_3xxx.c
+++ b/arch/arm/mach-omap2/prm2xxx_3xxx.c
@@ -19,6 +19,7 @@
19#include "common.h" 19#include "common.h"
20#include <plat/cpu.h> 20#include <plat/cpu.h>
21#include <plat/prcm.h> 21#include <plat/prcm.h>
22#include <plat/irqs.h>
22 23
23#include "vp.h" 24#include "vp.h"
24 25
diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c
index 33dd655e6aab..a1d6154dc120 100644
--- a/arch/arm/mach-omap2/prm44xx.c
+++ b/arch/arm/mach-omap2/prm44xx.c
@@ -19,6 +19,7 @@
19 19
20#include "common.h" 20#include "common.h"
21#include <plat/cpu.h> 21#include <plat/cpu.h>
22#include <plat/irqs.h>
22#include <plat/prcm.h> 23#include <plat/prcm.h>
23 24
24#include "vp.h" 25#include "vp.h"
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index 247d89478f24..f590afc1f673 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -107,18 +107,18 @@ static void omap_uart_set_noidle(struct platform_device *pdev)
107 omap_hwmod_set_slave_idlemode(od->hwmods[0], HWMOD_IDLEMODE_NO); 107 omap_hwmod_set_slave_idlemode(od->hwmods[0], HWMOD_IDLEMODE_NO);
108} 108}
109 109
110static void omap_uart_set_forceidle(struct platform_device *pdev) 110static void omap_uart_set_smartidle(struct platform_device *pdev)
111{ 111{
112 struct omap_device *od = to_omap_device(pdev); 112 struct omap_device *od = to_omap_device(pdev);
113 113
114 omap_hwmod_set_slave_idlemode(od->hwmods[0], HWMOD_IDLEMODE_FORCE); 114 omap_hwmod_set_slave_idlemode(od->hwmods[0], HWMOD_IDLEMODE_SMART);
115} 115}
116 116
117#else 117#else
118static void omap_uart_enable_wakeup(struct platform_device *pdev, bool enable) 118static void omap_uart_enable_wakeup(struct platform_device *pdev, bool enable)
119{} 119{}
120static void omap_uart_set_noidle(struct platform_device *pdev) {} 120static void omap_uart_set_noidle(struct platform_device *pdev) {}
121static void omap_uart_set_forceidle(struct platform_device *pdev) {} 121static void omap_uart_set_smartidle(struct platform_device *pdev) {}
122#endif /* CONFIG_PM */ 122#endif /* CONFIG_PM */
123 123
124#ifdef CONFIG_OMAP_MUX 124#ifdef CONFIG_OMAP_MUX
@@ -349,7 +349,7 @@ void __init omap_serial_init_port(struct omap_board_data *bdata,
349 omap_up.uartclk = OMAP24XX_BASE_BAUD * 16; 349 omap_up.uartclk = OMAP24XX_BASE_BAUD * 16;
350 omap_up.flags = UPF_BOOT_AUTOCONF; 350 omap_up.flags = UPF_BOOT_AUTOCONF;
351 omap_up.get_context_loss_count = omap_pm_get_dev_context_loss_count; 351 omap_up.get_context_loss_count = omap_pm_get_dev_context_loss_count;
352 omap_up.set_forceidle = omap_uart_set_forceidle; 352 omap_up.set_forceidle = omap_uart_set_smartidle;
353 omap_up.set_noidle = omap_uart_set_noidle; 353 omap_up.set_noidle = omap_uart_set_noidle;
354 omap_up.enable_wakeup = omap_uart_enable_wakeup; 354 omap_up.enable_wakeup = omap_uart_enable_wakeup;
355 omap_up.dma_rx_buf_size = info->dma_rx_buf_size; 355 omap_up.dma_rx_buf_size = info->dma_rx_buf_size;
diff --git a/arch/arm/mach-omap2/smartreflex.c b/arch/arm/mach-omap2/smartreflex.c
index 9dd93453e563..7e755bb0ffc4 100644
--- a/arch/arm/mach-omap2/smartreflex.c
+++ b/arch/arm/mach-omap2/smartreflex.c
@@ -897,7 +897,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
897 ret = sr_late_init(sr_info); 897 ret = sr_late_init(sr_info);
898 if (ret) { 898 if (ret) {
899 pr_warning("%s: Error in SR late init\n", __func__); 899 pr_warning("%s: Error in SR late init\n", __func__);
900 return ret; 900 goto err_iounmap;
901 } 901 }
902 } 902 }
903 903
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index 6eeff0e0ae01..5c9acea95761 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -270,7 +270,7 @@ static struct clocksource clocksource_gpt = {
270static u32 notrace dmtimer_read_sched_clock(void) 270static u32 notrace dmtimer_read_sched_clock(void)
271{ 271{
272 if (clksrc.reserved) 272 if (clksrc.reserved)
273 return __omap_dm_timer_read_counter(clksrc.io_base, 1); 273 return __omap_dm_timer_read_counter(&clksrc, 1);
274 274
275 return 0; 275 return 0;
276} 276}
diff --git a/arch/arm/mach-omap2/vc.c b/arch/arm/mach-omap2/vc.c
index 031d116fbf10..175b7d86d86a 100644
--- a/arch/arm/mach-omap2/vc.c
+++ b/arch/arm/mach-omap2/vc.c
@@ -247,7 +247,7 @@ static void __init omap4_vc_init_channel(struct voltagedomain *voltdm)
247 * omap_vc_i2c_init - initialize I2C interface to PMIC 247 * omap_vc_i2c_init - initialize I2C interface to PMIC
248 * @voltdm: voltage domain containing VC data 248 * @voltdm: voltage domain containing VC data
249 * 249 *
250 * Use PMIC supplied seetings for I2C high-speed mode and 250 * Use PMIC supplied settings for I2C high-speed mode and
251 * master code (if set) and program the VC I2C configuration 251 * master code (if set) and program the VC I2C configuration
252 * register. 252 * register.
253 * 253 *
@@ -265,8 +265,8 @@ static void __init omap_vc_i2c_init(struct voltagedomain *voltdm)
265 265
266 if (initialized) { 266 if (initialized) {
267 if (voltdm->pmic->i2c_high_speed != i2c_high_speed) 267 if (voltdm->pmic->i2c_high_speed != i2c_high_speed)
268 pr_warn("%s: I2C config for all channels must match.", 268 pr_warn("%s: I2C config for vdd_%s does not match other channels (%u).",
269 __func__); 269 __func__, voltdm->name, i2c_high_speed);
270 return; 270 return;
271 } 271 }
272 272
@@ -292,9 +292,7 @@ void __init omap_vc_init_channel(struct voltagedomain *voltdm)
292 u32 val; 292 u32 val;
293 293
294 if (!voltdm->pmic || !voltdm->pmic->uv_to_vsel) { 294 if (!voltdm->pmic || !voltdm->pmic->uv_to_vsel) {
295 pr_err("%s: PMIC info requried to configure vc for" 295 pr_err("%s: No PMIC info for vdd_%s\n", __func__, voltdm->name);
296 "vdd_%s not populated.Hence cannot initialize vc\n",
297 __func__, voltdm->name);
298 return; 296 return;
299 } 297 }
300 298
diff --git a/arch/arm/mach-omap2/vp.c b/arch/arm/mach-omap2/vp.c
index 807391d84a9d..0df88820978d 100644
--- a/arch/arm/mach-omap2/vp.c
+++ b/arch/arm/mach-omap2/vp.c
@@ -41,6 +41,11 @@ void __init omap_vp_init(struct voltagedomain *voltdm)
41 u32 val, sys_clk_rate, timeout, waittime; 41 u32 val, sys_clk_rate, timeout, waittime;
42 u32 vddmin, vddmax, vstepmin, vstepmax; 42 u32 vddmin, vddmax, vstepmin, vstepmax;
43 43
44 if (!voltdm->pmic || !voltdm->pmic->uv_to_vsel) {
45 pr_err("%s: No PMIC info for vdd_%s\n", __func__, voltdm->name);
46 return;
47 }
48
44 if (!voltdm->read || !voltdm->write) { 49 if (!voltdm->read || !voltdm->write) {
45 pr_err("%s: No read/write API for accessing vdd_%s regs\n", 50 pr_err("%s: No read/write API for accessing vdd_%s regs\n",
46 __func__, voltdm->name); 51 __func__, voltdm->name);
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
index 0e28bae20bd4..5dad38ec00ea 100644
--- a/arch/arm/mach-orion5x/common.c
+++ b/arch/arm/mach-orion5x/common.c
@@ -29,6 +29,7 @@
29#include <mach/hardware.h> 29#include <mach/hardware.h>
30#include <mach/orion5x.h> 30#include <mach/orion5x.h>
31#include <plat/orion_nand.h> 31#include <plat/orion_nand.h>
32#include <plat/ehci-orion.h>
32#include <plat/time.h> 33#include <plat/time.h>
33#include <plat/common.h> 34#include <plat/common.h>
34#include <plat/addr-map.h> 35#include <plat/addr-map.h>
@@ -72,7 +73,8 @@ void __init orion5x_map_io(void)
72 ****************************************************************************/ 73 ****************************************************************************/
73void __init orion5x_ehci0_init(void) 74void __init orion5x_ehci0_init(void)
74{ 75{
75 orion_ehci_init(ORION5X_USB0_PHYS_BASE, IRQ_ORION5X_USB0_CTRL); 76 orion_ehci_init(ORION5X_USB0_PHYS_BASE, IRQ_ORION5X_USB0_CTRL,
77 EHCI_PHY_ORION);
76} 78}
77 79
78 80
diff --git a/arch/arm/mach-s3c2410/cpu-freq.c b/arch/arm/mach-s3c2410/cpu-freq.c
index 7dc6c46b5e2b..5404535da1a5 100644
--- a/arch/arm/mach-s3c2410/cpu-freq.c
+++ b/arch/arm/mach-s3c2410/cpu-freq.c
@@ -115,7 +115,8 @@ static struct s3c_cpufreq_info s3c2410_cpufreq_info = {
115 .debug_io_show = s3c_cpufreq_debugfs_call(s3c2410_iotiming_debugfs), 115 .debug_io_show = s3c_cpufreq_debugfs_call(s3c2410_iotiming_debugfs),
116}; 116};
117 117
118static int s3c2410_cpufreq_add(struct device *dev) 118static int s3c2410_cpufreq_add(struct device *dev,
119 struct subsys_interface *sif)
119{ 120{
120 return s3c_cpufreq_register(&s3c2410_cpufreq_info); 121 return s3c_cpufreq_register(&s3c2410_cpufreq_info);
121} 122}
@@ -133,7 +134,8 @@ static int __init s3c2410_cpufreq_init(void)
133 134
134arch_initcall(s3c2410_cpufreq_init); 135arch_initcall(s3c2410_cpufreq_init);
135 136
136static int s3c2410a_cpufreq_add(struct device *dev) 137static int s3c2410a_cpufreq_add(struct device *dev,
138 struct subsys_interface *sif)
137{ 139{
138 /* alter the maximum freq settings for S3C2410A. If a board knows 140 /* alter the maximum freq settings for S3C2410A. If a board knows
139 * it only has a maximum of 200, then it should register its own 141 * it only has a maximum of 200, then it should register its own
@@ -144,7 +146,7 @@ static int s3c2410a_cpufreq_add(struct device *dev)
144 s3c2410_cpufreq_info.max.pclk = 66500000; 146 s3c2410_cpufreq_info.max.pclk = 66500000;
145 s3c2410_cpufreq_info.name = "s3c2410a"; 147 s3c2410_cpufreq_info.name = "s3c2410a";
146 148
147 return s3c2410_cpufreq_add(dev); 149 return s3c2410_cpufreq_add(dev, sif);
148} 150}
149 151
150static struct subsys_interface s3c2410a_cpufreq_interface = { 152static struct subsys_interface s3c2410a_cpufreq_interface = {
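
[Editor's note] The recurring change in the s3c24xx/s5p files that follow is mechanical: driver-core subsys_interface callbacks gained a second `struct subsys_interface *sif` argument, so every ->add_dev implementation is updated to the new signature. A minimal, hedged sketch of the interface shape these files are adapting to — the "example" names are invented, and cpu_subsys merely stands in for whatever bus the real interfaces attach to:

	#include <linux/cpu.h>
	#include <linux/device.h>
	#include <linux/init.h>

	static int example_add_dev(struct device *dev, struct subsys_interface *sif)
	{
		/* per-device setup; sif identifies which interface matched */
		return 0;
	}

	static struct subsys_interface example_interface = {
		.name    = "example",
		.subsys  = &cpu_subsys,	/* stand-in; the s3c code uses its own subsystem */
		.add_dev = example_add_dev,
	};

	static int __init example_init(void)
	{
		return subsys_interface_register(&example_interface);
	}
	arch_initcall(example_init);
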
diff --git a/arch/arm/mach-s3c2410/dma.c b/arch/arm/mach-s3c2410/dma.c
index 2afd00014a77..4803338cf56e 100644
--- a/arch/arm/mach-s3c2410/dma.c
+++ b/arch/arm/mach-s3c2410/dma.c
@@ -132,7 +132,8 @@ static struct s3c24xx_dma_order __initdata s3c2410_dma_order = {
132 }, 132 },
133}; 133};
134 134
135static int __init s3c2410_dma_add(struct device *dev) 135static int __init s3c2410_dma_add(struct device *dev,
136 struct subsys_interface *sif)
136{ 137{
137 s3c2410_dma_init(); 138 s3c2410_dma_init();
138 s3c24xx_dma_order_set(&s3c2410_dma_order); 139 s3c24xx_dma_order_set(&s3c2410_dma_order);
@@ -148,7 +149,7 @@ static struct subsys_interface s3c2410_dma_interface = {
148 149
149static int __init s3c2410_dma_drvinit(void) 150static int __init s3c2410_dma_drvinit(void)
150{ 151{
151 return subsys_interface_register(&s3c2410_interface); 152 return subsys_interface_register(&s3c2410_dma_interface);
152} 153}
153 154
154arch_initcall(s3c2410_dma_drvinit); 155arch_initcall(s3c2410_dma_drvinit);
diff --git a/arch/arm/mach-s3c2410/pll.c b/arch/arm/mach-s3c2410/pll.c
index c07438bfc99f..e0b3b347da82 100644
--- a/arch/arm/mach-s3c2410/pll.c
+++ b/arch/arm/mach-s3c2410/pll.c
@@ -66,7 +66,7 @@ static struct cpufreq_frequency_table pll_vals_12MHz[] = {
66 { .frequency = 270000000, .index = PLLVAL(127, 1, 1), }, 66 { .frequency = 270000000, .index = PLLVAL(127, 1, 1), },
67}; 67};
68 68
69static int s3c2410_plls_add(struct device *dev) 69static int s3c2410_plls_add(struct device *dev, struct subsys_interface *sif)
70{ 70{
71 return s3c_plltab_register(pll_vals_12MHz, ARRAY_SIZE(pll_vals_12MHz)); 71 return s3c_plltab_register(pll_vals_12MHz, ARRAY_SIZE(pll_vals_12MHz));
72} 72}
diff --git a/arch/arm/mach-s3c2410/pm.c b/arch/arm/mach-s3c2410/pm.c
index fda5385deff6..03f706dd6009 100644
--- a/arch/arm/mach-s3c2410/pm.c
+++ b/arch/arm/mach-s3c2410/pm.c
@@ -111,7 +111,7 @@ struct syscore_ops s3c2410_pm_syscore_ops = {
111 .resume = s3c2410_pm_resume, 111 .resume = s3c2410_pm_resume,
112}; 112};
113 113
114static int s3c2410_pm_add(struct device *dev) 114static int s3c2410_pm_add(struct device *dev, struct subsys_interface *sif)
115{ 115{
116 pm_cpu_prep = s3c2410_pm_prepare; 116 pm_cpu_prep = s3c2410_pm_prepare;
117 pm_cpu_sleep = s3c2410_cpu_suspend; 117 pm_cpu_sleep = s3c2410_cpu_suspend;
diff --git a/arch/arm/mach-s3c2412/cpu-freq.c b/arch/arm/mach-s3c2412/cpu-freq.c
index d8664b7652ce..125be7d5fa60 100644
--- a/arch/arm/mach-s3c2412/cpu-freq.c
+++ b/arch/arm/mach-s3c2412/cpu-freq.c
@@ -194,7 +194,8 @@ static struct s3c_cpufreq_info s3c2412_cpufreq_info = {
194 .debug_io_show = s3c_cpufreq_debugfs_call(s3c2412_iotiming_debugfs), 194 .debug_io_show = s3c_cpufreq_debugfs_call(s3c2412_iotiming_debugfs),
195}; 195};
196 196
197static int s3c2412_cpufreq_add(struct device *dev) 197static int s3c2412_cpufreq_add(struct device *dev,
198 struct subsys_interface *sif)
198{ 199{
199 unsigned long fclk_rate; 200 unsigned long fclk_rate;
200 201
diff --git a/arch/arm/mach-s3c2412/dma.c b/arch/arm/mach-s3c2412/dma.c
index 142acd3b5e15..38472ac920ff 100644
--- a/arch/arm/mach-s3c2412/dma.c
+++ b/arch/arm/mach-s3c2412/dma.c
@@ -159,7 +159,8 @@ static struct s3c24xx_dma_selection __initdata s3c2412_dma_sel = {
159 .map_size = ARRAY_SIZE(s3c2412_dma_mappings), 159 .map_size = ARRAY_SIZE(s3c2412_dma_mappings),
160}; 160};
161 161
162static int __init s3c2412_dma_add(struct device *dev) 162static int __init s3c2412_dma_add(struct device *dev,
163 struct subsys_interface *sif)
163{ 164{
164 s3c2410_dma_init(); 165 s3c2410_dma_init();
165 return s3c24xx_dma_init_map(&s3c2412_dma_sel); 166 return s3c24xx_dma_init_map(&s3c2412_dma_sel);
diff --git a/arch/arm/mach-s3c2412/irq.c b/arch/arm/mach-s3c2412/irq.c
index a8a46c1644f4..e65619ddbccc 100644
--- a/arch/arm/mach-s3c2412/irq.c
+++ b/arch/arm/mach-s3c2412/irq.c
@@ -170,7 +170,7 @@ static int s3c2412_irq_rtc_wake(struct irq_data *data, unsigned int state)
170 170
171static struct irq_chip s3c2412_irq_rtc_chip; 171static struct irq_chip s3c2412_irq_rtc_chip;
172 172
173static int s3c2412_irq_add(struct device *dev) 173static int s3c2412_irq_add(struct device *dev, struct subsys_interface *sif)
174{ 174{
175 unsigned int irqno; 175 unsigned int irqno;
176 176
diff --git a/arch/arm/mach-s3c2412/pm.c b/arch/arm/mach-s3c2412/pm.c
index d1adfa65f66d..d04588506ec4 100644
--- a/arch/arm/mach-s3c2412/pm.c
+++ b/arch/arm/mach-s3c2412/pm.c
@@ -56,7 +56,7 @@ static void s3c2412_pm_prepare(void)
56{ 56{
57} 57}
58 58
59static int s3c2412_pm_add(struct device *dev) 59static int s3c2412_pm_add(struct device *dev, struct subsys_interface *sif)
60{ 60{
61 pm_cpu_prep = s3c2412_pm_prepare; 61 pm_cpu_prep = s3c2412_pm_prepare;
62 pm_cpu_sleep = s3c2412_cpu_suspend; 62 pm_cpu_sleep = s3c2412_cpu_suspend;
diff --git a/arch/arm/mach-s3c2416/irq.c b/arch/arm/mach-s3c2416/irq.c
index 36df761061de..fd49f35e448e 100644
--- a/arch/arm/mach-s3c2416/irq.c
+++ b/arch/arm/mach-s3c2416/irq.c
@@ -213,7 +213,8 @@ static int __init s3c2416_add_sub(unsigned int base,
213 return 0; 213 return 0;
214} 214}
215 215
216static int __init s3c2416_irq_add(struct device *dev) 216static int __init s3c2416_irq_add(struct device *dev,
217 struct subsys_interface *sif)
217{ 218{
218 printk(KERN_INFO "S3C2416: IRQ Support\n"); 219 printk(KERN_INFO "S3C2416: IRQ Support\n");
219 220
diff --git a/arch/arm/mach-s3c2416/pm.c b/arch/arm/mach-s3c2416/pm.c
index 3bdb15a0d419..1bd4817b8eb8 100644
--- a/arch/arm/mach-s3c2416/pm.c
+++ b/arch/arm/mach-s3c2416/pm.c
@@ -48,7 +48,7 @@ static void s3c2416_pm_prepare(void)
48 __raw_writel(virt_to_phys(s3c_cpu_resume), S3C2412_INFORM1); 48 __raw_writel(virt_to_phys(s3c_cpu_resume), S3C2412_INFORM1);
49} 49}
50 50
51static int s3c2416_pm_add(struct device *dev) 51static int s3c2416_pm_add(struct device *dev, struct subsys_interface *sif)
52{ 52{
53 pm_cpu_prep = s3c2416_pm_prepare; 53 pm_cpu_prep = s3c2416_pm_prepare;
54 pm_cpu_sleep = s3c2416_cpu_suspend; 54 pm_cpu_sleep = s3c2416_cpu_suspend;
diff --git a/arch/arm/mach-s3c2440/clock.c b/arch/arm/mach-s3c2440/clock.c
index bedbc87a3426..414364eb426c 100644
--- a/arch/arm/mach-s3c2440/clock.c
+++ b/arch/arm/mach-s3c2440/clock.c
@@ -149,7 +149,7 @@ static struct clk_lookup s3c2440_clk_lookup[] = {
149 CLKDEV_INIT(NULL, "clk_uart_baud3", &s3c2440_clk_fclk_n), 149 CLKDEV_INIT(NULL, "clk_uart_baud3", &s3c2440_clk_fclk_n),
150}; 150};
151 151
152static int s3c2440_clk_add(struct device *dev) 152static int s3c2440_clk_add(struct device *dev, struct subsys_interface *sif)
153{ 153{
154 struct clk *clock_upll; 154 struct clk *clock_upll;
155 struct clk *clock_h; 155 struct clk *clock_h;
diff --git a/arch/arm/mach-s3c2440/dma.c b/arch/arm/mach-s3c2440/dma.c
index 15b1ddf8f626..5f0a0c8ef84f 100644
--- a/arch/arm/mach-s3c2440/dma.c
+++ b/arch/arm/mach-s3c2440/dma.c
@@ -174,7 +174,8 @@ static struct s3c24xx_dma_order __initdata s3c2440_dma_order = {
174 }, 174 },
175}; 175};
176 176
177static int __init s3c2440_dma_add(struct device *dev) 177static int __init s3c2440_dma_add(struct device *dev,
178 struct subsys_interface *sif)
178{ 179{
179 s3c2410_dma_init(); 180 s3c2410_dma_init();
180 s3c24xx_dma_order_set(&s3c2440_dma_order); 181 s3c24xx_dma_order_set(&s3c2440_dma_order);
diff --git a/arch/arm/mach-s3c2440/irq.c b/arch/arm/mach-s3c2440/irq.c
index 4fee9bc6bcb5..4a18cde439cc 100644
--- a/arch/arm/mach-s3c2440/irq.c
+++ b/arch/arm/mach-s3c2440/irq.c
@@ -92,7 +92,7 @@ static struct irq_chip s3c_irq_wdtac97 = {
92 .irq_ack = s3c_irq_wdtac97_ack, 92 .irq_ack = s3c_irq_wdtac97_ack,
93}; 93};
94 94
95static int s3c2440_irq_add(struct device *dev) 95static int s3c2440_irq_add(struct device *dev, struct subsys_interface *sif)
96{ 96{
97 unsigned int irqno; 97 unsigned int irqno;
98 98
diff --git a/arch/arm/mach-s3c2440/s3c2440-cpufreq.c b/arch/arm/mach-s3c2440/s3c2440-cpufreq.c
index cf7596694efe..61776764d9f4 100644
--- a/arch/arm/mach-s3c2440/s3c2440-cpufreq.c
+++ b/arch/arm/mach-s3c2440/s3c2440-cpufreq.c
@@ -270,7 +270,8 @@ struct s3c_cpufreq_info s3c2440_cpufreq_info = {
270 .debug_io_show = s3c_cpufreq_debugfs_call(s3c2410_iotiming_debugfs), 270 .debug_io_show = s3c_cpufreq_debugfs_call(s3c2410_iotiming_debugfs),
271}; 271};
272 272
273static int s3c2440_cpufreq_add(struct device *dev) 273static int s3c2440_cpufreq_add(struct device *dev,
274 struct subsys_interface *sif)
274{ 275{
275 xtal = s3c_cpufreq_clk_get(NULL, "xtal"); 276 xtal = s3c_cpufreq_clk_get(NULL, "xtal");
276 hclk = s3c_cpufreq_clk_get(NULL, "hclk"); 277 hclk = s3c_cpufreq_clk_get(NULL, "hclk");
diff --git a/arch/arm/mach-s3c2440/s3c2440-pll-12000000.c b/arch/arm/mach-s3c2440/s3c2440-pll-12000000.c
index b5368ae8d7fe..551fb433be87 100644
--- a/arch/arm/mach-s3c2440/s3c2440-pll-12000000.c
+++ b/arch/arm/mach-s3c2440/s3c2440-pll-12000000.c
@@ -51,7 +51,7 @@ static struct cpufreq_frequency_table s3c2440_plls_12[] __initdata = {
51 { .frequency = 400000000, .index = PLLVAL(0x5c, 1, 1), }, /* FVco 800.000000 */ 51 { .frequency = 400000000, .index = PLLVAL(0x5c, 1, 1), }, /* FVco 800.000000 */
52}; 52};
53 53
54static int s3c2440_plls12_add(struct device *dev) 54static int s3c2440_plls12_add(struct device *dev, struct subsys_interface *sif)
55{ 55{
56 struct clk *xtal_clk; 56 struct clk *xtal_clk;
57 unsigned long xtal; 57 unsigned long xtal;
diff --git a/arch/arm/mach-s3c2440/s3c2440-pll-16934400.c b/arch/arm/mach-s3c2440/s3c2440-pll-16934400.c
index 42f2b5cd2399..3f15bcf64290 100644
--- a/arch/arm/mach-s3c2440/s3c2440-pll-16934400.c
+++ b/arch/arm/mach-s3c2440/s3c2440-pll-16934400.c
@@ -79,7 +79,8 @@ static struct cpufreq_frequency_table s3c2440_plls_169344[] __initdata = {
79 { .frequency = 402192000, .index = PLLVAL(87, 2, 1), }, /* FVco 804.384000 */ 79 { .frequency = 402192000, .index = PLLVAL(87, 2, 1), }, /* FVco 804.384000 */
80}; 80};
81 81
82static int s3c2440_plls169344_add(struct device *dev) 82static int s3c2440_plls169344_add(struct device *dev,
83 struct subsys_interface *sif)
83{ 84{
84 struct clk *xtal_clk; 85 struct clk *xtal_clk;
85 unsigned long xtal; 86 unsigned long xtal;
diff --git a/arch/arm/mach-s3c2440/s3c2442.c b/arch/arm/mach-s3c2440/s3c2442.c
index 8004e0497bf4..22cb7c94a8c8 100644
--- a/arch/arm/mach-s3c2440/s3c2442.c
+++ b/arch/arm/mach-s3c2440/s3c2442.c
@@ -122,7 +122,7 @@ static struct clk s3c2442_clk_cam_upll = {
122 }, 122 },
123}; 123};
124 124
125static int s3c2442_clk_add(struct device *dev) 125static int s3c2442_clk_add(struct device *dev, struct subsys_interface *sif)
126{ 126{
127 struct clk *clock_upll; 127 struct clk *clock_upll;
128 struct clk *clock_h; 128 struct clk *clock_h;
diff --git a/arch/arm/mach-s3c2440/s3c244x-clock.c b/arch/arm/mach-s3c2440/s3c244x-clock.c
index b3fdbdda3d5f..6d9b688c442b 100644
--- a/arch/arm/mach-s3c2440/s3c244x-clock.c
+++ b/arch/arm/mach-s3c2440/s3c244x-clock.c
@@ -72,7 +72,7 @@ static struct clk clk_arm = {
72 }, 72 },
73}; 73};
74 74
75static int s3c244x_clk_add(struct device *dev) 75static int s3c244x_clk_add(struct device *dev, struct subsys_interface *sif)
76{ 76{
77 unsigned long camdivn = __raw_readl(S3C2440_CAMDIVN); 77 unsigned long camdivn = __raw_readl(S3C2440_CAMDIVN);
78 unsigned long clkdivn; 78 unsigned long clkdivn;
diff --git a/arch/arm/mach-s3c2440/s3c244x-irq.c b/arch/arm/mach-s3c2440/s3c244x-irq.c
index 74d3dcf46a48..5fe8e58d3afd 100644
--- a/arch/arm/mach-s3c2440/s3c244x-irq.c
+++ b/arch/arm/mach-s3c2440/s3c244x-irq.c
@@ -91,7 +91,7 @@ static struct irq_chip s3c_irq_cam = {
91 .irq_ack = s3c_irq_cam_ack, 91 .irq_ack = s3c_irq_cam_ack,
92}; 92};
93 93
94static int s3c244x_irq_add(struct device *dev) 94static int s3c244x_irq_add(struct device *dev, struct subsys_interface *sif)
95{ 95{
96 unsigned int irqno; 96 unsigned int irqno;
97 97
diff --git a/arch/arm/mach-s3c2443/dma.c b/arch/arm/mach-s3c2443/dma.c
index de6b4a23c9ed..14224517e621 100644
--- a/arch/arm/mach-s3c2443/dma.c
+++ b/arch/arm/mach-s3c2443/dma.c
@@ -135,7 +135,8 @@ static struct s3c24xx_dma_selection __initdata s3c2443_dma_sel = {
135 .map_size = ARRAY_SIZE(s3c2443_dma_mappings), 135 .map_size = ARRAY_SIZE(s3c2443_dma_mappings),
136}; 136};
137 137
138static int __init s3c2443_dma_add(struct device *dev) 138static int __init s3c2443_dma_add(struct device *dev,
139 struct subsys_interface *sif)
139{ 140{
140 s3c24xx_dma_init(6, IRQ_S3C2443_DMA0, 0x100); 141 s3c24xx_dma_init(6, IRQ_S3C2443_DMA0, 0x100);
141 return s3c24xx_dma_init_map(&s3c2443_dma_sel); 142 return s3c24xx_dma_init_map(&s3c2443_dma_sel);
diff --git a/arch/arm/mach-s3c2443/irq.c b/arch/arm/mach-s3c2443/irq.c
index 35e4ff24fb43..ac2829f56d12 100644
--- a/arch/arm/mach-s3c2443/irq.c
+++ b/arch/arm/mach-s3c2443/irq.c
@@ -241,7 +241,8 @@ static int __init s3c2443_add_sub(unsigned int base,
241 return 0; 241 return 0;
242} 242}
243 243
244static int __init s3c2443_irq_add(struct device *dev) 244static int __init s3c2443_irq_add(struct device *dev,
245 struct subsys_interface *sif)
245{ 246{
246 printk("S3C2443: IRQ Support\n"); 247 printk("S3C2443: IRQ Support\n");
247 248
diff --git a/arch/arm/mach-s3c64xx/clock.c b/arch/arm/mach-s3c64xx/clock.c
index 31bb27dc4aeb..aebbcc291b4e 100644
--- a/arch/arm/mach-s3c64xx/clock.c
+++ b/arch/arm/mach-s3c64xx/clock.c
@@ -138,6 +138,11 @@ static struct clk init_clocks_off[] = {
138 .ctrlbit = S3C_CLKCON_PCLK_TSADC, 138 .ctrlbit = S3C_CLKCON_PCLK_TSADC,
139 }, { 139 }, {
140 .name = "i2c", 140 .name = "i2c",
141#ifdef CONFIG_S3C_DEV_I2C1
142 .devname = "s3c2440-i2c.0",
143#else
144 .devname = "s3c2440-i2c",
145#endif
141 .parent = &clk_p, 146 .parent = &clk_p,
142 .enable = s3c64xx_pclk_ctrl, 147 .enable = s3c64xx_pclk_ctrl,
143 .ctrlbit = S3C_CLKCON_PCLK_IIC, 148 .ctrlbit = S3C_CLKCON_PCLK_IIC,
diff --git a/arch/arm/mach-s3c64xx/common.c b/arch/arm/mach-s3c64xx/common.c
index 4a7394d4bd9e..bee7dcd4df7c 100644
--- a/arch/arm/mach-s3c64xx/common.c
+++ b/arch/arm/mach-s3c64xx/common.c
@@ -49,7 +49,7 @@
49 49
50/* uart registration process */ 50/* uart registration process */
51 51
52void __init s3c64xx_init_uarts(struct s3c2410_uartcfg *cfg, int no) 52static void __init s3c64xx_init_uarts(struct s3c2410_uartcfg *cfg, int no)
53{ 53{
54 s3c24xx_init_uartdevs("s3c6400-uart", s3c64xx_uart_resources, cfg, no); 54 s3c24xx_init_uartdevs("s3c6400-uart", s3c64xx_uart_resources, cfg, no);
55} 55}
diff --git a/arch/arm/mach-s5p64x0/pm.c b/arch/arm/mach-s5p64x0/pm.c
index 23f9b22439c9..9cba18bfe47b 100644
--- a/arch/arm/mach-s5p64x0/pm.c
+++ b/arch/arm/mach-s5p64x0/pm.c
@@ -160,7 +160,7 @@ static void s5p64x0_pm_prepare(void)
160 160
161} 161}
162 162
163static int s5p64x0_pm_add(struct device *dev) 163static int s5p64x0_pm_add(struct device *dev, struct subsys_interface *sif)
164{ 164{
165 pm_cpu_prep = s5p64x0_pm_prepare; 165 pm_cpu_prep = s5p64x0_pm_prepare;
166 pm_cpu_sleep = s5p64x0_cpu_suspend; 166 pm_cpu_sleep = s5p64x0_cpu_suspend;
diff --git a/arch/arm/mach-s5pv210/clock.c b/arch/arm/mach-s5pv210/clock.c
index c78dfddd77fd..b9ec0c35379f 100644
--- a/arch/arm/mach-s5pv210/clock.c
+++ b/arch/arm/mach-s5pv210/clock.c
@@ -175,7 +175,7 @@ static int s5pv210_clk_mask1_ctrl(struct clk *clk, int enable)
175 return s5p_gatectrl(S5P_CLK_SRC_MASK1, clk, enable); 175 return s5p_gatectrl(S5P_CLK_SRC_MASK1, clk, enable);
176} 176}
177 177
178static int exynos4_clk_hdmiphy_ctrl(struct clk *clk, int enable) 178static int s5pv210_clk_hdmiphy_ctrl(struct clk *clk, int enable)
179{ 179{
180 return s5p_gatectrl(S5P_HDMI_PHY_CONTROL, clk, enable); 180 return s5p_gatectrl(S5P_HDMI_PHY_CONTROL, clk, enable);
181} 181}
@@ -372,7 +372,7 @@ static struct clk init_clocks_off[] = {
372 }, { 372 }, {
373 .name = "hdmiphy", 373 .name = "hdmiphy",
374 .devname = "s5pv210-hdmi", 374 .devname = "s5pv210-hdmi",
375 .enable = exynos4_clk_hdmiphy_ctrl, 375 .enable = s5pv210_clk_hdmiphy_ctrl,
376 .ctrlbit = (1 << 0), 376 .ctrlbit = (1 << 0),
377 }, { 377 }, {
378 .name = "dacphy", 378 .name = "dacphy",
diff --git a/arch/arm/mach-s5pv210/pm.c b/arch/arm/mach-s5pv210/pm.c
index 677c71c41e50..736bfb103cbc 100644
--- a/arch/arm/mach-s5pv210/pm.c
+++ b/arch/arm/mach-s5pv210/pm.c
@@ -133,7 +133,7 @@ static void s5pv210_pm_prepare(void)
133 s3c_pm_do_save(s5pv210_core_save, ARRAY_SIZE(s5pv210_core_save)); 133 s3c_pm_do_save(s5pv210_core_save, ARRAY_SIZE(s5pv210_core_save));
134} 134}
135 135
136static int s5pv210_pm_add(struct device *dev) 136static int s5pv210_pm_add(struct device *dev, struct subsys_interface *sif)
137{ 137{
138 pm_cpu_prep = s5pv210_pm_prepare; 138 pm_cpu_prep = s5pv210_pm_prepare;
139 pm_cpu_sleep = s5pv210_cpu_suspend; 139 pm_cpu_sleep = s5pv210_cpu_suspend;
diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c
index 6fcf304d3cdf..a83cf51fc099 100644
--- a/arch/arm/mach-shmobile/setup-sh7372.c
+++ b/arch/arm/mach-shmobile/setup-sh7372.c
@@ -662,6 +662,7 @@ static struct sh_dmae_pdata usb_dma0_platform_data = {
662 .dmaor_is_32bit = 1, 662 .dmaor_is_32bit = 1,
663 .needs_tend_set = 1, 663 .needs_tend_set = 1,
664 .no_dmars = 1, 664 .no_dmars = 1,
665 .slave_only = 1,
665}; 666};
666 667
667static struct resource sh7372_usb_dmae0_resources[] = { 668static struct resource sh7372_usb_dmae0_resources[] = {
@@ -723,6 +724,7 @@ static struct sh_dmae_pdata usb_dma1_platform_data = {
723 .dmaor_is_32bit = 1, 724 .dmaor_is_32bit = 1,
724 .needs_tend_set = 1, 725 .needs_tend_set = 1,
725 .no_dmars = 1, 726 .no_dmars = 1,
727 .slave_only = 1,
726}; 728};
727 729
728static struct resource sh7372_usb_dmae1_resources[] = { 730static struct resource sh7372_usb_dmae1_resources[] = {
diff --git a/arch/arm/mach-tegra/board-paz00.c b/arch/arm/mach-tegra/board-paz00.c
index fcf4f377b1dc..330afdfa2475 100644
--- a/arch/arm/mach-tegra/board-paz00.c
+++ b/arch/arm/mach-tegra/board-paz00.c
@@ -60,9 +60,9 @@ static struct plat_serial8250_port debug_uart_platform_data[] = {
60 .uartclk = 216000000, 60 .uartclk = 216000000,
61 }, { 61 }, {
62 /* serial port on mini-pcie */ 62 /* serial port on mini-pcie */
63 .membase = IO_ADDRESS(TEGRA_UARTD_BASE), 63 .membase = IO_ADDRESS(TEGRA_UARTC_BASE),
64 .mapbase = TEGRA_UARTD_BASE, 64 .mapbase = TEGRA_UARTC_BASE,
65 .irq = INT_UARTD, 65 .irq = INT_UARTC,
66 .flags = UPF_BOOT_AUTOCONF | UPF_FIXED_TYPE, 66 .flags = UPF_BOOT_AUTOCONF | UPF_FIXED_TYPE,
67 .type = PORT_TEGRA, 67 .type = PORT_TEGRA,
68 .iotype = UPIO_MEM, 68 .iotype = UPIO_MEM,
@@ -174,7 +174,7 @@ static void __init tegra_paz00_fixup(struct tag *tags, char **cmdline,
174static __initdata struct tegra_clk_init_table paz00_clk_init_table[] = { 174static __initdata struct tegra_clk_init_table paz00_clk_init_table[] = {
175 /* name parent rate enabled */ 175 /* name parent rate enabled */
176 { "uarta", "pll_p", 216000000, true }, 176 { "uarta", "pll_p", 216000000, true },
177 { "uartd", "pll_p", 216000000, true }, 177 { "uartc", "pll_p", 216000000, true },
178 178
179 { "pll_p_out4", "pll_p", 24000000, true }, 179 { "pll_p_out4", "pll_p", 24000000, true },
180 { "usbd", "clk_m", 12000000, false }, 180 { "usbd", "clk_m", 12000000, false },
diff --git a/arch/arm/mach-tegra/board-paz00.h b/arch/arm/mach-tegra/board-paz00.h
index ffa83f580db6..3c9f8da37ea3 100644
--- a/arch/arm/mach-tegra/board-paz00.h
+++ b/arch/arm/mach-tegra/board-paz00.h
@@ -22,7 +22,7 @@
22/* SDCARD */ 22/* SDCARD */
23#define TEGRA_GPIO_SD1_CD TEGRA_GPIO_PV5 23#define TEGRA_GPIO_SD1_CD TEGRA_GPIO_PV5
24#define TEGRA_GPIO_SD1_WP TEGRA_GPIO_PH1 24#define TEGRA_GPIO_SD1_WP TEGRA_GPIO_PH1
25#define TEGRA_GPIO_SD1_POWER TEGRA_GPIO_PT3 25#define TEGRA_GPIO_SD1_POWER TEGRA_GPIO_PV1
26 26
27/* ULPI */ 27/* ULPI */
28#define TEGRA_ULPI_RST TEGRA_GPIO_PV0 28#define TEGRA_ULPI_RST TEGRA_GPIO_PV0
diff --git a/arch/arm/mach-tegra/include/mach/dma.h b/arch/arm/mach-tegra/include/mach/dma.h
index d0132e8031a1..3c9339058bec 100644
--- a/arch/arm/mach-tegra/include/mach/dma.h
+++ b/arch/arm/mach-tegra/include/mach/dma.h
@@ -23,11 +23,6 @@
23 23
24#include <linux/list.h> 24#include <linux/list.h>
25 25
26#if defined(CONFIG_TEGRA_SYSTEM_DMA)
27
28struct tegra_dma_req;
29struct tegra_dma_channel;
30
31#define TEGRA_DMA_REQ_SEL_CNTR 0 26#define TEGRA_DMA_REQ_SEL_CNTR 0
32#define TEGRA_DMA_REQ_SEL_I2S_2 1 27#define TEGRA_DMA_REQ_SEL_I2S_2 1
33#define TEGRA_DMA_REQ_SEL_I2S_1 2 28#define TEGRA_DMA_REQ_SEL_I2S_1 2
@@ -56,6 +51,11 @@ struct tegra_dma_channel;
56#define TEGRA_DMA_REQ_SEL_OWR 25 51#define TEGRA_DMA_REQ_SEL_OWR 25
57#define TEGRA_DMA_REQ_SEL_INVALID 31 52#define TEGRA_DMA_REQ_SEL_INVALID 31
58 53
54#if defined(CONFIG_TEGRA_SYSTEM_DMA)
55
56struct tegra_dma_req;
57struct tegra_dma_channel;
58
59enum tegra_dma_mode { 59enum tegra_dma_mode {
60 TEGRA_DMA_SHARED = 1, 60 TEGRA_DMA_SHARED = 1,
61 TEGRA_DMA_MODE_CONTINOUS = 2, 61 TEGRA_DMA_MODE_CONTINOUS = 2,
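
[Editor's note] Moving the TEGRA_DMA_REQ_SEL_* constants above the CONFIG_TEGRA_SYSTEM_DMA guard lets code that only needs the request-selector numbers include this header even in kernels built without the legacy system-DMA API. A hedged sketch of that kind of consumer (all names hypothetical):

	#include <mach/dma.h>

	struct example_audio_pdata {
		int dma_req_sel;	/* APB DMA request selector for the I2S block */
	};

	static struct example_audio_pdata example_audio_pdata = {
		.dma_req_sel = TEGRA_DMA_REQ_SEL_I2S_1,
	};
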
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 07c4bc8ea0a4..7a24d39661f0 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -54,9 +54,15 @@ loop1:
54 and r1, r1, #7 @ mask of the bits for current cache only 54 and r1, r1, #7 @ mask of the bits for current cache only
55 cmp r1, #2 @ see what cache we have at this level 55 cmp r1, #2 @ see what cache we have at this level
56 blt skip @ skip if no cache, or just i-cache 56 blt skip @ skip if no cache, or just i-cache
57#ifdef CONFIG_PREEMPT
58 save_and_disable_irqs r9 @ make cssr&csidr read atomic
59#endif
57 mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr 60 mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
58 isb @ isb to sych the new cssr&csidr 61 isb @ isb to sych the new cssr&csidr
59 mrc p15, 1, r1, c0, c0, 0 @ read the new csidr 62 mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
63#ifdef CONFIG_PREEMPT
64 restore_irqs_notrace r9
65#endif
60 and r2, r1, #7 @ extract the length of the cache lines 66 and r2, r1, #7 @ extract the length of the cache lines
61 add r2, r2, #4 @ add 4 (line length offset) 67 add r2, r2, #4 @ add 4 (line length offset)
62 ldr r4, =0x3ff 68 ldr r4, =0x3ff
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index ba159370fa5f..80632e8d7538 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -225,8 +225,7 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
225 if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype)) 225 if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
226 continue; 226 continue;
227 if (__phys_to_pfn(area->phys_addr) > pfn || 227 if (__phys_to_pfn(area->phys_addr) > pfn ||
228 __pfn_to_phys(pfn) + offset + size-1 > 228 __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
229 area->phys_addr + area->size-1)
230 continue; 229 continue;
231 /* we can drop the lock here as we know *area is static */ 230 /* we can drop the lock here as we know *area is static */
232 read_unlock(&vmlist_lock); 231 read_unlock(&vmlist_lock);
diff --git a/arch/arm/plat-omap/include/plat/omap-secure.h b/arch/arm/plat-omap/include/plat/omap-secure.h
index 64f9d1c7f1bb..3047ff923a63 100644
--- a/arch/arm/plat-omap/include/plat/omap-secure.h
+++ b/arch/arm/plat-omap/include/plat/omap-secure.h
@@ -3,7 +3,7 @@
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5 5
6#ifdef CONFIG_ARCH_OMAP2PLUS 6#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
7extern int omap_secure_ram_reserve_memblock(void); 7extern int omap_secure_ram_reserve_memblock(void);
8#else 8#else
9static inline void omap_secure_ram_reserve_memblock(void) 9static inline void omap_secure_ram_reserve_memblock(void)
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index e5a2fde29b19..089899a7db72 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -789,10 +789,7 @@ void __init orion_xor1_init(unsigned long mapbase_low,
789/***************************************************************************** 789/*****************************************************************************
790 * EHCI 790 * EHCI
791 ****************************************************************************/ 791 ****************************************************************************/
792static struct orion_ehci_data orion_ehci_data = { 792static struct orion_ehci_data orion_ehci_data;
793 .phy_version = EHCI_PHY_NA,
794};
795
796static u64 ehci_dmamask = DMA_BIT_MASK(32); 793static u64 ehci_dmamask = DMA_BIT_MASK(32);
797 794
798 795
@@ -812,8 +809,10 @@ static struct platform_device orion_ehci = {
812}; 809};
813 810
814void __init orion_ehci_init(unsigned long mapbase, 811void __init orion_ehci_init(unsigned long mapbase,
815 unsigned long irq) 812 unsigned long irq,
813 enum orion_ehci_phy_ver phy_version)
816{ 814{
815 orion_ehci_data.phy_version = phy_version;
817 fill_resources(&orion_ehci, orion_ehci_resources, mapbase, SZ_4K - 1, 816 fill_resources(&orion_ehci, orion_ehci_resources, mapbase, SZ_4K - 1,
818 irq); 817 irq);
819 818
diff --git a/arch/arm/plat-orion/include/plat/common.h b/arch/arm/plat-orion/include/plat/common.h
index 0fe08d77e835..a7fa005a5a0e 100644
--- a/arch/arm/plat-orion/include/plat/common.h
+++ b/arch/arm/plat-orion/include/plat/common.h
@@ -89,7 +89,8 @@ void __init orion_xor1_init(unsigned long mapbase_low,
89 unsigned long irq_1); 89 unsigned long irq_1);
90 90
91void __init orion_ehci_init(unsigned long mapbase, 91void __init orion_ehci_init(unsigned long mapbase,
92 unsigned long irq); 92 unsigned long irq,
93 enum orion_ehci_phy_ver phy_version);
93 94
94void __init orion_ehci_1_init(unsigned long mapbase, 95void __init orion_ehci_1_init(unsigned long mapbase,
95 unsigned long irq); 96 unsigned long irq);
diff --git a/arch/arm/plat-orion/mpp.c b/arch/arm/plat-orion/mpp.c
index 91553432711d..3b1e17bd3d17 100644
--- a/arch/arm/plat-orion/mpp.c
+++ b/arch/arm/plat-orion/mpp.c
@@ -64,8 +64,7 @@ void __init orion_mpp_conf(unsigned int *mpp_list, unsigned int variant_mask,
64 gpio_mode |= GPIO_INPUT_OK; 64 gpio_mode |= GPIO_INPUT_OK;
65 if (*mpp_list & MPP_OUTPUT_MASK) 65 if (*mpp_list & MPP_OUTPUT_MASK)
66 gpio_mode |= GPIO_OUTPUT_OK; 66 gpio_mode |= GPIO_OUTPUT_OK;
67 if (sel != 0) 67
68 gpio_mode = 0;
69 orion_gpio_set_valid(num, gpio_mode); 68 orion_gpio_set_valid(num, gpio_mode);
70 } 69 }
71 70
diff --git a/arch/arm/plat-samsung/devs.c b/arch/arm/plat-samsung/devs.c
index 32a6e394db24..f10768e988d4 100644
--- a/arch/arm/plat-samsung/devs.c
+++ b/arch/arm/plat-samsung/devs.c
@@ -468,8 +468,10 @@ void __init s3c_i2c0_set_platdata(struct s3c2410_platform_i2c *pd)
468{ 468{
469 struct s3c2410_platform_i2c *npd; 469 struct s3c2410_platform_i2c *npd;
470 470
471 if (!pd) 471 if (!pd) {
472 pd = &default_i2c_data; 472 pd = &default_i2c_data;
473 pd->bus_num = 0;
474 }
473 475
474 npd = s3c_set_platdata(pd, sizeof(struct s3c2410_platform_i2c), 476 npd = s3c_set_platdata(pd, sizeof(struct s3c2410_platform_i2c),
475 &s3c_device_i2c0); 477 &s3c_device_i2c0);
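
[Editor's note] The effect of the hunk above is that a board passing NULL gets the shared default platform data with its bus number explicitly forced to 0; typical board-file usage stays a one-liner (the surrounding function name is illustrative):

	static void __init example_board_machine_init(void)
	{
		s3c_i2c0_set_platdata(NULL);	/* defaults, bus_num forced to 0 */
	}
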
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index 197e96f70405..3dea7231f637 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -8,6 +8,7 @@ config AVR32
8 select HAVE_KPROBES 8 select HAVE_KPROBES
9 select HAVE_GENERIC_HARDIRQS 9 select HAVE_GENERIC_HARDIRQS
10 select GENERIC_IRQ_PROBE 10 select GENERIC_IRQ_PROBE
11 select GENERIC_ATOMIC64
11 select HARDIRQS_SW_RESEND 12 select HARDIRQS_SW_RESEND
12 select GENERIC_IRQ_SHOW 13 select GENERIC_IRQ_SHOW
13 select ARCH_HAVE_NMI_SAFE_CMPXCHG 14 select ARCH_HAVE_NMI_SAFE_CMPXCHG
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index d4fc1a971779..604cd9dd1333 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -26,7 +26,6 @@
26#include <linux/cache.h> 26#include <linux/cache.h>
27#include <linux/of_platform.h> 27#include <linux/of_platform.h>
28#include <linux/dma-mapping.h> 28#include <linux/dma-mapping.h>
29#include <linux/cpu.h>
30#include <asm/cacheflush.h> 29#include <asm/cacheflush.h>
31#include <asm/entry.h> 30#include <asm/entry.h>
32#include <asm/cpuinfo.h> 31#include <asm/cpuinfo.h>
@@ -227,23 +226,5 @@ static int __init setup_bus_notifier(void)
227 226
228 return 0; 227 return 0;
229} 228}
230arch_initcall(setup_bus_notifier);
231
232static DEFINE_PER_CPU(struct cpu, cpu_devices);
233
234static int __init topology_init(void)
235{
236 int i, ret;
237
238 for_each_present_cpu(i) {
239 struct cpu *c = &per_cpu(cpu_devices, i);
240 229
241 ret = register_cpu(c, i); 230arch_initcall(setup_bus_notifier);
242 if (ret)
243 printk(KERN_WARNING "topology_init: register_cpu %d "
244 "failed (%d)\n", i, ret);
245 }
246
247 return 0;
248}
249subsys_initcall(topology_init);
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index c4c1312473fb..5ab6e89603c5 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2356,6 +2356,7 @@ config PCI
2356 depends on HW_HAS_PCI 2356 depends on HW_HAS_PCI
2357 select PCI_DOMAINS 2357 select PCI_DOMAINS
2358 select GENERIC_PCI_IOMAP 2358 select GENERIC_PCI_IOMAP
2359 select NO_GENERIC_PCI_IOPORT_MAP
2359 help 2360 help
2360 Find out whether you have a PCI motherboard. PCI is the name of a 2361 Find out whether you have a PCI motherboard. PCI is the name of a
2361 bus system, i.e. the way the CPU talks to the other stuff inside 2362 bus system, i.e. the way the CPU talks to the other stuff inside
diff --git a/arch/mips/lib/iomap-pci.c b/arch/mips/lib/iomap-pci.c
index 2635b1a96333..fd35daa45314 100644
--- a/arch/mips/lib/iomap-pci.c
+++ b/arch/mips/lib/iomap-pci.c
@@ -10,8 +10,8 @@
10#include <linux/module.h> 10#include <linux/module.h>
11#include <asm/io.h> 11#include <asm/io.h>
12 12
13static void __iomem *ioport_map_pci(struct pci_dev *dev, 13void __iomem *__pci_ioport_map(struct pci_dev *dev,
14 unsigned long port, unsigned int nr) 14 unsigned long port, unsigned int nr)
15{ 15{
16 struct pci_controller *ctrl = dev->bus->sysdata; 16 struct pci_controller *ctrl = dev->bus->sysdata;
17 unsigned long base = ctrl->io_map_base; 17 unsigned long base = ctrl->io_map_base;
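
[Editor's note] The rename to __pci_ioport_map() pairs with the NO_GENERIC_PCI_IOPORT_MAP selects added in the MIPS and SH Kconfig hunks: architectures that select it supply their own port mapping, everyone else falls back to plain ioport_map(). Roughly, the generic side looks like the following (a paraphrase, not a verbatim quote of the generic header):

	#ifdef CONFIG_NO_GENERIC_PCI_IOPORT_MAP
	/* the architecture provides the mapping itself, as MIPS does above */
	extern void __iomem *__pci_ioport_map(struct pci_dev *dev,
					      unsigned long port, unsigned int nr);
	#else
	#define __pci_ioport_map(dev, port, nr) ioport_map((port), (nr))
	#endif
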
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 2156e077859b..1acf65026773 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -24,10 +24,6 @@ CONFIG_PPC_SPLPAR=y
24CONFIG_SCANLOG=m 24CONFIG_SCANLOG=m
25CONFIG_PPC_SMLPAR=y 25CONFIG_PPC_SMLPAR=y
26CONFIG_DTL=y 26CONFIG_DTL=y
27CONFIG_PPC_ISERIES=y
28CONFIG_VIODASD=y
29CONFIG_VIOCD=m
30CONFIG_VIOTAPE=m
31CONFIG_PPC_MAPLE=y 27CONFIG_PPC_MAPLE=y
32CONFIG_PPC_PASEMI=y 28CONFIG_PPC_PASEMI=y
33CONFIG_PPC_PASEMI_IOMMU=y 29CONFIG_PPC_PASEMI_IOMMU=y
@@ -259,7 +255,6 @@ CONFIG_PASEMI_MAC=y
259CONFIG_MLX4_EN=m 255CONFIG_MLX4_EN=m
260CONFIG_QLGE=m 256CONFIG_QLGE=m
261CONFIG_BE2NET=m 257CONFIG_BE2NET=m
262CONFIG_ISERIES_VETH=m
263CONFIG_PPP=m 258CONFIG_PPP=m
264CONFIG_PPP_ASYNC=m 259CONFIG_PPP_ASYNC=m
265CONFIG_PPP_SYNC_TTY=m 260CONFIG_PPP_SYNC_TTY=m
diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h
index 43268f15004e..6d422979ebaf 100644
--- a/arch/powerpc/include/asm/ppc-pci.h
+++ b/arch/powerpc/include/asm/ppc-pci.h
@@ -142,6 +142,11 @@ static inline const char *eeh_pci_name(struct pci_dev *pdev)
142 return pdev ? pci_name(pdev) : "<null>"; 142 return pdev ? pci_name(pdev) : "<null>";
143} 143}
144 144
145static inline const char *eeh_driver_name(struct pci_dev *pdev)
146{
147 return (pdev && pdev->driver) ? pdev->driver->name : "<null>";
148}
149
145#endif /* CONFIG_EEH */ 150#endif /* CONFIG_EEH */
146 151
147#else /* CONFIG_PCI */ 152#else /* CONFIG_PCI */
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index 78a205162fd7..84cc7840cd18 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -83,8 +83,18 @@ struct pt_regs {
83 83
84#ifndef __ASSEMBLY__ 84#ifndef __ASSEMBLY__
85 85
86#define instruction_pointer(regs) ((regs)->nip) 86#define GET_IP(regs) ((regs)->nip)
87#define user_stack_pointer(regs) ((regs)->gpr[1]) 87#define GET_USP(regs) ((regs)->gpr[1])
88#define GET_FP(regs) (0)
89#define SET_FP(regs, val)
90
91#ifdef CONFIG_SMP
92extern unsigned long profile_pc(struct pt_regs *regs);
93#define profile_pc profile_pc
94#endif
95
96#include <asm-generic/ptrace.h>
97
88#define kernel_stack_pointer(regs) ((regs)->gpr[1]) 98#define kernel_stack_pointer(regs) ((regs)->gpr[1])
89static inline int is_syscall_success(struct pt_regs *regs) 99static inline int is_syscall_success(struct pt_regs *regs)
90{ 100{
@@ -99,12 +109,6 @@ static inline long regs_return_value(struct pt_regs *regs)
99 return -regs->gpr[3]; 109 return -regs->gpr[3];
100} 110}
101 111
102#ifdef CONFIG_SMP
103extern unsigned long profile_pc(struct pt_regs *regs);
104#else
105#define profile_pc(regs) instruction_pointer(regs)
106#endif
107
108#ifdef __powerpc64__ 112#ifdef __powerpc64__
109#define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1) 113#define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
110#else 114#else
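
[Editor's note] Switching to GET_IP/GET_USP plus asm-generic/ptrace.h means the generic header now derives the accessors that used to be open-coded here; roughly (paraphrased, not the verbatim generic header):

	static inline unsigned long instruction_pointer(struct pt_regs *regs)
	{
		return GET_IP(regs);		/* regs->nip on powerpc */
	}

	static inline unsigned long user_stack_pointer(struct pt_regs *regs)
	{
		return GET_USP(regs);		/* regs->gpr[1] on powerpc */
	}

	#ifndef profile_pc			/* powerpc defines its own for SMP above */
	#define profile_pc(regs) instruction_pointer(regs)
	#endif
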
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index d4be7bb3dbdf..3844ca7c5099 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -775,7 +775,7 @@ program_check_common:
775 EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN) 775 EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
776 bl .save_nvgprs 776 bl .save_nvgprs
777 addi r3,r1,STACK_FRAME_OVERHEAD 777 addi r3,r1,STACK_FRAME_OVERHEAD
778 ENABLE_INTS 778 DISABLE_INTS
779 bl .program_check_exception 779 bl .program_check_exception
780 b .ret_from_except 780 b .ret_from_except
781 781
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 701d4aceb4f4..01e2877e8e04 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -118,10 +118,14 @@ static inline notrace void set_soft_enabled(unsigned long enable)
118static inline notrace void decrementer_check_overflow(void) 118static inline notrace void decrementer_check_overflow(void)
119{ 119{
120 u64 now = get_tb_or_rtc(); 120 u64 now = get_tb_or_rtc();
121 u64 *next_tb = &__get_cpu_var(decrementers_next_tb); 121 u64 *next_tb;
122
123 preempt_disable();
124 next_tb = &__get_cpu_var(decrementers_next_tb);
122 125
123 if (now >= *next_tb) 126 if (now >= *next_tb)
124 set_dec(1); 127 set_dec(1);
128 preempt_enable();
125} 129}
126 130
127notrace void arch_local_irq_restore(unsigned long en) 131notrace void arch_local_irq_restore(unsigned long en)
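
[Editor's note] The preempt_disable()/preempt_enable() pair added above is the standard fix for taking the address of a per-cpu variable from possibly preemptible context; the same shape in isolation, with hypothetical names:

	#include <linux/percpu.h>
	#include <linux/preempt.h>
	#include <linux/types.h>

	static DEFINE_PER_CPU(u64, example_next_tb);

	static void example_check(u64 now)
	{
		u64 *next_tb;

		preempt_disable();		/* pin the task to this CPU */
		next_tb = &__get_cpu_var(example_next_tb);
		if (now >= *next_tb)
			*next_tb = now;
		preempt_enable();
	}
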
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 10a140f82cb8..64483fde95c6 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -865,6 +865,7 @@ static void power_pmu_start(struct perf_event *event, int ef_flags)
865{ 865{
866 unsigned long flags; 866 unsigned long flags;
867 s64 left; 867 s64 left;
868 unsigned long val;
868 869
869 if (!event->hw.idx || !event->hw.sample_period) 870 if (!event->hw.idx || !event->hw.sample_period)
870 return; 871 return;
@@ -880,7 +881,12 @@ static void power_pmu_start(struct perf_event *event, int ef_flags)
880 881
881 event->hw.state = 0; 882 event->hw.state = 0;
882 left = local64_read(&event->hw.period_left); 883 left = local64_read(&event->hw.period_left);
883 write_pmc(event->hw.idx, left); 884
885 val = 0;
886 if (left < 0x80000000L)
887 val = 0x80000000L - left;
888
889 write_pmc(event->hw.idx, val);
884 890
885 perf_event_update_userpage(event); 891 perf_event_update_userpage(event);
886 perf_pmu_enable(event->pmu); 892 perf_pmu_enable(event->pmu);
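
[Editor's note] The new write_pmc() value restates how POWER performance counters signal: they interrupt when counting up through 0x80000000, so leaving `left` events before the next interrupt means starting the counter at 0x80000000 - left. The same computation as a stand-alone, hypothetical helper mirroring the hunk above:

	static unsigned long pmc_start_value(s64 left)
	{
		unsigned long val = 0;

		if (left < 0x80000000L)
			val = 0x80000000L - left;
		return val;
	}
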
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index ebe5766781aa..d817ab018486 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -566,12 +566,12 @@ static void show_instructions(struct pt_regs *regs)
566 */ 566 */
567 if (!__kernel_text_address(pc) || 567 if (!__kernel_text_address(pc) ||
568 __get_user(instr, (unsigned int __user *)pc)) { 568 __get_user(instr, (unsigned int __user *)pc)) {
569 printk("XXXXXXXX "); 569 printk(KERN_CONT "XXXXXXXX ");
570 } else { 570 } else {
571 if (regs->nip == pc) 571 if (regs->nip == pc)
572 printk("<%08x> ", instr); 572 printk(KERN_CONT "<%08x> ", instr);
573 else 573 else
574 printk("%08x ", instr); 574 printk(KERN_CONT "%08x ", instr);
575 } 575 }
576 576
577 pc += sizeof(int); 577 pc += sizeof(int);
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 517b1d8f455b..9f843cdfee9e 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -716,7 +716,6 @@ static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_w
716 int cpu; 716 int cpu;
717 717
718 slb_set_size(SLB_MIN_SIZE); 718 slb_set_size(SLB_MIN_SIZE);
719 stop_topology_update();
720 printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n", smp_processor_id()); 719 printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n", smp_processor_id());
721 720
722 while (rc == H_MULTI_THREADS_ACTIVE && !atomic_read(&data->done) && 721 while (rc == H_MULTI_THREADS_ACTIVE && !atomic_read(&data->done) &&
@@ -732,7 +731,6 @@ static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_w
732 rc = atomic_read(&data->error); 731 rc = atomic_read(&data->error);
733 732
734 atomic_set(&data->error, rc); 733 atomic_set(&data->error, rc);
735 start_topology_update();
736 pSeries_coalesce_init(); 734 pSeries_coalesce_init();
737 735
738 if (wake_when_done) { 736 if (wake_when_done) {
@@ -846,6 +844,7 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
846 atomic_set(&data.error, 0); 844 atomic_set(&data.error, 0);
847 data.token = rtas_token("ibm,suspend-me"); 845 data.token = rtas_token("ibm,suspend-me");
848 data.complete = &done; 846 data.complete = &done;
847 stop_topology_update();
849 848
850 /* Call function on all CPUs. One of us will make the 849 /* Call function on all CPUs. One of us will make the
851 * rtas call 850 * rtas call
@@ -858,6 +857,8 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
858 if (atomic_read(&data.error) != 0) 857 if (atomic_read(&data.error) != 0)
859 printk(KERN_ERR "Error doing global join\n"); 858 printk(KERN_ERR "Error doing global join\n");
860 859
860 start_topology_update();
861
861 return atomic_read(&data.error); 862 return atomic_read(&data.error);
862} 863}
863#else /* CONFIG_PPC_PSERIES */ 864#else /* CONFIG_PPC_PSERIES */
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index a70bc1e385eb..f92b9ef7340e 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -52,32 +52,38 @@ static int pnv_msi_check_device(struct pci_dev* pdev, int nvec, int type)
52 52
53static unsigned int pnv_get_one_msi(struct pnv_phb *phb) 53static unsigned int pnv_get_one_msi(struct pnv_phb *phb)
54{ 54{
55 unsigned int id; 55 unsigned long flags;
56 unsigned int id, rc;
57
58 spin_lock_irqsave(&phb->lock, flags);
56 59
57 spin_lock(&phb->lock);
58 id = find_next_zero_bit(phb->msi_map, phb->msi_count, phb->msi_next); 60 id = find_next_zero_bit(phb->msi_map, phb->msi_count, phb->msi_next);
59 if (id >= phb->msi_count && phb->msi_next) 61 if (id >= phb->msi_count && phb->msi_next)
60 id = find_next_zero_bit(phb->msi_map, phb->msi_count, 0); 62 id = find_next_zero_bit(phb->msi_map, phb->msi_count, 0);
61 if (id >= phb->msi_count) { 63 if (id >= phb->msi_count) {
62 spin_unlock(&phb->lock); 64 rc = 0;
63 return 0; 65 goto out;
64 } 66 }
65 __set_bit(id, phb->msi_map); 67 __set_bit(id, phb->msi_map);
66 spin_unlock(&phb->lock); 68 rc = id + phb->msi_base;
67 return id + phb->msi_base; 69out:
70 spin_unlock_irqrestore(&phb->lock, flags);
71 return rc;
68} 72}
69 73
70static void pnv_put_msi(struct pnv_phb *phb, unsigned int hwirq) 74static void pnv_put_msi(struct pnv_phb *phb, unsigned int hwirq)
71{ 75{
76 unsigned long flags;
72 unsigned int id; 77 unsigned int id;
73 78
74 if (WARN_ON(hwirq < phb->msi_base || 79 if (WARN_ON(hwirq < phb->msi_base ||
75 hwirq >= (phb->msi_base + phb->msi_count))) 80 hwirq >= (phb->msi_base + phb->msi_count)))
76 return; 81 return;
77 id = hwirq - phb->msi_base; 82 id = hwirq - phb->msi_base;
78 spin_lock(&phb->lock); 83
84 spin_lock_irqsave(&phb->lock, flags);
79 __clear_bit(id, phb->msi_map); 85 __clear_bit(id, phb->msi_map);
80 spin_unlock(&phb->lock); 86 spin_unlock_irqrestore(&phb->lock, flags);
81} 87}
82 88
83static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) 89static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
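
[Editor's note] The conversion to spin_lock_irqsave() in the two functions above makes the MSI bitmap safe to touch from contexts that may already run with interrupts disabled; the pattern in isolation, with hypothetical names:

	#include <linux/bitops.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);
	static unsigned long example_map;	/* one BITS_PER_LONG-sized bitmap */

	static int example_alloc_id(void)
	{
		unsigned long flags;
		int id;

		spin_lock_irqsave(&example_lock, flags);
		id = find_first_zero_bit(&example_map, BITS_PER_LONG);
		if (id < BITS_PER_LONG)
			__set_bit(id, &example_map);
		else
			id = -1;
		spin_unlock_irqrestore(&example_lock, flags);
		return id;
	}
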
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index 565869022e3d..c0b40af4ce4f 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -551,9 +551,9 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
551 printk (KERN_ERR "EEH: %d reads ignored for recovering device at " 551 printk (KERN_ERR "EEH: %d reads ignored for recovering device at "
552 "location=%s driver=%s pci addr=%s\n", 552 "location=%s driver=%s pci addr=%s\n",
553 pdn->eeh_check_count, location, 553 pdn->eeh_check_count, location,
554 dev->driver->name, eeh_pci_name(dev)); 554 eeh_driver_name(dev), eeh_pci_name(dev));
555 printk (KERN_ERR "EEH: Might be infinite loop in %s driver\n", 555 printk (KERN_ERR "EEH: Might be infinite loop in %s driver\n",
556 dev->driver->name); 556 eeh_driver_name(dev));
557 dump_stack(); 557 dump_stack();
558 } 558 }
559 goto dn_unlock; 559 goto dn_unlock;
diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c
index b84a8b2238dd..47226e04126d 100644
--- a/arch/powerpc/platforms/pseries/suspend.c
+++ b/arch/powerpc/platforms/pseries/suspend.c
@@ -24,6 +24,7 @@
24#include <asm/machdep.h> 24#include <asm/machdep.h>
25#include <asm/mmu.h> 25#include <asm/mmu.h>
26#include <asm/rtas.h> 26#include <asm/rtas.h>
27#include <asm/topology.h>
27 28
28static u64 stream_id; 29static u64 stream_id;
29static struct device suspend_dev; 30static struct device suspend_dev;
@@ -138,8 +139,11 @@ static ssize_t store_hibernate(struct device *dev,
138 ssleep(1); 139 ssleep(1);
139 } while (rc == -EAGAIN); 140 } while (rc == -EAGAIN);
140 141
141 if (!rc) 142 if (!rc) {
143 stop_topology_update();
142 rc = pm_suspend(PM_SUSPEND_MEM); 144 rc = pm_suspend(PM_SUSPEND_MEM);
145 start_topology_update();
146 }
143 147
144 stream_id = 0; 148 stream_id = 0;
145 149
diff --git a/arch/powerpc/platforms/wsp/ics.c b/arch/powerpc/platforms/wsp/ics.c
index 576874392543..97fe82ee8633 100644
--- a/arch/powerpc/platforms/wsp/ics.c
+++ b/arch/powerpc/platforms/wsp/ics.c
@@ -346,7 +346,7 @@ static int wsp_chip_set_affinity(struct irq_data *d,
346 * For the moment only implement delivery to all cpus or one cpu. 346 * For the moment only implement delivery to all cpus or one cpu.
347 * Get current irq_server for the given irq 347 * Get current irq_server for the given irq
348 */ 348 */
349 ret = cache_hwirq_map(ics, d->irq, cpumask); 349 ret = cache_hwirq_map(ics, hw_irq, cpumask);
350 if (ret == -1) { 350 if (ret == -1) {
351 char cpulist[128]; 351 char cpulist[128];
352 cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask); 352 cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
diff --git a/arch/powerpc/platforms/wsp/wsp_pci.c b/arch/powerpc/platforms/wsp/wsp_pci.c
index e0262cd0e2d3..d24b3acf858e 100644
--- a/arch/powerpc/platforms/wsp/wsp_pci.c
+++ b/arch/powerpc/platforms/wsp/wsp_pci.c
@@ -468,15 +468,15 @@ static void __init wsp_pcie_configure_hw(struct pci_controller *hose)
468#define DUMP_REG(x) \ 468#define DUMP_REG(x) \
469 pr_debug("%-30s : 0x%016llx\n", #x, in_be64(hose->cfg_data + x)) 469 pr_debug("%-30s : 0x%016llx\n", #x, in_be64(hose->cfg_data + x))
470 470
471#ifdef CONFIG_WSP_DD1_WORKAROUND_BAD_PCIE_CLASS 471 /*
472 /* WSP DD1 has a bogus class code by default in the PCI-E 472 * Some WSP variants has a bogus class code by default in the PCI-E
473 * root complex's built-in P2P bridge */ 473 * root complex's built-in P2P bridge
474 */
474 val = in_be64(hose->cfg_data + PCIE_REG_SYS_CFG1); 475 val = in_be64(hose->cfg_data + PCIE_REG_SYS_CFG1);
475 pr_debug("PCI-E SYS_CFG1 : 0x%llx\n", val); 476 pr_debug("PCI-E SYS_CFG1 : 0x%llx\n", val);
476 out_be64(hose->cfg_data + PCIE_REG_SYS_CFG1, 477 out_be64(hose->cfg_data + PCIE_REG_SYS_CFG1,
477 (val & ~PCIE_REG_SYS_CFG1_CLASS_CODE) | (PCI_CLASS_BRIDGE_PCI << 8)); 478 (val & ~PCIE_REG_SYS_CFG1_CLASS_CODE) | (PCI_CLASS_BRIDGE_PCI << 8));
478 pr_debug("PCI-E SYS_CFG1 : 0x%llx\n", in_be64(hose->cfg_data + PCIE_REG_SYS_CFG1)); 479 pr_debug("PCI-E SYS_CFG1 : 0x%llx\n", in_be64(hose->cfg_data + PCIE_REG_SYS_CFG1));
479#endif /* CONFIG_WSP_DD1_WORKAROUND_BAD_PCIE_CLASS */
480 480
481#ifdef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS 481#ifdef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
482 /* XXX Disable TCE caching, it doesn't work on DD1 */ 482 /* XXX Disable TCE caching, it doesn't work on DD1 */
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 30eb17ecad49..6073288fed29 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -385,26 +385,36 @@ static void __init setup_pci_cmd(struct pci_controller *hose)
385void fsl_pcibios_fixup_bus(struct pci_bus *bus) 385void fsl_pcibios_fixup_bus(struct pci_bus *bus)
386{ 386{
387 struct pci_controller *hose = pci_bus_to_host(bus); 387 struct pci_controller *hose = pci_bus_to_host(bus);
388 int i; 388 int i, is_pcie = 0, no_link;
389 389
390 if ((bus->parent == hose->bus) && 390 /* The root complex bridge comes up with bogus resources,
391 ((fsl_pcie_bus_fixup && 391 * we copy the PHB ones in.
392 early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) || 392 *
393 (hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK))) 393 * With the current generic PCI code, the PHB bus no longer
394 { 394 * has bus->resource[0..4] set, so things are a bit more
395 for (i = 0; i < 4; ++i) { 395 * tricky.
396 */
397
398 if (fsl_pcie_bus_fixup)
399 is_pcie = early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP);
400 no_link = !!(hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK);
401
402 if (bus->parent == hose->bus && (is_pcie || no_link)) {
403 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; ++i) {
396 struct resource *res = bus->resource[i]; 404 struct resource *res = bus->resource[i];
397 struct resource *par = bus->parent->resource[i]; 405 struct resource *par;
398 if (res) { 406
399 res->start = 0; 407 if (!res)
400 res->end = 0; 408 continue;
401 res->flags = 0; 409 if (i == 0)
402 } 410 par = &hose->io_resource;
403 if (res && par) { 411 else if (i < 4)
404 res->start = par->start; 412 par = &hose->mem_resources[i-1];
405 res->end = par->end; 413 else par = NULL;
406 res->flags = par->flags; 414
407 } 415 res->start = par ? par->start : 0;
416 res->end = par ? par->end : 0;
417 res->flags = par ? par->flags : 0;
408 } 418 }
409 } 419 }
410} 420}
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 3c8db65c89e5..713fb58ca507 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -859,6 +859,7 @@ config PCI
859 depends on SYS_SUPPORTS_PCI 859 depends on SYS_SUPPORTS_PCI
860 select PCI_DOMAINS 860 select PCI_DOMAINS
861 select GENERIC_PCI_IOMAP 861 select GENERIC_PCI_IOMAP
862 select NO_GENERIC_PCI_IOPORT_MAP
862 help 863 help
863 Find out whether you have a PCI motherboard. PCI is the name of a 864 Find out whether you have a PCI motherboard. PCI is the name of a
864 bus system, i.e. the way the CPU talks to the other stuff inside 865 bus system, i.e. the way the CPU talks to the other stuff inside
diff --git a/arch/sh/drivers/pci/pci.c b/arch/sh/drivers/pci/pci.c
index 8f18dd090a66..1e7b0e2e764d 100644
--- a/arch/sh/drivers/pci/pci.c
+++ b/arch/sh/drivers/pci/pci.c
@@ -356,8 +356,8 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
356 356
357#ifndef CONFIG_GENERIC_IOMAP 357#ifndef CONFIG_GENERIC_IOMAP
358 358
359static void __iomem *ioport_map_pci(struct pci_dev *dev, 359void __iomem *__pci_ioport_map(struct pci_dev *dev,
360 unsigned long port, unsigned int nr) 360 unsigned long port, unsigned int nr)
361{ 361{
362 struct pci_channel *chan = dev->sysdata; 362 struct pci_channel *chan = dev->sysdata;
363 363
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 96657992a72e..ca5580e4d813 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -33,6 +33,7 @@ config SPARC
33config SPARC32 33config SPARC32
34 def_bool !64BIT 34 def_bool !64BIT
35 select GENERIC_ATOMIC64 35 select GENERIC_ATOMIC64
36 select CLZ_TAB
36 37
37config SPARC64 38config SPARC64
38 def_bool 64BIT 39 def_bool 64BIT
diff --git a/arch/sparc/lib/divdi3.S b/arch/sparc/lib/divdi3.S
index 681b3683da9e..d74bc0925f2d 100644
--- a/arch/sparc/lib/divdi3.S
+++ b/arch/sparc/lib/divdi3.S
@@ -17,23 +17,9 @@ along with GNU CC; see the file COPYING. If not, write to
17the Free Software Foundation, 59 Temple Place - Suite 330, 17the Free Software Foundation, 59 Temple Place - Suite 330,
18Boston, MA 02111-1307, USA. */ 18Boston, MA 02111-1307, USA. */
19 19
20 .data
21 .align 8
22 .globl __clz_tab
23__clz_tab:
24 .byte 0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5
25 .byte 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6
26 .byte 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7
27 .byte 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7
28 .byte 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8
29 .byte 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8
30 .byte 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8
31 .byte 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8
32 .size __clz_tab,256
33 .global .udiv
34
35 .text 20 .text
36 .align 4 21 .align 4
22 .global .udiv
37 .globl __divdi3 23 .globl __divdi3
38__divdi3: 24__divdi3:
39 save %sp,-104,%sp 25 save %sp,-104,%sp
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index 0c9fa2745f13..b3b733262909 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -145,13 +145,13 @@ extern void __add_wrong_size(void)
145 145
146#ifdef __HAVE_ARCH_CMPXCHG 146#ifdef __HAVE_ARCH_CMPXCHG
147#define cmpxchg(ptr, old, new) \ 147#define cmpxchg(ptr, old, new) \
148 __cmpxchg((ptr), (old), (new), sizeof(*ptr)) 148 __cmpxchg(ptr, old, new, sizeof(*(ptr)))
149 149
150#define sync_cmpxchg(ptr, old, new) \ 150#define sync_cmpxchg(ptr, old, new) \
151 __sync_cmpxchg((ptr), (old), (new), sizeof(*ptr)) 151 __sync_cmpxchg(ptr, old, new, sizeof(*(ptr)))
152 152
153#define cmpxchg_local(ptr, old, new) \ 153#define cmpxchg_local(ptr, old, new) \
154 __cmpxchg_local((ptr), (old), (new), sizeof(*ptr)) 154 __cmpxchg_local(ptr, old, new, sizeof(*(ptr)))
155#endif 155#endif
156 156
157/* 157/*
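
[Editor's note] The added parentheses in sizeof(*(ptr)) only matter when the macro argument is an expression rather than a bare pointer name; a stand-alone (non-kernel) illustration of the difference:

	#include <stdio.h>

	int main(void)
	{
		short buf[2] = { 0, 0 };
		short *p = buf;

		/* old expansion for cmpxchg(p + 1, ...): "*p + 1" is promoted to int */
		printf("sizeof(*p + 1)   = %zu\n", sizeof(*p + 1));	/* sizeof(int)   */
		/* new expansion keeps the intended operand */
		printf("sizeof(*(p + 1)) = %zu\n", sizeof(*(p + 1)));	/* sizeof(short) */
		return 0;
	}
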
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index 6919e936345b..a850b4d8d14d 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -29,8 +29,8 @@ extern unsigned int sig_xstate_size;
29extern void fpu_init(void); 29extern void fpu_init(void);
30extern void mxcsr_feature_mask_init(void); 30extern void mxcsr_feature_mask_init(void);
31extern int init_fpu(struct task_struct *child); 31extern int init_fpu(struct task_struct *child);
32extern asmlinkage void math_state_restore(void); 32extern void __math_state_restore(struct task_struct *);
33extern void __math_state_restore(void); 33extern void math_state_restore(void);
34extern int dump_fpu(struct pt_regs *, struct user_i387_struct *); 34extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
35 35
36extern user_regset_active_fn fpregs_active, xfpregs_active; 36extern user_regset_active_fn fpregs_active, xfpregs_active;
@@ -212,19 +212,11 @@ static inline void fpu_fxsave(struct fpu *fpu)
212 212
213#endif /* CONFIG_X86_64 */ 213#endif /* CONFIG_X86_64 */
214 214
215/* We need a safe address that is cheap to find and that is already
216 in L1 during context switch. The best choices are unfortunately
217 different for UP and SMP */
218#ifdef CONFIG_SMP
219#define safe_address (__per_cpu_offset[0])
220#else
221#define safe_address (__get_cpu_var(kernel_cpustat).cpustat[CPUTIME_USER])
222#endif
223
224/* 215/*
225 * These must be called with preempt disabled 216 * These must be called with preempt disabled. Returns
217 * 'true' if the FPU state is still intact.
226 */ 218 */
227static inline void fpu_save_init(struct fpu *fpu) 219static inline int fpu_save_init(struct fpu *fpu)
228{ 220{
229 if (use_xsave()) { 221 if (use_xsave()) {
230 fpu_xsave(fpu); 222 fpu_xsave(fpu);
@@ -233,33 +225,33 @@ static inline void fpu_save_init(struct fpu *fpu)
233 * xsave header may indicate the init state of the FP. 225 * xsave header may indicate the init state of the FP.
234 */ 226 */
235 if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP)) 227 if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
236 return; 228 return 1;
237 } else if (use_fxsr()) { 229 } else if (use_fxsr()) {
238 fpu_fxsave(fpu); 230 fpu_fxsave(fpu);
239 } else { 231 } else {
240 asm volatile("fnsave %[fx]; fwait" 232 asm volatile("fnsave %[fx]; fwait"
241 : [fx] "=m" (fpu->state->fsave)); 233 : [fx] "=m" (fpu->state->fsave));
242 return; 234 return 0;
243 } 235 }
244 236
245 if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) 237 /*
238 * If exceptions are pending, we need to clear them so
239 * that we don't randomly get exceptions later.
240 *
241 * FIXME! Is this perhaps only true for the old-style
242 * irq13 case? Maybe we could leave the x87 state
243 * intact otherwise?
244 */
245 if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) {
246 asm volatile("fnclex"); 246 asm volatile("fnclex");
247 247 return 0;
248 /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception 248 }
249 is pending. Clear the x87 state here by setting it to fixed 249 return 1;
250 values. safe_address is a random variable that should be in L1 */
251 alternative_input(
252 ASM_NOP8 ASM_NOP2,
253 "emms\n\t" /* clear stack tags */
254 "fildl %P[addr]", /* set F?P to defined value */
255 X86_FEATURE_FXSAVE_LEAK,
256 [addr] "m" (safe_address));
257} 250}
258 251
259static inline void __save_init_fpu(struct task_struct *tsk) 252static inline int __save_init_fpu(struct task_struct *tsk)
260{ 253{
261 fpu_save_init(&tsk->thread.fpu); 254 return fpu_save_init(&tsk->thread.fpu);
262 task_thread_info(tsk)->status &= ~TS_USEDFPU;
263} 255}
264 256
265static inline int fpu_fxrstor_checking(struct fpu *fpu) 257static inline int fpu_fxrstor_checking(struct fpu *fpu)
@@ -281,39 +273,185 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
281} 273}
282 274
283/* 275/*
284 * Signal frame handlers... 276 * Software FPU state helpers. Careful: these need to
277 * be preemption protection *and* they need to be
278 * properly paired with the CR0.TS changes!
285 */ 279 */
286extern int save_i387_xstate(void __user *buf); 280static inline int __thread_has_fpu(struct task_struct *tsk)
287extern int restore_i387_xstate(void __user *buf); 281{
282 return tsk->thread.has_fpu;
283}
288 284
289static inline void __unlazy_fpu(struct task_struct *tsk) 285/* Must be paired with an 'stts' after! */
286static inline void __thread_clear_has_fpu(struct task_struct *tsk)
290{ 287{
291 if (task_thread_info(tsk)->status & TS_USEDFPU) { 288 tsk->thread.has_fpu = 0;
292 __save_init_fpu(tsk); 289}
293 stts(); 290
294 } else 291/* Must be paired with a 'clts' before! */
295 tsk->fpu_counter = 0; 292static inline void __thread_set_has_fpu(struct task_struct *tsk)
293{
294 tsk->thread.has_fpu = 1;
296} 295}
297 296
297/*
298 * Encapsulate the CR0.TS handling together with the
299 * software flag.
300 *
301 * These generally need preemption protection to work,
302 * do try to avoid using these on their own.
303 */
304static inline void __thread_fpu_end(struct task_struct *tsk)
305{
306 __thread_clear_has_fpu(tsk);
307 stts();
308}
309
310static inline void __thread_fpu_begin(struct task_struct *tsk)
311{
312 clts();
313 __thread_set_has_fpu(tsk);
314}
315
316/*
317 * FPU state switching for scheduling.
318 *
319 * This is a two-stage process:
320 *
321 * - switch_fpu_prepare() saves the old state and
322 * sets the new state of the CR0.TS bit. This is
323 * done within the context of the old process.
324 *
325 * - switch_fpu_finish() restores the new state as
326 * necessary.
327 */
328typedef struct { int preload; } fpu_switch_t;
329
330/*
331 * FIXME! We could do a totally lazy restore, but we need to
332 * add a per-cpu "this was the task that last touched the FPU
333 * on this CPU" variable, and the task needs to have a "I last
334 * touched the FPU on this CPU" and check them.
335 *
336 * We don't do that yet, so "fpu_lazy_restore()" always returns
337 * false, but some day..
338 */
339#define fpu_lazy_restore(tsk) (0)
340#define fpu_lazy_state_intact(tsk) do { } while (0)
341
342static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new)
343{
344 fpu_switch_t fpu;
345
346 fpu.preload = tsk_used_math(new) && new->fpu_counter > 5;
347 if (__thread_has_fpu(old)) {
348 if (__save_init_fpu(old))
349 fpu_lazy_state_intact(old);
350 __thread_clear_has_fpu(old);
351 old->fpu_counter++;
352
353 /* Don't change CR0.TS if we just switch! */
354 if (fpu.preload) {
355 __thread_set_has_fpu(new);
356 prefetch(new->thread.fpu.state);
357 } else
358 stts();
359 } else {
360 old->fpu_counter = 0;
361 if (fpu.preload) {
362 if (fpu_lazy_restore(new))
363 fpu.preload = 0;
364 else
365 prefetch(new->thread.fpu.state);
366 __thread_fpu_begin(new);
367 }
368 }
369 return fpu;
370}
371
372/*
373 * By the time this gets called, we've already cleared CR0.TS and
374 * given the process the FPU if we are going to preload the FPU
375 * state - all we need to do is to conditionally restore the register
376 * state itself.
377 */
378static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
379{
380 if (fpu.preload)
381 __math_state_restore(new);
382}
383
384/*
385 * Signal frame handlers...
386 */
387extern int save_i387_xstate(void __user *buf);
388extern int restore_i387_xstate(void __user *buf);
389
298static inline void __clear_fpu(struct task_struct *tsk) 390static inline void __clear_fpu(struct task_struct *tsk)
299{ 391{
300 if (task_thread_info(tsk)->status & TS_USEDFPU) { 392 if (__thread_has_fpu(tsk)) {
301 /* Ignore delayed exceptions from user space */ 393 /* Ignore delayed exceptions from user space */
302 asm volatile("1: fwait\n" 394 asm volatile("1: fwait\n"
303 "2:\n" 395 "2:\n"
304 _ASM_EXTABLE(1b, 2b)); 396 _ASM_EXTABLE(1b, 2b));
305 task_thread_info(tsk)->status &= ~TS_USEDFPU; 397 __thread_fpu_end(tsk);
306 stts();
307 } 398 }
308} 399}
309 400
401/*
402 * Were we in an interrupt that interrupted kernel mode?
403 *
404 * We can do a kernel_fpu_begin/end() pair *ONLY* if that
405 * pair does nothing at all: the thread must not have fpu (so
406 * that we don't try to save the FPU state), and TS must
407 * be set (so that the clts/stts pair does nothing that is
408 * visible in the interrupted kernel thread).
409 */
410static inline bool interrupted_kernel_fpu_idle(void)
411{
412 return !__thread_has_fpu(current) &&
413 (read_cr0() & X86_CR0_TS);
414}
415
416/*
417 * Were we in user mode (or vm86 mode) when we were
418 * interrupted?
419 *
420 * Doing kernel_fpu_begin/end() is ok if we are running
421 * in an interrupt context from user mode - we'll just
422 * save the FPU state as required.
423 */
424static inline bool interrupted_user_mode(void)
425{
426 struct pt_regs *regs = get_irq_regs();
427 return regs && user_mode_vm(regs);
428}
429
430/*
431 * Can we use the FPU in kernel mode with the
432 * whole "kernel_fpu_begin/end()" sequence?
433 *
434 * It's always ok in process context (ie "not interrupt")
435 * but it is sometimes ok even from an irq.
436 */
437static inline bool irq_fpu_usable(void)
438{
439 return !in_interrupt() ||
440 interrupted_user_mode() ||
441 interrupted_kernel_fpu_idle();
442}
443
310static inline void kernel_fpu_begin(void) 444static inline void kernel_fpu_begin(void)
311{ 445{
312 struct thread_info *me = current_thread_info(); 446 struct task_struct *me = current;
447
448 WARN_ON_ONCE(!irq_fpu_usable());
313 preempt_disable(); 449 preempt_disable();
314 if (me->status & TS_USEDFPU) 450 if (__thread_has_fpu(me)) {
315 __save_init_fpu(me->task); 451 __save_init_fpu(me);
316 else 452 __thread_clear_has_fpu(me);
453 /* We do 'stts()' in kernel_fpu_end() */
454 } else
317 clts(); 455 clts();
318} 456}
319 457
@@ -323,14 +461,6 @@ static inline void kernel_fpu_end(void)
323 preempt_enable(); 461 preempt_enable();
324} 462}
325 463
326static inline bool irq_fpu_usable(void)
327{
328 struct pt_regs *regs;
329
330 return !in_interrupt() || !(regs = get_irq_regs()) || \
331 user_mode(regs) || (read_cr0() & X86_CR0_TS);
332}
333
334/* 464/*
335 * Some instructions like VIA's padlock instructions generate a spurious 465 * Some instructions like VIA's padlock instructions generate a spurious
336 * DNA fault but don't modify SSE registers. And these instructions 466 * DNA fault but don't modify SSE registers. And these instructions
@@ -363,20 +493,64 @@ static inline void irq_ts_restore(int TS_state)
363} 493}
364 494
365/* 495/*
496 * The question "does this thread have fpu access?"
497 * is slightly racy, since preemption could come in
498 * and revoke it immediately after the test.
499 *
500 * However, even in that very unlikely scenario,
501 * we can just assume we have FPU access - typically
502 * to save the FP state - we'll just take a #NM
503 * fault and get the FPU access back.
504 *
505 * The actual user_fpu_begin/end() functions
506 * need to be preemption-safe, though.
507 *
508 * NOTE! user_fpu_end() must be used only after you
509 * have saved the FP state, and user_fpu_begin() must
510 * be used only immediately before restoring it.
511 * These functions do not do any save/restore on
512 * their own.
513 */
514static inline int user_has_fpu(void)
515{
516 return __thread_has_fpu(current);
517}
518
519static inline void user_fpu_end(void)
520{
521 preempt_disable();
522 __thread_fpu_end(current);
523 preempt_enable();
524}
525
526static inline void user_fpu_begin(void)
527{
528 preempt_disable();
529 if (!user_has_fpu())
530 __thread_fpu_begin(current);
531 preempt_enable();
532}
533
534/*
366 * These disable preemption on their own and are safe 535 * These disable preemption on their own and are safe
367 */ 536 */
368static inline void save_init_fpu(struct task_struct *tsk) 537static inline void save_init_fpu(struct task_struct *tsk)
369{ 538{
539 WARN_ON_ONCE(!__thread_has_fpu(tsk));
370 preempt_disable(); 540 preempt_disable();
371 __save_init_fpu(tsk); 541 __save_init_fpu(tsk);
372 stts(); 542 __thread_fpu_end(tsk);
373 preempt_enable(); 543 preempt_enable();
374} 544}
375 545
376static inline void unlazy_fpu(struct task_struct *tsk) 546static inline void unlazy_fpu(struct task_struct *tsk)
377{ 547{
378 preempt_disable(); 548 preempt_disable();
379 __unlazy_fpu(tsk); 549 if (__thread_has_fpu(tsk)) {
550 __save_init_fpu(tsk);
551 __thread_fpu_end(tsk);
552 } else
553 tsk->fpu_counter = 0;
380 preempt_enable(); 554 preempt_enable();
381} 555}
382 556
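For readers following the i387.h hunk above: the open-coded FPU preload logic in __switch_to() is replaced by the switch_fpu_prepare()/switch_fpu_finish() pair. Below is a minimal standalone sketch of that two-stage decision, written as ordinary user-space C; struct task, cr0_ts and the helper bodies are illustrative stand-ins for task_struct, CR0.TS and the real kernel helpers, not kernel API.

#include <stdio.h>
#include <stdbool.h>

/* Toy stand-ins for kernel state; names are illustrative only. */
struct task {
	const char *name;
	bool used_math;		/* models tsk_used_math()	*/
	bool has_fpu;		/* models tsk->thread.has_fpu	*/
	unsigned fpu_counter;	/* consecutive slices using FPU	*/
};

struct fpu_switch { bool preload; };	/* mirrors fpu_switch_t	*/

static bool cr0_ts;			/* models the CR0.TS bit	*/

/* Stage 1: runs in the context of the old task. */
static struct fpu_switch switch_fpu_prepare(struct task *old, struct task *new)
{
	struct fpu_switch fpu;

	/* Preload only if the incoming task has been using the FPU a lot. */
	fpu.preload = new->used_math && new->fpu_counter > 5;

	if (old->has_fpu) {
		/* the real code saves the state here (fnsave/fxsave/xsave) */
		old->has_fpu = false;
		old->fpu_counter++;
		if (fpu.preload)
			new->has_fpu = true;	/* keep CR0.TS clear, hand the FPU over */
		else
			cr0_ts = true;		/* stts(): next FPU use will trap	*/
	} else {
		old->fpu_counter = 0;
		if (fpu.preload) {
			cr0_ts = false;		/* __thread_fpu_begin(): clts()...	*/
			new->has_fpu = true;	/* ...and mark the new owner		*/
		}
	}
	return fpu;
}

/* Stage 2: runs once the rest of the context switch is done. */
static void switch_fpu_finish(struct task *new, struct fpu_switch fpu)
{
	if (fpu.preload)
		printf("restoring FPU registers for %s\n", new->name);
}

int main(void)
{
	struct task prev = { "prev", true, true, 7 };
	struct task next = { "next", true, false, 9 };

	struct fpu_switch fpu = switch_fpu_prepare(&prev, &next);
	switch_fpu_finish(&next, fpu);
	printf("CR0.TS=%d, next.has_fpu=%d\n", cr0_ts, next.has_fpu);
	return 0;
}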
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index ab4092e3214e..7b9cfc4878af 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -190,6 +190,9 @@ struct x86_emulate_ops {
190 int (*intercept)(struct x86_emulate_ctxt *ctxt, 190 int (*intercept)(struct x86_emulate_ctxt *ctxt,
191 struct x86_instruction_info *info, 191 struct x86_instruction_info *info,
192 enum x86_intercept_stage stage); 192 enum x86_intercept_stage stage);
193
194 bool (*get_cpuid)(struct x86_emulate_ctxt *ctxt,
195 u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
193}; 196};
194 197
195typedef u32 __attribute__((vector_size(16))) sse128_t; 198typedef u32 __attribute__((vector_size(16))) sse128_t;
@@ -298,6 +301,19 @@ struct x86_emulate_ctxt {
298#define X86EMUL_MODE_PROT (X86EMUL_MODE_PROT16|X86EMUL_MODE_PROT32| \ 301#define X86EMUL_MODE_PROT (X86EMUL_MODE_PROT16|X86EMUL_MODE_PROT32| \
299 X86EMUL_MODE_PROT64) 302 X86EMUL_MODE_PROT64)
300 303
304/* CPUID vendors */
305#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx 0x68747541
306#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx 0x444d4163
307#define X86EMUL_CPUID_VENDOR_AuthenticAMD_edx 0x69746e65
308
309#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx 0x69444d41
310#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx 0x21726574
311#define X86EMUL_CPUID_VENDOR_AMDisbetterI_edx 0x74656273
312
313#define X86EMUL_CPUID_VENDOR_GenuineIntel_ebx 0x756e6547
314#define X86EMUL_CPUID_VENDOR_GenuineIntel_ecx 0x6c65746e
315#define X86EMUL_CPUID_VENDOR_GenuineIntel_edx 0x49656e69
316
301enum x86_intercept_stage { 317enum x86_intercept_stage {
302 X86_ICTP_NONE = 0, /* Allow zero-init to not match anything */ 318 X86_ICTP_NONE = 0, /* Allow zero-init to not match anything */
303 X86_ICPT_PRE_EXCEPT, 319 X86_ICPT_PRE_EXCEPT,
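The X86EMUL_CPUID_VENDOR_* values added above are simply the EBX/ECX/EDX words that CPUID leaf 0 returns for each vendor: the 12-byte vendor string packed four ASCII bytes per register, least-significant byte first. A small standalone check that decodes the constants back into the strings the emulator matches; vendor_from_regs() is a hypothetical helper, and the byte order shown assumes a little-endian host.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* CPUID leaf 0 returns the vendor string in EBX, EDX, ECX (in that order),
 * four ASCII bytes per register, least-significant byte first. */
static void vendor_from_regs(uint32_t ebx, uint32_t edx, uint32_t ecx,
			     char out[13])
{
	memcpy(out + 0, &ebx, 4);
	memcpy(out + 4, &edx, 4);
	memcpy(out + 8, &ecx, 4);
	out[12] = '\0';
}

int main(void)
{
	char v[13];

	/* Values from the X86EMUL_CPUID_VENDOR_GenuineIntel_* constants. */
	vendor_from_regs(0x756e6547, 0x49656e69, 0x6c65746e, v);
	printf("%s\n", v);	/* prints: GenuineIntel */

	/* Values from the X86EMUL_CPUID_VENDOR_AuthenticAMD_* constants. */
	vendor_from_regs(0x68747541, 0x69746e65, 0x444d4163, v);
	printf("%s\n", v);	/* prints: AuthenticAMD */
	return 0;
}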
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index aa9088c26931..f7c89e231c6c 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -454,6 +454,7 @@ struct thread_struct {
454 unsigned long trap_no; 454 unsigned long trap_no;
455 unsigned long error_code; 455 unsigned long error_code;
456 /* floating point and extended processor state */ 456 /* floating point and extended processor state */
457 unsigned long has_fpu;
457 struct fpu fpu; 458 struct fpu fpu;
458#ifdef CONFIG_X86_32 459#ifdef CONFIG_X86_32
459 /* Virtual 86 mode info */ 460 /* Virtual 86 mode info */
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index bc817cd8b443..cfd8144d5527 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -247,8 +247,6 @@ static inline struct thread_info *current_thread_info(void)
247 * ever touches our thread-synchronous status, so we don't 247 * ever touches our thread-synchronous status, so we don't
248 * have to worry about atomic accesses. 248 * have to worry about atomic accesses.
249 */ 249 */
250#define TS_USEDFPU 0x0001 /* FPU was used by this task
251 this quantum (SMP) */
252#define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/ 250#define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/
253#define TS_POLLING 0x0004 /* idle task polling need_resched, 251#define TS_POLLING 0x0004 /* idle task polling need_resched,
254 skip sending interrupt */ 252 skip sending interrupt */
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 73da6b64f5b7..d6bd49faa40c 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -439,7 +439,6 @@ void intel_pmu_pebs_enable(struct perf_event *event)
439 hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT; 439 hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;
440 440
441 cpuc->pebs_enabled |= 1ULL << hwc->idx; 441 cpuc->pebs_enabled |= 1ULL << hwc->idx;
442 WARN_ON_ONCE(cpuc->enabled);
443 442
444 if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1) 443 if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
445 intel_pmu_lbr_enable(event); 444 intel_pmu_lbr_enable(event);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index 3fab3de3ce96..47a7e63bfe54 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -72,8 +72,6 @@ void intel_pmu_lbr_enable(struct perf_event *event)
72 if (!x86_pmu.lbr_nr) 72 if (!x86_pmu.lbr_nr)
73 return; 73 return;
74 74
75 WARN_ON_ONCE(cpuc->enabled);
76
77 /* 75 /*
78 * Reset the LBR stack if we changed task context to 76 * Reset the LBR stack if we changed task context to
79 * avoid data leaks. 77 * avoid data leaks.
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 1aae78f775fc..4025fe4f928f 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -252,7 +252,8 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
252 unsigned short ss; 252 unsigned short ss;
253 unsigned long sp; 253 unsigned long sp;
254#endif 254#endif
255 printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter); 255 printk(KERN_DEFAULT
256 "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
256#ifdef CONFIG_PREEMPT 257#ifdef CONFIG_PREEMPT
257 printk("PREEMPT "); 258 printk("PREEMPT ");
258#endif 259#endif
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 6d728d9284bd..17107bd6e1f0 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -129,7 +129,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
129 if (!stack) { 129 if (!stack) {
130 if (regs) 130 if (regs)
131 stack = (unsigned long *)regs->sp; 131 stack = (unsigned long *)regs->sp;
132 else if (task && task != current) 132 else if (task != current)
133 stack = (unsigned long *)task->thread.sp; 133 stack = (unsigned long *)task->thread.sp;
134 else 134 else
135 stack = &dummy; 135 stack = &dummy;
@@ -269,11 +269,11 @@ void show_registers(struct pt_regs *regs)
269 unsigned char c; 269 unsigned char c;
270 u8 *ip; 270 u8 *ip;
271 271
272 printk(KERN_EMERG "Stack:\n"); 272 printk(KERN_DEFAULT "Stack:\n");
273 show_stack_log_lvl(NULL, regs, (unsigned long *)sp, 273 show_stack_log_lvl(NULL, regs, (unsigned long *)sp,
274 0, KERN_EMERG); 274 0, KERN_DEFAULT);
275 275
276 printk(KERN_EMERG "Code: "); 276 printk(KERN_DEFAULT "Code: ");
277 277
278 ip = (u8 *)regs->ip - code_prologue; 278 ip = (u8 *)regs->ip - code_prologue;
279 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) { 279 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 485204f58cda..80bfe1ab0031 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -299,22 +299,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
299 *next = &next_p->thread; 299 *next = &next_p->thread;
300 int cpu = smp_processor_id(); 300 int cpu = smp_processor_id();
301 struct tss_struct *tss = &per_cpu(init_tss, cpu); 301 struct tss_struct *tss = &per_cpu(init_tss, cpu);
302 bool preload_fpu; 302 fpu_switch_t fpu;
303 303
304 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ 304 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
305 305
306 /* 306 fpu = switch_fpu_prepare(prev_p, next_p);
307 * If the task has used fpu the last 5 timeslices, just do a full
308 * restore of the math state immediately to avoid the trap; the
309 * chances of needing FPU soon are obviously high now
310 */
311 preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;
312
313 __unlazy_fpu(prev_p);
314
315 /* we're going to use this soon, after a few expensive things */
316 if (preload_fpu)
317 prefetch(next->fpu.state);
318 307
319 /* 308 /*
320 * Reload esp0. 309 * Reload esp0.
@@ -354,11 +343,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
354 task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT)) 343 task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
355 __switch_to_xtra(prev_p, next_p, tss); 344 __switch_to_xtra(prev_p, next_p, tss);
356 345
357 /* If we're going to preload the fpu context, make sure clts
358 is run while we're batching the cpu state updates. */
359 if (preload_fpu)
360 clts();
361
362 /* 346 /*
363 * Leave lazy mode, flushing any hypercalls made here. 347 * Leave lazy mode, flushing any hypercalls made here.
364 * This must be done before restoring TLS segments so 348 * This must be done before restoring TLS segments so
@@ -368,15 +352,14 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
368 */ 352 */
369 arch_end_context_switch(next_p); 353 arch_end_context_switch(next_p);
370 354
371 if (preload_fpu)
372 __math_state_restore();
373
374 /* 355 /*
375 * Restore %gs if needed (which is common) 356 * Restore %gs if needed (which is common)
376 */ 357 */
377 if (prev->gs | next->gs) 358 if (prev->gs | next->gs)
378 lazy_load_gs(next->gs); 359 lazy_load_gs(next->gs);
379 360
361 switch_fpu_finish(next_p, fpu);
362
380 percpu_write(current_task, next_p); 363 percpu_write(current_task, next_p);
381 364
382 return prev_p; 365 return prev_p;
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 9b9fe4a85c87..1fd94bc4279d 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -386,18 +386,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
386 int cpu = smp_processor_id(); 386 int cpu = smp_processor_id();
387 struct tss_struct *tss = &per_cpu(init_tss, cpu); 387 struct tss_struct *tss = &per_cpu(init_tss, cpu);
388 unsigned fsindex, gsindex; 388 unsigned fsindex, gsindex;
389 bool preload_fpu; 389 fpu_switch_t fpu;
390 390
391 /* 391 fpu = switch_fpu_prepare(prev_p, next_p);
392 * If the task has used fpu the last 5 timeslices, just do a full
393 * restore of the math state immediately to avoid the trap; the
394 * chances of needing FPU soon are obviously high now
395 */
396 preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;
397
398 /* we're going to use this soon, after a few expensive things */
399 if (preload_fpu)
400 prefetch(next->fpu.state);
401 392
402 /* 393 /*
403 * Reload esp0, LDT and the page table pointer: 394 * Reload esp0, LDT and the page table pointer:
@@ -427,13 +418,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
427 418
428 load_TLS(next, cpu); 419 load_TLS(next, cpu);
429 420
430 /* Must be after DS reload */
431 __unlazy_fpu(prev_p);
432
433 /* Make sure cpu is ready for new context */
434 if (preload_fpu)
435 clts();
436
437 /* 421 /*
438 * Leave lazy mode, flushing any hypercalls made here. 422 * Leave lazy mode, flushing any hypercalls made here.
439 * This must be done before restoring TLS segments so 423 * This must be done before restoring TLS segments so
@@ -474,6 +458,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
474 wrmsrl(MSR_KERNEL_GS_BASE, next->gs); 458 wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
475 prev->gsindex = gsindex; 459 prev->gsindex = gsindex;
476 460
461 switch_fpu_finish(next_p, fpu);
462
477 /* 463 /*
478 * Switch the PDA and FPU contexts. 464 * Switch the PDA and FPU contexts.
479 */ 465 */
@@ -492,13 +478,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
492 task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV)) 478 task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
493 __switch_to_xtra(prev_p, next_p, tss); 479 __switch_to_xtra(prev_p, next_p, tss);
494 480
495 /*
496 * Preload the FPU context, now that we've determined that the
497 * task is likely to be using it.
498 */
499 if (preload_fpu)
500 __math_state_restore();
501
502 return prev_p; 481 return prev_p;
503} 482}
504 483
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 37a458b521a6..d840e69a853c 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -39,6 +39,14 @@ static int reboot_mode;
39enum reboot_type reboot_type = BOOT_ACPI; 39enum reboot_type reboot_type = BOOT_ACPI;
40int reboot_force; 40int reboot_force;
41 41
42/* This variable is used privately to keep track of whether or not
43 * reboot_type is still set to its default value (i.e., reboot= hasn't
44 * been set on the command line). This is needed so that we can
45 * suppress DMI scanning for reboot quirks. Without it, it's
46 * impossible to override a faulty reboot quirk without recompiling.
47 */
48static int reboot_default = 1;
49
42#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) 50#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
43static int reboot_cpu = -1; 51static int reboot_cpu = -1;
44#endif 52#endif
@@ -67,6 +75,12 @@ bool port_cf9_safe = false;
67static int __init reboot_setup(char *str) 75static int __init reboot_setup(char *str)
68{ 76{
69 for (;;) { 77 for (;;) {
78 /* Having anything passed on the command line via
79 * reboot= will cause us to disable DMI checking
80 * below.
81 */
82 reboot_default = 0;
83
70 switch (*str) { 84 switch (*str) {
71 case 'w': 85 case 'w':
72 reboot_mode = 0x1234; 86 reboot_mode = 0x1234;
@@ -295,14 +309,6 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
295 DMI_MATCH(DMI_BOARD_NAME, "P4S800"), 309 DMI_MATCH(DMI_BOARD_NAME, "P4S800"),
296 }, 310 },
297 }, 311 },
298 { /* Handle problems with rebooting on VersaLogic Menlow boards */
299 .callback = set_bios_reboot,
300 .ident = "VersaLogic Menlow based board",
301 .matches = {
302 DMI_MATCH(DMI_BOARD_VENDOR, "VersaLogic Corporation"),
303 DMI_MATCH(DMI_BOARD_NAME, "VersaLogic Menlow board"),
304 },
305 },
306 { /* Handle reboot issue on Acer Aspire one */ 312 { /* Handle reboot issue on Acer Aspire one */
307 .callback = set_kbd_reboot, 313 .callback = set_kbd_reboot,
308 .ident = "Acer Aspire One A110", 314 .ident = "Acer Aspire One A110",
@@ -316,7 +322,12 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
316 322
317static int __init reboot_init(void) 323static int __init reboot_init(void)
318{ 324{
319 dmi_check_system(reboot_dmi_table); 325 /* Only do the DMI check if reboot_type hasn't been overridden
326 * on the command line
327 */
328 if (reboot_default) {
329 dmi_check_system(reboot_dmi_table);
330 }
320 return 0; 331 return 0;
321} 332}
322core_initcall(reboot_init); 333core_initcall(reboot_init);
@@ -465,7 +476,12 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
465 476
466static int __init pci_reboot_init(void) 477static int __init pci_reboot_init(void)
467{ 478{
468 dmi_check_system(pci_reboot_dmi_table); 479 /* Only do the DMI check if reboot_type hasn't been overridden
480 * on the command line
481 */
482 if (reboot_default) {
483 dmi_check_system(pci_reboot_dmi_table);
484 }
469 return 0; 485 return 0;
470} 486}
471core_initcall(pci_reboot_init); 487core_initcall(pci_reboot_init);
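The reboot.c change keys both DMI quirk tables off the new reboot_default flag, which is cleared as soon as any reboot= option is parsed. A rough user-space model of that gate, with made-up names and only two of the many reboot= values, just to show the intended behaviour:

#include <stdio.h>
#include <string.h>

/* Illustrative model: any "reboot=" option clears the flag, and the quirk
 * tables are only consulted while it is still set. */
static int reboot_default = 1;
static const char *reboot_type = "acpi";	/* stand-in for BOOT_ACPI */

static void parse_reboot_option(const char *arg)	/* models reboot_setup() */
{
	reboot_default = 0;			/* user overrode the default */
	if (strcmp(arg, "b") == 0)
		reboot_type = "bios";
	else if (strcmp(arg, "a") == 0)
		reboot_type = "acpi";
}

static void apply_dmi_quirks(void)		/* models reboot_init() */
{
	if (!reboot_default) {
		printf("reboot=%s given, skipping DMI quirk table\n", reboot_type);
		return;
	}
	printf("no reboot= option, scanning DMI quirk table\n");
}

int main(void)
{
	apply_dmi_quirks();		/* default: quirks apply */
	parse_reboot_option("b");	/* e.g. booting with reboot=b */
	apply_dmi_quirks();		/* now the quirk table is skipped */
	return 0;
}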
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 482ec3af2067..77da5b475ad2 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -571,25 +571,34 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
571} 571}
572 572
573/* 573/*
574 * __math_state_restore assumes that cr0.TS is already clear and the 574 * This gets called with the process already owning the
575 * fpu state is all ready for use. Used during context switch. 575 * FPU state, and with CR0.TS cleared. It just needs to
576 * restore the FPU register state.
576 */ 577 */
577void __math_state_restore(void) 578void __math_state_restore(struct task_struct *tsk)
578{ 579{
579 struct thread_info *thread = current_thread_info(); 580 /* We need a safe address that is cheap to find and that is already
580 struct task_struct *tsk = thread->task; 581 in L1. We've just brought in "tsk->thread.has_fpu", so use that */
582#define safe_address (tsk->thread.has_fpu)
583
584 /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
585 is pending. Clear the x87 state here by setting it to fixed
586 values. safe_address is a random variable that should be in L1 */
587 alternative_input(
588 ASM_NOP8 ASM_NOP2,
589 "emms\n\t" /* clear stack tags */
590 "fildl %P[addr]", /* set F?P to defined value */
591 X86_FEATURE_FXSAVE_LEAK,
592 [addr] "m" (safe_address));
581 593
582 /* 594 /*
583 * Paranoid restore. send a SIGSEGV if we fail to restore the state. 595 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
584 */ 596 */
585 if (unlikely(restore_fpu_checking(tsk))) { 597 if (unlikely(restore_fpu_checking(tsk))) {
586 stts(); 598 __thread_fpu_end(tsk);
587 force_sig(SIGSEGV, tsk); 599 force_sig(SIGSEGV, tsk);
588 return; 600 return;
589 } 601 }
590
591 thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */
592 tsk->fpu_counter++;
593} 602}
594 603
595/* 604/*
@@ -599,13 +608,12 @@ void __math_state_restore(void)
599 * Careful.. There are problems with IBM-designed IRQ13 behaviour. 608 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
600 * Don't touch unless you *really* know how it works. 609 * Don't touch unless you *really* know how it works.
601 * 610 *
602 * Must be called with kernel preemption disabled (in this case, 611 * Must be called with kernel preemption disabled (eg with local
 603 * local interrupts are disabled at the call-site in entry.S). 612 * interrupts disabled, as in the case of do_device_not_available).
604 */ 613 */
605asmlinkage void math_state_restore(void) 614void math_state_restore(void)
606{ 615{
607 struct thread_info *thread = current_thread_info(); 616 struct task_struct *tsk = current;
608 struct task_struct *tsk = thread->task;
609 617
610 if (!tsk_used_math(tsk)) { 618 if (!tsk_used_math(tsk)) {
611 local_irq_enable(); 619 local_irq_enable();
@@ -622,9 +630,10 @@ asmlinkage void math_state_restore(void)
622 local_irq_disable(); 630 local_irq_disable();
623 } 631 }
624 632
625 clts(); /* Allow maths ops (or we recurse) */ 633 __thread_fpu_begin(tsk);
634 __math_state_restore(tsk);
626 635
627 __math_state_restore(); 636 tsk->fpu_counter++;
628} 637}
629EXPORT_SYMBOL_GPL(math_state_restore); 638EXPORT_SYMBOL_GPL(math_state_restore);
630 639
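After the traps.c change, the lazy-FPU path on a #NM fault is: math_state_restore() takes ownership with __thread_fpu_begin(), __math_state_restore() reloads the registers, and fpu_counter is bumped afterwards. A toy user-space model of that flow; every name below is a stand-in, nothing here is kernel API.

#include <stdio.h>
#include <stdbool.h>

static bool cr0_ts = true;	/* task was switched in without a preload */

struct task {
	bool used_math;
	bool has_fpu;
	unsigned fpu_counter;
};

static void math_state_restore_model(struct task *tsk)
{
	if (!tsk->used_math) {
		/* the real code would init_fpu(tsk) here */
		tsk->used_math = true;
	}
	/* __thread_fpu_begin(): clts() + mark the task as FPU owner */
	cr0_ts = false;
	tsk->has_fpu = true;
	/* __math_state_restore(): frstor/fxrstor/xrstor the saved state */
	tsk->fpu_counter++;
}

static void fpu_instruction(struct task *tsk)
{
	if (cr0_ts)			/* hardware raises #NM while TS is set */
		math_state_restore_model(tsk);
	/* ...the instruction then runs with the FPU available... */
}

int main(void)
{
	struct task t = { .used_math = true };

	fpu_instruction(&t);		/* first use traps and restores state */
	fpu_instruction(&t);		/* later uses run directly */
	printf("has_fpu=%d fpu_counter=%u TS=%d\n",
	       t.has_fpu, t.fpu_counter, cr0_ts);
	return 0;
}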
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index a3911343976b..711091114119 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -47,7 +47,7 @@ void __sanitize_i387_state(struct task_struct *tsk)
47 if (!fx) 47 if (!fx)
48 return; 48 return;
49 49
50 BUG_ON(task_thread_info(tsk)->status & TS_USEDFPU); 50 BUG_ON(__thread_has_fpu(tsk));
51 51
52 xstate_bv = tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv; 52 xstate_bv = tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv;
53 53
@@ -168,7 +168,7 @@ int save_i387_xstate(void __user *buf)
168 if (!used_math()) 168 if (!used_math())
169 return 0; 169 return 0;
170 170
171 if (task_thread_info(tsk)->status & TS_USEDFPU) { 171 if (user_has_fpu()) {
172 if (use_xsave()) 172 if (use_xsave())
173 err = xsave_user(buf); 173 err = xsave_user(buf);
174 else 174 else
@@ -176,8 +176,7 @@ int save_i387_xstate(void __user *buf)
176 176
177 if (err) 177 if (err)
178 return err; 178 return err;
179 task_thread_info(tsk)->status &= ~TS_USEDFPU; 179 user_fpu_end();
180 stts();
181 } else { 180 } else {
182 sanitize_i387_state(tsk); 181 sanitize_i387_state(tsk);
183 if (__copy_to_user(buf, &tsk->thread.fpu.state->fxsave, 182 if (__copy_to_user(buf, &tsk->thread.fpu.state->fxsave,
@@ -292,10 +291,7 @@ int restore_i387_xstate(void __user *buf)
292 return err; 291 return err;
293 } 292 }
294 293
295 if (!(task_thread_info(current)->status & TS_USEDFPU)) { 294 user_fpu_begin();
296 clts();
297 task_thread_info(current)->status |= TS_USEDFPU;
298 }
299 if (use_xsave()) 295 if (use_xsave())
300 err = restore_user_xstate(buf); 296 err = restore_user_xstate(buf);
301 else 297 else
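The xsave.c conversion above relies on the pairing rule documented in i387.h: user_fpu_end() only after the register state has been saved, user_fpu_begin() only immediately before restoring it. A small illustrative sketch of that shape, with simplified stand-in helpers rather than the real save/restore code:

#include <stdio.h>
#include <stdbool.h>

static bool thread_has_fpu = true;

static void user_fpu_end_model(void)
{
	thread_has_fpu = false;		/* followed by stts() in the kernel */
}

static void user_fpu_begin_model(void)
{
	if (!thread_has_fpu)
		thread_has_fpu = true;	/* preceded by clts() in the kernel */
}

/* save_i387_xstate()-like path: dump live registers, then drop ownership. */
static void save_to_signal_frame(void)
{
	if (thread_has_fpu) {
		printf("xsave/fxsave to the user buffer\n");
		user_fpu_end_model();	/* safe: state is already saved */
	} else {
		printf("copy the saved in-memory state to the user buffer\n");
	}
}

/* restore_i387_xstate()-like path: take ownership, then load registers. */
static void restore_from_signal_frame(void)
{
	user_fpu_begin_model();		/* immediately before the restore */
	printf("xrstor/fxrstor from the user buffer\n");
}

int main(void)
{
	save_to_signal_frame();
	restore_from_signal_frame();
	return 0;
}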
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 05a562b85025..0982507b962a 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -1891,6 +1891,51 @@ setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
1891 ss->p = 1; 1891 ss->p = 1;
1892} 1892}
1893 1893
1894static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
1895{
1896 struct x86_emulate_ops *ops = ctxt->ops;
1897 u32 eax, ebx, ecx, edx;
1898
1899 /*
1900 * syscall should always be enabled in longmode - so only become
1901 * vendor specific (cpuid) if other modes are active...
1902 */
1903 if (ctxt->mode == X86EMUL_MODE_PROT64)
1904 return true;
1905
1906 eax = 0x00000000;
1907 ecx = 0x00000000;
1908 if (ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx)) {
1909 /*
1910 * Intel ("GenuineIntel")
1911 * remark: Intel CPUs only support "syscall" in 64bit
1912 * longmode. Also an 64bit guest with a
1913 * 32bit compat-app running will #UD !! While this
1914 * behaviour can be fixed (by emulating) into AMD
1915 * response - CPUs of AMD can't behave like Intel.
1916 */
1917 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
1918 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
1919 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
1920 return false;
1921
1922 /* AMD ("AuthenticAMD") */
1923 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
1924 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
1925 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
1926 return true;
1927
1928 /* AMD ("AMDisbetter!") */
1929 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
1930 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
1931 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
1932 return true;
1933 }
1934
1935 /* default: (not Intel, not AMD), apply Intel's stricter rules... */
1936 return false;
1937}
1938
1894static int em_syscall(struct x86_emulate_ctxt *ctxt) 1939static int em_syscall(struct x86_emulate_ctxt *ctxt)
1895{ 1940{
1896 struct x86_emulate_ops *ops = ctxt->ops; 1941 struct x86_emulate_ops *ops = ctxt->ops;
@@ -1904,9 +1949,15 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt)
1904 ctxt->mode == X86EMUL_MODE_VM86) 1949 ctxt->mode == X86EMUL_MODE_VM86)
1905 return emulate_ud(ctxt); 1950 return emulate_ud(ctxt);
1906 1951
1952 if (!(em_syscall_is_enabled(ctxt)))
1953 return emulate_ud(ctxt);
1954
1907 ops->get_msr(ctxt, MSR_EFER, &efer); 1955 ops->get_msr(ctxt, MSR_EFER, &efer);
1908 setup_syscalls_segments(ctxt, &cs, &ss); 1956 setup_syscalls_segments(ctxt, &cs, &ss);
1909 1957
1958 if (!(efer & EFER_SCE))
1959 return emulate_ud(ctxt);
1960
1910 ops->get_msr(ctxt, MSR_STAR, &msr_data); 1961 ops->get_msr(ctxt, MSR_STAR, &msr_data);
1911 msr_data >>= 32; 1962 msr_data >>= 32;
1912 cs_sel = (u16)(msr_data & 0xfffc); 1963 cs_sel = (u16)(msr_data & 0xfffc);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d29216c462b3..3b4c8d8ad906 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1457,7 +1457,7 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
1457#ifdef CONFIG_X86_64 1457#ifdef CONFIG_X86_64
1458 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); 1458 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
1459#endif 1459#endif
1460 if (current_thread_info()->status & TS_USEDFPU) 1460 if (__thread_has_fpu(current))
1461 clts(); 1461 clts();
1462 load_gdt(&__get_cpu_var(host_gdt)); 1462 load_gdt(&__get_cpu_var(host_gdt));
1463} 1463}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 14d6cadc4ba6..9cbfc0698118 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1495,6 +1495,8 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
1495 1495
1496int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) 1496int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1497{ 1497{
1498 bool pr = false;
1499
1498 switch (msr) { 1500 switch (msr) {
1499 case MSR_EFER: 1501 case MSR_EFER:
1500 return set_efer(vcpu, data); 1502 return set_efer(vcpu, data);
@@ -1635,6 +1637,18 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1635 pr_unimpl(vcpu, "unimplemented perfctr wrmsr: " 1637 pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
1636 "0x%x data 0x%llx\n", msr, data); 1638 "0x%x data 0x%llx\n", msr, data);
1637 break; 1639 break;
1640 case MSR_P6_PERFCTR0:
1641 case MSR_P6_PERFCTR1:
1642 pr = true;
1643 case MSR_P6_EVNTSEL0:
1644 case MSR_P6_EVNTSEL1:
1645 if (kvm_pmu_msr(vcpu, msr))
1646 return kvm_pmu_set_msr(vcpu, msr, data);
1647
1648 if (pr || data != 0)
1649 pr_unimpl(vcpu, "disabled perfctr wrmsr: "
1650 "0x%x data 0x%llx\n", msr, data);
1651 break;
1638 case MSR_K7_CLK_CTL: 1652 case MSR_K7_CLK_CTL:
1639 /* 1653 /*
1640 * Ignore all writes to this no longer documented MSR. 1654 * Ignore all writes to this no longer documented MSR.
@@ -1835,6 +1849,14 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1835 case MSR_FAM10H_MMIO_CONF_BASE: 1849 case MSR_FAM10H_MMIO_CONF_BASE:
1836 data = 0; 1850 data = 0;
1837 break; 1851 break;
1852 case MSR_P6_PERFCTR0:
1853 case MSR_P6_PERFCTR1:
1854 case MSR_P6_EVNTSEL0:
1855 case MSR_P6_EVNTSEL1:
1856 if (kvm_pmu_msr(vcpu, msr))
1857 return kvm_pmu_get_msr(vcpu, msr, pdata);
1858 data = 0;
1859 break;
1838 case MSR_IA32_UCODE_REV: 1860 case MSR_IA32_UCODE_REV:
1839 data = 0x100000000ULL; 1861 data = 0x100000000ULL;
1840 break; 1862 break;
@@ -4180,6 +4202,28 @@ static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
4180 return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage); 4202 return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage);
4181} 4203}
4182 4204
4205static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
4206 u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
4207{
4208 struct kvm_cpuid_entry2 *cpuid = NULL;
4209
4210 if (eax && ecx)
4211 cpuid = kvm_find_cpuid_entry(emul_to_vcpu(ctxt),
4212 *eax, *ecx);
4213
4214 if (cpuid) {
4215 *eax = cpuid->eax;
4216 *ecx = cpuid->ecx;
4217 if (ebx)
4218 *ebx = cpuid->ebx;
4219 if (edx)
4220 *edx = cpuid->edx;
4221 return true;
4222 }
4223
4224 return false;
4225}
4226
4183static struct x86_emulate_ops emulate_ops = { 4227static struct x86_emulate_ops emulate_ops = {
4184 .read_std = kvm_read_guest_virt_system, 4228 .read_std = kvm_read_guest_virt_system,
4185 .write_std = kvm_write_guest_virt_system, 4229 .write_std = kvm_write_guest_virt_system,
@@ -4211,6 +4255,7 @@ static struct x86_emulate_ops emulate_ops = {
4211 .get_fpu = emulator_get_fpu, 4255 .get_fpu = emulator_get_fpu,
4212 .put_fpu = emulator_put_fpu, 4256 .put_fpu = emulator_put_fpu,
4213 .intercept = emulator_intercept, 4257 .intercept = emulator_intercept,
4258 .get_cpuid = emulator_get_cpuid,
4214}; 4259};
4215 4260
4216static void cache_all_regs(struct kvm_vcpu *vcpu) 4261static void cache_all_regs(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 9d74824a708d..f0b4caf85c1a 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -673,7 +673,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
673 673
674 stackend = end_of_stack(tsk); 674 stackend = end_of_stack(tsk);
675 if (tsk != &init_task && *stackend != STACK_END_MAGIC) 675 if (tsk != &init_task && *stackend != STACK_END_MAGIC)
676 printk(KERN_ALERT "Thread overran stack, or stack corrupted\n"); 676 printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
677 677
678 tsk->thread.cr2 = address; 678 tsk->thread.cr2 = address;
679 tsk->thread.trap_no = 14; 679 tsk->thread.trap_no = 14;
@@ -684,7 +684,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
684 sig = 0; 684 sig = 0;
685 685
686 /* Executive summary in case the body of the oops scrolled away */ 686 /* Executive summary in case the body of the oops scrolled away */
687 printk(KERN_EMERG "CR2: %016lx\n", address); 687 printk(KERN_DEFAULT "CR2: %016lx\n", address);
688 688
689 oops_end(flags, regs, sig); 689 oops_end(flags, regs, sig);
690} 690}
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 492ade8c978e..d99346ea8fdb 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -374,7 +374,7 @@ int __init pci_xen_init(void)
374 374
375int __init pci_xen_hvm_init(void) 375int __init pci_xen_hvm_init(void)
376{ 376{
377 if (!xen_feature(XENFEAT_hvm_pirqs)) 377 if (!xen_have_vector_callback || !xen_feature(XENFEAT_hvm_pirqs))
378 return 0; 378 return 0;
379 379
380#ifdef CONFIG_ACPI 380#ifdef CONFIG_ACPI
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 041d4fe9dfe4..501d4e0244ba 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -409,6 +409,13 @@ static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
409 play_dead_common(); 409 play_dead_common();
410 HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL); 410 HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
411 cpu_bringup(); 411 cpu_bringup();
412 /*
413 * Balance out the preempt calls - as we are running in cpu_idle
414 * loop which has been called at bootup from cpu_bringup_and_idle.
 415 * The cpu_bringup_and_idle called cpu_bringup which made a
 416 * preempt_disable(). So this preempt_enable will balance it out.

417 */
418 preempt_enable();
412} 419}
413 420
414#else /* !CONFIG_HOTPLUG_CPU */ 421#else /* !CONFIG_HOTPLUG_CPU */
diff --git a/arch/xtensa/include/asm/string.h b/arch/xtensa/include/asm/string.h
index 5fb8c27cbef5..405a8c49ff2c 100644
--- a/arch/xtensa/include/asm/string.h
+++ b/arch/xtensa/include/asm/string.h
@@ -118,7 +118,4 @@ extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
118/* Don't build bcopy at all ... */ 118/* Don't build bcopy at all ... */
119#define __HAVE_ARCH_BCOPY 119#define __HAVE_ARCH_BCOPY
120 120
121#define __HAVE_ARCH_MEMSCAN
122#define memscan memchr
123
124#endif /* _XTENSA_STRING_H */ 121#endif /* _XTENSA_STRING_H */
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index fa8f26309444..75642a352a8f 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1659,7 +1659,7 @@ static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
1659 ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE); 1659 ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
1660 if (ioc) { 1660 if (ioc) {
1661 ioc_cgroup_changed(ioc); 1661 ioc_cgroup_changed(ioc);
1662 put_io_context(ioc, NULL); 1662 put_io_context(ioc);
1663 } 1663 }
1664 } 1664 }
1665} 1665}
diff --git a/block/blk-core.c b/block/blk-core.c
index e6c05a97ee2b..3a78b00edd71 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -642,7 +642,7 @@ static inline void blk_free_request(struct request_queue *q, struct request *rq)
642 if (rq->cmd_flags & REQ_ELVPRIV) { 642 if (rq->cmd_flags & REQ_ELVPRIV) {
643 elv_put_request(q, rq); 643 elv_put_request(q, rq);
644 if (rq->elv.icq) 644 if (rq->elv.icq)
645 put_io_context(rq->elv.icq->ioc, q); 645 put_io_context(rq->elv.icq->ioc);
646 } 646 }
647 647
648 mempool_free(rq, q->rq.rq_pool); 648 mempool_free(rq, q->rq.rq_pool);
@@ -872,13 +872,15 @@ retry:
872 spin_unlock_irq(q->queue_lock); 872 spin_unlock_irq(q->queue_lock);
873 873
874 /* create icq if missing */ 874 /* create icq if missing */
875 if (unlikely(et->icq_cache && !icq)) 875 if ((rw_flags & REQ_ELVPRIV) && unlikely(et->icq_cache && !icq)) {
876 icq = ioc_create_icq(q, gfp_mask); 876 icq = ioc_create_icq(q, gfp_mask);
877 if (!icq)
878 goto fail_icq;
879 }
877 880
878 /* rqs are guaranteed to have icq on elv_set_request() if requested */ 881 rq = blk_alloc_request(q, icq, rw_flags, gfp_mask);
879 if (likely(!et->icq_cache || icq))
880 rq = blk_alloc_request(q, icq, rw_flags, gfp_mask);
881 882
883fail_icq:
882 if (unlikely(!rq)) { 884 if (unlikely(!rq)) {
883 /* 885 /*
884 * Allocation failed presumably due to memory. Undo anything 886 * Allocation failed presumably due to memory. Undo anything
@@ -1210,7 +1212,6 @@ static bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
1210 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); 1212 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1211 1213
1212 drive_stat_acct(req, 0); 1214 drive_stat_acct(req, 0);
1213 elv_bio_merged(q, req, bio);
1214 return true; 1215 return true;
1215} 1216}
1216 1217
@@ -1241,7 +1242,6 @@ static bool bio_attempt_front_merge(struct request_queue *q,
1241 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); 1242 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1242 1243
1243 drive_stat_acct(req, 0); 1244 drive_stat_acct(req, 0);
1244 elv_bio_merged(q, req, bio);
1245 return true; 1245 return true;
1246} 1246}
1247 1247
@@ -1255,13 +1255,12 @@ static bool bio_attempt_front_merge(struct request_queue *q,
1255 * on %current's plugged list. Returns %true if merge was successful, 1255 * on %current's plugged list. Returns %true if merge was successful,
1256 * otherwise %false. 1256 * otherwise %false.
1257 * 1257 *
1258 * This function is called without @q->queue_lock; however, elevator is 1258 * Plugging coalesces IOs from the same issuer for the same purpose without
1259 * accessed iff there already are requests on the plugged list which in 1259 * going through @q->queue_lock. As such it's more of an issuing mechanism
1260 * turn guarantees validity of the elevator. 1260 * than scheduling, and the request, while may have elvpriv data, is not
1261 * 1261 * added on the elevator at this point. In addition, we don't have
1262 * Note that, on successful merge, elevator operation 1262 * reliable access to the elevator outside queue lock. Only check basic
1263 * elevator_bio_merged_fn() will be called without queue lock. Elevator 1263 * merging parameters without querying the elevator.
1264 * must be ready for this.
1265 */ 1264 */
1266static bool attempt_plug_merge(struct request_queue *q, struct bio *bio, 1265static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
1267 unsigned int *request_count) 1266 unsigned int *request_count)
@@ -1280,10 +1279,10 @@ static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
1280 1279
1281 (*request_count)++; 1280 (*request_count)++;
1282 1281
1283 if (rq->q != q) 1282 if (rq->q != q || !blk_rq_merge_ok(rq, bio))
1284 continue; 1283 continue;
1285 1284
1286 el_ret = elv_try_merge(rq, bio); 1285 el_ret = blk_try_merge(rq, bio);
1287 if (el_ret == ELEVATOR_BACK_MERGE) { 1286 if (el_ret == ELEVATOR_BACK_MERGE) {
1288 ret = bio_attempt_back_merge(q, rq, bio); 1287 ret = bio_attempt_back_merge(q, rq, bio);
1289 if (ret) 1288 if (ret)
@@ -1345,12 +1344,14 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio)
1345 el_ret = elv_merge(q, &req, bio); 1344 el_ret = elv_merge(q, &req, bio);
1346 if (el_ret == ELEVATOR_BACK_MERGE) { 1345 if (el_ret == ELEVATOR_BACK_MERGE) {
1347 if (bio_attempt_back_merge(q, req, bio)) { 1346 if (bio_attempt_back_merge(q, req, bio)) {
1347 elv_bio_merged(q, req, bio);
1348 if (!attempt_back_merge(q, req)) 1348 if (!attempt_back_merge(q, req))
1349 elv_merged_request(q, req, el_ret); 1349 elv_merged_request(q, req, el_ret);
1350 goto out_unlock; 1350 goto out_unlock;
1351 } 1351 }
1352 } else if (el_ret == ELEVATOR_FRONT_MERGE) { 1352 } else if (el_ret == ELEVATOR_FRONT_MERGE) {
1353 if (bio_attempt_front_merge(q, req, bio)) { 1353 if (bio_attempt_front_merge(q, req, bio)) {
1354 elv_bio_merged(q, req, bio);
1354 if (!attempt_front_merge(q, req)) 1355 if (!attempt_front_merge(q, req))
1355 elv_merged_request(q, req, el_ret); 1356 elv_merged_request(q, req, el_ret);
1356 goto out_unlock; 1357 goto out_unlock;
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 27a06e00eaec..8b782a63c297 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -29,21 +29,6 @@ void get_io_context(struct io_context *ioc)
29} 29}
30EXPORT_SYMBOL(get_io_context); 30EXPORT_SYMBOL(get_io_context);
31 31
32/*
33 * Releasing ioc may nest into another put_io_context() leading to nested
34 * fast path release. As the ioc's can't be the same, this is okay but
35 * makes lockdep whine. Keep track of nesting and use it as subclass.
36 */
37#ifdef CONFIG_LOCKDEP
38#define ioc_release_depth(q) ((q) ? (q)->ioc_release_depth : 0)
39#define ioc_release_depth_inc(q) (q)->ioc_release_depth++
40#define ioc_release_depth_dec(q) (q)->ioc_release_depth--
41#else
42#define ioc_release_depth(q) 0
43#define ioc_release_depth_inc(q) do { } while (0)
44#define ioc_release_depth_dec(q) do { } while (0)
45#endif
46
47static void icq_free_icq_rcu(struct rcu_head *head) 32static void icq_free_icq_rcu(struct rcu_head *head)
48{ 33{
49 struct io_cq *icq = container_of(head, struct io_cq, __rcu_head); 34 struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);
@@ -75,11 +60,8 @@ static void ioc_exit_icq(struct io_cq *icq)
75 if (rcu_dereference_raw(ioc->icq_hint) == icq) 60 if (rcu_dereference_raw(ioc->icq_hint) == icq)
76 rcu_assign_pointer(ioc->icq_hint, NULL); 61 rcu_assign_pointer(ioc->icq_hint, NULL);
77 62
78 if (et->ops.elevator_exit_icq_fn) { 63 if (et->ops.elevator_exit_icq_fn)
79 ioc_release_depth_inc(q);
80 et->ops.elevator_exit_icq_fn(icq); 64 et->ops.elevator_exit_icq_fn(icq);
81 ioc_release_depth_dec(q);
82 }
83 65
84 /* 66 /*
85 * @icq->q might have gone away by the time RCU callback runs 67 * @icq->q might have gone away by the time RCU callback runs
@@ -98,8 +80,15 @@ static void ioc_release_fn(struct work_struct *work)
98 struct io_context *ioc = container_of(work, struct io_context, 80 struct io_context *ioc = container_of(work, struct io_context,
99 release_work); 81 release_work);
100 struct request_queue *last_q = NULL; 82 struct request_queue *last_q = NULL;
83 unsigned long flags;
101 84
102 spin_lock_irq(&ioc->lock); 85 /*
86 * Exiting icq may call into put_io_context() through elevator
87 * which will trigger lockdep warning. The ioc's are guaranteed to
88 * be different, use a different locking subclass here. Use
89 * irqsave variant as there's no spin_lock_irq_nested().
90 */
91 spin_lock_irqsave_nested(&ioc->lock, flags, 1);
103 92
104 while (!hlist_empty(&ioc->icq_list)) { 93 while (!hlist_empty(&ioc->icq_list)) {
105 struct io_cq *icq = hlist_entry(ioc->icq_list.first, 94 struct io_cq *icq = hlist_entry(ioc->icq_list.first,
@@ -121,15 +110,15 @@ static void ioc_release_fn(struct work_struct *work)
121 */ 110 */
122 if (last_q) { 111 if (last_q) {
123 spin_unlock(last_q->queue_lock); 112 spin_unlock(last_q->queue_lock);
124 spin_unlock_irq(&ioc->lock); 113 spin_unlock_irqrestore(&ioc->lock, flags);
125 blk_put_queue(last_q); 114 blk_put_queue(last_q);
126 } else { 115 } else {
127 spin_unlock_irq(&ioc->lock); 116 spin_unlock_irqrestore(&ioc->lock, flags);
128 } 117 }
129 118
130 last_q = this_q; 119 last_q = this_q;
131 spin_lock_irq(this_q->queue_lock); 120 spin_lock_irqsave(this_q->queue_lock, flags);
132 spin_lock(&ioc->lock); 121 spin_lock_nested(&ioc->lock, 1);
133 continue; 122 continue;
134 } 123 }
135 ioc_exit_icq(icq); 124 ioc_exit_icq(icq);
@@ -137,10 +126,10 @@ static void ioc_release_fn(struct work_struct *work)
137 126
138 if (last_q) { 127 if (last_q) {
139 spin_unlock(last_q->queue_lock); 128 spin_unlock(last_q->queue_lock);
140 spin_unlock_irq(&ioc->lock); 129 spin_unlock_irqrestore(&ioc->lock, flags);
141 blk_put_queue(last_q); 130 blk_put_queue(last_q);
142 } else { 131 } else {
143 spin_unlock_irq(&ioc->lock); 132 spin_unlock_irqrestore(&ioc->lock, flags);
144 } 133 }
145 134
146 kmem_cache_free(iocontext_cachep, ioc); 135 kmem_cache_free(iocontext_cachep, ioc);
@@ -149,79 +138,29 @@ static void ioc_release_fn(struct work_struct *work)
149/** 138/**
150 * put_io_context - put a reference of io_context 139 * put_io_context - put a reference of io_context
151 * @ioc: io_context to put 140 * @ioc: io_context to put
152 * @locked_q: request_queue the caller is holding queue_lock of (hint)
153 * 141 *
154 * Decrement reference count of @ioc and release it if the count reaches 142 * Decrement reference count of @ioc and release it if the count reaches
155 * zero. If the caller is holding queue_lock of a queue, it can indicate 143 * zero.
156 * that with @locked_q. This is an optimization hint and the caller is
157 * allowed to pass in %NULL even when it's holding a queue_lock.
158 */ 144 */
159void put_io_context(struct io_context *ioc, struct request_queue *locked_q) 145void put_io_context(struct io_context *ioc)
160{ 146{
161 struct request_queue *last_q = locked_q;
162 unsigned long flags; 147 unsigned long flags;
163 148
164 if (ioc == NULL) 149 if (ioc == NULL)
165 return; 150 return;
166 151
167 BUG_ON(atomic_long_read(&ioc->refcount) <= 0); 152 BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
168 if (locked_q)
169 lockdep_assert_held(locked_q->queue_lock);
170
171 if (!atomic_long_dec_and_test(&ioc->refcount))
172 return;
173 153
174 /* 154 /*
175 * Destroy @ioc. This is a bit messy because icq's are chained 155 * Releasing ioc requires reverse order double locking and we may
176 * from both ioc and queue, and ioc->lock nests inside queue_lock. 156 * already be holding a queue_lock. Do it asynchronously from wq.
177 * The inner ioc->lock should be held to walk our icq_list and then
178 * for each icq the outer matching queue_lock should be grabbed.
179 * ie. We need to do reverse-order double lock dancing.
180 *
181 * Another twist is that we are often called with one of the
182 * matching queue_locks held as indicated by @locked_q, which
183 * prevents performing double-lock dance for other queues.
184 *
185 * So, we do it in two stages. The fast path uses the queue_lock
186 * the caller is holding and, if other queues need to be accessed,
187 * uses trylock to avoid introducing locking dependency. This can
188 * handle most cases, especially if @ioc was performing IO on only
189 * single device.
190 *
191 * If trylock doesn't cut it, we defer to @ioc->release_work which
192 * can do all the double-locking dancing.
193 */ 157 */
194 spin_lock_irqsave_nested(&ioc->lock, flags, 158 if (atomic_long_dec_and_test(&ioc->refcount)) {
195 ioc_release_depth(locked_q)); 159 spin_lock_irqsave(&ioc->lock, flags);
196 160 if (!hlist_empty(&ioc->icq_list))
197 while (!hlist_empty(&ioc->icq_list)) { 161 schedule_work(&ioc->release_work);
198 struct io_cq *icq = hlist_entry(ioc->icq_list.first, 162 spin_unlock_irqrestore(&ioc->lock, flags);
199 struct io_cq, ioc_node);
200 struct request_queue *this_q = icq->q;
201
202 if (this_q != last_q) {
203 if (last_q && last_q != locked_q)
204 spin_unlock(last_q->queue_lock);
205 last_q = NULL;
206
207 if (!spin_trylock(this_q->queue_lock))
208 break;
209 last_q = this_q;
210 continue;
211 }
212 ioc_exit_icq(icq);
213 } 163 }
214
215 if (last_q && last_q != locked_q)
216 spin_unlock(last_q->queue_lock);
217
218 spin_unlock_irqrestore(&ioc->lock, flags);
219
220 /* if no icq is left, we're done; otherwise, kick release_work */
221 if (hlist_empty(&ioc->icq_list))
222 kmem_cache_free(iocontext_cachep, ioc);
223 else
224 schedule_work(&ioc->release_work);
225} 164}
226EXPORT_SYMBOL(put_io_context); 165EXPORT_SYMBOL(put_io_context);
227 166
@@ -236,7 +175,7 @@ void exit_io_context(struct task_struct *task)
236 task_unlock(task); 175 task_unlock(task);
237 176
238 atomic_dec(&ioc->nr_tasks); 177 atomic_dec(&ioc->nr_tasks);
239 put_io_context(ioc, NULL); 178 put_io_context(ioc);
240} 179}
241 180
242/** 181/**
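With this blk-ioc.c change, the final put_io_context() no longer tries to walk the icq list inline; it only drops the reference and, if icqs remain, punts the reverse-order double locking to the release work item. A toy model of that shape, with no real locking or workqueues and made-up names:

#include <stdio.h>

struct io_context_model {
	long refcount;
	int  nr_icqs;		/* stands in for the icq_list */
};

static void schedule_release_work(struct io_context_model *ioc)
{
	printf("release_work scheduled: will exit %d icq(s) later\n",
	       ioc->nr_icqs);
}

static void put_io_context_model(struct io_context_model *ioc)
{
	if (--ioc->refcount > 0)
		return;
	/* Last reference: never tear the icqs down inline (that would need
	 * queue_lock taken inside ioc->lock, in reverse order); defer to the
	 * work item, which does the careful double locking and the freeing. */
	if (ioc->nr_icqs)
		schedule_release_work(ioc);
}

int main(void)
{
	struct io_context_model ioc = { .refcount = 2, .nr_icqs = 1 };

	put_io_context_model(&ioc);	/* still referenced elsewhere */
	put_io_context_model(&ioc);	/* last put: teardown is deferred */
	return 0;
}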
diff --git a/block/blk-merge.c b/block/blk-merge.c
index cfcc37cb222b..160035f54882 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -471,3 +471,40 @@ int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
471{ 471{
472 return attempt_merge(q, rq, next); 472 return attempt_merge(q, rq, next);
473} 473}
474
475bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
476{
477 if (!rq_mergeable(rq))
478 return false;
479
480 /* don't merge file system requests and discard requests */
481 if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
482 return false;
483
484 /* don't merge discard requests and secure discard requests */
485 if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
486 return false;
487
488 /* different data direction or already started, don't merge */
489 if (bio_data_dir(bio) != rq_data_dir(rq))
490 return false;
491
492 /* must be same device and not a special request */
493 if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
494 return false;
495
496 /* only merge integrity protected bio into ditto rq */
497 if (bio_integrity(bio) != blk_integrity_rq(rq))
498 return false;
499
500 return true;
501}
502
503int blk_try_merge(struct request *rq, struct bio *bio)
504{
505 if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector)
506 return ELEVATOR_BACK_MERGE;
507 else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector)
508 return ELEVATOR_FRONT_MERGE;
509 return ELEVATOR_NO_MERGE;
510}
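blk_try_merge() above classifies a bio purely by sector position: a back merge when the bio starts where the request ends, a front merge when the bio ends where the request starts. The same arithmetic on plain numbers, as a quick standalone check; the values and helper name are invented for illustration.

#include <stdio.h>

enum { NO_MERGE, BACK_MERGE, FRONT_MERGE };

static int try_merge(unsigned long rq_pos, unsigned long rq_sectors,
		     unsigned long bio_sector, unsigned long bio_sectors)
{
	if (rq_pos + rq_sectors == bio_sector)
		return BACK_MERGE;	/* bio continues the request */
	else if (rq_pos - bio_sectors == bio_sector)
		return FRONT_MERGE;	/* bio ends where the request starts */
	return NO_MERGE;
}

int main(void)
{
	/* request covers sectors [1000, 1008) */
	printf("%d\n", try_merge(1000, 8, 1008, 8));	/* 1: back merge */
	printf("%d\n", try_merge(1000, 8,  992, 8));	/* 2: front merge */
	printf("%d\n", try_merge(1000, 8, 1016, 8));	/* 0: no merge */
	return 0;
}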
diff --git a/block/blk.h b/block/blk.h
index 7efd772336de..9c12f80882b0 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -137,6 +137,8 @@ int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
137 struct request *next); 137 struct request *next);
138void blk_recalc_rq_segments(struct request *rq); 138void blk_recalc_rq_segments(struct request *rq);
139void blk_rq_set_mixed_merge(struct request *rq); 139void blk_rq_set_mixed_merge(struct request *rq);
140bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
141int blk_try_merge(struct request *rq, struct bio *bio);
140 142
141void blk_queue_congestion_threshold(struct request_queue *q); 143void blk_queue_congestion_threshold(struct request_queue *q);
142 144
diff --git a/block/bsg.c b/block/bsg.c
index 4cf703fd98bb..ff64ae3bacee 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -983,7 +983,8 @@ void bsg_unregister_queue(struct request_queue *q)
983 983
984 mutex_lock(&bsg_mutex); 984 mutex_lock(&bsg_mutex);
985 idr_remove(&bsg_minor_idr, bcd->minor); 985 idr_remove(&bsg_minor_idr, bcd->minor);
986 sysfs_remove_link(&q->kobj, "bsg"); 986 if (q->kobj.sd)
987 sysfs_remove_link(&q->kobj, "bsg");
987 device_unregister(bcd->class_dev); 988 device_unregister(bcd->class_dev);
988 bcd->class_dev = NULL; 989 bcd->class_dev = NULL;
989 kref_put(&bcd->ref, bsg_kref_release_function); 990 kref_put(&bcd->ref, bsg_kref_release_function);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index ee55019066a1..d0ba50533668 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1699,18 +1699,11 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
1699 1699
1700 /* 1700 /*
1701 * Lookup the cfqq that this bio will be queued with and allow 1701 * Lookup the cfqq that this bio will be queued with and allow
1702 * merge only if rq is queued there. This function can be called 1702 * merge only if rq is queued there.
1703 * from plug merge without queue_lock. In such cases, ioc of @rq
1704 * and %current are guaranteed to be equal. Avoid lookup which
1705 * requires queue_lock by using @rq's cic.
1706 */ 1703 */
1707 if (current->io_context == RQ_CIC(rq)->icq.ioc) { 1704 cic = cfq_cic_lookup(cfqd, current->io_context);
1708 cic = RQ_CIC(rq); 1705 if (!cic)
1709 } else { 1706 return false;
1710 cic = cfq_cic_lookup(cfqd, current->io_context);
1711 if (!cic)
1712 return false;
1713 }
1714 1707
1715 cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio)); 1708 cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
1716 return cfqq == RQ_CFQQ(rq); 1709 return cfqq == RQ_CFQQ(rq);
@@ -1794,7 +1787,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1794 cfqd->active_queue = NULL; 1787 cfqd->active_queue = NULL;
1795 1788
1796 if (cfqd->active_cic) { 1789 if (cfqd->active_cic) {
1797 put_io_context(cfqd->active_cic->icq.ioc, cfqd->queue); 1790 put_io_context(cfqd->active_cic->icq.ioc);
1798 cfqd->active_cic = NULL; 1791 cfqd->active_cic = NULL;
1799 } 1792 }
1800} 1793}
@@ -3117,17 +3110,18 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
3117 */ 3110 */
3118static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq) 3111static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3119{ 3112{
3113 enum wl_type_t old_type = cfqq_type(cfqd->active_queue);
3114
3120 cfq_log_cfqq(cfqd, cfqq, "preempt"); 3115 cfq_log_cfqq(cfqd, cfqq, "preempt");
3116 cfq_slice_expired(cfqd, 1);
3121 3117
3122 /* 3118 /*
3123 * workload type is changed, don't save slice, otherwise preempt 3119 * workload type is changed, don't save slice, otherwise preempt
3124 * doesn't happen 3120 * doesn't happen
3125 */ 3121 */
3126 if (cfqq_type(cfqd->active_queue) != cfqq_type(cfqq)) 3122 if (old_type != cfqq_type(cfqq))
3127 cfqq->cfqg->saved_workload_slice = 0; 3123 cfqq->cfqg->saved_workload_slice = 0;
3128 3124
3129 cfq_slice_expired(cfqd, 1);
3130
3131 /* 3125 /*
3132 * Put the new queue at the front of the of the current list, 3126 * Put the new queue at the front of the of the current list,
3133 * so we know that it will be selected next. 3127 * so we know that it will be selected next.
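Note on the cfq_preempt_queue() hunk above: cfq_slice_expired() is now called only after the old queue's workload type has been sampled, because __cfq_slice_expired() clears cfqd->active_queue and a later cfqq_type(cfqd->active_queue) would no longer describe the queue that was preempted. A minimal standalone sketch of the ordering, with mocked-up types standing in for the real cfq structures (illustrative only, not the block-layer API):

#include <stdio.h>

/* stand-ins for the cfq structures; only the ordering matters here */
enum wl_type_t { ASYNC_WORKLOAD, SYNC_NOIDLE_WORKLOAD, SYNC_WORKLOAD };
struct cfq_queue { enum wl_type_t type; };
struct cfq_data  { struct cfq_queue *active_queue; };

static enum wl_type_t cfqq_type(const struct cfq_queue *cfqq)
{
	/* NULL-tolerant only so this sketch can run standalone */
	return cfqq ? cfqq->type : ASYNC_WORKLOAD;
}

static void slice_expired(struct cfq_data *cfqd)
{
	cfqd->active_queue = NULL;	/* mirrors __cfq_slice_expired() above */
}

static void preempt_queue(struct cfq_data *cfqd, struct cfq_queue *newq)
{
	enum wl_type_t old_type = cfqq_type(cfqd->active_queue);

	slice_expired(cfqd);
	/*
	 * Reading cfqq_type(cfqd->active_queue) at this point would see the
	 * cleared pointer; old_type keeps the answer from before the expiry.
	 */
	if (old_type != cfqq_type(newq))
		printf("workload type changed: don't save the slice\n");
}

int main(void)
{
	struct cfq_queue old = { SYNC_WORKLOAD }, newq = { ASYNC_WORKLOAD };
	struct cfq_data cfqd = { &old };

	preempt_queue(&cfqd, &newq);
	return 0;
}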
diff --git a/block/elevator.c b/block/elevator.c
index 91e18f8af9be..f016855a46b0 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -70,39 +70,9 @@ static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
70/* 70/*
71 * can we safely merge with this request? 71 * can we safely merge with this request?
72 */ 72 */
73int elv_rq_merge_ok(struct request *rq, struct bio *bio) 73bool elv_rq_merge_ok(struct request *rq, struct bio *bio)
74{ 74{
75 if (!rq_mergeable(rq)) 75 if (!blk_rq_merge_ok(rq, bio))
76 return 0;
77
78 /*
79 * Don't merge file system requests and discard requests
80 */
81 if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
82 return 0;
83
84 /*
85 * Don't merge discard requests and secure discard requests
86 */
87 if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
88 return 0;
89
90 /*
91 * different data direction or already started, don't merge
92 */
93 if (bio_data_dir(bio) != rq_data_dir(rq))
94 return 0;
95
96 /*
97 * must be same device and not a special request
98 */
99 if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
100 return 0;
101
102 /*
103 * only merge integrity protected bio into ditto rq
104 */
105 if (bio_integrity(bio) != blk_integrity_rq(rq))
106 return 0; 76 return 0;
107 77
108 if (!elv_iosched_allow_merge(rq, bio)) 78 if (!elv_iosched_allow_merge(rq, bio))
@@ -112,23 +82,6 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio)
112} 82}
113EXPORT_SYMBOL(elv_rq_merge_ok); 83EXPORT_SYMBOL(elv_rq_merge_ok);
114 84
115int elv_try_merge(struct request *__rq, struct bio *bio)
116{
117 int ret = ELEVATOR_NO_MERGE;
118
119 /*
120 * we can merge and sequence is ok, check if it's possible
121 */
122 if (elv_rq_merge_ok(__rq, bio)) {
123 if (blk_rq_pos(__rq) + blk_rq_sectors(__rq) == bio->bi_sector)
124 ret = ELEVATOR_BACK_MERGE;
125 else if (blk_rq_pos(__rq) - bio_sectors(bio) == bio->bi_sector)
126 ret = ELEVATOR_FRONT_MERGE;
127 }
128
129 return ret;
130}
131
132static struct elevator_type *elevator_find(const char *name) 85static struct elevator_type *elevator_find(const char *name)
133{ 86{
134 struct elevator_type *e; 87 struct elevator_type *e;
@@ -478,8 +431,8 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
478 /* 431 /*
479 * First try one-hit cache. 432 * First try one-hit cache.
480 */ 433 */
481 if (q->last_merge) { 434 if (q->last_merge && elv_rq_merge_ok(q->last_merge, bio)) {
482 ret = elv_try_merge(q->last_merge, bio); 435 ret = blk_try_merge(q->last_merge, bio);
483 if (ret != ELEVATOR_NO_MERGE) { 436 if (ret != ELEVATOR_NO_MERGE) {
484 *req = q->last_merge; 437 *req = q->last_merge;
485 return ret; 438 return ret;
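The elv_try_merge() helper deleted above now lives in the block core as blk_try_merge(); the sector arithmetic it performed is unchanged. A self-contained sketch of that decision, using plain sector numbers in place of struct request and struct bio (names and helpers here are illustrative, not the block-layer API):

#include <stdio.h>

#define ELEVATOR_NO_MERGE	0
#define ELEVATOR_FRONT_MERGE	1
#define ELEVATOR_BACK_MERGE	2

/*
 * rq_pos/rq_sectors describe the existing request, bio_pos/bio_sectors the
 * incoming bio -- mirroring blk_rq_pos(), blk_rq_sectors(), bio->bi_sector
 * and bio_sectors() as used by the removed elv_try_merge().
 */
static int try_merge(unsigned long long rq_pos, unsigned int rq_sectors,
		     unsigned long long bio_pos, unsigned int bio_sectors)
{
	if (rq_pos + rq_sectors == bio_pos)
		return ELEVATOR_BACK_MERGE;	/* bio starts right after rq ends */
	if (rq_pos - bio_sectors == bio_pos)
		return ELEVATOR_FRONT_MERGE;	/* bio ends right where rq starts */
	return ELEVATOR_NO_MERGE;
}

int main(void)
{
	/* request covers sectors 100..107; a bio at 108 back-merges */
	printf("%d\n", try_merge(100, 8, 108, 8));	/* prints 2 */
	/* a bio covering sectors 92..99 front-merges */
	printf("%d\n", try_merge(100, 8, 92, 8));	/* prints 1 */
	return 0;
}

The one-hit cache in elv_merge() keeps the same shape: the mergeability check (elv_rq_merge_ok) gates the positional check (blk_try_merge), exactly as the old combined helper did.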
diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
index 88f160b77b1f..107f6f7be5e1 100644
--- a/crypto/sha512_generic.c
+++ b/crypto/sha512_generic.c
@@ -31,11 +31,6 @@ static inline u64 Maj(u64 x, u64 y, u64 z)
31 return (x & y) | (z & (x | y)); 31 return (x & y) | (z & (x | y));
32} 32}
33 33
34static inline u64 RORu64(u64 x, u64 y)
35{
36 return (x >> y) | (x << (64 - y));
37}
38
39static const u64 sha512_K[80] = { 34static const u64 sha512_K[80] = {
40 0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL, 0xb5c0fbcfec4d3b2fULL, 35 0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL, 0xb5c0fbcfec4d3b2fULL,
41 0xe9b5dba58189dbbcULL, 0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL, 36 0xe9b5dba58189dbbcULL, 0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL,
@@ -66,10 +61,10 @@ static const u64 sha512_K[80] = {
66 0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL, 61 0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL,
67}; 62};
68 63
69#define e0(x) (RORu64(x,28) ^ RORu64(x,34) ^ RORu64(x,39)) 64#define e0(x) (ror64(x,28) ^ ror64(x,34) ^ ror64(x,39))
70#define e1(x) (RORu64(x,14) ^ RORu64(x,18) ^ RORu64(x,41)) 65#define e1(x) (ror64(x,14) ^ ror64(x,18) ^ ror64(x,41))
71#define s0(x) (RORu64(x, 1) ^ RORu64(x, 8) ^ (x >> 7)) 66#define s0(x) (ror64(x, 1) ^ ror64(x, 8) ^ (x >> 7))
72#define s1(x) (RORu64(x,19) ^ RORu64(x,61) ^ (x >> 6)) 67#define s1(x) (ror64(x,19) ^ ror64(x,61) ^ (x >> 6))
73 68
74static inline void LOAD_OP(int I, u64 *W, const u8 *input) 69static inline void LOAD_OP(int I, u64 *W, const u8 *input)
75{ 70{
@@ -78,7 +73,7 @@ static inline void LOAD_OP(int I, u64 *W, const u8 *input)
78 73
79static inline void BLEND_OP(int I, u64 *W) 74static inline void BLEND_OP(int I, u64 *W)
80{ 75{
81 W[I % 16] += s1(W[(I-2) % 16]) + W[(I-7) % 16] + s0(W[(I-15) % 16]); 76 W[I & 15] += s1(W[(I-2) & 15]) + W[(I-7) & 15] + s0(W[(I-15) & 15]);
82} 77}
83 78
84static void 79static void
@@ -89,46 +84,42 @@ sha512_transform(u64 *state, const u8 *input)
89 int i; 84 int i;
90 u64 W[16]; 85 u64 W[16];
91 86
92 /* load the input */
93 for (i = 0; i < 16; i++)
94 LOAD_OP(i, W, input);
95
96 /* load the state into our registers */ 87 /* load the state into our registers */
97 a=state[0]; b=state[1]; c=state[2]; d=state[3]; 88 a=state[0]; b=state[1]; c=state[2]; d=state[3];
98 e=state[4]; f=state[5]; g=state[6]; h=state[7]; 89 e=state[4]; f=state[5]; g=state[6]; h=state[7];
99 90
100#define SHA512_0_15(i, a, b, c, d, e, f, g, h) \ 91 /* now iterate */
101 t1 = h + e1(e) + Ch(e, f, g) + sha512_K[i] + W[i]; \ 92 for (i=0; i<80; i+=8) {
102 t2 = e0(a) + Maj(a, b, c); \ 93 if (!(i & 8)) {
103 d += t1; \ 94 int j;
104 h = t1 + t2 95
105 96 if (i < 16) {
106#define SHA512_16_79(i, a, b, c, d, e, f, g, h) \ 97 /* load the input */
107 BLEND_OP(i, W); \ 98 for (j = 0; j < 16; j++)
108 t1 = h + e1(e) + Ch(e, f, g) + sha512_K[i] + W[(i)%16]; \ 99 LOAD_OP(i + j, W, input);
109 t2 = e0(a) + Maj(a, b, c); \ 100 } else {
110 d += t1; \ 101 for (j = 0; j < 16; j++) {
111 h = t1 + t2 102 BLEND_OP(i + j, W);
112 103 }
113 for (i = 0; i < 16; i += 8) { 104 }
114 SHA512_0_15(i, a, b, c, d, e, f, g, h); 105 }
115 SHA512_0_15(i + 1, h, a, b, c, d, e, f, g); 106
116 SHA512_0_15(i + 2, g, h, a, b, c, d, e, f); 107 t1 = h + e1(e) + Ch(e,f,g) + sha512_K[i ] + W[(i & 15)];
117 SHA512_0_15(i + 3, f, g, h, a, b, c, d, e); 108 t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
118 SHA512_0_15(i + 4, e, f, g, h, a, b, c, d); 109 t1 = g + e1(d) + Ch(d,e,f) + sha512_K[i+1] + W[(i & 15) + 1];
119 SHA512_0_15(i + 5, d, e, f, g, h, a, b, c); 110 t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
120 SHA512_0_15(i + 6, c, d, e, f, g, h, a, b); 111 t1 = f + e1(c) + Ch(c,d,e) + sha512_K[i+2] + W[(i & 15) + 2];
121 SHA512_0_15(i + 7, b, c, d, e, f, g, h, a); 112 t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
122 } 113 t1 = e + e1(b) + Ch(b,c,d) + sha512_K[i+3] + W[(i & 15) + 3];
123 for (i = 16; i < 80; i += 8) { 114 t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
124 SHA512_16_79(i, a, b, c, d, e, f, g, h); 115 t1 = d + e1(a) + Ch(a,b,c) + sha512_K[i+4] + W[(i & 15) + 4];
125 SHA512_16_79(i + 1, h, a, b, c, d, e, f, g); 116 t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
126 SHA512_16_79(i + 2, g, h, a, b, c, d, e, f); 117 t1 = c + e1(h) + Ch(h,a,b) + sha512_K[i+5] + W[(i & 15) + 5];
127 SHA512_16_79(i + 3, f, g, h, a, b, c, d, e); 118 t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2;
128 SHA512_16_79(i + 4, e, f, g, h, a, b, c, d); 119 t1 = b + e1(g) + Ch(g,h,a) + sha512_K[i+6] + W[(i & 15) + 6];
129 SHA512_16_79(i + 5, d, e, f, g, h, a, b, c); 120 t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2;
130 SHA512_16_79(i + 6, c, d, e, f, g, h, a, b); 121 t1 = a + e1(f) + Ch(f,g,h) + sha512_K[i+7] + W[(i & 15) + 7];
131 SHA512_16_79(i + 7, b, c, d, e, f, g, h, a); 122 t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2;
132 } 123 }
133 124
134 state[0] += a; state[1] += b; state[2] += c; state[3] += d; 125 state[0] += a; state[1] += b; state[2] += c; state[3] += d;
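The sha512_generic.c rewrite drops the private RORu64() in favour of the kernel's shared ror64() rotate helper, and replaces the % 16 ring-buffer indexing of W[] with & 15; for the non-negative indices used here the two are equivalent, and only 16 W[] entries are ever live at once. A standalone check of both identities:

#include <assert.h>
#include <stdint.h>

/* same rotate the kernel's ror64() performs, valid for 0 < y < 64 */
static inline uint64_t ror64(uint64_t x, unsigned int y)
{
	return (x >> y) | (x << (64 - y));
}

int main(void)
{
	uint64_t x = 0x0123456789abcdefULL;
	int i;

	/* rotating right by 28 and then by 36 is a full 64-bit rotation */
	assert(ror64(ror64(x, 28), 36) == x);

	/* for i >= 0, i % 16 and i & 15 select the same W[] slot */
	for (i = 0; i < 80; i++)
		assert(i % 16 == (i & 15));

	return 0;
}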
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 2b805d7ef317..8ae05ce18500 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -586,13 +586,6 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
586 if (pr->flags.need_hotplug_init) 586 if (pr->flags.need_hotplug_init)
587 return 0; 587 return 0;
588 588
589 /*
590 * Do not start hotplugged CPUs now, but when they
591 * are onlined the first time
592 */
593 if (pr->flags.need_hotplug_init)
594 return 0;
595
596 result = acpi_processor_start(pr); 589 result = acpi_processor_start(pr);
597 if (result) 590 if (result)
598 goto err_remove_sysfs; 591 goto err_remove_sysfs;
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
index a7d91a72ee35..53d3770a0b1b 100644
--- a/drivers/ata/pata_at91.c
+++ b/drivers/ata/pata_at91.c
@@ -207,11 +207,11 @@ static void set_smc_timing(struct device *dev, struct ata_device *adev,
207{ 207{
208 int ret = 0; 208 int ret = 0;
209 int use_iordy; 209 int use_iordy;
210 struct sam9_smc_config smc;
210 unsigned int t6z; /* data tristate time in ns */ 211 unsigned int t6z; /* data tristate time in ns */
211 unsigned int cycle; /* SMC Cycle width in MCK ticks */ 212 unsigned int cycle; /* SMC Cycle width in MCK ticks */
212 unsigned int setup; /* SMC Setup width in MCK ticks */ 213 unsigned int setup; /* SMC Setup width in MCK ticks */
213 unsigned int pulse; /* CFIOR and CFIOW pulse width in MCK ticks */ 214 unsigned int pulse; /* CFIOR and CFIOW pulse width in MCK ticks */
214 unsigned int cs_setup = 0;/* CS4 or CS5 setup width in MCK ticks */
215 unsigned int cs_pulse; /* CS4 or CS5 pulse width in MCK ticks*/ 215 unsigned int cs_pulse; /* CS4 or CS5 pulse width in MCK ticks*/
216 unsigned int tdf_cycles; /* SMC TDF MCK ticks */ 216 unsigned int tdf_cycles; /* SMC TDF MCK ticks */
217 unsigned long mck_hz; /* MCK frequency in Hz */ 217 unsigned long mck_hz; /* MCK frequency in Hz */
@@ -244,26 +244,20 @@ static void set_smc_timing(struct device *dev, struct ata_device *adev,
244 } 244 }
245 245
246 dev_dbg(dev, "Use IORDY=%u, TDF Cycles=%u\n", use_iordy, tdf_cycles); 246 dev_dbg(dev, "Use IORDY=%u, TDF Cycles=%u\n", use_iordy, tdf_cycles);
247 info->mode |= AT91_SMC_TDF_(tdf_cycles); 247
248 248 /* SMC Setup Register */
249 /* write SMC Setup Register */ 249 smc.nwe_setup = smc.nrd_setup = setup;
250 at91_sys_write(AT91_SMC_SETUP(info->cs), 250 smc.ncs_write_setup = smc.ncs_read_setup = 0;
251 AT91_SMC_NWESETUP_(setup) | 251 /* SMC Pulse Register */
252 AT91_SMC_NRDSETUP_(setup) | 252 smc.nwe_pulse = smc.nrd_pulse = pulse;
253 AT91_SMC_NCS_WRSETUP_(cs_setup) | 253 smc.ncs_write_pulse = smc.ncs_read_pulse = cs_pulse;
254 AT91_SMC_NCS_RDSETUP_(cs_setup)); 254 /* SMC Cycle Register */
255 /* write SMC Pulse Register */ 255 smc.write_cycle = smc.read_cycle = cycle;
256 at91_sys_write(AT91_SMC_PULSE(info->cs), 256 /* SMC Mode Register*/
257 AT91_SMC_NWEPULSE_(pulse) | 257 smc.tdf_cycles = tdf_cycles;
258 AT91_SMC_NRDPULSE_(pulse) | 258 smc.mode = info->mode;
259 AT91_SMC_NCS_WRPULSE_(cs_pulse) | 259
260 AT91_SMC_NCS_RDPULSE_(cs_pulse)); 260 sam9_smc_configure(0, info->cs, &smc);
261 /* write SMC Cycle Register */
262 at91_sys_write(AT91_SMC_CYCLE(info->cs),
263 AT91_SMC_NWECYCLE_(cycle) |
264 AT91_SMC_NRDCYCLE_(cycle));
265 /* write SMC Mode Register*/
266 at91_sys_write(AT91_SMC_MODE(info->cs), info->mode);
267} 261}
268 262
269static void pata_at91_set_piomode(struct ata_port *ap, struct ata_device *adev) 263static void pata_at91_set_piomode(struct ata_port *ap, struct ata_device *adev)
@@ -288,20 +282,20 @@ static unsigned int pata_at91_data_xfer_noirq(struct ata_device *dev,
288 struct at91_ide_info *info = dev->link->ap->host->private_data; 282 struct at91_ide_info *info = dev->link->ap->host->private_data;
289 unsigned int consumed; 283 unsigned int consumed;
290 unsigned long flags; 284 unsigned long flags;
291 unsigned int mode; 285 struct sam9_smc_config smc;
292 286
293 local_irq_save(flags); 287 local_irq_save(flags);
294 mode = at91_sys_read(AT91_SMC_MODE(info->cs)); 288 sam9_smc_read_mode(0, info->cs, &smc);
295 289
296 /* set 16bit mode before writing data */ 290 /* set 16bit mode before writing data */
297 at91_sys_write(AT91_SMC_MODE(info->cs), 291 smc.mode = (smc.mode & ~AT91_SMC_DBW) | AT91_SMC_DBW_16;
298 (mode & ~AT91_SMC_DBW) | AT91_SMC_DBW_16); 292 sam9_smc_write_mode(0, info->cs, &smc);
299 293
300 consumed = ata_sff_data_xfer(dev, buf, buflen, rw); 294 consumed = ata_sff_data_xfer(dev, buf, buflen, rw);
301 295
302 /* restore 8bit mode after data is written */ 296 /* restore 8bit mode after data is written */
303 at91_sys_write(AT91_SMC_MODE(info->cs), 297 smc.mode = (smc.mode & ~AT91_SMC_DBW) | AT91_SMC_DBW_8;
304 (mode & ~AT91_SMC_DBW) | AT91_SMC_DBW_8); 298 sam9_smc_write_mode(0, info->cs, &smc);
305 299
306 local_irq_restore(flags); 300 local_irq_restore(flags);
307 return consumed; 301 return consumed;
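After this conversion pata_at91 no longer pokes the AT91_SMC_* registers directly; it fills a struct sam9_smc_config and lets the mach-at91 helpers program the chip-select. A condensed sketch of that pattern, using only the field names visible in the hunk (the header path is an assumption, and this is not a complete driver):

#include <mach/at91sam9_smc.h>	/* assumed home of struct sam9_smc_config */

static void program_cs_timings(int cs, unsigned int setup, unsigned int pulse,
			       unsigned int cs_pulse, unsigned int cycle,
			       unsigned int tdf_cycles, u32 mode)
{
	struct sam9_smc_config smc = {
		/* setup register */
		.nwe_setup		= setup,
		.nrd_setup		= setup,
		.ncs_write_setup	= 0,
		.ncs_read_setup		= 0,
		/* pulse register */
		.nwe_pulse		= pulse,
		.nrd_pulse		= pulse,
		.ncs_write_pulse	= cs_pulse,
		.ncs_read_pulse		= cs_pulse,
		/* cycle register */
		.write_cycle		= cycle,
		.read_cycle		= cycle,
		/* mode register */
		.tdf_cycles		= tdf_cycles,
		.mode			= mode,
	};

	/* first argument selects the SMC block; pata_at91 always passes 0 */
	sam9_smc_configure(0, cs, &smc);
}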
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index db87e78d7459..4dabf5077c48 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -208,6 +208,25 @@ static ssize_t print_cpus_offline(struct device *dev,
208} 208}
209static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL); 209static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);
210 210
211static void cpu_device_release(struct device *dev)
212{
213 /*
214 * This is an empty function to prevent the driver core from spitting a
215 * warning at us. Yes, I know this is directly opposite of what the
216 * documentation for the driver core and kobjects say, and the author
217 * of this code has already been publically ridiculed for doing
218 * something as foolish as this. However, at this point in time, it is
219 * the only way to handle the issue of statically allocated cpu
220 * devices. The different architectures will have their cpu device
221 * code reworked to properly handle this in the near future, so this
222 * function will then be changed to correctly free up the memory held
223 * by the cpu device.
224 *
225 * Never copy this way of doing things, or you too will be made fun of
226 * on the linux-kerenl list, you have been warned.
227 */
228}
229
211/* 230/*
212 * register_cpu - Setup a sysfs device for a CPU. 231 * register_cpu - Setup a sysfs device for a CPU.
213 * @cpu - cpu->hotpluggable field set to 1 will generate a control file in 232 * @cpu - cpu->hotpluggable field set to 1 will generate a control file in
@@ -221,8 +240,10 @@ int __cpuinit register_cpu(struct cpu *cpu, int num)
221 int error; 240 int error;
222 241
223 cpu->node_id = cpu_to_node(num); 242 cpu->node_id = cpu_to_node(num);
243 memset(&cpu->dev, 0x00, sizeof(struct device));
224 cpu->dev.id = num; 244 cpu->dev.id = num;
225 cpu->dev.bus = &cpu_subsys; 245 cpu->dev.bus = &cpu_subsys;
246 cpu->dev.release = cpu_device_release;
226 error = device_register(&cpu->dev); 247 error = device_register(&cpu->dev);
227 if (!error && cpu->hotpluggable) 248 if (!error && cpu->hotpluggable)
228 register_cpu_control(cpu); 249 register_cpu_control(cpu);
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index ed5de58c340f..9e60dbe9fd94 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -572,19 +572,36 @@ static int init_memory_block(struct memory_block **memory,
572} 572}
573 573
574static int add_memory_section(int nid, struct mem_section *section, 574static int add_memory_section(int nid, struct mem_section *section,
575 struct memory_block **mem_p,
575 unsigned long state, enum mem_add_context context) 576 unsigned long state, enum mem_add_context context)
576{ 577{
577 struct memory_block *mem; 578 struct memory_block *mem = NULL;
579 int scn_nr = __section_nr(section);
578 int ret = 0; 580 int ret = 0;
579 581
580 mutex_lock(&mem_sysfs_mutex); 582 mutex_lock(&mem_sysfs_mutex);
581 583
582 mem = find_memory_block(section); 584 if (context == BOOT) {
585 /* same memory block ? */
586 if (mem_p && *mem_p)
587 if (scn_nr >= (*mem_p)->start_section_nr &&
588 scn_nr <= (*mem_p)->end_section_nr) {
589 mem = *mem_p;
590 kobject_get(&mem->dev.kobj);
591 }
592 } else
593 mem = find_memory_block(section);
594
583 if (mem) { 595 if (mem) {
584 mem->section_count++; 596 mem->section_count++;
585 kobject_put(&mem->dev.kobj); 597 kobject_put(&mem->dev.kobj);
586 } else 598 } else {
587 ret = init_memory_block(&mem, section, state); 599 ret = init_memory_block(&mem, section, state);
600 /* store memory_block pointer for next loop */
601 if (!ret && context == BOOT)
602 if (mem_p)
603 *mem_p = mem;
604 }
588 605
589 if (!ret) { 606 if (!ret) {
590 if (context == HOTPLUG && 607 if (context == HOTPLUG &&
@@ -627,7 +644,7 @@ int remove_memory_block(unsigned long node_id, struct mem_section *section,
627 */ 644 */
628int register_new_memory(int nid, struct mem_section *section) 645int register_new_memory(int nid, struct mem_section *section)
629{ 646{
630 return add_memory_section(nid, section, MEM_OFFLINE, HOTPLUG); 647 return add_memory_section(nid, section, NULL, MEM_OFFLINE, HOTPLUG);
631} 648}
632 649
633int unregister_memory_section(struct mem_section *section) 650int unregister_memory_section(struct mem_section *section)
@@ -647,6 +664,7 @@ int __init memory_dev_init(void)
647 int ret; 664 int ret;
648 int err; 665 int err;
649 unsigned long block_sz; 666 unsigned long block_sz;
667 struct memory_block *mem = NULL;
650 668
651 ret = subsys_system_register(&memory_subsys, NULL); 669 ret = subsys_system_register(&memory_subsys, NULL);
652 if (ret) 670 if (ret)
@@ -662,7 +680,10 @@ int __init memory_dev_init(void)
662 for (i = 0; i < NR_MEM_SECTIONS; i++) { 680 for (i = 0; i < NR_MEM_SECTIONS; i++) {
663 if (!present_section_nr(i)) 681 if (!present_section_nr(i))
664 continue; 682 continue;
665 err = add_memory_section(0, __nr_to_section(i), MEM_ONLINE, 683 /* don't need to reuse memory_block if only one per block */
684 err = add_memory_section(0, __nr_to_section(i),
685 (sections_per_block == 1) ? NULL : &mem,
686 MEM_ONLINE,
666 BOOT); 687 BOOT);
667 if (!ret) 688 if (!ret)
668 ret = err; 689 ret = err;
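The new mem_p hint exists because, at boot, consecutive present sections almost always fall inside the memory block that was just created, so checking the [start_section_nr, end_section_nr] range avoids one find_memory_block() sysfs lookup per section. The reuse test, reduced to plain integers (illustrative types, not the driver-core structs):

#include <stdbool.h>
#include <stdio.h>

struct memblock_hint {
	unsigned long start_section_nr;
	unsigned long end_section_nr;
};

/* true if section scn_nr lies inside the block created on the last pass */
static bool hint_matches(const struct memblock_hint *hint, unsigned long scn_nr)
{
	return hint &&
	       scn_nr >= hint->start_section_nr &&
	       scn_nr <= hint->end_section_nr;
}

int main(void)
{
	struct memblock_hint blk = { .start_section_nr = 16, .end_section_nr = 31 };

	printf("%d %d\n", hint_matches(&blk, 20), hint_matches(&blk, 40));	/* 1 0 */
	return 0;
}

When sections_per_block == 1 the hint can never match twice, which is why memory_dev_init() passes NULL in that case.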
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 44f427a66117..90aa2a11a933 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -456,7 +456,15 @@ static int link_mem_sections(int nid)
456 if (!present_section_nr(section_nr)) 456 if (!present_section_nr(section_nr))
457 continue; 457 continue;
458 mem_sect = __nr_to_section(section_nr); 458 mem_sect = __nr_to_section(section_nr);
459
460 /* same memblock ? */
461 if (mem_blk)
462 if ((section_nr >= mem_blk->start_section_nr) &&
463 (section_nr <= mem_blk->end_section_nr))
464 continue;
465
459 mem_blk = find_memory_block_hinted(mem_sect, mem_blk); 466 mem_blk = find_memory_block_hinted(mem_sect, mem_blk);
467
460 ret = register_mem_sect_under_node(mem_blk, nid); 468 ret = register_mem_sect_under_node(mem_blk, nid);
461 if (!err) 469 if (!err)
462 err = ret; 470 err = ret;
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 1ead66186b7c..d1daa5e9fadf 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -53,7 +53,7 @@ static int regcache_hw_init(struct regmap *map)
53 for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) { 53 for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) {
54 val = regcache_get_val(map->reg_defaults_raw, 54 val = regcache_get_val(map->reg_defaults_raw,
55 i, map->cache_word_size); 55 i, map->cache_word_size);
56 if (!val) 56 if (regmap_volatile(map, i))
57 continue; 57 continue;
58 count++; 58 count++;
59 } 59 }
@@ -70,7 +70,7 @@ static int regcache_hw_init(struct regmap *map)
70 for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) { 70 for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
71 val = regcache_get_val(map->reg_defaults_raw, 71 val = regcache_get_val(map->reg_defaults_raw,
72 i, map->cache_word_size); 72 i, map->cache_word_size);
73 if (!val) 73 if (regmap_volatile(map, i))
74 continue; 74 continue;
75 map->reg_defaults[j].reg = i; 75 map->reg_defaults[j].reg = i;
76 map->reg_defaults[j].def = val; 76 map->reg_defaults[j].def = val;
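The regcache change swaps the skip condition from "default value is zero" to "register is volatile": a zero default is perfectly legitimate and must still seed the cache, while volatile registers must never be cached regardless of their reset value. A small standalone illustration of the counting pass (the volatile predicate is invented for the example):

#include <stdbool.h>
#include <stdio.h>

/* illustrative stand-in for regmap_volatile(map, reg) */
static bool is_volatile(unsigned int reg)
{
	return reg == 2;	/* pretend register 2 is a live status register */
}

int main(void)
{
	/* raw defaults; note that register 1 legitimately defaults to 0 */
	unsigned int defaults[] = { 0x10, 0x00, 0xff, 0x7f };
	unsigned int i, count = 0;

	for (i = 0; i < 4; i++) {
		if (is_volatile(i))
			continue;	/* never seed the cache from volatile regs */
		count++;
	}
	printf("%u cacheable defaults (value 0x%02x included)\n",
	       count, defaults[1]);	/* 3 cacheable, the zero one among them */
	return 0;
}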
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index febbc0a1222a..ec31f7dd5549 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -169,10 +169,8 @@ int bcma_bus_register(struct bcma_bus *bus)
169 err = bcma_sprom_get(bus); 169 err = bcma_sprom_get(bus);
170 if (err == -ENOENT) { 170 if (err == -ENOENT) {
171 pr_err("No SPROM available\n"); 171 pr_err("No SPROM available\n");
172 } else if (err) { 172 } else if (err)
173 pr_err("Failed to get SPROM: %d\n", err); 173 pr_err("Failed to get SPROM: %d\n", err);
174 return -ENOENT;
175 }
176 174
177 /* Register found cores */ 175 /* Register found cores */
178 bcma_register_cores(bus); 176 bcma_register_cores(bus);
diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c
index cad994857683..3a2f672db9ad 100644
--- a/drivers/bcma/scan.c
+++ b/drivers/bcma/scan.c
@@ -399,15 +399,18 @@ int bcma_bus_scan(struct bcma_bus *bus)
399 core->bus = bus; 399 core->bus = bus;
400 400
401 err = bcma_get_next_core(bus, &eromptr, NULL, core_num, core); 401 err = bcma_get_next_core(bus, &eromptr, NULL, core_num, core);
402 if (err == -ENODEV) { 402 if (err < 0) {
403 core_num++; 403 kfree(core);
404 continue; 404 if (err == -ENODEV) {
405 } else if (err == -ENXIO) 405 core_num++;
406 continue; 406 continue;
407 else if (err == -ESPIPE) 407 } else if (err == -ENXIO) {
408 break; 408 continue;
409 else if (err < 0) 409 } else if (err == -ESPIPE) {
410 break;
411 }
410 return err; 412 return err;
413 }
411 414
412 core->core_index = core_num++; 415 core->core_index = core_num++;
413 bus->nr_cores++; 416 bus->nr_cores++;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 510fb10ec45a..9baf11e86362 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4368,8 +4368,14 @@ out_unreg_blkdev:
4368out_put_disk: 4368out_put_disk:
4369 while (dr--) { 4369 while (dr--) {
4370 del_timer_sync(&motor_off_timer[dr]); 4370 del_timer_sync(&motor_off_timer[dr]);
4371 if (disks[dr]->queue) 4371 if (disks[dr]->queue) {
4372 blk_cleanup_queue(disks[dr]->queue); 4372 blk_cleanup_queue(disks[dr]->queue);
4373 /*
4374 * put_disk() is not paired with add_disk() and
4375 * will put queue reference one extra time. fix it.
4376 */
4377 disks[dr]->queue = NULL;
4378 }
4373 put_disk(disks[dr]); 4379 put_disk(disks[dr]);
4374 } 4380 }
4375 return err; 4381 return err;
@@ -4579,6 +4585,15 @@ static void __exit floppy_module_exit(void)
4579 platform_device_unregister(&floppy_device[drive]); 4585 platform_device_unregister(&floppy_device[drive]);
4580 } 4586 }
4581 blk_cleanup_queue(disks[drive]->queue); 4587 blk_cleanup_queue(disks[drive]->queue);
4588
4589 /*
4590 * These disks have not called add_disk(). Don't put down
4591 * queue reference in put_disk().
4592 */
4593 if (!(allowed_drive_mask & (1 << drive)) ||
4594 fdc_state[FDC(drive)].version == FDC_NONE)
4595 disks[drive]->queue = NULL;
4596
4582 put_disk(disks[drive]); 4597 put_disk(disks[drive]);
4583 } 4598 }
4584 4599
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index f00257782fcc..cd504353b278 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -356,14 +356,14 @@ lo_direct_splice_actor(struct pipe_inode_info *pipe, struct splice_desc *sd)
356 return __splice_from_pipe(pipe, sd, lo_splice_actor); 356 return __splice_from_pipe(pipe, sd, lo_splice_actor);
357} 357}
358 358
359static int 359static ssize_t
360do_lo_receive(struct loop_device *lo, 360do_lo_receive(struct loop_device *lo,
361 struct bio_vec *bvec, int bsize, loff_t pos) 361 struct bio_vec *bvec, int bsize, loff_t pos)
362{ 362{
363 struct lo_read_data cookie; 363 struct lo_read_data cookie;
364 struct splice_desc sd; 364 struct splice_desc sd;
365 struct file *file; 365 struct file *file;
366 long retval; 366 ssize_t retval;
367 367
368 cookie.lo = lo; 368 cookie.lo = lo;
369 cookie.page = bvec->bv_page; 369 cookie.page = bvec->bv_page;
@@ -379,26 +379,28 @@ do_lo_receive(struct loop_device *lo,
379 file = lo->lo_backing_file; 379 file = lo->lo_backing_file;
380 retval = splice_direct_to_actor(file, &sd, lo_direct_splice_actor); 380 retval = splice_direct_to_actor(file, &sd, lo_direct_splice_actor);
381 381
382 if (retval < 0) 382 return retval;
383 return retval;
384 if (retval != bvec->bv_len)
385 return -EIO;
386 return 0;
387} 383}
388 384
389static int 385static int
390lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos) 386lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos)
391{ 387{
392 struct bio_vec *bvec; 388 struct bio_vec *bvec;
393 int i, ret = 0; 389 ssize_t s;
390 int i;
394 391
395 bio_for_each_segment(bvec, bio, i) { 392 bio_for_each_segment(bvec, bio, i) {
396 ret = do_lo_receive(lo, bvec, bsize, pos); 393 s = do_lo_receive(lo, bvec, bsize, pos);
397 if (ret < 0) 394 if (s < 0)
395 return s;
396
397 if (s != bvec->bv_len) {
398 zero_fill_bio(bio);
398 break; 399 break;
400 }
399 pos += bvec->bv_len; 401 pos += bvec->bv_len;
400 } 402 }
401 return ret; 403 return 0;
402} 404}
403 405
404static int do_bio_filebacked(struct loop_device *lo, struct bio *bio) 406static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
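lo_receive() now treats a short splice as end-of-data rather than an -EIO: the rest of the bio is zero-filled and the request completes successfully, which is what a read past the end of the backing file should look like. A userspace analogue of that policy, with a plain buffer standing in for the bio:

#include <stdio.h>
#include <string.h>

/*
 * Copy up to len bytes from src (avail bytes long) into dst and zero the
 * tail, the way lo_receive() zero-fills the bio after a short read.
 */
static size_t read_or_zero_fill(char *dst, size_t len,
				const char *src, size_t avail)
{
	size_t got = avail < len ? avail : len;

	memcpy(dst, src, got);
	if (got < len)
		memset(dst + got, 0, len - got);	/* short read: pad with zeroes */
	return got;
}

int main(void)
{
	char backing[] = "loop";
	char sector[8];

	read_or_zero_fill(sector, sizeof(sector), backing, sizeof(backing) - 1);
	printf("%.8s\n", sector);	/* prints "loop"; bytes 4..7 are zero */
	return 0;
}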
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index b74eab70c3d0..8eb81c96608f 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -2068,8 +2068,6 @@ static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
2068 * when the read completes. 2068 * when the read completes.
2069 * @data Callback data passed to the callback function 2069 * @data Callback data passed to the callback function
2070 * when the read completes. 2070 * when the read completes.
2071 * @barrier If non-zero, this command must be completed before
2072 * issuing any other commands.
2073 * @dir Direction (read or write) 2071 * @dir Direction (read or write)
2074 * 2072 *
2075 * return value 2073 * return value
@@ -2077,7 +2075,7 @@ static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
2077 */ 2075 */
2078static void mtip_hw_submit_io(struct driver_data *dd, sector_t start, 2076static void mtip_hw_submit_io(struct driver_data *dd, sector_t start,
2079 int nsect, int nents, int tag, void *callback, 2077 int nsect, int nents, int tag, void *callback,
2080 void *data, int barrier, int dir) 2078 void *data, int dir)
2081{ 2079{
2082 struct host_to_dev_fis *fis; 2080 struct host_to_dev_fis *fis;
2083 struct mtip_port *port = dd->port; 2081 struct mtip_port *port = dd->port;
@@ -2108,8 +2106,6 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t start,
2108 *((unsigned int *) &fis->lba_low) = (start & 0xFFFFFF); 2106 *((unsigned int *) &fis->lba_low) = (start & 0xFFFFFF);
2109 *((unsigned int *) &fis->lba_low_ex) = ((start >> 24) & 0xFFFFFF); 2107 *((unsigned int *) &fis->lba_low_ex) = ((start >> 24) & 0xFFFFFF);
2110 fis->device = 1 << 6; 2108 fis->device = 1 << 6;
2111 if (barrier)
2112 fis->device |= FUA_BIT;
2113 fis->features = nsect & 0xFF; 2109 fis->features = nsect & 0xFF;
2114 fis->features_ex = (nsect >> 8) & 0xFF; 2110 fis->features_ex = (nsect >> 8) & 0xFF;
2115 fis->sect_count = ((tag << 3) | (tag >> 5)); 2111 fis->sect_count = ((tag << 3) | (tag >> 5));
@@ -3087,7 +3083,6 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
3087 tag, 3083 tag,
3088 bio_endio, 3084 bio_endio,
3089 bio, 3085 bio,
3090 bio->bi_rw & REQ_FUA,
3091 bio_data_dir(bio)); 3086 bio_data_dir(bio));
3092 } else 3087 } else
3093 bio_io_error(bio); 3088 bio_io_error(bio);
@@ -3187,6 +3182,10 @@ skip_create_disk:
3187 blk_queue_max_segments(dd->queue, MTIP_MAX_SG); 3182 blk_queue_max_segments(dd->queue, MTIP_MAX_SG);
3188 blk_queue_physical_block_size(dd->queue, 4096); 3183 blk_queue_physical_block_size(dd->queue, 4096);
3189 blk_queue_io_min(dd->queue, 4096); 3184 blk_queue_io_min(dd->queue, 4096);
3185 /*
3186 * write back cache is not supported in the device. FUA depends on
3187 * write back cache support, hence setting flush support to zero.
3188 */
3190 blk_queue_flush(dd->queue, 0); 3189 blk_queue_flush(dd->queue, 0);
3191 3190
3192 /* Set the capacity of the device in 512 byte sectors. */ 3191 /* Set the capacity of the device in 512 byte sectors. */
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 723d7c4946dc..e0554a8f2233 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -104,9 +104,6 @@
104/* BAR number used to access the HBA registers. */ 104/* BAR number used to access the HBA registers. */
105#define MTIP_ABAR 5 105#define MTIP_ABAR 5
106 106
107/* Forced Unit Access Bit */
108#define FUA_BIT 0x80
109
110#ifdef DEBUG 107#ifdef DEBUG
111 #define dbg_printk(format, arg...) \ 108 #define dbg_printk(format, arg...) \
112 printk(pr_fmt(format), ##arg); 109 printk(pr_fmt(format), ##arg);
@@ -415,8 +412,6 @@ struct driver_data {
415 412
416 atomic_t resumeflag; /* Atomic variable to track suspend/resume */ 413 atomic_t resumeflag; /* Atomic variable to track suspend/resume */
417 414
418 atomic_t eh_active; /* Flag for error handling tracking */
419
420 struct task_struct *mtip_svc_handler; /* task_struct of svc thd */ 415 struct task_struct *mtip_svc_handler; /* task_struct of svc thd */
421}; 416};
422 417
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 3fd31dec8c9c..a6278e7e61a0 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -380,6 +380,7 @@ static int rbd_get_client(struct rbd_device *rbd_dev, const char *mon_addr,
380 rbdc = __rbd_client_find(opt); 380 rbdc = __rbd_client_find(opt);
381 if (rbdc) { 381 if (rbdc) {
382 ceph_destroy_options(opt); 382 ceph_destroy_options(opt);
383 kfree(rbd_opts);
383 384
384 /* using an existing client */ 385 /* using an existing client */
385 kref_get(&rbdc->kref); 386 kref_get(&rbdc->kref);
@@ -406,15 +407,15 @@ done_err:
406 407
407/* 408/*
408 * Destroy ceph client 409 * Destroy ceph client
410 *
411 * Caller must hold node_lock.
409 */ 412 */
410static void rbd_client_release(struct kref *kref) 413static void rbd_client_release(struct kref *kref)
411{ 414{
412 struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref); 415 struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
413 416
414 dout("rbd_release_client %p\n", rbdc); 417 dout("rbd_release_client %p\n", rbdc);
415 spin_lock(&node_lock);
416 list_del(&rbdc->node); 418 list_del(&rbdc->node);
417 spin_unlock(&node_lock);
418 419
419 ceph_destroy_client(rbdc->client); 420 ceph_destroy_client(rbdc->client);
420 kfree(rbdc->rbd_opts); 421 kfree(rbdc->rbd_opts);
@@ -427,7 +428,9 @@ static void rbd_client_release(struct kref *kref)
427 */ 428 */
428static void rbd_put_client(struct rbd_device *rbd_dev) 429static void rbd_put_client(struct rbd_device *rbd_dev)
429{ 430{
431 spin_lock(&node_lock);
430 kref_put(&rbd_dev->rbd_client->kref, rbd_client_release); 432 kref_put(&rbd_dev->rbd_client->kref, rbd_client_release);
433 spin_unlock(&node_lock);
431 rbd_dev->rbd_client = NULL; 434 rbd_dev->rbd_client = NULL;
432 rbd_dev->client = NULL; 435 rbd_dev->client = NULL;
433} 436}
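The rbd change moves node_lock out of the release callback and into the caller: kref_put() may invoke rbd_client_release(), which does the list_del(), so the lock must already be held at the point where the refcount can reach zero and a concurrent lookup can no longer find the dying client. A sketch of the pattern with hypothetical names (only the locking structure mirrors rbd.c):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* hypothetical mini-client used only to show the put-under-lock pattern */
struct example_client {
	struct kref		kref;
	struct list_head	node;
};

static DEFINE_SPINLOCK(client_list_lock);

static void example_client_release(struct kref *kref)
{
	struct example_client *c = container_of(kref, struct example_client, kref);

	/* client_list_lock is already held by whoever called kref_put() */
	list_del(&c->node);
	kfree(c);
}

static void example_put_client(struct example_client *c)
{
	spin_lock(&client_list_lock);
	kref_put(&c->kref, example_client_release);
	spin_unlock(&client_list_lock);
}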
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 55eaf474d32c..d620b4495745 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -286,8 +286,6 @@
286 286
287/* used to tell the module to turn on full debugging messages */ 287/* used to tell the module to turn on full debugging messages */
288static bool debug; 288static bool debug;
289/* used to keep tray locked at all times */
290static int keeplocked;
291/* default compatibility mode */ 289/* default compatibility mode */
292static bool autoclose=1; 290static bool autoclose=1;
293static bool autoeject; 291static bool autoeject;
@@ -1204,7 +1202,7 @@ void cdrom_release(struct cdrom_device_info *cdi, fmode_t mode)
1204 cdinfo(CD_CLOSE, "Use count for \"/dev/%s\" now zero\n", cdi->name); 1202 cdinfo(CD_CLOSE, "Use count for \"/dev/%s\" now zero\n", cdi->name);
1205 cdrom_dvd_rw_close_write(cdi); 1203 cdrom_dvd_rw_close_write(cdi);
1206 1204
1207 if ((cdo->capability & CDC_LOCK) && !keeplocked) { 1205 if ((cdo->capability & CDC_LOCK) && !cdi->keeplocked) {
1208 cdinfo(CD_CLOSE, "Unlocking door!\n"); 1206 cdinfo(CD_CLOSE, "Unlocking door!\n");
1209 cdo->lock_door(cdi, 0); 1207 cdo->lock_door(cdi, 0);
1210 } 1208 }
@@ -1371,7 +1369,7 @@ static int cdrom_select_disc(struct cdrom_device_info *cdi, int slot)
1371 curslot = info->hdr.curslot; 1369 curslot = info->hdr.curslot;
1372 kfree(info); 1370 kfree(info);
1373 1371
1374 if (cdi->use_count > 1 || keeplocked) { 1372 if (cdi->use_count > 1 || cdi->keeplocked) {
1375 if (slot == CDSL_CURRENT) { 1373 if (slot == CDSL_CURRENT) {
1376 return curslot; 1374 return curslot;
1377 } else { 1375 } else {
@@ -2119,11 +2117,6 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
2119 if (!nr) 2117 if (!nr)
2120 return -ENOMEM; 2118 return -ENOMEM;
2121 2119
2122 if (!access_ok(VERIFY_WRITE, ubuf, nframes * CD_FRAMESIZE_RAW)) {
2123 ret = -EFAULT;
2124 goto out;
2125 }
2126
2127 cgc.data_direction = CGC_DATA_READ; 2120 cgc.data_direction = CGC_DATA_READ;
2128 while (nframes > 0) { 2121 while (nframes > 0) {
2129 if (nr > nframes) 2122 if (nr > nframes)
@@ -2132,7 +2125,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
2132 ret = cdrom_read_block(cdi, &cgc, lba, nr, 1, CD_FRAMESIZE_RAW); 2125 ret = cdrom_read_block(cdi, &cgc, lba, nr, 1, CD_FRAMESIZE_RAW);
2133 if (ret) 2126 if (ret)
2134 break; 2127 break;
2135 if (__copy_to_user(ubuf, cgc.buffer, CD_FRAMESIZE_RAW * nr)) { 2128 if (copy_to_user(ubuf, cgc.buffer, CD_FRAMESIZE_RAW * nr)) {
2136 ret = -EFAULT; 2129 ret = -EFAULT;
2137 break; 2130 break;
2138 } 2131 }
@@ -2140,7 +2133,6 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
2140 nframes -= nr; 2133 nframes -= nr;
2141 lba += nr; 2134 lba += nr;
2142 } 2135 }
2143out:
2144 kfree(cgc.buffer); 2136 kfree(cgc.buffer);
2145 return ret; 2137 return ret;
2146} 2138}
@@ -2295,7 +2287,7 @@ static int cdrom_ioctl_eject(struct cdrom_device_info *cdi)
2295 2287
2296 if (!CDROM_CAN(CDC_OPEN_TRAY)) 2288 if (!CDROM_CAN(CDC_OPEN_TRAY))
2297 return -ENOSYS; 2289 return -ENOSYS;
2298 if (cdi->use_count != 1 || keeplocked) 2290 if (cdi->use_count != 1 || cdi->keeplocked)
2299 return -EBUSY; 2291 return -EBUSY;
2300 if (CDROM_CAN(CDC_LOCK)) { 2292 if (CDROM_CAN(CDC_LOCK)) {
2301 int ret = cdi->ops->lock_door(cdi, 0); 2293 int ret = cdi->ops->lock_door(cdi, 0);
@@ -2322,7 +2314,7 @@ static int cdrom_ioctl_eject_sw(struct cdrom_device_info *cdi,
2322 2314
2323 if (!CDROM_CAN(CDC_OPEN_TRAY)) 2315 if (!CDROM_CAN(CDC_OPEN_TRAY))
2324 return -ENOSYS; 2316 return -ENOSYS;
2325 if (keeplocked) 2317 if (cdi->keeplocked)
2326 return -EBUSY; 2318 return -EBUSY;
2327 2319
2328 cdi->options &= ~(CDO_AUTO_CLOSE | CDO_AUTO_EJECT); 2320 cdi->options &= ~(CDO_AUTO_CLOSE | CDO_AUTO_EJECT);
@@ -2453,7 +2445,7 @@ static int cdrom_ioctl_lock_door(struct cdrom_device_info *cdi,
2453 if (!CDROM_CAN(CDC_LOCK)) 2445 if (!CDROM_CAN(CDC_LOCK))
2454 return -EDRIVE_CANT_DO_THIS; 2446 return -EDRIVE_CANT_DO_THIS;
2455 2447
2456 keeplocked = arg ? 1 : 0; 2448 cdi->keeplocked = arg ? 1 : 0;
2457 2449
2458 /* 2450 /*
2459 * Don't unlock the door on multiple opens by default, but allow 2451 * Don't unlock the door on multiple opens by default, but allow
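Two independent cleanups land in cdrom.c here: keeplocked moves from a file-scope static shared by every drive into struct cdrom_device_info, making the tray-lock state per device, and the explicit access_ok() plus __copy_to_user() pair collapses into plain copy_to_user(), which performs the access check itself. A minimal kernel-style fragment of the copy-out as it now reads (not the full read_cdda loop):

#include <linux/types.h>
#include <linux/uaccess.h>

/*
 * Copy one chunk of raw frames to user space; cdrom.c now relies on
 * copy_to_user() doing the access_ok() validation internally.
 */
static int copy_frames_to_user(u8 __user *ubuf, const u8 *kbuf, size_t len)
{
	if (copy_to_user(ubuf, kbuf, len))
		return -EFAULT;
	return 0;
}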
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 97f87b29b9f3..f4aed5fc2cb6 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -1343,7 +1343,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
1343 1343
1344 tasklet_init(&atchan->tasklet, atc_tasklet, 1344 tasklet_init(&atchan->tasklet, atc_tasklet,
1345 (unsigned long)atchan); 1345 (unsigned long)atchan);
1346 atc_enable_irq(atchan); 1346 atc_enable_chan_irq(atdma, i);
1347 } 1347 }
1348 1348
1349 /* set base routines */ 1349 /* set base routines */
@@ -1410,7 +1410,7 @@ static int __exit at_dma_remove(struct platform_device *pdev)
1410 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1410 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1411 1411
1412 /* Disable interrupts */ 1412 /* Disable interrupts */
1413 atc_disable_irq(atchan); 1413 atc_disable_chan_irq(atdma, chan->chan_id);
1414 tasklet_disable(&atchan->tasklet); 1414 tasklet_disable(&atchan->tasklet);
1415 1415
1416 tasklet_kill(&atchan->tasklet); 1416 tasklet_kill(&atchan->tasklet);
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index dcaedfc181cf..a8d3277d60b5 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -327,28 +327,27 @@ static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli)
327} 327}
328 328
329 329
330static void atc_setup_irq(struct at_dma_chan *atchan, int on) 330static void atc_setup_irq(struct at_dma *atdma, int chan_id, int on)
331{ 331{
332 struct at_dma *atdma = to_at_dma(atchan->chan_common.device); 332 u32 ebci;
333 u32 ebci;
334 333
335 /* enable interrupts on buffer transfer completion & error */ 334 /* enable interrupts on buffer transfer completion & error */
336 ebci = AT_DMA_BTC(atchan->chan_common.chan_id) 335 ebci = AT_DMA_BTC(chan_id)
337 | AT_DMA_ERR(atchan->chan_common.chan_id); 336 | AT_DMA_ERR(chan_id);
338 if (on) 337 if (on)
339 dma_writel(atdma, EBCIER, ebci); 338 dma_writel(atdma, EBCIER, ebci);
340 else 339 else
341 dma_writel(atdma, EBCIDR, ebci); 340 dma_writel(atdma, EBCIDR, ebci);
342} 341}
343 342
344static inline void atc_enable_irq(struct at_dma_chan *atchan) 343static void atc_enable_chan_irq(struct at_dma *atdma, int chan_id)
345{ 344{
346 atc_setup_irq(atchan, 1); 345 atc_setup_irq(atdma, chan_id, 1);
347} 346}
348 347
349static inline void atc_disable_irq(struct at_dma_chan *atchan) 348static void atc_disable_chan_irq(struct at_dma *atdma, int chan_id)
350{ 349{
351 atc_setup_irq(atchan, 0); 350 atc_setup_irq(atdma, chan_id, 0);
352} 351}
353 352
354 353
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 2b8661b54eaf..24225f0fdcd8 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -599,7 +599,7 @@ static int dmatest_add_channel(struct dma_chan *chan)
599 } 599 }
600 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) { 600 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
601 cnt = dmatest_add_threads(dtc, DMA_PQ); 601 cnt = dmatest_add_threads(dtc, DMA_PQ);
602 thread_count += cnt > 0 ?: 0; 602 thread_count += cnt > 0 ? cnt : 0;
603 } 603 }
604 604
605 pr_info("dmatest: Started %u threads using %s\n", 605 pr_info("dmatest: Started %u threads using %s\n",
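The dmatest fix is a classic pitfall with gcc's two-operand ?: extension: "cnt > 0 ?: 0" re-uses the condition itself as the result, so a successful dmatest_add_threads() bumped thread_count by 1 instead of by cnt. A standalone demonstration (gcc or clang, since ?: without a middle operand is a GNU extension):

#include <stdio.h>

int main(void)
{
	int cnt = 8;

	/* buggy form: yields the truth value of (cnt > 0), i.e. 1 */
	int wrong = cnt > 0 ?: 0;
	/* fixed form from the hunk above: yields cnt itself */
	int right = cnt > 0 ? cnt : 0;

	printf("%d %d\n", wrong, right);	/* prints "1 8" */
	return 0;
}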
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index a8af379680c1..8bc5acf36ee5 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1102,11 +1102,13 @@ static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1102 case DMA_SLAVE_CONFIG: 1102 case DMA_SLAVE_CONFIG:
1103 if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { 1103 if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
1104 sdmac->per_address = dmaengine_cfg->src_addr; 1104 sdmac->per_address = dmaengine_cfg->src_addr;
1105 sdmac->watermark_level = dmaengine_cfg->src_maxburst; 1105 sdmac->watermark_level = dmaengine_cfg->src_maxburst *
1106 dmaengine_cfg->src_addr_width;
1106 sdmac->word_size = dmaengine_cfg->src_addr_width; 1107 sdmac->word_size = dmaengine_cfg->src_addr_width;
1107 } else { 1108 } else {
1108 sdmac->per_address = dmaengine_cfg->dst_addr; 1109 sdmac->per_address = dmaengine_cfg->dst_addr;
1109 sdmac->watermark_level = dmaengine_cfg->dst_maxburst; 1110 sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
1111 dmaengine_cfg->dst_addr_width;
1110 sdmac->word_size = dmaengine_cfg->dst_addr_width; 1112 sdmac->word_size = dmaengine_cfg->dst_addr_width;
1111 } 1113 }
1112 sdmac->direction = dmaengine_cfg->direction; 1114 sdmac->direction = dmaengine_cfg->direction;
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 54043cd831c8..812fd76e9c18 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -1262,7 +1262,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
1262 1262
1263 INIT_LIST_HEAD(&shdev->common.channels); 1263 INIT_LIST_HEAD(&shdev->common.channels);
1264 1264
1265 dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask); 1265 if (!pdata->slave_only)
1266 dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
1266 if (pdata->slave && pdata->slave_num) 1267 if (pdata->slave && pdata->slave_num)
1267 dma_cap_set(DMA_SLAVE, shdev->common.cap_mask); 1268 dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);
1268 1269
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 6628feaa7622..7f5f0da726da 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -263,6 +263,7 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
263static char ohci_driver_name[] = KBUILD_MODNAME; 263static char ohci_driver_name[] = KBUILD_MODNAME;
264 264
265#define PCI_DEVICE_ID_AGERE_FW643 0x5901 265#define PCI_DEVICE_ID_AGERE_FW643 0x5901
266#define PCI_DEVICE_ID_CREATIVE_SB1394 0x4001
266#define PCI_DEVICE_ID_JMICRON_JMB38X_FW 0x2380 267#define PCI_DEVICE_ID_JMICRON_JMB38X_FW 0x2380
267#define PCI_DEVICE_ID_TI_TSB12LV22 0x8009 268#define PCI_DEVICE_ID_TI_TSB12LV22 0x8009
268#define PCI_DEVICE_ID_TI_TSB12LV26 0x8020 269#define PCI_DEVICE_ID_TI_TSB12LV26 0x8020
@@ -289,6 +290,9 @@ static const struct {
289 {PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6, 290 {PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
290 QUIRK_NO_MSI}, 291 QUIRK_NO_MSI},
291 292
293 {PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID,
294 QUIRK_RESET_PACKET},
295
292 {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, PCI_ANY_ID, 296 {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, PCI_ANY_ID,
293 QUIRK_NO_MSI}, 297 QUIRK_NO_MSI},
294 298
@@ -299,7 +303,7 @@ static const struct {
299 QUIRK_NO_MSI}, 303 QUIRK_NO_MSI},
300 304
301 {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID, 305 {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID,
302 QUIRK_CYCLE_TIMER}, 306 QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
303 307
304 {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID, 308 {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID,
305 QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A}, 309 QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A},
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index 5b6948081f8f..ddfacc5ce56d 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -96,7 +96,7 @@ static const char *gpio_p2_names[LPC32XX_GPIO_P2_MAX] = {
96}; 96};
97 97
98static const char *gpio_p3_names[LPC32XX_GPIO_P3_MAX] = { 98static const char *gpio_p3_names[LPC32XX_GPIO_P3_MAX] = {
99 "gpi000", "gpio01", "gpio02", "gpio03", 99 "gpio00", "gpio01", "gpio02", "gpio03",
100 "gpio04", "gpio05" 100 "gpio04", "gpio05"
101}; 101};
102 102
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index 03d6dd5dcb77..f0febe5b8221 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -448,6 +448,7 @@ static int __devinit ioh_gpio_probe(struct pci_dev *pdev,
448 chip->reg = chip->base; 448 chip->reg = chip->base;
449 chip->ch = i; 449 chip->ch = i;
450 mutex_init(&chip->lock); 450 mutex_init(&chip->lock);
451 spin_lock_init(&chip->spinlock);
451 ioh_gpio_setup(chip, num_ports[i]); 452 ioh_gpio_setup(chip, num_ports[i]);
452 ret = gpiochip_add(&chip->gpio); 453 ret = gpiochip_add(&chip->gpio);
453 if (ret) { 454 if (ret) {
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index 68fa55e86eb1..e8729cc2ba2b 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -392,6 +392,7 @@ static int __devinit pch_gpio_probe(struct pci_dev *pdev,
392 chip->reg = chip->base; 392 chip->reg = chip->base;
393 pci_set_drvdata(pdev, chip); 393 pci_set_drvdata(pdev, chip);
394 mutex_init(&chip->lock); 394 mutex_init(&chip->lock);
395 spin_lock_init(&chip->spinlock);
395 pch_gpio_setup(chip); 396 pch_gpio_setup(chip);
396 ret = gpiochip_add(&chip->gpio); 397 ret = gpiochip_add(&chip->gpio);
397 if (ret) { 398 if (ret) {
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index a7661773c052..0a79a1167a25 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -2387,27 +2387,30 @@ static struct samsung_gpio_chip exynos4_gpios_3[] = {
2387}; 2387};
2388 2388
2389#if defined(CONFIG_ARCH_EXYNOS4) && defined(CONFIG_OF) 2389#if defined(CONFIG_ARCH_EXYNOS4) && defined(CONFIG_OF)
2390static int exynos4_gpio_xlate(struct gpio_chip *gc, struct device_node *np, 2390static int exynos4_gpio_xlate(struct gpio_chip *gc,
2391 const void *gpio_spec, u32 *flags) 2391 const struct of_phandle_args *gpiospec, u32 *flags)
2392{ 2392{
2393 const __be32 *gpio = gpio_spec; 2393 unsigned int pin;
2394 const u32 n = be32_to_cpup(gpio);
2395 unsigned int pin = gc->base + be32_to_cpu(gpio[0]);
2396 2394
2397 if (WARN_ON(gc->of_gpio_n_cells < 4)) 2395 if (WARN_ON(gc->of_gpio_n_cells < 4))
2398 return -EINVAL; 2396 return -EINVAL;
2399 2397
2400 if (n > gc->ngpio) 2398 if (WARN_ON(gpiospec->args_count < gc->of_gpio_n_cells))
2401 return -EINVAL; 2399 return -EINVAL;
2402 2400
2403 if (s3c_gpio_cfgpin(pin, S3C_GPIO_SFN(be32_to_cpu(gpio[1])))) 2401 if (gpiospec->args[0] > gc->ngpio)
2402 return -EINVAL;
2403
2404 pin = gc->base + gpiospec->args[0];
2405
2406 if (s3c_gpio_cfgpin(pin, S3C_GPIO_SFN(gpiospec->args[1])))
2404 pr_warn("gpio_xlate: failed to set pin function\n"); 2407 pr_warn("gpio_xlate: failed to set pin function\n");
2405 if (s3c_gpio_setpull(pin, be32_to_cpu(gpio[2]))) 2408 if (s3c_gpio_setpull(pin, gpiospec->args[2]))
2406 pr_warn("gpio_xlate: failed to set pin pull up/down\n"); 2409 pr_warn("gpio_xlate: failed to set pin pull up/down\n");
2407 if (s5p_gpio_set_drvstr(pin, be32_to_cpu(gpio[3]))) 2410 if (s5p_gpio_set_drvstr(pin, gpiospec->args[3]))
2408 pr_warn("gpio_xlate: failed to set pin drive strength\n"); 2411 pr_warn("gpio_xlate: failed to set pin drive strength\n");
2409 2412
2410 return n; 2413 return gpiospec->args[0];
2411} 2414}
2412 2415
2413static const struct of_device_id exynos4_gpio_dt_match[] __initdata = { 2416static const struct of_device_id exynos4_gpio_dt_match[] __initdata = {
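The gpio-samsung conversion tracks the gpiolib change that made of_xlate take a struct of_phandle_args instead of a raw cell pointer, so drivers no longer byte-swap the cells themselves. A hedged sketch of the callback shape for a hypothetical two-cell binding (cell meanings are invented for illustration; the Exynos binding above uses four cells for function, pull and drive strength):

#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/of.h>

/*
 * Hypothetical translator for a "#gpio-cells = <2>" binding:
 * cell 0 is the pin offset within the chip, cell 1 carries flags.
 */
static int example_gpio_xlate(struct gpio_chip *gc,
			      const struct of_phandle_args *gpiospec,
			      u32 *flags)
{
	if (WARN_ON(gpiospec->args_count < gc->of_gpio_n_cells))
		return -EINVAL;

	if (gpiospec->args[0] >= gc->ngpio)
		return -EINVAL;

	if (flags)
		*flags = gpiospec->args[1];

	return gpiospec->args[0];	/* offset within this chip */
}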
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index ddd70db45f76..637fcc3766c7 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -315,7 +315,8 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd,
315 if (err) 315 if (err)
316 return err; 316 return err;
317 317
318 if (__get_user(c32.auth, &client->auth) 318 if (__get_user(c32.idx, &client->idx)
319 || __get_user(c32.auth, &client->auth)
319 || __get_user(c32.pid, &client->pid) 320 || __get_user(c32.pid, &client->pid)
320 || __get_user(c32.uid, &client->uid) 321 || __get_user(c32.uid, &client->uid)
321 || __get_user(c32.magic, &client->magic) 322 || __get_user(c32.magic, &client->magic)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index b3b51c43dad0..00fbff5ddd81 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1872,7 +1872,7 @@ static void intel_update_fbc(struct drm_device *dev)
1872 if (enable_fbc < 0) { 1872 if (enable_fbc < 0) {
1873 DRM_DEBUG_KMS("fbc set to per-chip default\n"); 1873 DRM_DEBUG_KMS("fbc set to per-chip default\n");
1874 enable_fbc = 1; 1874 enable_fbc = 1;
1875 if (INTEL_INFO(dev)->gen <= 5) 1875 if (INTEL_INFO(dev)->gen <= 6)
1876 enable_fbc = 0; 1876 enable_fbc = 0;
1877 } 1877 }
1878 if (!enable_fbc) { 1878 if (!enable_fbc) {
@@ -5307,6 +5307,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
5307 } 5307 }
5308 } 5308 }
5309 5309
5310 pipeconf &= ~PIPECONF_INTERLACE_MASK;
5310 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 5311 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5311 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; 5312 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
5312 /* the chip adds 2 halflines automatically */ 5313 /* the chip adds 2 halflines automatically */
@@ -5317,7 +5318,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
5317 adjusted_mode->crtc_vsync_end -= 1; 5318 adjusted_mode->crtc_vsync_end -= 1;
5318 adjusted_mode->crtc_vsync_start -= 1; 5319 adjusted_mode->crtc_vsync_start -= 1;
5319 } else 5320 } else
5320 pipeconf &= ~PIPECONF_INTERLACE_MASK; /* progressive */ 5321 pipeconf |= PIPECONF_PROGRESSIVE;
5321 5322
5322 I915_WRITE(HTOTAL(pipe), 5323 I915_WRITE(HTOTAL(pipe),
5323 (adjusted_mode->crtc_hdisplay - 1) | 5324 (adjusted_mode->crtc_hdisplay - 1) |
@@ -5902,6 +5903,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5902 } 5903 }
5903 } 5904 }
5904 5905
5906 pipeconf &= ~PIPECONF_INTERLACE_MASK;
5905 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 5907 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5906 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; 5908 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
5907 /* the chip adds 2 halflines automatically */ 5909 /* the chip adds 2 halflines automatically */
@@ -5912,7 +5914,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5912 adjusted_mode->crtc_vsync_end -= 1; 5914 adjusted_mode->crtc_vsync_end -= 1;
5913 adjusted_mode->crtc_vsync_start -= 1; 5915 adjusted_mode->crtc_vsync_start -= 1;
5914 } else 5916 } else
5915 pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */ 5917 pipeconf |= PIPECONF_PROGRESSIVE;
5916 5918
5917 I915_WRITE(HTOTAL(pipe), 5919 I915_WRITE(HTOTAL(pipe),
5918 (adjusted_mode->crtc_hdisplay - 1) | 5920 (adjusted_mode->crtc_hdisplay - 1) |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index db3b461ad412..94f860cce3f7 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -208,17 +208,8 @@ intel_dp_link_clock(uint8_t link_bw)
208 */ 208 */
209 209
210static int 210static int
211intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock, int check_bpp) 211intel_dp_link_required(int pixel_clock, int bpp)
212{ 212{
213 struct drm_crtc *crtc = intel_dp->base.base.crtc;
214 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
215 int bpp = 24;
216
217 if (check_bpp)
218 bpp = check_bpp;
219 else if (intel_crtc)
220 bpp = intel_crtc->bpp;
221
222 return (pixel_clock * bpp + 9) / 10; 213 return (pixel_clock * bpp + 9) / 10;
223} 214}
224 215
@@ -245,12 +236,11 @@ intel_dp_mode_valid(struct drm_connector *connector,
245 return MODE_PANEL; 236 return MODE_PANEL;
246 } 237 }
247 238
248 mode_rate = intel_dp_link_required(intel_dp, mode->clock, 0); 239 mode_rate = intel_dp_link_required(mode->clock, 24);
249 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); 240 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
250 241
251 if (mode_rate > max_rate) { 242 if (mode_rate > max_rate) {
252 mode_rate = intel_dp_link_required(intel_dp, 243 mode_rate = intel_dp_link_required(mode->clock, 18);
253 mode->clock, 18);
254 if (mode_rate > max_rate) 244 if (mode_rate > max_rate)
255 return MODE_CLOCK_HIGH; 245 return MODE_CLOCK_HIGH;
256 else 246 else
@@ -683,7 +673,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
683 int lane_count, clock; 673 int lane_count, clock;
684 int max_lane_count = intel_dp_max_lane_count(intel_dp); 674 int max_lane_count = intel_dp_max_lane_count(intel_dp);
685 int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; 675 int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
686 int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 0; 676 int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
687 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; 677 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
688 678
689 if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) { 679 if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
@@ -701,7 +691,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
701 for (clock = 0; clock <= max_clock; clock++) { 691 for (clock = 0; clock <= max_clock; clock++) {
702 int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); 692 int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
703 693
704 if (intel_dp_link_required(intel_dp, mode->clock, bpp) 694 if (intel_dp_link_required(mode->clock, bpp)
705 <= link_avail) { 695 <= link_avail) {
706 intel_dp->link_bw = bws[clock]; 696 intel_dp->link_bw = bws[clock];
707 intel_dp->lane_count = lane_count; 697 intel_dp->lane_count = lane_count;
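intel_dp_link_required() is now a pure function of pixel clock and bpp, returning (pixel_clock * bpp + 9) / 10, and the mode_valid path simply retries at 18 bpp (6 bpc, i.e. the dithered fallback) when 24 bpp does not fit the link. A quick worked example for a 148.5 MHz (1920x1080@60) pixel clock:

#include <stdio.h>

/* same arithmetic as intel_dp_link_required(); pixel_clock is in kHz */
static int link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

int main(void)
{
	int clock = 148500;	/* kHz, 1920x1080@60 */

	printf("24 bpp: %d\n", link_required(clock, 24));	/* 356400 */
	printf("18 bpp: %d\n", link_required(clock, 18));	/* 267300 */
	return 0;
}

Whether either value fits is then just a comparison against intel_dp_max_data_rate() for the link's clock and lane count, as mode_fixup does in the loop above.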
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 798f6e1aa544..aa84832b0e1a 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -694,6 +694,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
694 }, 694 },
695 { 695 {
696 .callback = intel_no_lvds_dmi_callback, 696 .callback = intel_no_lvds_dmi_callback,
697 .ident = "AOpen i45GMx-I",
698 .matches = {
699 DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
700 DMI_MATCH(DMI_BOARD_NAME, "i45GMx-I"),
701 },
702 },
703 {
704 .callback = intel_no_lvds_dmi_callback,
697 .ident = "Aopen i945GTt-VFA", 705 .ident = "Aopen i945GTt-VFA",
698 .matches = { 706 .matches = {
699 DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"), 707 DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 1e382ad5a2b8..a37c31e358aa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -54,9 +54,10 @@ struct bit_entry {
54int bit_table(struct drm_device *, u8 id, struct bit_entry *); 54int bit_table(struct drm_device *, u8 id, struct bit_entry *);
55 55
56enum dcb_gpio_tag { 56enum dcb_gpio_tag {
57 DCB_GPIO_TVDAC0 = 0xc, 57 DCB_GPIO_PANEL_POWER = 0x01,
58 DCB_GPIO_TVDAC0 = 0x0c,
58 DCB_GPIO_TVDAC1 = 0x2d, 59 DCB_GPIO_TVDAC1 = 0x2d,
59 DCB_GPIO_PWM_FAN = 0x9, 60 DCB_GPIO_PWM_FAN = 0x09,
60 DCB_GPIO_FAN_SENSE = 0x3d, 61 DCB_GPIO_FAN_SENSE = 0x3d,
61 DCB_GPIO_UNUSED = 0xff 62 DCB_GPIO_UNUSED = 0xff
62}; 63};
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 3cb52bc52b21..795a9e3c990a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -219,6 +219,16 @@ nouveau_display_init(struct drm_device *dev)
219 if (ret) 219 if (ret)
220 return ret; 220 return ret;
221 221
222 /* power on internal panel if it's not already. the init tables of
223 * some vbios default this to off for some reason, causing the
224 * panel to not work after resume
225 */
226 if (nouveau_gpio_func_get(dev, DCB_GPIO_PANEL_POWER) == 0) {
227 nouveau_gpio_func_set(dev, DCB_GPIO_PANEL_POWER, true);
228 msleep(300);
229 }
230
231 /* enable polling for external displays */
222 drm_kms_helper_poll_enable(dev); 232 drm_kms_helper_poll_enable(dev);
223 233
224 /* enable hotplug interrupts */ 234 /* enable hotplug interrupts */
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index e4a7cfe7898d..81d7962e7252 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -124,7 +124,7 @@ MODULE_PARM_DESC(ctxfw, "Use external HUB/GPC ucode (fermi)\n");
124int nouveau_ctxfw; 124int nouveau_ctxfw;
125module_param_named(ctxfw, nouveau_ctxfw, int, 0400); 125module_param_named(ctxfw, nouveau_ctxfw, int, 0400);
126 126
127MODULE_PARM_DESC(ctxfw, "Santise DCB table according to MXM-SIS\n"); 127MODULE_PARM_DESC(mxmdcb, "Santise DCB table according to MXM-SIS\n");
128int nouveau_mxmdcb = 1; 128int nouveau_mxmdcb = 1;
129module_param_named(mxmdcb, nouveau_mxmdcb, int, 0400); 129module_param_named(mxmdcb, nouveau_mxmdcb, int, 0400);
130 130
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 5f0bc57fdaab..7ce3fde40743 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -380,6 +380,25 @@ retry:
380} 380}
381 381
382static int 382static int
383validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
384{
385 struct nouveau_fence *fence = NULL;
386 int ret = 0;
387
388 spin_lock(&nvbo->bo.bdev->fence_lock);
389 if (nvbo->bo.sync_obj)
390 fence = nouveau_fence_ref(nvbo->bo.sync_obj);
391 spin_unlock(&nvbo->bo.bdev->fence_lock);
392
393 if (fence) {
394 ret = nouveau_fence_sync(fence, chan);
395 nouveau_fence_unref(&fence);
396 }
397
398 return ret;
399}
400
401static int
383validate_list(struct nouveau_channel *chan, struct list_head *list, 402validate_list(struct nouveau_channel *chan, struct list_head *list,
384 struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr) 403 struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
385{ 404{
@@ -393,7 +412,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
393 list_for_each_entry(nvbo, list, entry) { 412 list_for_each_entry(nvbo, list, entry) {
394 struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index]; 413 struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
395 414
396 ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan); 415 ret = validate_sync(chan, nvbo);
397 if (unlikely(ret)) { 416 if (unlikely(ret)) {
398 NV_ERROR(dev, "fail pre-validate sync\n"); 417 NV_ERROR(dev, "fail pre-validate sync\n");
399 return ret; 418 return ret;
@@ -416,7 +435,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
416 return ret; 435 return ret;
417 } 436 }
418 437
419 ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan); 438 ret = validate_sync(chan, nvbo);
420 if (unlikely(ret)) { 439 if (unlikely(ret)) {
421 NV_ERROR(dev, "fail post-validate sync\n"); 440 NV_ERROR(dev, "fail post-validate sync\n");
422 return ret; 441 return ret;
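
A note on the validate_sync() helper added above: rather than handing nvbo->bo.sync_obj straight to nouveau_fence_sync(), it takes its own reference to the fence while holding bo.bdev->fence_lock and only then syncs against it, so the fence cannot vanish underneath the caller. A minimal userspace sketch of that take-a-reference-under-the-lock pattern follows, using pthreads and purely illustrative names, not the TTM/nouveau API:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fence {
	int refcount;	/* touched only under 'lock' in this sketch */
	int seq;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct fence *current_fence;

static struct fence *fence_get_locked(void)
{
	struct fence *f;

	pthread_mutex_lock(&lock);
	f = current_fence;
	if (f)
		f->refcount++;	/* grab a reference while it is still valid */
	pthread_mutex_unlock(&lock);
	return f;
}

static void fence_put(struct fence *f)
{
	int free_it;

	pthread_mutex_lock(&lock);
	free_it = (--f->refcount == 0);
	if (free_it && current_fence == f)
		current_fence = NULL;
	pthread_mutex_unlock(&lock);
	if (free_it)
		free(f);
}

int main(void)
{
	struct fence *created = calloc(1, sizeof(*created));

	if (!created)
		return 1;
	created->refcount = 1;
	created->seq = 42;
	current_fence = created;

	struct fence *f = fence_get_locked();		/* refcount 1 -> 2 */
	if (f) {
		printf("syncing against fence %d\n", f->seq);	/* safe: we hold a reference */
		fence_put(f);				/* refcount 2 -> 1 */
	}
	fence_put(created);				/* refcount 1 -> 0, freed */
	return 0;
}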
diff --git a/drivers/gpu/drm/nouveau/nouveau_mxm.c b/drivers/gpu/drm/nouveau/nouveau_mxm.c
index 8bccddf4eff0..e5a64f0f4cb7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mxm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mxm.c
@@ -656,7 +656,16 @@ nouveau_mxm_init(struct drm_device *dev)
656 656
657 if (mxm_shadow(dev, mxm[0])) { 657 if (mxm_shadow(dev, mxm[0])) {
658 MXM_MSG(dev, "failed to locate valid SIS\n"); 658 MXM_MSG(dev, "failed to locate valid SIS\n");
659#if 0
660 /* we should, perhaps, fall back to some kind of limited
661 * mode here if the x86 vbios hasn't already done the
662 * work for us (so we prevent loading with completely
663 * whacked vbios tables).
664 */
659 return -EINVAL; 665 return -EINVAL;
666#else
667 return 0;
668#endif
660 } 669 }
661 670
662 MXM_MSG(dev, "MXMS Version %d.%d\n", 671 MXM_MSG(dev, "MXMS Version %d.%d\n",
diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c
index 03937212e9d8..ec5481dfcd82 100644
--- a/drivers/gpu/drm/nouveau/nv50_pm.c
+++ b/drivers/gpu/drm/nouveau/nv50_pm.c
@@ -495,9 +495,9 @@ nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
495 struct drm_nouveau_private *dev_priv = dev->dev_private; 495 struct drm_nouveau_private *dev_priv = dev->dev_private;
496 struct nv50_pm_state *info; 496 struct nv50_pm_state *info;
497 struct pll_lims pll; 497 struct pll_lims pll;
498 int ret = -EINVAL; 498 int clk, ret = -EINVAL;
499 int N, M, P1, P2; 499 int N, M, P1, P2;
500 u32 clk, out; 500 u32 out;
501 501
502 if (dev_priv->chipset == 0xaa || 502 if (dev_priv->chipset == 0xaa ||
503 dev_priv->chipset == 0xac) 503 dev_priv->chipset == 0xac)
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 891935271d34..742f17f009a9 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1184,7 +1184,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1184 WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1); 1184 WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
1185 1185
1186 WREG32(EVERGREEN_DESKTOP_HEIGHT + radeon_crtc->crtc_offset, 1186 WREG32(EVERGREEN_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
1187 crtc->mode.vdisplay); 1187 target_fb->height);
1188 x &= ~3; 1188 x &= ~3;
1189 y &= ~1; 1189 y &= ~1;
1190 WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset, 1190 WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset,
@@ -1353,7 +1353,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
1353 WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1); 1353 WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
1354 1354
1355 WREG32(AVIVO_D1MODE_DESKTOP_HEIGHT + radeon_crtc->crtc_offset, 1355 WREG32(AVIVO_D1MODE_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
1356 crtc->mode.vdisplay); 1356 target_fb->height);
1357 x &= ~3; 1357 x &= ~3;
1358 y &= ~1; 1358 y &= ~1;
1359 WREG32(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset, 1359 WREG32(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset,
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index a71557ce01dc..552b436451fd 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -564,9 +564,21 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
564 ENCODER_OBJECT_ID_NUTMEG) 564 ENCODER_OBJECT_ID_NUTMEG)
565 panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; 565 panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
566 else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) == 566 else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
567 ENCODER_OBJECT_ID_TRAVIS) 567 ENCODER_OBJECT_ID_TRAVIS) {
568 panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; 568 u8 id[6];
569 else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { 569 int i;
570 for (i = 0; i < 6; i++)
571 id[i] = radeon_read_dpcd_reg(radeon_connector, 0x503 + i);
572 if (id[0] == 0x73 &&
573 id[1] == 0x69 &&
574 id[2] == 0x76 &&
575 id[3] == 0x61 &&
576 id[4] == 0x72 &&
577 id[5] == 0x54)
578 panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
579 else
580 panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
581 } else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
570 u8 tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP); 582 u8 tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
571 if (tmp & 1) 583 if (tmp & 1)
572 panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; 584 panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
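
On the atombios_dp.c hunk above: the added branch reads six DPCD bytes starting at offset 0x503 and compares them against fixed values before picking the panel mode. Those constants are plain ASCII character codes; the tiny userspace sketch below (array contents copied from the hunk, everything else illustrative) just prints what the comparison matches:

#include <stdio.h>

int main(void)
{
	/* the six byte values compared in the hunk */
	const unsigned char id[6] = { 0x73, 0x69, 0x76, 0x61, 0x72, 0x54 };

	for (int i = 0; i < 6; i++)
		putchar(id[i]);		/* prints "sivarT" */
	putchar('\n');
	return 0;
}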
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index ae09fe82afbc..9be353b894cc 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -3191,6 +3191,7 @@ static int evergreen_startup(struct radeon_device *rdev)
3191 if (r) { 3191 if (r) {
3192 DRM_ERROR("radeon: failed testing IB (%d).\n", r); 3192 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
3193 rdev->accel_working = false; 3193 rdev->accel_working = false;
3194 return r;
3194 } 3195 }
3195 3196
3196 r = r600_audio_init(rdev); 3197 r = r600_audio_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index bfd36ab643a6..18cd84fae99c 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -789,9 +789,7 @@ int r100_irq_process(struct radeon_device *rdev)
789 WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM); 789 WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM);
790 break; 790 break;
791 default: 791 default:
792 msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN; 792 WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
793 WREG32(RADEON_MSI_REARM_EN, msi_rearm);
794 WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN);
795 break; 793 break;
796 } 794 }
797 } 795 }
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index d996f4381130..accc032c103f 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -468,27 +468,42 @@ set_default_state(struct radeon_device *rdev)
468 radeon_ring_write(ring, sq_stack_resource_mgmt_2); 468 radeon_ring_write(ring, sq_stack_resource_mgmt_2);
469} 469}
470 470
471#define I2F_MAX_BITS 15
472#define I2F_MAX_INPUT ((1 << I2F_MAX_BITS) - 1)
473#define I2F_SHIFT (24 - I2F_MAX_BITS)
474
475/*
476 * Converts unsigned integer into 32-bit IEEE floating point representation.
477 * Conversion is not universal and only works for the range from 0
478 * to 2^I2F_MAX_BITS-1. Currently we only use it with inputs between
479 * 0 and 16384 (inclusive), so I2F_MAX_BITS=15 is enough. If necessary,
480 * I2F_MAX_BITS can be increased, but that will add to the loop iterations
481 * and slow us down. Conversion is done by shifting the input and counting
482 * down until the first 1 reaches bit position 23. The resulting counter
483 * and the shifted input are, respectively, the exponent and the fraction.
484 * The sign is always zero.
485 */
471static uint32_t i2f(uint32_t input) 486static uint32_t i2f(uint32_t input)
472{ 487{
473 u32 result, i, exponent, fraction; 488 u32 result, i, exponent, fraction;
474 489
475 if ((input & 0x3fff) == 0) 490 WARN_ON_ONCE(input > I2F_MAX_INPUT);
476 result = 0; /* 0 is a special case */ 491
492 if ((input & I2F_MAX_INPUT) == 0)
493 result = 0;
477 else { 494 else {
478 exponent = 140; /* exponent biased by 127; */ 495 exponent = 126 + I2F_MAX_BITS;
479 fraction = (input & 0x3fff) << 10; /* cheat and only 496 fraction = (input & I2F_MAX_INPUT) << I2F_SHIFT;
480 handle numbers below 2^^15 */ 497
481 for (i = 0; i < 14; i++) { 498 for (i = 0; i < I2F_MAX_BITS; i++) {
482 if (fraction & 0x800000) 499 if (fraction & 0x800000)
483 break; 500 break;
484 else { 501 else {
485 fraction = fraction << 1; /* keep 502 fraction = fraction << 1;
486 shifting left until top bit = 1 */
487 exponent = exponent - 1; 503 exponent = exponent - 1;
488 } 504 }
489 } 505 }
490 result = exponent << 23 | (fraction & 0x7fffff); /* mask 506 result = exponent << 23 | (fraction & 0x7fffff);
491 off top bit; assumed 1 */
492 } 507 }
493 return result; 508 return result;
494} 509}
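
On the r600_blit_kms.c hunk above: the new comment describes converting an unsigned integer in 0..2^I2F_MAX_BITS-1 to its IEEE-754 single-precision bit pattern by shifting the value until its leading 1 reaches bit 23 while decrementing a biased exponent. A self-contained userspace sketch of the same bit manipulation, checked against the compiler's own float conversion (illustrative only, not the kernel code path):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>

#define I2F_MAX_BITS	15
#define I2F_MAX_INPUT	((1u << I2F_MAX_BITS) - 1)
#define I2F_SHIFT	(24 - I2F_MAX_BITS)

static uint32_t i2f_demo(uint32_t input)
{
	uint32_t exponent, fraction;
	int i;

	if ((input & I2F_MAX_INPUT) == 0)
		return 0;	/* 0 is a special case */

	exponent = 126 + I2F_MAX_BITS;	/* biased exponent, adjusted below */
	fraction = (input & I2F_MAX_INPUT) << I2F_SHIFT;

	for (i = 0; i < I2F_MAX_BITS; i++) {
		if (fraction & 0x800000)	/* leading 1 reached bit 23 */
			break;
		fraction <<= 1;
		exponent--;
	}
	return exponent << 23 | (fraction & 0x7fffff);
}

int main(void)
{
	for (uint32_t v = 0; v <= I2F_MAX_INPUT; v++) {
		float f = (float)v;
		uint32_t ieee;

		memcpy(&ieee, &f, sizeof(ieee));
		assert(i2f_demo(v) == ieee);	/* exact for this input range */
	}
	printf("i2f matches IEEE-754 for 0..%u\n", I2F_MAX_INPUT);
	return 0;
}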
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 5082d17d14dc..9e72daeeddc6 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2931,6 +2931,20 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
2931 bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP5; 2931 bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP5;
2932 } 2932 }
2933 } 2933 }
2934 if ((radeon_encoder->devices & ATOM_DEVICE_DFP6_SUPPORT) &&
2935 (radeon_connector->devices & ATOM_DEVICE_DFP6_SUPPORT)) {
2936 if (connected) {
2937 DRM_DEBUG_KMS("DFP6 connected\n");
2938 bios_0_scratch |= ATOM_S0_DFP6;
2939 bios_3_scratch |= ATOM_S3_DFP6_ACTIVE;
2940 bios_6_scratch |= ATOM_S6_ACC_REQ_DFP6;
2941 } else {
2942 DRM_DEBUG_KMS("DFP6 disconnected\n");
2943 bios_0_scratch &= ~ATOM_S0_DFP6;
2944 bios_3_scratch &= ~ATOM_S3_DFP6_ACTIVE;
2945 bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP6;
2946 }
2947 }
2934 2948
2935 if (rdev->family >= CHIP_R600) { 2949 if (rdev->family >= CHIP_R600) {
2936 WREG32(R600_BIOS_0_SCRATCH, bios_0_scratch); 2950 WREG32(R600_BIOS_0_SCRATCH, bios_0_scratch);
@@ -2951,6 +2965,9 @@ radeon_atombios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc)
2951 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 2965 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
2952 uint32_t bios_3_scratch; 2966 uint32_t bios_3_scratch;
2953 2967
2968 if (ASIC_IS_DCE4(rdev))
2969 return;
2970
2954 if (rdev->family >= CHIP_R600) 2971 if (rdev->family >= CHIP_R600)
2955 bios_3_scratch = RREG32(R600_BIOS_3_SCRATCH); 2972 bios_3_scratch = RREG32(R600_BIOS_3_SCRATCH);
2956 else 2973 else
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 13ac63ba6075..98724fcb0088 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -59,8 +59,9 @@ static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
59 59
60 obj = (union acpi_object *)buffer.pointer; 60 obj = (union acpi_object *)buffer.pointer;
61 memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length); 61 memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length);
62 len = obj->buffer.length;
62 kfree(buffer.pointer); 63 kfree(buffer.pointer);
63 return obj->buffer.length; 64 return len;
64} 65}
65 66
66bool radeon_atrm_supported(struct pci_dev *pdev) 67bool radeon_atrm_supported(struct pci_dev *pdev)
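
On the radeon_atpx_handler.c hunk above: obj points into buffer.pointer, so the old code read obj->buffer.length after the buffer had been freed; the fix copies the length to a local first. The same pattern in plain C, with illustrative names rather than the ACPI API:

#include <stdlib.h>
#include <stdio.h>

struct blob {
	size_t length;
	unsigned char data[16];
};

int main(void)
{
	struct blob *obj = malloc(sizeof(*obj));
	size_t len;

	if (!obj)
		return 1;
	obj->length = 12;

	len = obj->length;	/* read while the allocation is still live */
	free(obj);
	/* reading obj->length here, as the old code effectively did through
	 * the freed buffer, would be a use-after-free */
	printf("copied %zu bytes\n", len);
	return 0;
}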
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index cec51a5b69dd..49f7cb7e226b 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -883,6 +883,8 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
883 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 883 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
884 return 0; 884 return 0;
885 885
886 drm_kms_helper_poll_disable(dev);
887
886 /* turn off display hw */ 888 /* turn off display hw */
887 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 889 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
888 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); 890 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
@@ -972,6 +974,8 @@ int radeon_resume_kms(struct drm_device *dev)
972 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 974 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
973 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); 975 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
974 } 976 }
977
978 drm_kms_helper_poll_enable(dev);
975 return 0; 979 return 0;
976} 980}
977 981
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 64ea3dd9e6ff..4bd36a354fbe 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -364,8 +364,10 @@ int radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
364 int not_processed = 0; 364 int not_processed = 0;
365 365
366 read_lock_irqsave(&rdev->fence_lock, irq_flags); 366 read_lock_irqsave(&rdev->fence_lock, irq_flags);
367 if (!rdev->fence_drv[ring].initialized) 367 if (!rdev->fence_drv[ring].initialized) {
368 read_unlock_irqrestore(&rdev->fence_lock, irq_flags);
368 return 0; 369 return 0;
370 }
369 371
370 if (!list_empty(&rdev->fence_drv[ring].emitted)) { 372 if (!list_empty(&rdev->fence_drv[ring].emitted)) {
371 struct list_head *ptr; 373 struct list_head *ptr;
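
On the radeon_fence.c hunk above: the early return for an uninitialized fence ring was leaving fence_lock read-held, and the fix drops the lock on that path. A small pthreads sketch of the rule that every return inside the locked region must unlock first (illustrative names, not the radeon structures):

#include <pthread.h>
#include <stdio.h>
#include <stdbool.h>

static pthread_rwlock_t fence_lock = PTHREAD_RWLOCK_INITIALIZER;
static bool initialized;

static int count_emitted(void)
{
	int not_processed = 0;

	pthread_rwlock_rdlock(&fence_lock);
	if (!initialized) {
		/* this unlock is what the hunk adds */
		pthread_rwlock_unlock(&fence_lock);
		return 0;
	}
	/* ... walk the emitted list here ... */
	pthread_rwlock_unlock(&fence_lock);
	return not_processed;
}

int main(void)
{
	printf("emitted: %d\n", count_emitted());
	return 0;
}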
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index e2a393ff0c44..98a8ad680109 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -958,6 +958,7 @@ struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
958 i2c->rec = *rec; 958 i2c->rec = *rec;
959 i2c->adapter.owner = THIS_MODULE; 959 i2c->adapter.owner = THIS_MODULE;
960 i2c->adapter.class = I2C_CLASS_DDC; 960 i2c->adapter.class = I2C_CLASS_DDC;
961 i2c->adapter.dev.parent = &dev->pdev->dev;
961 i2c->dev = dev; 962 i2c->dev = dev;
962 snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), 963 snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
963 "Radeon aux bus %s", name); 964 "Radeon aux bus %s", name);
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index ec46eb45e34c..c05865e5521f 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -684,9 +684,7 @@ int rs600_irq_process(struct radeon_device *rdev)
684 WREG32(RADEON_BUS_CNTL, msi_rearm | RS600_MSI_REARM); 684 WREG32(RADEON_BUS_CNTL, msi_rearm | RS600_MSI_REARM);
685 break; 685 break;
686 default: 686 default:
687 msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN; 687 WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
688 WREG32(RADEON_MSI_REARM_EN, msi_rearm);
689 WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN);
690 break; 688 break;
691 } 689 }
692 } 690 }
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index 0c33ae9cf0f0..406632472c1b 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -548,6 +548,7 @@ static int mousevsc_remove(struct hv_device *dev)
548 struct mousevsc_dev *input_dev = hv_get_drvdata(dev); 548 struct mousevsc_dev *input_dev = hv_get_drvdata(dev);
549 549
550 vmbus_close(dev->channel); 550 vmbus_close(dev->channel);
551 hid_hw_stop(input_dev->hid_device);
551 hid_destroy_device(input_dev->hid_device); 552 hid_destroy_device(input_dev->hid_device);
552 mousevsc_free_device(input_dev); 553 mousevsc_free_device(input_dev);
553 554
diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c
index b47e58b52d9f..acab74cde727 100644
--- a/drivers/hid/hid-wacom.c
+++ b/drivers/hid/hid-wacom.c
@@ -531,7 +531,6 @@ static int wacom_probe(struct hid_device *hdev,
531 wdata->battery.type = POWER_SUPPLY_TYPE_BATTERY; 531 wdata->battery.type = POWER_SUPPLY_TYPE_BATTERY;
532 wdata->battery.use_for_apm = 0; 532 wdata->battery.use_for_apm = 0;
533 533
534 power_supply_powers(&wdata->battery, &hdev->dev);
535 534
536 ret = power_supply_register(&hdev->dev, &wdata->battery); 535 ret = power_supply_register(&hdev->dev, &wdata->battery);
537 if (ret) { 536 if (ret) {
@@ -540,6 +539,8 @@ static int wacom_probe(struct hid_device *hdev,
540 goto err_battery; 539 goto err_battery;
541 } 540 }
542 541
542 power_supply_powers(&wdata->battery, &hdev->dev);
543
543 wdata->ac.properties = wacom_ac_props; 544 wdata->ac.properties = wacom_ac_props;
544 wdata->ac.num_properties = ARRAY_SIZE(wacom_ac_props); 545 wdata->ac.num_properties = ARRAY_SIZE(wacom_ac_props);
545 wdata->ac.get_property = wacom_ac_get_property; 546 wdata->ac.get_property = wacom_ac_get_property;
@@ -547,14 +548,14 @@ static int wacom_probe(struct hid_device *hdev,
547 wdata->ac.type = POWER_SUPPLY_TYPE_MAINS; 548 wdata->ac.type = POWER_SUPPLY_TYPE_MAINS;
548 wdata->ac.use_for_apm = 0; 549 wdata->ac.use_for_apm = 0;
549 550
550 power_supply_powers(&wdata->battery, &hdev->dev);
551
552 ret = power_supply_register(&hdev->dev, &wdata->ac); 551 ret = power_supply_register(&hdev->dev, &wdata->ac);
553 if (ret) { 552 if (ret) {
554 hid_warn(hdev, 553 hid_warn(hdev,
555 "can't create ac battery attribute, err: %d\n", ret); 554 "can't create ac battery attribute, err: %d\n", ret);
556 goto err_ac; 555 goto err_ac;
557 } 556 }
557
558 power_supply_powers(&wdata->ac, &hdev->dev);
558#endif 559#endif
559 return 0; 560 return 0;
560 561
diff --git a/drivers/hid/hid-wiimote-core.c b/drivers/hid/hid-wiimote-core.c
index fc253b472f9d..cac3589b1ed5 100644
--- a/drivers/hid/hid-wiimote-core.c
+++ b/drivers/hid/hid-wiimote-core.c
@@ -1226,14 +1226,14 @@ static int wiimote_hid_probe(struct hid_device *hdev,
1226 wdata->battery.type = POWER_SUPPLY_TYPE_BATTERY; 1226 wdata->battery.type = POWER_SUPPLY_TYPE_BATTERY;
1227 wdata->battery.use_for_apm = 0; 1227 wdata->battery.use_for_apm = 0;
1228 1228
1229 power_supply_powers(&wdata->battery, &hdev->dev);
1230
1231 ret = power_supply_register(&wdata->hdev->dev, &wdata->battery); 1229 ret = power_supply_register(&wdata->hdev->dev, &wdata->battery);
1232 if (ret) { 1230 if (ret) {
1233 hid_err(hdev, "Cannot register battery device\n"); 1231 hid_err(hdev, "Cannot register battery device\n");
1234 goto err_battery; 1232 goto err_battery;
1235 } 1233 }
1236 1234
1235 power_supply_powers(&wdata->battery, &hdev->dev);
1236
1237 ret = wiimote_leds_create(wdata); 1237 ret = wiimote_leds_create(wdata);
1238 if (ret) 1238 if (ret)
1239 goto err_free; 1239 goto err_free;
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 7c297d305d5d..b1ec0e2aeb57 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -922,11 +922,11 @@ void hiddev_disconnect(struct hid_device *hid)
922 struct hiddev *hiddev = hid->hiddev; 922 struct hiddev *hiddev = hid->hiddev;
923 struct usbhid_device *usbhid = hid->driver_data; 923 struct usbhid_device *usbhid = hid->driver_data;
924 924
925 usb_deregister_dev(usbhid->intf, &hiddev_class);
926
925 mutex_lock(&hiddev->existancelock); 927 mutex_lock(&hiddev->existancelock);
926 hiddev->exist = 0; 928 hiddev->exist = 0;
927 929
928 usb_deregister_dev(usbhid->intf, &hiddev_class);
929
930 if (hiddev->open) { 930 if (hiddev->open) {
931 mutex_unlock(&hiddev->existancelock); 931 mutex_unlock(&hiddev->existancelock);
932 usbhid_close(hiddev->hid); 932 usbhid_close(hiddev->hid);
diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
index eedf574ab539..f609b5727ba9 100644
--- a/drivers/hwmon/f75375s.c
+++ b/drivers/hwmon/f75375s.c
@@ -172,7 +172,7 @@ static inline void f75375_write8(struct i2c_client *client, u8 reg,
172static inline void f75375_write16(struct i2c_client *client, u8 reg, 172static inline void f75375_write16(struct i2c_client *client, u8 reg,
173 u16 value) 173 u16 value)
174{ 174{
175 int err = i2c_smbus_write_byte_data(client, reg, (value << 8)); 175 int err = i2c_smbus_write_byte_data(client, reg, (value >> 8));
176 if (err) 176 if (err)
177 return; 177 return;
178 i2c_smbus_write_byte_data(client, reg + 1, (value & 0xFF)); 178 i2c_smbus_write_byte_data(client, reg + 1, (value & 0xFF));
@@ -200,9 +200,6 @@ static struct f75375_data *f75375_update_device(struct device *dev)
200 f75375_read16(client, F75375_REG_FAN_MIN(nr)); 200 f75375_read16(client, F75375_REG_FAN_MIN(nr));
201 data->fan_target[nr] = 201 data->fan_target[nr] =
202 f75375_read16(client, F75375_REG_FAN_EXP(nr)); 202 f75375_read16(client, F75375_REG_FAN_EXP(nr));
203 data->pwm[nr] = f75375_read8(client,
204 F75375_REG_FAN_PWM_DUTY(nr));
205
206 } 203 }
207 for (nr = 0; nr < 4; nr++) { 204 for (nr = 0; nr < 4; nr++) {
208 data->in_max[nr] = 205 data->in_max[nr] =
@@ -218,6 +215,8 @@ static struct f75375_data *f75375_update_device(struct device *dev)
218 if (time_after(jiffies, data->last_updated + 2 * HZ) 215 if (time_after(jiffies, data->last_updated + 2 * HZ)
219 || !data->valid) { 216 || !data->valid) {
220 for (nr = 0; nr < 2; nr++) { 217 for (nr = 0; nr < 2; nr++) {
218 data->pwm[nr] = f75375_read8(client,
219 F75375_REG_FAN_PWM_DUTY(nr));
221 /* assign MSB, therefore shift it by 8 bits */ 220 /* assign MSB, therefore shift it by 8 bits */
222 data->temp11[nr] = 221 data->temp11[nr] =
223 f75375_read8(client, F75375_REG_TEMP(nr)) << 8; 222 f75375_read8(client, F75375_REG_TEMP(nr)) << 8;
@@ -369,7 +368,7 @@ static int set_pwm_enable_direct(struct i2c_client *client, int nr, int val)
369 fanmode |= (3 << FAN_CTRL_MODE(nr)); 368 fanmode |= (3 << FAN_CTRL_MODE(nr));
370 break; 369 break;
371 case 2: /* AUTOMATIC*/ 370 case 2: /* AUTOMATIC*/
372 fanmode |= (2 << FAN_CTRL_MODE(nr)); 371 fanmode |= (1 << FAN_CTRL_MODE(nr));
373 break; 372 break;
374 case 3: /* fan speed */ 373 case 3: /* fan speed */
375 break; 374 break;
@@ -723,7 +722,7 @@ static void f75375_init(struct i2c_client *client, struct f75375_data *data,
723 if (data->kind == f75387) { 722 if (data->kind == f75387) {
724 bool manu, duty; 723 bool manu, duty;
725 724
726 if (!(conf & (1 << F75387_FAN_CTRL_LINEAR(nr)))) 725 if (!(mode & (1 << F75387_FAN_CTRL_LINEAR(nr))))
727 data->pwm_mode[nr] = 1; 726 data->pwm_mode[nr] = 1;
728 727
729 manu = ((mode >> F75387_FAN_MANU_MODE(nr)) & 1); 728 manu = ((mode >> F75387_FAN_MANU_MODE(nr)) & 1);
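
On the first f75375s.c hunk above (f75375_write16): the register pair takes the high byte at reg and the low byte at reg + 1, but value << 8 passed through a byte-wide SMBus write always truncates to 0x00, whereas value >> 8 yields the actual MSB. A plain C check of the arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t value = 0x1234;

	uint8_t old_msb = (uint8_t)(value << 8);	/* truncates to 0x00 */
	uint8_t new_msb = (uint8_t)(value >> 8);	/* 0x12, the real MSB */
	uint8_t lsb     = (uint8_t)(value & 0xFF);	/* 0x34 */

	printf("old: %02x %02x\n", old_msb, lsb);	/* old: 00 34 */
	printf("new: %02x %02x\n", new_msb, lsb);	/* new: 12 34 */
	return 0;
}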
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index 2dfae7d7cc5b..5276d1933dbc 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -1920,9 +1920,26 @@ w83627ehf_check_fan_inputs(const struct w83627ehf_sio_data *sio_data,
1920 fan4min = 0; 1920 fan4min = 0;
1921 fan5pin = 0; 1921 fan5pin = 0;
1922 } else if (sio_data->kind == nct6776) { 1922 } else if (sio_data->kind == nct6776) {
1923 fan3pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x40); 1923 bool gpok = superio_inb(sio_data->sioreg, 0x27) & 0x80;
1924 fan4pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x01); 1924
1925 fan5pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x02); 1925 superio_select(sio_data->sioreg, W83627EHF_LD_HWM);
1926 regval = superio_inb(sio_data->sioreg, SIO_REG_ENABLE);
1927
1928 if (regval & 0x80)
1929 fan3pin = gpok;
1930 else
1931 fan3pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x40);
1932
1933 if (regval & 0x40)
1934 fan4pin = gpok;
1935 else
1936 fan4pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x01);
1937
1938 if (regval & 0x20)
1939 fan5pin = gpok;
1940 else
1941 fan5pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x02);
1942
1926 fan4min = fan4pin; 1943 fan4min = fan4pin;
1927 } else if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) { 1944 } else if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) {
1928 fan3pin = 1; 1945 fan3pin = 1;
@@ -2337,11 +2354,6 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
2337 for (i = 0; i < data->pwm_num; i++) 2354 for (i = 0; i < data->pwm_num; i++)
2338 data->pwm_enable_orig[i] = data->pwm_enable[i]; 2355 data->pwm_enable_orig[i] = data->pwm_enable[i];
2339 2356
2340 /* Read pwm data to save original values */
2341 w83627ehf_update_pwm_common(dev, data);
2342 for (i = 0; i < data->pwm_num; i++)
2343 data->pwm_enable_orig[i] = data->pwm_enable[i];
2344
2345 /* Register sysfs hooks */ 2357 /* Register sysfs hooks */
2346 for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays); i++) { 2358 for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays); i++) {
2347 err = device_create_file(dev, &sda_sf3_arrays[i].dev_attr); 2359 err = device_create_file(dev, &sda_sf3_arrays[i].dev_attr);
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index f713eac55047..801df6000e9b 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1018,7 +1018,7 @@ omap_i2c_probe(struct platform_device *pdev)
1018 goto err_release_region; 1018 goto err_release_region;
1019 } 1019 }
1020 1020
1021 match = of_match_device(omap_i2c_of_match, &pdev->dev); 1021 match = of_match_device(of_match_ptr(omap_i2c_of_match), &pdev->dev);
1022 if (match) { 1022 if (match) {
1023 u32 freq = 100000; /* default to 100000 Hz */ 1023 u32 freq = 100000; /* default to 100000 Hz */
1024 1024
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 6381604696d3..0ab4a9548745 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -755,7 +755,7 @@ MODULE_DEVICE_TABLE(of, tegra_i2c_of_match);
755 755
756static struct platform_driver tegra_i2c_driver = { 756static struct platform_driver tegra_i2c_driver = {
757 .probe = tegra_i2c_probe, 757 .probe = tegra_i2c_probe,
758 .remove = tegra_i2c_remove, 758 .remove = __devexit_p(tegra_i2c_remove),
759#ifdef CONFIG_PM 759#ifdef CONFIG_PM
760 .suspend = tegra_i2c_suspend, 760 .suspend = tegra_i2c_suspend,
761 .resume = tegra_i2c_resume, 761 .resume = tegra_i2c_resume,
diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile
index 7f879b2397b0..af8d016c37ea 100644
--- a/drivers/ide/Makefile
+++ b/drivers/ide/Makefile
@@ -116,4 +116,3 @@ obj-$(CONFIG_BLK_DEV_IDE_AU1XXX) += au1xxx-ide.o
116 116
117obj-$(CONFIG_BLK_DEV_IDE_TX4938) += tx4938ide.o 117obj-$(CONFIG_BLK_DEV_IDE_TX4938) += tx4938ide.o
118obj-$(CONFIG_BLK_DEV_IDE_TX4939) += tx4939ide.o 118obj-$(CONFIG_BLK_DEV_IDE_TX4939) += tx4939ide.o
119obj-$(CONFIG_BLK_DEV_IDE_AT91) += at91_ide.o
diff --git a/drivers/ide/at91_ide.c b/drivers/ide/at91_ide.c
deleted file mode 100644
index 41d415529479..000000000000
--- a/drivers/ide/at91_ide.c
+++ /dev/null
@@ -1,366 +0,0 @@
1/*
2 * IDE host driver for AT91 (SAM9, CAP9, AT572D940HF) Static Memory Controller
3 * with Compact Flash True IDE logic
4 *
5 * Copyright (c) 2008, 2009 Kelvatek Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 *
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/clk.h>
26#include <linux/err.h>
27#include <linux/ide.h>
28#include <linux/platform_device.h>
29
30#include <mach/board.h>
31#include <asm/gpio.h>
32#include <mach/at91sam9_smc.h>
33
34#define DRV_NAME "at91_ide"
35
36#define perr(fmt, args...) pr_err(DRV_NAME ": " fmt, ##args)
37#define pdbg(fmt, args...) pr_debug("%s " fmt, __func__, ##args)
38
39/*
40 * Access to IDE device is possible through EBI Static Memory Controller
41 * with Compact Flash logic. For details see EBI and SMC datasheet sections
42 * of any microcontroller from AT91SAM9 family.
43 *
44 * Within SMC chip select address space, lines A[23:21] distinguish Compact
45 * Flash modes (I/O, common memory, attribute memory, True IDE). IDE modes are:
46 * 0x00c0000 - True IDE
47 * 0x00e0000 - Alternate True IDE (Alt Status Register)
48 *
49 * On True IDE mode Task File and Data Register are mapped at the same address.
50 * To distinguish access between these two different bus data width is used:
51 * 8Bit for Task File, 16Bit for Data I/O.
52 *
53 * After initialization we do 8/16 bit flipping (changes in SMC MODE register)
54 * only inside IDE callback routines which are serialized by IDE layer,
55 * so no additional locking needed.
56 */
57
58#define TASK_FILE 0x00c00000
59#define ALT_MODE 0x00e00000
60#define REGS_SIZE 8
61
62#define enter_16bit(cs, mode) do { \
63 mode = at91_sys_read(AT91_SMC_MODE(cs)); \
64 at91_sys_write(AT91_SMC_MODE(cs), mode | AT91_SMC_DBW_16); \
65} while (0)
66
67#define leave_16bit(cs, mode) at91_sys_write(AT91_SMC_MODE(cs), mode);
68
69static void set_smc_timings(const u8 chipselect, const u16 cycle,
70 const u16 setup, const u16 pulse,
71 const u16 data_float, int use_iordy)
72{
73 unsigned long mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE |
74 AT91_SMC_BAT_SELECT;
75
76 /* disable or enable waiting for IORDY signal */
77 if (use_iordy)
78 mode |= AT91_SMC_EXNWMODE_READY;
79
80 /* add data float cycles if needed */
81 if (data_float)
82 mode |= AT91_SMC_TDF_(data_float);
83
84 at91_sys_write(AT91_SMC_MODE(chipselect), mode);
85
86 /* setup timings in SMC */
87 at91_sys_write(AT91_SMC_SETUP(chipselect), AT91_SMC_NWESETUP_(setup) |
88 AT91_SMC_NCS_WRSETUP_(0) |
89 AT91_SMC_NRDSETUP_(setup) |
90 AT91_SMC_NCS_RDSETUP_(0));
91 at91_sys_write(AT91_SMC_PULSE(chipselect), AT91_SMC_NWEPULSE_(pulse) |
92 AT91_SMC_NCS_WRPULSE_(cycle) |
93 AT91_SMC_NRDPULSE_(pulse) |
94 AT91_SMC_NCS_RDPULSE_(cycle));
95 at91_sys_write(AT91_SMC_CYCLE(chipselect), AT91_SMC_NWECYCLE_(cycle) |
96 AT91_SMC_NRDCYCLE_(cycle));
97}
98
99static unsigned int calc_mck_cycles(unsigned int ns, unsigned int mck_hz)
100{
101 u64 tmp = ns;
102
103 tmp *= mck_hz;
104 tmp += 1000*1000*1000 - 1; /* round up */
105 do_div(tmp, 1000*1000*1000);
106 return (unsigned int) tmp;
107}
108
109static void apply_timings(const u8 chipselect, const u8 pio,
110 const struct ide_timing *timing, int use_iordy)
111{
112 unsigned int t0, t1, t2, t6z;
113 unsigned int cycle, setup, pulse, data_float;
114 unsigned int mck_hz;
115 struct clk *mck;
116
117 /* see table 22 of Compact Flash standard 4.1 for the meaning,
118 * we do not stretch active (t2) time, so setup (t1) + hold time (th)
119 * assure at least minimal recovery (t2i) time */
120 t0 = timing->cyc8b;
121 t1 = timing->setup;
122 t2 = timing->act8b;
123 t6z = (pio < 5) ? 30 : 20;
124
125 pdbg("t0=%u t1=%u t2=%u t6z=%u\n", t0, t1, t2, t6z);
126
127 mck = clk_get(NULL, "mck");
128 BUG_ON(IS_ERR(mck));
129 mck_hz = clk_get_rate(mck);
130 pdbg("mck_hz=%u\n", mck_hz);
131
132 cycle = calc_mck_cycles(t0, mck_hz);
133 setup = calc_mck_cycles(t1, mck_hz);
134 pulse = calc_mck_cycles(t2, mck_hz);
135 data_float = calc_mck_cycles(t6z, mck_hz);
136
137 pdbg("cycle=%u setup=%u pulse=%u data_float=%u\n",
138 cycle, setup, pulse, data_float);
139
140 set_smc_timings(chipselect, cycle, setup, pulse, data_float, use_iordy);
141}
142
143static void at91_ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd,
144 void *buf, unsigned int len)
145{
146 ide_hwif_t *hwif = drive->hwif;
147 struct ide_io_ports *io_ports = &hwif->io_ports;
148 u8 chipselect = hwif->select_data;
149 unsigned long mode;
150
151 pdbg("cs %u buf %p len %d\n", chipselect, buf, len);
152
153 len++;
154
155 enter_16bit(chipselect, mode);
156 readsw((void __iomem *)io_ports->data_addr, buf, len / 2);
157 leave_16bit(chipselect, mode);
158}
159
160static void at91_ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd,
161 void *buf, unsigned int len)
162{
163 ide_hwif_t *hwif = drive->hwif;
164 struct ide_io_ports *io_ports = &hwif->io_ports;
165 u8 chipselect = hwif->select_data;
166 unsigned long mode;
167
168 pdbg("cs %u buf %p len %d\n", chipselect, buf, len);
169
170 enter_16bit(chipselect, mode);
171 writesw((void __iomem *)io_ports->data_addr, buf, len / 2);
172 leave_16bit(chipselect, mode);
173}
174
175static void at91_ide_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
176{
177 struct ide_timing *timing;
178 u8 chipselect = hwif->select_data;
179 int use_iordy = 0;
180 const u8 pio = drive->pio_mode - XFER_PIO_0;
181
182 pdbg("chipselect %u pio %u\n", chipselect, pio);
183
184 timing = ide_timing_find_mode(XFER_PIO_0 + pio);
185 BUG_ON(!timing);
186
187 if (ide_pio_need_iordy(drive, pio))
188 use_iordy = 1;
189
190 apply_timings(chipselect, pio, timing, use_iordy);
191}
192
193static const struct ide_tp_ops at91_ide_tp_ops = {
194 .exec_command = ide_exec_command,
195 .read_status = ide_read_status,
196 .read_altstatus = ide_read_altstatus,
197 .write_devctl = ide_write_devctl,
198
199 .dev_select = ide_dev_select,
200 .tf_load = ide_tf_load,
201 .tf_read = ide_tf_read,
202
203 .input_data = at91_ide_input_data,
204 .output_data = at91_ide_output_data,
205};
206
207static const struct ide_port_ops at91_ide_port_ops = {
208 .set_pio_mode = at91_ide_set_pio_mode,
209};
210
211static const struct ide_port_info at91_ide_port_info __initdata = {
212 .port_ops = &at91_ide_port_ops,
213 .tp_ops = &at91_ide_tp_ops,
214 .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA | IDE_HFLAG_SINGLE |
215 IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_UNMASK_IRQS,
216 .pio_mask = ATA_PIO6,
217 .chipset = ide_generic,
218};
219
220/*
221 * If interrupt is delivered through GPIO, IRQ are triggered on falling
222 * and rising edge of signal. Whereas IDE device request interrupt on high
223 * level (rising edge in our case). This mean we have fake interrupts, so
224 * we need to check interrupt pin and exit instantly from ISR when line
225 * is on low level.
226 */
227
228irqreturn_t at91_irq_handler(int irq, void *dev_id)
229{
230 int ntries = 8;
231 int pin_val1, pin_val2;
232
233 /* additional deglitch, line can be noisy in badly designed PCB */
234 do {
235 pin_val1 = at91_get_gpio_value(irq);
236 pin_val2 = at91_get_gpio_value(irq);
237 } while (pin_val1 != pin_val2 && --ntries > 0);
238
239 if (pin_val1 == 0 || ntries <= 0)
240 return IRQ_HANDLED;
241
242 return ide_intr(irq, dev_id);
243}
244
245static int __init at91_ide_probe(struct platform_device *pdev)
246{
247 int ret;
248 struct ide_hw hw, *hws[] = { &hw };
249 struct ide_host *host;
250 struct resource *res;
251 unsigned long tf_base = 0, ctl_base = 0;
252 struct at91_cf_data *board = pdev->dev.platform_data;
253
254 if (!board)
255 return -ENODEV;
256
257 if (board->det_pin && at91_get_gpio_value(board->det_pin) != 0) {
258 perr("no device detected\n");
259 return -ENODEV;
260 }
261
262 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
263 if (!res) {
264 perr("can't get memory resource\n");
265 return -ENODEV;
266 }
267
268 if (!devm_request_mem_region(&pdev->dev, res->start + TASK_FILE,
269 REGS_SIZE, "ide") ||
270 !devm_request_mem_region(&pdev->dev, res->start + ALT_MODE,
271 REGS_SIZE, "alt")) {
272 perr("memory resources in use\n");
273 return -EBUSY;
274 }
275
276 pdbg("chipselect %u irq %u res %08lx\n", board->chipselect,
277 board->irq_pin, (unsigned long) res->start);
278
279 tf_base = (unsigned long) devm_ioremap(&pdev->dev, res->start + TASK_FILE,
280 REGS_SIZE);
281 ctl_base = (unsigned long) devm_ioremap(&pdev->dev, res->start + ALT_MODE,
282 REGS_SIZE);
283 if (!tf_base || !ctl_base) {
284 perr("can't map memory regions\n");
285 return -EBUSY;
286 }
287
288 memset(&hw, 0, sizeof(hw));
289
290 if (board->flags & AT91_IDE_SWAP_A0_A2) {
291 /* workaround for stupid hardware bug */
292 hw.io_ports.data_addr = tf_base + 0;
293 hw.io_ports.error_addr = tf_base + 4;
294 hw.io_ports.nsect_addr = tf_base + 2;
295 hw.io_ports.lbal_addr = tf_base + 6;
296 hw.io_ports.lbam_addr = tf_base + 1;
297 hw.io_ports.lbah_addr = tf_base + 5;
298 hw.io_ports.device_addr = tf_base + 3;
299 hw.io_ports.command_addr = tf_base + 7;
300 hw.io_ports.ctl_addr = ctl_base + 3;
301 } else
302 ide_std_init_ports(&hw, tf_base, ctl_base + 6);
303
304 hw.irq = board->irq_pin;
305 hw.dev = &pdev->dev;
306
307 host = ide_host_alloc(&at91_ide_port_info, hws, 1);
308 if (!host) {
309 perr("failed to allocate ide host\n");
310 return -ENOMEM;
311 }
312
313 /* setup Static Memory Controller - PIO 0 as default */
314 apply_timings(board->chipselect, 0, ide_timing_find_mode(XFER_PIO_0), 0);
315
316 /* with GPIO interrupt we have to do quirks in handler */
317 if (gpio_is_valid(board->irq_pin))
318 host->irq_handler = at91_irq_handler;
319
320 host->ports[0]->select_data = board->chipselect;
321
322 ret = ide_host_register(host, &at91_ide_port_info, hws);
323 if (ret) {
324 perr("failed to register ide host\n");
325 goto err_free_host;
326 }
327 platform_set_drvdata(pdev, host);
328 return 0;
329
330err_free_host:
331 ide_host_free(host);
332 return ret;
333}
334
335static int __exit at91_ide_remove(struct platform_device *pdev)
336{
337 struct ide_host *host = platform_get_drvdata(pdev);
338
339 ide_host_remove(host);
340 return 0;
341}
342
343static struct platform_driver at91_ide_driver = {
344 .driver = {
345 .name = DRV_NAME,
346 .owner = THIS_MODULE,
347 },
348 .remove = __exit_p(at91_ide_remove),
349};
350
351static int __init at91_ide_init(void)
352{
353 return platform_driver_probe(&at91_ide_driver, at91_ide_probe);
354}
355
356static void __exit at91_ide_exit(void)
357{
358 platform_driver_unregister(&at91_ide_driver);
359}
360
361module_init(at91_ide_init);
362module_exit(at91_ide_exit);
363
364MODULE_LICENSE("GPL");
365MODULE_AUTHOR("Stanislaw Gruszka <stf_xl@wp.pl>");
366
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index b37b0c02a7b9..5034a87cc72d 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -808,9 +808,12 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
808 return PTR_ERR(ctx); 808 return PTR_ERR(ctx);
809 809
810 if (cmd.conn_param.valid) { 810 if (cmd.conn_param.valid) {
811 ctx->uid = cmd.uid;
812 ucma_copy_conn_param(&conn_param, &cmd.conn_param); 811 ucma_copy_conn_param(&conn_param, &cmd.conn_param);
812 mutex_lock(&file->mut);
813 ret = rdma_accept(ctx->cm_id, &conn_param); 813 ret = rdma_accept(ctx->cm_id, &conn_param);
814 if (!ret)
815 ctx->uid = cmd.uid;
816 mutex_unlock(&file->mut);
814 } else 817 } else
815 ret = rdma_accept(ctx->cm_id, NULL); 818 ret = rdma_accept(ctx->cm_id, NULL);
816 819
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index b930da4c0c63..4d27e4c3fe34 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1485,6 +1485,7 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
1485 qp->event_handler = attr.event_handler; 1485 qp->event_handler = attr.event_handler;
1486 qp->qp_context = attr.qp_context; 1486 qp->qp_context = attr.qp_context;
1487 qp->qp_type = attr.qp_type; 1487 qp->qp_type = attr.qp_type;
1488 atomic_set(&qp->usecnt, 0);
1488 atomic_inc(&pd->usecnt); 1489 atomic_inc(&pd->usecnt);
1489 atomic_inc(&attr.send_cq->usecnt); 1490 atomic_inc(&attr.send_cq->usecnt);
1490 if (attr.recv_cq) 1491 if (attr.recv_cq)
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 602b1bd723a9..575b78045aaf 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -421,6 +421,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
421 qp->uobject = NULL; 421 qp->uobject = NULL;
422 qp->qp_type = qp_init_attr->qp_type; 422 qp->qp_type = qp_init_attr->qp_type;
423 423
424 atomic_set(&qp->usecnt, 0);
424 if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) { 425 if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
425 qp->event_handler = __ib_shared_qp_event_handler; 426 qp->event_handler = __ib_shared_qp_event_handler;
426 qp->qp_context = qp; 427 qp->qp_context = qp;
@@ -430,7 +431,6 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
430 qp->xrcd = qp_init_attr->xrcd; 431 qp->xrcd = qp_init_attr->xrcd;
431 atomic_inc(&qp_init_attr->xrcd->usecnt); 432 atomic_inc(&qp_init_attr->xrcd->usecnt);
432 INIT_LIST_HEAD(&qp->open_list); 433 INIT_LIST_HEAD(&qp->open_list);
433 atomic_set(&qp->usecnt, 0);
434 434
435 real_qp = qp; 435 real_qp = qp;
436 qp = __ib_open_qp(real_qp, qp_init_attr->event_handler, 436 qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index b7d4216db3c3..a4de9d58e9b4 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -89,7 +89,7 @@ static int create_file(const char *name, umode_t mode,
89 error = ipathfs_mknod(parent->d_inode, *dentry, 89 error = ipathfs_mknod(parent->d_inode, *dentry,
90 mode, fops, data); 90 mode, fops, data);
91 else 91 else
92 error = PTR_ERR(dentry); 92 error = PTR_ERR(*dentry);
93 mutex_unlock(&parent->d_inode->i_mutex); 93 mutex_unlock(&parent->d_inode->i_mutex);
94 94
95 return error; 95 return error;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 95c94d8f0254..259b0670b51c 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -257,12 +257,9 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
257 return IB_MAD_RESULT_SUCCESS; 257 return IB_MAD_RESULT_SUCCESS;
258 258
259 /* 259 /*
260 * Don't process SMInfo queries or vendor-specific 260 * Don't process SMInfo queries -- the SMA can't handle them.
261 * MADs -- the SMA can't handle them.
262 */ 261 */
263 if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO || 262 if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
264 ((in_mad->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) ==
265 IB_SMP_ATTR_VENDOR_MASK))
266 return IB_MAD_RESULT_SUCCESS; 263 return IB_MAD_RESULT_SUCCESS;
267 } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT || 264 } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
268 in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS1 || 265 in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS1 ||
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 7013da5e9eda..7140199f562e 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved. 2 * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. 3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index 568b4f11380a..c438e4691b3c 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved. 2 * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. 3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 425065b36b8c..a4972abedef1 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved. 2 * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
@@ -233,6 +233,7 @@ static int send_mpa_reject(struct nes_cm_node *cm_node)
233 u8 *start_ptr = &start_addr; 233 u8 *start_ptr = &start_addr;
234 u8 **start_buff = &start_ptr; 234 u8 **start_buff = &start_ptr;
235 u16 buff_len = 0; 235 u16 buff_len = 0;
236 struct ietf_mpa_v1 *mpa_frame;
236 237
237 skb = dev_alloc_skb(MAX_CM_BUFFER); 238 skb = dev_alloc_skb(MAX_CM_BUFFER);
238 if (!skb) { 239 if (!skb) {
@@ -242,6 +243,8 @@ static int send_mpa_reject(struct nes_cm_node *cm_node)
242 243
243 /* send an MPA reject frame */ 244 /* send an MPA reject frame */
244 cm_build_mpa_frame(cm_node, start_buff, &buff_len, NULL, MPA_KEY_REPLY); 245 cm_build_mpa_frame(cm_node, start_buff, &buff_len, NULL, MPA_KEY_REPLY);
246 mpa_frame = (struct ietf_mpa_v1 *)*start_buff;
247 mpa_frame->flags |= IETF_MPA_FLAGS_REJECT;
245 form_cm_frame(skb, cm_node, NULL, 0, *start_buff, buff_len, SET_ACK | SET_FIN); 248 form_cm_frame(skb, cm_node, NULL, 0, *start_buff, buff_len, SET_ACK | SET_FIN);
246 249
247 cm_node->state = NES_CM_STATE_FIN_WAIT1; 250 cm_node->state = NES_CM_STATE_FIN_WAIT1;
@@ -1360,8 +1363,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
1360 if (!memcmp(nesadapter->arp_table[arpindex].mac_addr, 1363 if (!memcmp(nesadapter->arp_table[arpindex].mac_addr,
1361 neigh->ha, ETH_ALEN)) { 1364 neigh->ha, ETH_ALEN)) {
1362 /* Mac address same as in nes_arp_table */ 1365 /* Mac address same as in nes_arp_table */
1363 ip_rt_put(rt); 1366 goto out;
1364 return rc;
1365 } 1367 }
1366 1368
1367 nes_manage_arp_cache(nesvnic->netdev, 1369 nes_manage_arp_cache(nesvnic->netdev,
@@ -1377,6 +1379,8 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
1377 neigh_event_send(neigh, NULL); 1379 neigh_event_send(neigh, NULL);
1378 } 1380 }
1379 } 1381 }
1382
1383out:
1380 rcu_read_unlock(); 1384 rcu_read_unlock();
1381 ip_rt_put(rt); 1385 ip_rt_put(rt);
1382 return rc; 1386 return rc;
diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h
index bdfa1fbb35fc..4646e6666087 100644
--- a/drivers/infiniband/hw/nes/nes_cm.h
+++ b/drivers/infiniband/hw/nes/nes_cm.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved. 2 * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/nes/nes_context.h b/drivers/infiniband/hw/nes/nes_context.h
index b4393a16099d..a69eef16d72d 100644
--- a/drivers/infiniband/hw/nes/nes_context.h
+++ b/drivers/infiniband/hw/nes/nes_context.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved. 2 * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 055f4b545df0..d42c9f435b1b 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved. 2 * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index 0b590e152c6a..d748e4b31b8d 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -1,5 +1,5 @@
1/* 1/*
2* Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved. 2* Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
3* 3*
4* This software is available to you under a choice of one of two 4* This software is available to you under a choice of one of two
5* licenses. You may choose to be licensed under the terms of the GNU 5* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
index b3b2a240c6e9..3ba7be369452 100644
--- a/drivers/infiniband/hw/nes/nes_mgt.c
+++ b/drivers/infiniband/hw/nes/nes_mgt.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved. 2 * Copyright (c) 2006 - 2011 Intel-NE, Inc. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/nes/nes_mgt.h b/drivers/infiniband/hw/nes/nes_mgt.h
index 8c8af254555a..4f7f701c4a81 100644
--- a/drivers/infiniband/hw/nes/nes_mgt.h
+++ b/drivers/infiniband/hw/nes/nes_mgt.h
@@ -1,5 +1,5 @@
1/* 1/*
2* Copyright (c) 2010 Intel-NE, Inc. All rights reserved. 2* Copyright (c) 2006 - 2011 Intel-NE, Inc. All rights reserved.
3* 3*
4* This software is available to you under a choice of one of two 4* This software is available to you under a choice of one of two
5* licenses. You may choose to be licensed under the terms of the GNU 5* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 4b3fa711a247..f3a3ecf8d09e 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved. 2 * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/nes/nes_user.h b/drivers/infiniband/hw/nes/nes_user.h
index 71e133ab209b..4926de744488 100644
--- a/drivers/infiniband/hw/nes/nes_user.h
+++ b/drivers/infiniband/hw/nes/nes_user.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved. 2 * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
3 * Copyright (c) 2005 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005 Topspin Communications. All rights reserved.
4 * Copyright (c) 2005 Cisco Systems. All rights reserved. 4 * Copyright (c) 2005 Cisco Systems. All rights reserved.
5 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. 5 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
index 8b4c2ff54888..e98f4fc0b768 100644
--- a/drivers/infiniband/hw/nes/nes_utils.c
+++ b/drivers/infiniband/hw/nes/nes_utils.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved. 2 * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 5095bc41c6cc..0927b5cc65d3 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved. 2 * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
@@ -3428,6 +3428,8 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
3428 NES_IWARP_SQ_FMR_WQE_LENGTH_LOW_IDX, 3428 NES_IWARP_SQ_FMR_WQE_LENGTH_LOW_IDX,
3429 ib_wr->wr.fast_reg.length); 3429 ib_wr->wr.fast_reg.length);
3430 set_wqe_32bit_value(wqe->wqe_words, 3430 set_wqe_32bit_value(wqe->wqe_words,
3431 NES_IWARP_SQ_FMR_WQE_LENGTH_HIGH_IDX, 0);
3432 set_wqe_32bit_value(wqe->wqe_words,
3431 NES_IWARP_SQ_FMR_WQE_MR_STAG_IDX, 3433 NES_IWARP_SQ_FMR_WQE_MR_STAG_IDX,
3432 ib_wr->wr.fast_reg.rkey); 3434 ib_wr->wr.fast_reg.rkey);
3433 /* Set page size: */ 3435 /* Set page size: */
@@ -3724,7 +3726,7 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
3724 entry->opcode = IB_WC_SEND; 3726 entry->opcode = IB_WC_SEND;
3725 break; 3727 break;
3726 case NES_IWARP_SQ_OP_LOCINV: 3728 case NES_IWARP_SQ_OP_LOCINV:
3727 entry->opcode = IB_WR_LOCAL_INV; 3729 entry->opcode = IB_WC_LOCAL_INV;
3728 break; 3730 break;
3729 case NES_IWARP_SQ_OP_FAST_REG: 3731 case NES_IWARP_SQ_OP_FAST_REG:
3730 entry->opcode = IB_WC_FAST_REG_MR; 3732 entry->opcode = IB_WC_FAST_REG_MR;
diff --git a/drivers/infiniband/hw/nes/nes_verbs.h b/drivers/infiniband/hw/nes/nes_verbs.h
index fe6b6e92fa90..0eff7c44d76b 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.h
+++ b/drivers/infiniband/hw/nes/nes_verbs.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved. 2 * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. 3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index 4f18e2d332df..d0c64d514813 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -2105,7 +2105,7 @@ static void alloc_dummy_hdrq(struct qib_devdata *dd)
2105 dd->cspec->dummy_hdrq = dma_alloc_coherent(&dd->pcidev->dev, 2105 dd->cspec->dummy_hdrq = dma_alloc_coherent(&dd->pcidev->dev,
2106 dd->rcd[0]->rcvhdrq_size, 2106 dd->rcd[0]->rcvhdrq_size,
2107 &dd->cspec->dummy_hdrq_phys, 2107 &dd->cspec->dummy_hdrq_phys,
2108 GFP_KERNEL | __GFP_COMP); 2108 GFP_ATOMIC | __GFP_COMP);
2109 if (!dd->cspec->dummy_hdrq) { 2109 if (!dd->cspec->dummy_hdrq) {
2110 qib_devinfo(dd->pcidev, "Couldn't allocate dummy hdrq\n"); 2110 qib_devinfo(dd->pcidev, "Couldn't allocate dummy hdrq\n");
2111 /* fallback to just 0'ing */ 2111 /* fallback to just 0'ing */
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index f695061d688e..0fde788e1100 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -560,7 +560,7 @@ static int qib_tune_pcie_coalesce(struct qib_devdata *dd)
560 * BIOS may not set PCIe bus-utilization parameters for best performance. 560 * BIOS may not set PCIe bus-utilization parameters for best performance.
561 * Check and optionally adjust them to maximize our throughput. 561 * Check and optionally adjust them to maximize our throughput.
562 */ 562 */
563static int qib_pcie_caps = 0x51; 563static int qib_pcie_caps;
564module_param_named(pcie_caps, qib_pcie_caps, int, S_IRUGO); 564module_param_named(pcie_caps, qib_pcie_caps, int, S_IRUGO);
565MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (0..3), ReadReq (4..7)"); 565MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (0..3), ReadReq (4..7)");
566 566
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index b3cc1e062b17..86df632ea612 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -44,6 +44,7 @@
44#include <linux/mutex.h> 44#include <linux/mutex.h>
45 45
46#include <net/neighbour.h> 46#include <net/neighbour.h>
47#include <net/sch_generic.h>
47 48
48#include <linux/atomic.h> 49#include <linux/atomic.h>
49 50
@@ -117,8 +118,9 @@ struct ipoib_header {
117 u16 reserved; 118 u16 reserved;
118}; 119};
119 120
120struct ipoib_pseudoheader { 121struct ipoib_cb {
121 u8 hwaddr[INFINIBAND_ALEN]; 122 struct qdisc_skb_cb qdisc_cb;
123 u8 hwaddr[INFINIBAND_ALEN];
122}; 124};
123 125
124/* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */ 126/* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
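The new struct ipoib_cb keeps the destination hardware address in skb->cb instead of pushing a pseudoheader onto the packet data; the leading qdisc_skb_cb member reserves the part of the 48-byte control block that the queueing discipline layer writes to. A minimal sketch of that layout and access pattern, using a hypothetical driver-private structure rather than IPoIB's own:

#include <linux/skbuff.h>
#include <net/sch_generic.h>

/* Hypothetical per-packet state kept in skb->cb (sketch, not IPoIB itself). */
struct mydrv_cb {
	struct qdisc_skb_cb	qdisc_cb;	/* must stay first: qdisc code uses this area */
	u8			hwaddr[20];	/* driver-private data lives after it         */
};

static inline struct mydrv_cb *mydrv_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct mydrv_cb) > sizeof(skb->cb));
	return (struct mydrv_cb *)skb->cb;
}

The header-building callback can then copy the destination address into hwaddr, and the transmit path can read it back later without touching the packet payload.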
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 3514ca05deea..3974c290b667 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -653,7 +653,7 @@ static void ipoib_path_lookup(struct sk_buff *skb, struct neighbour *n, struct n
653} 653}
654 654
655static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, 655static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
656 struct ipoib_pseudoheader *phdr) 656 struct ipoib_cb *cb)
657{ 657{
658 struct ipoib_dev_priv *priv = netdev_priv(dev); 658 struct ipoib_dev_priv *priv = netdev_priv(dev);
659 struct ipoib_path *path; 659 struct ipoib_path *path;
@@ -661,17 +661,15 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
661 661
662 spin_lock_irqsave(&priv->lock, flags); 662 spin_lock_irqsave(&priv->lock, flags);
663 663
664 path = __path_find(dev, phdr->hwaddr + 4); 664 path = __path_find(dev, cb->hwaddr + 4);
665 if (!path || !path->valid) { 665 if (!path || !path->valid) {
666 int new_path = 0; 666 int new_path = 0;
667 667
668 if (!path) { 668 if (!path) {
669 path = path_rec_create(dev, phdr->hwaddr + 4); 669 path = path_rec_create(dev, cb->hwaddr + 4);
670 new_path = 1; 670 new_path = 1;
671 } 671 }
672 if (path) { 672 if (path) {
673 /* put pseudoheader back on for next time */
674 skb_push(skb, sizeof *phdr);
675 __skb_queue_tail(&path->queue, skb); 673 __skb_queue_tail(&path->queue, skb);
676 674
677 if (!path->query && path_rec_start(dev, path)) { 675 if (!path->query && path_rec_start(dev, path)) {
@@ -695,12 +693,10 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
695 be16_to_cpu(path->pathrec.dlid)); 693 be16_to_cpu(path->pathrec.dlid));
696 694
697 spin_unlock_irqrestore(&priv->lock, flags); 695 spin_unlock_irqrestore(&priv->lock, flags);
698 ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr)); 696 ipoib_send(dev, skb, path->ah, IPOIB_QPN(cb->hwaddr));
699 return; 697 return;
700 } else if ((path->query || !path_rec_start(dev, path)) && 698 } else if ((path->query || !path_rec_start(dev, path)) &&
701 skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) { 699 skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
702 /* put pseudoheader back on for next time */
703 skb_push(skb, sizeof *phdr);
704 __skb_queue_tail(&path->queue, skb); 700 __skb_queue_tail(&path->queue, skb);
705 } else { 701 } else {
706 ++dev->stats.tx_dropped; 702 ++dev->stats.tx_dropped;
@@ -774,16 +770,14 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
774 dev_kfree_skb_any(skb); 770 dev_kfree_skb_any(skb);
775 } 771 }
776 } else { 772 } else {
777 struct ipoib_pseudoheader *phdr = 773 struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb;
778 (struct ipoib_pseudoheader *) skb->data;
779 skb_pull(skb, sizeof *phdr);
780 774
781 if (phdr->hwaddr[4] == 0xff) { 775 if (cb->hwaddr[4] == 0xff) {
782 /* Add in the P_Key for multicast*/ 776 /* Add in the P_Key for multicast*/
783 phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff; 777 cb->hwaddr[8] = (priv->pkey >> 8) & 0xff;
784 phdr->hwaddr[9] = priv->pkey & 0xff; 778 cb->hwaddr[9] = priv->pkey & 0xff;
785 779
786 ipoib_mcast_send(dev, phdr->hwaddr + 4, skb); 780 ipoib_mcast_send(dev, cb->hwaddr + 4, skb);
787 } else { 781 } else {
788 /* unicast GID -- should be ARP or RARP reply */ 782 /* unicast GID -- should be ARP or RARP reply */
789 783
@@ -792,14 +786,14 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
792 ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x %pI6\n", 786 ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x %pI6\n",
793 skb_dst(skb) ? "neigh" : "dst", 787 skb_dst(skb) ? "neigh" : "dst",
794 be16_to_cpup((__be16 *) skb->data), 788 be16_to_cpup((__be16 *) skb->data),
795 IPOIB_QPN(phdr->hwaddr), 789 IPOIB_QPN(cb->hwaddr),
796 phdr->hwaddr + 4); 790 cb->hwaddr + 4);
797 dev_kfree_skb_any(skb); 791 dev_kfree_skb_any(skb);
798 ++dev->stats.tx_dropped; 792 ++dev->stats.tx_dropped;
799 goto unlock; 793 goto unlock;
800 } 794 }
801 795
802 unicast_arp_send(skb, dev, phdr); 796 unicast_arp_send(skb, dev, cb);
803 } 797 }
804 } 798 }
805unlock: 799unlock:
@@ -825,8 +819,6 @@ static int ipoib_hard_header(struct sk_buff *skb,
825 const void *daddr, const void *saddr, unsigned len) 819 const void *daddr, const void *saddr, unsigned len)
826{ 820{
827 struct ipoib_header *header; 821 struct ipoib_header *header;
828 struct dst_entry *dst;
829 struct neighbour *n;
830 822
831 header = (struct ipoib_header *) skb_push(skb, sizeof *header); 823 header = (struct ipoib_header *) skb_push(skb, sizeof *header);
832 824
@@ -834,18 +826,13 @@ static int ipoib_hard_header(struct sk_buff *skb,
834 header->reserved = 0; 826 header->reserved = 0;
835 827
836 /* 828 /*
837 * If we don't have a neighbour structure, stuff the 829 * If we don't have a dst_entry structure, stuff the
838 * destination address onto the front of the skb so we can 830 * destination address into skb->cb so we can figure out where
839 * figure out where to send the packet later. 831 * to send the packet later.
840 */ 832 */
841 dst = skb_dst(skb); 833 if (!skb_dst(skb)) {
842 n = NULL; 834 struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb;
843 if (dst) 835 memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);
844 n = dst_get_neighbour_noref_raw(dst);
845 if ((!dst || !n) && daddr) {
846 struct ipoib_pseudoheader *phdr =
847 (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
848 memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
849 } 836 }
850 837
851 return 0; 838 return 0;
@@ -1021,11 +1008,7 @@ static void ipoib_setup(struct net_device *dev)
1021 1008
1022 dev->flags |= IFF_BROADCAST | IFF_MULTICAST; 1009 dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
1023 1010
1024 /* 1011 dev->hard_header_len = IPOIB_ENCAP_LEN;
1025 * We add in INFINIBAND_ALEN to allow for the destination
1026 * address "pseudoheader" for skbs without neighbour struct.
1027 */
1028 dev->hard_header_len = IPOIB_ENCAP_LEN + INFINIBAND_ALEN;
1029 dev->addr_len = INFINIBAND_ALEN; 1012 dev->addr_len = INFINIBAND_ALEN;
1030 dev->type = ARPHRD_INFINIBAND; 1013 dev->type = ARPHRD_INFINIBAND;
1031 dev->tx_queue_len = ipoib_sendq_size * 2; 1014 dev->tx_queue_len = ipoib_sendq_size * 2;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index f7ff9dd66cda..20ebc6fd1bb9 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -262,21 +262,13 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
262 netif_tx_lock_bh(dev); 262 netif_tx_lock_bh(dev);
263 while (!skb_queue_empty(&mcast->pkt_queue)) { 263 while (!skb_queue_empty(&mcast->pkt_queue)) {
264 struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue); 264 struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);
265 struct dst_entry *dst = skb_dst(skb);
266 struct neighbour *n = NULL;
267 265
268 netif_tx_unlock_bh(dev); 266 netif_tx_unlock_bh(dev);
269 267
270 skb->dev = dev; 268 skb->dev = dev;
271 if (dst)
272 n = dst_get_neighbour_noref_raw(dst);
273 if (!dst || !n) {
274 /* put pseudoheader back on for next time */
275 skb_push(skb, sizeof (struct ipoib_pseudoheader));
276 }
277
278 if (dev_queue_xmit(skb)) 269 if (dev_queue_xmit(skb))
279 ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n"); 270 ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n");
271
280 netif_tx_lock_bh(dev); 272 netif_tx_lock_bh(dev);
281 } 273 }
282 netif_tx_unlock_bh(dev); 274 netif_tx_unlock_bh(dev);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index cd5d05e22a77..2b73d43cd691 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -69,8 +69,8 @@ MODULE_LICENSE("Dual BSD/GPL");
69 */ 69 */
70 70
71static u64 srpt_service_guid; 71static u64 srpt_service_guid;
72static spinlock_t srpt_dev_lock; /* Protects srpt_dev_list. */ 72static DEFINE_SPINLOCK(srpt_dev_lock); /* Protects srpt_dev_list. */
73static struct list_head srpt_dev_list; /* List of srpt_device structures. */ 73static LIST_HEAD(srpt_dev_list); /* List of srpt_device structures. */
74 74
75static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE; 75static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
76module_param(srp_max_req_size, int, 0444); 76module_param(srp_max_req_size, int, 0444);
@@ -687,6 +687,7 @@ err:
687 while (--i >= 0) 687 while (--i >= 0)
688 srpt_free_ioctx(sdev, ring[i], dma_size, dir); 688 srpt_free_ioctx(sdev, ring[i], dma_size, dir);
689 kfree(ring); 689 kfree(ring);
690 ring = NULL;
690out: 691out:
691 return ring; 692 return ring;
692} 693}
@@ -2595,7 +2596,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2595 } 2596 }
2596 2597
2597 ch->sess = transport_init_session(); 2598 ch->sess = transport_init_session();
2598 if (!ch->sess) { 2599 if (IS_ERR(ch->sess)) {
2599 rej->reason = __constant_cpu_to_be32( 2600 rej->reason = __constant_cpu_to_be32(
2600 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); 2601 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2601 pr_debug("Failed to create session\n"); 2602 pr_debug("Failed to create session\n");
@@ -3264,8 +3265,7 @@ static void srpt_add_one(struct ib_device *device)
3264 for (i = 0; i < sdev->srq_size; ++i) 3265 for (i = 0; i < sdev->srq_size; ++i)
3265 srpt_post_recv(sdev, sdev->ioctx_ring[i]); 3266 srpt_post_recv(sdev, sdev->ioctx_ring[i]);
3266 3267
3267 WARN_ON(sdev->device->phys_port_cnt 3268 WARN_ON(sdev->device->phys_port_cnt > ARRAY_SIZE(sdev->port));
3268 > sizeof(sdev->port)/sizeof(sdev->port[0]));
3269 3269
3270 for (i = 1; i <= sdev->device->phys_port_cnt; i++) { 3270 for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
3271 sport = &sdev->port[i - 1]; 3271 sport = &sdev->port[i - 1];
@@ -4010,13 +4010,10 @@ static int __init srpt_init_module(void)
4010 goto out; 4010 goto out;
4011 } 4011 }
4012 4012
4013 spin_lock_init(&srpt_dev_lock);
4014 INIT_LIST_HEAD(&srpt_dev_list);
4015
4016 ret = -ENODEV;
4017 srpt_target = target_fabric_configfs_init(THIS_MODULE, "srpt"); 4013 srpt_target = target_fabric_configfs_init(THIS_MODULE, "srpt");
4018 if (!srpt_target) { 4014 if (IS_ERR(srpt_target)) {
4019 printk(KERN_ERR "couldn't register\n"); 4015 printk(KERN_ERR "couldn't register\n");
4016 ret = PTR_ERR(srpt_target);
4020 goto out; 4017 goto out;
4021 } 4018 }
4022 4019
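Two idioms recur in the srpt hunks above: globals initialised at compile time with DEFINE_SPINLOCK()/LIST_HEAD(), which makes the explicit spin_lock_init()/INIT_LIST_HEAD() calls in module init redundant, and constructors that report failure with an error pointer, which must be tested with IS_ERR() and propagated via PTR_ERR() rather than compared against NULL. A minimal, self-contained sketch with hypothetical names:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/* Compile-time initialisation: no spin_lock_init()/INIT_LIST_HEAD() needed. */
static DEFINE_SPINLOCK(mydrv_lock);
static LIST_HEAD(mydrv_list);

/* Hypothetical constructor standing in for an API that returns ERR_PTR(). */
static void *mydrv_fabric_init(void)
{
	return ERR_PTR(-ENOMEM);		/* pretend the allocation failed */
}

static int mydrv_register(void)
{
	void *fabric = mydrv_fabric_init();

	if (IS_ERR(fabric))
		return PTR_ERR(fabric);		/* propagate the encoded errno, don't test for NULL */
	return 0;
}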
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index b4b4bbcd7f16..61e52b830816 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -35,7 +35,6 @@
35#ifndef IB_SRPT_H 35#ifndef IB_SRPT_H
36#define IB_SRPT_H 36#define IB_SRPT_H
37 37
38#include <linux/version.h>
39#include <linux/types.h> 38#include <linux/types.h>
40#include <linux/list.h> 39#include <linux/list.h>
41#include <linux/wait.h> 40#include <linux/wait.h>
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 76457d50bc34..afc166fcc3d9 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -386,7 +386,7 @@ static ssize_t evdev_read(struct file *file, char __user *buffer,
386 struct evdev_client *client = file->private_data; 386 struct evdev_client *client = file->private_data;
387 struct evdev *evdev = client->evdev; 387 struct evdev *evdev = client->evdev;
388 struct input_event event; 388 struct input_event event;
389 int retval; 389 int retval = 0;
390 390
391 if (count < input_event_size()) 391 if (count < input_event_size())
392 return -EINVAL; 392 return -EINVAL;
diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c
index a588578037eb..67bec14e8b96 100644
--- a/drivers/input/keyboard/twl4030_keypad.c
+++ b/drivers/input/keyboard/twl4030_keypad.c
@@ -34,7 +34,6 @@
34#include <linux/i2c/twl.h> 34#include <linux/i2c/twl.h>
35#include <linux/slab.h> 35#include <linux/slab.h>
36 36
37
38/* 37/*
39 * The TWL4030 family chips include a keypad controller that supports 38 * The TWL4030 family chips include a keypad controller that supports
40 * up to an 8x8 switch matrix. The controller can issue system wakeup 39 * up to an 8x8 switch matrix. The controller can issue system wakeup
@@ -302,7 +301,7 @@ static int __devinit twl4030_kp_program(struct twl4030_keypad *kp)
302 if (twl4030_kpwrite_u8(kp, i, KEYP_DEB) < 0) 301 if (twl4030_kpwrite_u8(kp, i, KEYP_DEB) < 0)
303 return -EIO; 302 return -EIO;
304 303
305 /* Set timeout period to 100 ms */ 304 /* Set timeout period to 200 ms */
306 i = KEYP_PERIOD_US(200000, PTV_PRESCALER); 305 i = KEYP_PERIOD_US(200000, PTV_PRESCALER);
307 if (twl4030_kpwrite_u8(kp, (i & 0xFF), KEYP_TIMEOUT_L) < 0) 306 if (twl4030_kpwrite_u8(kp, (i & 0xFF), KEYP_TIMEOUT_L) < 0)
308 return -EIO; 307 return -EIO;
@@ -466,4 +465,3 @@ MODULE_AUTHOR("Texas Instruments");
466MODULE_DESCRIPTION("TWL4030 Keypad Driver"); 465MODULE_DESCRIPTION("TWL4030 Keypad Driver");
467MODULE_LICENSE("GPL"); 466MODULE_LICENSE("GPL");
468MODULE_ALIAS("platform:twl4030_keypad"); 467MODULE_ALIAS("platform:twl4030_keypad");
469
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index b4cfc6c8be89..5ec774d6c82b 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -512,6 +512,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
512 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1720"), 512 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1720"),
513 }, 513 },
514 }, 514 },
515 {
516 /* Lenovo Ideapad U455 */
517 .matches = {
518 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
519 DMI_MATCH(DMI_PRODUCT_NAME, "20046"),
520 },
521 },
515 { } 522 { }
516}; 523};
517 524
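The added Ideapad entry follows the standard dmi_system_id pattern: a table of vendor/product matches terminated by an empty entry, consulted once at init time. A minimal sketch with placeholder strings, not the real i8042 quirk table:

#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/types.h>

/* Hypothetical quirk table; "SomeVendor"/"SomeModel" are placeholders. */
static const struct dmi_system_id my_reset_quirks[] __initconst = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "SomeVendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "SomeModel"),
		},
	},
	{ }	/* terminating empty entry is mandatory */
};

static bool __init my_needs_reset_quirk(void)
{
	return dmi_check_system(my_reset_quirks);
}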
diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
index 8250299fd64f..4494233d331a 100644
--- a/drivers/input/serio/serio_raw.c
+++ b/drivers/input/serio/serio_raw.c
@@ -164,7 +164,8 @@ static ssize_t serio_raw_read(struct file *file, char __user *buffer,
164 struct serio_raw_client *client = file->private_data; 164 struct serio_raw_client *client = file->private_data;
165 struct serio_raw *serio_raw = client->serio_raw; 165 struct serio_raw *serio_raw = client->serio_raw;
166 char uninitialized_var(c); 166 char uninitialized_var(c);
167 ssize_t retval = 0; 167 ssize_t read = 0;
168 int retval;
168 169
169 if (serio_raw->dead) 170 if (serio_raw->dead)
170 return -ENODEV; 171 return -ENODEV;
@@ -180,13 +181,15 @@ static ssize_t serio_raw_read(struct file *file, char __user *buffer,
180 if (serio_raw->dead) 181 if (serio_raw->dead)
181 return -ENODEV; 182 return -ENODEV;
182 183
183 while (retval < count && serio_raw_fetch_byte(serio_raw, &c)) { 184 while (read < count && serio_raw_fetch_byte(serio_raw, &c)) {
184 if (put_user(c, buffer++)) 185 if (put_user(c, buffer++)) {
185 return -EFAULT; 186 retval = -EFAULT;
186 retval++; 187 break;
188 }
189 read++;
187 } 190 }
188 191
189 return retval; 192 return read ?: retval;
190} 193}
191 194
192static ssize_t serio_raw_write(struct file *file, const char __user *buffer, 195static ssize_t serio_raw_write(struct file *file, const char __user *buffer,
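The rewritten read loop above keeps a separate count of bytes already copied so that a fault in put_user() after a partial transfer still reports the bytes that made it out; only when nothing was copied does the error code escape (the "read ?: retval" idiom). A minimal sketch of that return convention, with a hypothetical fetch callback standing in for serio_raw_fetch_byte():

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Sketch: "partial read wins over the error"; fetch_byte() is hypothetical. */
static ssize_t copy_fetched_bytes(char __user *buffer, size_t count,
				  bool (*fetch_byte)(char *c))
{
	ssize_t read = 0;
	int retval = 0;
	char c;

	while (read < count && fetch_byte(&c)) {
		if (put_user(c, buffer++)) {
			retval = -EFAULT;	/* remember the fault ...      */
			break;			/* ... but keep what we copied */
		}
		read++;
	}

	return read ?: retval;			/* bytes copied if any, else the error */
}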
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index cce1f03b8895..f75e0608be5b 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2863,6 +2863,9 @@ static unsigned device_dma_ops_init(void)
2863 2863
2864 for_each_pci_dev(pdev) { 2864 for_each_pci_dev(pdev) {
2865 if (!check_device(&pdev->dev)) { 2865 if (!check_device(&pdev->dev)) {
2866
2867 iommu_ignore_device(&pdev->dev);
2868
2866 unhandled += 1; 2869 unhandled += 1;
2867 continue; 2870 continue;
2868 } 2871 }
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 08a90b88e40d..cee307e86606 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -482,23 +482,19 @@ static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
482 482
483 priv = domain->priv; 483 priv = domain->priv;
484 484
485 if (!priv) { 485 if (!priv)
486 ret = -ENODEV;
487 goto fail; 486 goto fail;
488 }
489 487
490 fl_table = priv->pgtable; 488 fl_table = priv->pgtable;
491 489
492 if (len != SZ_16M && len != SZ_1M && 490 if (len != SZ_16M && len != SZ_1M &&
493 len != SZ_64K && len != SZ_4K) { 491 len != SZ_64K && len != SZ_4K) {
494 pr_debug("Bad length: %d\n", len); 492 pr_debug("Bad length: %d\n", len);
495 ret = -EINVAL;
496 goto fail; 493 goto fail;
497 } 494 }
498 495
499 if (!fl_table) { 496 if (!fl_table) {
500 pr_debug("Null page table\n"); 497 pr_debug("Null page table\n");
501 ret = -EINVAL;
502 goto fail; 498 goto fail;
503 } 499 }
504 500
@@ -507,7 +503,6 @@ static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
507 503
508 if (*fl_pte == 0) { 504 if (*fl_pte == 0) {
509 pr_debug("First level PTE is 0\n"); 505 pr_debug("First level PTE is 0\n");
510 ret = -ENODEV;
511 goto fail; 506 goto fail;
512 } 507 }
513 508
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index 2339d7396b9e..802ab87a78b6 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -1901,7 +1901,7 @@ static int isdn_net_header(struct sk_buff *skb, struct net_device *dev,
1901{ 1901{
1902 isdn_net_local *lp = netdev_priv(dev); 1902 isdn_net_local *lp = netdev_priv(dev);
1903 unsigned char *p; 1903 unsigned char *p;
1904 ushort len = 0; 1904 int len = 0;
1905 1905
1906 switch (lp->p_encap) { 1906 switch (lp->p_encap) {
1907 case ISDN_NET_ENCAP_ETHER: 1907 case ISDN_NET_ENCAP_ETHER:
diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c
index 45e6878d7374..e59c166a0ce2 100644
--- a/drivers/leds/leds-lm3530.c
+++ b/drivers/leds/leds-lm3530.c
@@ -164,8 +164,8 @@ static int lm3530_init_registers(struct lm3530_data *drvdata)
164 164
165 if (drvdata->mode == LM3530_BL_MODE_ALS) { 165 if (drvdata->mode == LM3530_BL_MODE_ALS) {
166 if (pltfm->als_vmax == 0) { 166 if (pltfm->als_vmax == 0) {
167 pltfm->als_vmin = als_vmin = 0; 167 pltfm->als_vmin = 0;
168 pltfm->als_vmin = als_vmax = LM3530_ALS_WINDOW_mV; 168 pltfm->als_vmax = LM3530_ALS_WINDOW_mV;
169 } 169 }
170 170
171 als_vmin = pltfm->als_vmin; 171 als_vmin = pltfm->als_vmin;
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
index 75049e765191..b026896206ca 100644
--- a/drivers/macintosh/adb.c
+++ b/drivers/macintosh/adb.c
@@ -710,7 +710,7 @@ static ssize_t adb_read(struct file *file, char __user *buf,
710 req = NULL; 710 req = NULL;
711 spin_lock_irqsave(&state->lock, flags); 711 spin_lock_irqsave(&state->lock, flags);
712 add_wait_queue(&state->wait_queue, &wait); 712 add_wait_queue(&state->wait_queue, &wait);
713 current->state = TASK_INTERRUPTIBLE; 713 set_current_state(TASK_INTERRUPTIBLE);
714 714
715 for (;;) { 715 for (;;) {
716 req = state->completed; 716 req = state->completed;
@@ -734,7 +734,7 @@ static ssize_t adb_read(struct file *file, char __user *buf,
734 spin_lock_irqsave(&state->lock, flags); 734 spin_lock_irqsave(&state->lock, flags);
735 } 735 }
736 736
737 current->state = TASK_RUNNING; 737 set_current_state(TASK_RUNNING);
738 remove_wait_queue(&state->wait_queue, &wait); 738 remove_wait_queue(&state->wait_queue, &wait);
739 spin_unlock_irqrestore(&state->lock, flags); 739 spin_unlock_irqrestore(&state->lock, flags);
740 740
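Writing current->state directly, as the old adb code did, omits the memory barrier that set_current_state() places between publishing the new task state and re-testing the wakeup condition, so a wakeup racing with the check can be lost. A minimal sketch of the canonical wait loop using the helper; the queue and condition callback are hypothetical:

#include <linux/sched.h>
#include <linux/wait.h>

/* Sketch of the classic sleep/wakeup loop (hypothetical queue and condition). */
static void wait_for_work(wait_queue_head_t *wq, bool (*work_ready)(void))
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(wq, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);	/* helper includes the barrier */
		if (work_ready() || signal_pending(current))
			break;
		schedule();				/* really go to sleep          */
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(wq, &wait);
}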
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index c2907d836e4e..86cb7e5d83d5 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -56,7 +56,8 @@ struct raid_dev {
56struct raid_set { 56struct raid_set {
57 struct dm_target *ti; 57 struct dm_target *ti;
58 58
59 uint64_t print_flags; 59 uint32_t bitmap_loaded;
60 uint32_t print_flags;
60 61
61 struct mddev md; 62 struct mddev md;
62 struct raid_type *raid_type; 63 struct raid_type *raid_type;
@@ -1085,7 +1086,7 @@ static int raid_status(struct dm_target *ti, status_type_t type,
1085 raid_param_cnt += 2; 1086 raid_param_cnt += 2;
1086 } 1087 }
1087 1088
1088 raid_param_cnt += (hweight64(rs->print_flags & ~DMPF_REBUILD) * 2); 1089 raid_param_cnt += (hweight32(rs->print_flags & ~DMPF_REBUILD) * 2);
1089 if (rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC)) 1090 if (rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC))
1090 raid_param_cnt--; 1091 raid_param_cnt--;
1091 1092
@@ -1197,7 +1198,12 @@ static void raid_resume(struct dm_target *ti)
1197{ 1198{
1198 struct raid_set *rs = ti->private; 1199 struct raid_set *rs = ti->private;
1199 1200
1200 bitmap_load(&rs->md); 1201 if (!rs->bitmap_loaded) {
1202 bitmap_load(&rs->md);
1203 rs->bitmap_loaded = 1;
1204 } else
1205 md_wakeup_thread(rs->md.thread);
1206
1201 mddev_resume(&rs->md); 1207 mddev_resume(&rs->md);
1202} 1208}
1203 1209
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 9417ae2fa0bb..ce88755baf4a 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -7333,7 +7333,8 @@ void md_do_sync(struct mddev *mddev)
7333 printk(KERN_INFO 7333 printk(KERN_INFO
7334 "md: checkpointing %s of %s.\n", 7334 "md: checkpointing %s of %s.\n",
7335 desc, mdname(mddev)); 7335 desc, mdname(mddev));
7336 mddev->recovery_cp = mddev->curr_resync; 7336 mddev->recovery_cp =
7337 mddev->curr_resync_completed;
7337 } 7338 }
7338 } else 7339 } else
7339 mddev->recovery_cp = MaxSector; 7340 mddev->recovery_cp = MaxSector;
@@ -7351,9 +7352,9 @@ void md_do_sync(struct mddev *mddev)
7351 rcu_read_unlock(); 7352 rcu_read_unlock();
7352 } 7353 }
7353 } 7354 }
7355 skip:
7354 set_bit(MD_CHANGE_DEVS, &mddev->flags); 7356 set_bit(MD_CHANGE_DEVS, &mddev->flags);
7355 7357
7356 skip:
7357 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 7358 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
7358 /* We completed so min/max setting can be forgotten if used. */ 7359 /* We completed so min/max setting can be forgotten if used. */
7359 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 7360 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index cd13e9f2f5e6..f147395bac9a 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -200,7 +200,7 @@ config MENELAUS
200 200
201config TWL4030_CORE 201config TWL4030_CORE
202 bool "Texas Instruments TWL4030/TWL5030/TWL6030/TPS659x0 Support" 202 bool "Texas Instruments TWL4030/TWL5030/TWL6030/TPS659x0 Support"
203 depends on I2C=y && GENERIC_HARDIRQS && IRQ_DOMAIN 203 depends on I2C=y && GENERIC_HARDIRQS
204 help 204 help
205 Say yes here if you have TWL4030 / TWL6030 family chip on your board. 205 Say yes here if you have TWL4030 / TWL6030 family chip on your board.
206 This core driver provides register access and IRQ handling 206 This core driver provides register access and IRQ handling
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index e04e04ddc15e..8ce3959c6919 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -263,7 +263,9 @@ struct twl_client {
263 263
264static struct twl_client twl_modules[TWL_NUM_SLAVES]; 264static struct twl_client twl_modules[TWL_NUM_SLAVES];
265 265
266#ifdef CONFIG_IRQ_DOMAIN
266static struct irq_domain domain; 267static struct irq_domain domain;
268#endif
267 269
268/* mapping the module id to slave id and base address */ 270/* mapping the module id to slave id and base address */
269struct twl_mapping { 271struct twl_mapping {
@@ -1226,13 +1228,13 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
1226 pdata->irq_base = status; 1228 pdata->irq_base = status;
1227 pdata->irq_end = pdata->irq_base + nr_irqs; 1229 pdata->irq_end = pdata->irq_base + nr_irqs;
1228 1230
1231#ifdef CONFIG_IRQ_DOMAIN
1229 domain.irq_base = pdata->irq_base; 1232 domain.irq_base = pdata->irq_base;
1230 domain.nr_irq = nr_irqs; 1233 domain.nr_irq = nr_irqs;
1231#ifdef CONFIG_OF_IRQ
1232 domain.of_node = of_node_get(node); 1234 domain.of_node = of_node_get(node);
1233 domain.ops = &irq_domain_simple_ops; 1235 domain.ops = &irq_domain_simple_ops;
1234#endif
1235 irq_domain_add(&domain); 1236 irq_domain_add(&domain);
1237#endif
1236 1238
1237 if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C) == 0) { 1239 if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C) == 0) {
1238 dev_dbg(&client->dev, "can't talk I2C?\n"); 1240 dev_dbg(&client->dev, "can't talk I2C?\n");
diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c
index d905f5171153..79ca33dfacca 100644
--- a/drivers/mfd/twl4030-power.c
+++ b/drivers/mfd/twl4030-power.c
@@ -124,7 +124,7 @@ static u8 res_config_addrs[] = {
124 [RES_MAIN_REF] = 0x94, 124 [RES_MAIN_REF] = 0x94,
125}; 125};
126 126
127static int __init twl4030_write_script_byte(u8 address, u8 byte) 127static int __devinit twl4030_write_script_byte(u8 address, u8 byte)
128{ 128{
129 int err; 129 int err;
130 130
@@ -138,7 +138,7 @@ out:
138 return err; 138 return err;
139} 139}
140 140
141static int __init twl4030_write_script_ins(u8 address, u16 pmb_message, 141static int __devinit twl4030_write_script_ins(u8 address, u16 pmb_message,
142 u8 delay, u8 next) 142 u8 delay, u8 next)
143{ 143{
144 int err; 144 int err;
@@ -158,7 +158,7 @@ out:
158 return err; 158 return err;
159} 159}
160 160
161static int __init twl4030_write_script(u8 address, struct twl4030_ins *script, 161static int __devinit twl4030_write_script(u8 address, struct twl4030_ins *script,
162 int len) 162 int len)
163{ 163{
164 int err; 164 int err;
@@ -183,7 +183,7 @@ static int __init twl4030_write_script(u8 address, struct twl4030_ins *script,
183 return err; 183 return err;
184} 184}
185 185
186static int __init twl4030_config_wakeup3_sequence(u8 address) 186static int __devinit twl4030_config_wakeup3_sequence(u8 address)
187{ 187{
188 int err; 188 int err;
189 u8 data; 189 u8 data;
@@ -208,7 +208,7 @@ out:
208 return err; 208 return err;
209} 209}
210 210
211static int __init twl4030_config_wakeup12_sequence(u8 address) 211static int __devinit twl4030_config_wakeup12_sequence(u8 address)
212{ 212{
213 int err = 0; 213 int err = 0;
214 u8 data; 214 u8 data;
@@ -262,7 +262,7 @@ out:
262 return err; 262 return err;
263} 263}
264 264
265static int __init twl4030_config_sleep_sequence(u8 address) 265static int __devinit twl4030_config_sleep_sequence(u8 address)
266{ 266{
267 int err; 267 int err;
268 268
@@ -276,7 +276,7 @@ static int __init twl4030_config_sleep_sequence(u8 address)
276 return err; 276 return err;
277} 277}
278 278
279static int __init twl4030_config_warmreset_sequence(u8 address) 279static int __devinit twl4030_config_warmreset_sequence(u8 address)
280{ 280{
281 int err; 281 int err;
282 u8 rd_data; 282 u8 rd_data;
@@ -324,7 +324,7 @@ out:
324 return err; 324 return err;
325} 325}
326 326
327static int __init twl4030_configure_resource(struct twl4030_resconfig *rconfig) 327static int __devinit twl4030_configure_resource(struct twl4030_resconfig *rconfig)
328{ 328{
329 int rconfig_addr; 329 int rconfig_addr;
330 int err; 330 int err;
@@ -416,7 +416,7 @@ static int __init twl4030_configure_resource(struct twl4030_resconfig *rconfig)
416 return 0; 416 return 0;
417} 417}
418 418
419static int __init load_twl4030_script(struct twl4030_script *tscript, 419static int __devinit load_twl4030_script(struct twl4030_script *tscript,
420 u8 address) 420 u8 address)
421{ 421{
422 int err; 422 int err;
@@ -527,7 +527,7 @@ void twl4030_power_off(void)
527 pr_err("TWL4030 Unable to power off\n"); 527 pr_err("TWL4030 Unable to power off\n");
528} 528}
529 529
530void __init twl4030_power_init(struct twl4030_power_data *twl4030_scripts) 530void __devinit twl4030_power_init(struct twl4030_power_data *twl4030_scripts)
531{ 531{
532 int err = 0; 532 int err = 0;
533 int i; 533 int i;
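The annotation change above matters because these helpers are reached from the device probe path: code marked __init is discarded after boot, so a probe that runs later would jump into freed memory, whereas __devinit keeps the text around whenever hotplug support is enabled. A minimal sketch of the rule, with hypothetical names:

#include <linux/init.h>
#include <linux/platform_device.h>

/* Sketch: anything callable from probe must not be __init. */
static int __devinit mydrv_write_cfg(u8 reg, u8 val)
{
	/* ... talk to the hardware ... */
	return 0;
}

static int __devinit mydrv_probe(struct platform_device *pdev)
{
	return mydrv_write_cfg(0x10, 0x01);	/* may run long after .init.text is freed */
}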
diff --git a/drivers/mfd/twl6040-core.c b/drivers/mfd/twl6040-core.c
index dda86293dc9f..b2d8e512d3cb 100644
--- a/drivers/mfd/twl6040-core.c
+++ b/drivers/mfd/twl6040-core.c
@@ -282,6 +282,7 @@ int twl6040_power(struct twl6040 *twl6040, int on)
282 /* Default PLL configuration after power up */ 282 /* Default PLL configuration after power up */
283 twl6040->pll = TWL6040_SYSCLK_SEL_LPPLL; 283 twl6040->pll = TWL6040_SYSCLK_SEL_LPPLL;
284 twl6040->sysclk = 19200000; 284 twl6040->sysclk = 19200000;
285 twl6040->mclk = 32768;
285 } else { 286 } else {
286 /* already powered-down */ 287 /* already powered-down */
287 if (!twl6040->power_count) { 288 if (!twl6040->power_count) {
@@ -305,6 +306,7 @@ int twl6040_power(struct twl6040 *twl6040, int on)
305 twl6040_power_down(twl6040); 306 twl6040_power_down(twl6040);
306 } 307 }
307 twl6040->sysclk = 0; 308 twl6040->sysclk = 0;
309 twl6040->mclk = 0;
308 } 310 }
309 311
310out: 312out:
@@ -324,23 +326,38 @@ int twl6040_set_pll(struct twl6040 *twl6040, int pll_id,
324 hppllctl = twl6040_reg_read(twl6040, TWL6040_REG_HPPLLCTL); 326 hppllctl = twl6040_reg_read(twl6040, TWL6040_REG_HPPLLCTL);
325 lppllctl = twl6040_reg_read(twl6040, TWL6040_REG_LPPLLCTL); 327 lppllctl = twl6040_reg_read(twl6040, TWL6040_REG_LPPLLCTL);
326 328
329 /* Force full reconfiguration when switching between PLL */
330 if (pll_id != twl6040->pll) {
331 twl6040->sysclk = 0;
332 twl6040->mclk = 0;
333 }
334
327 switch (pll_id) { 335 switch (pll_id) {
328 case TWL6040_SYSCLK_SEL_LPPLL: 336 case TWL6040_SYSCLK_SEL_LPPLL:
329 /* low-power PLL divider */ 337 /* low-power PLL divider */
 330 switch (freq_out) { 338 /* Change the sysclk configuration only if it has been changed */
331 case 17640000: 339 if (twl6040->sysclk != freq_out) {
332 lppllctl |= TWL6040_LPLLFIN; 340 switch (freq_out) {
333 break; 341 case 17640000:
334 case 19200000: 342 lppllctl |= TWL6040_LPLLFIN;
335 lppllctl &= ~TWL6040_LPLLFIN; 343 break;
336 break; 344 case 19200000:
337 default: 345 lppllctl &= ~TWL6040_LPLLFIN;
338 dev_err(twl6040->dev, 346 break;
339 "freq_out %d not supported\n", freq_out); 347 default:
340 ret = -EINVAL; 348 dev_err(twl6040->dev,
341 goto pll_out; 349 "freq_out %d not supported\n",
350 freq_out);
351 ret = -EINVAL;
352 goto pll_out;
353 }
354 twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL,
355 lppllctl);
342 } 356 }
343 twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl); 357
358 /* The PLL in use has not been change, we can exit */
359 if (twl6040->pll == pll_id)
360 break;
344 361
345 switch (freq_in) { 362 switch (freq_in) {
346 case 32768: 363 case 32768:
@@ -371,48 +388,56 @@ int twl6040_set_pll(struct twl6040 *twl6040, int pll_id,
371 goto pll_out; 388 goto pll_out;
372 } 389 }
373 390
374 hppllctl &= ~TWL6040_MCLK_MSK; 391 if (twl6040->mclk != freq_in) {
392 hppllctl &= ~TWL6040_MCLK_MSK;
393
394 switch (freq_in) {
395 case 12000000:
396 /* PLL enabled, active mode */
397 hppllctl |= TWL6040_MCLK_12000KHZ |
398 TWL6040_HPLLENA;
399 break;
400 case 19200000:
401 /*
402 * PLL disabled
403 * (enable PLL if MCLK jitter quality
404 * doesn't meet specification)
405 */
406 hppllctl |= TWL6040_MCLK_19200KHZ;
407 break;
408 case 26000000:
409 /* PLL enabled, active mode */
410 hppllctl |= TWL6040_MCLK_26000KHZ |
411 TWL6040_HPLLENA;
412 break;
413 case 38400000:
414 /* PLL enabled, active mode */
415 hppllctl |= TWL6040_MCLK_38400KHZ |
416 TWL6040_HPLLENA;
417 break;
418 default:
419 dev_err(twl6040->dev,
420 "freq_in %d not supported\n", freq_in);
421 ret = -EINVAL;
422 goto pll_out;
423 }
375 424
376 switch (freq_in) {
377 case 12000000:
378 /* PLL enabled, active mode */
379 hppllctl |= TWL6040_MCLK_12000KHZ |
380 TWL6040_HPLLENA;
381 break;
382 case 19200000:
383 /* 425 /*
384 * PLL disabled 426 * enable clock slicer to ensure input waveform is
385 * (enable PLL if MCLK jitter quality 427 * square
386 * doesn't meet specification)
387 */ 428 */
388 hppllctl |= TWL6040_MCLK_19200KHZ; 429 hppllctl |= TWL6040_HPLLSQRENA;
389 break;
390 case 26000000:
391 /* PLL enabled, active mode */
392 hppllctl |= TWL6040_MCLK_26000KHZ |
393 TWL6040_HPLLENA;
394 break;
395 case 38400000:
396 /* PLL enabled, active mode */
397 hppllctl |= TWL6040_MCLK_38400KHZ |
398 TWL6040_HPLLENA;
399 break;
400 default:
401 dev_err(twl6040->dev,
402 "freq_in %d not supported\n", freq_in);
403 ret = -EINVAL;
404 goto pll_out;
405 }
406 430
407 /* enable clock slicer to ensure input waveform is square */ 431 twl6040_reg_write(twl6040, TWL6040_REG_HPPLLCTL,
408 hppllctl |= TWL6040_HPLLSQRENA; 432 hppllctl);
409 433 usleep_range(500, 700);
410 twl6040_reg_write(twl6040, TWL6040_REG_HPPLLCTL, hppllctl); 434 lppllctl |= TWL6040_HPLLSEL;
411 usleep_range(500, 700); 435 twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL,
412 lppllctl |= TWL6040_HPLLSEL; 436 lppllctl);
413 twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl); 437 lppllctl &= ~TWL6040_LPLLENA;
414 lppllctl &= ~TWL6040_LPLLENA; 438 twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL,
415 twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl); 439 lppllctl);
440 }
416 break; 441 break;
417 default: 442 default:
418 dev_err(twl6040->dev, "unknown pll id %d\n", pll_id); 443 dev_err(twl6040->dev, "unknown pll id %d\n", pll_id);
@@ -421,6 +446,7 @@ int twl6040_set_pll(struct twl6040 *twl6040, int pll_id,
421 } 446 }
422 447
423 twl6040->sysclk = freq_out; 448 twl6040->sysclk = freq_out;
449 twl6040->mclk = freq_in;
424 twl6040->pll = pll_id; 450 twl6040->pll = pll_id;
425 451
426pll_out: 452pll_out:
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 6a1a092db146..c7795096d43b 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -2,24 +2,14 @@
2# Misc strange devices 2# Misc strange devices
3# 3#
4 4
5# This one has to live outside of the MISC_DEVICES conditional, 5menu "Misc devices"
6# because it may be selected by drivers/platform/x86/hp_accel. 6
7config SENSORS_LIS3LV02D 7config SENSORS_LIS3LV02D
8 tristate 8 tristate
9 depends on INPUT 9 depends on INPUT
10 select INPUT_POLLDEV 10 select INPUT_POLLDEV
11 default n 11 default n
12 12
13menuconfig MISC_DEVICES
14 bool "Misc devices"
15 ---help---
16 Say Y here to get to see options for device drivers from various
17 different categories. This option alone does not add any kernel code.
18
19 If you say N, all options in this submenu will be skipped and disabled.
20
21if MISC_DEVICES
22
23config AD525X_DPOT 13config AD525X_DPOT
24 tristate "Analog Devices Digital Potentiometers" 14 tristate "Analog Devices Digital Potentiometers"
25 depends on (I2C || SPI) && SYSFS 15 depends on (I2C || SPI) && SYSFS
@@ -516,5 +506,4 @@ source "drivers/misc/ti-st/Kconfig"
516source "drivers/misc/lis3lv02d/Kconfig" 506source "drivers/misc/lis3lv02d/Kconfig"
517source "drivers/misc/carma/Kconfig" 507source "drivers/misc/carma/Kconfig"
518source "drivers/misc/altera-stapl/Kconfig" 508source "drivers/misc/altera-stapl/Kconfig"
519 509endmenu
520endif # MISC_DEVICES
diff --git a/drivers/misc/c2port/c2port-duramar2150.c b/drivers/misc/c2port/c2port-duramar2150.c
index 778fc3fdfb9b..5484301d57d9 100644
--- a/drivers/misc/c2port/c2port-duramar2150.c
+++ b/drivers/misc/c2port/c2port-duramar2150.c
@@ -15,6 +15,7 @@
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/delay.h> 16#include <linux/delay.h>
17#include <linux/io.h> 17#include <linux/io.h>
18#include <linux/ioport.h>
18#include <linux/c2port.h> 19#include <linux/c2port.h>
19 20
20#define DATA_PORT 0x325 21#define DATA_PORT 0x325
diff --git a/drivers/misc/cb710/core.c b/drivers/misc/cb710/core.c
index 68cd05b6d829..85cc7710193c 100644
--- a/drivers/misc/cb710/core.c
+++ b/drivers/misc/cb710/core.c
@@ -245,6 +245,7 @@ static int __devinit cb710_probe(struct pci_dev *pdev,
245 if (err) 245 if (err)
246 return err; 246 return err;
247 247
248 spin_lock_init(&chip->irq_lock);
248 chip->pdev = pdev; 249 chip->pdev = pdev;
249 chip->iobase = pcim_iomap_table(pdev)[0]; 250 chip->iobase = pcim_iomap_table(pdev)[0];
250 251
diff --git a/drivers/misc/cs5535-mfgpt.c b/drivers/misc/cs5535-mfgpt.c
index bc685bfc4c33..87a390de054c 100644
--- a/drivers/misc/cs5535-mfgpt.c
+++ b/drivers/misc/cs5535-mfgpt.c
@@ -262,7 +262,7 @@ static void __init reset_all_timers(void)
262 * In other cases (such as with VSAless OpenFirmware), the system firmware 262 * In other cases (such as with VSAless OpenFirmware), the system firmware
263 * leaves timers available for us to use. 263 * leaves timers available for us to use.
264 */ 264 */
265static int __init scan_timers(struct cs5535_mfgpt_chip *mfgpt) 265static int __devinit scan_timers(struct cs5535_mfgpt_chip *mfgpt)
266{ 266{
267 struct cs5535_mfgpt_timer timer = { .chip = mfgpt }; 267 struct cs5535_mfgpt_timer timer = { .chip = mfgpt };
268 unsigned long flags; 268 unsigned long flags;
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 150cd7061b80..28adefe70f96 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -354,6 +354,7 @@ static void lkdtm_do_action(enum ctype which)
354static void lkdtm_handler(void) 354static void lkdtm_handler(void)
355{ 355{
356 unsigned long flags; 356 unsigned long flags;
357 bool do_it = false;
357 358
358 spin_lock_irqsave(&count_lock, flags); 359 spin_lock_irqsave(&count_lock, flags);
359 count--; 360 count--;
@@ -361,10 +362,13 @@ static void lkdtm_handler(void)
361 cp_name_to_str(cpoint), cp_type_to_str(cptype), count); 362 cp_name_to_str(cpoint), cp_type_to_str(cptype), count);
362 363
363 if (count == 0) { 364 if (count == 0) {
364 lkdtm_do_action(cptype); 365 do_it = true;
365 count = cpoint_count; 366 count = cpoint_count;
366 } 367 }
367 spin_unlock_irqrestore(&count_lock, flags); 368 spin_unlock_irqrestore(&count_lock, flags);
369
370 if (do_it)
371 lkdtm_do_action(cptype);
368} 372}
369 373
370static int lkdtm_register_cpoint(enum cname which) 374static int lkdtm_register_cpoint(enum cname which)
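The lkdtm change above records the decision under count_lock and runs the crash action only after the spinlock (and the saved irq state) has been released, since the action itself may sleep or never return. A minimal sketch of the "decide under the lock, act outside it" pattern, with hypothetical names:

#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(hit_lock);	/* hypothetical counter lock */
static int hits_left = 4;

static void heavy_action(void)
{
	/* placeholder for work that must not run under hit_lock */
}

static void on_hit(void)
{
	unsigned long flags;
	bool do_it = false;

	spin_lock_irqsave(&hit_lock, flags);
	if (--hits_left == 0) {
		do_it = true;			/* only record the decision here */
		hits_left = 4;
	}
	spin_unlock_irqrestore(&hit_lock, flags);

	if (do_it)
		heavy_action();			/* act with the lock dropped */
}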
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index cd41d403c9df..cb56e270da11 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -314,7 +314,7 @@ static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target)
314 * fear that guest will need it. Host may reject some pages, we need to 314 * fear that guest will need it. Host may reject some pages, we need to
315 * check the return value and maybe submit a different page. 315 * check the return value and maybe submit a different page.
316 */ 316 */
317static bool vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn, 317static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
318 unsigned int *hv_status) 318 unsigned int *hv_status)
319{ 319{
320 unsigned long status, dummy; 320 unsigned long status, dummy;
@@ -322,17 +322,17 @@ static bool vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
322 322
323 pfn32 = (u32)pfn; 323 pfn32 = (u32)pfn;
324 if (pfn32 != pfn) 324 if (pfn32 != pfn)
325 return false; 325 return -1;
326 326
327 STATS_INC(b->stats.lock); 327 STATS_INC(b->stats.lock);
328 328
329 *hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy); 329 *hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy);
330 if (vmballoon_check_status(b, status)) 330 if (vmballoon_check_status(b, status))
331 return true; 331 return 0;
332 332
333 pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status); 333 pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
334 STATS_INC(b->stats.lock_fail); 334 STATS_INC(b->stats.lock_fail);
335 return false; 335 return 1;
336} 336}
337 337
338/* 338/*
@@ -411,7 +411,7 @@ static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep)
411 struct page *page; 411 struct page *page;
412 gfp_t flags; 412 gfp_t flags;
413 unsigned int hv_status; 413 unsigned int hv_status;
414 bool locked = false; 414 int locked;
415 flags = can_sleep ? VMW_PAGE_ALLOC_CANSLEEP : VMW_PAGE_ALLOC_NOSLEEP; 415 flags = can_sleep ? VMW_PAGE_ALLOC_CANSLEEP : VMW_PAGE_ALLOC_NOSLEEP;
416 416
417 do { 417 do {
@@ -431,7 +431,7 @@ static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep)
431 431
432 /* inform monitor */ 432 /* inform monitor */
433 locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status); 433 locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status);
434 if (!locked) { 434 if (locked > 0) {
435 STATS_INC(b->stats.refused_alloc); 435 STATS_INC(b->stats.refused_alloc);
436 436
437 if (hv_status == VMW_BALLOON_ERROR_RESET || 437 if (hv_status == VMW_BALLOON_ERROR_RESET ||
@@ -449,7 +449,7 @@ static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep)
449 if (++b->n_refused_pages >= VMW_BALLOON_MAX_REFUSED) 449 if (++b->n_refused_pages >= VMW_BALLOON_MAX_REFUSED)
450 return -EIO; 450 return -EIO;
451 } 451 }
452 } while (!locked); 452 } while (locked != 0);
453 453
454 /* track allocated page */ 454 /* track allocated page */
455 list_add(&page->lru, &b->pages); 455 list_add(&page->lru, &b->pages);
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 0cad48a284a8..c6a383d0244d 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1694,6 +1694,7 @@ static int mmc_add_disk(struct mmc_blk_data *md)
1694 1694
1695 md->power_ro_lock.show = power_ro_lock_show; 1695 md->power_ro_lock.show = power_ro_lock_show;
1696 md->power_ro_lock.store = power_ro_lock_store; 1696 md->power_ro_lock.store = power_ro_lock_store;
1697 sysfs_attr_init(&md->power_ro_lock.attr);
1697 md->power_ro_lock.attr.mode = mode; 1698 md->power_ro_lock.attr.mode = mode;
1698 md->power_ro_lock.attr.name = 1699 md->power_ro_lock.attr.name =
1699 "ro_lock_until_next_power_on"; 1700 "ro_lock_until_next_power_on";
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index f545a3e6eb80..690255c7d4dc 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -290,8 +290,11 @@ static void mmc_wait_for_req_done(struct mmc_host *host,
290static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq, 290static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
291 bool is_first_req) 291 bool is_first_req)
292{ 292{
293 if (host->ops->pre_req) 293 if (host->ops->pre_req) {
294 mmc_host_clk_hold(host);
294 host->ops->pre_req(host, mrq, is_first_req); 295 host->ops->pre_req(host, mrq, is_first_req);
296 mmc_host_clk_release(host);
297 }
295} 298}
296 299
297/** 300/**
@@ -306,8 +309,11 @@ static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
306static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq, 309static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
307 int err) 310 int err)
308{ 311{
309 if (host->ops->post_req) 312 if (host->ops->post_req) {
313 mmc_host_clk_hold(host);
310 host->ops->post_req(host, mrq, err); 314 host->ops->post_req(host, mrq, err);
315 mmc_host_clk_release(host);
316 }
311} 317}
312 318
313/** 319/**
@@ -620,7 +626,9 @@ int mmc_host_enable(struct mmc_host *host)
620 int err; 626 int err;
621 627
622 host->en_dis_recurs = 1; 628 host->en_dis_recurs = 1;
629 mmc_host_clk_hold(host);
623 err = host->ops->enable(host); 630 err = host->ops->enable(host);
631 mmc_host_clk_release(host);
624 host->en_dis_recurs = 0; 632 host->en_dis_recurs = 0;
625 633
626 if (err) { 634 if (err) {
@@ -640,7 +648,9 @@ static int mmc_host_do_disable(struct mmc_host *host, int lazy)
640 int err; 648 int err;
641 649
642 host->en_dis_recurs = 1; 650 host->en_dis_recurs = 1;
651 mmc_host_clk_hold(host);
643 err = host->ops->disable(host, lazy); 652 err = host->ops->disable(host, lazy);
653 mmc_host_clk_release(host);
644 host->en_dis_recurs = 0; 654 host->en_dis_recurs = 0;
645 655
646 if (err < 0) { 656 if (err < 0) {
@@ -1121,6 +1131,10 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc,
1121 * might not allow this operation 1131 * might not allow this operation
1122 */ 1132 */
1123 voltage = regulator_get_voltage(supply); 1133 voltage = regulator_get_voltage(supply);
1134
1135 if (mmc->caps2 & MMC_CAP2_BROKEN_VOLTAGE)
1136 min_uV = max_uV = voltage;
1137
1124 if (voltage < 0) 1138 if (voltage < 0)
1125 result = voltage; 1139 result = voltage;
1126 else if (voltage < min_uV || voltage > max_uV) 1140 else if (voltage < min_uV || voltage > max_uV)
@@ -1203,8 +1217,11 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11
1203 1217
1204 host->ios.signal_voltage = signal_voltage; 1218 host->ios.signal_voltage = signal_voltage;
1205 1219
1206 if (host->ops->start_signal_voltage_switch) 1220 if (host->ops->start_signal_voltage_switch) {
1221 mmc_host_clk_hold(host);
1207 err = host->ops->start_signal_voltage_switch(host, &host->ios); 1222 err = host->ops->start_signal_voltage_switch(host, &host->ios);
1223 mmc_host_clk_release(host);
1224 }
1208 1225
1209 return err; 1226 return err;
1210} 1227}
@@ -1239,6 +1256,7 @@ static void mmc_poweroff_notify(struct mmc_host *host)
1239 int err = 0; 1256 int err = 0;
1240 1257
1241 card = host->card; 1258 card = host->card;
1259 mmc_claim_host(host);
1242 1260
1243 /* 1261 /*
1244 * Send power notify command only if card 1262 * Send power notify command only if card
@@ -1269,6 +1287,7 @@ static void mmc_poweroff_notify(struct mmc_host *host)
1269 /* Set the card state to no notification after the poweroff */ 1287 /* Set the card state to no notification after the poweroff */
1270 card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION; 1288 card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
1271 } 1289 }
1290 mmc_release_host(host);
1272} 1291}
1273 1292
1274/* 1293/*
@@ -1327,12 +1346,28 @@ static void mmc_power_up(struct mmc_host *host)
1327 1346
1328void mmc_power_off(struct mmc_host *host) 1347void mmc_power_off(struct mmc_host *host)
1329{ 1348{
1349 int err = 0;
1330 mmc_host_clk_hold(host); 1350 mmc_host_clk_hold(host);
1331 1351
1332 host->ios.clock = 0; 1352 host->ios.clock = 0;
1333 host->ios.vdd = 0; 1353 host->ios.vdd = 0;
1334 1354
1335 mmc_poweroff_notify(host); 1355 /*
1356 * For eMMC 4.5 device send AWAKE command before
1357 * POWER_OFF_NOTIFY command, because in sleep state
1358 * eMMC 4.5 devices respond to only RESET and AWAKE cmd
1359 */
1360 if (host->card && mmc_card_is_sleep(host->card) &&
1361 host->bus_ops->resume) {
1362 err = host->bus_ops->resume(host);
1363
1364 if (!err)
1365 mmc_poweroff_notify(host);
1366 else
1367 pr_warning("%s: error %d during resume "
1368 "(continue with poweroff sequence)\n",
1369 mmc_hostname(host), err);
1370 }
1336 1371
1337 /* 1372 /*
1338 * Reset ocr mask to be the highest possible voltage supported for 1373 * Reset ocr mask to be the highest possible voltage supported for
@@ -2386,12 +2421,6 @@ int mmc_suspend_host(struct mmc_host *host)
2386 */ 2421 */
2387 if (mmc_try_claim_host(host)) { 2422 if (mmc_try_claim_host(host)) {
2388 if (host->bus_ops->suspend) { 2423 if (host->bus_ops->suspend) {
2389 /*
2390 * For eMMC 4.5 device send notify command
2391 * before sleep, because in sleep state eMMC 4.5
2392 * devices respond to only RESET and AWAKE cmd
2393 */
2394 mmc_poweroff_notify(host);
2395 err = host->bus_ops->suspend(host); 2424 err = host->bus_ops->suspend(host);
2396 } 2425 }
2397 mmc_do_release_host(host); 2426 mmc_do_release_host(host);
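A pattern repeated throughout the mmc core hunks above (and in sd.c and sdio_irq.c further down) is to bracket every optional host-driver callback with mmc_host_clk_hold()/mmc_host_clk_release(), so that aggressive clock gating cannot leave the controller unclocked while its ops run. A minimal sketch of the wrapping, assuming the clock-gating helpers are visible to the caller; it mirrors the get_ro hunk in sd.c rather than reproducing it:

#include <linux/mmc/host.h>

/* Sketch: keep the controller clock running across an optional host op. */
static int read_ro_switch(struct mmc_host *host)
{
	int ro = -1;

	if (host->ops->get_ro) {		/* the callback is optional            */
		mmc_host_clk_hold(host);	/* ungate the clock before calling out */
		ro = host->ops->get_ro(host);
		mmc_host_clk_release(host);	/* allow gating again afterwards       */
	}
	return ro;
}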
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index fb8a5cd2e4a1..08a7852ade44 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -14,27 +14,6 @@
14 14
15int mmc_register_host_class(void); 15int mmc_register_host_class(void);
16void mmc_unregister_host_class(void); 16void mmc_unregister_host_class(void);
17
18#ifdef CONFIG_MMC_CLKGATE
19void mmc_host_clk_hold(struct mmc_host *host);
20void mmc_host_clk_release(struct mmc_host *host);
21unsigned int mmc_host_clk_rate(struct mmc_host *host);
22
23#else
24static inline void mmc_host_clk_hold(struct mmc_host *host)
25{
26}
27
28static inline void mmc_host_clk_release(struct mmc_host *host)
29{
30}
31
32static inline unsigned int mmc_host_clk_rate(struct mmc_host *host)
33{
34 return host->ios.clock;
35}
36#endif
37
38void mmc_host_deeper_disable(struct work_struct *work); 17void mmc_host_deeper_disable(struct work_struct *work);
39 18
40#endif 19#endif
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 59b9ba52e66a..a48066344fa8 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -376,7 +376,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
376 } 376 }
377 377
378 card->ext_csd.raw_hc_erase_gap_size = 378 card->ext_csd.raw_hc_erase_gap_size =
379 ext_csd[EXT_CSD_PARTITION_ATTRIBUTE]; 379 ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
380 card->ext_csd.raw_sec_trim_mult = 380 card->ext_csd.raw_sec_trim_mult =
381 ext_csd[EXT_CSD_SEC_TRIM_MULT]; 381 ext_csd[EXT_CSD_SEC_TRIM_MULT];
382 card->ext_csd.raw_sec_erase_mult = 382 card->ext_csd.raw_sec_erase_mult =
@@ -551,7 +551,7 @@ static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
551 goto out; 551 goto out;
552 552
553 /* only compare read only fields */ 553 /* only compare read only fields */
554 err = (!(card->ext_csd.raw_partition_support == 554 err = !((card->ext_csd.raw_partition_support ==
555 bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) && 555 bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
556 (card->ext_csd.raw_erased_mem_count == 556 (card->ext_csd.raw_erased_mem_count ==
557 bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) && 557 bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) &&
@@ -1006,7 +1006,8 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
1006 err = mmc_select_hs200(card); 1006 err = mmc_select_hs200(card);
1007 else if (host->caps & MMC_CAP_MMC_HIGHSPEED) 1007 else if (host->caps & MMC_CAP_MMC_HIGHSPEED)
1008 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1008 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1009 EXT_CSD_HS_TIMING, 1, 0); 1009 EXT_CSD_HS_TIMING, 1,
1010 card->ext_csd.generic_cmd6_time);
1010 1011
1011 if (err && err != -EBADMSG) 1012 if (err && err != -EBADMSG)
1012 goto free_card; 1013 goto free_card;
@@ -1116,7 +1117,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
1116 * Activate wide bus and DDR (if supported). 1117 * Activate wide bus and DDR (if supported).
1117 */ 1118 */
1118 if (!mmc_card_hs200(card) && 1119 if (!mmc_card_hs200(card) &&
1119 (card->csd.mmca_vsn >= CSD_SPEC_VER_3) && 1120 (card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
1120 (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) { 1121 (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
1121 static unsigned ext_csd_bits[][2] = { 1122 static unsigned ext_csd_bits[][2] = {
1122 { EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 }, 1123 { EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 },
@@ -1315,11 +1316,13 @@ static int mmc_suspend(struct mmc_host *host)
1315 BUG_ON(!host->card); 1316 BUG_ON(!host->card);
1316 1317
1317 mmc_claim_host(host); 1318 mmc_claim_host(host);
1318 if (mmc_card_can_sleep(host)) 1319 if (mmc_card_can_sleep(host)) {
1319 err = mmc_card_sleep(host); 1320 err = mmc_card_sleep(host);
1320 else if (!mmc_host_is_spi(host)) 1321 if (!err)
1322 mmc_card_set_sleep(host->card);
1323 } else if (!mmc_host_is_spi(host))
1321 mmc_deselect_cards(host); 1324 mmc_deselect_cards(host);
1322 host->card->state &= ~MMC_STATE_HIGHSPEED; 1325 host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200);
1323 mmc_release_host(host); 1326 mmc_release_host(host);
1324 1327
1325 return err; 1328 return err;
@@ -1339,7 +1342,11 @@ static int mmc_resume(struct mmc_host *host)
1339 BUG_ON(!host->card); 1342 BUG_ON(!host->card);
1340 1343
1341 mmc_claim_host(host); 1344 mmc_claim_host(host);
1342 err = mmc_init_card(host, host->ocr, host->card); 1345 if (mmc_card_is_sleep(host->card)) {
1346 err = mmc_card_awake(host);
1347 mmc_card_clr_sleep(host->card);
1348 } else
1349 err = mmc_init_card(host, host->ocr, host->card);
1343 mmc_release_host(host); 1350 mmc_release_host(host);
1344 1351
1345 return err; 1352 return err;
@@ -1349,7 +1356,8 @@ static int mmc_power_restore(struct mmc_host *host)
1349{ 1356{
1350 int ret; 1357 int ret;
1351 1358
1352 host->card->state &= ~MMC_STATE_HIGHSPEED; 1359 host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200);
1360 mmc_card_clr_sleep(host->card);
1353 mmc_claim_host(host); 1361 mmc_claim_host(host);
1354 ret = mmc_init_card(host, host->ocr, host->card); 1362 ret = mmc_init_card(host, host->ocr, host->card);
1355 mmc_release_host(host); 1363 mmc_release_host(host);
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index c63ad03c29c7..5017f9354ce2 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -451,9 +451,11 @@ static int sd_select_driver_type(struct mmc_card *card, u8 *status)
451 * information and let the hardware specific code 451 * information and let the hardware specific code
452 * return what is possible given the options 452 * return what is possible given the options
453 */ 453 */
454 mmc_host_clk_hold(card->host);
454 drive_strength = card->host->ops->select_drive_strength( 455 drive_strength = card->host->ops->select_drive_strength(
455 card->sw_caps.uhs_max_dtr, 456 card->sw_caps.uhs_max_dtr,
456 host_drv_type, card_drv_type); 457 host_drv_type, card_drv_type);
458 mmc_host_clk_release(card->host);
457 459
458 err = mmc_sd_switch(card, 1, 2, drive_strength, status); 460 err = mmc_sd_switch(card, 1, 2, drive_strength, status);
459 if (err) 461 if (err)
@@ -660,9 +662,12 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
660 goto out; 662 goto out;
661 663
662 /* SPI mode doesn't define CMD19 */ 664 /* SPI mode doesn't define CMD19 */
663 if (!mmc_host_is_spi(card->host) && card->host->ops->execute_tuning) 665 if (!mmc_host_is_spi(card->host) && card->host->ops->execute_tuning) {
666 mmc_host_clk_hold(card->host);
664 err = card->host->ops->execute_tuning(card->host, 667 err = card->host->ops->execute_tuning(card->host,
665 MMC_SEND_TUNING_BLOCK); 668 MMC_SEND_TUNING_BLOCK);
669 mmc_host_clk_release(card->host);
670 }
666 671
667out: 672out:
668 kfree(status); 673 kfree(status);
@@ -850,8 +855,11 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
850 if (!reinit) { 855 if (!reinit) {
851 int ro = -1; 856 int ro = -1;
852 857
853 if (host->ops->get_ro) 858 if (host->ops->get_ro) {
859 mmc_host_clk_hold(card->host);
854 ro = host->ops->get_ro(host); 860 ro = host->ops->get_ro(host);
861 mmc_host_clk_release(card->host);
862 }
855 863
856 if (ro < 0) { 864 if (ro < 0) {
857 pr_warning("%s: host does not " 865 pr_warning("%s: host does not "
@@ -967,8 +975,11 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
967 * Since initialization is now complete, enable preset 975 * Since initialization is now complete, enable preset
968 * value registers for UHS-I cards. 976 * value registers for UHS-I cards.
969 */ 977 */
970 if (host->ops->enable_preset_value) 978 if (host->ops->enable_preset_value) {
979 mmc_host_clk_hold(card->host);
971 host->ops->enable_preset_value(host, true); 980 host->ops->enable_preset_value(host, true);
981 mmc_host_clk_release(card->host);
982 }
972 } else { 983 } else {
973 /* 984 /*
974 * Attempt to change to high-speed (if supported) 985 * Attempt to change to high-speed (if supported)
@@ -1151,8 +1162,11 @@ int mmc_attach_sd(struct mmc_host *host)
1151 return err; 1162 return err;
1152 1163
1153 /* Disable preset value enable if already set since last time */ 1164 /* Disable preset value enable if already set since last time */
1154 if (host->ops->enable_preset_value) 1165 if (host->ops->enable_preset_value) {
1166 mmc_host_clk_hold(host);
1155 host->ops->enable_preset_value(host, false); 1167 host->ops->enable_preset_value(host, false);
1168 mmc_host_clk_release(host);
1169 }
1156 1170
1157 err = mmc_send_app_op_cond(host, 0, &ocr); 1171 err = mmc_send_app_op_cond(host, 0, &ocr);
1158 if (err) 1172 if (err)
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index bd7bacc950dc..12cde6ee17f5 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -98,10 +98,11 @@ fail:
98 return ret; 98 return ret;
99} 99}
100 100
101static int sdio_read_cccr(struct mmc_card *card) 101static int sdio_read_cccr(struct mmc_card *card, u32 ocr)
102{ 102{
103 int ret; 103 int ret;
104 int cccr_vsn; 104 int cccr_vsn;
105 int uhs = ocr & R4_18V_PRESENT;
105 unsigned char data; 106 unsigned char data;
106 unsigned char speed; 107 unsigned char speed;
107 108
@@ -149,7 +150,7 @@ static int sdio_read_cccr(struct mmc_card *card)
149 card->scr.sda_spec3 = 0; 150 card->scr.sda_spec3 = 0;
150 card->sw_caps.sd3_bus_mode = 0; 151 card->sw_caps.sd3_bus_mode = 0;
151 card->sw_caps.sd3_drv_type = 0; 152 card->sw_caps.sd3_drv_type = 0;
152 if (cccr_vsn >= SDIO_CCCR_REV_3_00) { 153 if (cccr_vsn >= SDIO_CCCR_REV_3_00 && uhs) {
153 card->scr.sda_spec3 = 1; 154 card->scr.sda_spec3 = 1;
154 ret = mmc_io_rw_direct(card, 0, 0, 155 ret = mmc_io_rw_direct(card, 0, 0,
155 SDIO_CCCR_UHS, 0, &data); 156 SDIO_CCCR_UHS, 0, &data);
@@ -712,7 +713,7 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
712 /* 713 /*
713 * Read the common registers. 714 * Read the common registers.
714 */ 715 */
715 err = sdio_read_cccr(card); 716 err = sdio_read_cccr(card, ocr);
716 if (err) 717 if (err)
717 goto remove; 718 goto remove;
718 719
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index 68f81b9ee0fb..f573e7f9f740 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -146,15 +146,21 @@ static int sdio_irq_thread(void *_host)
146 } 146 }
147 147
148 set_current_state(TASK_INTERRUPTIBLE); 148 set_current_state(TASK_INTERRUPTIBLE);
149 if (host->caps & MMC_CAP_SDIO_IRQ) 149 if (host->caps & MMC_CAP_SDIO_IRQ) {
150 mmc_host_clk_hold(host);
150 host->ops->enable_sdio_irq(host, 1); 151 host->ops->enable_sdio_irq(host, 1);
152 mmc_host_clk_release(host);
153 }
151 if (!kthread_should_stop()) 154 if (!kthread_should_stop())
152 schedule_timeout(period); 155 schedule_timeout(period);
153 set_current_state(TASK_RUNNING); 156 set_current_state(TASK_RUNNING);
154 } while (!kthread_should_stop()); 157 } while (!kthread_should_stop());
155 158
156 if (host->caps & MMC_CAP_SDIO_IRQ) 159 if (host->caps & MMC_CAP_SDIO_IRQ) {
160 mmc_host_clk_hold(host);
157 host->ops->enable_sdio_irq(host, 0); 161 host->ops->enable_sdio_irq(host, 0);
162 mmc_host_clk_release(host);
163 }
158 164
159 pr_debug("%s: IRQ thread exiting with code %d\n", 165 pr_debug("%s: IRQ thread exiting with code %d\n",
160 mmc_hostname(host), ret); 166 mmc_hostname(host), ret);
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index cf444b0ca2cc..00fcbed1afd2 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -477,7 +477,6 @@ config MMC_SDHI
477config MMC_CB710 477config MMC_CB710
478 tristate "ENE CB710 MMC/SD Interface support" 478 tristate "ENE CB710 MMC/SD Interface support"
479 depends on PCI 479 depends on PCI
480 select MISC_DEVICES
481 select CB710_CORE 480 select CB710_CORE
482 help 481 help
483 This option enables support for MMC/SD part of ENE CB710/720 Flash 482 This option enables support for MMC/SD part of ENE CB710/720 Flash
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index fcfe1eb5acc8..6985cdb0bb26 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -969,11 +969,14 @@ static void atmci_start_request(struct atmel_mci *host,
969 host->data_status = 0; 969 host->data_status = 0;
970 970
971 if (host->need_reset) { 971 if (host->need_reset) {
972 iflags = atmci_readl(host, ATMCI_IMR);
973 iflags &= (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB);
972 atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST); 974 atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
973 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN); 975 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
974 atmci_writel(host, ATMCI_MR, host->mode_reg); 976 atmci_writel(host, ATMCI_MR, host->mode_reg);
975 if (host->caps.has_cfg_reg) 977 if (host->caps.has_cfg_reg)
976 atmci_writel(host, ATMCI_CFG, host->cfg_reg); 978 atmci_writel(host, ATMCI_CFG, host->cfg_reg);
979 atmci_writel(host, ATMCI_IER, iflags);
977 host->need_reset = false; 980 host->need_reset = false;
978 } 981 }
979 atmci_writel(host, ATMCI_SDCR, slot->sdc_reg); 982 atmci_writel(host, ATMCI_SDCR, slot->sdc_reg);
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 0e342793ff14..8bec1c36b159 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -22,7 +22,6 @@
22#include <linux/ioport.h> 22#include <linux/ioport.h>
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/platform_device.h> 24#include <linux/platform_device.h>
25#include <linux/scatterlist.h>
26#include <linux/seq_file.h> 25#include <linux/seq_file.h>
27#include <linux/slab.h> 26#include <linux/slab.h>
28#include <linux/stat.h> 27#include <linux/stat.h>
@@ -502,8 +501,14 @@ static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
502 host->dir_status = DW_MCI_SEND_STATUS; 501 host->dir_status = DW_MCI_SEND_STATUS;
503 502
504 if (dw_mci_submit_data_dma(host, data)) { 503 if (dw_mci_submit_data_dma(host, data)) {
504 int flags = SG_MITER_ATOMIC;
505 if (host->data->flags & MMC_DATA_READ)
506 flags |= SG_MITER_TO_SG;
507 else
508 flags |= SG_MITER_FROM_SG;
509
510 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
505 host->sg = data->sg; 511 host->sg = data->sg;
506 host->pio_offset = 0;
507 host->part_buf_start = 0; 512 host->part_buf_start = 0;
508 host->part_buf_count = 0; 513 host->part_buf_count = 0;
509 514
@@ -972,6 +977,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
972 * generates a block interrupt, hence setting 977 * generates a block interrupt, hence setting
973 * the scatter-gather pointer to NULL. 978 * the scatter-gather pointer to NULL.
974 */ 979 */
980 sg_miter_stop(&host->sg_miter);
975 host->sg = NULL; 981 host->sg = NULL;
976 ctrl = mci_readl(host, CTRL); 982 ctrl = mci_readl(host, CTRL);
977 ctrl |= SDMMC_CTRL_FIFO_RESET; 983 ctrl |= SDMMC_CTRL_FIFO_RESET;
@@ -1311,54 +1317,44 @@ static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
1311 1317
1312static void dw_mci_read_data_pio(struct dw_mci *host) 1318static void dw_mci_read_data_pio(struct dw_mci *host)
1313{ 1319{
1314 struct scatterlist *sg = host->sg; 1320 struct sg_mapping_iter *sg_miter = &host->sg_miter;
1315 void *buf = sg_virt(sg); 1321 void *buf;
1316 unsigned int offset = host->pio_offset; 1322 unsigned int offset;
1317 struct mmc_data *data = host->data; 1323 struct mmc_data *data = host->data;
1318 int shift = host->data_shift; 1324 int shift = host->data_shift;
1319 u32 status; 1325 u32 status;
1320 unsigned int nbytes = 0, len; 1326 unsigned int nbytes = 0, len;
1327 unsigned int remain, fcnt;
1321 1328
1322 do { 1329 do {
1323 len = host->part_buf_count + 1330 if (!sg_miter_next(sg_miter))
1324 (SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift); 1331 goto done;
1325 if (offset + len <= sg->length) { 1332
1333 host->sg = sg_miter->__sg;
1334 buf = sg_miter->addr;
1335 remain = sg_miter->length;
1336 offset = 0;
1337
1338 do {
1339 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
1340 << shift) + host->part_buf_count;
1341 len = min(remain, fcnt);
1342 if (!len)
1343 break;
1326 dw_mci_pull_data(host, (void *)(buf + offset), len); 1344 dw_mci_pull_data(host, (void *)(buf + offset), len);
1327
1328 offset += len; 1345 offset += len;
1329 nbytes += len; 1346 nbytes += len;
1330 1347 remain -= len;
1331 if (offset == sg->length) { 1348 } while (remain);
1332 flush_dcache_page(sg_page(sg)); 1349 sg_miter->consumed = offset;
1333 host->sg = sg = sg_next(sg);
1334 if (!sg)
1335 goto done;
1336
1337 offset = 0;
1338 buf = sg_virt(sg);
1339 }
1340 } else {
1341 unsigned int remaining = sg->length - offset;
1342 dw_mci_pull_data(host, (void *)(buf + offset),
1343 remaining);
1344 nbytes += remaining;
1345
1346 flush_dcache_page(sg_page(sg));
1347 host->sg = sg = sg_next(sg);
1348 if (!sg)
1349 goto done;
1350
1351 offset = len - remaining;
1352 buf = sg_virt(sg);
1353 dw_mci_pull_data(host, buf, offset);
1354 nbytes += offset;
1355 }
1356 1350
1357 status = mci_readl(host, MINTSTS); 1351 status = mci_readl(host, MINTSTS);
1358 mci_writel(host, RINTSTS, SDMMC_INT_RXDR); 1352 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1359 if (status & DW_MCI_DATA_ERROR_FLAGS) { 1353 if (status & DW_MCI_DATA_ERROR_FLAGS) {
1360 host->data_status = status; 1354 host->data_status = status;
1361 data->bytes_xfered += nbytes; 1355 data->bytes_xfered += nbytes;
1356 sg_miter_stop(sg_miter);
1357 host->sg = NULL;
1362 smp_wmb(); 1358 smp_wmb();
1363 1359
1364 set_bit(EVENT_DATA_ERROR, &host->pending_events); 1360 set_bit(EVENT_DATA_ERROR, &host->pending_events);
@@ -1367,65 +1363,66 @@ static void dw_mci_read_data_pio(struct dw_mci *host)
1367 return; 1363 return;
1368 } 1364 }
1369 } while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/ 1365 } while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/
1370 host->pio_offset = offset;
1371 data->bytes_xfered += nbytes; 1366 data->bytes_xfered += nbytes;
1367
1368 if (!remain) {
1369 if (!sg_miter_next(sg_miter))
1370 goto done;
1371 sg_miter->consumed = 0;
1372 }
1373 sg_miter_stop(sg_miter);
1372 return; 1374 return;
1373 1375
1374done: 1376done:
1375 data->bytes_xfered += nbytes; 1377 data->bytes_xfered += nbytes;
1378 sg_miter_stop(sg_miter);
1379 host->sg = NULL;
1376 smp_wmb(); 1380 smp_wmb();
1377 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 1381 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1378} 1382}
1379 1383
1380static void dw_mci_write_data_pio(struct dw_mci *host) 1384static void dw_mci_write_data_pio(struct dw_mci *host)
1381{ 1385{
1382 struct scatterlist *sg = host->sg; 1386 struct sg_mapping_iter *sg_miter = &host->sg_miter;
1383 void *buf = sg_virt(sg); 1387 void *buf;
1384 unsigned int offset = host->pio_offset; 1388 unsigned int offset;
1385 struct mmc_data *data = host->data; 1389 struct mmc_data *data = host->data;
1386 int shift = host->data_shift; 1390 int shift = host->data_shift;
1387 u32 status; 1391 u32 status;
1388 unsigned int nbytes = 0, len; 1392 unsigned int nbytes = 0, len;
1393 unsigned int fifo_depth = host->fifo_depth;
1394 unsigned int remain, fcnt;
1389 1395
1390 do { 1396 do {
1391 len = ((host->fifo_depth - 1397 if (!sg_miter_next(sg_miter))
1392 SDMMC_GET_FCNT(mci_readl(host, STATUS))) << shift) 1398 goto done;
1393 - host->part_buf_count; 1399
1394 if (offset + len <= sg->length) { 1400 host->sg = sg_miter->__sg;
1401 buf = sg_miter->addr;
1402 remain = sg_miter->length;
1403 offset = 0;
1404
1405 do {
1406 fcnt = ((fifo_depth -
1407 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
1408 << shift) - host->part_buf_count;
1409 len = min(remain, fcnt);
1410 if (!len)
1411 break;
1395 host->push_data(host, (void *)(buf + offset), len); 1412 host->push_data(host, (void *)(buf + offset), len);
1396
1397 offset += len; 1413 offset += len;
1398 nbytes += len; 1414 nbytes += len;
1399 if (offset == sg->length) { 1415 remain -= len;
1400 host->sg = sg = sg_next(sg); 1416 } while (remain);
1401 if (!sg) 1417 sg_miter->consumed = offset;
1402 goto done;
1403
1404 offset = 0;
1405 buf = sg_virt(sg);
1406 }
1407 } else {
1408 unsigned int remaining = sg->length - offset;
1409
1410 host->push_data(host, (void *)(buf + offset),
1411 remaining);
1412 nbytes += remaining;
1413
1414 host->sg = sg = sg_next(sg);
1415 if (!sg)
1416 goto done;
1417
1418 offset = len - remaining;
1419 buf = sg_virt(sg);
1420 host->push_data(host, (void *)buf, offset);
1421 nbytes += offset;
1422 }
1423 1418
1424 status = mci_readl(host, MINTSTS); 1419 status = mci_readl(host, MINTSTS);
1425 mci_writel(host, RINTSTS, SDMMC_INT_TXDR); 1420 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1426 if (status & DW_MCI_DATA_ERROR_FLAGS) { 1421 if (status & DW_MCI_DATA_ERROR_FLAGS) {
1427 host->data_status = status; 1422 host->data_status = status;
1428 data->bytes_xfered += nbytes; 1423 data->bytes_xfered += nbytes;
1424 sg_miter_stop(sg_miter);
1425 host->sg = NULL;
1429 1426
1430 smp_wmb(); 1427 smp_wmb();
1431 1428
@@ -1435,12 +1432,20 @@ static void dw_mci_write_data_pio(struct dw_mci *host)
1435 return; 1432 return;
1436 } 1433 }
1437 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */ 1434 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
1438 host->pio_offset = offset;
1439 data->bytes_xfered += nbytes; 1435 data->bytes_xfered += nbytes;
1436
1437 if (!remain) {
1438 if (!sg_miter_next(sg_miter))
1439 goto done;
1440 sg_miter->consumed = 0;
1441 }
1442 sg_miter_stop(sg_miter);
1440 return; 1443 return;
1441 1444
1442done: 1445done:
1443 data->bytes_xfered += nbytes; 1446 data->bytes_xfered += nbytes;
1447 sg_miter_stop(sg_miter);
1448 host->sg = NULL;
1444 smp_wmb(); 1449 smp_wmb();
1445 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 1450 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1446} 1451}
@@ -1643,6 +1648,7 @@ static void dw_mci_work_routine_card(struct work_struct *work)
1643 * block interrupt, hence setting the 1648 * block interrupt, hence setting the
1644 * scatter-gather pointer to NULL. 1649 * scatter-gather pointer to NULL.
1645 */ 1650 */
1651 sg_miter_stop(&host->sg_miter);
1646 host->sg = NULL; 1652 host->sg = NULL;
1647 1653
1648 ctrl = mci_readl(host, CTRL); 1654 ctrl = mci_readl(host, CTRL);
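
The dw_mmc PIO paths above are rebuilt around the generic sg_mapping_iter, which replaces the hand-rolled pio_offset and flush_dcache_page() bookkeeping: SG_MITER_ATOMIC maps each scatterlist entry with kmap_atomic(), SG_MITER_TO_SG is used for card reads (data written into the list) and SG_MITER_FROM_SG for card writes. A hedged sketch of the loop shape, with illustrative names (the pull callback stands in for the driver's FIFO drain):

    #include <linux/scatterlist.h>

    /* The sg_miter body must not sleep while SG_MITER_ATOMIC is set;
     * sg_miter_stop() unmaps the current page and flushes it. */
    static void example_drain_fifo(struct sg_mapping_iter *miter,
                                   struct scatterlist *sgl, unsigned int nents,
                                   void (*pull)(void *buf, unsigned int len),
                                   unsigned int fifo_bytes)
    {
            sg_miter_start(miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_TO_SG);

            while (fifo_bytes && sg_miter_next(miter)) {
                    unsigned int len = min_t(unsigned int, miter->length,
                                             fifo_bytes);

                    pull(miter->addr, len);         /* copy out of the FIFO */
                    miter->consumed = len;          /* may be < miter->length */
                    fifo_bytes -= len;
            }

            sg_miter_stop(miter);
    }
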
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c
index ab66f2454dc4..1534b582c419 100644
--- a/drivers/mmc/host/of_mmc_spi.c
+++ b/drivers/mmc/host/of_mmc_spi.c
@@ -113,8 +113,8 @@ struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi)
113 const int j = i * 2; 113 const int j = i * 2;
114 u32 mask; 114 u32 mask;
115 115
116 mask = mmc_vddrange_to_ocrmask(voltage_ranges[j], 116 mask = mmc_vddrange_to_ocrmask(be32_to_cpu(voltage_ranges[j]),
117 voltage_ranges[j + 1]); 117 be32_to_cpu(voltage_ranges[j + 1]));
118 if (!mask) { 118 if (!mask) {
119 ret = -EINVAL; 119 ret = -EINVAL;
120 dev_err(dev, "OF: voltage-range #%d is invalid\n", i); 120 dev_err(dev, "OF: voltage-range #%d is invalid\n", i);
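
The of_mmc_spi fix above matters because device-tree properties are stored big-endian: each cell of "voltage-ranges" has to pass through be32_to_cpu() before mmc_vddrange_to_ocrmask() sees a host-order millivolt value. A minimal sketch of parsing such a property, with illustrative names and error handling:

    #include <linux/of.h>

    static int example_parse_voltage_ranges(struct device_node *np)
    {
            const __be32 *ranges;
            int size, num, i;

            ranges = of_get_property(np, "voltage-ranges", &size);
            if (!ranges || size % (2 * sizeof(u32)))
                    return -EINVAL;

            num = size / sizeof(u32);
            for (i = 0; i < num; i += 2) {
                    u32 vmin = be32_to_cpu(ranges[i]);      /* fix up endianness */
                    u32 vmax = be32_to_cpu(ranges[i + 1]);

                    pr_info("range %d: %u..%u mV\n", i / 2, vmin, vmax);
            }
            return 0;
    }
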
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index ff4adc018041..5d876ff86f37 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -38,6 +38,23 @@ static u8 esdhc_readb(struct sdhci_host *host, int reg)
38 int base = reg & ~0x3; 38 int base = reg & ~0x3;
39 int shift = (reg & 0x3) * 8; 39 int shift = (reg & 0x3) * 8;
40 u8 ret = (in_be32(host->ioaddr + base) >> shift) & 0xff; 40 u8 ret = (in_be32(host->ioaddr + base) >> shift) & 0xff;
41
42 /*
 43 * "DMA select" is located at offset 0x28 in the SD specification, but on
 44 * P5020 or P3041 it is located at 0x29.
45 */
46 if (reg == SDHCI_HOST_CONTROL) {
47 u32 dma_bits;
48
49 dma_bits = in_be32(host->ioaddr + reg);
 50 /* DMA select is bits 22,23 in the Protocol Control Register */
51 dma_bits = (dma_bits >> 5) & SDHCI_CTRL_DMA_MASK;
52
53 /* fixup the result */
54 ret &= ~SDHCI_CTRL_DMA_MASK;
55 ret |= dma_bits;
56 }
57
41 return ret; 58 return ret;
42} 59}
43 60
@@ -56,6 +73,21 @@ static void esdhc_writew(struct sdhci_host *host, u16 val, int reg)
56 73
57static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg) 74static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg)
58{ 75{
76 /*
 77 * "DMA select" is located at offset 0x28 in the SD specification, but on
 78 * P5020 or P3041 it is located at 0x29.
79 */
80 if (reg == SDHCI_HOST_CONTROL) {
81 u32 dma_bits;
82
 83 /* DMA select is bits 22,23 in the Protocol Control Register */
84 dma_bits = (val & SDHCI_CTRL_DMA_MASK) << 5;
85 clrsetbits_be32(host->ioaddr + reg , SDHCI_CTRL_DMA_MASK << 5,
86 dma_bits);
87 val &= ~SDHCI_CTRL_DMA_MASK;
88 val |= in_be32(host->ioaddr + reg) & SDHCI_CTRL_DMA_MASK;
89 }
90
59 /* Prevent SDHCI core from writing reserved bits (e.g. HISPD). */ 91 /* Prevent SDHCI core from writing reserved bits (e.g. HISPD). */
60 if (reg == SDHCI_HOST_CONTROL) 92 if (reg == SDHCI_HOST_CONTROL)
61 val &= ~ESDHC_HOST_CONTROL_RES; 93 val &= ~ESDHC_HOST_CONTROL_RES;
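
The two esdhc hunks above shuffle the 2-bit "DMA select" field between the byte-wide SDHCI view of HOST_CONTROL (SDHCI_CTRL_DMA_MASK, bits 3..4, as defined by the SDHCI core) and the 32-bit big-endian Protocol Control register, where P5020/P3041 keep the field five bit positions higher. A small stand-alone C illustration of that relocation arithmetic (the mask value is assumed from sdhci.h):

    #include <stdint.h>
    #include <stdio.h>

    #define SDHCI_CTRL_DMA_MASK     0x18    /* bits 3..4 of SDHCI HOST_CONTROL */

    int main(void)
    {
            uint8_t  host_control = 0x10;   /* example: ADMA2 selected */
            uint32_t proctl = 0;
            uint8_t  readback;

            /* write path: relocate bits 3..4 up by five positions,
             * as the esdhc_writeb() hunk does with clrsetbits_be32() */
            proctl &= ~((uint32_t)SDHCI_CTRL_DMA_MASK << 5);
            proctl |= (uint32_t)(host_control & SDHCI_CTRL_DMA_MASK) << 5;

            /* read path: fold the field back into the byte-wide view,
             * as the esdhc_readb() hunk does */
            readback = (proctl >> 5) & SDHCI_CTRL_DMA_MASK;

            printf("proctl=0x%08x readback=0x%02x\n", proctl, readback);
            return readback == (host_control & SDHCI_CTRL_DMA_MASK) ? 0 : 1;
    }
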
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 7165e6a09274..6ebdc4010e7c 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -250,7 +250,7 @@ static int mfd_emmc_probe_slot(struct sdhci_pci_slot *slot)
250 250
251static int mfd_sdio_probe_slot(struct sdhci_pci_slot *slot) 251static int mfd_sdio_probe_slot(struct sdhci_pci_slot *slot)
252{ 252{
253 slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD; 253 slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE;
254 return 0; 254 return 0;
255} 255}
256 256
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index 03970bcb3495..c5c2a48bdd94 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -2,7 +2,7 @@
2 * sdhci-pltfm.c Support for SDHCI platform devices 2 * sdhci-pltfm.c Support for SDHCI platform devices
3 * Copyright (c) 2009 Intel Corporation 3 * Copyright (c) 2009 Intel Corporation
4 * 4 *
5 * Copyright (c) 2007 Freescale Semiconductor, Inc. 5 * Copyright (c) 2007, 2011 Freescale Semiconductor, Inc.
6 * Copyright (c) 2009 MontaVista Software, Inc. 6 * Copyright (c) 2009 MontaVista Software, Inc.
7 * 7 *
8 * Authors: Xiaobo Xie <X.Xie@freescale.com> 8 * Authors: Xiaobo Xie <X.Xie@freescale.com>
@@ -71,6 +71,14 @@ void sdhci_get_of_property(struct platform_device *pdev)
71 if (sdhci_of_wp_inverted(np)) 71 if (sdhci_of_wp_inverted(np))
72 host->quirks |= SDHCI_QUIRK_INVERTED_WRITE_PROTECT; 72 host->quirks |= SDHCI_QUIRK_INVERTED_WRITE_PROTECT;
73 73
74 if (of_device_is_compatible(np, "fsl,p2020-rev1-esdhc"))
75 host->quirks |= SDHCI_QUIRK_BROKEN_DMA;
76
77 if (of_device_is_compatible(np, "fsl,p2020-esdhc") ||
78 of_device_is_compatible(np, "fsl,p1010-esdhc") ||
79 of_device_is_compatible(np, "fsl,mpc8536-esdhc"))
80 host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
81
74 clk = of_get_property(np, "clock-frequency", &size); 82 clk = of_get_property(np, "clock-frequency", &size);
75 if (clk && size == sizeof(*clk) && *clk) 83 if (clk && size == sizeof(*clk) && *clk)
76 pltfm_host->clock = be32_to_cpup(clk); 84 pltfm_host->clock = be32_to_cpup(clk);
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index f5d8b53be333..352d4797865b 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -1327,7 +1327,7 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
1327 if (ret < 0) 1327 if (ret < 0)
1328 goto clean_up2; 1328 goto clean_up2;
1329 1329
1330 mmc_add_host(mmc); 1330 INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work);
1331 1331
1332 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); 1332 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
1333 1333
@@ -1338,22 +1338,24 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
1338 } 1338 }
1339 ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:int", host); 1339 ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:int", host);
1340 if (ret) { 1340 if (ret) {
1341 free_irq(irq[0], host);
1342 dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n"); 1341 dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
1343 goto clean_up3; 1342 goto clean_up4;
1344 } 1343 }
1345 1344
1346 INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work); 1345 ret = mmc_add_host(mmc);
1347 1346 if (ret < 0)
1348 mmc_detect_change(host->mmc, 0); 1347 goto clean_up5;
1349 1348
1350 dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION); 1349 dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION);
1351 dev_dbg(&pdev->dev, "chip ver H'%04x\n", 1350 dev_dbg(&pdev->dev, "chip ver H'%04x\n",
1352 sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff); 1351 sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff);
1353 return ret; 1352 return ret;
1354 1353
1354clean_up5:
1355 free_irq(irq[1], host);
1356clean_up4:
1357 free_irq(irq[0], host);
1355clean_up3: 1358clean_up3:
1356 mmc_remove_host(mmc);
1357 pm_runtime_suspend(&pdev->dev); 1359 pm_runtime_suspend(&pdev->dev);
1358clean_up2: 1360clean_up2:
1359 pm_runtime_disable(&pdev->dev); 1361 pm_runtime_disable(&pdev->dev);
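
The sh_mmcif hunk reorders probe so mmc_add_host() runs only after both IRQ handlers are installed, and adds clean_up4/clean_up5 labels so a late failure unwinds exactly the resources acquired so far, in reverse order. A stand-alone illustration of that goto-ladder unwind pattern (entirely synthetic: malloc/free stand in for request_irq()/free_irq() and mmc_add_host()):

    #include <stdio.h>
    #include <stdlib.h>

    static void *acquire(int step, int fail_step)
    {
            return step == fail_step ? NULL : malloc(16);
    }

    static int probe_like(int fail_step)
    {
            void *irq0, *irq1, *host;

            irq0 = acquire(0, fail_step);           /* request_irq(irq[0]) */
            if (!irq0)
                    goto err_out;
            irq1 = acquire(1, fail_step);           /* request_irq(irq[1]) */
            if (!irq1)
                    goto err_free_irq0;
            host = acquire(2, fail_step);           /* mmc_add_host(), last */
            if (!host)
                    goto err_free_irq1;

            free(host); free(irq1); free(irq0);     /* normal teardown elsewhere */
            return 0;

    err_free_irq1:
            free(irq1);                             /* reverse order of acquisition */
    err_free_irq0:
            free(irq0);
    err_out:
            return -1;
    }

    int main(void)
    {
            int step;

            for (step = 0; step < 4; step++)
                    printf("fail at step %d -> %d\n", step, probe_like(step));
            return 0;
    }
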
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index a95e6d901726..f96c536d130a 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -20,8 +20,8 @@
20#include <linux/mmc/tmio.h> 20#include <linux/mmc/tmio.h>
21#include <linux/mutex.h> 21#include <linux/mutex.h>
22#include <linux/pagemap.h> 22#include <linux/pagemap.h>
23#include <linux/spinlock.h>
24#include <linux/scatterlist.h> 23#include <linux/scatterlist.h>
24#include <linux/spinlock.h>
25 25
26/* Definitions for values the CTRL_SDIO_STATUS register can take. */ 26/* Definitions for values the CTRL_SDIO_STATUS register can take. */
27#define TMIO_SDIO_STAT_IOIRQ 0x0001 27#define TMIO_SDIO_STAT_IOIRQ 0x0001
@@ -120,6 +120,7 @@ void tmio_mmc_start_dma(struct tmio_mmc_host *host, struct mmc_data *data);
120void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable); 120void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable);
121void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata); 121void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata);
122void tmio_mmc_release_dma(struct tmio_mmc_host *host); 122void tmio_mmc_release_dma(struct tmio_mmc_host *host);
123void tmio_mmc_abort_dma(struct tmio_mmc_host *host);
123#else 124#else
124static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host, 125static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host,
125 struct mmc_data *data) 126 struct mmc_data *data)
@@ -140,6 +141,10 @@ static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host,
140static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host) 141static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host)
141{ 142{
142} 143}
144
145static inline void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
146{
147}
143#endif 148#endif
144 149
145#ifdef CONFIG_PM 150#ifdef CONFIG_PM
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
index 7a6e6cc8f8b8..8253ec12003e 100644
--- a/drivers/mmc/host/tmio_mmc_dma.c
+++ b/drivers/mmc/host/tmio_mmc_dma.c
@@ -34,6 +34,18 @@ void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
34#endif 34#endif
35} 35}
36 36
37void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
38{
39 tmio_mmc_enable_dma(host, false);
40
41 if (host->chan_rx)
42 dmaengine_terminate_all(host->chan_rx);
43 if (host->chan_tx)
44 dmaengine_terminate_all(host->chan_tx);
45
46 tmio_mmc_enable_dma(host, true);
47}
48
37static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host) 49static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
38{ 50{
39 struct scatterlist *sg = host->sg_ptr, *sg_tmp; 51 struct scatterlist *sg = host->sg_ptr, *sg_tmp;
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index abad01b37cfb..5f9ad74fbf80 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -41,8 +41,8 @@
41#include <linux/platform_device.h> 41#include <linux/platform_device.h>
42#include <linux/pm_runtime.h> 42#include <linux/pm_runtime.h>
43#include <linux/scatterlist.h> 43#include <linux/scatterlist.h>
44#include <linux/workqueue.h>
45#include <linux/spinlock.h> 44#include <linux/spinlock.h>
45#include <linux/workqueue.h>
46 46
47#include "tmio_mmc.h" 47#include "tmio_mmc.h"
48 48
@@ -246,6 +246,7 @@ static void tmio_mmc_reset_work(struct work_struct *work)
246 /* Ready for new calls */ 246 /* Ready for new calls */
247 host->mrq = NULL; 247 host->mrq = NULL;
248 248
249 tmio_mmc_abort_dma(host);
249 mmc_request_done(host->mmc, mrq); 250 mmc_request_done(host->mmc, mrq);
250} 251}
251 252
@@ -272,6 +273,9 @@ static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
272 host->mrq = NULL; 273 host->mrq = NULL;
273 spin_unlock_irqrestore(&host->lock, flags); 274 spin_unlock_irqrestore(&host->lock, flags);
274 275
276 if (mrq->cmd->error || (mrq->data && mrq->data->error))
277 tmio_mmc_abort_dma(host);
278
275 mmc_request_done(host->mmc, mrq); 279 mmc_request_done(host->mmc, mrq);
276} 280}
277 281
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 6ae9ca01388b..9a9ce71a71fc 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -119,7 +119,7 @@ static int mtd_cls_suspend(struct device *dev, pm_message_t state)
119{ 119{
120 struct mtd_info *mtd = dev_get_drvdata(dev); 120 struct mtd_info *mtd = dev_get_drvdata(dev);
121 121
122 return mtd_suspend(mtd); 122 return mtd ? mtd_suspend(mtd) : 0;
123} 123}
124 124
125static int mtd_cls_resume(struct device *dev) 125static int mtd_cls_resume(struct device *dev)
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 4dd056e2e16a..35b4fb55dbd6 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -161,6 +161,37 @@ static int atmel_nand_device_ready(struct mtd_info *mtd)
161 !!host->board->rdy_pin_active_low; 161 !!host->board->rdy_pin_active_low;
162} 162}
163 163
164/*
165 * Minimal-overhead PIO for data access.
166 */
167static void atmel_read_buf8(struct mtd_info *mtd, u8 *buf, int len)
168{
169 struct nand_chip *nand_chip = mtd->priv;
170
171 __raw_readsb(nand_chip->IO_ADDR_R, buf, len);
172}
173
174static void atmel_read_buf16(struct mtd_info *mtd, u8 *buf, int len)
175{
176 struct nand_chip *nand_chip = mtd->priv;
177
178 __raw_readsw(nand_chip->IO_ADDR_R, buf, len / 2);
179}
180
181static void atmel_write_buf8(struct mtd_info *mtd, const u8 *buf, int len)
182{
183 struct nand_chip *nand_chip = mtd->priv;
184
185 __raw_writesb(nand_chip->IO_ADDR_W, buf, len);
186}
187
188static void atmel_write_buf16(struct mtd_info *mtd, const u8 *buf, int len)
189{
190 struct nand_chip *nand_chip = mtd->priv;
191
192 __raw_writesw(nand_chip->IO_ADDR_W, buf, len / 2);
193}
194
164static void dma_complete_func(void *completion) 195static void dma_complete_func(void *completion)
165{ 196{
166 complete(completion); 197 complete(completion);
@@ -235,27 +266,33 @@ err_buf:
235static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len) 266static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len)
236{ 267{
237 struct nand_chip *chip = mtd->priv; 268 struct nand_chip *chip = mtd->priv;
269 struct atmel_nand_host *host = chip->priv;
238 270
239 if (use_dma && len > mtd->oobsize) 271 if (use_dma && len > mtd->oobsize)
240 /* only use DMA for bigger than oob size: better performances */ 272 /* only use DMA for bigger than oob size: better performances */
241 if (atmel_nand_dma_op(mtd, buf, len, 1) == 0) 273 if (atmel_nand_dma_op(mtd, buf, len, 1) == 0)
242 return; 274 return;
243 275
244 /* if no DMA operation possible, use PIO */ 276 if (host->board->bus_width_16)
245 memcpy_fromio(buf, chip->IO_ADDR_R, len); 277 atmel_read_buf16(mtd, buf, len);
278 else
279 atmel_read_buf8(mtd, buf, len);
246} 280}
247 281
248static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len) 282static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
249{ 283{
250 struct nand_chip *chip = mtd->priv; 284 struct nand_chip *chip = mtd->priv;
285 struct atmel_nand_host *host = chip->priv;
251 286
252 if (use_dma && len > mtd->oobsize) 287 if (use_dma && len > mtd->oobsize)
253 /* only use DMA for bigger than oob size: better performances */ 288 /* only use DMA for bigger than oob size: better performances */
254 if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) == 0) 289 if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) == 0)
255 return; 290 return;
256 291
257 /* if no DMA operation possible, use PIO */ 292 if (host->board->bus_width_16)
258 memcpy_toio(chip->IO_ADDR_W, buf, len); 293 atmel_write_buf16(mtd, buf, len);
294 else
295 atmel_write_buf8(mtd, buf, len);
259} 296}
260 297
261/* 298/*
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index 7f680420bfab..7db6555ed3ba 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -69,17 +69,19 @@ static int clear_poll_bit(void __iomem *addr, u32 mask)
69 * [1] enable the module. 69 * [1] enable the module.
70 * [2] reset the module. 70 * [2] reset the module.
71 * 71 *
72 * In most of the cases, it's ok. But there is a hardware bug in the BCH block. 72 * In most of the cases, it's ok.
73 * But in MX23, there is a hardware bug in the BCH block (see erratum #2847).
73 * If you try to soft reset the BCH block, it becomes unusable until 74 * If you try to soft reset the BCH block, it becomes unusable until
74 * the next hard reset. This case occurs in the NAND boot mode. When the board 75 * the next hard reset. This case occurs in the NAND boot mode. When the board
75 * boots by NAND, the ROM of the chip will initialize the BCH blocks itself. 76 * boots by NAND, the ROM of the chip will initialize the BCH blocks itself.
 76 * So if the driver tries to reset the BCH again, the BCH will not work anymore. 77 * So if the driver tries to reset the BCH again, the BCH will not work anymore.
77 * You will see a DMA timeout in this case. 78 * You will see a DMA timeout in this case. The bug has been fixed
79 * in the following chips, such as MX28.
78 * 80 *
79 * To avoid this bug, just add a new parameter `just_enable` for 81 * To avoid this bug, just add a new parameter `just_enable` for
80 * the mxs_reset_block(), and rewrite it here. 82 * the mxs_reset_block(), and rewrite it here.
81 */ 83 */
82int gpmi_reset_block(void __iomem *reset_addr, bool just_enable) 84static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
83{ 85{
84 int ret; 86 int ret;
85 int timeout = 0x400; 87 int timeout = 0x400;
@@ -206,7 +208,15 @@ int bch_set_geometry(struct gpmi_nand_data *this)
206 if (ret) 208 if (ret)
207 goto err_out; 209 goto err_out;
208 210
209 ret = gpmi_reset_block(r->bch_regs, true); 211 /*
212 * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
213 * chip, otherwise it will lock up. So we skip resetting BCH on the MX23.
214 * On the other hand, the MX28 needs the reset, because one case has been
215 * seen where the BCH produced ECC errors constantly after 10000
216 * consecutive reboots. The latter case has not been seen on the MX23 yet,
217 * still we don't know if it could happen there as well.
218 */
219 ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
210 if (ret) 220 if (ret)
211 goto err_out; 221 goto err_out;
212 222
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 35b4565050f1..8a393f9e6027 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2588,7 +2588,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
2588 instr->state = MTD_ERASING; 2588 instr->state = MTD_ERASING;
2589 2589
2590 while (len) { 2590 while (len) {
2591 /* Heck if we have a bad block, we do not erase bad blocks! */ 2591 /* Check if we have a bad block, we do not erase bad blocks! */
2592 if (nand_block_checkbad(mtd, ((loff_t) page) << 2592 if (nand_block_checkbad(mtd, ((loff_t) page) <<
2593 chip->page_shift, 0, allowbbt)) { 2593 chip->page_shift, 0, allowbbt)) {
2594 pr_warn("%s: attempt to erase a bad block at page 0x%08x\n", 2594 pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index 766896747643..c30f0e6f1048 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -440,12 +440,14 @@ static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
440 for (i = 0; i < dlc; i++) 440 for (i = 0; i < dlc; i++)
441 cc770_write_reg(priv, msgobj[mo].data[i], cf->data[i]); 441 cc770_write_reg(priv, msgobj[mo].data[i], cf->data[i]);
442 442
443 /* Store echo skb before starting the transfer */
444 can_put_echo_skb(skb, dev, 0);
445
443 cc770_write_reg(priv, msgobj[mo].ctrl1, 446 cc770_write_reg(priv, msgobj[mo].ctrl1,
444 RMTPND_RES | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC); 447 RMTPND_RES | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC);
445 448
446 stats->tx_bytes += dlc; 449 stats->tx_bytes += dlc;
447 450
448 can_put_echo_skb(skb, dev, 0);
449 451
450 /* 452 /*
451 * HM: We had some cases of repeated IRQs so make sure the 453 * HM: We had some cases of repeated IRQs so make sure the
diff --git a/drivers/net/can/cc770/cc770_isa.c b/drivers/net/can/cc770/cc770_isa.c
index 4be5fe2c40a5..9f3a25ccd665 100644
--- a/drivers/net/can/cc770/cc770_isa.c
+++ b/drivers/net/can/cc770/cc770_isa.c
@@ -110,6 +110,11 @@ MODULE_PARM_DESC(bcr, "Bus configuration register (default=0x40 [CBY])");
110#define CC770_IOSIZE 0x20 110#define CC770_IOSIZE 0x20
111#define CC770_IOSIZE_INDIRECT 0x02 111#define CC770_IOSIZE_INDIRECT 0x02
112 112
113/* Spinlock for cc770_isa_port_write_reg_indirect
114 * and cc770_isa_port_read_reg_indirect
115 */
116static DEFINE_SPINLOCK(cc770_isa_port_lock);
117
113static struct platform_device *cc770_isa_devs[MAXDEV]; 118static struct platform_device *cc770_isa_devs[MAXDEV];
114 119
115static u8 cc770_isa_mem_read_reg(const struct cc770_priv *priv, int reg) 120static u8 cc770_isa_mem_read_reg(const struct cc770_priv *priv, int reg)
@@ -138,18 +143,27 @@ static u8 cc770_isa_port_read_reg_indirect(const struct cc770_priv *priv,
138 int reg) 143 int reg)
139{ 144{
140 unsigned long base = (unsigned long)priv->reg_base; 145 unsigned long base = (unsigned long)priv->reg_base;
146 unsigned long flags;
147 u8 val;
141 148
149 spin_lock_irqsave(&cc770_isa_port_lock, flags);
142 outb(reg, base); 150 outb(reg, base);
143 return inb(base + 1); 151 val = inb(base + 1);
152 spin_unlock_irqrestore(&cc770_isa_port_lock, flags);
153
154 return val;
144} 155}
145 156
146static void cc770_isa_port_write_reg_indirect(const struct cc770_priv *priv, 157static void cc770_isa_port_write_reg_indirect(const struct cc770_priv *priv,
147 int reg, u8 val) 158 int reg, u8 val)
148{ 159{
149 unsigned long base = (unsigned long)priv->reg_base; 160 unsigned long base = (unsigned long)priv->reg_base;
161 unsigned long flags;
150 162
163 spin_lock_irqsave(&cc770_isa_port_lock, flags);
151 outb(reg, base); 164 outb(reg, base);
152 outb(val, base + 1); 165 outb(val, base + 1);
166 spin_unlock_irqrestore(&cc770_isa_port_lock, flags);
153} 167}
154 168
155static int __devinit cc770_isa_probe(struct platform_device *pdev) 169static int __devinit cc770_isa_probe(struct platform_device *pdev)
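
The cc770_isa change above serializes indirect (indexed) port I/O: selecting a register with outb() and then touching the data port is a two-step sequence, so a concurrent access from another context can land between the steps and corrupt both transactions; hence the new cc770_isa_port_lock held with interrupts disabled. A brief sketch of the pattern, with illustrative names:

    #include <linux/spinlock.h>
    #include <linux/io.h>

    static DEFINE_SPINLOCK(example_indexed_io_lock);

    static u8 example_indexed_read(unsigned long base, int reg)
    {
            unsigned long flags;
            u8 val;

            spin_lock_irqsave(&example_indexed_io_lock, flags);
            outb(reg, base);                /* step 1: select the register */
            val = inb(base + 1);            /* step 2: read it, still under lock */
            spin_unlock_irqrestore(&example_indexed_io_lock, flags);

            return val;
    }
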
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 7fd8089946fb..96d235799ec1 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -118,6 +118,9 @@
118 (FLEXCAN_ESR_TWRN_INT | FLEXCAN_ESR_RWRN_INT | FLEXCAN_ESR_BOFF_INT) 118 (FLEXCAN_ESR_TWRN_INT | FLEXCAN_ESR_RWRN_INT | FLEXCAN_ESR_BOFF_INT)
119#define FLEXCAN_ESR_ERR_ALL \ 119#define FLEXCAN_ESR_ERR_ALL \
120 (FLEXCAN_ESR_ERR_BUS | FLEXCAN_ESR_ERR_STATE) 120 (FLEXCAN_ESR_ERR_BUS | FLEXCAN_ESR_ERR_STATE)
121#define FLEXCAN_ESR_ALL_INT \
122 (FLEXCAN_ESR_TWRN_INT | FLEXCAN_ESR_RWRN_INT | \
123 FLEXCAN_ESR_BOFF_INT | FLEXCAN_ESR_ERR_INT)
121 124
122/* FLEXCAN interrupt flag register (IFLAG) bits */ 125/* FLEXCAN interrupt flag register (IFLAG) bits */
123#define FLEXCAN_TX_BUF_ID 8 126#define FLEXCAN_TX_BUF_ID 8
@@ -577,7 +580,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
577 580
578 reg_iflag1 = flexcan_read(&regs->iflag1); 581 reg_iflag1 = flexcan_read(&regs->iflag1);
579 reg_esr = flexcan_read(&regs->esr); 582 reg_esr = flexcan_read(&regs->esr);
580 flexcan_write(FLEXCAN_ESR_ERR_INT, &regs->esr); /* ACK err IRQ */ 583 /* ACK all bus error and state change IRQ sources */
584 if (reg_esr & FLEXCAN_ESR_ALL_INT)
585 flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr);
581 586
582 /* 587 /*
583 * schedule NAPI in case of: 588 * schedule NAPI in case of:
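
The flexcan interrupt handler above now acknowledges only the ESR bits that are actually asserted, which is the safe discipline for a write-1-to-clear status register: writing a fixed constant can clear sources the handler never examined. A minimal sketch of that acknowledgement shape, using plain readl()/writel() in place of the driver's endian-aware accessors (names are illustrative):

    #include <linux/io.h>

    static inline void example_ack_esr(void __iomem *esr, u32 handled_mask)
    {
            u32 pending = readl(esr) & handled_mask;

            if (pending)
                    writel(pending, esr);   /* write-1-to-clear only what fired */
    }
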
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index d11fbb2b95ff..6edc25e0dd15 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -66,6 +66,7 @@
66#define PCH_IF_CREQ_BUSY BIT(15) 66#define PCH_IF_CREQ_BUSY BIT(15)
67 67
68#define PCH_STATUS_INT 0x8000 68#define PCH_STATUS_INT 0x8000
69#define PCH_RP 0x00008000
69#define PCH_REC 0x00007f00 70#define PCH_REC 0x00007f00
70#define PCH_TEC 0x000000ff 71#define PCH_TEC 0x000000ff
71 72
@@ -527,7 +528,7 @@ static void pch_can_error(struct net_device *ndev, u32 status)
527 priv->can.can_stats.error_passive++; 528 priv->can.can_stats.error_passive++;
528 state = CAN_STATE_ERROR_PASSIVE; 529 state = CAN_STATE_ERROR_PASSIVE;
529 cf->can_id |= CAN_ERR_CRTL; 530 cf->can_id |= CAN_ERR_CRTL;
530 if (((errc & PCH_REC) >> 8) > 127) 531 if (errc & PCH_RP)
531 cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE; 532 cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
532 if ((errc & PCH_TEC) > 127) 533 if ((errc & PCH_TEC) > 127)
533 cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE; 534 cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 2c7f5036f570..214795945bc4 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -39,9 +39,9 @@ MODULE_LICENSE("GPL v2");
39#define DRV_NAME "peak_pci" 39#define DRV_NAME "peak_pci"
40 40
41struct peak_pci_chan { 41struct peak_pci_chan {
42 void __iomem *cfg_base; /* Common for all channels */ 42 void __iomem *cfg_base; /* Common for all channels */
43 struct net_device *next_dev; /* Chain of network devices */ 43 struct net_device *prev_dev; /* Chain of network devices */
44 u16 icr_mask; /* Interrupt mask for fast ack */ 44 u16 icr_mask; /* Interrupt mask for fast ack */
45}; 45};
46 46
47#define PEAK_PCI_CAN_CLOCK (16000000 / 2) 47#define PEAK_PCI_CAN_CLOCK (16000000 / 2)
@@ -98,7 +98,7 @@ static int __devinit peak_pci_probe(struct pci_dev *pdev,
98{ 98{
99 struct sja1000_priv *priv; 99 struct sja1000_priv *priv;
100 struct peak_pci_chan *chan; 100 struct peak_pci_chan *chan;
101 struct net_device *dev, *dev0 = NULL; 101 struct net_device *dev;
102 void __iomem *cfg_base, *reg_base; 102 void __iomem *cfg_base, *reg_base;
103 u16 sub_sys_id, icr; 103 u16 sub_sys_id, icr;
104 int i, err, channels; 104 int i, err, channels;
@@ -196,18 +196,14 @@ static int __devinit peak_pci_probe(struct pci_dev *pdev,
196 } 196 }
197 197
198 /* Create chain of SJA1000 devices */ 198 /* Create chain of SJA1000 devices */
199 if (i == 0) 199 chan->prev_dev = pci_get_drvdata(pdev);
200 dev0 = dev; 200 pci_set_drvdata(pdev, dev);
201 else
202 chan->next_dev = dev;
203 201
204 dev_info(&pdev->dev, 202 dev_info(&pdev->dev,
205 "%s at reg_base=0x%p cfg_base=0x%p irq=%d\n", 203 "%s at reg_base=0x%p cfg_base=0x%p irq=%d\n",
206 dev->name, priv->reg_base, chan->cfg_base, dev->irq); 204 dev->name, priv->reg_base, chan->cfg_base, dev->irq);
207 } 205 }
208 206
209 pci_set_drvdata(pdev, dev0);
210
211 /* Enable interrupts */ 207 /* Enable interrupts */
212 writew(icr, cfg_base + PITA_ICR + 2); 208 writew(icr, cfg_base + PITA_ICR + 2);
213 209
@@ -217,12 +213,11 @@ failure_remove_channels:
217 /* Disable interrupts */ 213 /* Disable interrupts */
218 writew(0x0, cfg_base + PITA_ICR + 2); 214 writew(0x0, cfg_base + PITA_ICR + 2);
219 215
220 for (dev = dev0; dev; dev = chan->next_dev) { 216 for (dev = pci_get_drvdata(pdev); dev; dev = chan->prev_dev) {
221 unregister_sja1000dev(dev); 217 unregister_sja1000dev(dev);
222 free_sja1000dev(dev); 218 free_sja1000dev(dev);
223 priv = netdev_priv(dev); 219 priv = netdev_priv(dev);
224 chan = priv->priv; 220 chan = priv->priv;
225 dev = chan->next_dev;
226 } 221 }
227 222
228 pci_iounmap(pdev, reg_base); 223 pci_iounmap(pdev, reg_base);
@@ -241,7 +236,7 @@ failure_disable_pci:
241 236
242static void __devexit peak_pci_remove(struct pci_dev *pdev) 237static void __devexit peak_pci_remove(struct pci_dev *pdev)
243{ 238{
244 struct net_device *dev = pci_get_drvdata(pdev); /* First device */ 239 struct net_device *dev = pci_get_drvdata(pdev); /* Last device */
245 struct sja1000_priv *priv = netdev_priv(dev); 240 struct sja1000_priv *priv = netdev_priv(dev);
246 struct peak_pci_chan *chan = priv->priv; 241 struct peak_pci_chan *chan = priv->priv;
247 void __iomem *cfg_base = chan->cfg_base; 242 void __iomem *cfg_base = chan->cfg_base;
@@ -255,7 +250,7 @@ static void __devexit peak_pci_remove(struct pci_dev *pdev)
255 dev_info(&pdev->dev, "removing device %s\n", dev->name); 250 dev_info(&pdev->dev, "removing device %s\n", dev->name);
256 unregister_sja1000dev(dev); 251 unregister_sja1000dev(dev);
257 free_sja1000dev(dev); 252 free_sja1000dev(dev);
258 dev = chan->next_dev; 253 dev = chan->prev_dev;
259 if (!dev) 254 if (!dev)
260 break; 255 break;
261 priv = netdev_priv(dev); 256 priv = netdev_priv(dev);
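
The peak_pci rework above replaces the forward next_dev chain (anchored at the first channel) with a prev_dev pointer in each channel, keeping the PCI drvdata pointed at the most recently registered device; both the probe failure path and remove can then walk the chain backwards without special-casing the first entry. A minimal sketch of that backward walk, assuming the sja1000 helpers used in the hunk (the channel struct name is illustrative):

    #include <linux/pci.h>
    #include <linux/netdevice.h>
    #include "sja1000.h"    /* sja1000_priv, unregister_sja1000dev, free_sja1000dev */

    struct example_chan {
            struct net_device *prev_dev;    /* channel registered before this one */
    };

    static void example_remove_all(struct pci_dev *pdev)
    {
            struct net_device *dev = pci_get_drvdata(pdev); /* last channel */

            while (dev) {
                    struct sja1000_priv *priv = netdev_priv(dev);
                    struct example_chan *chan = priv->priv;
                    struct net_device *prev = chan->prev_dev;

                    unregister_sja1000dev(dev);
                    free_sja1000dev(dev);
                    dev = prev;                             /* walk backwards */
            }
    }
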
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index df809e3f130e..5a2e1e3588a1 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -745,9 +745,10 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
745 } 745 }
746 } 746 }
747 747
748 netif_receive_skb(skb); 748 netif_rx(skb);
749 stats->rx_packets++; 749 stats->rx_packets++;
750 stats->rx_bytes += cf->can_dlc; 750 stats->rx_bytes += cf->can_dlc;
751
751 return 0; 752 return 0;
752} 753}
753 754
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 9697c14b8dc6..7dae64d44e83 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -627,9 +627,6 @@ static int ems_usb_start(struct ems_usb *dev)
627 627
628 err = usb_submit_urb(urb, GFP_KERNEL); 628 err = usb_submit_urb(urb, GFP_KERNEL);
629 if (err) { 629 if (err) {
630 if (err == -ENODEV)
631 netif_device_detach(dev->netdev);
632
633 usb_unanchor_urb(urb); 630 usb_unanchor_urb(urb);
634 usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf, 631 usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
635 urb->transfer_dma); 632 urb->transfer_dma);
@@ -659,9 +656,6 @@ static int ems_usb_start(struct ems_usb *dev)
659 656
660 err = usb_submit_urb(dev->intr_urb, GFP_KERNEL); 657 err = usb_submit_urb(dev->intr_urb, GFP_KERNEL);
661 if (err) { 658 if (err) {
662 if (err == -ENODEV)
663 netif_device_detach(dev->netdev);
664
665 dev_warn(netdev->dev.parent, "intr URB submit failed: %d\n", 659 dev_warn(netdev->dev.parent, "intr URB submit failed: %d\n",
666 err); 660 err);
667 661
@@ -692,9 +686,6 @@ static int ems_usb_start(struct ems_usb *dev)
692 return 0; 686 return 0;
693 687
694failed: 688failed:
695 if (err == -ENODEV)
696 netif_device_detach(dev->netdev);
697
698 dev_warn(netdev->dev.parent, "couldn't submit control: %d\n", err); 689 dev_warn(netdev->dev.parent, "couldn't submit control: %d\n", err);
699 690
700 return err; 691 return err;
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 8153a3e0a1a4..f9b74c0a8492 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1842,7 +1842,7 @@ vortex_timer(unsigned long data)
1842 ok = 1; 1842 ok = 1;
1843 } 1843 }
1844 1844
1845 if (!netif_carrier_ok(dev)) 1845 if (dev->flags & IFF_SLAVE || !netif_carrier_ok(dev))
1846 next_tick = 5*HZ; 1846 next_tick = 5*HZ;
1847 1847
1848 if (vp->medialock) 1848 if (vp->medialock)
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 986019b2c849..c7ca7ec065ee 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -797,7 +797,7 @@ static int bcm_enet_open(struct net_device *dev)
797 if (priv->has_phy) { 797 if (priv->has_phy) {
798 /* connect to PHY */ 798 /* connect to PHY */
799 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, 799 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
800 priv->mac_id ? "1" : "0", priv->phy_id); 800 priv->mii_bus->id, priv->phy_id);
801 801
802 phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link, 0, 802 phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link, 0,
803 PHY_INTERFACE_MODE_MII); 803 PHY_INTERFACE_MODE_MII);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 03f3935fd8c2..7aee46983be4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -523,7 +523,6 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
523 skb = build_skb(data); 523 skb = build_skb(data);
524 524
525 if (likely(skb)) { 525 if (likely(skb)) {
526
527#ifdef BNX2X_STOP_ON_ERROR 526#ifdef BNX2X_STOP_ON_ERROR
528 if (pad + len > fp->rx_buf_size) { 527 if (pad + len > fp->rx_buf_size) {
529 BNX2X_ERR("skb_put is about to fail... " 528 BNX2X_ERR("skb_put is about to fail... "
@@ -557,7 +556,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
557 556
558 return; 557 return;
559 } 558 }
560 559 kfree(new_data);
561drop: 560drop:
562 /* drop the packet and keep the buffer in the bin */ 561 /* drop the packet and keep the buffer in the bin */
563 DP(NETIF_MSG_RX_STATUS, 562 DP(NETIF_MSG_RX_STATUS,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 1e3f978ee6da..254521319150 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -117,10 +117,6 @@ static int dropless_fc;
117module_param(dropless_fc, int, 0); 117module_param(dropless_fc, int, 0);
118MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring"); 118MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
119 119
120static int poll;
121module_param(poll, int, 0);
122MODULE_PARM_DESC(poll, " Use polling (for debug)");
123
124static int mrrs = -1; 120static int mrrs = -1;
125module_param(mrrs, int, 0); 121module_param(mrrs, int, 0);
126MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)"); 122MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
@@ -4834,20 +4830,11 @@ void bnx2x_drv_pulse(struct bnx2x *bp)
4834 4830
4835static void bnx2x_timer(unsigned long data) 4831static void bnx2x_timer(unsigned long data)
4836{ 4832{
4837 u8 cos;
4838 struct bnx2x *bp = (struct bnx2x *) data; 4833 struct bnx2x *bp = (struct bnx2x *) data;
4839 4834
4840 if (!netif_running(bp->dev)) 4835 if (!netif_running(bp->dev))
4841 return; 4836 return;
4842 4837
4843 if (poll) {
4844 struct bnx2x_fastpath *fp = &bp->fp[0];
4845
4846 for_each_cos_in_tx_queue(fp, cos)
4847 bnx2x_tx_int(bp, &fp->txdata[cos]);
4848 bnx2x_rx_int(fp, 1000);
4849 }
4850
4851 if (!BP_NOMCP(bp)) { 4838 if (!BP_NOMCP(bp)) {
4852 int mb_idx = BP_FW_MB_IDX(bp); 4839 int mb_idx = BP_FW_MB_IDX(bp);
4853 u32 drv_pulse; 4840 u32 drv_pulse;
@@ -10063,7 +10050,6 @@ static void __devinit bnx2x_set_modes_bitmap(struct bnx2x *bp)
10063static int __devinit bnx2x_init_bp(struct bnx2x *bp) 10050static int __devinit bnx2x_init_bp(struct bnx2x *bp)
10064{ 10051{
10065 int func; 10052 int func;
10066 int timer_interval;
10067 int rc; 10053 int rc;
10068 10054
10069 mutex_init(&bp->port.phy_mutex); 10055 mutex_init(&bp->port.phy_mutex);
@@ -10139,8 +10125,7 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
10139 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR; 10125 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
10140 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR; 10126 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
10141 10127
10142 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ); 10128 bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ;
10143 bp->current_interval = (poll ? poll : timer_interval);
10144 10129
10145 init_timer(&bp->timer); 10130 init_timer(&bp->timer);
10146 bp->timer.expires = jiffies + bp->current_interval; 10131 bp->timer.expires = jiffies + bp->current_interval;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index bc0121ac291e..1adef266fcd5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -1081,17 +1081,17 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
1081 estats->rx_stat_ifhcinbadoctets_lo); 1081 estats->rx_stat_ifhcinbadoctets_lo);
1082 1082
1083 ADD_64(fstats->total_bytes_received_hi, 1083 ADD_64(fstats->total_bytes_received_hi,
1084 tfunc->rcv_error_bytes.hi, 1084 le32_to_cpu(tfunc->rcv_error_bytes.hi),
1085 fstats->total_bytes_received_lo, 1085 fstats->total_bytes_received_lo,
1086 tfunc->rcv_error_bytes.lo); 1086 le32_to_cpu(tfunc->rcv_error_bytes.lo));
1087 1087
1088 memcpy(estats, &(fstats->total_bytes_received_hi), 1088 memcpy(estats, &(fstats->total_bytes_received_hi),
1089 sizeof(struct host_func_stats) - 2*sizeof(u32)); 1089 sizeof(struct host_func_stats) - 2*sizeof(u32));
1090 1090
1091 ADD_64(estats->error_bytes_received_hi, 1091 ADD_64(estats->error_bytes_received_hi,
1092 tfunc->rcv_error_bytes.hi, 1092 le32_to_cpu(tfunc->rcv_error_bytes.hi),
1093 estats->error_bytes_received_lo, 1093 estats->error_bytes_received_lo,
1094 tfunc->rcv_error_bytes.lo); 1094 le32_to_cpu(tfunc->rcv_error_bytes.lo));
1095 1095
1096 ADD_64(estats->etherstatsoverrsizepkts_hi, 1096 ADD_64(estats->etherstatsoverrsizepkts_hi,
1097 estats->rx_stat_dot3statsframestoolong_hi, 1097 estats->rx_stat_dot3statsframestoolong_hi,
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 9b44ec8096ba..803ea32aa99d 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -946,7 +946,7 @@ bnad_get_flash_partition_by_offset(struct bnad *bnad, u32 offset,
946 946
947 flash_attr = kzalloc(sizeof(struct bfa_flash_attr), GFP_KERNEL); 947 flash_attr = kzalloc(sizeof(struct bfa_flash_attr), GFP_KERNEL);
948 if (!flash_attr) 948 if (!flash_attr)
949 return -ENOMEM; 949 return 0;
950 950
951 fcomp.bnad = bnad; 951 fcomp.bnad = bnad;
952 fcomp.comp_status = 0; 952 fcomp.comp_status = 0;
@@ -958,7 +958,7 @@ bnad_get_flash_partition_by_offset(struct bnad *bnad, u32 offset,
958 if (ret != BFA_STATUS_OK) { 958 if (ret != BFA_STATUS_OK) {
959 spin_unlock_irqrestore(&bnad->bna_lock, flags); 959 spin_unlock_irqrestore(&bnad->bna_lock, flags);
960 kfree(flash_attr); 960 kfree(flash_attr);
961 goto out_err; 961 return 0;
962 } 962 }
963 spin_unlock_irqrestore(&bnad->bna_lock, flags); 963 spin_unlock_irqrestore(&bnad->bna_lock, flags);
964 wait_for_completion(&fcomp.comp); 964 wait_for_completion(&fcomp.comp);
@@ -978,8 +978,6 @@ bnad_get_flash_partition_by_offset(struct bnad *bnad, u32 offset,
978 } 978 }
979 kfree(flash_attr); 979 kfree(flash_attr);
980 return flash_part; 980 return flash_part;
981out_err:
982 return -EINVAL;
983} 981}
984 982
985static int 983static int
@@ -1006,7 +1004,7 @@ bnad_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
1006 /* Query the flash partition based on the offset */ 1004 /* Query the flash partition based on the offset */
1007 flash_part = bnad_get_flash_partition_by_offset(bnad, 1005 flash_part = bnad_get_flash_partition_by_offset(bnad,
1008 eeprom->offset, &base_offset); 1006 eeprom->offset, &base_offset);
1009 if (flash_part <= 0) 1007 if (flash_part == 0)
1010 return -EFAULT; 1008 return -EFAULT;
1011 1009
1012 fcomp.bnad = bnad; 1010 fcomp.bnad = bnad;
@@ -1048,7 +1046,7 @@ bnad_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
1048 /* Query the flash partition based on the offset */ 1046 /* Query the flash partition based on the offset */
1049 flash_part = bnad_get_flash_partition_by_offset(bnad, 1047 flash_part = bnad_get_flash_partition_by_offset(bnad,
1050 eeprom->offset, &base_offset); 1048 eeprom->offset, &base_offset);
1051 if (flash_part <= 0) 1049 if (flash_part == 0)
1052 return -EFAULT; 1050 return -EFAULT;
1053 1051
1054 fcomp.bnad = bnad; 1052 fcomp.bnad = bnad;
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 6db6b6ae5e9b..802e5ddef8a8 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -716,12 +716,8 @@ static int
716be_do_flash(struct net_device *netdev, struct ethtool_flash *efl) 716be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
717{ 717{
718 struct be_adapter *adapter = netdev_priv(netdev); 718 struct be_adapter *adapter = netdev_priv(netdev);
719 char file_name[ETHTOOL_FLASH_MAX_FILENAME];
720 719
721 file_name[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0; 720 return be_load_fw(adapter, efl->data);
722 strcpy(file_name, efl->data);
723
724 return be_load_fw(adapter, file_name);
725} 721}
726 722
727static int 723static int
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index 7b25e9cf13f6..e92ef1bd732a 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -986,11 +986,11 @@ static int fec_enet_mii_probe(struct net_device *ndev)
986 printk(KERN_INFO 986 printk(KERN_INFO
987 "%s: no PHY, assuming direct connection to switch\n", 987 "%s: no PHY, assuming direct connection to switch\n",
988 ndev->name); 988 ndev->name);
989 strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE); 989 strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
990 phy_id = 0; 990 phy_id = 0;
991 } 991 }
992 992
993 snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id); 993 snprintf(phy_name, sizeof(phy_name), PHY_ID_FMT, mdio_bus_id, phy_id);
994 phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, 0, 994 phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, 0,
995 fep->phy_interface); 995 fep->phy_interface);
996 if (IS_ERR(phy_dev)) { 996 if (IS_ERR(phy_dev)) {
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 669ca3800c01..d94d64b5d695 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -4740,12 +4740,14 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
4740 e1000_setup_rctl(adapter); 4740 e1000_setup_rctl(adapter);
4741 e1000_set_rx_mode(netdev); 4741 e1000_set_rx_mode(netdev);
4742 4742
4743 rctl = er32(RCTL);
4744
4743 /* turn on all-multi mode if wake on multicast is enabled */ 4745 /* turn on all-multi mode if wake on multicast is enabled */
4744 if (wufc & E1000_WUFC_MC) { 4746 if (wufc & E1000_WUFC_MC)
4745 rctl = er32(RCTL);
4746 rctl |= E1000_RCTL_MPE; 4747 rctl |= E1000_RCTL_MPE;
4747 ew32(RCTL, rctl); 4748
4748 } 4749 /* enable receives in the hardware */
4750 ew32(RCTL, rctl | E1000_RCTL_EN);
4749 4751
4750 if (hw->mac_type >= e1000_82540) { 4752 if (hw->mac_type >= e1000_82540) {
4751 ctrl = er32(CTRL); 4753 ctrl = er32(CTRL);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index e91d73c8aa4e..94be6c32fa7d 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -5012,7 +5012,8 @@ static int igb_find_enabled_vfs(struct igb_adapter *adapter)
5012 vf_devfn = pdev->devfn + 0x80; 5012 vf_devfn = pdev->devfn + 0x80;
5013 pvfdev = pci_get_device(hw->vendor_id, device_id, NULL); 5013 pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
5014 while (pvfdev) { 5014 while (pvfdev) {
5015 if (pvfdev->devfn == vf_devfn) 5015 if (pvfdev->devfn == vf_devfn &&
5016 (pvfdev->bus->number >= pdev->bus->number))
5016 vfs_found++; 5017 vfs_found++;
5017 vf_devfn += vf_stride; 5018 vf_devfn += vf_stride;
5018 pvfdev = pci_get_device(hw->vendor_id, 5019 pvfdev = pci_get_device(hw->vendor_id,
diff --git a/drivers/net/ethernet/intel/igbvf/Makefile b/drivers/net/ethernet/intel/igbvf/Makefile
index 0fa3db3dd8b6..044b0ad5fcb9 100644
--- a/drivers/net/ethernet/intel/igbvf/Makefile
+++ b/drivers/net/ethernet/intel/igbvf/Makefile
@@ -1,7 +1,7 @@
1################################################################################ 1################################################################################
2# 2#
3# Intel(R) 82576 Virtual Function Linux driver 3# Intel(R) 82576 Virtual Function Linux driver
4# Copyright(c) 2009 - 2010 Intel Corporation. 4# Copyright(c) 2009 - 2012 Intel Corporation.
5# 5#
6# This program is free software; you can redistribute it and/or modify it 6# This program is free software; you can redistribute it and/or modify it
7# under the terms and conditions of the GNU General Public License, 7# under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igbvf/defines.h b/drivers/net/ethernet/intel/igbvf/defines.h
index 79f2604673fe..33f40d3474ae 100644
--- a/drivers/net/ethernet/intel/igbvf/defines.h
+++ b/drivers/net/ethernet/intel/igbvf/defines.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) 82576 Virtual Function Linux driver 3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 2dba53446064..db7dce2351c2 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) 82576 Virtual Function Linux driver 3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 - 2010 Intel Corporation. 4 Copyright(c) 2009 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index fd4a7b780fdd..2c6d87e4d3d9 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) 82576 Virtual Function Linux driver 3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 - 2010 Intel Corporation. 4 Copyright(c) 2009 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c
index 048aae248d06..b4b65bc9fc5d 100644
--- a/drivers/net/ethernet/intel/igbvf/mbx.c
+++ b/drivers/net/ethernet/intel/igbvf/mbx.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) 82576 Virtual Function Linux driver 3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 - 2010 Intel Corporation. 4 Copyright(c) 2009 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igbvf/mbx.h b/drivers/net/ethernet/intel/igbvf/mbx.h
index c2883c45d477..24370bcb0e22 100644
--- a/drivers/net/ethernet/intel/igbvf/mbx.h
+++ b/drivers/net/ethernet/intel/igbvf/mbx.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) 82576 Virtual Function Linux driver 3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index a4b20c865759..4e9141cfe81d 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) 82576 Virtual Function Linux driver 3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 - 2010 Intel Corporation. 4 Copyright(c) 2009 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -53,7 +53,7 @@ const char igbvf_driver_version[] = DRV_VERSION;
53static const char igbvf_driver_string[] = 53static const char igbvf_driver_string[] =
54 "Intel(R) Gigabit Virtual Function Network Driver"; 54 "Intel(R) Gigabit Virtual Function Network Driver";
55static const char igbvf_copyright[] = 55static const char igbvf_copyright[] =
56 "Copyright (c) 2009 - 2011 Intel Corporation."; 56 "Copyright (c) 2009 - 2012 Intel Corporation.";
57 57
58static int igbvf_poll(struct napi_struct *napi, int budget); 58static int igbvf_poll(struct napi_struct *napi, int budget);
59static void igbvf_reset(struct igbvf_adapter *); 59static void igbvf_reset(struct igbvf_adapter *);
diff --git a/drivers/net/ethernet/intel/igbvf/regs.h b/drivers/net/ethernet/intel/igbvf/regs.h
index 77e18d3d6b15..7dc6341715dc 100644
--- a/drivers/net/ethernet/intel/igbvf/regs.h
+++ b/drivers/net/ethernet/intel/igbvf/regs.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) 82576 Virtual Function Linux driver 3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 - 2010 Intel Corporation. 4 Copyright(c) 2009 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c
index af3822f9ea9a..19551977b352 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.c
+++ b/drivers/net/ethernet/intel/igbvf/vf.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) 82576 Virtual Function Linux driver 3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 - 2010 Intel Corporation. 4 Copyright(c) 2009 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
index d7ed58fcd9bb..57db3c68dfcd 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.h
+++ b/drivers/net/ethernet/intel/igbvf/vf.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) 82576 Virtual Function Linux driver 3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 - 2010 Intel Corporation. 4 Copyright(c) 2009 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 7d7387fbdecd..7a16177a12a5 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -1,7 +1,7 @@
1################################################################################ 1################################################################################
2# 2#
3# Intel 10 Gigabit PCI Express Linux driver 3# Intel 10 Gigabit PCI Express Linux driver
4# Copyright(c) 1999 - 2010 Intel Corporation. 4# Copyright(c) 1999 - 2012 Intel Corporation.
5# 5#
6# This program is free software; you can redistribute it and/or modify it 6# This program is free software; you can redistribute it and/or modify it
7# under the terms and conditions of the GNU General Public License, 7# under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 258164d6d45a..e6aeb64105a4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index ef2afefb0cd4..b406c367b190 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 772072147bea..4e59083a3de2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index a3aa6333073f..383b9413292e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 863f9c1f145b..2c834c46bba1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -75,7 +75,7 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
75s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw); 75s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
76s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw); 76s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
77s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); 77s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
78s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packtetbuf_num); 78s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num);
79s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw); 79s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw);
80 80
81s32 ixgbe_validate_mac_addr(u8 *mac_addr); 81s32 ixgbe_validate_mac_addr(u8 *mac_addr);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index 318caf4bf623..8bfaaee5ac5b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
index e162775064da..24333b718166 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index fcd0e479721f..d3695edfcb8b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
index 2f318935561a..ba835708fcac 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index 32cd97bc794d..888a419dc3d9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
index a59d5dc59d04..4dec47faeb00 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index da31735311f1..79a92fe987b9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -112,6 +112,8 @@ static u8 ixgbe_dcbnl_get_state(struct net_device *netdev)
112static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) 112static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
113{ 113{
114 u8 err = 0; 114 u8 err = 0;
115 u8 prio_tc[MAX_USER_PRIORITY] = {0};
116 int i;
115 struct ixgbe_adapter *adapter = netdev_priv(netdev); 117 struct ixgbe_adapter *adapter = netdev_priv(netdev);
116 118
117 /* Fail command if not in CEE mode */ 119 /* Fail command if not in CEE mode */
@@ -122,10 +124,15 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
122 if (!!state != !(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) 124 if (!!state != !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
123 return err; 125 return err;
124 126
125 if (state > 0) 127 if (state > 0) {
126 err = ixgbe_setup_tc(netdev, adapter->dcb_cfg.num_tcs.pg_tcs); 128 err = ixgbe_setup_tc(netdev, adapter->dcb_cfg.num_tcs.pg_tcs);
127 else 129 ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc);
130 } else {
128 err = ixgbe_setup_tc(netdev, 0); 131 err = ixgbe_setup_tc(netdev, 0);
132 }
133
134 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
135 netdev_set_prio_tc_map(netdev, i, prio_tc[i]);
129 136
130 return err; 137 return err;
131} 138}
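ixgbe_dcbnl_set_state() now also programs the stack's priority-to-traffic-class mapping whenever CEE DCB is toggled, rather than only reconfiguring the hardware queues. Sketched with the names used in the hunk (prio_tc stays all-zero when DCB is being disabled):

	u8 prio_tc[MAX_USER_PRIORITY] = {0};
	int i;

	if (state > 0) {
		err = ixgbe_setup_tc(netdev, adapter->dcb_cfg.num_tcs.pg_tcs);
		/* pull the user-priority -> TC map out of the CEE config */
		ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc);
	} else {
		err = ixgbe_setup_tc(netdev, 0);
	}

	/* tell the core which TC each 802.1p priority maps to */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		netdev_set_prio_tc_map(netdev, i, prio_tc[i]);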
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index da7e580f517a..a62975480e37 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -58,7 +58,7 @@ struct ixgbe_stats {
58 sizeof(((struct rtnl_link_stats64 *)0)->m), \ 58 sizeof(((struct rtnl_link_stats64 *)0)->m), \
59 offsetof(struct rtnl_link_stats64, m) 59 offsetof(struct rtnl_link_stats64, m)
60 60
61static struct ixgbe_stats ixgbe_gstrings_stats[] = { 61static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
62 {"rx_packets", IXGBE_NETDEV_STAT(rx_packets)}, 62 {"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
63 {"tx_packets", IXGBE_NETDEV_STAT(tx_packets)}, 63 {"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
64 {"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)}, 64 {"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
@@ -120,19 +120,23 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
120#endif /* IXGBE_FCOE */ 120#endif /* IXGBE_FCOE */
121}; 121};
122 122
123#define IXGBE_QUEUE_STATS_LEN \ 123/* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so
124 ((((struct ixgbe_adapter *)netdev_priv(netdev))->num_tx_queues + \ 124 * we set the num_rx_queues to evaluate to num_tx_queues. This is
125 ((struct ixgbe_adapter *)netdev_priv(netdev))->num_rx_queues) * \ 125 * used because we do not have a good way to get the max number of
126 * rx queues with CONFIG_RPS disabled.
127 */
128#define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues
129
130#define IXGBE_QUEUE_STATS_LEN ( \
131 (netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \
126 (sizeof(struct ixgbe_queue_stats) / sizeof(u64))) 132 (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
127#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats) 133#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
128#define IXGBE_PB_STATS_LEN ( \ 134#define IXGBE_PB_STATS_LEN ( \
129 (((struct ixgbe_adapter *)netdev_priv(netdev))->flags & \ 135 (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
130 IXGBE_FLAG_DCB_ENABLED) ? \ 136 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
131 (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \ 137 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
132 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \ 138 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
133 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \ 139 / sizeof(u64))
134 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
135 / sizeof(u64) : 0)
136#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \ 140#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
137 IXGBE_PB_STATS_LEN + \ 141 IXGBE_PB_STATS_LEN + \
138 IXGBE_QUEUE_STATS_LEN) 142 IXGBE_QUEUE_STATS_LEN)
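The reworked length macros drop the netdev_priv() casts and the DCB-flag dependency: queue statistics are sized from netdev->num_tx_queues (reused for Rx, since ixgbe allocates both symmetrically), and the per-packet-buffer flow-control counters are always counted. As a rough worked example, assuming the four px* counters in the adapter stats are arrays of eight u64 and a hypothetical 64 Tx / 64 Rx queue configuration:

	IXGBE_QUEUE_STATS_LEN = (64 + 64) * sizeof(struct ixgbe_queue_stats) / sizeof(u64)
	                      = 128 * 2 = 256 entries
	IXGBE_PB_STATS_LEN    = (4 * 8 * sizeof(u64)) / sizeof(u64) = 32 entries
	IXGBE_STATS_LEN       = IXGBE_GLOBAL_STATS_LEN + 32 + 256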
@@ -1078,8 +1082,15 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
1078 data[i] = (ixgbe_gstrings_stats[i].sizeof_stat == 1082 data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
1079 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 1083 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1080 } 1084 }
1081 for (j = 0; j < adapter->num_tx_queues; j++) { 1085 for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
1082 ring = adapter->tx_ring[j]; 1086 ring = adapter->tx_ring[j];
1087 if (!ring) {
1088 data[i] = 0;
1089 data[i+1] = 0;
1090 i += 2;
1091 continue;
1092 }
1093
1083 do { 1094 do {
1084 start = u64_stats_fetch_begin_bh(&ring->syncp); 1095 start = u64_stats_fetch_begin_bh(&ring->syncp);
1085 data[i] = ring->stats.packets; 1096 data[i] = ring->stats.packets;
@@ -1087,8 +1098,15 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
1087 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 1098 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
1088 i += 2; 1099 i += 2;
1089 } 1100 }
1090 for (j = 0; j < adapter->num_rx_queues; j++) { 1101 for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
1091 ring = adapter->rx_ring[j]; 1102 ring = adapter->rx_ring[j];
1103 if (!ring) {
1104 data[i] = 0;
1105 data[i+1] = 0;
1106 i += 2;
1107 continue;
1108 }
1109
1092 do { 1110 do {
1093 start = u64_stats_fetch_begin_bh(&ring->syncp); 1111 start = u64_stats_fetch_begin_bh(&ring->syncp);
1094 data[i] = ring->stats.packets; 1112 data[i] = ring->stats.packets;
@@ -1096,22 +1114,20 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
1096 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 1114 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
1097 i += 2; 1115 i += 2;
1098 } 1116 }
1099 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 1117
1100 for (j = 0; j < MAX_TX_PACKET_BUFFERS; j++) { 1118 for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
1101 data[i++] = adapter->stats.pxontxc[j]; 1119 data[i++] = adapter->stats.pxontxc[j];
1102 data[i++] = adapter->stats.pxofftxc[j]; 1120 data[i++] = adapter->stats.pxofftxc[j];
1103 } 1121 }
1104 for (j = 0; j < MAX_RX_PACKET_BUFFERS; j++) { 1122 for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
1105 data[i++] = adapter->stats.pxonrxc[j]; 1123 data[i++] = adapter->stats.pxonrxc[j];
1106 data[i++] = adapter->stats.pxoffrxc[j]; 1124 data[i++] = adapter->stats.pxoffrxc[j];
1107 }
1108 } 1125 }
1109} 1126}
1110 1127
1111static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, 1128static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
1112 u8 *data) 1129 u8 *data)
1113{ 1130{
1114 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1115 char *p = (char *)data; 1131 char *p = (char *)data;
1116 int i; 1132 int i;
1117 1133
@@ -1126,31 +1142,29 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
1126 ETH_GSTRING_LEN); 1142 ETH_GSTRING_LEN);
1127 p += ETH_GSTRING_LEN; 1143 p += ETH_GSTRING_LEN;
1128 } 1144 }
1129 for (i = 0; i < adapter->num_tx_queues; i++) { 1145 for (i = 0; i < netdev->num_tx_queues; i++) {
1130 sprintf(p, "tx_queue_%u_packets", i); 1146 sprintf(p, "tx_queue_%u_packets", i);
1131 p += ETH_GSTRING_LEN; 1147 p += ETH_GSTRING_LEN;
1132 sprintf(p, "tx_queue_%u_bytes", i); 1148 sprintf(p, "tx_queue_%u_bytes", i);
1133 p += ETH_GSTRING_LEN; 1149 p += ETH_GSTRING_LEN;
1134 } 1150 }
1135 for (i = 0; i < adapter->num_rx_queues; i++) { 1151 for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
1136 sprintf(p, "rx_queue_%u_packets", i); 1152 sprintf(p, "rx_queue_%u_packets", i);
1137 p += ETH_GSTRING_LEN; 1153 p += ETH_GSTRING_LEN;
1138 sprintf(p, "rx_queue_%u_bytes", i); 1154 sprintf(p, "rx_queue_%u_bytes", i);
1139 p += ETH_GSTRING_LEN; 1155 p += ETH_GSTRING_LEN;
1140 } 1156 }
1141 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 1157 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
1142 for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) { 1158 sprintf(p, "tx_pb_%u_pxon", i);
1143 sprintf(p, "tx_pb_%u_pxon", i); 1159 p += ETH_GSTRING_LEN;
1144 p += ETH_GSTRING_LEN; 1160 sprintf(p, "tx_pb_%u_pxoff", i);
1145 sprintf(p, "tx_pb_%u_pxoff", i); 1161 p += ETH_GSTRING_LEN;
1146 p += ETH_GSTRING_LEN; 1162 }
1147 } 1163 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
1148 for (i = 0; i < MAX_RX_PACKET_BUFFERS; i++) { 1164 sprintf(p, "rx_pb_%u_pxon", i);
1149 sprintf(p, "rx_pb_%u_pxon", i); 1165 p += ETH_GSTRING_LEN;
1150 p += ETH_GSTRING_LEN; 1166 sprintf(p, "rx_pb_%u_pxoff", i);
1151 sprintf(p, "rx_pb_%u_pxoff", i); 1167 p += ETH_GSTRING_LEN;
1152 p += ETH_GSTRING_LEN;
1153 }
1154 } 1168 }
1155 /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */ 1169 /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
1156 break; 1170 break;
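In ixgbe_get_ethtool_stats() and ixgbe_get_strings() the per-queue and per-packet-buffer loops now run over the fixed IXGBE_NUM_RX_QUEUES and IXGBE_MAX_PACKET_BUFFERS bounds instead of the adapter's current counts, so the string table and the data array always have the same length; rings that were never allocated simply report zeros. A condensed sketch of the per-ring loop after the change:

	for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
		ring = adapter->tx_ring[j];
		if (!ring) {
			/* keep the slot so strings and data stay aligned */
			data[i] = 0;
			data[i + 1] = 0;
			i += 2;
			continue;
		}
		do {
			start = u64_stats_fetch_begin_bh(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i + 1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
		i += 2;
	}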
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index d18d6157dd2c..4bc794249801 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
index 261fd62dda18..1dbed17c8107 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 1ee5d0fbb905..3dc6cef58107 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -64,7 +64,7 @@ char ixgbe_default_device_descr[] =
64 __stringify(BUILD) "-k" 64 __stringify(BUILD) "-k"
65const char ixgbe_driver_version[] = DRV_VERSION; 65const char ixgbe_driver_version[] = DRV_VERSION;
66static const char ixgbe_copyright[] = 66static const char ixgbe_copyright[] =
67 "Copyright (c) 1999-2011 Intel Corporation."; 67 "Copyright (c) 1999-2012 Intel Corporation.";
68 68
69static const struct ixgbe_info *ixgbe_info_tbl[] = { 69static const struct ixgbe_info *ixgbe_info_tbl[] = {
70 [board_82598] = &ixgbe_82598_info, 70 [board_82598] = &ixgbe_82598_info,
@@ -2633,22 +2633,22 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
2633 /* 2633 /*
2634 * we must limit the number of descriptors so that the 2634 * we must limit the number of descriptors so that the
2635 * total size of max desc * buf_len is not greater 2635 * total size of max desc * buf_len is not greater
2636 * than 65535 2636 * than 65536
2637 */ 2637 */
2638 if (ring_is_ps_enabled(ring)) { 2638 if (ring_is_ps_enabled(ring)) {
2639#if (MAX_SKB_FRAGS > 16) 2639#if (PAGE_SIZE < 8192)
2640 rscctrl |= IXGBE_RSCCTL_MAXDESC_16; 2640 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
2641#elif (MAX_SKB_FRAGS > 8) 2641#elif (PAGE_SIZE < 16384)
2642 rscctrl |= IXGBE_RSCCTL_MAXDESC_8; 2642 rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
2643#elif (MAX_SKB_FRAGS > 4) 2643#elif (PAGE_SIZE < 32768)
2644 rscctrl |= IXGBE_RSCCTL_MAXDESC_4; 2644 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
2645#else 2645#else
2646 rscctrl |= IXGBE_RSCCTL_MAXDESC_1; 2646 rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
2647#endif 2647#endif
2648 } else { 2648 } else {
2649 if (rx_buf_len < IXGBE_RXBUFFER_4K) 2649 if (rx_buf_len <= IXGBE_RXBUFFER_4K)
2650 rscctrl |= IXGBE_RSCCTL_MAXDESC_16; 2650 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
2651 else if (rx_buf_len < IXGBE_RXBUFFER_8K) 2651 else if (rx_buf_len <= IXGBE_RXBUFFER_8K)
2652 rscctrl |= IXGBE_RSCCTL_MAXDESC_8; 2652 rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
2653 else 2653 else
2654 rscctrl |= IXGBE_RSCCTL_MAXDESC_4; 2654 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
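ixgbe_configure_rscctl() now derives the RSC descriptor limit from PAGE_SIZE instead of MAX_SKB_FRAGS, and the buffer-size comparisons below it become inclusive. The constraint, per the updated comment, is that coalesced descriptors times per-buffer length must not exceed 65536 bytes, and the boundary cases now fit exactly, which is presumably why < became <=:

	16 * IXGBE_RXBUFFER_4K = 16 * 4096 = 65536   /* MAXDESC_16 still allowed */
	 8 * IXGBE_RXBUFFER_8K =  8 * 8192 = 65536   /* MAXDESC_8  still allowed */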
@@ -2830,7 +2830,7 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
2830 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits); 2830 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
2831 2831
2832 vf_shift = adapter->num_vfs % 32; 2832 vf_shift = adapter->num_vfs % 32;
2833 reg_offset = (adapter->num_vfs > 32) ? 1 : 0; 2833 reg_offset = (adapter->num_vfs >= 32) ? 1 : 0;
2834 2834
2835 /* Enable only the PF's pool for Tx/Rx */ 2835 /* Enable only the PF's pool for Tx/Rx */
2836 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift)); 2836 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
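The virtualization hunk fixes an off-by-one when picking which VFRE/VFTE register carries the PF's pool-enable bit. Each register covers 32 pools and the PF's pool index equals num_vfs, so with exactly 32 VFs the PF's bit is bit 0 of the second register, which requires switching registers at num_vfs >= 32 rather than > 32:

	vf_shift = adapter->num_vfs % 32;
	reg_offset = (adapter->num_vfs >= 32) ? 1 : 0;

	/* enable only the PF's own pool for Tx/Rx */
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));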
@@ -4330,6 +4330,10 @@ static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
4330 adapter->num_tx_queues = 1; 4330 adapter->num_tx_queues = 1;
4331 4331
4332done: 4332done:
4333 if ((adapter->netdev->reg_state == NETREG_UNREGISTERED) ||
4334 (adapter->netdev->reg_state == NETREG_UNREGISTERING))
4335 return 0;
4336
4333 /* Notify the stack of the (possibly) reduced queue counts. */ 4337 /* Notify the stack of the (possibly) reduced queue counts. */
4334 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); 4338 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
4335 return netif_set_real_num_rx_queues(adapter->netdev, 4339 return netif_set_real_num_rx_queues(adapter->netdev,
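ixgbe_set_num_queues() now returns before notifying the stack when the netdev is already unregistered or in the middle of unregistering, presumably to avoid calling netif_set_real_num_*_queues() on a device that is going away. Condensed, the tail of the function becomes:

	if ((adapter->netdev->reg_state == NETREG_UNREGISTERED) ||
	    (adapter->netdev->reg_state == NETREG_UNREGISTERING))
		return 0;

	/* notify the stack of the (possibly) reduced queue counts */
	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
	return netif_set_real_num_rx_queues(adapter->netdev,
					    adapter->num_rx_queues);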
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index 3f725d48336d..1f3e32b576a5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index b239bdac38da..310bdd961075 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 7cf1e1f56c69..b91773551a38 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 197bdd13106a..cc18165b4c05 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index cf6812dd1436..b01ecb4d2bb1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -67,7 +67,8 @@ static int ixgbe_find_enabled_vfs(struct ixgbe_adapter *adapter)
67 vf_devfn = pdev->devfn + 0x80; 67 vf_devfn = pdev->devfn + 0x80;
68 pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL); 68 pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL);
69 while (pvfdev) { 69 while (pvfdev) {
70 if (pvfdev->devfn == vf_devfn) 70 if (pvfdev->devfn == vf_devfn &&
71 (pvfdev->bus->number >= pdev->bus->number))
71 vfs_found++; 72 vfs_found++;
72 vf_devfn += 2; 73 vf_devfn += 2;
73 pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, 74 pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID,
@@ -646,6 +647,9 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
646 ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false); 647 ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false);
647 retval = ixgbe_set_vf_macvlan(adapter, vf, index, 648 retval = ixgbe_set_vf_macvlan(adapter, vf, index,
648 (unsigned char *)(&msgbuf[1])); 649 (unsigned char *)(&msgbuf[1]));
650 if (retval == -ENOSPC)
651 e_warn(drv, "VF %d has requested a MACVLAN filter "
652 "but there is no space for it\n", vf);
649 break; 653 break;
650 default: 654 default:
651 e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); 655 e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
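In ixgbe_rcv_msg_from_vf(), a MACVLAN filter request that cannot be satisfied is no longer silent: when ixgbe_set_vf_macvlan() returns -ENOSPC (no free filter slot), the PF logs a warning so the administrator can tell why the VF's extra MAC address was not installed. The added path, roughly:

	retval = ixgbe_set_vf_macvlan(adapter, vf, index,
				      (unsigned char *)(&msgbuf[1]));
	if (retval == -ENOSPC)
		e_warn(drv,
		       "VF %d has requested a MACVLAN filter but there is no space for it\n",
		       vf);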
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index e8badab03359..2ab38d5fda92 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 775602ef90e5..9b95bef60970 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 8cc5eccfd651..f838a2be8cfb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbevf/Makefile b/drivers/net/ethernet/intel/ixgbevf/Makefile
index 1f35d229e71a..4ce4c97ef5ad 100644
--- a/drivers/net/ethernet/intel/ixgbevf/Makefile
+++ b/drivers/net/ethernet/intel/ixgbevf/Makefile
@@ -1,7 +1,7 @@
1################################################################################ 1################################################################################
2# 2#
3# Intel 82599 Virtual Function driver 3# Intel 82599 Virtual Function driver
4# Copyright(c) 1999 - 2010 Intel Corporation. 4# Copyright(c) 1999 - 2012 Intel Corporation.
5# 5#
6# This program is free software; you can redistribute it and/or modify it 6# This program is free software; you can redistribute it and/or modify it
7# under the terms and conditions of the GNU General Public License, 7# under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 2eb89cb94a0d..947b5c830735 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index c85700318147..2bfe0d1d7958 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 9075c1d61039..dfed420a1bf6 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index bed411bada21..e51d552410ae 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -60,7 +60,7 @@ static const char ixgbevf_driver_string[] =
60#define DRV_VERSION "2.2.0-k" 60#define DRV_VERSION "2.2.0-k"
61const char ixgbevf_driver_version[] = DRV_VERSION; 61const char ixgbevf_driver_version[] = DRV_VERSION;
62static char ixgbevf_copyright[] = 62static char ixgbevf_copyright[] =
63 "Copyright (c) 2009 - 2010 Intel Corporation."; 63 "Copyright (c) 2009 - 2012 Intel Corporation.";
64 64
65static const struct ixgbevf_info *ixgbevf_info_tbl[] = { 65static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
66 [board_82599_vf] = &ixgbevf_82599_vf_info, 66 [board_82599_vf] = &ixgbevf_82599_vf_info,
@@ -935,7 +935,11 @@ static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
935 if (msg & IXGBE_VT_MSGTYPE_NACK) 935 if (msg & IXGBE_VT_MSGTYPE_NACK)
936 pr_warn("Last Request of type %2.2x to PF Nacked\n", 936 pr_warn("Last Request of type %2.2x to PF Nacked\n",
937 msg & 0xFF); 937 msg & 0xFF);
938 goto out; 938 /*
939 * Restore the PFSTS bit in case someone is polling for a
940 * return message from the PF
941 */
942 hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS;
939 } 943 }
940 944
941 /* 945 /*
@@ -945,7 +949,7 @@ static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
945 */ 949 */
946 if (got_ack) 950 if (got_ack)
947 hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK; 951 hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
948out: 952
949 return IRQ_HANDLED; 953 return IRQ_HANDLED;
950} 954}
951 955
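ixgbevf_msix_mbx() no longer short-circuits when the PF NACKs a request. Instead of the old goto past the ACK bookkeeping, the handler puts the PFSTS bit back so a caller polling mbx.v2p_mailbox for the PF's reply still finds it, then falls through to the existing PFACK handling. A condensed view of the tail of the handler, using only identifiers visible in the hunks:

	if (msg & IXGBE_VT_MSGTYPE_NACK)
		hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS;

	if (got_ack)
		hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;

	return IRQ_HANDLED;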
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c
index 13532d9ba72d..9c955900fe64 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.c
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
index 9d38a94a348a..cf9131c5c115 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbevf/regs.h b/drivers/net/ethernet/intel/ixgbevf/regs.h
index 5e4d5e5cdf38..debd8c0e1f28 100644
--- a/drivers/net/ethernet/intel/ixgbevf/regs.h
+++ b/drivers/net/ethernet/intel/ixgbevf/regs.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index d0138d7a31a1..74be7411242a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -283,6 +283,17 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
283 return ret_val; 283 return ret_val;
284} 284}
285 285
286static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw,
287 u32 *msg, u16 size)
288{
289 struct ixgbe_mbx_info *mbx = &hw->mbx;
290 u32 retmsg[IXGBE_VFMAILBOX_SIZE];
291 s32 retval = mbx->ops.write_posted(hw, msg, size);
292
293 if (!retval)
294 mbx->ops.read_posted(hw, retmsg, size);
295}
296
286/** 297/**
287 * ixgbevf_update_mc_addr_list_vf - Update Multicast addresses 298 * ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
288 * @hw: pointer to the HW structure 299 * @hw: pointer to the HW structure
@@ -294,7 +305,6 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
294 struct net_device *netdev) 305 struct net_device *netdev)
295{ 306{
296 struct netdev_hw_addr *ha; 307 struct netdev_hw_addr *ha;
297 struct ixgbe_mbx_info *mbx = &hw->mbx;
298 u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; 308 u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
299 u16 *vector_list = (u16 *)&msgbuf[1]; 309 u16 *vector_list = (u16 *)&msgbuf[1];
300 u32 cnt, i; 310 u32 cnt, i;
@@ -321,7 +331,7 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
321 vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr); 331 vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
322 } 332 }
323 333
324 mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE); 334 ixgbevf_write_msg_read_ack(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
325 335
326 return 0; 336 return 0;
327} 337}
@@ -336,7 +346,6 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
336static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, 346static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
337 bool vlan_on) 347 bool vlan_on)
338{ 348{
339 struct ixgbe_mbx_info *mbx = &hw->mbx;
340 u32 msgbuf[2]; 349 u32 msgbuf[2];
341 350
342 msgbuf[0] = IXGBE_VF_SET_VLAN; 351 msgbuf[0] = IXGBE_VF_SET_VLAN;
@@ -344,7 +353,9 @@ static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
344 /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */ 353 /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
345 msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT; 354 msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
346 355
347 return mbx->ops.write_posted(hw, msgbuf, 2); 356 ixgbevf_write_msg_read_ack(hw, msgbuf, 2);
357
358 return 0;
348} 359}
349 360
350/** 361/**
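The new ixgbevf_write_msg_read_ack() helper posts a mailbox message and then consumes the PF's reply, so fire-and-forget paths such as the multicast-list and VLAN updates no longer leave an unread ACK sitting in the mailbox. A hypothetical caller, mirroring ixgbevf_set_vfta_vf() from the hunk above:

	u32 msgbuf[2];

	msgbuf[0] = IXGBE_VF_SET_VLAN;
	msgbuf[1] = vlan;
	/* the 8-bit MSG INFO field set to true means "add" */
	msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;

	ixgbevf_write_msg_read_ack(hw, msgbuf, 2);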
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index d556619a9212..25c951daee5d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index edb9bda55d55..33947ac595c0 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -931,20 +931,17 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base)
931} 931}
932 932
933/* Allocate and setup a new buffer for receiving */ 933/* Allocate and setup a new buffer for receiving */
934static int skge_rx_setup(struct pci_dev *pdev, 934static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
935 struct skge_element *e, 935 struct sk_buff *skb, unsigned int bufsize)
936 struct sk_buff *skb, unsigned int bufsize)
937{ 936{
938 struct skge_rx_desc *rd = e->desc; 937 struct skge_rx_desc *rd = e->desc;
939 dma_addr_t map; 938 u64 map;
940 939
941 map = pci_map_single(pdev, skb->data, bufsize, 940 map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
942 PCI_DMA_FROMDEVICE); 941 PCI_DMA_FROMDEVICE);
943 if (pci_dma_mapping_error(pdev, map))
944 goto mapping_error;
945 942
946 rd->dma_lo = lower_32_bits(map); 943 rd->dma_lo = map;
947 rd->dma_hi = upper_32_bits(map); 944 rd->dma_hi = map >> 32;
948 e->skb = skb; 945 e->skb = skb;
949 rd->csum1_start = ETH_HLEN; 946 rd->csum1_start = ETH_HLEN;
950 rd->csum2_start = ETH_HLEN; 947 rd->csum2_start = ETH_HLEN;
@@ -956,13 +953,6 @@ static int skge_rx_setup(struct pci_dev *pdev,
956 rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize; 953 rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
957 dma_unmap_addr_set(e, mapaddr, map); 954 dma_unmap_addr_set(e, mapaddr, map);
958 dma_unmap_len_set(e, maplen, bufsize); 955 dma_unmap_len_set(e, maplen, bufsize);
959 return 0;
960
961mapping_error:
962 if (net_ratelimit())
963 dev_warn(&pdev->dev, "%s: rx mapping error\n",
964 skb->dev->name);
965 return -EIO;
966} 956}
967 957
968/* Resume receiving using existing skb, 958/* Resume receiving using existing skb,
@@ -1024,11 +1014,7 @@ static int skge_rx_fill(struct net_device *dev)
1024 return -ENOMEM; 1014 return -ENOMEM;
1025 1015
1026 skb_reserve(skb, NET_IP_ALIGN); 1016 skb_reserve(skb, NET_IP_ALIGN);
1027 if (skge_rx_setup(skge->hw->pdev, e, skb, skge->rx_buf_size)) { 1017 skge_rx_setup(skge, e, skb, skge->rx_buf_size);
1028 kfree_skb(skb);
1029 return -ENOMEM;
1030 }
1031
1032 } while ((e = e->next) != ring->start); 1018 } while ((e = e->next) != ring->start);
1033 1019
1034 ring->to_clean = ring->start; 1020 ring->to_clean = ring->start;
@@ -2743,7 +2729,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
2743 struct skge_tx_desc *td; 2729 struct skge_tx_desc *td;
2744 int i; 2730 int i;
2745 u32 control, len; 2731 u32 control, len;
2746 dma_addr_t map; 2732 u64 map;
2747 2733
2748 if (skb_padto(skb, ETH_ZLEN)) 2734 if (skb_padto(skb, ETH_ZLEN))
2749 return NETDEV_TX_OK; 2735 return NETDEV_TX_OK;
@@ -2757,14 +2743,11 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
2757 e->skb = skb; 2743 e->skb = skb;
2758 len = skb_headlen(skb); 2744 len = skb_headlen(skb);
2759 map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE); 2745 map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
2760 if (pci_dma_mapping_error(hw->pdev, map))
2761 goto mapping_error;
2762
2763 dma_unmap_addr_set(e, mapaddr, map); 2746 dma_unmap_addr_set(e, mapaddr, map);
2764 dma_unmap_len_set(e, maplen, len); 2747 dma_unmap_len_set(e, maplen, len);
2765 2748
2766 td->dma_lo = lower_32_bits(map); 2749 td->dma_lo = map;
2767 td->dma_hi = upper_32_bits(map); 2750 td->dma_hi = map >> 32;
2768 2751
2769 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2752 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2770 const int offset = skb_checksum_start_offset(skb); 2753 const int offset = skb_checksum_start_offset(skb);
@@ -2795,16 +2778,14 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
2795 2778
2796 map = skb_frag_dma_map(&hw->pdev->dev, frag, 0, 2779 map = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
2797 skb_frag_size(frag), DMA_TO_DEVICE); 2780 skb_frag_size(frag), DMA_TO_DEVICE);
2798 if (dma_mapping_error(&hw->pdev->dev, map))
2799 goto mapping_unwind;
2800 2781
2801 e = e->next; 2782 e = e->next;
2802 e->skb = skb; 2783 e->skb = skb;
2803 tf = e->desc; 2784 tf = e->desc;
2804 BUG_ON(tf->control & BMU_OWN); 2785 BUG_ON(tf->control & BMU_OWN);
2805 2786
2806 tf->dma_lo = lower_32_bits(map); 2787 tf->dma_lo = map;
2807 tf->dma_hi = upper_32_bits(map); 2788 tf->dma_hi = (u64) map >> 32;
2808 dma_unmap_addr_set(e, mapaddr, map); 2789 dma_unmap_addr_set(e, mapaddr, map);
2809 dma_unmap_len_set(e, maplen, skb_frag_size(frag)); 2790 dma_unmap_len_set(e, maplen, skb_frag_size(frag));
2810 2791
@@ -2834,28 +2815,6 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
2834 } 2815 }
2835 2816
2836 return NETDEV_TX_OK; 2817 return NETDEV_TX_OK;
2837
2838mapping_unwind:
2839 /* unroll any pages that were already mapped. */
2840 if (e != skge->tx_ring.to_use) {
2841 struct skge_element *u;
2842
2843 for (u = skge->tx_ring.to_use->next; u != e; u = u->next)
2844 pci_unmap_page(hw->pdev, dma_unmap_addr(u, mapaddr),
2845 dma_unmap_len(u, maplen),
2846 PCI_DMA_TODEVICE);
2847 e = skge->tx_ring.to_use;
2848 }
2849 /* undo the mapping for the skb header */
2850 pci_unmap_single(hw->pdev, dma_unmap_addr(e, mapaddr),
2851 dma_unmap_len(e, maplen),
2852 PCI_DMA_TODEVICE);
2853mapping_error:
2854 /* mapping error causes error message and packet to be discarded. */
2855 if (net_ratelimit())
2856 dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
2857 dev_kfree_skb(skb);
2858 return NETDEV_TX_OK;
2859} 2818}
2860 2819
2861 2820
@@ -3099,17 +3058,13 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
3099 if (!nskb) 3058 if (!nskb)
3100 goto resubmit; 3059 goto resubmit;
3101 3060
3102 if (unlikely(skge_rx_setup(skge->hw->pdev, e, nskb, skge->rx_buf_size))) {
3103 dev_kfree_skb(nskb);
3104 goto resubmit;
3105 }
3106
3107 pci_unmap_single(skge->hw->pdev, 3061 pci_unmap_single(skge->hw->pdev,
3108 dma_unmap_addr(e, mapaddr), 3062 dma_unmap_addr(e, mapaddr),
3109 dma_unmap_len(e, maplen), 3063 dma_unmap_len(e, maplen),
3110 PCI_DMA_FROMDEVICE); 3064 PCI_DMA_FROMDEVICE);
3111 skb = e->skb; 3065 skb = e->skb;
3112 prefetch(skb->data); 3066 prefetch(skb->data);
3067 skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
3113 } 3068 }
3114 3069
3115 skb_put(skb, len); 3070 skb_put(skb, len);
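On both sides of these hunks the 64-bit bus address ends up split across the two 32-bit dma_lo/dma_hi descriptor words; the revert only trades the lower_32_bits()/upper_32_bits() helpers for open-coded shifts on a u64 and drops the mapping-error handling added earlier. A small self-contained illustration of the split, with an invented descriptor layout standing in for skge's:

#include <stdint.h>
#include <stdio.h>

struct rx_desc {			/* hypothetical descriptor, not skge's layout */
	uint32_t dma_lo;
	uint32_t dma_hi;
};

static void desc_set_addr(struct rx_desc *rd, uint64_t map)
{
	rd->dma_lo = (uint32_t)map;		/* low 32 bits of the bus address */
	rd->dma_hi = (uint32_t)(map >> 32);	/* high 32 bits */
}

int main(void)
{
	struct rx_desc rd;

	desc_set_addr(&rd, 0x123456789abcdef0ULL);
	printf("lo=%08x hi=%08x\n", rd.dma_lo, rd.dma_hi);
	return 0;
}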
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 405e6ac3faf6..eaf09d4f02d0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1616,12 +1616,12 @@ void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
1616 kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]); 1616 kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
1617 } 1617 }
1618 kfree(priv->mfunc.master.slave_state); 1618 kfree(priv->mfunc.master.slave_state);
1619 iounmap(priv->mfunc.comm);
1620 dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
1621 priv->mfunc.vhcr,
1622 priv->mfunc.vhcr_dma);
1623 priv->mfunc.vhcr = NULL;
1624 } 1619 }
1620
1621 iounmap(priv->mfunc.comm);
1622 dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
1623 priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
1624 priv->mfunc.vhcr = NULL;
1625} 1625}
1626 1626
1627void mlx4_cmd_cleanup(struct mlx4_dev *dev) 1627void mlx4_cmd_cleanup(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 467ae5824875..149e60da0a32 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -892,7 +892,8 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
892 892
893 for (i = 0; i < priv->rx_ring_num; i++) { 893 for (i = 0; i < priv->rx_ring_num; i++) {
894 if (priv->rx_ring[i].rx_info) 894 if (priv->rx_ring[i].rx_info)
895 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]); 895 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
896 priv->prof->rx_ring_size, priv->stride);
896 if (priv->rx_cq[i].buf) 897 if (priv->rx_cq[i].buf)
897 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); 898 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
898 } 899 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 971d4b6b8dfe..d4ad8c226b51 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -168,8 +168,12 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
168 return 0; 168 return 0;
169 169
170err: 170err:
171 while (i--) 171 while (i--) {
172 dma_addr_t dma = be64_to_cpu(rx_desc->data[i].addr);
173 pci_unmap_single(priv->mdev->pdev, dma, skb_frags[i].size,
174 PCI_DMA_FROMDEVICE);
172 put_page(skb_frags[i].page); 175 put_page(skb_frags[i].page);
176 }
173 return -ENOMEM; 177 return -ENOMEM;
174} 178}
175 179
@@ -380,12 +384,12 @@ err_allocator:
380} 384}
381 385
382void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, 386void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
383 struct mlx4_en_rx_ring *ring) 387 struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
384{ 388{
385 struct mlx4_en_dev *mdev = priv->mdev; 389 struct mlx4_en_dev *mdev = priv->mdev;
386 390
387 mlx4_en_unmap_buffer(&ring->wqres.buf); 391 mlx4_en_unmap_buffer(&ring->wqres.buf);
388 mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size + TXBB_SIZE); 392 mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
389 vfree(ring->rx_info); 393 vfree(ring->rx_info);
390 ring->rx_info = NULL; 394 ring->rx_info = NULL;
391} 395}
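The error path in mlx4_en_prepare_rx_desc() now unmaps each already-mapped fragment before dropping its page reference, so a partially built receive descriptor is unwound completely instead of leaking DMA mappings. The general map-then-unwind shape, with stub helpers standing in for pci_map_single()/pci_unmap_single():

#include <stdio.h>

#define NFRAGS 4

static int mapped[NFRAGS];

/* Stubs standing in for the real mapping calls. */
static int frag_map(int i)
{
	if (i == 2)			/* pretend the third mapping fails */
		return -1;
	mapped[i] = 1;
	return 0;
}

static void frag_unmap(int i)
{
	mapped[i] = 0;
}

/* Map NFRAGS resources; on failure unwind everything already mapped. */
static int prepare_frags(void)
{
	int i;

	for (i = 0; i < NFRAGS; i++)
		if (frag_map(i))
			goto err;
	return 0;
err:
	while (i--)			/* walk back over the fragments that did map */
		frag_unmap(i);
	return -1;
}

int main(void)
{
	printf("prepare_frags() = %d\n", prepare_frags());
	return 0;
}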
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 55d7bd4e210a..8fa41f3082cf 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -815,8 +815,9 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
815 int err; 815 int err;
816 int i; 816 int i;
817 817
818 priv->eq_table.uar_map = kcalloc(sizeof *priv->eq_table.uar_map, 818 priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev),
819 mlx4_num_eq_uar(dev), GFP_KERNEL); 819 sizeof *priv->eq_table.uar_map,
820 GFP_KERNEL);
820 if (!priv->eq_table.uar_map) { 821 if (!priv->eq_table.uar_map) {
821 err = -ENOMEM; 822 err = -ENOMEM;
822 goto err_out_free; 823 goto err_out_free;
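kcalloc() is documented as taking the element count first and the element size second; the hunk above simply puts mlx4_num_eq_uar(dev) and sizeof *priv->eq_table.uar_map into that order (the resulting allocation is the same size either way). The userspace calloc() analogue of the corrected call:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t n = 8;			/* element count, e.g. what mlx4_num_eq_uar() returns */
	void **uar_map;

	/* count first, element size second, same convention as kcalloc() */
	uar_map = calloc(n, sizeof(*uar_map));
	if (!uar_map)
		return 1;

	printf("allocated %zu pointers\n", n);
	free(uar_map);
	return 0;
}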
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 0785d9b2a265..ca574d850b39 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -136,7 +136,7 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 port,
136 u32 prot; 136 u32 prot;
137 int err; 137 int err;
138 138
139 s_steer = &mlx4_priv(dev)->steer[0]; 139 s_steer = &mlx4_priv(dev)->steer[port - 1];
140 new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL); 140 new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
141 if (!new_entry) 141 if (!new_entry)
142 return -ENOMEM; 142 return -ENOMEM;
@@ -220,7 +220,7 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 port,
220 struct mlx4_promisc_qp *pqp; 220 struct mlx4_promisc_qp *pqp;
221 struct mlx4_promisc_qp *dqp; 221 struct mlx4_promisc_qp *dqp;
222 222
223 s_steer = &mlx4_priv(dev)->steer[0]; 223 s_steer = &mlx4_priv(dev)->steer[port - 1];
224 224
225 pqp = get_promisc_qp(dev, 0, steer, qpn); 225 pqp = get_promisc_qp(dev, 0, steer, qpn);
226 if (!pqp) 226 if (!pqp)
@@ -265,7 +265,7 @@ static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
265 struct mlx4_steer_index *tmp_entry, *entry = NULL; 265 struct mlx4_steer_index *tmp_entry, *entry = NULL;
266 struct mlx4_promisc_qp *dqp, *tmp_dqp; 266 struct mlx4_promisc_qp *dqp, *tmp_dqp;
267 267
268 s_steer = &mlx4_priv(dev)->steer[0]; 268 s_steer = &mlx4_priv(dev)->steer[port - 1];
269 269
270 /* if qp is not promisc, it cannot be duplicated */ 270 /* if qp is not promisc, it cannot be duplicated */
271 if (!get_promisc_qp(dev, 0, steer, qpn)) 271 if (!get_promisc_qp(dev, 0, steer, qpn))
@@ -306,7 +306,7 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
306 bool ret = false; 306 bool ret = false;
307 int i; 307 int i;
308 308
309 s_steer = &mlx4_priv(dev)->steer[0]; 309 s_steer = &mlx4_priv(dev)->steer[port - 1];
310 310
311 mailbox = mlx4_alloc_cmd_mailbox(dev); 311 mailbox = mlx4_alloc_cmd_mailbox(dev);
312 if (IS_ERR(mailbox)) 312 if (IS_ERR(mailbox))
@@ -361,7 +361,7 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
361 int err; 361 int err;
362 struct mlx4_priv *priv = mlx4_priv(dev); 362 struct mlx4_priv *priv = mlx4_priv(dev);
363 363
364 s_steer = &mlx4_priv(dev)->steer[0]; 364 s_steer = &mlx4_priv(dev)->steer[port - 1];
365 365
366 mutex_lock(&priv->mcg_table.mutex); 366 mutex_lock(&priv->mcg_table.mutex);
367 367
@@ -466,7 +466,7 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
466 int loc, i; 466 int loc, i;
467 int err; 467 int err;
468 468
469 s_steer = &mlx4_priv(dev)->steer[0]; 469 s_steer = &mlx4_priv(dev)->steer[port - 1];
470 mutex_lock(&priv->mcg_table.mutex); 470 mutex_lock(&priv->mcg_table.mutex);
471 471
472 pqp = get_promisc_qp(dev, 0, steer, qpn); 472 pqp = get_promisc_qp(dev, 0, steer, qpn);
@@ -1004,7 +1004,7 @@ EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);
1004 1004
1005int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port) 1005int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
1006{ 1006{
1007 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)) 1007 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
1008 return 0; 1008 return 0;
1009 1009
1010 if (mlx4_is_mfunc(dev)) 1010 if (mlx4_is_mfunc(dev))
@@ -1016,7 +1016,7 @@ EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);
1016 1016
1017int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port) 1017int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
1018{ 1018{
1019 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)) 1019 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
1020 return 0; 1020 return 0;
1021 1021
1022 if (mlx4_is_mfunc(dev)) 1022 if (mlx4_is_mfunc(dev))
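Each steering helper above stops hard-coding steer[0] and instead indexes the per-port array with port - 1, since mlx4 ports are numbered from 1 while the array is 0-based (the final two hunks separately switch the unicast-promisc checks to the UC_STEER capability flag). A toy illustration of the 1-based-port to 0-based-index mapping, with invented types:

#include <stdio.h>

#define MAX_PORTS 2

struct steer_state {			/* hypothetical per-port steering state */
	int promisc_qps;
};

static struct steer_state steer[MAX_PORTS];

static struct steer_state *steer_for_port(unsigned int port)
{
	/* ports are 1-based on the wire, the array is 0-based */
	return &steer[port - 1];
}

int main(void)
{
	steer_for_port(2)->promisc_qps = 5;
	printf("port 2 state lives at index %ld\n",
	       (long)(steer_for_port(2) - steer));
	return 0;
}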
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 35f08840813c..d60335f3c473 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -528,7 +528,8 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
528 struct mlx4_en_rx_ring *ring, 528 struct mlx4_en_rx_ring *ring,
529 u32 size, u16 stride); 529 u32 size, u16 stride);
530void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, 530void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
531 struct mlx4_en_rx_ring *ring); 531 struct mlx4_en_rx_ring *ring,
532 u32 size, u16 stride);
532int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv); 533int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv);
533void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv, 534void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
534 struct mlx4_en_rx_ring *ring); 535 struct mlx4_en_rx_ring *ring);
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index dcd819bfb2f0..bfdb7af19e49 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -73,6 +73,7 @@ struct res_gid {
73 struct list_head list; 73 struct list_head list;
74 u8 gid[16]; 74 u8 gid[16];
75 enum mlx4_protocol prot; 75 enum mlx4_protocol prot;
76 enum mlx4_steer_type steer;
76}; 77};
77 78
78enum res_qp_states { 79enum res_qp_states {
@@ -374,6 +375,7 @@ static struct res_common *alloc_qp_tr(int id)
374 375
375 ret->com.res_id = id; 376 ret->com.res_id = id;
376 ret->com.state = RES_QP_RESERVED; 377 ret->com.state = RES_QP_RESERVED;
378 ret->local_qpn = id;
377 INIT_LIST_HEAD(&ret->mcg_list); 379 INIT_LIST_HEAD(&ret->mcg_list);
378 spin_lock_init(&ret->mcg_spl); 380 spin_lock_init(&ret->mcg_spl);
379 381
@@ -2479,7 +2481,8 @@ static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
2479} 2481}
2480 2482
2481static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp, 2483static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2482 u8 *gid, enum mlx4_protocol prot) 2484 u8 *gid, enum mlx4_protocol prot,
2485 enum mlx4_steer_type steer)
2483{ 2486{
2484 struct res_gid *res; 2487 struct res_gid *res;
2485 int err; 2488 int err;
@@ -2495,6 +2498,7 @@ static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2495 } else { 2498 } else {
2496 memcpy(res->gid, gid, 16); 2499 memcpy(res->gid, gid, 16);
2497 res->prot = prot; 2500 res->prot = prot;
2501 res->steer = steer;
2498 list_add_tail(&res->list, &rqp->mcg_list); 2502 list_add_tail(&res->list, &rqp->mcg_list);
2499 err = 0; 2503 err = 0;
2500 } 2504 }
@@ -2504,14 +2508,15 @@ static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2504} 2508}
2505 2509
2506static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp, 2510static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2507 u8 *gid, enum mlx4_protocol prot) 2511 u8 *gid, enum mlx4_protocol prot,
2512 enum mlx4_steer_type steer)
2508{ 2513{
2509 struct res_gid *res; 2514 struct res_gid *res;
2510 int err; 2515 int err;
2511 2516
2512 spin_lock_irq(&rqp->mcg_spl); 2517 spin_lock_irq(&rqp->mcg_spl);
2513 res = find_gid(dev, slave, rqp, gid); 2518 res = find_gid(dev, slave, rqp, gid);
2514 if (!res || res->prot != prot) 2519 if (!res || res->prot != prot || res->steer != steer)
2515 err = -EINVAL; 2520 err = -EINVAL;
2516 else { 2521 else {
2517 list_del(&res->list); 2522 list_del(&res->list);
@@ -2538,7 +2543,7 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
2538 int attach = vhcr->op_modifier; 2543 int attach = vhcr->op_modifier;
2539 int block_loopback = vhcr->in_modifier >> 31; 2544 int block_loopback = vhcr->in_modifier >> 31;
2540 u8 steer_type_mask = 2; 2545 u8 steer_type_mask = 2;
2541 enum mlx4_steer_type type = gid[7] & steer_type_mask; 2546 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
2542 2547
2543 qpn = vhcr->in_modifier & 0xffffff; 2548 qpn = vhcr->in_modifier & 0xffffff;
2544 err = get_res(dev, slave, qpn, RES_QP, &rqp); 2549 err = get_res(dev, slave, qpn, RES_QP, &rqp);
@@ -2547,7 +2552,7 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
2547 2552
2548 qp.qpn = qpn; 2553 qp.qpn = qpn;
2549 if (attach) { 2554 if (attach) {
2550 err = add_mcg_res(dev, slave, rqp, gid, prot); 2555 err = add_mcg_res(dev, slave, rqp, gid, prot, type);
2551 if (err) 2556 if (err)
2552 goto ex_put; 2557 goto ex_put;
2553 2558
@@ -2556,7 +2561,7 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
2556 if (err) 2561 if (err)
2557 goto ex_rem; 2562 goto ex_rem;
2558 } else { 2563 } else {
2559 err = rem_mcg_res(dev, slave, rqp, gid, prot); 2564 err = rem_mcg_res(dev, slave, rqp, gid, prot, type);
2560 if (err) 2565 if (err)
2561 goto ex_put; 2566 goto ex_put;
2562 err = mlx4_qp_detach_common(dev, &qp, gid, prot, type); 2567 err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
@@ -2567,7 +2572,7 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
2567 2572
2568ex_rem: 2573ex_rem:
2569 /* ignore error return below, already in error */ 2574 /* ignore error return below, already in error */
2570 err1 = rem_mcg_res(dev, slave, rqp, gid, prot); 2575 err1 = rem_mcg_res(dev, slave, rqp, gid, prot, type);
2571ex_put: 2576ex_put:
2572 put_res(dev, slave, qpn, RES_QP); 2577 put_res(dev, slave, qpn, RES_QP);
2573 2578
@@ -2606,7 +2611,7 @@ static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
2606 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) { 2611 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
2607 qp.qpn = rqp->local_qpn; 2612 qp.qpn = rqp->local_qpn;
2608 err = mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot, 2613 err = mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
2609 MLX4_MC_STEER); 2614 rgid->steer);
2610 list_del(&rgid->list); 2615 list_del(&rgid->list);
2611 kfree(rgid); 2616 kfree(rgid);
2612 } 2617 }
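The resource tracker now remembers which steering type a GID was attached with (res_gid gains a steer field filled in by add_mcg_res()), so rem_mcg_res() can refuse a mismatched detach and detach_qp() can replay the recorded type instead of assuming multicast steering. A compact sketch of carrying that extra attribute alongside the tracked entry, with invented types:

#include <stdio.h>
#include <string.h>

enum steer_type { UC_STEER, MC_STEER };	/* hypothetical steering types */

struct tracked_gid {
	unsigned char gid[16];
	enum steer_type steer;		/* remembered at attach time */
};

static void attach(struct tracked_gid *g, const unsigned char *gid,
		   enum steer_type steer)
{
	memcpy(g->gid, gid, sizeof(g->gid));
	g->steer = steer;		/* record it so detach can reuse it */
}

static void detach(const struct tracked_gid *g)
{
	/* use the recorded type rather than assuming MC_STEER */
	printf("detach with steer=%d\n", g->steer);
}

int main(void)
{
	static const unsigned char gid[16] = { 0xff, 0x12 };
	struct tracked_gid g;

	attach(&g, gid, UC_STEER);
	detach(&g);
	return 0;
}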
diff --git a/drivers/net/ethernet/micrel/Kconfig b/drivers/net/ethernet/micrel/Kconfig
index 1ea811cf515b..fe42fc00d8d3 100644
--- a/drivers/net/ethernet/micrel/Kconfig
+++ b/drivers/net/ethernet/micrel/Kconfig
@@ -42,7 +42,6 @@ config KS8851
42 select NET_CORE 42 select NET_CORE
43 select MII 43 select MII
44 select CRC32 44 select CRC32
45 select MISC_DEVICES
46 select EEPROM_93CX6 45 select EEPROM_93CX6
47 ---help--- 46 ---help---
48 SPI driver for Micrel KS8851 SPI attached network chip. 47 SPI driver for Micrel KS8851 SPI attached network chip.
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 6b35e7da9a9c..0c3e4005224d 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -583,7 +583,7 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
583 ks8851_dbg_dumpkkt(ks, rxpkt); 583 ks8851_dbg_dumpkkt(ks, rxpkt);
584 584
585 skb->protocol = eth_type_trans(skb, ks->netdev); 585 skb->protocol = eth_type_trans(skb, ks->netdev);
586 netif_rx(skb); 586 netif_rx_ni(skb);
587 587
588 ks->netdev->stats.rx_packets++; 588 ks->netdev->stats.rx_packets++;
589 ks->netdev->stats.rx_bytes += rxlen; 589 ks->netdev->stats.rx_bytes += rxlen;
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index e58e78e5c930..231176fcd2ba 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -394,7 +394,6 @@ union ks_tx_hdr {
394 * @msg_enable : The message flags controlling driver output (see ethtool). 394 * @msg_enable : The message flags controlling driver output (see ethtool).
395 * @frame_cnt : number of frames received. 395 * @frame_cnt : number of frames received.
396 * @bus_width : i/o bus width. 396 * @bus_width : i/o bus width.
397 * @irq : irq number assigned to this device.
398 * @rc_rxqcr : Cached copy of KS_RXQCR. 397 * @rc_rxqcr : Cached copy of KS_RXQCR.
399 * @rc_txcr : Cached copy of KS_TXCR. 398 * @rc_txcr : Cached copy of KS_TXCR.
400 * @rc_ier : Cached copy of KS_IER. 399 * @rc_ier : Cached copy of KS_IER.
@@ -441,7 +440,6 @@ struct ks_net {
441 u32 msg_enable; 440 u32 msg_enable;
442 u32 frame_cnt; 441 u32 frame_cnt;
443 int bus_width; 442 int bus_width;
444 int irq;
445 443
446 u16 rc_rxqcr; 444 u16 rc_rxqcr;
447 u16 rc_txcr; 445 u16 rc_txcr;
@@ -907,10 +905,10 @@ static int ks_net_open(struct net_device *netdev)
907 netif_dbg(ks, ifup, ks->netdev, "%s - entry\n", __func__); 905 netif_dbg(ks, ifup, ks->netdev, "%s - entry\n", __func__);
908 906
909 /* reset the HW */ 907 /* reset the HW */
910 err = request_irq(ks->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, netdev); 908 err = request_irq(netdev->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, netdev);
911 909
912 if (err) { 910 if (err) {
913 pr_err("Failed to request IRQ: %d: %d\n", ks->irq, err); 911 pr_err("Failed to request IRQ: %d: %d\n", netdev->irq, err);
914 return err; 912 return err;
915 } 913 }
916 914
@@ -955,7 +953,7 @@ static int ks_net_stop(struct net_device *netdev)
955 953
956 /* set powermode to soft power down to save power */ 954 /* set powermode to soft power down to save power */
957 ks_set_powermode(ks, PMECR_PM_SOFTDOWN); 955 ks_set_powermode(ks, PMECR_PM_SOFTDOWN);
958 free_irq(ks->irq, netdev); 956 free_irq(netdev->irq, netdev);
959 mutex_unlock(&ks->lock); 957 mutex_unlock(&ks->lock);
960 return 0; 958 return 0;
961} 959}
@@ -1545,10 +1543,10 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
1545 if (!ks->hw_addr_cmd) 1543 if (!ks->hw_addr_cmd)
1546 goto err_ioremap1; 1544 goto err_ioremap1;
1547 1545
1548 ks->irq = platform_get_irq(pdev, 0); 1546 netdev->irq = platform_get_irq(pdev, 0);
1549 1547
1550 if (ks->irq < 0) { 1548 if (netdev->irq < 0) {
1551 err = ks->irq; 1549 err = netdev->irq;
1552 goto err_get_irq; 1550 goto err_get_irq;
1553 } 1551 }
1554 1552
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index 212f43b308a3..cd827ff4a021 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -670,7 +670,7 @@ static void octeon_mgmt_adjust_link(struct net_device *netdev)
670static int octeon_mgmt_init_phy(struct net_device *netdev) 670static int octeon_mgmt_init_phy(struct net_device *netdev)
671{ 671{
672 struct octeon_mgmt *p = netdev_priv(netdev); 672 struct octeon_mgmt *p = netdev_priv(netdev);
673 char phy_id[20]; 673 char phy_id[MII_BUS_ID_SIZE + 3];
674 674
675 if (octeon_is_simulation()) { 675 if (octeon_is_simulation()) {
676 /* No PHYs in the simulator. */ 676 /* No PHYs in the simulator. */
@@ -678,7 +678,7 @@ static int octeon_mgmt_init_phy(struct net_device *netdev)
678 return 0; 678 return 0;
679 } 679 }
680 680
681 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "0", p->port); 681 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "mdio-octeon-0", p->port);
682 682
683 p->phydev = phy_connect(netdev, phy_id, octeon_mgmt_adjust_link, 0, 683 p->phydev = phy_connect(netdev, phy_id, octeon_mgmt_adjust_link, 0,
684 PHY_INTERFACE_MODE_MII); 684 PHY_INTERFACE_MODE_MII);
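octeon_mgmt now connects to the PHY through the full "bus-id:address" string, so the buffer is sized as MII_BUS_ID_SIZE plus room for the address suffix and the real MDIO bus name is passed instead of "0". A userspace sketch of the formatting, with stand-in values for MII_BUS_ID_SIZE and PHY_ID_FMT (both assumptions here, not the kernel's definitions):

#include <stdio.h>

#define BUS_ID_SIZE	17		/* stand-in for MII_BUS_ID_SIZE */
#define PHY_FMT		"%s:%02x"	/* stand-in for PHY_ID_FMT: "bus:addr" */

int main(void)
{
	/* room for the bus id, a ':', two hex digits and the NUL */
	char phy_id[BUS_ID_SIZE + 3];

	snprintf(phy_id, sizeof(phy_id), PHY_FMT, "mdio-octeon-0", 4);
	printf("%s\n", phy_id);
	return 0;
}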
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 813d41c4a845..87b650131774 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -38,6 +38,7 @@
38#include <linux/pm_runtime.h> 38#include <linux/pm_runtime.h>
39#include <linux/slab.h> 39#include <linux/slab.h>
40#include <linux/ethtool.h> 40#include <linux/ethtool.h>
41#include <linux/if_vlan.h>
41#include <linux/sh_eth.h> 42#include <linux/sh_eth.h>
42 43
43#include "sh_eth.h" 44#include "sh_eth.h"
@@ -817,7 +818,8 @@ static int sh_eth_dev_init(struct net_device *ndev)
817 sh_eth_write(ndev, 0, TRIMD); 818 sh_eth_write(ndev, 0, TRIMD);
818 819
819 /* Recv frame limit set register */ 820 /* Recv frame limit set register */
820 sh_eth_write(ndev, RFLR_VALUE, RFLR); 821 sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
822 RFLR);
821 823
822 sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR); 824 sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
823 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); 825 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
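The receive frame length limit written to RFLR is now derived from the interface MTU plus the Ethernet header, one VLAN tag and the FCS, replacing the fixed RFLR_VALUE that the following sh_eth.h hunk removes. The arithmetic, with the conventional header sizes spelled out:

#include <stdio.h>

#define ETH_HLEN	14	/* destination MAC + source MAC + ethertype */
#define VLAN_HLEN	4	/* one 802.1Q tag */
#define ETH_FCS_LEN	4	/* frame check sequence */

int main(void)
{
	unsigned int mtu = 1500;
	unsigned int rflr = mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;

	/* a 1500-byte MTU gives a 1522-byte maximum frame accepted by the MAC */
	printf("RFLR = %u\n", rflr);
	return 0;
}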
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 47877b13ffad..cdbd844662a7 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -575,9 +575,6 @@ enum RPADIR_BIT {
575 RPADIR_PADR = 0x0003f, 575 RPADIR_PADR = 0x0003f,
576}; 576};
577 577
578/* RFLR */
579#define RFLR_VALUE 0x1000
580
581/* FDR */ 578/* FDR */
582#define DEFAULT_FDR_INIT 0x00000707 579#define DEFAULT_FDR_INIT 0x00000707
583 580
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index d0b814ef0675..0319d640f728 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -67,6 +67,7 @@ struct stmmac_extra_stats {
67 unsigned long ipc_csum_error; 67 unsigned long ipc_csum_error;
68 unsigned long rx_collision; 68 unsigned long rx_collision;
69 unsigned long rx_crc; 69 unsigned long rx_crc;
70 unsigned long dribbling_bit;
70 unsigned long rx_length; 71 unsigned long rx_length;
71 unsigned long rx_mii; 72 unsigned long rx_mii;
72 unsigned long rx_multicast; 73 unsigned long rx_multicast;
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index d87976364ec5..ad1b627f8ec2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -201,7 +201,7 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
201 201
202 if (unlikely(p->des01.erx.dribbling)) { 202 if (unlikely(p->des01.erx.dribbling)) {
203 CHIP_DBG(KERN_ERR "GMAC RX: dribbling error\n"); 203 CHIP_DBG(KERN_ERR "GMAC RX: dribbling error\n");
204 ret = discard_frame; 204 x->dribbling_bit++;
205 } 205 }
206 if (unlikely(p->des01.erx.sa_filter_fail)) { 206 if (unlikely(p->des01.erx.sa_filter_fail)) {
207 CHIP_DBG(KERN_ERR "GMAC RX : Source Address filter fail\n"); 207 CHIP_DBG(KERN_ERR "GMAC RX : Source Address filter fail\n");
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index fda5d2b31d3a..25953bb45a73 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -104,7 +104,7 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
104 ret = discard_frame; 104 ret = discard_frame;
105 } 105 }
106 if (unlikely(p->des01.rx.dribbling)) 106 if (unlikely(p->des01.rx.dribbling))
107 ret = discard_frame; 107 x->dribbling_bit++;
108 108
109 if (unlikely(p->des01.rx.length_error)) { 109 if (unlikely(p->des01.rx.length_error)) {
110 x->rx_length++; 110 x->rx_length++;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 120740020e2c..b4b095fdcf29 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -21,7 +21,7 @@
21*******************************************************************************/ 21*******************************************************************************/
22 22
23#define STMMAC_RESOURCE_NAME "stmmaceth" 23#define STMMAC_RESOURCE_NAME "stmmaceth"
24#define DRV_MODULE_VERSION "Dec_2011" 24#define DRV_MODULE_VERSION "Feb_2012"
25#include <linux/stmmac.h> 25#include <linux/stmmac.h>
26#include <linux/phy.h> 26#include <linux/phy.h>
27#include "common.h" 27#include "common.h"
@@ -97,4 +97,5 @@ int stmmac_resume(struct net_device *ndev);
97int stmmac_suspend(struct net_device *ndev); 97int stmmac_suspend(struct net_device *ndev);
98int stmmac_dvr_remove(struct net_device *ndev); 98int stmmac_dvr_remove(struct net_device *ndev);
99struct stmmac_priv *stmmac_dvr_probe(struct device *device, 99struct stmmac_priv *stmmac_dvr_probe(struct device *device,
100 struct plat_stmmacenet_data *plat_dat); 100 struct plat_stmmacenet_data *plat_dat,
101 void __iomem *addr);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 9573303a706b..f98e1511660f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -47,23 +47,25 @@ struct stmmac_stats {
47 offsetof(struct stmmac_priv, xstats.m)} 47 offsetof(struct stmmac_priv, xstats.m)}
48 48
49static const struct stmmac_stats stmmac_gstrings_stats[] = { 49static const struct stmmac_stats stmmac_gstrings_stats[] = {
50 /* Transmit errors */
50 STMMAC_STAT(tx_underflow), 51 STMMAC_STAT(tx_underflow),
51 STMMAC_STAT(tx_carrier), 52 STMMAC_STAT(tx_carrier),
52 STMMAC_STAT(tx_losscarrier), 53 STMMAC_STAT(tx_losscarrier),
53 STMMAC_STAT(vlan_tag), 54 STMMAC_STAT(vlan_tag),
54 STMMAC_STAT(tx_deferred), 55 STMMAC_STAT(tx_deferred),
55 STMMAC_STAT(tx_vlan), 56 STMMAC_STAT(tx_vlan),
56 STMMAC_STAT(rx_vlan),
57 STMMAC_STAT(tx_jabber), 57 STMMAC_STAT(tx_jabber),
58 STMMAC_STAT(tx_frame_flushed), 58 STMMAC_STAT(tx_frame_flushed),
59 STMMAC_STAT(tx_payload_error), 59 STMMAC_STAT(tx_payload_error),
60 STMMAC_STAT(tx_ip_header_error), 60 STMMAC_STAT(tx_ip_header_error),
61 /* Receive errors */
61 STMMAC_STAT(rx_desc), 62 STMMAC_STAT(rx_desc),
62 STMMAC_STAT(sa_filter_fail), 63 STMMAC_STAT(sa_filter_fail),
63 STMMAC_STAT(overflow_error), 64 STMMAC_STAT(overflow_error),
64 STMMAC_STAT(ipc_csum_error), 65 STMMAC_STAT(ipc_csum_error),
65 STMMAC_STAT(rx_collision), 66 STMMAC_STAT(rx_collision),
66 STMMAC_STAT(rx_crc), 67 STMMAC_STAT(rx_crc),
68 STMMAC_STAT(dribbling_bit),
67 STMMAC_STAT(rx_length), 69 STMMAC_STAT(rx_length),
68 STMMAC_STAT(rx_mii), 70 STMMAC_STAT(rx_mii),
69 STMMAC_STAT(rx_multicast), 71 STMMAC_STAT(rx_multicast),
@@ -73,6 +75,8 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
73 STMMAC_STAT(sa_rx_filter_fail), 75 STMMAC_STAT(sa_rx_filter_fail),
74 STMMAC_STAT(rx_missed_cntr), 76 STMMAC_STAT(rx_missed_cntr),
75 STMMAC_STAT(rx_overflow_cntr), 77 STMMAC_STAT(rx_overflow_cntr),
78 STMMAC_STAT(rx_vlan),
79 /* Tx/Rx IRQ errors */
76 STMMAC_STAT(tx_undeflow_irq), 80 STMMAC_STAT(tx_undeflow_irq),
77 STMMAC_STAT(tx_process_stopped_irq), 81 STMMAC_STAT(tx_process_stopped_irq),
78 STMMAC_STAT(tx_jabber_irq), 82 STMMAC_STAT(tx_jabber_irq),
@@ -82,6 +86,7 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
82 STMMAC_STAT(rx_watchdog_irq), 86 STMMAC_STAT(rx_watchdog_irq),
83 STMMAC_STAT(tx_early_irq), 87 STMMAC_STAT(tx_early_irq),
84 STMMAC_STAT(fatal_bus_error_irq), 88 STMMAC_STAT(fatal_bus_error_irq),
89 /* Extra info */
85 STMMAC_STAT(threshold), 90 STMMAC_STAT(threshold),
86 STMMAC_STAT(tx_pkt_n), 91 STMMAC_STAT(tx_pkt_n),
87 STMMAC_STAT(rx_pkt_n), 92 STMMAC_STAT(rx_pkt_n),
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 96fa2da30763..6ee593a55a64 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -241,7 +241,7 @@ static void stmmac_adjust_link(struct net_device *dev)
241 case 1000: 241 case 1000:
242 if (likely(priv->plat->has_gmac)) 242 if (likely(priv->plat->has_gmac))
243 ctrl &= ~priv->hw->link.port; 243 ctrl &= ~priv->hw->link.port;
244 stmmac_hw_fix_mac_speed(priv); 244 stmmac_hw_fix_mac_speed(priv);
245 break; 245 break;
246 case 100: 246 case 100:
247 case 10: 247 case 10:
@@ -785,7 +785,7 @@ static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
785 u32 uid = ((hwid & 0x0000ff00) >> 8); 785 u32 uid = ((hwid & 0x0000ff00) >> 8);
786 u32 synid = (hwid & 0x000000ff); 786 u32 synid = (hwid & 0x000000ff);
787 787
788 pr_info("STMMAC - user ID: 0x%x, Synopsys ID: 0x%x\n", 788 pr_info("stmmac - user ID: 0x%x, Synopsys ID: 0x%x\n",
789 uid, synid); 789 uid, synid);
790 790
791 return synid; 791 return synid;
@@ -869,38 +869,6 @@ static int stmmac_get_hw_features(struct stmmac_priv *priv)
869 return hw_cap; 869 return hw_cap;
870} 870}
871 871
872/**
873 * stmmac_mac_device_setup
874 * @dev : device pointer
875 * Description: this is to attach the GMAC or MAC 10/100
876 * main core structures that will be completed during the
877 * open step.
878 */
879static int stmmac_mac_device_setup(struct net_device *dev)
880{
881 struct stmmac_priv *priv = netdev_priv(dev);
882
883 struct mac_device_info *device;
884
885 if (priv->plat->has_gmac)
886 device = dwmac1000_setup(priv->ioaddr);
887 else
888 device = dwmac100_setup(priv->ioaddr);
889
890 if (!device)
891 return -ENOMEM;
892
893 priv->hw = device;
894 priv->hw->ring = &ring_mode_ops;
895
896 if (device_can_wakeup(priv->device)) {
897 priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
898 enable_irq_wake(priv->wol_irq);
899 }
900
901 return 0;
902}
903
904static void stmmac_check_ether_addr(struct stmmac_priv *priv) 872static void stmmac_check_ether_addr(struct stmmac_priv *priv)
905{ 873{
906 /* verify if the MAC address is valid, in case of failures it 874 /* verify if the MAC address is valid, in case of failures it
@@ -930,20 +898,8 @@ static int stmmac_open(struct net_device *dev)
930 struct stmmac_priv *priv = netdev_priv(dev); 898 struct stmmac_priv *priv = netdev_priv(dev);
931 int ret; 899 int ret;
932 900
933 /* MAC HW device setup */
934 ret = stmmac_mac_device_setup(dev);
935 if (ret < 0)
936 return ret;
937
938 stmmac_check_ether_addr(priv); 901 stmmac_check_ether_addr(priv);
939 902
940 stmmac_verify_args();
941
942 /* Override with kernel parameters if supplied XXX CRS XXX
943 * this needs to have multiple instances */
944 if ((phyaddr >= 0) && (phyaddr <= 31))
945 priv->plat->phy_addr = phyaddr;
946
947 /* MDIO bus Registration */ 903 /* MDIO bus Registration */
948 ret = stmmac_mdio_register(dev); 904 ret = stmmac_mdio_register(dev);
949 if (ret < 0) { 905 if (ret < 0) {
@@ -976,44 +932,6 @@ static int stmmac_open(struct net_device *dev)
976 goto open_error; 932 goto open_error;
977 } 933 }
978 934
979 stmmac_get_synopsys_id(priv);
980
981 priv->hw_cap_support = stmmac_get_hw_features(priv);
982
983 if (priv->hw_cap_support) {
984 pr_info(" Support DMA HW capability register");
985
986 /* We can override some gmac/dma configuration fields: e.g.
987 * enh_desc, tx_coe (e.g. that are passed through the
988 * platform) with the values from the HW capability
989 * register (if supported).
990 */
991 priv->plat->enh_desc = priv->dma_cap.enh_desc;
992 priv->plat->tx_coe = priv->dma_cap.tx_coe;
993 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
994
995 /* By default disable wol on magic frame if not supported */
996 if (!priv->dma_cap.pmt_magic_frame)
997 priv->wolopts &= ~WAKE_MAGIC;
998
999 } else
1000 pr_info(" No HW DMA feature register supported");
1001
1002 /* Select the enhnaced/normal descriptor structures */
1003 stmmac_selec_desc_mode(priv);
1004
1005 /* PMT module is not integrated in all the MAC devices. */
1006 if (priv->plat->pmt) {
1007 pr_info(" Remote wake-up capable\n");
1008 device_set_wakeup_capable(priv->device, 1);
1009 }
1010
1011 priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr);
1012 if (priv->rx_coe)
1013 pr_info(" Checksum Offload Engine supported\n");
1014 if (priv->plat->tx_coe)
1015 pr_info(" Checksum insertion supported\n");
1016
1017 /* Create and initialize the TX/RX descriptors chains. */ 935 /* Create and initialize the TX/RX descriptors chains. */
1018 priv->dma_tx_size = STMMAC_ALIGN(dma_txsize); 936 priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
1019 priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize); 937 priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
@@ -1030,14 +948,14 @@ static int stmmac_open(struct net_device *dev)
1030 948
1031 /* Copy the MAC addr into the HW */ 949 /* Copy the MAC addr into the HW */
1032 priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0); 950 priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);
951
1033 /* If required, perform hw setup of the bus. */ 952 /* If required, perform hw setup of the bus. */
1034 if (priv->plat->bus_setup) 953 if (priv->plat->bus_setup)
1035 priv->plat->bus_setup(priv->ioaddr); 954 priv->plat->bus_setup(priv->ioaddr);
955
1036 /* Initialize the MAC Core */ 956 /* Initialize the MAC Core */
1037 priv->hw->mac->core_init(priv->ioaddr); 957 priv->hw->mac->core_init(priv->ioaddr);
1038 958
1039 netdev_update_features(dev);
1040
1041 /* Request the IRQ lines */ 959 /* Request the IRQ lines */
1042 ret = request_irq(dev->irq, stmmac_interrupt, 960 ret = request_irq(dev->irq, stmmac_interrupt,
1043 IRQF_SHARED, dev->name, dev); 961 IRQF_SHARED, dev->name, dev);
@@ -1047,6 +965,17 @@ static int stmmac_open(struct net_device *dev)
1047 goto open_error; 965 goto open_error;
1048 } 966 }
1049 967
968 /* Request the Wake IRQ in case of another line is used for WoL */
969 if (priv->wol_irq != dev->irq) {
970 ret = request_irq(priv->wol_irq, stmmac_interrupt,
971 IRQF_SHARED, dev->name, dev);
972 if (unlikely(ret < 0)) {
973 pr_err("%s: ERROR: allocating the ext WoL IRQ %d "
974 "(error: %d)\n", __func__, priv->wol_irq, ret);
975 goto open_error_wolirq;
976 }
977 }
978
1050 /* Enable the MAC Rx/Tx */ 979 /* Enable the MAC Rx/Tx */
1051 stmmac_set_mac(priv->ioaddr, true); 980 stmmac_set_mac(priv->ioaddr, true);
1052 981
@@ -1062,7 +991,7 @@ static int stmmac_open(struct net_device *dev)
1062#ifdef CONFIG_STMMAC_DEBUG_FS 991#ifdef CONFIG_STMMAC_DEBUG_FS
1063 ret = stmmac_init_fs(dev); 992 ret = stmmac_init_fs(dev);
1064 if (ret < 0) 993 if (ret < 0)
1065 pr_warning("\tFailed debugFS registration"); 994 pr_warning("%s: failed debugFS registration\n", __func__);
1066#endif 995#endif
1067 /* Start the ball rolling... */ 996 /* Start the ball rolling... */
1068 DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name); 997 DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
@@ -1072,6 +1001,7 @@ static int stmmac_open(struct net_device *dev)
1072#ifdef CONFIG_STMMAC_TIMER 1001#ifdef CONFIG_STMMAC_TIMER
1073 priv->tm->timer_start(tmrate); 1002 priv->tm->timer_start(tmrate);
1074#endif 1003#endif
1004
1075 /* Dump DMA/MAC registers */ 1005 /* Dump DMA/MAC registers */
1076 if (netif_msg_hw(priv)) { 1006 if (netif_msg_hw(priv)) {
1077 priv->hw->mac->dump_regs(priv->ioaddr); 1007 priv->hw->mac->dump_regs(priv->ioaddr);
@@ -1087,6 +1017,9 @@ static int stmmac_open(struct net_device *dev)
1087 1017
1088 return 0; 1018 return 0;
1089 1019
1020open_error_wolirq:
1021 free_irq(dev->irq, dev);
1022
1090open_error: 1023open_error:
1091#ifdef CONFIG_STMMAC_TIMER 1024#ifdef CONFIG_STMMAC_TIMER
1092 kfree(priv->tm); 1025 kfree(priv->tm);
@@ -1127,6 +1060,8 @@ static int stmmac_release(struct net_device *dev)
1127 1060
1128 /* Free the IRQ lines */ 1061 /* Free the IRQ lines */
1129 free_irq(dev->irq, dev); 1062 free_irq(dev->irq, dev);
1063 if (priv->wol_irq != dev->irq)
1064 free_irq(priv->wol_irq, dev);
1130 1065
1131 /* Stop TX/RX DMA and clear the descriptors */ 1066 /* Stop TX/RX DMA and clear the descriptors */
1132 priv->hw->dma->stop_tx(priv->ioaddr); 1067 priv->hw->dma->stop_tx(priv->ioaddr);
@@ -1789,13 +1724,77 @@ static const struct net_device_ops stmmac_netdev_ops = {
1789}; 1724};
1790 1725
1791/** 1726/**
1727 * stmmac_hw_init - Init the MAC device
1728 * @priv : pointer to the private device structure.
1729 * Description: this function detects which MAC device
1730 * (GMAC/MAC10-100) has to attached, checks the HW capability
1731 * (if supported) and sets the driver's features (for example
1732 * to use the ring or chaine mode or support the normal/enh
1733 * descriptor structure).
1734 */
1735static int stmmac_hw_init(struct stmmac_priv *priv)
1736{
1737 int ret = 0;
1738 struct mac_device_info *mac;
1739
1740 /* Identify the MAC HW device */
1741 if (priv->plat->has_gmac)
1742 mac = dwmac1000_setup(priv->ioaddr);
1743 else
1744 mac = dwmac100_setup(priv->ioaddr);
1745 if (!mac)
1746 return -ENOMEM;
1747
1748 priv->hw = mac;
1749
1750 /* To use the chained or ring mode */
1751 priv->hw->ring = &ring_mode_ops;
1752
1753 /* Get and dump the chip ID */
1754 stmmac_get_synopsys_id(priv);
1755
1756 /* Get the HW capability (new GMAC newer than 3.50a) */
1757 priv->hw_cap_support = stmmac_get_hw_features(priv);
1758 if (priv->hw_cap_support) {
1759 pr_info(" DMA HW capability register supported");
1760
1761 /* We can override some gmac/dma configuration fields: e.g.
1762 * enh_desc, tx_coe (e.g. that are passed through the
1763 * platform) with the values from the HW capability
1764 * register (if supported).
1765 */
1766 priv->plat->enh_desc = priv->dma_cap.enh_desc;
1767 priv->plat->tx_coe = priv->dma_cap.tx_coe;
1768 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
1769 } else
1770 pr_info(" No HW DMA feature register supported");
1771
1772 /* Select the enhnaced/normal descriptor structures */
1773 stmmac_selec_desc_mode(priv);
1774
1775 priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr);
1776 if (priv->rx_coe)
1777 pr_info(" RX Checksum Offload Engine supported\n");
1778 if (priv->plat->tx_coe)
1779 pr_info(" TX Checksum insertion supported\n");
1780
1781 if (priv->plat->pmt) {
1782 pr_info(" Wake-Up On Lan supported\n");
1783 device_set_wakeup_capable(priv->device, 1);
1784 }
1785
1786 return ret;
1787}
1788
1789/**
1792 * stmmac_dvr_probe 1790 * stmmac_dvr_probe
1793 * @device: device pointer 1791 * @device: device pointer
1794 * Description: this is the main probe function used to 1792 * Description: this is the main probe function used to
1795 * call the alloc_etherdev, allocate the priv structure. 1793 * call the alloc_etherdev, allocate the priv structure.
1796 */ 1794 */
1797struct stmmac_priv *stmmac_dvr_probe(struct device *device, 1795struct stmmac_priv *stmmac_dvr_probe(struct device *device,
1798 struct plat_stmmacenet_data *plat_dat) 1796 struct plat_stmmacenet_data *plat_dat,
1797 void __iomem *addr)
1799{ 1798{
1800 int ret = 0; 1799 int ret = 0;
1801 struct net_device *ndev = NULL; 1800 struct net_device *ndev = NULL;
@@ -1815,10 +1814,27 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
1815 1814
1816 ether_setup(ndev); 1815 ether_setup(ndev);
1817 1816
1818 ndev->netdev_ops = &stmmac_netdev_ops;
1819 stmmac_set_ethtool_ops(ndev); 1817 stmmac_set_ethtool_ops(ndev);
1818 priv->pause = pause;
1819 priv->plat = plat_dat;
1820 priv->ioaddr = addr;
1821 priv->dev->base_addr = (unsigned long)addr;
1822
1823 /* Verify driver arguments */
1824 stmmac_verify_args();
1825
1826 /* Override with kernel parameters if supplied XXX CRS XXX
1827 * this needs to have multiple instances */
1828 if ((phyaddr >= 0) && (phyaddr <= 31))
1829 priv->plat->phy_addr = phyaddr;
1830
1831 /* Init MAC and get the capabilities */
1832 stmmac_hw_init(priv);
1833
1834 ndev->netdev_ops = &stmmac_netdev_ops;
1820 1835
1821 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 1836 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1837 NETIF_F_RXCSUM;
1822 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; 1838 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
1823 ndev->watchdog_timeo = msecs_to_jiffies(watchdog); 1839 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
1824#ifdef STMMAC_VLAN_TAG_USED 1840#ifdef STMMAC_VLAN_TAG_USED
@@ -1830,8 +1846,6 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
1830 if (flow_ctrl) 1846 if (flow_ctrl)
1831 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ 1847 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
1832 1848
1833 priv->pause = pause;
1834 priv->plat = plat_dat;
1835 netif_napi_add(ndev, &priv->napi, stmmac_poll, 64); 1849 netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
1836 1850
1837 spin_lock_init(&priv->lock); 1851 spin_lock_init(&priv->lock);
@@ -1839,15 +1853,10 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
1839 1853
1840 ret = register_netdev(ndev); 1854 ret = register_netdev(ndev);
1841 if (ret) { 1855 if (ret) {
1842 pr_err("%s: ERROR %i registering the device\n", 1856 pr_err("%s: ERROR %i registering the device\n", __func__, ret);
1843 __func__, ret);
1844 goto error; 1857 goto error;
1845 } 1858 }
1846 1859
1847 DBG(probe, DEBUG, "%s: Scatter/Gather: %s - HW checksums: %s\n",
1848 ndev->name, (ndev->features & NETIF_F_SG) ? "on" : "off",
1849 (ndev->features & NETIF_F_IP_CSUM) ? "on" : "off");
1850
1851 return priv; 1860 return priv;
1852 1861
1853error: 1862error:
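Besides moving the HW detection into stmmac_hw_init() and calling it from probe, stmmac_open() now requests the wake-up interrupt only when it is a different line from the main one, and the new open_error_wolirq label plus the matching check in stmmac_release() keep the request/free calls balanced. The shape of that conditional pairing, with stub helpers standing in for request_irq()/free_irq() and invented IRQ numbers:

#include <stdio.h>

/* Stubs standing in for request_irq()/free_irq(). */
static int request_line(int irq) { printf("request %d\n", irq); return 0; }
static void free_line(int irq)   { printf("free %d\n", irq); }

static int dev_irq = 17, wol_irq = 42;	/* invented numbers */

static int open_irqs(void)
{
	int ret = request_line(dev_irq);

	if (ret)
		return ret;

	/* only grab a second line if WoL really uses a separate interrupt */
	if (wol_irq != dev_irq) {
		ret = request_line(wol_irq);
		if (ret) {
			free_line(dev_irq);	/* unwind the main IRQ */
			return ret;
		}
	}
	return 0;
}

static void close_irqs(void)
{
	free_line(dev_irq);
	if (wol_irq != dev_irq)
		free_line(wol_irq);
}

int main(void)
{
	if (!open_irqs())
		close_irqs();
	return 0;
}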
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index c796de9eed72..50ad5b80cfaf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -96,13 +96,11 @@ static int __devinit stmmac_pci_probe(struct pci_dev *pdev,
96 96
97 stmmac_default_data(); 97 stmmac_default_data();
98 98
99 priv = stmmac_dvr_probe(&(pdev->dev), &plat_dat); 99 priv = stmmac_dvr_probe(&(pdev->dev), &plat_dat, addr);
100 if (!priv) { 100 if (!priv) {
101 pr_err("%s: main drivr probe failed", __func__); 101 pr_err("%s: main driver probe failed", __func__);
102 goto err_out; 102 goto err_out;
103 } 103 }
104 priv->ioaddr = addr;
105 priv->dev->base_addr = (unsigned long)addr;
106 priv->dev->irq = pdev->irq; 104 priv->dev->irq = pdev->irq;
107 priv->wol_irq = pdev->irq; 105 priv->wol_irq = pdev->irq;
108 106
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 1ac83243649a..3aad9810237c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -59,16 +59,20 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
59 goto out_release_region; 59 goto out_release_region;
60 } 60 }
61 plat_dat = pdev->dev.platform_data; 61 plat_dat = pdev->dev.platform_data;
62 priv = stmmac_dvr_probe(&(pdev->dev), plat_dat); 62
63 /* Custom initialisation (if needed)*/
64 if (plat_dat->init) {
65 ret = plat_dat->init(pdev);
66 if (unlikely(ret))
67 goto out_unmap;
68 }
69
70 priv = stmmac_dvr_probe(&(pdev->dev), plat_dat, addr);
63 if (!priv) { 71 if (!priv) {
64 pr_err("%s: main drivr probe failed", __func__); 72 pr_err("%s: main driver probe failed", __func__);
65 goto out_unmap; 73 goto out_unmap;
66 } 74 }
67 75
68 priv->ioaddr = addr;
69 /* Set the I/O base addr */
70 priv->dev->base_addr = (unsigned long)addr;
71
72 /* Get the MAC information */ 76 /* Get the MAC information */
73 priv->dev->irq = platform_get_irq_byname(pdev, "macirq"); 77 priv->dev->irq = platform_get_irq_byname(pdev, "macirq");
74 if (priv->dev->irq == -ENXIO) { 78 if (priv->dev->irq == -ENXIO) {
@@ -92,13 +96,6 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
92 96
93 platform_set_drvdata(pdev, priv->dev); 97 platform_set_drvdata(pdev, priv->dev);
94 98
95 /* Custom initialisation */
96 if (priv->plat->init) {
97 ret = priv->plat->init(pdev);
98 if (unlikely(ret))
99 goto out_unmap;
100 }
101
102 pr_debug("STMMAC platform driver registration completed"); 99 pr_debug("STMMAC platform driver registration completed");
103 100
104 return 0; 101 return 0;
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 4d9a28ffd3c3..cbc8df78d84b 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -1122,7 +1122,7 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
1122 pdata = pdev->dev.platform_data; 1122 pdata = pdev->dev.platform_data;
1123 1123
1124 if (external_switch || dumb_switch) { 1124 if (external_switch || dumb_switch) {
1125 strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE); /* fixed phys bus */ 1125 strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */
1126 phy_id = pdev->id; 1126 phy_id = pdev->id;
1127 } else { 1127 } else {
1128 for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) { 1128 for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
@@ -1138,7 +1138,7 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
1138 if (phy_id == PHY_MAX_ADDR) { 1138 if (phy_id == PHY_MAX_ADDR) {
1139 dev_err(&pdev->dev, "no PHY present, falling back " 1139 dev_err(&pdev->dev, "no PHY present, falling back "
1140 "to switch on MDIO bus 0\n"); 1140 "to switch on MDIO bus 0\n");
1141 strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE); /* fixed phys bus */ 1141 strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */
1142 phy_id = pdev->id; 1142 phy_id = pdev->id;
1143 } 1143 }
1144 1144
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 794ac30a577b..4fa0bcb25dfc 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1600,8 +1600,9 @@ static int emac_dev_open(struct net_device *ndev)
1600 if (IS_ERR(priv->phydev)) { 1600 if (IS_ERR(priv->phydev)) {
1601 dev_err(emac_dev, "could not connect to phy %s\n", 1601 dev_err(emac_dev, "could not connect to phy %s\n",
1602 priv->phy_id); 1602 priv->phy_id);
1603 ret = PTR_ERR(priv->phydev);
1603 priv->phydev = NULL; 1604 priv->phydev = NULL;
1604 return PTR_ERR(priv->phydev); 1605 return ret;
1605 } 1606 }
1606 1607
1607 priv->link = 0; 1608 priv->link = 0;
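The emac_dev_open() fix captures PTR_ERR(priv->phydev) before the pointer is cleared; previously the error code was computed from the already-NULLed pointer, so the function returned 0 and the caller carried on as if the PHY connect had succeeded. The same ordering issue in miniature, with a toy error-pointer encoding that mimics (but is not taken from) the kernel's ERR_PTR()/PTR_ERR():

#include <stdio.h>
#include <stdint.h>

/* Toy error-pointer encoding: negative errno stuffed into a pointer. */
static void *ERR_PTR(long err)        { return (void *)err; }
static long PTR_ERR(const void *ptr)  { return (long)(intptr_t)ptr; }

int main(void)
{
	void *phydev = ERR_PTR(-19);	/* pretend the PHY connect failed (-ENODEV) */
	long ret;

	ret = PTR_ERR(phydev);		/* capture the error first... */
	phydev = NULL;			/* ...then clear the stale pointer */

	printf("ret = %ld\n", ret);	/* -19, not the 0 the old ordering produced */
	return 0;
}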
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index ef7c9c17bfff..af8b8fc39eb2 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -318,9 +318,9 @@ static int __devinit davinci_mdio_probe(struct platform_device *pdev)
318 318
319 data->clk = clk_get(dev, NULL); 319 data->clk = clk_get(dev, NULL);
320 if (IS_ERR(data->clk)) { 320 if (IS_ERR(data->clk)) {
321 data->clk = NULL;
322 dev_err(dev, "failed to get device clock\n"); 321 dev_err(dev, "failed to get device clock\n");
323 ret = PTR_ERR(data->clk); 322 ret = PTR_ERR(data->clk);
323 data->clk = NULL;
324 goto bail_out; 324 goto bail_out;
325 } 325 }
326 326
diff --git a/drivers/net/ethernet/toshiba/Kconfig b/drivers/net/ethernet/toshiba/Kconfig
index 051764704559..74acb5cf6099 100644
--- a/drivers/net/ethernet/toshiba/Kconfig
+++ b/drivers/net/ethernet/toshiba/Kconfig
@@ -5,7 +5,7 @@
5config NET_VENDOR_TOSHIBA 5config NET_VENDOR_TOSHIBA
6 bool "Toshiba devices" 6 bool "Toshiba devices"
7 default y 7 default y
8 depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB) || PPC_PS3 8 depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB || MIPS) || PPC_PS3
9 ---help--- 9 ---help---
10 If you have a network (Ethernet) card belonging to this class, say Y 10 If you have a network (Ethernet) card belonging to this class, say Y
11 and read the Ethernet-HOWTO, available from 11 and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 4128d6b8cc28..cb35b14b73bb 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2491,9 +2491,6 @@ static int velocity_close(struct net_device *dev)
2491 if (dev->irq != 0) 2491 if (dev->irq != 0)
2492 free_irq(dev->irq, dev); 2492 free_irq(dev->irq, dev);
2493 2493
2494 /* Power down the chip */
2495 pci_set_power_state(vptr->pdev, PCI_D3hot);
2496
2497 velocity_free_rings(vptr); 2494 velocity_free_rings(vptr);
2498 2495
2499 vptr->flags &= (~VELOCITY_FLAGS_OPENED); 2496 vptr->flags &= (~VELOCITY_FLAGS_OPENED);
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 72a854f05bb8..41a8b5a9849e 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1416,7 +1416,8 @@ static int __devinit eth_init_one(struct platform_device *pdev)
1416 __raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control); 1416 __raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control);
1417 udelay(50); 1417 udelay(50);
1418 1418
1419 snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, "0", plat->phy); 1419 snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
1420 mdio_bus->id, plat->phy);
1420 port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, 0, 1421 port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, 0,
1421 PHY_INTERFACE_MODE_MII); 1422 PHY_INTERFACE_MODE_MII);
1422 if (IS_ERR(port->phydev)) { 1423 if (IS_ERR(port->phydev)) {
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 1a1ca6cfc74a..466c58a7353d 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -123,7 +123,7 @@ static int netvsc_close(struct net_device *net)
123 struct hv_device *device_obj = net_device_ctx->device_ctx; 123 struct hv_device *device_obj = net_device_ctx->device_ctx;
124 int ret; 124 int ret;
125 125
126 netif_stop_queue(net); 126 netif_tx_disable(net);
127 127
128 ret = rndis_filter_close(device_obj); 128 ret = rndis_filter_close(device_obj);
129 if (ret != 0) 129 if (ret != 0)
@@ -151,10 +151,10 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
151 int ret; 151 int ret;
152 unsigned int i, num_pages, npg_data; 152 unsigned int i, num_pages, npg_data;
153 153
154 /* Add multipage for skb->data and additional one for RNDIS */ 154 /* Add multipages for skb->data and additional 2 for RNDIS */
155 npg_data = (((unsigned long)skb->data + skb_headlen(skb) - 1) 155 npg_data = (((unsigned long)skb->data + skb_headlen(skb) - 1)
156 >> PAGE_SHIFT) - ((unsigned long)skb->data >> PAGE_SHIFT) + 1; 156 >> PAGE_SHIFT) - ((unsigned long)skb->data >> PAGE_SHIFT) + 1;
157 num_pages = skb_shinfo(skb)->nr_frags + npg_data + 1; 157 num_pages = skb_shinfo(skb)->nr_frags + npg_data + 2;
158 158
159 /* Allocate a netvsc packet based on # of frags. */ 159 /* Allocate a netvsc packet based on # of frags. */
160 packet = kzalloc(sizeof(struct hv_netvsc_packet) + 160 packet = kzalloc(sizeof(struct hv_netvsc_packet) +
@@ -173,8 +173,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
173 sizeof(struct hv_netvsc_packet) + 173 sizeof(struct hv_netvsc_packet) +
174 (num_pages * sizeof(struct hv_page_buffer)); 174 (num_pages * sizeof(struct hv_page_buffer));
175 175
176 /* Setup the rndis header */ 176 /* If the rndis msg goes beyond 1 page, we will add 1 later */
177 packet->page_buf_cnt = num_pages; 177 packet->page_buf_cnt = num_pages - 1;
178 178
179 /* Initialize it from the skb */ 179 /* Initialize it from the skb */
180 packet->total_data_buflen = skb->len; 180 packet->total_data_buflen = skb->len;
@@ -256,7 +256,7 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
256 schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20)); 256 schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
257 } else { 257 } else {
258 netif_carrier_off(net); 258 netif_carrier_off(net);
259 netif_stop_queue(net); 259 netif_tx_disable(net);
260 } 260 }
261} 261}
262 262
@@ -298,7 +298,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
298 skb->ip_summed = CHECKSUM_NONE; 298 skb->ip_summed = CHECKSUM_NONE;
299 299
300 net->stats.rx_packets++; 300 net->stats.rx_packets++;
301 net->stats.rx_bytes += skb->len; 301 net->stats.rx_bytes += packet->total_data_buflen;
302 302
303 /* 303 /*
304 * Pass the skb back up. Network stack will deallocate the skb when it 304 * Pass the skb back up. Network stack will deallocate the skb when it
@@ -337,7 +337,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
337 337
338 nvdev->start_remove = true; 338 nvdev->start_remove = true;
339 cancel_delayed_work_sync(&ndevctx->dwork); 339 cancel_delayed_work_sync(&ndevctx->dwork);
340 netif_stop_queue(ndev); 340 netif_tx_disable(ndev);
341 rndis_filter_device_remove(hdev); 341 rndis_filter_device_remove(hdev);
342 342
343 ndev->mtu = mtu; 343 ndev->mtu = mtu;
@@ -460,7 +460,7 @@ static int netvsc_remove(struct hv_device *dev)
460 cancel_delayed_work_sync(&ndev_ctx->dwork); 460 cancel_delayed_work_sync(&ndev_ctx->dwork);
461 461
462 /* Stop outbound asap */ 462 /* Stop outbound asap */
463 netif_stop_queue(net); 463 netif_tx_disable(net);
464 464
465 unregister_netdev(net); 465 unregister_netdev(net);
466 466
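
The num_pages arithmetic changed in netvsc_start_xmit() above counts how many pages the linear skb->data area touches, then adds the fragment count and two slots for the RNDIS header. The standalone sketch below is a userspace illustration of the same page-span calculation, not part of this diff; the 4 KiB PAGE_SIZE (PAGE_SHIFT = 12) and the sample addresses are assumptions.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define PAGE_SHIFT 12

/* number of pages touched by [data, data + len), len must be non-zero */
static unsigned int pages_spanned(uintptr_t data, size_t len)
{
	return (unsigned int)(((data + len - 1) >> PAGE_SHIFT) -
			      (data >> PAGE_SHIFT) + 1);
}

int main(void)
{
	printf("%u\n", pages_spanned(0x1000, 100));  /* 1: fits in one page */
	printf("%u\n", pages_spanned(0x1fc0, 100));  /* 2: straddles a boundary */
	return 0;
}
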
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index da181f9a49d1..133b7fbf8595 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -321,6 +321,25 @@ static void rndis_filter_receive_data(struct rndis_device *dev,
321 data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset; 321 data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset;
322 322
323 pkt->total_data_buflen -= data_offset; 323 pkt->total_data_buflen -= data_offset;
324
325 /*
326 * Make sure we got a valid RNDIS message, now total_data_buflen
327 * should be the data packet size plus the trailer padding size
328 */
329 if (pkt->total_data_buflen < rndis_pkt->data_len) {
330 netdev_err(dev->net_dev->ndev, "rndis message buffer "
331 "overflow detected (got %u, min %u)"
332 "...dropping this message!\n",
333 pkt->total_data_buflen, rndis_pkt->data_len);
334 return;
335 }
336
337 /*
338 * Remove the rndis trailer padding from rndis packet message
339 * rndis_pkt->data_len tell us the real data length, we only copy
340 * the data packet to the stack, without the rndis trailer padding
341 */
342 pkt->total_data_buflen = rndis_pkt->data_len;
324 pkt->data = (void *)((unsigned long)pkt->data + data_offset); 343 pkt->data = (void *)((unsigned long)pkt->data + data_offset);
325 344
326 pkt->is_data_pkt = true; 345 pkt->is_data_pkt = true;
@@ -778,6 +797,19 @@ int rndis_filter_send(struct hv_device *dev,
778 (unsigned long)rndisMessage & (PAGE_SIZE-1); 797 (unsigned long)rndisMessage & (PAGE_SIZE-1);
779 pkt->page_buf[0].len = rndisMessageSize; 798 pkt->page_buf[0].len = rndisMessageSize;
780 799
800 /* Add one page_buf if the rndis msg goes beyond page boundary */
801 if (pkt->page_buf[0].offset + rndisMessageSize > PAGE_SIZE) {
802 int i;
803 for (i = pkt->page_buf_cnt; i > 1; i--)
804 pkt->page_buf[i] = pkt->page_buf[i-1];
805 pkt->page_buf_cnt++;
806 pkt->page_buf[0].len = PAGE_SIZE - pkt->page_buf[0].offset;
807 pkt->page_buf[1].pfn = virt_to_phys((void *)((ulong)
808 rndisMessage + pkt->page_buf[0].len)) >> PAGE_SHIFT;
809 pkt->page_buf[1].offset = 0;
810 pkt->page_buf[1].len = rndisMessageSize - pkt->page_buf[0].len;
811 }
812
781 /* Save the packet send completion and context */ 813 /* Save the packet send completion and context */
782 filterPacket->completion = pkt->completion.send.send_completion; 814 filterPacket->completion = pkt->completion.send.send_completion;
783 filterPacket->completion_ctx = 815 filterPacket->completion_ctx =
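
The block added to rndis_filter_send() above splits an RNDIS message that crosses a page boundary into two scatter entries. Below is a rough standalone illustration of that split; the 4 KiB page size, struct page_buf, and the describe() helper are mock-ups for the example, not the hv_netvsc types.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE  4096u
#define PAGE_SHIFT 12

struct page_buf {
	unsigned long pfn;
	unsigned int offset;
	unsigned int len;
};

/* Fill one or two descriptors for [vaddr, vaddr + len); returns the count. */
static int describe(uintptr_t vaddr, unsigned int len, struct page_buf pb[2])
{
	pb[0].pfn    = vaddr >> PAGE_SHIFT;
	pb[0].offset = vaddr & (PAGE_SIZE - 1);

	if (pb[0].offset + len <= PAGE_SIZE) {
		pb[0].len = len;
		return 1;
	}

	pb[0].len    = PAGE_SIZE - pb[0].offset;   /* up to the page boundary */
	pb[1].pfn    = (vaddr + pb[0].len) >> PAGE_SHIFT;
	pb[1].offset = 0;
	pb[1].len    = len - pb[0].len;            /* the remainder */
	return 2;
}

int main(void)
{
	struct page_buf pb[2];
	int i, n = describe(0x5fe0, 0x60, pb);     /* 0x20 bytes + 0x40 bytes */

	for (i = 0; i < n; i++)
		printf("pfn=%lu off=%u len=%u\n", pb[i].pfn, pb[i].offset, pb[i].len);
	return 0;
}
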
diff --git a/drivers/net/tokenring/Kconfig b/drivers/net/tokenring/Kconfig
index c7e0149d1514..45550d42b368 100644
--- a/drivers/net/tokenring/Kconfig
+++ b/drivers/net/tokenring/Kconfig
@@ -7,7 +7,6 @@ menuconfig TR
7 bool "Token Ring driver support" 7 bool "Token Ring driver support"
8 depends on NETDEVICES && !UML 8 depends on NETDEVICES && !UML
9 depends on (PCI || ISA || MCA || CCW || PCMCIA) 9 depends on (PCI || ISA || MCA || CCW || PCMCIA)
10 select LLC
11 help 10 help
12 Token Ring is IBM's way of communication on a local network; the 11 Token Ring is IBM's way of communication on a local network; the
13 rest of the world uses Ethernet. To participate on a Token Ring 12 rest of the world uses Ethernet. To participate on a Token Ring
@@ -20,6 +19,10 @@ menuconfig TR
20 19
21if TR 20if TR
22 21
22config WANT_LLC
23 def_bool y
24 select LLC
25
23config PCMCIA_IBMTR 26config PCMCIA_IBMTR
24 tristate "IBM PCMCIA tokenring adapter support" 27 tristate "IBM PCMCIA tokenring adapter support"
25 depends on IBMTR!=y && PCMCIA 28 depends on IBMTR!=y && PCMCIA
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index e84662db51cc..dd78c4cbd459 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -60,6 +60,7 @@
60#define USB_PRODUCT_IPHONE_3GS 0x1294 60#define USB_PRODUCT_IPHONE_3GS 0x1294
61#define USB_PRODUCT_IPHONE_4 0x1297 61#define USB_PRODUCT_IPHONE_4 0x1297
62#define USB_PRODUCT_IPHONE_4_VZW 0x129c 62#define USB_PRODUCT_IPHONE_4_VZW 0x129c
63#define USB_PRODUCT_IPHONE_4S 0x12a0
63 64
64#define IPHETH_USBINTF_CLASS 255 65#define IPHETH_USBINTF_CLASS 255
65#define IPHETH_USBINTF_SUBCLASS 253 66#define IPHETH_USBINTF_SUBCLASS 253
@@ -103,6 +104,10 @@ static struct usb_device_id ipheth_table[] = {
103 USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW, 104 USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW,
104 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, 105 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
105 IPHETH_USBINTF_PROTO) }, 106 IPHETH_USBINTF_PROTO) },
107 { USB_DEVICE_AND_INTERFACE_INFO(
108 USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4S,
109 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
110 IPHETH_USBINTF_PROTO) },
106 { } 111 { }
107}; 112};
108MODULE_DEVICE_TABLE(usb, ipheth_table); 113MODULE_DEVICE_TABLE(usb, ipheth_table);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 49f4667e1fa3..4a3402898f2a 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -422,7 +422,9 @@ static void veth_dellink(struct net_device *dev, struct list_head *head)
422 unregister_netdevice_queue(peer, head); 422 unregister_netdevice_queue(peer, head);
423} 423}
424 424
425static const struct nla_policy veth_policy[VETH_INFO_MAX + 1]; 425static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
426 [VETH_INFO_PEER] = { .len = sizeof(struct ifinfomsg) },
427};
426 428
427static struct rtnl_link_ops veth_link_ops = { 429static struct rtnl_link_ops veth_link_ops = {
428 .kind = DRV_NAME, 430 .kind = DRV_NAME,
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index ee7759575050..87db1ee1c298 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1037,13 +1037,16 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
1037 1037
1038 /* 1038 /*
1039 * Workaround for early ACK timeouts, add an offset to match the 1039 * Workaround for early ACK timeouts, add an offset to match the
1040 * initval's 64us ack timeout value. 1040 * initval's 64us ack timeout value. Use 48us for the CTS timeout.
1041 * This was initially only meant to work around an issue with delayed 1041 * This was initially only meant to work around an issue with delayed
1042 * BA frames in some implementations, but it has been found to fix ACK 1042 * BA frames in some implementations, but it has been found to fix ACK
1043 * timeout issues in other cases as well. 1043 * timeout issues in other cases as well.
1044 */ 1044 */
1045 if (conf->channel && conf->channel->band == IEEE80211_BAND_2GHZ) 1045 if (conf->channel && conf->channel->band == IEEE80211_BAND_2GHZ) {
1046 acktimeout += 64 - sifstime - ah->slottime; 1046 acktimeout += 64 - sifstime - ah->slottime;
1047 ctstimeout += 48 - sifstime - ah->slottime;
1048 }
1049
1047 1050
1048 ath9k_hw_set_sifs_time(ah, sifstime); 1051 ath9k_hw_set_sifs_time(ah, sifstime);
1049 ath9k_hw_setslottime(ah, slottime); 1052 ath9k_hw_setslottime(ah, slottime);
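
The hunk above pads the 2 GHz ACK timeout and, with this change, the CTS timeout so they line up with the initvals' 64 us and 48 us values. The arithmetic sketch below is a simplified userspace illustration only; the base formula is reduced to SIFS + slot time, whereas the driver's real computation includes further terms.

#include <stdio.h>

int main(void)
{
	unsigned int slottime = 9, sifstime = 10;        /* typical 2 GHz values */
	unsigned int acktimeout = sifstime + slottime;   /* reduced base formula */
	unsigned int ctstimeout = sifstime + slottime;

	acktimeout += 64 - sifstime - slottime;          /* -> 64 us, as in the initvals */
	ctstimeout += 48 - sifstime - slottime;          /* -> 48 us */

	printf("ack=%u us, cts=%u us\n", acktimeout, ctstimeout);
	return 0;
}
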
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index abf943557dee..53a005d288aa 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -822,6 +822,11 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc,
822 ARRAY_SIZE(ath9k_tpt_blink)); 822 ARRAY_SIZE(ath9k_tpt_blink));
823#endif 823#endif
824 824
825 INIT_WORK(&sc->hw_reset_work, ath_reset_work);
826 INIT_WORK(&sc->hw_check_work, ath_hw_check);
827 INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
828 INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
829
825 /* Register with mac80211 */ 830 /* Register with mac80211 */
826 error = ieee80211_register_hw(hw); 831 error = ieee80211_register_hw(hw);
827 if (error) 832 if (error)
@@ -840,10 +845,6 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc,
840 goto error_world; 845 goto error_world;
841 } 846 }
842 847
843 INIT_WORK(&sc->hw_reset_work, ath_reset_work);
844 INIT_WORK(&sc->hw_check_work, ath_hw_check);
845 INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
846 INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
847 sc->last_rssi = ATH_RSSI_DUMMY_MARKER; 848 sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
848 849
849 ath_init_leds(sc); 850 ath_init_leds(sc);
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index b3c3798fe513..635b592ad961 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -694,7 +694,7 @@ static u8 ath_rc_get_highest_rix(struct ath_softc *sc,
694 return rate; 694 return rate;
695 695
696 /* This should not happen */ 696 /* This should not happen */
697 WARN_ON(1); 697 WARN_ON_ONCE(1);
698 698
699 rate = ath_rc_priv->valid_rate_index[0]; 699 rate = ath_rc_priv->valid_rate_index[0];
700 700
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 0e666fbe0842..7e1a91af1497 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -822,6 +822,14 @@ static bool ath9k_rx_accept(struct ath_common *common,
822 (ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC | 822 (ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC |
823 ATH9K_RXERR_KEYMISS)); 823 ATH9K_RXERR_KEYMISS));
824 824
825 /*
826 * Key miss events are only relevant for pairwise keys where the
827 * descriptor does contain a valid key index. This has been observed
828 * mostly with CCMP encryption.
829 */
830 if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID)
831 rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
832
825 if (!rx_stats->rs_datalen) 833 if (!rx_stats->rs_datalen)
826 return false; 834 return false;
827 /* 835 /*
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index c664c2726553..63bbc60be28e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -91,6 +91,7 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
91 tx_cmd->tid_tspec = qc[0] & 0xf; 91 tx_cmd->tid_tspec = qc[0] & 0xf;
92 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; 92 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
93 } else { 93 } else {
94 tx_cmd->tid_tspec = IWL_TID_NON_QOS;
94 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) 95 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
95 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; 96 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
96 else 97 else
@@ -620,7 +621,7 @@ int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
620 sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit = 621 sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
621 sta_priv->max_agg_bufsize; 622 sta_priv->max_agg_bufsize;
622 623
623 IWL_INFO(priv, "Tx aggregation enabled on ra = %pM tid = %d\n", 624 IWL_DEBUG_HT(priv, "Tx aggregation enabled on ra = %pM tid = %d\n",
624 sta->addr, tid); 625 sta->addr, tid);
625 626
626 return iwl_send_lq_cmd(priv, ctx, 627 return iwl_send_lq_cmd(priv, ctx,
@@ -808,6 +809,8 @@ static void iwl_rx_reply_tx_agg(struct iwl_priv *priv,
808 u32 status = le16_to_cpu(tx_resp->status.status); 809 u32 status = le16_to_cpu(tx_resp->status.status);
809 int i; 810 int i;
810 811
812 WARN_ON(tid == IWL_TID_NON_QOS);
813
811 if (agg->wait_for_ba) 814 if (agg->wait_for_ba)
812 IWL_DEBUG_TX_REPLY(priv, 815 IWL_DEBUG_TX_REPLY(priv,
813 "got tx response w/o block-ack\n"); 816 "got tx response w/o block-ack\n");
@@ -1035,10 +1038,13 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
1035 } 1038 }
1036 1039
1037 __skb_queue_head_init(&skbs); 1040 __skb_queue_head_init(&skbs);
1038 priv->tid_data[sta_id][tid].next_reclaimed = next_reclaimed;
1039 1041
1040 IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d", 1042 if (tid != IWL_TID_NON_QOS) {
1041 next_reclaimed); 1043 priv->tid_data[sta_id][tid].next_reclaimed =
1044 next_reclaimed;
1045 IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d",
1046 next_reclaimed);
1047 }
1042 1048
1043 /*we can free until ssn % q.n_bd not inclusive */ 1049 /*we can free until ssn % q.n_bd not inclusive */
1044 WARN_ON(iwl_trans_reclaim(trans(priv), sta_id, tid, txq_id, 1050 WARN_ON(iwl_trans_reclaim(trans(priv), sta_id, tid, txq_id,
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 265de39d394c..f822ac447c3b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -815,6 +815,7 @@ struct iwl_qosparam_cmd {
815 815
816#define IWL_INVALID_STATION 255 816#define IWL_INVALID_STATION 255
817#define IWL_MAX_TID_COUNT 8 817#define IWL_MAX_TID_COUNT 8
818#define IWL_TID_NON_QOS IWL_MAX_TID_COUNT
818 819
819#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2) 820#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2)
820#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8) 821#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8)
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
index 67d6e324e26f..324d06dfb690 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
@@ -1262,6 +1262,7 @@ static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
1262 txq->time_stamp = jiffies; 1262 txq->time_stamp = jiffies;
1263 1263
1264 if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE && 1264 if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE &&
1265 tid != IWL_TID_NON_QOS &&
1265 txq_id != trans_pcie->agg_txq[sta_id][tid])) { 1266 txq_id != trans_pcie->agg_txq[sta_id][tid])) {
1266 /* 1267 /*
1267 * FIXME: this is a uCode bug which need to be addressed, 1268 * FIXME: this is a uCode bug which need to be addressed,
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index e05b417a3fae..1d0ec57a0143 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -382,7 +382,8 @@ mwifiex_free_adapter(struct mwifiex_adapter *adapter)
382 382
383 adapter->if_ops.cleanup_if(adapter); 383 adapter->if_ops.cleanup_if(adapter);
384 384
385 dev_kfree_skb_any(adapter->sleep_cfm); 385 if (adapter->sleep_cfm)
386 dev_kfree_skb_any(adapter->sleep_cfm);
386} 387}
387 388
388/* 389/*
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 84be196188cc..b728f54451e4 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -822,7 +822,9 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
822 continue; 822 continue;
823 823
824 rtnl_lock(); 824 rtnl_lock();
825 mwifiex_del_virtual_intf(priv->wdev->wiphy, priv->netdev); 825 if (priv->wdev && priv->netdev)
826 mwifiex_del_virtual_intf(priv->wdev->wiphy,
827 priv->netdev);
826 rtnl_unlock(); 828 rtnl_unlock();
827 } 829 }
828 830
@@ -830,9 +832,11 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
830 if (!priv) 832 if (!priv)
831 goto exit_remove; 833 goto exit_remove;
832 834
833 wiphy_unregister(priv->wdev->wiphy); 835 if (priv->wdev) {
834 wiphy_free(priv->wdev->wiphy); 836 wiphy_unregister(priv->wdev->wiphy);
835 kfree(priv->wdev); 837 wiphy_free(priv->wdev->wiphy);
838 kfree(priv->wdev);
839 }
836 840
837 mwifiex_terminate_workqueue(adapter); 841 mwifiex_terminate_workqueue(adapter);
838 842
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 470ca75ec250..b0fbf5d4fea0 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -54,7 +54,7 @@ int mwifiex_copy_mcast_addr(struct mwifiex_multicast_list *mlist,
54int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter) 54int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
55{ 55{
56 bool cancel_flag = false; 56 bool cancel_flag = false;
57 int status = adapter->cmd_wait_q.status; 57 int status;
58 struct cmd_ctrl_node *cmd_queued; 58 struct cmd_ctrl_node *cmd_queued;
59 59
60 if (!adapter->cmd_queued) 60 if (!adapter->cmd_queued)
@@ -79,6 +79,8 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
79 mwifiex_cancel_pending_ioctl(adapter); 79 mwifiex_cancel_pending_ioctl(adapter);
80 dev_dbg(adapter->dev, "cmd cancel\n"); 80 dev_dbg(adapter->dev, "cmd cancel\n");
81 } 81 }
82
83 status = adapter->cmd_wait_q.status;
82 adapter->cmd_wait_q.status = 0; 84 adapter->cmd_wait_q.status = 0;
83 85
84 return status; 86 return status;
@@ -240,6 +242,8 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
240 242
241 if (!netif_queue_stopped(priv->netdev)) 243 if (!netif_queue_stopped(priv->netdev))
242 mwifiex_stop_net_dev_queue(priv->netdev, adapter); 244 mwifiex_stop_net_dev_queue(priv->netdev, adapter);
245 if (netif_carrier_ok(priv->netdev))
246 netif_carrier_off(priv->netdev);
243 247
244 /* Clear any past association response stored for 248 /* Clear any past association response stored for
245 * application retrieval */ 249 * application retrieval */
@@ -271,6 +275,8 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
271 275
272 if (!netif_queue_stopped(priv->netdev)) 276 if (!netif_queue_stopped(priv->netdev))
273 mwifiex_stop_net_dev_queue(priv->netdev, adapter); 277 mwifiex_stop_net_dev_queue(priv->netdev, adapter);
278 if (netif_carrier_ok(priv->netdev))
279 netif_carrier_off(priv->netdev);
274 280
275 if (!ret) { 281 if (!ret) {
276 dev_dbg(adapter->dev, "info: network found in scan" 282 dev_dbg(adapter->dev, "info: network found in scan"
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 22a1a8fc6e02..7bef66def10c 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -514,9 +514,9 @@ EXPORT_SYMBOL_GPL(rt2800_write_tx_data);
514 514
515static int rt2800_agc_to_rssi(struct rt2x00_dev *rt2x00dev, u32 rxwi_w2) 515static int rt2800_agc_to_rssi(struct rt2x00_dev *rt2x00dev, u32 rxwi_w2)
516{ 516{
517 int rssi0 = rt2x00_get_field32(rxwi_w2, RXWI_W2_RSSI0); 517 s8 rssi0 = rt2x00_get_field32(rxwi_w2, RXWI_W2_RSSI0);
518 int rssi1 = rt2x00_get_field32(rxwi_w2, RXWI_W2_RSSI1); 518 s8 rssi1 = rt2x00_get_field32(rxwi_w2, RXWI_W2_RSSI1);
519 int rssi2 = rt2x00_get_field32(rxwi_w2, RXWI_W2_RSSI2); 519 s8 rssi2 = rt2x00_get_field32(rxwi_w2, RXWI_W2_RSSI2);
520 u16 eeprom; 520 u16 eeprom;
521 u8 offset0; 521 u8 offset0;
522 u8 offset1; 522 u8 offset1;
@@ -552,7 +552,7 @@ static int rt2800_agc_to_rssi(struct rt2x00_dev *rt2x00dev, u32 rxwi_w2)
552 * which gives less energy... 552 * which gives less energy...
553 */ 553 */
554 rssi0 = max(rssi0, rssi1); 554 rssi0 = max(rssi0, rssi1);
555 return max(rssi0, rssi2); 555 return (int)max(rssi0, rssi2);
556} 556}
557 557
558void rt2800_process_rxwi(struct queue_entry *entry, 558void rt2800_process_rxwi(struct queue_entry *entry,
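
The int -> s8 change above matters because the RSSI fields are 8-bit quantities whose sign is lost when the 32-bit field accessor's result is stored as a plain non-negative value. A small standalone demonstration of that effect follows; the register word and mask here are invented for the example.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t rxwi_w2 = 0x000000e0;       /* invented word: RSSI0 field = 0xe0 */
	uint32_t field = rxwi_w2 & 0xff;     /* what a u32 field accessor hands back */

	int    as_int = field;               /* 224: the sign bit is lost */
	int8_t as_s8  = (int8_t)field;       /* -32: what storing into s8 preserves */

	printf("as int: %d, as s8: %d\n", as_int, (int)as_s8);
	return 0;
}
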
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 39e0907a3c4e..9245d882c06a 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -1501,7 +1501,7 @@ static int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
1501 return err; 1501 return err;
1502 } 1502 }
1503 1503
1504 return 1; 1504 return 0;
1505} 1505}
1506 1506
1507static int rtl_pci_start(struct ieee80211_hw *hw) 1507static int rtl_pci_start(struct ieee80211_hw *hw)
@@ -1870,7 +1870,7 @@ int __devinit rtl_pci_probe(struct pci_dev *pdev,
1870 } 1870 }
1871 1871
1872 /* Init PCI sw */ 1872 /* Init PCI sw */
1873 err = !rtl_pci_init(hw, pdev); 1873 err = rtl_pci_init(hw, pdev);
1874 if (err) { 1874 if (err) {
1875 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 1875 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1876 ("Failed to init PCI.\n")); 1876 ("Failed to init PCI.\n"));
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 0a70149df3fc..98a574a4a465 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -866,6 +866,14 @@ static int fill_ctrlset(struct zd_mac *mac,
866 866
867 ZD_ASSERT(frag_len <= 0xffff); 867 ZD_ASSERT(frag_len <= 0xffff);
868 868
869 /*
870 * Firmware computes the duration itself (for all frames except PSPoll)
871 * and needs the field set to 0 at input, otherwise firmware messes up
872 * duration_id and sets bits 14 and 15 on.
873 */
874 if (!ieee80211_is_pspoll(hdr->frame_control))
875 hdr->duration_id = 0;
876
869 txrate = ieee80211_get_tx_rate(mac->hw, info); 877 txrate = ieee80211_get_tx_rate(mac->hw, info);
870 878
871 cs->modulation = txrate->hw_value; 879 cs->modulation = txrate->hw_value;
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 0321fa3b4226..0dab5ecf61bb 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -347,8 +347,6 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
347 return rc; 347 return rc;
348 } 348 }
349 349
350 pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz);
351
352 iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE; 350 iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
353 pci_cfg_access_lock(dev); 351 pci_cfg_access_lock(dev);
354 pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl); 352 pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
@@ -466,6 +464,7 @@ found:
466 return -EIO; 464 return -EIO;
467 465
468 pgsz &= ~(pgsz - 1); 466 pgsz &= ~(pgsz - 1);
467 pci_write_config_dword(dev, pos + PCI_SRIOV_SYS_PGSIZE, pgsz);
469 468
470 nres = 0; 469 nres = 0;
471 for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { 470 for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 7cc9e2f0f47c..71eac9cd724d 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -651,6 +651,11 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
651 dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n", 651 dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
652 secondary, subordinate, pass); 652 secondary, subordinate, pass);
653 653
654 if (!primary && (primary != bus->number) && secondary && subordinate) {
655 dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
656 primary = bus->number;
657 }
658
654 /* Check if setup is sensible at all */ 659 /* Check if setup is sensible at all */
655 if (!pass && 660 if (!pass &&
656 (primary != bus->number || secondary <= bus->number)) { 661 (primary != bus->number || secondary <= bus->number)) {
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 6def3624c688..ef8b18c48f26 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -77,6 +77,7 @@ void pci_remove_bus(struct pci_bus *pci_bus)
77} 77}
78EXPORT_SYMBOL(pci_remove_bus); 78EXPORT_SYMBOL(pci_remove_bus);
79 79
80static void __pci_remove_behind_bridge(struct pci_dev *dev);
80/** 81/**
81 * pci_remove_bus_device - remove a PCI device and any children 82 * pci_remove_bus_device - remove a PCI device and any children
82 * @dev: the device to remove 83 * @dev: the device to remove
@@ -94,7 +95,7 @@ static void __pci_remove_bus_device(struct pci_dev *dev)
94 if (dev->subordinate) { 95 if (dev->subordinate) {
95 struct pci_bus *b = dev->subordinate; 96 struct pci_bus *b = dev->subordinate;
96 97
97 pci_remove_behind_bridge(dev); 98 __pci_remove_behind_bridge(dev);
98 pci_remove_bus(b); 99 pci_remove_bus(b);
99 dev->subordinate = NULL; 100 dev->subordinate = NULL;
100 } 101 }
@@ -107,6 +108,24 @@ void pci_remove_bus_device(struct pci_dev *dev)
107 __pci_remove_bus_device(dev); 108 __pci_remove_bus_device(dev);
108} 109}
109 110
111static void __pci_remove_behind_bridge(struct pci_dev *dev)
112{
113 struct list_head *l, *n;
114
115 if (dev->subordinate)
116 list_for_each_safe(l, n, &dev->subordinate->devices)
117 __pci_remove_bus_device(pci_dev_b(l));
118}
119
120static void pci_stop_behind_bridge(struct pci_dev *dev)
121{
122 struct list_head *l, *n;
123
124 if (dev->subordinate)
125 list_for_each_safe(l, n, &dev->subordinate->devices)
126 pci_stop_bus_device(pci_dev_b(l));
127}
128
110/** 129/**
111 * pci_remove_behind_bridge - remove all devices behind a PCI bridge 130 * pci_remove_behind_bridge - remove all devices behind a PCI bridge
112 * @dev: PCI bridge device 131 * @dev: PCI bridge device
@@ -117,11 +136,8 @@ void pci_remove_bus_device(struct pci_dev *dev)
117 */ 136 */
118void pci_remove_behind_bridge(struct pci_dev *dev) 137void pci_remove_behind_bridge(struct pci_dev *dev)
119{ 138{
120 struct list_head *l, *n; 139 pci_stop_behind_bridge(dev);
121 140 __pci_remove_behind_bridge(dev);
122 if (dev->subordinate)
123 list_for_each_safe(l, n, &dev->subordinate->devices)
124 __pci_remove_bus_device(pci_dev_b(l));
125} 141}
126 142
127static void pci_stop_bus_devices(struct pci_bus *bus) 143static void pci_stop_bus_devices(struct pci_bus *bus)
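
With the change above, pci_remove_behind_bridge() stops every child device before any of them is removed, instead of handling each device in a single pass. Below is a standalone sketch of that two-phase teardown; the list and device types are stand-ins, not the PCI core's structures.

#include <stdio.h>

struct child { const char *name; struct child *next; };

static void stop_dev(struct child *d)   { printf("stop %s\n", d->name); }
static void remove_dev(struct child *d) { printf("remove %s\n", d->name); }

static void teardown_behind_bridge(struct child *children)
{
	struct child *d;

	for (d = children; d; d = d->next)   /* phase 1: quiesce every device */
		stop_dev(d);
	for (d = children; d; d = d->next)   /* phase 2: tear them down */
		remove_dev(d);
}

int main(void)
{
	struct child b = { "01:00.1", NULL };
	struct child a = { "01:00.0", &b };

	teardown_behind_bridge(&a);
	return 0;
}
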
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index 7cf3d2fcf56a..1620088a0e7e 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -189,7 +189,7 @@ static int pcifront_bus_read(struct pci_bus *bus, unsigned int devfn,
189 189
190 if (verbose_request) 190 if (verbose_request)
191 dev_info(&pdev->xdev->dev, 191 dev_info(&pdev->xdev->dev,
192 "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n", 192 "read dev=%04x:%02x:%02x.%d - offset %x size %d\n",
193 pci_domain_nr(bus), bus->number, PCI_SLOT(devfn), 193 pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
194 PCI_FUNC(devfn), where, size); 194 PCI_FUNC(devfn), where, size);
195 195
@@ -228,7 +228,7 @@ static int pcifront_bus_write(struct pci_bus *bus, unsigned int devfn,
228 228
229 if (verbose_request) 229 if (verbose_request)
230 dev_info(&pdev->xdev->dev, 230 dev_info(&pdev->xdev->dev,
231 "write dev=%04x:%02x:%02x.%01x - " 231 "write dev=%04x:%02x:%02x.%d - "
232 "offset %x size %d val %x\n", 232 "offset %x size %d val %x\n",
233 pci_domain_nr(bus), bus->number, 233 pci_domain_nr(bus), bus->number,
234 PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, val); 234 PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, val);
@@ -432,7 +432,7 @@ static int __devinit pcifront_scan_bus(struct pcifront_device *pdev,
432 d = pci_scan_single_device(b, devfn); 432 d = pci_scan_single_device(b, devfn);
433 if (d) 433 if (d)
434 dev_info(&pdev->xdev->dev, "New device on " 434 dev_info(&pdev->xdev->dev, "New device on "
435 "%04x:%02x:%02x.%02x found.\n", domain, bus, 435 "%04x:%02x:%02x.%d found.\n", domain, bus,
436 PCI_SLOT(devfn), PCI_FUNC(devfn)); 436 PCI_SLOT(devfn), PCI_FUNC(devfn));
437 } 437 }
438 438
@@ -1041,7 +1041,7 @@ static int pcifront_detach_devices(struct pcifront_device *pdev)
1041 pci_dev = pci_get_slot(pci_bus, PCI_DEVFN(slot, func)); 1041 pci_dev = pci_get_slot(pci_bus, PCI_DEVFN(slot, func));
1042 if (!pci_dev) { 1042 if (!pci_dev) {
1043 dev_dbg(&pdev->xdev->dev, 1043 dev_dbg(&pdev->xdev->dev,
1044 "Cannot get PCI device %04x:%02x:%02x.%02x\n", 1044 "Cannot get PCI device %04x:%02x:%02x.%d\n",
1045 domain, bus, slot, func); 1045 domain, bus, slot, func);
1046 continue; 1046 continue;
1047 } 1047 }
@@ -1049,7 +1049,7 @@ static int pcifront_detach_devices(struct pcifront_device *pdev)
1049 pci_dev_put(pci_dev); 1049 pci_dev_put(pci_dev);
1050 1050
1051 dev_dbg(&pdev->xdev->dev, 1051 dev_dbg(&pdev->xdev->dev,
1052 "PCI device %04x:%02x:%02x.%02x removed.\n", 1052 "PCI device %04x:%02x:%02x.%d removed.\n",
1053 domain, bus, slot, func); 1053 domain, bus, slot, func);
1054 } 1054 }
1055 1055
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 749c2a16012c..1932029de48d 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -1269,10 +1269,8 @@ static int pcmcia_bus_add(struct pcmcia_socket *skt)
1269 1269
1270static int pcmcia_bus_early_resume(struct pcmcia_socket *skt) 1270static int pcmcia_bus_early_resume(struct pcmcia_socket *skt)
1271{ 1271{
1272 if (!verify_cis_cache(skt)) { 1272 if (!verify_cis_cache(skt))
1273 pcmcia_put_socket(skt);
1274 return 0; 1273 return 0;
1275 }
1276 1274
1277 dev_dbg(&skt->dev, "cis mismatch - different card\n"); 1275 dev_dbg(&skt->dev, "cis mismatch - different card\n");
1278 1276
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 8fe15cf15ac8..894cd5e103da 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -189,7 +189,7 @@ static int pinctrl_register_one_pin(struct pinctrl_dev *pctldev,
189 pindesc->pctldev = pctldev; 189 pindesc->pctldev = pctldev;
190 190
191 /* Copy basic pin info */ 191 /* Copy basic pin info */
192 if (pindesc->name) { 192 if (name) {
193 pindesc->name = name; 193 pindesc->name = name;
194 } else { 194 } else {
195 pindesc->name = kasprintf(GFP_KERNEL, "PIN%u", number); 195 pindesc->name = kasprintf(GFP_KERNEL, "PIN%u", number);
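
The one-line fix above tests the caller-supplied name rather than pindesc->name, which is still NULL in the freshly zero-allocated descriptor. The userspace sketch below shows the corrected pattern; calloc(), strdup(), and snprintf() stand in for kzalloc()/kasprintf(), and the names are illustrative.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct pin_desc { const char *name; };

static struct pin_desc *register_pin(const char *name, unsigned int number)
{
	struct pin_desc *desc = calloc(1, sizeof(*desc));
	char buf[16];

	if (!desc)
		return NULL;

	/* check the argument, not desc->name (which is still NULL here) */
	if (name) {
		desc->name = strdup(name);
	} else {
		snprintf(buf, sizeof(buf), "PIN%u", number);
		desc->name = strdup(buf);
	}
	return desc;
}

int main(void)
{
	struct pin_desc *a = register_pin(NULL, 7);
	struct pin_desc *b = register_pin("gpio3", 3);

	if (a && b)
		printf("%s %s\n", a->name, b->name);   /* PIN7 gpio3 */
	return 0;
}
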
diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
index 98bf5676318d..1ed6ea0bad6e 100644
--- a/drivers/power/bq27x00_battery.c
+++ b/drivers/power/bq27x00_battery.c
@@ -62,11 +62,10 @@
62 62
63#define BQ27500_REG_SOC 0x2C 63#define BQ27500_REG_SOC 0x2C
64#define BQ27500_REG_DCAP 0x3C /* Design capacity */ 64#define BQ27500_REG_DCAP 0x3C /* Design capacity */
65#define BQ27500_FLAG_DSG BIT(0) /* Discharging */ 65#define BQ27500_FLAG_DSC BIT(0)
66#define BQ27500_FLAG_SOCF BIT(1) /* State-of-Charge threshold final */ 66#define BQ27500_FLAG_SOCF BIT(1) /* State-of-Charge threshold final */
67#define BQ27500_FLAG_SOC1 BIT(2) /* State-of-Charge threshold 1 */ 67#define BQ27500_FLAG_SOC1 BIT(2) /* State-of-Charge threshold 1 */
68#define BQ27500_FLAG_CHG BIT(8) /* Charging */ 68#define BQ27500_FLAG_FC BIT(9)
69#define BQ27500_FLAG_FC BIT(9) /* Fully charged */
70 69
71#define BQ27000_RS 20 /* Resistor sense */ 70#define BQ27000_RS 20 /* Resistor sense */
72 71
@@ -312,7 +311,7 @@ static void bq27x00_update(struct bq27x00_device_info *di)
312 struct bq27x00_reg_cache cache = {0, }; 311 struct bq27x00_reg_cache cache = {0, };
313 bool is_bq27500 = di->chip == BQ27500; 312 bool is_bq27500 = di->chip == BQ27500;
314 313
315 cache.flags = bq27x00_read(di, BQ27x00_REG_FLAGS, is_bq27500); 314 cache.flags = bq27x00_read(di, BQ27x00_REG_FLAGS, !is_bq27500);
316 if (cache.flags >= 0) { 315 if (cache.flags >= 0) {
317 if (!is_bq27500 && (cache.flags & BQ27000_FLAG_CI)) { 316 if (!is_bq27500 && (cache.flags & BQ27000_FLAG_CI)) {
318 dev_info(di->dev, "battery is not calibrated! ignoring capacity values\n"); 317 dev_info(di->dev, "battery is not calibrated! ignoring capacity values\n");
@@ -401,14 +400,10 @@ static int bq27x00_battery_status(struct bq27x00_device_info *di,
401 if (di->chip == BQ27500) { 400 if (di->chip == BQ27500) {
402 if (di->cache.flags & BQ27500_FLAG_FC) 401 if (di->cache.flags & BQ27500_FLAG_FC)
403 status = POWER_SUPPLY_STATUS_FULL; 402 status = POWER_SUPPLY_STATUS_FULL;
404 else if (di->cache.flags & BQ27500_FLAG_DSG) 403 else if (di->cache.flags & BQ27500_FLAG_DSC)
405 status = POWER_SUPPLY_STATUS_DISCHARGING; 404 status = POWER_SUPPLY_STATUS_DISCHARGING;
406 else if (di->cache.flags & BQ27500_FLAG_CHG)
407 status = POWER_SUPPLY_STATUS_CHARGING;
408 else if (power_supply_am_i_supplied(&di->bat))
409 status = POWER_SUPPLY_STATUS_NOT_CHARGING;
410 else 405 else
411 status = POWER_SUPPLY_STATUS_UNKNOWN; 406 status = POWER_SUPPLY_STATUS_CHARGING;
412 } else { 407 } else {
413 if (di->cache.flags & BQ27000_FLAG_FC) 408 if (di->cache.flags & BQ27000_FLAG_FC)
414 status = POWER_SUPPLY_STATUS_FULL; 409 status = POWER_SUPPLY_STATUS_FULL;
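
After the flag cleanup above, the bq27500 status is derived from the FC and DSC bits alone. A minimal standalone sketch of that decision follows; the flag values mirror the diff, but the register words passed in are invented for the example.

#include <stdio.h>

#define BIT(n)           (1u << (n))
#define BQ27500_FLAG_DSC BIT(0)   /* discharging */
#define BQ27500_FLAG_FC  BIT(9)   /* fully charged */

static const char *bq27500_status(unsigned int flags)
{
	if (flags & BQ27500_FLAG_FC)
		return "full";
	if (flags & BQ27500_FLAG_DSC)
		return "discharging";
	return "charging";
}

int main(void)
{
	printf("%s\n", bq27500_status(BQ27500_FLAG_FC));   /* full */
	printf("%s\n", bq27500_status(BQ27500_FLAG_DSC));  /* discharging */
	printf("%s\n", bq27500_status(0));                 /* charging */
	return 0;
}
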
diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
index 0378d019efae..88fd9710bda2 100644
--- a/drivers/power/charger-manager.c
+++ b/drivers/power/charger-manager.c
@@ -974,10 +974,11 @@ static int __devexit charger_manager_remove(struct platform_device *pdev)
974 return 0; 974 return 0;
975} 975}
976 976
977const struct platform_device_id charger_manager_id[] = { 977static const struct platform_device_id charger_manager_id[] = {
978 { "charger-manager", 0 }, 978 { "charger-manager", 0 },
979 { }, 979 { },
980}; 980};
981MODULE_DEVICE_TABLE(platform, charger_manager_id);
981 982
982static int cm_suspend_prepare(struct device *dev) 983static int cm_suspend_prepare(struct device *dev)
983{ 984{
@@ -1069,4 +1070,3 @@ module_exit(charger_manager_cleanup);
1069MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>"); 1070MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
1070MODULE_DESCRIPTION("Charger Manager"); 1071MODULE_DESCRIPTION("Charger Manager");
1071MODULE_LICENSE("GPL"); 1072MODULE_LICENSE("GPL");
1072MODULE_ALIAS("charger-manager");
diff --git a/drivers/power/lp8727_charger.c b/drivers/power/lp8727_charger.c
index b15b575c070c..c53dd1292f81 100644
--- a/drivers/power/lp8727_charger.c
+++ b/drivers/power/lp8727_charger.c
@@ -464,6 +464,7 @@ static int __devexit lp8727_remove(struct i2c_client *cl)
464 464
465static const struct i2c_device_id lp8727_ids[] = { 465static const struct i2c_device_id lp8727_ids[] = {
466 {"lp8727", 0}, 466 {"lp8727", 0},
467 { }
467}; 468};
468 469
469static struct i2c_driver lp8727_driver = { 470static struct i2c_driver lp8727_driver = {
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c
index b06a2399587c..d0e1180ad961 100644
--- a/drivers/regulator/max8649.c
+++ b/drivers/regulator/max8649.c
@@ -150,7 +150,7 @@ static int max8649_enable_time(struct regulator_dev *rdev)
150 if (ret != 0) 150 if (ret != 0)
151 return ret; 151 return ret;
152 val &= MAX8649_VOL_MASK; 152 val &= MAX8649_VOL_MASK;
153 voltage = max8649_list_voltage(rdev, (unsigned char)ret); /* uV */ 153 voltage = max8649_list_voltage(rdev, (unsigned char)val); /* uV */
154 154
155 /* get rate */ 155 /* get rate */
156 ret = regmap_read(info->regmap, MAX8649_RAMP, &val); 156 ret = regmap_read(info->regmap, MAX8649_RAMP, &val);
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c
index 80ecafef1bc3..62dcd0a432bb 100644
--- a/drivers/regulator/mc13xxx-regulator-core.c
+++ b/drivers/regulator/mc13xxx-regulator-core.c
@@ -254,6 +254,7 @@ int __devinit mc13xxx_get_num_regulators_dt(struct platform_device *pdev)
254 254
255 return num; 255 return num;
256} 256}
257EXPORT_SYMBOL_GPL(mc13xxx_get_num_regulators_dt);
257 258
258struct mc13xxx_regulator_init_data * __devinit mc13xxx_parse_regulators_dt( 259struct mc13xxx_regulator_init_data * __devinit mc13xxx_parse_regulators_dt(
259 struct platform_device *pdev, struct mc13xxx_regulator *regulators, 260 struct platform_device *pdev, struct mc13xxx_regulator *regulators,
@@ -291,6 +292,7 @@ struct mc13xxx_regulator_init_data * __devinit mc13xxx_parse_regulators_dt(
291 292
292 return data; 293 return data;
293} 294}
295EXPORT_SYMBOL_GPL(mc13xxx_parse_regulators_dt);
294#endif 296#endif
295 297
296MODULE_LICENSE("GPL v2"); 298MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index a3ad957507dc..ee3c122c0599 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -307,8 +307,12 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
307 device_init_wakeup(&pdev->dev, 1); 307 device_init_wakeup(&pdev->dev, 1);
308 308
309 platform_set_drvdata(pdev, rtc); 309 platform_set_drvdata(pdev, rtc);
310 rtc->rtt = (void __force __iomem *) (AT91_VA_BASE_SYS - AT91_BASE_SYS); 310 rtc->rtt = ioremap(r->start, resource_size(r));
311 rtc->rtt += r->start; 311 if (!rtc->rtt) {
312 dev_err(&pdev->dev, "failed to map registers, aborting.\n");
313 ret = -ENOMEM;
314 goto fail;
315 }
312 316
313 mr = rtt_readl(rtc, MR); 317 mr = rtt_readl(rtc, MR);
314 318
@@ -326,7 +330,7 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
326 &at91_rtc_ops, THIS_MODULE); 330 &at91_rtc_ops, THIS_MODULE);
327 if (IS_ERR(rtc->rtcdev)) { 331 if (IS_ERR(rtc->rtcdev)) {
328 ret = PTR_ERR(rtc->rtcdev); 332 ret = PTR_ERR(rtc->rtcdev);
329 goto fail; 333 goto fail_register;
330 } 334 }
331 335
332 /* register irq handler after we know what name we'll use */ 336 /* register irq handler after we know what name we'll use */
@@ -351,6 +355,8 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
351 355
352 return 0; 356 return 0;
353 357
358fail_register:
359 iounmap(rtc->rtt);
354fail: 360fail:
355 platform_set_drvdata(pdev, NULL); 361 platform_set_drvdata(pdev, NULL);
356 kfree(rtc); 362 kfree(rtc);
@@ -371,6 +377,7 @@ static int __exit at91_rtc_remove(struct platform_device *pdev)
371 377
372 rtc_device_unregister(rtc->rtcdev); 378 rtc_device_unregister(rtc->rtcdev);
373 379
380 iounmap(rtc->rtt);
374 platform_set_drvdata(pdev, NULL); 381 platform_set_drvdata(pdev, NULL);
375 kfree(rtc); 382 kfree(rtc);
376 return 0; 383 return 0;
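
The probe path above now maps the registers with ioremap() and unwinds through a dedicated fail_register label. The standalone sketch below shows that goto-based unwind ordering; malloc() stands in for ioremap() and rtc_device_register(), and on success the resources are deliberately kept, as after a real probe.

#include <stdio.h>
#include <stdlib.h>

static int probe(void)
{
	void *regs, *rtcdev;
	int ret;

	regs = malloc(64);                /* stands in for ioremap() */
	if (!regs) {
		ret = -1;
		goto fail;
	}

	rtcdev = malloc(32);              /* stands in for rtc_device_register() */
	if (!rtcdev) {
		ret = -1;
		goto fail_register;
	}

	printf("probed\n");               /* resources stay live after a probe */
	return 0;

fail_register:
	free(regs);                       /* undo only what was set up before */
fail:
	return ret;
}

int main(void)
{
	return probe() ? 1 : 0;
}
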
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 3f9a47ec67dc..8293658e7cf9 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -299,7 +299,7 @@ config SPI_S3C24XX_FIQ
299 299
300config SPI_S3C64XX 300config SPI_S3C64XX
301 tristate "Samsung S3C64XX series type SPI" 301 tristate "Samsung S3C64XX series type SPI"
302 depends on (ARCH_S3C64XX || ARCH_S5P64X0) 302 depends on (ARCH_S3C64XX || ARCH_S5P64X0 || ARCH_EXYNOS)
303 select S3C64XX_DMA if ARCH_S3C64XX 303 select S3C64XX_DMA if ARCH_S3C64XX
304 help 304 help
305 SPI driver for Samsung S3C64XX and newer SoCs. 305 SPI driver for Samsung S3C64XX and newer SoCs.
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 2a6429d8c363..10182eb50068 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -1720,7 +1720,7 @@ static int pch_spi_resume(struct pci_dev *pdev)
1720 1720
1721#endif 1721#endif
1722 1722
1723static struct pci_driver pch_spi_pcidev = { 1723static struct pci_driver pch_spi_pcidev_driver = {
1724 .name = "pch_spi", 1724 .name = "pch_spi",
1725 .id_table = pch_spi_pcidev_id, 1725 .id_table = pch_spi_pcidev_id,
1726 .probe = pch_spi_probe, 1726 .probe = pch_spi_probe,
@@ -1736,7 +1736,7 @@ static int __init pch_spi_init(void)
1736 if (ret) 1736 if (ret)
1737 return ret; 1737 return ret;
1738 1738
1739 ret = pci_register_driver(&pch_spi_pcidev); 1739 ret = pci_register_driver(&pch_spi_pcidev_driver);
1740 if (ret) 1740 if (ret)
1741 return ret; 1741 return ret;
1742 1742
@@ -1746,7 +1746,7 @@ module_init(pch_spi_init);
1746 1746
1747static void __exit pch_spi_exit(void) 1747static void __exit pch_spi_exit(void)
1748{ 1748{
1749 pci_unregister_driver(&pch_spi_pcidev); 1749 pci_unregister_driver(&pch_spi_pcidev_driver);
1750 platform_driver_unregister(&pch_spi_pd_driver); 1750 platform_driver_unregister(&pch_spi_pd_driver);
1751} 1751}
1752module_exit(pch_spi_exit); 1752module_exit(pch_spi_exit);
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index 520e8286db28..49d209173f55 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -75,7 +75,7 @@ static u32 get_cfgspace_addr(struct ssb_pcicore *pc,
75 u32 tmp; 75 u32 tmp;
76 76
77 /* We do only have one cardbus device behind the bridge. */ 77 /* We do only have one cardbus device behind the bridge. */
78 if (pc->cardbusmode && (dev >= 1)) 78 if (pc->cardbusmode && (dev > 1))
79 goto out; 79 goto out;
80 80
81 if (bus == 0) { 81 if (bus == 0) {
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 21e2f4b87f14..9e6347249783 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -60,8 +60,6 @@ source "drivers/staging/rts5139/Kconfig"
60 60
61source "drivers/staging/frontier/Kconfig" 61source "drivers/staging/frontier/Kconfig"
62 62
63source "drivers/staging/pohmelfs/Kconfig"
64
65source "drivers/staging/phison/Kconfig" 63source "drivers/staging/phison/Kconfig"
66 64
67source "drivers/staging/line6/Kconfig" 65source "drivers/staging/line6/Kconfig"
@@ -120,8 +118,6 @@ source "drivers/staging/cptm1217/Kconfig"
120 118
121source "drivers/staging/ste_rmi4/Kconfig" 119source "drivers/staging/ste_rmi4/Kconfig"
122 120
123source "drivers/staging/gma500/Kconfig"
124
125source "drivers/staging/mei/Kconfig" 121source "drivers/staging/mei/Kconfig"
126 122
127source "drivers/staging/nvec/Kconfig" 123source "drivers/staging/nvec/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 7c5808d7212d..943e14830753 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -22,7 +22,6 @@ obj-$(CONFIG_R8712U) += rtl8712/
22obj-$(CONFIG_RTS_PSTOR) += rts_pstor/ 22obj-$(CONFIG_RTS_PSTOR) += rts_pstor/
23obj-$(CONFIG_RTS5139) += rts5139/ 23obj-$(CONFIG_RTS5139) += rts5139/
24obj-$(CONFIG_TRANZPORT) += frontier/ 24obj-$(CONFIG_TRANZPORT) += frontier/
25obj-$(CONFIG_POHMELFS) += pohmelfs/
26obj-$(CONFIG_IDE_PHISON) += phison/ 25obj-$(CONFIG_IDE_PHISON) += phison/
27obj-$(CONFIG_LINE6_USB) += line6/ 26obj-$(CONFIG_LINE6_USB) += line6/
28obj-$(CONFIG_USB_SERIAL_QUATECH2) += serqt_usb2/ 27obj-$(CONFIG_USB_SERIAL_QUATECH2) += serqt_usb2/
@@ -52,7 +51,6 @@ obj-$(CONFIG_FT1000) += ft1000/
52obj-$(CONFIG_SPEAKUP) += speakup/ 51obj-$(CONFIG_SPEAKUP) += speakup/
53obj-$(CONFIG_TOUCHSCREEN_CLEARPAD_TM1217) += cptm1217/ 52obj-$(CONFIG_TOUCHSCREEN_CLEARPAD_TM1217) += cptm1217/
54obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4) += ste_rmi4/ 53obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4) += ste_rmi4/
55obj-$(CONFIG_DRM_PSB) += gma500/
56obj-$(CONFIG_INTEL_MEI) += mei/ 54obj-$(CONFIG_INTEL_MEI) += mei/
57obj-$(CONFIG_MFD_NVEC) += nvec/ 55obj-$(CONFIG_MFD_NVEC) += nvec/
58obj-$(CONFIG_DRM_OMAP) += omapdrm/ 56obj-$(CONFIG_DRM_OMAP) += omapdrm/
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index becf711117ef..fef3580ce8de 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -27,6 +27,7 @@ config ANDROID_LOGGER
27 27
28config ANDROID_RAM_CONSOLE 28config ANDROID_RAM_CONSOLE
29 bool "Android RAM buffer console" 29 bool "Android RAM buffer console"
30 depends on !S390 && !UML
30 default n 31 default n
31 32
32config ANDROID_RAM_CONSOLE_ENABLE_VERBOSE 33config ANDROID_RAM_CONSOLE_ENABLE_VERBOSE
@@ -99,10 +100,6 @@ config ANDROID_LOW_MEMORY_KILLER
99 ---help--- 100 ---help---
100 Register processes to be killed when memory is low 101 Register processes to be killed when memory is low
101 102
102config ANDROID_PMEM
103 bool "Android pmem allocator"
104 depends on ARM
105
106source "drivers/staging/android/switch/Kconfig" 103source "drivers/staging/android/switch/Kconfig"
107 104
108endif # if ANDROID 105endif # if ANDROID
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index eaed1ff64f0f..5fcc24ffdd58 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -5,5 +5,4 @@ obj-$(CONFIG_ANDROID_RAM_CONSOLE) += ram_console.o
5obj-$(CONFIG_ANDROID_TIMED_OUTPUT) += timed_output.o 5obj-$(CONFIG_ANDROID_TIMED_OUTPUT) += timed_output.o
6obj-$(CONFIG_ANDROID_TIMED_GPIO) += timed_gpio.o 6obj-$(CONFIG_ANDROID_TIMED_GPIO) += timed_gpio.o
7obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o 7obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o
8obj-$(CONFIG_ANDROID_PMEM) += pmem.o
9obj-$(CONFIG_ANDROID_SWITCH) += switch/ 8obj-$(CONFIG_ANDROID_SWITCH) += switch/
diff --git a/drivers/staging/android/android_pmem.h b/drivers/staging/android/android_pmem.h
deleted file mode 100644
index f633621f5be3..000000000000
--- a/drivers/staging/android/android_pmem.h
+++ /dev/null
@@ -1,93 +0,0 @@
1/* include/linux/android_pmem.h
2 *
3 * Copyright (C) 2007 Google, Inc.
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 */
15
16#ifndef _ANDROID_PMEM_H_
17#define _ANDROID_PMEM_H_
18
19#define PMEM_IOCTL_MAGIC 'p'
20#define PMEM_GET_PHYS _IOW(PMEM_IOCTL_MAGIC, 1, unsigned int)
21#define PMEM_MAP _IOW(PMEM_IOCTL_MAGIC, 2, unsigned int)
22#define PMEM_GET_SIZE _IOW(PMEM_IOCTL_MAGIC, 3, unsigned int)
23#define PMEM_UNMAP _IOW(PMEM_IOCTL_MAGIC, 4, unsigned int)
24/* This ioctl will allocate pmem space, backing the file, it will fail
25 * if the file already has an allocation, pass it the len as the argument
26 * to the ioctl */
27#define PMEM_ALLOCATE _IOW(PMEM_IOCTL_MAGIC, 5, unsigned int)
28/* This will connect a one pmem file to another, pass the file that is already
29 * backed in memory as the argument to the ioctl
30 */
31#define PMEM_CONNECT _IOW(PMEM_IOCTL_MAGIC, 6, unsigned int)
32/* Returns the total size of the pmem region it is sent to as a pmem_region
33 * struct (with offset set to 0).
34 */
35#define PMEM_GET_TOTAL_SIZE _IOW(PMEM_IOCTL_MAGIC, 7, unsigned int)
36#define PMEM_CACHE_FLUSH _IOW(PMEM_IOCTL_MAGIC, 8, unsigned int)
37
38struct android_pmem_platform_data
39{
40 const char* name;
41 /* starting physical address of memory region */
42 unsigned long start;
43 /* size of memory region */
44 unsigned long size;
45 /* set to indicate the region should not be managed with an allocator */
46 unsigned no_allocator;
47 /* set to indicate maps of this region should be cached, if a mix of
48 * cached and uncached is desired, set this and open the device with
49 * O_SYNC to get an uncached region */
50 unsigned cached;
51 /* The MSM7k has bits to enable a write buffer in the bus controller*/
52 unsigned buffered;
53};
54
55struct pmem_region {
56 unsigned long offset;
57 unsigned long len;
58};
59
60#ifdef CONFIG_ANDROID_PMEM
61int is_pmem_file(struct file *file);
62int get_pmem_file(int fd, unsigned long *start, unsigned long *vstart,
63 unsigned long *end, struct file **filp);
64int get_pmem_user_addr(struct file *file, unsigned long *start,
65 unsigned long *end);
66void put_pmem_file(struct file* file);
67void flush_pmem_file(struct file *file, unsigned long start, unsigned long len);
68int pmem_setup(struct android_pmem_platform_data *pdata,
69 long (*ioctl)(struct file *, unsigned int, unsigned long),
70 int (*release)(struct inode *, struct file *));
71int pmem_remap(struct pmem_region *region, struct file *file,
72 unsigned operation);
73
74#else
75static inline int is_pmem_file(struct file *file) { return 0; }
76static inline int get_pmem_file(int fd, unsigned long *start,
77 unsigned long *vstart, unsigned long *end,
78 struct file **filp) { return -ENOSYS; }
79static inline int get_pmem_user_addr(struct file *file, unsigned long *start,
80 unsigned long *end) { return -ENOSYS; }
81static inline void put_pmem_file(struct file* file) { return; }
82static inline void flush_pmem_file(struct file *file, unsigned long start,
83 unsigned long len) { return; }
84static inline int pmem_setup(struct android_pmem_platform_data *pdata,
85 long (*ioctl)(struct file *, unsigned int, unsigned long),
86 int (*release)(struct inode *, struct file *)) { return -ENOSYS; }
87
88static inline int pmem_remap(struct pmem_region *region, struct file *file,
89 unsigned operation) { return -ENOSYS; }
90#endif
91
92#endif //_ANDROID_PPP_H_
93
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
index 7491801a661c..f0b7e6605ab5 100644
--- a/drivers/staging/android/binder.c
+++ b/drivers/staging/android/binder.c
@@ -38,6 +38,7 @@
38 38
39static DEFINE_MUTEX(binder_lock); 39static DEFINE_MUTEX(binder_lock);
40static DEFINE_MUTEX(binder_deferred_lock); 40static DEFINE_MUTEX(binder_deferred_lock);
41static DEFINE_MUTEX(binder_mmap_lock);
41 42
42static HLIST_HEAD(binder_procs); 43static HLIST_HEAD(binder_procs);
43static HLIST_HEAD(binder_deferred_list); 44static HLIST_HEAD(binder_deferred_list);
@@ -632,6 +633,11 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
632 if (mm) { 633 if (mm) {
633 down_write(&mm->mmap_sem); 634 down_write(&mm->mmap_sem);
634 vma = proc->vma; 635 vma = proc->vma;
636 if (vma && mm != vma->vm_mm) {
637 pr_err("binder: %d: vma mm and task mm mismatch\n",
638 proc->pid);
639 vma = NULL;
640 }
635 } 641 }
636 642
637 if (allocate == 0) 643 if (allocate == 0)
@@ -2759,7 +2765,6 @@ static void binder_vma_open(struct vm_area_struct *vma)
2759 proc->pid, vma->vm_start, vma->vm_end, 2765 proc->pid, vma->vm_start, vma->vm_end,
2760 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 2766 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2761 (unsigned long)pgprot_val(vma->vm_page_prot)); 2767 (unsigned long)pgprot_val(vma->vm_page_prot));
2762 dump_stack();
2763} 2768}
2764 2769
2765static void binder_vma_close(struct vm_area_struct *vma) 2770static void binder_vma_close(struct vm_area_struct *vma)
@@ -2803,6 +2808,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
2803 } 2808 }
2804 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE; 2809 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
2805 2810
2811 mutex_lock(&binder_mmap_lock);
2806 if (proc->buffer) { 2812 if (proc->buffer) {
2807 ret = -EBUSY; 2813 ret = -EBUSY;
2808 failure_string = "already mapped"; 2814 failure_string = "already mapped";
@@ -2817,6 +2823,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
2817 } 2823 }
2818 proc->buffer = area->addr; 2824 proc->buffer = area->addr;
2819 proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer; 2825 proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
2826 mutex_unlock(&binder_mmap_lock);
2820 2827
2821#ifdef CONFIG_CPU_CACHE_VIPT 2828#ifdef CONFIG_CPU_CACHE_VIPT
2822 if (cache_is_vipt_aliasing()) { 2829 if (cache_is_vipt_aliasing()) {
@@ -2849,7 +2856,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
2849 binder_insert_free_buffer(proc, buffer); 2856 binder_insert_free_buffer(proc, buffer);
2850 proc->free_async_space = proc->buffer_size / 2; 2857 proc->free_async_space = proc->buffer_size / 2;
2851 barrier(); 2858 barrier();
2852 proc->files = get_files_struct(current); 2859 proc->files = get_files_struct(proc->tsk);
2853 proc->vma = vma; 2860 proc->vma = vma;
2854 2861
2855 /*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n", 2862 /*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n",
@@ -2860,10 +2867,12 @@ err_alloc_small_buf_failed:
2860 kfree(proc->pages); 2867 kfree(proc->pages);
2861 proc->pages = NULL; 2868 proc->pages = NULL;
2862err_alloc_pages_failed: 2869err_alloc_pages_failed:
2870 mutex_lock(&binder_mmap_lock);
2863 vfree(proc->buffer); 2871 vfree(proc->buffer);
2864 proc->buffer = NULL; 2872 proc->buffer = NULL;
2865err_get_vm_area_failed: 2873err_get_vm_area_failed:
2866err_already_mapped: 2874err_already_mapped:
2875 mutex_unlock(&binder_mmap_lock);
2867err_bad_arg: 2876err_bad_arg:
2868 printk(KERN_ERR "binder_mmap: %d %lx-%lx %s failed %d\n", 2877 printk(KERN_ERR "binder_mmap: %d %lx-%lx %s failed %d\n",
2869 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); 2878 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
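
The new binder_mmap_lock above serializes the check of proc->buffer against its assignment, so two concurrent mmap() callers cannot both pass the NULL check. Below is a userspace sketch of the same check-then-set pattern using a pthread mutex; malloc() stands in for get_vm_area().

#include <pthread.h>
#include <stdlib.h>
#include <stdio.h>

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
static void *buffer;

static int map_once(size_t size)
{
	int ret = 0;

	pthread_mutex_lock(&map_lock);
	if (buffer) {                     /* "already mapped" */
		ret = -1;
	} else {
		buffer = malloc(size);    /* stands in for get_vm_area() */
		if (!buffer)
			ret = -1;
	}
	pthread_mutex_unlock(&map_lock);
	return ret;
}

int main(void)
{
	int first = map_once(4096);
	int second = map_once(4096);

	printf("%d %d\n", first, second); /* 0 -1: only the first mapping succeeds */
	return 0;
}
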
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index 2d8d2b796101..efc7dc1f4831 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -54,6 +54,7 @@ static size_t lowmem_minfree[6] = {
54static int lowmem_minfree_size = 4; 54static int lowmem_minfree_size = 4;
55 55
56static struct task_struct *lowmem_deathpending; 56static struct task_struct *lowmem_deathpending;
57static unsigned long lowmem_deathpending_timeout;
57 58
58#define lowmem_print(level, x...) \ 59#define lowmem_print(level, x...) \
59 do { \ 60 do { \
@@ -103,7 +104,8 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
103 * Note: Currently you need CONFIG_PROFILING 104 * Note: Currently you need CONFIG_PROFILING
104 * for this to work correctly. 105 * for this to work correctly.
105 */ 106 */
106 if (lowmem_deathpending) 107 if (lowmem_deathpending &&
108 time_before_eq(jiffies, lowmem_deathpending_timeout))
107 return 0; 109 return 0;
108 110
109 if (lowmem_adj_size < array_size) 111 if (lowmem_adj_size < array_size)
@@ -178,6 +180,7 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
178 */ 180 */
179#ifdef CONFIG_PROFILING 181#ifdef CONFIG_PROFILING
180 lowmem_deathpending = selected; 182 lowmem_deathpending = selected;
183 lowmem_deathpending_timeout = jiffies + HZ;
181 task_handoff_register(&task_nb); 184 task_handoff_register(&task_nb);
182#endif 185#endif
183 force_sig(SIGKILL, selected); 186 force_sig(SIGKILL, selected);
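
With the change above, the shrinker only backs off while a one-second window (jiffies + HZ) is still open, using the wrap-safe time_before_eq() comparison. The standalone sketch below shows how that style of comparison stays correct across counter wraparound; jiffies_t, hz, and the sample values are stand-ins, not the kernel's jiffies machinery.

#include <stdio.h>

typedef unsigned long jiffies_t;

/* true if a is at or before b, even if the counter has wrapped */
static int time_before_eq(jiffies_t a, jiffies_t b)
{
	return (long)(a - b) <= 0;
}

int main(void)
{
	jiffies_t hz = 100;
	jiffies_t now = (jiffies_t)-5;        /* counter is about to wrap */
	jiffies_t timeout = now + hz;         /* wraps past zero */

	printf("%d\n", time_before_eq(now, timeout));          /* 1: still pending */
	printf("%d\n", time_before_eq(now + 2 * hz, timeout)); /* 0: window expired */
	return 0;
}
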
diff --git a/drivers/staging/android/pmem.c b/drivers/staging/android/pmem.c
deleted file mode 100644
index 7d97032c6508..000000000000
--- a/drivers/staging/android/pmem.c
+++ /dev/null
@@ -1,1345 +0,0 @@
1/* pmem.c
2 *
3 * Copyright (C) 2007 Google, Inc.
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 */
15
16#include <linux/miscdevice.h>
17#include <linux/platform_device.h>
18#include <linux/fs.h>
19#include <linux/file.h>
20#include <linux/mm.h>
21#include <linux/list.h>
22#include <linux/mutex.h>
23#include <linux/debugfs.h>
24#include <linux/mempolicy.h>
25#include <linux/sched.h>
26#include <asm/io.h>
27#include <asm/uaccess.h>
28#include <asm/cacheflush.h>
29#include "android_pmem.h"
30
31#define PMEM_MAX_DEVICES 10
32#define PMEM_MAX_ORDER 128
33#define PMEM_MIN_ALLOC PAGE_SIZE
34
35#define PMEM_DEBUG 1
36
37/* indicates that a refernce to this file has been taken via get_pmem_file,
38 * the file should not be released until put_pmem_file is called */
39#define PMEM_FLAGS_BUSY 0x1
40/* indicates that this is a suballocation of a larger master range */
41#define PMEM_FLAGS_CONNECTED 0x1 << 1
42/* indicates this is a master and not a sub allocation and that it is mmaped */
43#define PMEM_FLAGS_MASTERMAP 0x1 << 2
44/* submap and unsubmap flags indicate:
45 * 00: subregion has never been mmaped
46 * 10: subregion has been mmaped, reference to the mm was taken
47 * 11: subretion has ben released, refernece to the mm still held
48 * 01: subretion has been released, reference to the mm has been released
49 */
50#define PMEM_FLAGS_SUBMAP 0x1 << 3
51#define PMEM_FLAGS_UNSUBMAP 0x1 << 4
52
53
54struct pmem_data {
55 /* in alloc mode: an index into the bitmap
56 * in no_alloc mode: the size of the allocation */
57 int index;
58 /* see flags above for descriptions */
59 unsigned int flags;
60 /* protects this data field; if the mm's mmap_sem will be held at the
61 * same time as this sem, the mm sem must be taken first (as this is
62 * the order for the vma_open and vma_close ops) */
63 struct rw_semaphore sem;
64 /* info about the mmapping process */
65 struct vm_area_struct *vma;
66 /* task struct of the mapping process */
67 struct task_struct *task;
68 /* process id of the mapping process */
69 pid_t pid;
70 /* file descriptor of the master */
71 int master_fd;
72 /* file struct of the master */
73 struct file *master_file;
74 /* a list of currently available regions if this is a suballocation */
75 struct list_head region_list;
76 /* a linked list of data so we can access them for debugging */
77 struct list_head list;
78#if PMEM_DEBUG
79 int ref;
80#endif
81};
82
83struct pmem_bits {
84 unsigned allocated:1; /* 1 if allocated, 0 if free */
85 unsigned order:7; /* size of the region in pmem space */
86};
87
88struct pmem_region_node {
89 struct pmem_region region;
90 struct list_head list;
91};
92
93#define PMEM_DEBUG_MSGS 0
94#if PMEM_DEBUG_MSGS
95#define DLOG(fmt,args...) \
96 do { printk(KERN_INFO "[%s:%s:%d] "fmt, __FILE__, __func__, __LINE__, \
97 ##args); } \
98 while (0)
99#else
100#define DLOG(x...) do {} while (0)
101#endif
102
103struct pmem_info {
104 struct miscdevice dev;
105 /* physical start address of the remapped pmem space */
106 unsigned long base;
107 /* virtual start address of the remapped pmem space */
108 unsigned char __iomem *vbase;
109 /* total size of the pmem space */
110 unsigned long size;
111 /* number of entries in the pmem space */
112 unsigned long num_entries;
113 /* pfn of the garbage page in memory */
114 unsigned long garbage_pfn;
115 /* index of the garbage page in the pmem space */
116 int garbage_index;
117 /* the bitmap for the region indicating which entries are allocated
118 * and which are free */
119 struct pmem_bits *bitmap;
120 /* indicates the region should not be managed with an allocator */
121 unsigned no_allocator;
122 /* indicates maps of this region should be cached, if a mix of
123 * cached and uncached is desired, set this and open the device with
124 * O_SYNC to get an uncached region */
125 unsigned cached;
126 unsigned buffered;
127 /* in no_allocator mode the first mapper gets the whole space and sets
128 * this flag */
129 unsigned allocated;
130 /* for debugging, creates a list of pmem file structs, the
131 * data_list_lock should be taken before pmem_data->sem if both are
132 * needed */
133 struct mutex data_list_lock;
134 struct list_head data_list;
135 /* pmem_sem protects the bitmap array
136 * a write lock should be held when modifying entries in bitmap
137 * a read lock should be held when reading data from bits or
138 * dereferencing a pointer into bitmap
139 *
140 * pmem_data->sem protects the pmem data of a particular file
141 * Many of the functions that require the pmem_data->sem have a non-
142 * locking version for when the caller is already holding that sem.
143 *
144 * IF YOU TAKE BOTH LOCKS TAKE THEM IN THIS ORDER:
145 * down(pmem_data->sem) => down(bitmap_sem)
146 */
147 struct rw_semaphore bitmap_sem;
148
149 long (*ioctl)(struct file *, unsigned int, unsigned long);
150 int (*release)(struct inode *, struct file *);
151};
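
A minimal illustration of the lock ordering documented above; the helper is hypothetical and assumes a valid id and data pointer:

static void pmem_bitmap_update_example(int id, struct pmem_data *data)
{
	down_write(&data->sem);			/* per-file pmem_data sem first */
	down_write(&pmem[id].bitmap_sem);	/* ... then the bitmap lock */
	/* modify pmem[id].bitmap entries here */
	up_write(&pmem[id].bitmap_sem);
	up_write(&data->sem);
}
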
152
153static struct pmem_info pmem[PMEM_MAX_DEVICES];
154static int id_count;
155
156#define PMEM_IS_FREE(id, index) !(pmem[id].bitmap[index].allocated)
157#define PMEM_ORDER(id, index) pmem[id].bitmap[index].order
158#define PMEM_BUDDY_INDEX(id, index) (index ^ (1 << PMEM_ORDER(id, index)))
159#define PMEM_NEXT_INDEX(id, index) (index + (1 << PMEM_ORDER(id, index)))
160#define PMEM_OFFSET(index) (index * PMEM_MIN_ALLOC)
161#define PMEM_START_ADDR(id, index) (PMEM_OFFSET(index) + pmem[id].base)
162#define PMEM_LEN(id, index) ((1 << PMEM_ORDER(id, index)) * PMEM_MIN_ALLOC)
163#define PMEM_END_ADDR(id, index) (PMEM_START_ADDR(id, index) + \
164 PMEM_LEN(id, index))
165#define PMEM_START_VADDR(id, index) (PMEM_OFFSET(id, index) + pmem[id].vbase)
166#define PMEM_END_VADDR(id, index) (PMEM_START_VADDR(id, index) + \
167 PMEM_LEN(id, index))
168#define PMEM_REVOKED(data) (data->flags & PMEM_FLAGS_REVOKED)
169#define PMEM_IS_PAGE_ALIGNED(addr) (!((addr) & (~PAGE_MASK)))
170#define PMEM_IS_SUBMAP(data) ((data->flags & PMEM_FLAGS_SUBMAP) && \
171 (!(data->flags & PMEM_FLAGS_UNSUBMAP)))
172
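
/* Worked example of the macros above (illustration, not part of the original
 * file, assuming PMEM_MIN_ALLOC == 4 KiB): slot index 8 at order 2 spans
 * entries 8-11, i.e. 16 KiB starting at base + 0x8000; its buddy is
 * 8 ^ (1 << 2) = 12, which is also where PMEM_NEXT_INDEX() continues the
 * bitmap walk. */
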
173static int pmem_release(struct inode *, struct file *);
174static int pmem_mmap(struct file *, struct vm_area_struct *);
175static int pmem_open(struct inode *, struct file *);
176static long pmem_ioctl(struct file *, unsigned int, unsigned long);
177
178struct file_operations pmem_fops = {
179 .release = pmem_release,
180 .mmap = pmem_mmap,
181 .open = pmem_open,
182 .unlocked_ioctl = pmem_ioctl,
183};
184
185static int get_id(struct file *file)
186{
187 return MINOR(file->f_dentry->d_inode->i_rdev);
188}
189
190int is_pmem_file(struct file *file)
191{
192 int id;
193
194 if (unlikely(!file || !file->f_dentry || !file->f_dentry->d_inode))
195 return 0;
196 id = get_id(file);
197 if (unlikely(id >= PMEM_MAX_DEVICES))
198 return 0;
199 if (unlikely(file->f_dentry->d_inode->i_rdev !=
200 MKDEV(MISC_MAJOR, pmem[id].dev.minor)))
201 return 0;
202 return 1;
203}
204
205static int has_allocation(struct file *file)
206{
207 struct pmem_data *data;
208 /* check is_pmem_file first if not accessed via pmem_file_ops */
209
210 if (unlikely(!file->private_data))
211 return 0;
212 data = (struct pmem_data *)file->private_data;
213 if (unlikely(data->index < 0))
214 return 0;
215 return 1;
216}
217
218static int is_master_owner(struct file *file)
219{
220 struct file *master_file;
221 struct pmem_data *data;
222 int put_needed, ret = 0;
223
224 if (!is_pmem_file(file) || !has_allocation(file))
225 return 0;
226 data = (struct pmem_data *)file->private_data;
227 if (PMEM_FLAGS_MASTERMAP & data->flags)
228 return 1;
229 master_file = fget_light(data->master_fd, &put_needed);
230 if (master_file && data->master_file == master_file)
231 ret = 1;
232 fput_light(master_file, put_needed);
233 return ret;
234}
235
236static int pmem_free(int id, int index)
237{
238 /* caller should hold the write lock on pmem_sem! */
239 int buddy, curr = index;
240 DLOG("index %d\n", index);
241
242 if (pmem[id].no_allocator) {
243 pmem[id].allocated = 0;
244 return 0;
245 }
246 /* clean up the bitmap, merging any buddies */
247 pmem[id].bitmap[curr].allocated = 0;
246 /* find a slot's buddy: Buddy# = Slot# ^ (1 << order)
249 * if the buddy is also free merge them
250 * repeat until the buddy is not free or end of the bitmap is reached
251 */
252 do {
253 buddy = PMEM_BUDDY_INDEX(id, curr);
254 if (PMEM_IS_FREE(id, buddy) &&
255 PMEM_ORDER(id, buddy) == PMEM_ORDER(id, curr)) {
256 PMEM_ORDER(id, buddy)++;
257 PMEM_ORDER(id, curr)++;
258 curr = min(buddy, curr);
259 } else {
260 break;
261 }
262 } while (curr < pmem[id].num_entries);
263
264 return 0;
265}
266
267static void pmem_revoke(struct file *file, struct pmem_data *data);
268
269static int pmem_release(struct inode *inode, struct file *file)
270{
271 struct pmem_data *data = (struct pmem_data *)file->private_data;
272 struct pmem_region_node *region_node;
273 struct list_head *elt, *elt2;
274 int id = get_id(file), ret = 0;
275
276
277 mutex_lock(&pmem[id].data_list_lock);
278 /* if this file is a master, revoke all the memory in the connected
279 * files */
280 if (PMEM_FLAGS_MASTERMAP & data->flags) {
281 struct pmem_data *sub_data;
282 list_for_each(elt, &pmem[id].data_list) {
283 sub_data = list_entry(elt, struct pmem_data, list);
284 down_read(&sub_data->sem);
285 if (PMEM_IS_SUBMAP(sub_data) &&
286 file == sub_data->master_file) {
287 up_read(&sub_data->sem);
288 pmem_revoke(file, sub_data);
289 } else
290 up_read(&sub_data->sem);
291 }
292 }
293 list_del(&data->list);
294 mutex_unlock(&pmem[id].data_list_lock);
295
296
297 down_write(&data->sem);
298
299 /* if it's not a connected file and it has an allocation, free it */
300 if (!(PMEM_FLAGS_CONNECTED & data->flags) && has_allocation(file)) {
301 down_write(&pmem[id].bitmap_sem);
302 ret = pmem_free(id, data->index);
303 up_write(&pmem[id].bitmap_sem);
304 }
305
306 /* if this file is a submap (mapped, connected file), downref the
307 * task struct */
308 if (PMEM_FLAGS_SUBMAP & data->flags)
309 if (data->task) {
310 put_task_struct(data->task);
311 data->task = NULL;
312 }
313
314 file->private_data = NULL;
315
316 list_for_each_safe(elt, elt2, &data->region_list) {
317 region_node = list_entry(elt, struct pmem_region_node, list);
318 list_del(elt);
319 kfree(region_node);
320 }
321 BUG_ON(!list_empty(&data->region_list));
322
323 up_write(&data->sem);
324 kfree(data);
325 if (pmem[id].release)
326 ret = pmem[id].release(inode, file);
327
328 return ret;
329}
330
331static int pmem_open(struct inode *inode, struct file *file)
332{
333 struct pmem_data *data;
334 int id = get_id(file);
335 int ret = 0;
336
337 DLOG("current %u file %p(%d)\n", current->pid, file, file_count(file));
338 /* set up file->private_data to indicate it's unmapped */
339 /* you can only open a pmem device one time */
340 if (file->private_data != NULL)
341 return -1;
342 data = kmalloc(sizeof(struct pmem_data), GFP_KERNEL);
343 if (!data) {
344 printk("pmem: unable to allocate memory for pmem metadata.");
345 return -1;
346 }
347 data->flags = 0;
348 data->index = -1;
349 data->task = NULL;
350 data->vma = NULL;
351 data->pid = 0;
352 data->master_file = NULL;
353#if PMEM_DEBUG
354 data->ref = 0;
355#endif
356 INIT_LIST_HEAD(&data->region_list);
357 init_rwsem(&data->sem);
358
359 file->private_data = data;
360 INIT_LIST_HEAD(&data->list);
361
362 mutex_lock(&pmem[id].data_list_lock);
363 list_add(&data->list, &pmem[id].data_list);
364 mutex_unlock(&pmem[id].data_list_lock);
365 return ret;
366}
367
368static unsigned long pmem_order(unsigned long len)
369{
370 int i;
371
372 len = (len + PMEM_MIN_ALLOC - 1)/PMEM_MIN_ALLOC;
373 len--;
374 for (i = 0; i < sizeof(len)*8; i++)
375 if (len >> i == 0)
376 break;
377 return i;
378}
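
/* Worked example (illustration, assuming PAGE_SIZE == 4096): a 10000-byte
 * request rounds up to 3 minimum-sized entries, 3 - 1 = 2, and the first i
 * with (2 >> i) == 0 is i = 2, so pmem_order() returns order 2, i.e. a
 * 4-entry, 16 KiB slot. */
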
379
380static int pmem_allocate(int id, unsigned long len)
381{
382 /* caller should hold the write lock on pmem_sem! */
383 /* return the corresponding pdata[] entry */
384 int curr = 0;
385 int end = pmem[id].num_entries;
386 int best_fit = -1;
387 unsigned long order = pmem_order(len);
388
389 if (pmem[id].no_allocator) {
390 DLOG("no allocator");
391 if ((len > pmem[id].size) || pmem[id].allocated)
392 return -1;
393 pmem[id].allocated = 1;
394 return len;
395 }
396
397 if (order > PMEM_MAX_ORDER)
398 return -1;
399 DLOG("order %lx\n", order);
400
401 /* look through the bitmap:
402 * if you find a free slot of the correct order use it
403 * otherwise, use the best fit (smallest with size > order) slot
404 */
405 while (curr < end) {
406 if (PMEM_IS_FREE(id, curr)) {
407 if (PMEM_ORDER(id, curr) == (unsigned char)order) {
408 /* set the not free bit and clear others */
409 best_fit = curr;
410 break;
411 }
412 if (PMEM_ORDER(id, curr) > (unsigned char)order &&
413 (best_fit < 0 ||
414 PMEM_ORDER(id, curr) < PMEM_ORDER(id, best_fit)))
415 best_fit = curr;
416 }
417 curr = PMEM_NEXT_INDEX(id, curr);
418 }
419
420 /* if best_fit < 0, there are no suitable slots,
421 * return an error
422 */
423 if (best_fit < 0) {
424 printk("pmem: no space left to allocate!\n");
425 return -1;
426 }
427
428 /* now partition the best fit:
429 * split the slot into 2 buddies of order - 1
430 * repeat until the slot is of the correct order
431 */
432 while (PMEM_ORDER(id, best_fit) > (unsigned char)order) {
433 int buddy;
434 PMEM_ORDER(id, best_fit) -= 1;
435 buddy = PMEM_BUDDY_INDEX(id, best_fit);
436 PMEM_ORDER(id, buddy) = PMEM_ORDER(id, best_fit);
437 }
438 pmem[id].bitmap[best_fit].allocated = 1;
439 return best_fit;
440}
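
/* Worked example of the best-fit split above (illustration): an order-2
 * request that lands on a free order-4 slot at index 0 is halved twice,
 * first into order-3 buddies at indices 0 and 8, then slot 0 into order-2
 * buddies at 0 and 4, before index 0 is finally marked allocated. */
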
441
442static pgprot_t pmem_access_prot(struct file *file, pgprot_t vma_prot)
443{
444 int id = get_id(file);
445#ifdef pgprot_noncached
446 if (pmem[id].cached == 0 || file->f_flags & O_SYNC)
447 return pgprot_noncached(vma_prot);
448#endif
449#ifdef pgprot_ext_buffered
450 else if (pmem[id].buffered)
451 return pgprot_ext_buffered(vma_prot);
452#endif
453 return vma_prot;
454}
455
456static unsigned long pmem_start_addr(int id, struct pmem_data *data)
457{
458 if (pmem[id].no_allocator)
459 return PMEM_START_ADDR(id, 0);
460 else
461 return PMEM_START_ADDR(id, data->index);
462
463}
464
465static void *pmem_start_vaddr(int id, struct pmem_data *data)
466{
467 return pmem_start_addr(id, data) - pmem[id].base + pmem[id].vbase;
468}
469
470static unsigned long pmem_len(int id, struct pmem_data *data)
471{
472 if (pmem[id].no_allocator)
473 return data->index;
474 else
475 return PMEM_LEN(id, data->index);
476}
477
478static int pmem_map_garbage(int id, struct vm_area_struct *vma,
479 struct pmem_data *data, unsigned long offset,
480 unsigned long len)
481{
482 int i, garbage_pages = len >> PAGE_SHIFT;
483
484 vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP | VM_SHARED | VM_WRITE;
485 for (i = 0; i < garbage_pages; i++) {
486 if (vm_insert_pfn(vma, vma->vm_start + offset + (i * PAGE_SIZE),
487 pmem[id].garbage_pfn))
488 return -EAGAIN;
489 }
490 return 0;
491}
492
493static int pmem_unmap_pfn_range(int id, struct vm_area_struct *vma,
494 struct pmem_data *data, unsigned long offset,
495 unsigned long len)
496{
497 int garbage_pages;
498 DLOG("unmap offset %lx len %lx\n", offset, len);
499
500 BUG_ON(!PMEM_IS_PAGE_ALIGNED(len));
501
502 garbage_pages = len >> PAGE_SHIFT;
503 zap_page_range(vma, vma->vm_start + offset, len, NULL);
504 pmem_map_garbage(id, vma, data, offset, len);
505 return 0;
506}
507
508static int pmem_map_pfn_range(int id, struct vm_area_struct *vma,
509 struct pmem_data *data, unsigned long offset,
510 unsigned long len)
511{
512 DLOG("map offset %lx len %lx\n", offset, len);
513 BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_start));
514 BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_end));
515 BUG_ON(!PMEM_IS_PAGE_ALIGNED(len));
516 BUG_ON(!PMEM_IS_PAGE_ALIGNED(offset));
517
518 if (io_remap_pfn_range(vma, vma->vm_start + offset,
519 (pmem_start_addr(id, data) + offset) >> PAGE_SHIFT,
520 len, vma->vm_page_prot)) {
521 return -EAGAIN;
522 }
523 return 0;
524}
525
526static int pmem_remap_pfn_range(int id, struct vm_area_struct *vma,
527 struct pmem_data *data, unsigned long offset,
528 unsigned long len)
529{
530 /* hold the mm sem for the vma you are modifying when you call this */
531 BUG_ON(!vma);
532 zap_page_range(vma, vma->vm_start + offset, len, NULL);
533 return pmem_map_pfn_range(id, vma, data, offset, len);
534}
535
536static void pmem_vma_open(struct vm_area_struct *vma)
537{
538 struct file *file = vma->vm_file;
539 struct pmem_data *data = file->private_data;
540 int id = get_id(file);
541 /* this should never be called as we don't support copying pmem
542 * ranges via fork */
543 BUG_ON(!has_allocation(file));
544 down_write(&data->sem);
545 /* remap the garbage pages, forkers don't get access to the data */
546 pmem_unmap_pfn_range(id, vma, data, 0, vma->vm_start - vma->vm_end);
547 up_write(&data->sem);
548}
549
550static void pmem_vma_close(struct vm_area_struct *vma)
551{
552 struct file *file = vma->vm_file;
553 struct pmem_data *data = file->private_data;
554
555 DLOG("current %u ppid %u file %p count %d\n", current->pid,
556 current->parent->pid, file, file_count(file));
557 if (unlikely(!is_pmem_file(file) || !has_allocation(file))) {
558 printk(KERN_WARNING "pmem: something is very wrong, you are "
559 "closing a vm backing an allocation that doesn't "
560 "exist!\n");
561 return;
562 }
563 down_write(&data->sem);
564 if (data->vma == vma) {
565 data->vma = NULL;
566 if ((data->flags & PMEM_FLAGS_CONNECTED) &&
567 (data->flags & PMEM_FLAGS_SUBMAP))
568 data->flags |= PMEM_FLAGS_UNSUBMAP;
569 }
570 /* the kernel is going to free this vma now anyway */
571 up_write(&data->sem);
572}
573
574static struct vm_operations_struct vm_ops = {
575 .open = pmem_vma_open,
576 .close = pmem_vma_close,
577};
578
579static int pmem_mmap(struct file *file, struct vm_area_struct *vma)
580{
581 struct pmem_data *data;
582 int index;
583 unsigned long vma_size = vma->vm_end - vma->vm_start;
584 int ret = 0, id = get_id(file);
585
586 if (vma->vm_pgoff || !PMEM_IS_PAGE_ALIGNED(vma_size)) {
587#if PMEM_DEBUG
588 printk(KERN_ERR "pmem: mmaps must be at offset zero, aligned"
589 " and a multiple of pages_size.\n");
590#endif
591 return -EINVAL;
592 }
593
594 data = (struct pmem_data *)file->private_data;
595 down_write(&data->sem);
596 /* check this file isn't already mmapped, for submaps check this file
597 * has never been mmapped */
598 if ((data->flags & PMEM_FLAGS_SUBMAP) ||
599 (data->flags & PMEM_FLAGS_UNSUBMAP)) {
600#if PMEM_DEBUG
601 printk(KERN_ERR "pmem: you can only mmap a pmem file once, "
602 "this file is already mmaped. %x\n", data->flags);
603#endif
604 ret = -EINVAL;
605 goto error;
606 }
607 /* if file->private_data == unalloced, alloc*/
608 if (data && data->index == -1) {
609 down_write(&pmem[id].bitmap_sem);
610 index = pmem_allocate(id, vma->vm_end - vma->vm_start);
611 up_write(&pmem[id].bitmap_sem);
612 data->index = index;
613 }
614 /* either no space was available or an error occurred */
615 if (!has_allocation(file)) {
616 ret = -EINVAL;
617 printk("pmem: could not find allocation for map.\n");
618 goto error;
619 }
620
621 if (pmem_len(id, data) < vma_size) {
622#if PMEM_DEBUG
623 printk(KERN_WARNING "pmem: mmap size [%lu] does not match"
624 "size of backing region [%lu].\n", vma_size,
625 pmem_len(id, data));
626#endif
627 ret = -EINVAL;
628 goto error;
629 }
630
631 vma->vm_pgoff = pmem_start_addr(id, data) >> PAGE_SHIFT;
632 vma->vm_page_prot = pmem_access_prot(file, vma->vm_page_prot);
633
634 if (data->flags & PMEM_FLAGS_CONNECTED) {
635 struct pmem_region_node *region_node;
636 struct list_head *elt;
637 if (pmem_map_garbage(id, vma, data, 0, vma_size)) {
638 printk("pmem: mmap failed in kernel!\n");
639 ret = -EAGAIN;
640 goto error;
641 }
642 list_for_each(elt, &data->region_list) {
643 region_node = list_entry(elt, struct pmem_region_node,
644 list);
645 DLOG("remapping file: %p %lx %lx\n", file,
646 region_node->region.offset,
647 region_node->region.len);
648 if (pmem_remap_pfn_range(id, vma, data,
649 region_node->region.offset,
650 region_node->region.len)) {
651 ret = -EAGAIN;
652 goto error;
653 }
654 }
655 data->flags |= PMEM_FLAGS_SUBMAP;
656 get_task_struct(current->group_leader);
657 data->task = current->group_leader;
658 data->vma = vma;
659#if PMEM_DEBUG
660 data->pid = current->pid;
661#endif
662 DLOG("submmapped file %p vma %p pid %u\n", file, vma,
663 current->pid);
664 } else {
665 if (pmem_map_pfn_range(id, vma, data, 0, vma_size)) {
666 printk(KERN_INFO "pmem: mmap failed in kernel!\n");
667 ret = -EAGAIN;
668 goto error;
669 }
670 data->flags |= PMEM_FLAGS_MASTERMAP;
671 data->pid = current->pid;
672 }
673 vma->vm_ops = &vm_ops;
674error:
675 up_write(&data->sem);
676 return ret;
677}
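
A hedged user-space sketch of the mapping contract enforced above: the offset must be zero, the length page-aligned, and O_SYNC requests an uncached mapping on an otherwise cached device. The device node name and size are illustrative:

#include <fcntl.h>
#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

static int map_pmem(size_t len, void **out)
{
	int fd = open("/dev/pmem", O_RDWR | O_SYNC);	/* O_SYNC => uncached */

	if (fd < 0)
		return -1;
	/* pmem_mmap() rejects non-zero offsets and unaligned lengths */
	*out = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (*out == MAP_FAILED) {
		close(fd);
		return -1;
	}
	return fd;	/* keep the fd open for the lifetime of the mapping */
}
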
678
679/* the following are the api for accessing pmem regions by other drivers
680 * from inside the kernel */
681int get_pmem_user_addr(struct file *file, unsigned long *start,
682 unsigned long *len)
683{
684 struct pmem_data *data;
685 if (!is_pmem_file(file) || !has_allocation(file)) {
686#if PMEM_DEBUG
687 printk(KERN_INFO "pmem: requested pmem data from invalid"
688 "file.\n");
689#endif
690 return -1;
691 }
692 data = (struct pmem_data *)file->private_data;
693 down_read(&data->sem);
694 if (data->vma) {
695 *start = data->vma->vm_start;
696 *len = data->vma->vm_end - data->vma->vm_start;
697 } else {
698 *start = 0;
699 *len = 0;
700 }
701 up_read(&data->sem);
702 return 0;
703}
704
705int get_pmem_addr(struct file *file, unsigned long *start,
706 unsigned long *vstart, unsigned long *len)
707{
708 struct pmem_data *data;
709 int id;
710
711 if (!is_pmem_file(file) || !has_allocation(file)) {
712 return -1;
713 }
714
715 data = (struct pmem_data *)file->private_data;
716 if (data->index == -1) {
717#if PMEM_DEBUG
718 printk(KERN_INFO "pmem: requested pmem data from file with no "
719 "allocation.\n");
720 return -1;
721#endif
722 }
723 id = get_id(file);
724
725 down_read(&data->sem);
726 *start = pmem_start_addr(id, data);
727 *len = pmem_len(id, data);
728 *vstart = (unsigned long)pmem_start_vaddr(id, data);
729 up_read(&data->sem);
730#if PMEM_DEBUG
731 down_write(&data->sem);
732 data->ref++;
733 up_write(&data->sem);
734#endif
735 return 0;
736}
737
738int get_pmem_file(int fd, unsigned long *start, unsigned long *vstart,
739 unsigned long *len, struct file **filp)
740{
741 struct file *file;
742
743 file = fget(fd);
744 if (unlikely(file == NULL)) {
745 printk(KERN_INFO "pmem: requested data from file descriptor "
746 "that doesn't exist.");
747 return -1;
748 }
749
750 if (get_pmem_addr(file, start, vstart, len))
751 goto end;
752
753 if (filp)
754 *filp = file;
755 return 0;
756end:
757 fput(file);
758 return -1;
759}
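
A hedged sketch of how another kernel driver might consume the API above; the caller and error code choice are hypothetical:

static int example_use_pmem(int fd)
{
	unsigned long start, vstart, len;
	struct file *pmem_filp;

	if (get_pmem_file(fd, &start, &vstart, &len, &pmem_filp))
		return -EINVAL;
	/* program the device with the physical range [start, start + len) */
	put_pmem_file(pmem_filp);	/* drop the reference taken above */
	return 0;
}
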
760
761void put_pmem_file(struct file *file)
762{
763 struct pmem_data *data;
764 int id;
765
766 if (!is_pmem_file(file))
767 return;
768 id = get_id(file);
769 data = (struct pmem_data *)file->private_data;
770#if PMEM_DEBUG
771 down_write(&data->sem);
772 if (data->ref == 0) {
773 printk("pmem: pmem_put > pmem_get %s (pid %d)\n",
774 pmem[id].dev.name, data->pid);
775 BUG();
776 }
777 data->ref--;
778 up_write(&data->sem);
779#endif
780 fput(file);
781}
782
783void flush_pmem_file(struct file *file, unsigned long offset, unsigned long len)
784{
785 struct pmem_data *data;
786 int id;
787 void *vaddr;
788 struct pmem_region_node *region_node;
789 struct list_head *elt;
790 void *flush_start, *flush_end;
791
792 if (!is_pmem_file(file) || !has_allocation(file)) {
793 return;
794 }
795
796 id = get_id(file);
797 data = (struct pmem_data *)file->private_data;
798 if (!pmem[id].cached || file->f_flags & O_SYNC)
799 return;
800
801 down_read(&data->sem);
802 vaddr = pmem_start_vaddr(id, data);
803 /* if this isn't a submapped file, flush the whole thing */
804 if (unlikely(!(data->flags & PMEM_FLAGS_CONNECTED))) {
805 dmac_flush_range(vaddr, vaddr + pmem_len(id, data));
806 goto end;
807 }
808 /* otherwise, flush the region of the file we are drawing */
809 list_for_each(elt, &data->region_list) {
810 region_node = list_entry(elt, struct pmem_region_node, list);
811 if ((offset >= region_node->region.offset) &&
812 ((offset + len) <= (region_node->region.offset +
813 region_node->region.len))) {
814 flush_start = vaddr + region_node->region.offset;
815 flush_end = flush_start + region_node->region.len;
816 dmac_flush_range(flush_start, flush_end);
817 break;
818 }
819 }
820end:
821 up_read(&data->sem);
822}
823
824static int pmem_connect(unsigned long connect, struct file *file)
825{
826 struct pmem_data *data = (struct pmem_data *)file->private_data;
827 struct pmem_data *src_data;
828 struct file *src_file;
829 int ret = 0, put_needed;
830
831 down_write(&data->sem);
832 /* retrieve the src file and check it is a pmem file with an alloc */
833 src_file = fget_light(connect, &put_needed);
834 DLOG("connect %p to %p\n", file, src_file);
835 if (!src_file) {
836 printk("pmem: src file not found!\n");
837 ret = -EINVAL;
838 goto err_no_file;
839 }
840 if (unlikely(!is_pmem_file(src_file) || !has_allocation(src_file))) {
841 printk(KERN_INFO "pmem: src file is not a pmem file or has no "
842 "alloc!\n");
843 ret = -EINVAL;
844 goto err_bad_file;
845 }
846 src_data = (struct pmem_data *)src_file->private_data;
847
848 if (has_allocation(file) && (data->index != src_data->index)) {
849 printk("pmem: file is already mapped but doesn't match this"
850 " src_file!\n");
851 ret = -EINVAL;
852 goto err_bad_file;
853 }
854 data->index = src_data->index;
855 data->flags |= PMEM_FLAGS_CONNECTED;
856 data->master_fd = connect;
857 data->master_file = src_file;
858
859err_bad_file:
860 fput_light(src_file, put_needed);
861err_no_file:
862 up_write(&data->sem);
863 return ret;
864}
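
A hedged user-space sketch of the connect path above, combined with the PMEM_MAP remap handled further down; both fds belong to the master's owner and error handling is omitted:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include "android_pmem.h"	/* PMEM_CONNECT, PMEM_MAP, struct pmem_region */

static int pmem_share_example(size_t size)
{
	int master = open("/dev/pmem", O_RDWR);
	void *base = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			  master, 0);		/* master becomes MASTERMAP */
	int client = open("/dev/pmem", O_RDWR);
	struct pmem_region sub = { .offset = 0, .len = 4096 };

	(void)base;
	ioctl(client, PMEM_CONNECT, master);	/* client adopts the master's index */
	ioctl(client, PMEM_MAP, &sub);		/* expose one sub-region */
	return client;				/* client may now mmap() its window */
}
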
865
866static void pmem_unlock_data_and_mm(struct pmem_data *data,
867 struct mm_struct *mm)
868{
869 up_write(&data->sem);
870 if (mm != NULL) {
871 up_write(&mm->mmap_sem);
872 mmput(mm);
873 }
874}
875
876static int pmem_lock_data_and_mm(struct file *file, struct pmem_data *data,
877 struct mm_struct **locked_mm)
878{
879 int ret = 0;
880 struct mm_struct *mm = NULL;
881 *locked_mm = NULL;
882lock_mm:
883 down_read(&data->sem);
884 if (PMEM_IS_SUBMAP(data)) {
885 mm = get_task_mm(data->task);
886 if (!mm) {
887#if PMEM_DEBUG
888 printk("pmem: can't remap task is gone!\n");
889#endif
890 up_read(&data->sem);
891 return -1;
892 }
893 }
894 up_read(&data->sem);
895
896 if (mm)
897 down_write(&mm->mmap_sem);
898
899 down_write(&data->sem);
900 /* check that the file didn't get mmaped before we could take the
901 * data sem, this should be safe b/c you can only submap each file
902 * once */
903 if (PMEM_IS_SUBMAP(data) && !mm) {
904 pmem_unlock_data_and_mm(data, mm);
905 up_write(&data->sem);
906 goto lock_mm;
907 }
908 /* now check that vma.mm is still there, it could have been
909 * deleted by vma_close before we could get the data->sem */
910 if ((data->flags & PMEM_FLAGS_UNSUBMAP) && (mm != NULL)) {
911 /* might as well release this */
912 if (data->flags & PMEM_FLAGS_SUBMAP) {
913 put_task_struct(data->task);
914 data->task = NULL;
915 /* lower the submap flag to show the mm is gone */
916 data->flags &= ~(PMEM_FLAGS_SUBMAP);
917 }
918 pmem_unlock_data_and_mm(data, mm);
919 return -1;
920 }
921 *locked_mm = mm;
922 return ret;
923}
924
925int pmem_remap(struct pmem_region *region, struct file *file,
926 unsigned operation)
927{
928 int ret;
929 struct pmem_region_node *region_node;
930 struct mm_struct *mm = NULL;
931 struct list_head *elt, *elt2;
932 int id = get_id(file);
933 struct pmem_data *data = (struct pmem_data *)file->private_data;
934
935 /* pmem region must be aligned on a page boundary */
936 if (unlikely(!PMEM_IS_PAGE_ALIGNED(region->offset) ||
937 !PMEM_IS_PAGE_ALIGNED(region->len))) {
938#if PMEM_DEBUG
939 printk("pmem: request for unaligned pmem suballocation "
940 "%lx %lx\n", region->offset, region->len);
941#endif
942 return -EINVAL;
943 }
944
945 /* if userspace requests a region of len 0, there's nothing to do */
946 if (region->len == 0)
947 return 0;
948
949 /* lock the mm and data */
950 ret = pmem_lock_data_and_mm(file, data, &mm);
951 if (ret)
952 return 0;
953
954 /* only the owner of the master file can remap the client fds
955 * that are backed by it */
956 if (!is_master_owner(file)) {
957#if PMEM_DEBUG
958 printk("pmem: remap requested from non-master process\n");
959#endif
960 ret = -EINVAL;
961 goto err;
962 }
963
964 /* check that the requested range is within the src allocation */
965 if (unlikely((region->offset > pmem_len(id, data)) ||
966 (region->len > pmem_len(id, data)) ||
967 (region->offset + region->len > pmem_len(id, data)))) {
968#if PMEM_DEBUG
969 printk(KERN_INFO "pmem: suballoc doesn't fit in src_file!\n");
970#endif
971 ret = -EINVAL;
972 goto err;
973 }
974
975 if (operation == PMEM_MAP) {
976 region_node = kmalloc(sizeof(struct pmem_region_node),
977 GFP_KERNEL);
978 if (!region_node) {
979 ret = -ENOMEM;
980#if PMEM_DEBUG
981 printk(KERN_INFO "No space to allocate metadata!");
982#endif
983 goto err;
984 }
985 region_node->region = *region;
986 list_add(&region_node->list, &data->region_list);
987 } else if (operation == PMEM_UNMAP) {
988 int found = 0;
989 list_for_each_safe(elt, elt2, &data->region_list) {
990 region_node = list_entry(elt, struct pmem_region_node,
991 list);
992 if (region->len == 0 ||
993 (region_node->region.offset == region->offset &&
994 region_node->region.len == region->len)) {
995 list_del(elt);
996 kfree(region_node);
997 found = 1;
998 }
999 }
1000 if (!found) {
1001#if PMEM_DEBUG
1002 printk("pmem: Unmap region does not map any mapped "
1003 "region!");
1004#endif
1005 ret = -EINVAL;
1006 goto err;
1007 }
1008 }
1009
1010 if (data->vma && PMEM_IS_SUBMAP(data)) {
1011 if (operation == PMEM_MAP)
1012 ret = pmem_remap_pfn_range(id, data->vma, data,
1013 region->offset, region->len);
1014 else if (operation == PMEM_UNMAP)
1015 ret = pmem_unmap_pfn_range(id, data->vma, data,
1016 region->offset, region->len);
1017 }
1018
1019err:
1020 pmem_unlock_data_and_mm(data, mm);
1021 return ret;
1022}
1023
1024static void pmem_revoke(struct file *file, struct pmem_data *data)
1025{
1026 struct pmem_region_node *region_node;
1027 struct list_head *elt, *elt2;
1028 struct mm_struct *mm = NULL;
1029 int id = get_id(file);
1030 int ret = 0;
1031
1032 data->master_file = NULL;
1033 ret = pmem_lock_data_and_mm(file, data, &mm);
1034 /* if lock_data_and_mm fails either the task that mapped the fd, or
1035 * the vma that mapped it have already gone away, nothing more
1036 * needs to be done */
1037 if (ret)
1038 return;
1039 /* unmap everything */
1040 /* delete the regions and region list; nothing is mapped any more */
1041 if (data->vma)
1042 list_for_each_safe(elt, elt2, &data->region_list) {
1043 region_node = list_entry(elt, struct pmem_region_node,
1044 list);
1045 pmem_unmap_pfn_range(id, data->vma, data,
1046 region_node->region.offset,
1047 region_node->region.len);
1048 list_del(elt);
1049 kfree(region_node);
1050 }
1051 /* delete the master file */
1052 pmem_unlock_data_and_mm(data, mm);
1053}
1054
1055static void pmem_get_size(struct pmem_region *region, struct file *file)
1056{
1057 struct pmem_data *data = (struct pmem_data *)file->private_data;
1058 int id = get_id(file);
1059
1060 if (!has_allocation(file)) {
1061 region->offset = 0;
1062 region->len = 0;
1063 return;
1064 } else {
1065 region->offset = pmem_start_addr(id, data);
1066 region->len = pmem_len(id, data);
1067 }
1068 DLOG("offset %lx len %lx\n", region->offset, region->len);
1069}
1070
1071
1072static long pmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1073{
1074 struct pmem_data *data;
1075 int id = get_id(file);
1076
1077 switch (cmd) {
1078 case PMEM_GET_PHYS:
1079 {
1080 struct pmem_region region;
1081 DLOG("get_phys\n");
1082 if (!has_allocation(file)) {
1083 region.offset = 0;
1084 region.len = 0;
1085 } else {
1086 data = (struct pmem_data *)file->private_data;
1087 region.offset = pmem_start_addr(id, data);
1088 region.len = pmem_len(id, data);
1089 }
1090 printk(KERN_INFO "pmem: request for physical address of pmem region "
1091 "from process %d.\n", current->pid);
1092 if (copy_to_user((void __user *)arg, &region,
1093 sizeof(struct pmem_region)))
1094 return -EFAULT;
1095 break;
1096 }
1097 case PMEM_MAP:
1098 {
1099 struct pmem_region region;
1100 if (copy_from_user(&region, (void __user *)arg,
1101 sizeof(struct pmem_region)))
1102 return -EFAULT;
1103 data = (struct pmem_data *)file->private_data;
1104 return pmem_remap(&region, file, PMEM_MAP);
1105 }
1106 break;
1107 case PMEM_UNMAP:
1108 {
1109 struct pmem_region region;
1110 if (copy_from_user(&region, (void __user *)arg,
1111 sizeof(struct pmem_region)))
1112 return -EFAULT;
1113 data = (struct pmem_data *)file->private_data;
1114 return pmem_remap(&region, file, PMEM_UNMAP);
1115 break;
1116 }
1117 case PMEM_GET_SIZE:
1118 {
1119 struct pmem_region region;
1120 DLOG("get_size\n");
1121 pmem_get_size(&region, file);
1122 if (copy_to_user((void __user *)arg, &region,
1123 sizeof(struct pmem_region)))
1124 return -EFAULT;
1125 break;
1126 }
1127 case PMEM_GET_TOTAL_SIZE:
1128 {
1129 struct pmem_region region;
1130 DLOG("get total size\n");
1131 region.offset = 0;
1132 get_id(file);
1133 region.len = pmem[id].size;
1134 if (copy_to_user((void __user *)arg, &region,
1135 sizeof(struct pmem_region)))
1136 return -EFAULT;
1137 break;
1138 }
1139 case PMEM_ALLOCATE:
1140 {
1141 if (has_allocation(file))
1142 return -EINVAL;
1143 data = (struct pmem_data *)file->private_data;
1144 data->index = pmem_allocate(id, arg);
1145 break;
1146 }
1147 case PMEM_CONNECT:
1148 DLOG("connect\n");
1149 return pmem_connect(arg, file);
1150 break;
1151 case PMEM_CACHE_FLUSH:
1152 {
1153 struct pmem_region region;
1154 DLOG("flush\n");
1155 if (copy_from_user(&region, (void __user *)arg,
1156 sizeof(struct pmem_region)))
1157 return -EFAULT;
1158 flush_pmem_file(file, region.offset, region.len);
1159 break;
1160 }
1161 default:
1162 if (pmem[id].ioctl)
1163 return pmem[id].ioctl(file, cmd, arg);
1164 return -EINVAL;
1165 }
1166 return 0;
1167}
1168
1169#if PMEM_DEBUG
1170static ssize_t debug_open(struct inode *inode, struct file *file)
1171{
1172 file->private_data = inode->i_private;
1173 return 0;
1174}
1175
1176static ssize_t debug_read(struct file *file, char __user *buf, size_t count,
1177 loff_t *ppos)
1178{
1179 struct list_head *elt, *elt2;
1180 struct pmem_data *data;
1181 struct pmem_region_node *region_node;
1182 int id = (int)file->private_data;
1183 const int debug_bufmax = 4096;
1184 static char buffer[4096];
1185 int n = 0;
1186
1187 DLOG("debug open\n");
1188 n = scnprintf(buffer, debug_bufmax,
1189 "pid #: mapped regions (offset, len) (offset,len)...\n");
1190
1191 mutex_lock(&pmem[id].data_list_lock);
1192 list_for_each(elt, &pmem[id].data_list) {
1193 data = list_entry(elt, struct pmem_data, list);
1194 down_read(&data->sem);
1195 n += scnprintf(buffer + n, debug_bufmax - n, "pid %u:",
1196 data->pid);
1197 list_for_each(elt2, &data->region_list) {
1198 region_node = list_entry(elt2, struct pmem_region_node,
1199 list);
1200 n += scnprintf(buffer + n, debug_bufmax - n,
1201 "(%lx,%lx) ",
1202 region_node->region.offset,
1203 region_node->region.len);
1204 }
1205 n += scnprintf(buffer + n, debug_bufmax - n, "\n");
1206 up_read(&data->sem);
1207 }
1208 mutex_unlock(&pmem[id].data_list_lock);
1209
1210 n++;
1211 buffer[n] = 0;
1212 return simple_read_from_buffer(buf, count, ppos, buffer, n);
1213}
1214
1215static struct file_operations debug_fops = {
1216 .read = debug_read,
1217 .open = debug_open,
1218};
1219#endif
1220
1221#if 0
1222static struct miscdevice pmem_dev = {
1223 .name = "pmem",
1224 .fops = &pmem_fops,
1225};
1226#endif
1227
1228int pmem_setup(struct android_pmem_platform_data *pdata,
1229 long (*ioctl)(struct file *, unsigned int, unsigned long),
1230 int (*release)(struct inode *, struct file *))
1231{
1232 int err = 0;
1233 int i, index = 0;
1234 int id = id_count;
1235 id_count++;
1236
1237 pmem[id].no_allocator = pdata->no_allocator;
1238 pmem[id].cached = pdata->cached;
1239 pmem[id].buffered = pdata->buffered;
1240 pmem[id].base = pdata->start;
1241 pmem[id].size = pdata->size;
1242 pmem[id].ioctl = ioctl;
1243 pmem[id].release = release;
1244 init_rwsem(&pmem[id].bitmap_sem);
1245 mutex_init(&pmem[id].data_list_lock);
1246 INIT_LIST_HEAD(&pmem[id].data_list);
1247 pmem[id].dev.name = pdata->name;
1248 pmem[id].dev.minor = id;
1249 pmem[id].dev.fops = &pmem_fops;
1250 printk(KERN_INFO "%s: %d init\n", pdata->name, pdata->cached);
1251
1252 err = misc_register(&pmem[id].dev);
1253 if (err) {
1254 printk(KERN_ALERT "Unable to register pmem driver!\n");
1255 goto err_cant_register_device;
1256 }
1257 pmem[id].num_entries = pmem[id].size / PMEM_MIN_ALLOC;
1258
1259 pmem[id].bitmap = kmalloc(pmem[id].num_entries *
1260 sizeof(struct pmem_bits), GFP_KERNEL);
1261 if (!pmem[id].bitmap)
1262 goto err_no_mem_for_metadata;
1263
1264 memset(pmem[id].bitmap, 0, sizeof(struct pmem_bits) *
1265 pmem[id].num_entries);
1266
1267 for (i = sizeof(pmem[id].num_entries) * 8 - 1; i >= 0; i--) {
1268 if ((pmem[id].num_entries) & 1<<i) {
1269 PMEM_ORDER(id, index) = i;
1270 index = PMEM_NEXT_INDEX(id, index);
1271 }
1272 }
1273
1274 if (pmem[id].cached)
1275 pmem[id].vbase = ioremap_cached(pmem[id].base,
1276 pmem[id].size);
1277#ifdef ioremap_ext_buffered
1278 else if (pmem[id].buffered)
1279 pmem[id].vbase = ioremap_ext_buffered(pmem[id].base,
1280 pmem[id].size);
1281#endif
1282 else
1283 pmem[id].vbase = ioremap(pmem[id].base, pmem[id].size);
1284
1285 if (pmem[id].vbase == 0)
1286 goto error_cant_remap;
1287
1288 pmem[id].garbage_pfn = page_to_pfn(alloc_page(GFP_KERNEL));
1289 if (pmem[id].no_allocator)
1290 pmem[id].allocated = 0;
1291
1292#if PMEM_DEBUG
1293 debugfs_create_file(pdata->name, S_IFREG | S_IRUGO, NULL, (void *)id,
1294 &debug_fops);
1295#endif
1296 return 0;
1297error_cant_remap:
1298 kfree(pmem[id].bitmap);
1299err_no_mem_for_metadata:
1300 misc_deregister(&pmem[id].dev);
1301err_cant_register_device:
1302 return -1;
1303}
1304
1305static int pmem_probe(struct platform_device *pdev)
1306{
1307 struct android_pmem_platform_data *pdata;
1308
1309 if (!pdev || !pdev->dev.platform_data) {
1310 printk(KERN_ALERT "Unable to probe pmem!\n");
1311 return -1;
1312 }
1313 pdata = pdev->dev.platform_data;
1314 return pmem_setup(pdata, NULL, NULL);
1315}
1316
1317
1318static int pmem_remove(struct platform_device *pdev)
1319{
1320 int id = pdev->id;
1321 __free_page(pfn_to_page(pmem[id].garbage_pfn));
1322 misc_deregister(&pmem[id].dev);
1323 return 0;
1324}
1325
1326static struct platform_driver pmem_driver = {
1327 .probe = pmem_probe,
1328 .remove = pmem_remove,
1329 .driver = { .name = "android_pmem" }
1330};
1331
1332
1333static int __init pmem_init(void)
1334{
1335 return platform_driver_register(&pmem_driver);
1336}
1337
1338static void __exit pmem_exit(void)
1339{
1340 platform_driver_unregister(&pmem_driver);
1341}
1342
1343module_init(pmem_init);
1344module_exit(pmem_exit);
1345
diff --git a/drivers/staging/asus_oled/asus_oled.c b/drivers/staging/asus_oled/asus_oled.c
index e77e4e0396cf..1df9586f2730 100644
--- a/drivers/staging/asus_oled/asus_oled.c
+++ b/drivers/staging/asus_oled/asus_oled.c
@@ -355,7 +355,14 @@ static void send_data(struct asus_oled_dev *odev)
 
 static int append_values(struct asus_oled_dev *odev, uint8_t val, size_t count)
 {
-	while (count-- > 0 && val) {
+	odev->last_val = val;
+
+	if (val == 0) {
+		odev->buf_offs += count;
+		return 0;
+	}
+
+	while (count-- > 0) {
 		size_t x = odev->buf_offs % odev->width;
 		size_t y = odev->buf_offs / odev->width;
 		size_t i;
@@ -406,7 +413,6 @@ static int append_values(struct asus_oled_dev *odev, uint8_t val, size_t count)
 			;
 		}
 
-		odev->last_val = val;
 		odev->buf_offs++;
 	}
 
@@ -805,10 +811,9 @@ error:
 
 static void __exit asus_oled_exit(void)
 {
+	usb_deregister(&oled_driver);
 	class_remove_file(oled_class, &class_attr_version.attr);
 	class_destroy(oled_class);
-
-	usb_deregister(&oled_driver);
 }
 
 module_init(asus_oled_init);
diff --git a/drivers/staging/gma500/Kconfig b/drivers/staging/gma500/Kconfig
deleted file mode 100644
index c7a2b3bc0a18..000000000000
--- a/drivers/staging/gma500/Kconfig
+++ /dev/null
@@ -1,33 +0,0 @@
1config DRM_PSB
2 tristate "Intel GMA5/600 KMS Framebuffer"
3 depends on DRM && PCI && X86 && BROKEN
4 select FB_CFB_COPYAREA
5 select FB_CFB_FILLRECT
6 select FB_CFB_IMAGEBLIT
7 select DRM_KMS_HELPER
8 select DRM_TTM
9 help
10 Say yes for an experimental 2D KMS framebuffer driver for the
11 Intel GMA500 ('Poulsbo') and other Intel IMG based graphics
12 devices.
13
14config DRM_PSB_MRST
15 bool "Intel GMA600 support (Experimental)"
16 depends on DRM_PSB
17 help
18 Say yes to include support for GMA600 (Intel Moorestown/Oaktrail)
19 platforms with LVDS ports. HDMI and MIPI are not currently
20 supported.
21
22config DRM_PSB_MFLD
23 bool "Intel Medfield support (Experimental)"
24 depends on DRM_PSB
25 help
26 Say yes to include support for Intel Medfield platforms with MIPI
27 interfaces.
28
29config DRM_PSB_CDV
30 bool "Intel Cedarview support (Experimental)"
31 depends on DRM_PSB
32 help
33 Say yes to include support for Intel Cedarview platforms
diff --git a/drivers/staging/gma500/Makefile b/drivers/staging/gma500/Makefile
deleted file mode 100644
index c729868b1b10..000000000000
--- a/drivers/staging/gma500/Makefile
+++ /dev/null
@@ -1,52 +0,0 @@
1#
2# KMS driver for the GMA500
3#
4ccflags-y += -Iinclude/drm
5
6psb_gfx-y += gem_glue.o \
7 accel_2d.o \
8 backlight.o \
9 framebuffer.o \
10 gem.o \
11 gtt.o \
12 intel_bios.o \
13 intel_i2c.o \
14 intel_opregion.o \
15 mmu.o \
16 power.o \
17 psb_drv.o \
18 psb_intel_display.o \
19 psb_intel_lvds.o \
20 psb_intel_modes.o \
21 psb_intel_sdvo.o \
22 psb_lid.o \
23 psb_irq.o \
24 psb_device.o \
25 mid_bios.o
26
27psb_gfx-$(CONFIG_DRM_PSB_CDV) += cdv_device.o \
28 cdv_intel_crt.o \
29 cdv_intel_display.o \
30 cdv_intel_hdmi.o \
31 cdv_intel_lvds.o
32
33psb_gfx-$(CONFIG_DRM_PSB_MRST) += mrst_device.o \
34 mrst_crtc.o \
35 mrst_lvds.o \
36 mrst_hdmi.o \
37 mrst_hdmi_i2c.o
38
39psb_gfx-$(CONFIG_DRM_PSB_MFLD) += mdfld_device.o \
40 mdfld_output.o \
41 mdfld_pyr_cmd.o \
42 mdfld_tmd_vid.o \
43 mdfld_tpo_cmd.o \
44 mdfld_tpo_vid.o \
45 mdfld_dsi_pkg_sender.o \
46 mdfld_dsi_dpi.o \
47 mdfld_dsi_output.o \
48 mdfld_dsi_dbi.o \
49 mdfld_dsi_dbi_dpu.o \
50 mdfld_intel_display.o
51
52obj-$(CONFIG_DRM_PSB) += psb_gfx.o
diff --git a/drivers/staging/gma500/TODO b/drivers/staging/gma500/TODO
deleted file mode 100644
index fc836158e74c..000000000000
--- a/drivers/staging/gma500/TODO
+++ /dev/null
@@ -1,15 +0,0 @@
1- Sort out the power management side. Not important for Poulsbo but
2 matters for Moorestown/Medfield
3- Debug Oaktrail/Moorestown support (single pipe, no BIOS on mrst,
4 some other differences)
5- Add 2D acceleration via console and DRM
6- Add scrolling acceleration using the GTT to do remapping on the main
7 framebuffer.
8- HDMI testing
9- Oaktrail HDMI and other features
10- Oaktrail MIPI
11- Medfield needs a lot of further love
12
13As per kernel policy and in the interest of the safety of various
14kittens there is no support or plans to add hooks for the closed user space
15stuff.
diff --git a/drivers/staging/gma500/accel_2d.c b/drivers/staging/gma500/accel_2d.c
deleted file mode 100644
index b8f78ebbb145..000000000000
--- a/drivers/staging/gma500/accel_2d.c
+++ /dev/null
@@ -1,414 +0,0 @@
1/**************************************************************************
2 * Copyright (c) 2007-2011, Intel Corporation.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
19 * develop this driver.
20 *
21 **************************************************************************/
22
23#include <linux/module.h>
24#include <linux/kernel.h>
25#include <linux/errno.h>
26#include <linux/string.h>
27#include <linux/mm.h>
28#include <linux/tty.h>
29#include <linux/slab.h>
30#include <linux/delay.h>
31#include <linux/fb.h>
32#include <linux/init.h>
33#include <linux/console.h>
34
35#include <drm/drmP.h>
36#include <drm/drm.h>
37#include <drm/drm_crtc.h>
38
39#include "psb_drv.h"
40#include "psb_reg.h"
41#include "framebuffer.h"
42
43/**
44 * psb_spank - reset the 2D engine
45 * @dev_priv: our PSB DRM device
46 *
47 * Soft reset the graphics engine and then reload the necessary registers.
48 * We use this at initialisation time but it will become relevant for
49 * accelerated X later
50 */
51void psb_spank(struct drm_psb_private *dev_priv)
52{
53 PSB_WSGX32(_PSB_CS_RESET_BIF_RESET | _PSB_CS_RESET_DPM_RESET |
54 _PSB_CS_RESET_TA_RESET | _PSB_CS_RESET_USE_RESET |
55 _PSB_CS_RESET_ISP_RESET | _PSB_CS_RESET_TSP_RESET |
56 _PSB_CS_RESET_TWOD_RESET, PSB_CR_SOFT_RESET);
57 PSB_RSGX32(PSB_CR_SOFT_RESET);
58
59 msleep(1);
60
61 PSB_WSGX32(0, PSB_CR_SOFT_RESET);
62 wmb();
63 PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_CB_CTRL_CLEAR_FAULT,
64 PSB_CR_BIF_CTRL);
65 wmb();
66 (void) PSB_RSGX32(PSB_CR_BIF_CTRL);
67
68 msleep(1);
69 PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_CB_CTRL_CLEAR_FAULT,
70 PSB_CR_BIF_CTRL);
71 (void) PSB_RSGX32(PSB_CR_BIF_CTRL);
72 PSB_WSGX32(dev_priv->gtt.gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
73}
74
75/**
76 * psb_2d_wait_available - wait for FIFO room
77 * @dev_priv: our DRM device
78 * @size: size (in dwords) of the command we want to issue
79 *
80 * Wait until there is room to load the FIFO with our data. If the
81 * device is not responding then reset it
82 */
83static int psb_2d_wait_available(struct drm_psb_private *dev_priv,
84 unsigned size)
85{
86 uint32_t avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
87 unsigned long t = jiffies + HZ;
88
89 while (avail < size) {
90 avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
91 if (time_after(jiffies, t)) {
92 psb_spank(dev_priv);
93 return -EIO;
94 }
95 }
96 return 0;
97}
98
99/**
100 * psb_2d_submit - submit a 2D command
101 * @dev_priv: our DRM device
102 * @cmdbuf: command to issue
103 * @size: length (in dwords)
104 *
105 * Issue one or more 2D commands to the accelerator. This needs to be
106 * serialized later when we add the GEM interfaces for acceleration
107 */
108static int psbfb_2d_submit(struct drm_psb_private *dev_priv, uint32_t *cmdbuf,
109 unsigned size)
110{
111 int ret = 0;
112 int i;
113 unsigned submit_size;
114 unsigned long flags;
115
116 spin_lock_irqsave(&dev_priv->lock_2d, flags);
117 while (size > 0) {
118 submit_size = (size < 0x60) ? size : 0x60;
119 size -= submit_size;
120 ret = psb_2d_wait_available(dev_priv, submit_size);
121 if (ret)
122 break;
123
124 submit_size <<= 2;
125
126 for (i = 0; i < submit_size; i += 4)
127 PSB_WSGX32(*cmdbuf++, PSB_SGX_2D_SLAVE_PORT + i);
128
129 (void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT + i - 4);
130 }
131 spin_unlock_irqrestore(&dev_priv->lock_2d, flags);
132 return ret;
133}
134
135
136/**
137 * psb_accel_2d_copy_direction - compute blit order
138 * @xdir: X direction of move
139 * @ydir: Y direction of move
140 *
141 * Compute the correct order settings to ensure that an overlapping blit
142 * correctly copies all the pixels.
143 */
144static u32 psb_accel_2d_copy_direction(int xdir, int ydir)
145{
146 if (xdir < 0)
147 return (ydir < 0) ? PSB_2D_COPYORDER_BR2TL :
148 PSB_2D_COPYORDER_TR2BL;
149 else
150 return (ydir < 0) ? PSB_2D_COPYORDER_BL2TR :
151 PSB_2D_COPYORDER_TL2BR;
152}
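
/* Worked example (illustration): moving a block right and down overlaps its
 * own source, so src_x - dst_x < 0 and src_y - dst_y < 0; the function then
 * returns PSB_2D_COPYORDER_BR2TL so that every destination pixel is written
 * only after it has been read as a source pixel. */
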
153
154/**
155 * psb_accel_2d_copy - accelerated 2D copy
156 * @dev_priv: our DRM device
157 * @src_offset in bytes
158 * @src_stride in bytes
159 * @src_format psb 2D format defines
160 * @dst_offset in bytes
161 * @dst_stride in bytes
162 * @dst_format psb 2D format defines
163 * @src_x offset in pixels
164 * @src_y offset in pixels
165 * @dst_x offset in pixels
166 * @dst_y offset in pixels
167 * @size_x of the copied area
168 * @size_y of the copied area
169 *
170 * Format and issue a 2D accelerated copy command.
171 */
172static int psb_accel_2d_copy(struct drm_psb_private *dev_priv,
173 uint32_t src_offset, uint32_t src_stride,
174 uint32_t src_format, uint32_t dst_offset,
175 uint32_t dst_stride, uint32_t dst_format,
176 uint16_t src_x, uint16_t src_y,
177 uint16_t dst_x, uint16_t dst_y,
178 uint16_t size_x, uint16_t size_y)
179{
180 uint32_t blit_cmd;
181 uint32_t buffer[10];
182 uint32_t *buf;
183 uint32_t direction;
184
185 buf = buffer;
186
187 direction =
188 psb_accel_2d_copy_direction(src_x - dst_x, src_y - dst_y);
189
190 if (direction == PSB_2D_COPYORDER_BR2TL ||
191 direction == PSB_2D_COPYORDER_TR2BL) {
192 src_x += size_x - 1;
193 dst_x += size_x - 1;
194 }
195 if (direction == PSB_2D_COPYORDER_BR2TL ||
196 direction == PSB_2D_COPYORDER_BL2TR) {
197 src_y += size_y - 1;
198 dst_y += size_y - 1;
199 }
200
201 blit_cmd =
202 PSB_2D_BLIT_BH |
203 PSB_2D_ROT_NONE |
204 PSB_2D_DSTCK_DISABLE |
205 PSB_2D_SRCCK_DISABLE |
206 PSB_2D_USE_PAT | PSB_2D_ROP3_SRCCOPY | direction;
207
208 *buf++ = PSB_2D_FENCE_BH;
209 *buf++ =
210 PSB_2D_DST_SURF_BH | dst_format | (dst_stride <<
211 PSB_2D_DST_STRIDE_SHIFT);
212 *buf++ = dst_offset;
213 *buf++ =
214 PSB_2D_SRC_SURF_BH | src_format | (src_stride <<
215 PSB_2D_SRC_STRIDE_SHIFT);
216 *buf++ = src_offset;
217 *buf++ =
218 PSB_2D_SRC_OFF_BH | (src_x << PSB_2D_SRCOFF_XSTART_SHIFT) |
219 (src_y << PSB_2D_SRCOFF_YSTART_SHIFT);
220 *buf++ = blit_cmd;
221 *buf++ =
222 (dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y <<
223 PSB_2D_DST_YSTART_SHIFT);
224 *buf++ =
225 (size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y <<
226 PSB_2D_DST_YSIZE_SHIFT);
227 *buf++ = PSB_2D_FLUSH_BH;
228
229 return psbfb_2d_submit(dev_priv, buffer, buf - buffer);
230}
231
232/**
233 * psbfb_copyarea_accel - copyarea acceleration for /dev/fb
234 * @info: our framebuffer
235 * @a: copyarea parameters from the framebuffer core
236 *
237 * Perform a 2D copy via the accelerator
238 */
239static void psbfb_copyarea_accel(struct fb_info *info,
240 const struct fb_copyarea *a)
241{
242 struct psb_fbdev *fbdev = info->par;
243 struct psb_framebuffer *psbfb = &fbdev->pfb;
244 struct drm_device *dev = psbfb->base.dev;
245 struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb;
246 struct drm_psb_private *dev_priv = dev->dev_private;
247 uint32_t offset;
248 uint32_t stride;
249 uint32_t src_format;
250 uint32_t dst_format;
251
252 if (!fb)
253 return;
254
255 offset = psbfb->gtt->offset;
256 stride = fb->pitches[0];
257
258 switch (fb->depth) {
259 case 8:
260 src_format = PSB_2D_SRC_332RGB;
261 dst_format = PSB_2D_DST_332RGB;
262 break;
263 case 15:
264 src_format = PSB_2D_SRC_555RGB;
265 dst_format = PSB_2D_DST_555RGB;
266 break;
267 case 16:
268 src_format = PSB_2D_SRC_565RGB;
269 dst_format = PSB_2D_DST_565RGB;
270 break;
271 case 24:
272 case 32:
273 /* this is wrong but since we don't do blending it's okay */
274 src_format = PSB_2D_SRC_8888ARGB;
275 dst_format = PSB_2D_DST_8888ARGB;
276 break;
277 default:
278 /* software fallback */
279 cfb_copyarea(info, a);
280 return;
281 }
282
283 if (!gma_power_begin(dev, false)) {
284 cfb_copyarea(info, a);
285 return;
286 }
287 psb_accel_2d_copy(dev_priv,
288 offset, stride, src_format,
289 offset, stride, dst_format,
290 a->sx, a->sy, a->dx, a->dy, a->width, a->height);
291 gma_power_end(dev);
292}
293
294/**
295 * psbfb_copyarea - 2D copy interface
296 * @info: our framebuffer
297 * @region: region to copy
298 *
299 * Copy an area of the framebuffer console either by the accelerator
300 * or directly using the cfb helpers according to the request
301 */
302void psbfb_copyarea(struct fb_info *info,
303 const struct fb_copyarea *region)
304{
305 if (unlikely(info->state != FBINFO_STATE_RUNNING))
306 return;
307
308 /* Avoid the 8 pixel erratum */
309 if (region->width == 8 || region->height == 8 ||
310 (info->flags & FBINFO_HWACCEL_DISABLED))
311 return cfb_copyarea(info, region);
312
313 psbfb_copyarea_accel(info, region);
314}
315
316/**
317 * psbfb_sync - synchronize 2D
318 * @info: our framebuffer
319 *
320 * Wait for the 2D engine to quiesce so that we can do CPU
321 * access to the framebuffer again
322 */
323int psbfb_sync(struct fb_info *info)
324{
325 struct psb_fbdev *fbdev = info->par;
326 struct psb_framebuffer *psbfb = &fbdev->pfb;
327 struct drm_device *dev = psbfb->base.dev;
328 struct drm_psb_private *dev_priv = dev->dev_private;
329 unsigned long _end = jiffies + DRM_HZ;
330 int busy = 0;
331 unsigned long flags;
332
333 spin_lock_irqsave(&dev_priv->lock_2d, flags);
334 /*
335 * First idle the 2D engine.
336 */
337
338 if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
339 ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY) == 0))
340 goto out;
341
342 do {
343 busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
344 cpu_relax();
345 } while (busy && !time_after_eq(jiffies, _end));
346
347 if (busy)
348 busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
349 if (busy)
350 goto out;
351
352 do {
353 busy = ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
354 _PSB_C2B_STATUS_BUSY) != 0);
355 cpu_relax();
356 } while (busy && !time_after_eq(jiffies, _end));
357 if (busy)
358 busy = ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
359 _PSB_C2B_STATUS_BUSY) != 0);
360
361out:
362 spin_unlock_irqrestore(&dev_priv->lock_2d, flags);
363 return (busy) ? -EBUSY : 0;
364}
365
366int psb_accel_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
367{
368 struct drm_psb_private *dev_priv = dev->dev_private;
369 struct drm_psb_2d_op *op = data;
370 u32 *op_ptr = &op->cmd[0];
371 int i;
372 struct drm_gem_object *obj;
373 struct gtt_range *gtt;
374 int err = -EINVAL;
375
376 if (!dev_priv->ops->accel_2d)
377 return -EOPNOTSUPP;
378 if (op->size > PSB_2D_OP_BUFLEN)
379 return -EINVAL;
380
381 /* The GEM object being used. We need to support separate src/dst/etc
382 in the end but for now keep them all the same */
383 obj = drm_gem_object_lookup(dev, file, op->src);
384 if (obj == NULL)
385 return -ENOENT;
386 gtt = container_of(obj, struct gtt_range, gem);
387
388 if (psb_gtt_pin(gtt) < 0)
389 goto bad_2;
390 for (i = 0; i < op->size; i++, op_ptr++) {
391 u32 r = *op_ptr & 0xF0000000;
392 /* Fill in the GTT offsets for the command buffer */
393 if (r == PSB_2D_SRC_SURF_BH ||
394 r == PSB_2D_DST_SURF_BH ||
395 r == PSB_2D_MASK_SURF_BH ||
396 r == PSB_2D_PAT_SURF_BH) {
397 i++;
398 op_ptr++;
399 if (i == op->size)
400 goto bad;
401 if (*op_ptr)
402 goto bad;
403 *op_ptr = gtt->offset;
404 continue;
405 }
406 }
407 psbfb_2d_submit(dev_priv, op->cmd, op->size);
408 err = 0;
409bad:
410 psb_gtt_unpin(gtt);
411bad_2:
412 drm_gem_object_unreference(obj);
413 return err;
414}
diff --git a/drivers/staging/gma500/backlight.c b/drivers/staging/gma500/backlight.c
deleted file mode 100644
index 20793951fcac..000000000000
--- a/drivers/staging/gma500/backlight.c
+++ /dev/null
@@ -1,49 +0,0 @@
1/*
2 * GMA500 Backlight Interface
3 *
4 * Copyright (c) 2009-2011, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Authors: Eric Knopp
20 *
21 */
22
23#include "psb_drv.h"
24#include "psb_intel_reg.h"
25#include "psb_intel_drv.h"
26#include "intel_bios.h"
27#include "power.h"
28
29int gma_backlight_init(struct drm_device *dev)
30{
31#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
32 struct drm_psb_private *dev_priv = dev->dev_private;
33 return dev_priv->ops->backlight_init(dev);
34#else
35 return 0;
36#endif
37}
38
39void gma_backlight_exit(struct drm_device *dev)
40{
41#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
42 struct drm_psb_private *dev_priv = dev->dev_private;
43 if (dev_priv->backlight_device) {
44 dev_priv->backlight_device->props.brightness = 0;
45 backlight_update_status(dev_priv->backlight_device);
46 backlight_device_unregister(dev_priv->backlight_device);
47 }
48#endif
49}
diff --git a/drivers/staging/gma500/cdv_device.c b/drivers/staging/gma500/cdv_device.c
deleted file mode 100644
index 8ec10caab13e..000000000000
--- a/drivers/staging/gma500/cdv_device.c
+++ /dev/null
@@ -1,350 +0,0 @@
1/**************************************************************************
2 * Copyright (c) 2011, Intel Corporation.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 **************************************************************************/
19
20#include <linux/backlight.h>
21#include <drm/drmP.h>
22#include <drm/drm.h>
23#include "psb_drm.h"
24#include "psb_drv.h"
25#include "psb_reg.h"
26#include "psb_intel_reg.h"
27#include "intel_bios.h"
28#include "cdv_device.h"
29
30#define VGA_SR_INDEX 0x3c4
31#define VGA_SR_DATA 0x3c5
32
33static void cdv_disable_vga(struct drm_device *dev)
34{
35 u8 sr1;
36 u32 vga_reg;
37
38 vga_reg = VGACNTRL;
39
40 outb(1, VGA_SR_INDEX);
41 sr1 = inb(VGA_SR_DATA);
42 outb(sr1 | 1<<5, VGA_SR_DATA);
43 udelay(300);
44
45 REG_WRITE(vga_reg, VGA_DISP_DISABLE);
46 REG_READ(vga_reg);
47}
48
49static int cdv_output_init(struct drm_device *dev)
50{
51 struct drm_psb_private *dev_priv = dev->dev_private;
52 cdv_disable_vga(dev);
53
54 cdv_intel_crt_init(dev, &dev_priv->mode_dev);
55 cdv_intel_lvds_init(dev, &dev_priv->mode_dev);
56
57	/* These bits indicate HDMI rather than SDVO on CDV; probe both
58	   SDVOB and SDVOC for an HDMI encoder */
59 if (REG_READ(SDVOB) & SDVO_DETECTED)
60 cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOB);
61 if (REG_READ(SDVOC) & SDVO_DETECTED)
62 cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOC);
63 return 0;
64}
65
66#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
67
68/*
69 * Poulsbo Backlight Interfaces
70 */
71
72#define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */
73#define BLC_PWM_FREQ_CALC_CONSTANT 32
74#define MHz 1000000
75
76#define PSB_BLC_PWM_PRECISION_FACTOR 10
77#define PSB_BLC_MAX_PWM_REG_FREQ 0xFFFE
78#define PSB_BLC_MIN_PWM_REG_FREQ 0x2
79
80#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
81#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
82
83static int cdv_brightness;
84static struct backlight_device *cdv_backlight_device;
85
86static int cdv_get_brightness(struct backlight_device *bd)
87{
88 /* return locally cached var instead of HW read (due to DPST etc.) */
89 /* FIXME: ideally return actual value in case firmware fiddled with
90 it */
91 return cdv_brightness;
92}
93
94
95static int cdv_backlight_setup(struct drm_device *dev)
96{
97 struct drm_psb_private *dev_priv = dev->dev_private;
98 unsigned long core_clock;
99 /* u32 bl_max_freq; */
100 /* unsigned long value; */
101 u16 bl_max_freq;
102 uint32_t value;
103 uint32_t blc_pwm_precision_factor;
104
105 /* get bl_max_freq and pol from dev_priv*/
106 if (!dev_priv->lvds_bl) {
107		dev_err(dev->dev, "No valid LVDS backlight info\n");
108 return -ENOENT;
109 }
110 bl_max_freq = dev_priv->lvds_bl->freq;
111 blc_pwm_precision_factor = PSB_BLC_PWM_PRECISION_FACTOR;
112
113 core_clock = dev_priv->core_freq;
114
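	/*
	 * PWM divider = core clock in Hz / (32 * panel backlight frequency);
	 * the precision factor is multiplied in and divided straight back out.
	 */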
115 value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
116 value *= blc_pwm_precision_factor;
117 value /= bl_max_freq;
118 value /= blc_pwm_precision_factor;
119
120 if (value > (unsigned long long)PSB_BLC_MAX_PWM_REG_FREQ ||
121 value < (unsigned long long)PSB_BLC_MIN_PWM_REG_FREQ)
122 return -ERANGE;
123 else {
124 /* FIXME */
125 }
126 return 0;
127}
128
129static int cdv_set_brightness(struct backlight_device *bd)
130{
131 int level = bd->props.brightness;
132
133	/* Only brightness values in the 1-100% range are valid */
134 if (level < 1)
135 level = 1;
136
137 /*cdv_intel_lvds_set_brightness(dev, level); FIXME */
138 cdv_brightness = level;
139 return 0;
140}
141
142static const struct backlight_ops cdv_ops = {
143 .get_brightness = cdv_get_brightness,
144 .update_status = cdv_set_brightness,
145};
146
147static int cdv_backlight_init(struct drm_device *dev)
148{
149 struct drm_psb_private *dev_priv = dev->dev_private;
150 int ret;
151 struct backlight_properties props;
152
153 memset(&props, 0, sizeof(struct backlight_properties));
154 props.max_brightness = 100;
155 props.type = BACKLIGHT_PLATFORM;
156
157 cdv_backlight_device = backlight_device_register("psb-bl",
158 NULL, (void *)dev, &cdv_ops, &props);
159 if (IS_ERR(cdv_backlight_device))
160 return PTR_ERR(cdv_backlight_device);
161
162 ret = cdv_backlight_setup(dev);
163 if (ret < 0) {
164 backlight_device_unregister(cdv_backlight_device);
165 cdv_backlight_device = NULL;
166 return ret;
167 }
168 cdv_backlight_device->props.brightness = 100;
169 cdv_backlight_device->props.max_brightness = 100;
170 backlight_update_status(cdv_backlight_device);
171 dev_priv->backlight_device = cdv_backlight_device;
172 return 0;
173}
174
175#endif
176
177/*
178 * Provide the Cedarview-specific chip logic and low-level methods
179 * for power management
180 *
181 * FIXME: we need to implement the apm/ospm base management bits
182 * for this and the MID devices.
183 */
184
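/*
 * Message bus accessors: a request word is written to the message control
 * register at config offset 0xD0 of the PCI root bridge, and the payload is
 * transferred through the data register at offset 0xD4.
 */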
185static inline u32 CDV_MSG_READ32(uint port, uint offset)
186{
187 int mcr = (0x10<<24) | (port << 16) | (offset << 8);
188 uint32_t ret_val = 0;
189 struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
190 pci_write_config_dword(pci_root, 0xD0, mcr);
191 pci_read_config_dword(pci_root, 0xD4, &ret_val);
192 pci_dev_put(pci_root);
193 return ret_val;
194}
195
196static inline void CDV_MSG_WRITE32(uint port, uint offset, u32 value)
197{
198 int mcr = (0x11<<24) | (port << 16) | (offset << 8) | 0xF0;
199 struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
200 pci_write_config_dword(pci_root, 0xD4, value);
201 pci_write_config_dword(pci_root, 0xD0, mcr);
202 pci_dev_put(pci_root);
203}
204
205#define PSB_APM_CMD 0x0
206#define PSB_APM_STS 0x04
207#define PSB_PM_SSC 0x20
208#define PSB_PM_SSS 0x30
209#define PSB_PWRGT_GFX_MASK 0x3
210#define CDV_PWRGT_DISPLAY_CNTR 0x000fc00c
211#define CDV_PWRGT_DISPLAY_STS 0x000fc00c
212
213static void cdv_init_pm(struct drm_device *dev)
214{
215 struct drm_psb_private *dev_priv = dev->dev_private;
216 u32 pwr_cnt;
217 int i;
218
219 dev_priv->apm_base = CDV_MSG_READ32(PSB_PUNIT_PORT,
220 PSB_APMBA) & 0xFFFF;
221 dev_priv->ospm_base = CDV_MSG_READ32(PSB_PUNIT_PORT,
222 PSB_OSPMBA) & 0xFFFF;
223
224 /* Force power on for now */
225 pwr_cnt = inl(dev_priv->apm_base + PSB_APM_CMD);
226 pwr_cnt &= ~PSB_PWRGT_GFX_MASK;
227
228 outl(pwr_cnt, dev_priv->apm_base + PSB_APM_CMD);
229 for (i = 0; i < 5; i++) {
230 u32 pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS);
231 if ((pwr_sts & PSB_PWRGT_GFX_MASK) == 0)
232 break;
233 udelay(10);
234 }
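	/* Now power up the display island and wait for its status to clear */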
235 pwr_cnt = inl(dev_priv->ospm_base + PSB_PM_SSC);
236 pwr_cnt &= ~CDV_PWRGT_DISPLAY_CNTR;
237 outl(pwr_cnt, dev_priv->ospm_base + PSB_PM_SSC);
238 for (i = 0; i < 5; i++) {
239 u32 pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
240 if ((pwr_sts & CDV_PWRGT_DISPLAY_STS) == 0)
241 break;
242 udelay(10);
243 }
244}
245
246/**
247 * cdv_save_display_registers - save registers lost on suspend
248 * @dev: our DRM device
249 *
250 * Save the state we need in order to be able to restore the interface
251 * upon resume from suspend
252 *
253 * FIXME: review
254 */
255static int cdv_save_display_registers(struct drm_device *dev)
256{
257 return 0;
258}
259
260/**
261 * cdv_restore_display_registers - restore lost register state
262 * @dev: our DRM device
263 *
264 * Restore register state that was lost during suspend and resume.
265 *
266 * FIXME: review
267 */
268static int cdv_restore_display_registers(struct drm_device *dev)
269{
270 return 0;
271}
272
273static int cdv_power_down(struct drm_device *dev)
274{
275 return 0;
276}
277
278static int cdv_power_up(struct drm_device *dev)
279{
280 return 0;
281}
282
283/* FIXME ? - shared with Poulsbo */
284static void cdv_get_core_freq(struct drm_device *dev)
285{
286 uint32_t clock;
287 struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
288 struct drm_psb_private *dev_priv = dev->dev_private;
289
290 pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
291 pci_read_config_dword(pci_root, 0xD4, &clock);
292 pci_dev_put(pci_root);
293
294 switch (clock & 0x07) {
295 case 0:
296 dev_priv->core_freq = 100;
297 break;
298 case 1:
299 dev_priv->core_freq = 133;
300 break;
301 case 2:
302 dev_priv->core_freq = 150;
303 break;
304 case 3:
305 dev_priv->core_freq = 178;
306 break;
307 case 4:
308 dev_priv->core_freq = 200;
309 break;
310 case 5:
311 case 6:
312 case 7:
313		dev_priv->core_freq = 266;
		break;
314	default:
315 dev_priv->core_freq = 0;
316 }
317}
318
319static int cdv_chip_setup(struct drm_device *dev)
320{
321 cdv_get_core_freq(dev);
322 gma_intel_opregion_init(dev);
323 psb_intel_init_bios(dev);
324 return 0;
325}
326
327/* CDV is much like Poulsbo but has MID-like SGX offsets and PM */
328
329const struct psb_ops cdv_chip_ops = {
330 .name = "Cedartrail",
331 .accel_2d = 0,
332 .pipes = 2,
333 .sgx_offset = MRST_SGX_OFFSET,
334 .chip_setup = cdv_chip_setup,
335
336 .crtc_helper = &cdv_intel_helper_funcs,
337 .crtc_funcs = &cdv_intel_crtc_funcs,
338
339 .output_init = cdv_output_init,
340
341#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
342 .backlight_init = cdv_backlight_init,
343#endif
344
345 .init_pm = cdv_init_pm,
346 .save_regs = cdv_save_display_registers,
347 .restore_regs = cdv_restore_display_registers,
348 .power_down = cdv_power_down,
349 .power_up = cdv_power_up,
350};
diff --git a/drivers/staging/gma500/cdv_device.h b/drivers/staging/gma500/cdv_device.h
deleted file mode 100644
index 2a88b7beb551..000000000000
--- a/drivers/staging/gma500/cdv_device.h
+++ /dev/null
@@ -1,36 +0,0 @@
1/*
2 * Copyright © 2011 Intel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17
18extern const struct drm_crtc_helper_funcs cdv_intel_helper_funcs;
19extern const struct drm_crtc_funcs cdv_intel_crtc_funcs;
20extern void cdv_intel_crt_init(struct drm_device *dev,
21 struct psb_intel_mode_device *mode_dev);
22extern void cdv_intel_lvds_init(struct drm_device *dev,
23 struct psb_intel_mode_device *mode_dev);
24extern void cdv_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev,
25 int reg);
26extern struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
27 struct drm_crtc *crtc);
28
29extern inline void cdv_intel_wait_for_vblank(struct drm_device *dev)
30{
31	/* Wait for 20ms, i.e. one cycle at 50Hz. */
32 /* FIXME: msleep ?? */
33 mdelay(20);
34}
35
36
diff --git a/drivers/staging/gma500/cdv_intel_crt.c b/drivers/staging/gma500/cdv_intel_crt.c
deleted file mode 100644
index efda63b97b45..000000000000
--- a/drivers/staging/gma500/cdv_intel_crt.c
+++ /dev/null
@@ -1,326 +0,0 @@
1/*
2 * Copyright © 2006-2007 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 */
26
27#include <linux/i2c.h>
28#include <drm/drmP.h>
29
30#include "intel_bios.h"
31#include "psb_drv.h"
32#include "psb_intel_drv.h"
33#include "psb_intel_reg.h"
34#include "power.h"
35#include <linux/pm_runtime.h>
36
37
38static void cdv_intel_crt_dpms(struct drm_encoder *encoder, int mode)
39{
40 struct drm_device *dev = encoder->dev;
41 u32 temp, reg;
42 reg = ADPA;
43
44 temp = REG_READ(reg);
45 temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
46 temp &= ~ADPA_DAC_ENABLE;
47
48 switch (mode) {
49 case DRM_MODE_DPMS_ON:
50 temp |= ADPA_DAC_ENABLE;
51 break;
52 case DRM_MODE_DPMS_STANDBY:
53 temp |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE;
54 break;
55 case DRM_MODE_DPMS_SUSPEND:
56 temp |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE;
57 break;
58 case DRM_MODE_DPMS_OFF:
59 temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
60 break;
61 }
62
63 REG_WRITE(reg, temp);
64}
65
66static int cdv_intel_crt_mode_valid(struct drm_connector *connector,
67 struct drm_display_mode *mode)
68{
69 int max_clock = 0;
70 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
71 return MODE_NO_DBLESCAN;
72
73	/* The lowest clock for CDV is 20000 kHz */
74 if (mode->clock < 20000)
75 return MODE_CLOCK_LOW;
76
77	/* The max clock for CDV is 355 MHz instead of 400 MHz */
78 max_clock = 355000;
79 if (mode->clock > max_clock)
80 return MODE_CLOCK_HIGH;
81
82 if (mode->hdisplay > 1680 || mode->vdisplay > 1050)
83 return MODE_PANEL;
84
85 return MODE_OK;
86}
87
88static bool cdv_intel_crt_mode_fixup(struct drm_encoder *encoder,
89 struct drm_display_mode *mode,
90 struct drm_display_mode *adjusted_mode)
91{
92 return true;
93}
94
95static void cdv_intel_crt_mode_set(struct drm_encoder *encoder,
96 struct drm_display_mode *mode,
97 struct drm_display_mode *adjusted_mode)
98{
99
100 struct drm_device *dev = encoder->dev;
101 struct drm_crtc *crtc = encoder->crtc;
102 struct psb_intel_crtc *psb_intel_crtc =
103 to_psb_intel_crtc(crtc);
104 int dpll_md_reg;
105 u32 adpa, dpll_md;
106 u32 adpa_reg;
107
108 if (psb_intel_crtc->pipe == 0)
109 dpll_md_reg = DPLL_A_MD;
110 else
111 dpll_md_reg = DPLL_B_MD;
112
113 adpa_reg = ADPA;
114
115 /*
116 * Disable separate mode multiplier used when cloning SDVO to CRT
117 * XXX this needs to be adjusted when we really are cloning
118 */
119 {
120 dpll_md = REG_READ(dpll_md_reg);
121 REG_WRITE(dpll_md_reg,
122 dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
123 }
124
125 adpa = 0;
126 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
127 adpa |= ADPA_HSYNC_ACTIVE_HIGH;
128 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
129 adpa |= ADPA_VSYNC_ACTIVE_HIGH;
130
131 if (psb_intel_crtc->pipe == 0)
132 adpa |= ADPA_PIPE_A_SELECT;
133 else
134 adpa |= ADPA_PIPE_B_SELECT;
135
136 REG_WRITE(adpa_reg, adpa);
137}
138
139
140/**
141 * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence.
142 *
143 * \return true if CRT is connected.
144 * \return false if CRT is disconnected.
145 */
146static bool cdv_intel_crt_detect_hotplug(struct drm_connector *connector,
147 bool force)
148{
149 struct drm_device *dev = connector->dev;
150 u32 hotplug_en;
151 int i, tries = 0, ret = false;
152 u32 adpa_orig;
153
154 /* disable the DAC when doing the hotplug detection */
155
156 adpa_orig = REG_READ(ADPA);
157
158 REG_WRITE(ADPA, adpa_orig & ~(ADPA_DAC_ENABLE));
159
160 /*
161	 * On CDV, the CRT detect sequence needs to be done twice
162	 * to get a reliable result.
163 */
164 tries = 2;
165
166 hotplug_en = REG_READ(PORT_HOTPLUG_EN);
167 hotplug_en &= ~(CRT_HOTPLUG_DETECT_MASK);
168 hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
169
170 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
171 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
172
173 for (i = 0; i < tries ; i++) {
174 unsigned long timeout;
175 /* turn on the FORCE_DETECT */
176 REG_WRITE(PORT_HOTPLUG_EN, hotplug_en);
177 timeout = jiffies + msecs_to_jiffies(1000);
178 /* wait for FORCE_DETECT to go off */
179 do {
180 if (!(REG_READ(PORT_HOTPLUG_EN) &
181 CRT_HOTPLUG_FORCE_DETECT))
182 break;
183 msleep(1);
184 } while (time_after(timeout, jiffies));
185 }
186
187 if ((REG_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) !=
188 CRT_HOTPLUG_MONITOR_NONE)
189 ret = true;
190
191 /* Restore the saved ADPA */
192 REG_WRITE(ADPA, adpa_orig);
193 return ret;
194}
195
196static enum drm_connector_status cdv_intel_crt_detect(
197 struct drm_connector *connector, bool force)
198{
199 if (cdv_intel_crt_detect_hotplug(connector, force))
200 return connector_status_connected;
201 else
202 return connector_status_disconnected;
203}
204
205static void cdv_intel_crt_destroy(struct drm_connector *connector)
206{
207 struct psb_intel_output *intel_output = to_psb_intel_output(connector);
208
209 psb_intel_i2c_destroy(intel_output->ddc_bus);
210 drm_sysfs_connector_remove(connector);
211 drm_connector_cleanup(connector);
212 kfree(connector);
213}
214
215static int cdv_intel_crt_get_modes(struct drm_connector *connector)
216{
217 struct psb_intel_output *intel_output =
218 to_psb_intel_output(connector);
219 return psb_intel_ddc_get_modes(intel_output);
220}
221
222static int cdv_intel_crt_set_property(struct drm_connector *connector,
223 struct drm_property *property,
224 uint64_t value)
225{
226 return 0;
227}
228
229/*
230 * Routines for controlling stuff on the analog port
231 */
232
233static const struct drm_encoder_helper_funcs cdv_intel_crt_helper_funcs = {
234 .dpms = cdv_intel_crt_dpms,
235 .mode_fixup = cdv_intel_crt_mode_fixup,
236 .prepare = psb_intel_encoder_prepare,
237 .commit = psb_intel_encoder_commit,
238 .mode_set = cdv_intel_crt_mode_set,
239};
240
241static const struct drm_connector_funcs cdv_intel_crt_connector_funcs = {
242 .dpms = drm_helper_connector_dpms,
243 .detect = cdv_intel_crt_detect,
244 .fill_modes = drm_helper_probe_single_connector_modes,
245 .destroy = cdv_intel_crt_destroy,
246 .set_property = cdv_intel_crt_set_property,
247};
248
249static const struct drm_connector_helper_funcs
250 cdv_intel_crt_connector_helper_funcs = {
251 .mode_valid = cdv_intel_crt_mode_valid,
252 .get_modes = cdv_intel_crt_get_modes,
253 .best_encoder = psb_intel_best_encoder,
254};
255
256static void cdv_intel_crt_enc_destroy(struct drm_encoder *encoder)
257{
258 drm_encoder_cleanup(encoder);
259}
260
261static const struct drm_encoder_funcs cdv_intel_crt_enc_funcs = {
262 .destroy = cdv_intel_crt_enc_destroy,
263};
264
265void cdv_intel_crt_init(struct drm_device *dev,
266 struct psb_intel_mode_device *mode_dev)
267{
268
269 struct psb_intel_output *psb_intel_output;
270 struct drm_connector *connector;
271 struct drm_encoder *encoder;
272
273 u32 i2c_reg;
274
275 psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
276 if (!psb_intel_output)
277 return;
278
279 psb_intel_output->mode_dev = mode_dev;
280 connector = &psb_intel_output->base;
281 drm_connector_init(dev, connector,
282 &cdv_intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
283
284 encoder = &psb_intel_output->enc;
285 drm_encoder_init(dev, encoder,
286 &cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC);
287
288 drm_mode_connector_attach_encoder(&psb_intel_output->base,
289 &psb_intel_output->enc);
290
291 /* Set up the DDC bus. */
292 i2c_reg = GPIOA;
293 /* Remove the following code for CDV */
294 /*
295 if (dev_priv->crt_ddc_bus != 0)
296 i2c_reg = dev_priv->crt_ddc_bus;
297 }*/
298 psb_intel_output->ddc_bus = psb_intel_i2c_create(dev,
299 i2c_reg, "CRTDDC_A");
300 if (!psb_intel_output->ddc_bus) {
301 dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
302 "failed.\n");
303 goto failed_ddc;
304 }
305
306 psb_intel_output->type = INTEL_OUTPUT_ANALOG;
307 /*
308 psb_intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT);
309 psb_intel_output->crtc_mask = (1 << 0) | (1 << 1);
310 */
311 connector->interlace_allowed = 0;
312 connector->doublescan_allowed = 0;
313
314 drm_encoder_helper_add(encoder, &cdv_intel_crt_helper_funcs);
315 drm_connector_helper_add(connector,
316 &cdv_intel_crt_connector_helper_funcs);
317
318 drm_sysfs_connector_add(connector);
319
320 return;
321failed_ddc:
322 drm_encoder_cleanup(&psb_intel_output->enc);
323 drm_connector_cleanup(&psb_intel_output->base);
324 kfree(psb_intel_output);
325 return;
326}
diff --git a/drivers/staging/gma500/cdv_intel_display.c b/drivers/staging/gma500/cdv_intel_display.c
deleted file mode 100644
index c63a32776a9e..000000000000
--- a/drivers/staging/gma500/cdv_intel_display.c
+++ /dev/null
@@ -1,1508 +0,0 @@
1/*
2 * Copyright © 2006-2011 Intel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Authors:
18 * Eric Anholt <eric@anholt.net>
19 */
20
21#include <linux/i2c.h>
22#include <linux/pm_runtime.h>
23
24#include <drm/drmP.h>
25#include "framebuffer.h"
26#include "psb_drv.h"
27#include "psb_intel_drv.h"
28#include "psb_intel_reg.h"
29#include "psb_intel_display.h"
30#include "power.h"
31#include "cdv_device.h"
32
33
34struct cdv_intel_range_t {
35 int min, max;
36};
37
38struct cdv_intel_p2_t {
39 int dot_limit;
40 int p2_slow, p2_fast;
41};
42
43struct cdv_intel_clock_t {
44 /* given values */
45 int n;
46 int m1, m2;
47 int p1, p2;
48 /* derived values */
49 int dot;
50 int vco;
51 int m;
52 int p;
53};
54
55#define INTEL_P2_NUM 2
56
57struct cdv_intel_limit_t {
58 struct cdv_intel_range_t dot, vco, n, m, m1, m2, p, p1;
59 struct cdv_intel_p2_t p2;
60};
61
62#define CDV_LIMIT_SINGLE_LVDS_96 0
63#define CDV_LIMIT_SINGLE_LVDS_100 1
64#define CDV_LIMIT_DAC_HDMI_27 2
65#define CDV_LIMIT_DAC_HDMI_96 3
66
67static const struct cdv_intel_limit_t cdv_intel_limits[] = {
68	{ /* CDV_SINGLE_LVDS_96MHz */
69 .dot = {.min = 20000, .max = 115500},
70 .vco = {.min = 1800000, .max = 3600000},
71 .n = {.min = 2, .max = 6},
72 .m = {.min = 60, .max = 160},
73 .m1 = {.min = 0, .max = 0},
74 .m2 = {.min = 58, .max = 158},
75 .p = {.min = 28, .max = 140},
76 .p1 = {.min = 2, .max = 10},
77 .p2 = {.dot_limit = 200000,
78 .p2_slow = 14, .p2_fast = 14},
79 },
80 { /* CDV_SINGLE_LVDS_100MHz */
81 .dot = {.min = 20000, .max = 115500},
82 .vco = {.min = 1800000, .max = 3600000},
83 .n = {.min = 2, .max = 6},
84 .m = {.min = 60, .max = 160},
85 .m1 = {.min = 0, .max = 0},
86 .m2 = {.min = 58, .max = 158},
87 .p = {.min = 28, .max = 140},
88 .p1 = {.min = 2, .max = 10},
89	 /* The single-channel range is 25-112MHz, and dual-channel
90	  * is 80-224MHz. Prefer single channel as much as possible.
91 */
92 .p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14},
93 },
94 { /* CDV_DAC_HDMI_27MHz */
95 .dot = {.min = 20000, .max = 400000},
96 .vco = {.min = 1809000, .max = 3564000},
97 .n = {.min = 1, .max = 1},
98 .m = {.min = 67, .max = 132},
99 .m1 = {.min = 0, .max = 0},
100 .m2 = {.min = 65, .max = 130},
101 .p = {.min = 5, .max = 90},
102 .p1 = {.min = 1, .max = 9},
103 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
104 },
105 { /* CDV_DAC_HDMI_96MHz */
106 .dot = {.min = 20000, .max = 400000},
107 .vco = {.min = 1800000, .max = 3600000},
108 .n = {.min = 2, .max = 6},
109 .m = {.min = 60, .max = 160},
110 .m1 = {.min = 0, .max = 0},
111 .m2 = {.min = 58, .max = 158},
112 .p = {.min = 5, .max = 100},
113 .p1 = {.min = 1, .max = 10},
114 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
115 },
116};
117
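/*
 * Poll COND every W milliseconds (skipping the sleep when a debugger owns
 * the machine) until it becomes true or MS milliseconds elapse; evaluates
 * to 0 on success and -ETIMEDOUT on timeout.
 */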
118#define _wait_for(COND, MS, W) ({ \
119 unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
120 int ret__ = 0; \
121 while (!(COND)) { \
122 if (time_after(jiffies, timeout__)) { \
123 ret__ = -ETIMEDOUT; \
124 break; \
125 } \
126 if (W && !in_dbg_master()) \
127 msleep(W); \
128 } \
129 ret__; \
130})
131
132#define wait_for(COND, MS) _wait_for(COND, MS, 1)
133
134
135static int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val)
136{
137 int ret;
138
139 ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
140 if (ret) {
141 DRM_ERROR("timeout waiting for SB to idle before read\n");
142 return ret;
143 }
144
145 REG_WRITE(SB_ADDR, reg);
146 REG_WRITE(SB_PCKT,
147 SET_FIELD(SB_OPCODE_READ, SB_OPCODE) |
148 SET_FIELD(SB_DEST_DPLL, SB_DEST) |
149 SET_FIELD(0xf, SB_BYTE_ENABLE));
150
151 ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
152 if (ret) {
153 DRM_ERROR("timeout waiting for SB to idle after read\n");
154 return ret;
155 }
156
157 *val = REG_READ(SB_DATA);
158
159 return 0;
160}
161
162static int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val)
163{
164 int ret;
165 static bool dpio_debug = true;
166 u32 temp;
167
168 if (dpio_debug) {
169 if (cdv_sb_read(dev, reg, &temp) == 0)
170 DRM_DEBUG_KMS("0x%08x: 0x%08x (before)\n", reg, temp);
171 DRM_DEBUG_KMS("0x%08x: 0x%08x\n", reg, val);
172 }
173
174 ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
175 if (ret) {
176 DRM_ERROR("timeout waiting for SB to idle before write\n");
177 return ret;
178 }
179
180 REG_WRITE(SB_ADDR, reg);
181 REG_WRITE(SB_DATA, val);
182 REG_WRITE(SB_PCKT,
183 SET_FIELD(SB_OPCODE_WRITE, SB_OPCODE) |
184 SET_FIELD(SB_DEST_DPLL, SB_DEST) |
185 SET_FIELD(0xf, SB_BYTE_ENABLE));
186
187 ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
188 if (ret) {
189 DRM_ERROR("timeout waiting for SB to idle after write\n");
190 return ret;
191 }
192
193 if (dpio_debug) {
194 if (cdv_sb_read(dev, reg, &temp) == 0)
195 DRM_DEBUG_KMS("0x%08x: 0x%08x (after)\n", reg, temp);
196 }
197
198 return 0;
199}
200
201/* Reset the DPIO configuration register. The BIOS does this at every
202 * mode set.
203 */
204static void cdv_sb_reset(struct drm_device *dev)
205{
206
207 REG_WRITE(DPIO_CFG, 0);
208 REG_READ(DPIO_CFG);
209 REG_WRITE(DPIO_CFG, DPIO_MODE_SELECT_0 | DPIO_CMN_RESET_N);
210}
211
212/* Unlike most Intel display engines, on Cedarview the DPLL registers
213 * are behind this sideband bus. They must be programmed while the
214 * DPLL reference clock is on in the DPLL control register, but before
215 * the DPLL is enabled in the DPLL control register.
216 */
217static int
218cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
219 struct cdv_intel_clock_t *clock)
220{
221 struct psb_intel_crtc *psb_crtc =
222 to_psb_intel_crtc(crtc);
223 int pipe = psb_crtc->pipe;
224 u32 m, n_vco, p;
225 int ret = 0;
226 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
227 u32 ref_value;
228
229 cdv_sb_reset(dev);
230
231 if ((REG_READ(dpll_reg) & DPLL_SYNCLOCK_ENABLE) == 0) {
232 DRM_ERROR("Attempting to set DPLL with refclk disabled\n");
233 return -EBUSY;
234 }
235
236 /* Follow the BIOS and write the REF/SFR Register. Hardcoded value */
237 ref_value = 0x68A701;
238
239 cdv_sb_write(dev, SB_REF_SFR(pipe), ref_value);
240
241 /* We don't know what the other fields of these regs are, so
242 * leave them in place.
243 */
244 ret = cdv_sb_read(dev, SB_M(pipe), &m);
245 if (ret)
246 return ret;
247 m &= ~SB_M_DIVIDER_MASK;
248 m |= ((clock->m2) << SB_M_DIVIDER_SHIFT);
249 ret = cdv_sb_write(dev, SB_M(pipe), m);
250 if (ret)
251 return ret;
252
253 ret = cdv_sb_read(dev, SB_N_VCO(pipe), &n_vco);
254 if (ret)
255 return ret;
256
257 /* Follow the BIOS to program the N_DIVIDER REG */
258 n_vco &= 0xFFFF;
259 n_vco |= 0x107;
260 n_vco &= ~(SB_N_VCO_SEL_MASK |
261 SB_N_DIVIDER_MASK |
262 SB_N_CB_TUNE_MASK);
263
264 n_vco |= ((clock->n) << SB_N_DIVIDER_SHIFT);
265
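	/* Select the VCO range and CB tune value based on the target VCO rate */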
266 if (clock->vco < 2250000) {
267 n_vco |= (2 << SB_N_CB_TUNE_SHIFT);
268 n_vco |= (0 << SB_N_VCO_SEL_SHIFT);
269 } else if (clock->vco < 2750000) {
270 n_vco |= (1 << SB_N_CB_TUNE_SHIFT);
271 n_vco |= (1 << SB_N_VCO_SEL_SHIFT);
272 } else if (clock->vco < 3300000) {
273 n_vco |= (0 << SB_N_CB_TUNE_SHIFT);
274 n_vco |= (2 << SB_N_VCO_SEL_SHIFT);
275 } else {
276 n_vco |= (0 << SB_N_CB_TUNE_SHIFT);
277 n_vco |= (3 << SB_N_VCO_SEL_SHIFT);
278 }
279
280 ret = cdv_sb_write(dev, SB_N_VCO(pipe), n_vco);
281 if (ret)
282 return ret;
283
284 ret = cdv_sb_read(dev, SB_P(pipe), &p);
285 if (ret)
286 return ret;
287 p &= ~(SB_P2_DIVIDER_MASK | SB_P1_DIVIDER_MASK);
288 p |= SET_FIELD(clock->p1, SB_P1_DIVIDER);
289 switch (clock->p2) {
290 case 5:
291 p |= SET_FIELD(SB_P2_5, SB_P2_DIVIDER);
292 break;
293 case 10:
294 p |= SET_FIELD(SB_P2_10, SB_P2_DIVIDER);
295 break;
296 case 14:
297 p |= SET_FIELD(SB_P2_14, SB_P2_DIVIDER);
298 break;
299 case 7:
300 p |= SET_FIELD(SB_P2_7, SB_P2_DIVIDER);
301 break;
302 default:
303 DRM_ERROR("Bad P2 clock: %d\n", clock->p2);
304 return -EINVAL;
305 }
306 ret = cdv_sb_write(dev, SB_P(pipe), p);
307 if (ret)
308 return ret;
309
310	/* Always program the lane registers for pipe A */
311 if (pipe == 0) {
312 /* Program the Lane0/1 for HDMI B */
313 u32 lane_reg, lane_value;
314
315 lane_reg = PSB_LANE0;
316 cdv_sb_read(dev, lane_reg, &lane_value);
317 lane_value &= ~(LANE_PLL_MASK);
318 lane_value |= LANE_PLL_ENABLE;
319 cdv_sb_write(dev, lane_reg, lane_value);
320
321 lane_reg = PSB_LANE1;
322 cdv_sb_read(dev, lane_reg, &lane_value);
323 lane_value &= ~(LANE_PLL_MASK);
324 lane_value |= LANE_PLL_ENABLE;
325 cdv_sb_write(dev, lane_reg, lane_value);
326
327 /* Program the Lane2/3 for HDMI C */
328 lane_reg = PSB_LANE2;
329 cdv_sb_read(dev, lane_reg, &lane_value);
330 lane_value &= ~(LANE_PLL_MASK);
331 lane_value |= LANE_PLL_ENABLE;
332 cdv_sb_write(dev, lane_reg, lane_value);
333
334 lane_reg = PSB_LANE3;
335 cdv_sb_read(dev, lane_reg, &lane_value);
336 lane_value &= ~(LANE_PLL_MASK);
337 lane_value |= LANE_PLL_ENABLE;
338 cdv_sb_write(dev, lane_reg, lane_value);
339 }
340
341 return 0;
342}
343
344/*
345 * Returns whether any output on the specified pipe is of the specified type
346 */
347bool cdv_intel_pipe_has_type(struct drm_crtc *crtc, int type)
348{
349 struct drm_device *dev = crtc->dev;
350 struct drm_mode_config *mode_config = &dev->mode_config;
351 struct drm_connector *l_entry;
352
353 list_for_each_entry(l_entry, &mode_config->connector_list, head) {
354 if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
355 struct psb_intel_output *psb_intel_output =
356 to_psb_intel_output(l_entry);
357 if (psb_intel_output->type == type)
358 return true;
359 }
360 }
361 return false;
362}
363
364static const struct cdv_intel_limit_t *cdv_intel_limit(struct drm_crtc *crtc,
365 int refclk)
366{
367 const struct cdv_intel_limit_t *limit;
368 if (cdv_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
369 /*
370	 * Only single-channel LVDS is currently supported on CDV. If that
371	 * turns out to be wrong, add dual-channel LVDS support here.
372 */
373 if (refclk == 96000)
374 limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_96];
375 else
376 limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_100];
377 } else {
378 if (refclk == 27000)
379 limit = &cdv_intel_limits[CDV_LIMIT_DAC_HDMI_27];
380 else
381 limit = &cdv_intel_limits[CDV_LIMIT_DAC_HDMI_96];
382 }
383 return limit;
384}
385
386/* m1 is reserved as 0 in CDV, n is a ring counter */
387static void cdv_intel_clock(struct drm_device *dev,
388 int refclk, struct cdv_intel_clock_t *clock)
389{
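	/* Effective dot clock: refclk * (m2 + 2) / (n * p1 * p2) */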
390 clock->m = clock->m2 + 2;
391 clock->p = clock->p1 * clock->p2;
392 clock->vco = (refclk * clock->m) / clock->n;
393 clock->dot = clock->vco / clock->p;
394}
395
396
397#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; }
398static bool cdv_intel_PLL_is_valid(struct drm_crtc *crtc,
399 const struct cdv_intel_limit_t *limit,
400 struct cdv_intel_clock_t *clock)
401{
402 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
403 INTELPllInvalid("p1 out of range\n");
404 if (clock->p < limit->p.min || limit->p.max < clock->p)
405 INTELPllInvalid("p out of range\n");
406 /* unnecessary to check the range of m(m1/M2)/n again */
407 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
408 INTELPllInvalid("vco out of range\n");
409 /* XXX: We may need to be checking "Dot clock"
410 * depending on the multiplier, connector, etc.,
411 * rather than just a single range.
412 */
413 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
414 INTELPllInvalid("dot out of range\n");
415
416 return true;
417}
418
419static bool cdv_intel_find_best_PLL(struct drm_crtc *crtc, int target,
420 int refclk,
421 struct cdv_intel_clock_t *best_clock)
422{
423 struct drm_device *dev = crtc->dev;
424 struct cdv_intel_clock_t clock;
425 const struct cdv_intel_limit_t *limit = cdv_intel_limit(crtc, refclk);
426 int err = target;
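	/*
	 * err starts at the full target value, so any candidate that passes
	 * the limit checks improves on it; the return below is true once at
	 * least one valid clock has been recorded.
	 */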
427
428
429 if (cdv_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
430 (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
431 /*
432 * For LVDS, if the panel is on, just rely on its current
433 * settings for dual-channel. We haven't figured out how to
434 * reliably set up different single/dual channel state, if we
435 * even can.
436 */
437 if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
438 LVDS_CLKB_POWER_UP)
439 clock.p2 = limit->p2.p2_fast;
440 else
441 clock.p2 = limit->p2.p2_slow;
442 } else {
443 if (target < limit->p2.dot_limit)
444 clock.p2 = limit->p2.p2_slow;
445 else
446 clock.p2 = limit->p2.p2_fast;
447 }
448
449 memset(best_clock, 0, sizeof(*best_clock));
450 clock.m1 = 0;
451 /* m1 is reserved as 0 in CDV, n is a ring counter.
452 So skip the m1 loop */
453 for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) {
454 for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max;
455 clock.m2++) {
456 for (clock.p1 = limit->p1.min;
457 clock.p1 <= limit->p1.max;
458 clock.p1++) {
459 int this_err;
460
461 cdv_intel_clock(dev, refclk, &clock);
462
463 if (!cdv_intel_PLL_is_valid(crtc,
464 limit, &clock))
465 continue;
466
467 this_err = abs(clock.dot - target);
468 if (this_err < err) {
469 *best_clock = clock;
470 err = this_err;
471 }
472 }
473 }
474 }
475
476 return err != target;
477}
478
479int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
480 int x, int y, struct drm_framebuffer *old_fb)
481{
482 struct drm_device *dev = crtc->dev;
483 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
484 struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
485 int pipe = psb_intel_crtc->pipe;
486 unsigned long start, offset;
487 int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
488 int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
489 int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
490 int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
491 u32 dspcntr;
492 int ret = 0;
493
494 if (!gma_power_begin(dev, true))
495 return 0;
496
497 /* no fb bound */
498 if (!crtc->fb) {
499 dev_err(dev->dev, "No FB bound\n");
500 goto psb_intel_pipe_cleaner;
501 }
502
503
504 /* We are displaying this buffer, make sure it is actually loaded
505 into the GTT */
506 ret = psb_gtt_pin(psbfb->gtt);
507 if (ret < 0)
508 goto psb_intel_pipe_set_base_exit;
509 start = psbfb->gtt->offset;
510 offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
511
512 REG_WRITE(dspstride, crtc->fb->pitches[0]);
513
514 dspcntr = REG_READ(dspcntr_reg);
515 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
516
517 switch (crtc->fb->bits_per_pixel) {
518 case 8:
519 dspcntr |= DISPPLANE_8BPP;
520 break;
521 case 16:
522 if (crtc->fb->depth == 15)
523 dspcntr |= DISPPLANE_15_16BPP;
524 else
525 dspcntr |= DISPPLANE_16BPP;
526 break;
527 case 24:
528 case 32:
529 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
530 break;
531 default:
532 dev_err(dev->dev, "Unknown color depth\n");
533 ret = -EINVAL;
534 goto psb_intel_pipe_set_base_exit;
535 }
536 REG_WRITE(dspcntr_reg, dspcntr);
537
538 dev_dbg(dev->dev,
539 "Writing base %08lX %08lX %d %d\n", start, offset, x, y);
540
541 REG_WRITE(dspbase, offset);
542 REG_READ(dspbase);
543 REG_WRITE(dspsurf, start);
544 REG_READ(dspsurf);
545
546psb_intel_pipe_cleaner:
547 /* If there was a previous display we can now unpin it */
548 if (old_fb)
549 psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
550
551psb_intel_pipe_set_base_exit:
552 gma_power_end(dev);
553 return ret;
554}
555
556/**
557 * Sets the power management mode of the pipe and plane.
558 *
559 * This code should probably grow support for turning the cursor off and back
560 * on appropriately at the same time as we're turning the pipe off/on.
561 */
562static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
563{
564 struct drm_device *dev = crtc->dev;
565 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
566 int pipe = psb_intel_crtc->pipe;
567 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
568 int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
569 int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
570 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
571 u32 temp;
572 bool enabled;
573
574 /* XXX: When our outputs are all unaware of DPMS modes other than off
575 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
576 */
577 switch (mode) {
578 case DRM_MODE_DPMS_ON:
579 case DRM_MODE_DPMS_STANDBY:
580 case DRM_MODE_DPMS_SUSPEND:
581 /* Enable the DPLL */
582 temp = REG_READ(dpll_reg);
583 if ((temp & DPLL_VCO_ENABLE) == 0) {
584 REG_WRITE(dpll_reg, temp);
585 REG_READ(dpll_reg);
586 /* Wait for the clocks to stabilize. */
587 udelay(150);
588 REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
589 REG_READ(dpll_reg);
590 /* Wait for the clocks to stabilize. */
591 udelay(150);
592 REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
593 REG_READ(dpll_reg);
594 /* Wait for the clocks to stabilize. */
595 udelay(150);
596 }
597
598		/* Jim Bish - switch plane and pipe per scott */
599 /* Enable the plane */
600 temp = REG_READ(dspcntr_reg);
601 if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
602 REG_WRITE(dspcntr_reg,
603 temp | DISPLAY_PLANE_ENABLE);
604 /* Flush the plane changes */
605 REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
606 }
607
608 udelay(150);
609
610 /* Enable the pipe */
611 temp = REG_READ(pipeconf_reg);
612 if ((temp & PIPEACONF_ENABLE) == 0)
613 REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
614
615 psb_intel_crtc_load_lut(crtc);
616
617 /* Give the overlay scaler a chance to enable
618 * if it's on this pipe */
619 /* psb_intel_crtc_dpms_video(crtc, true); TODO */
620 break;
621 case DRM_MODE_DPMS_OFF:
622 /* Give the overlay scaler a chance to disable
623 * if it's on this pipe */
624 /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
625
626 /* Disable the VGA plane that we never use */
627 REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
628
629 /* Jim Bish - changed pipe/plane here as well. */
630
631 /* Wait for vblank for the disable to take effect */
632 cdv_intel_wait_for_vblank(dev);
633
634 /* Next, disable display pipes */
635 temp = REG_READ(pipeconf_reg);
636 if ((temp & PIPEACONF_ENABLE) != 0) {
637 REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
638 REG_READ(pipeconf_reg);
639 }
640
641 /* Wait for vblank for the disable to take effect. */
642 cdv_intel_wait_for_vblank(dev);
643
644 udelay(150);
645
646 /* Disable display plane */
647 temp = REG_READ(dspcntr_reg);
648 if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
649 REG_WRITE(dspcntr_reg,
650 temp & ~DISPLAY_PLANE_ENABLE);
651 /* Flush the plane changes */
652 REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
653 REG_READ(dspbase_reg);
654 }
655
656 temp = REG_READ(dpll_reg);
657 if ((temp & DPLL_VCO_ENABLE) != 0) {
658 REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
659 REG_READ(dpll_reg);
660 }
661
662 /* Wait for the clocks to turn off. */
663 udelay(150);
664 break;
665 }
666 enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
667 /*Set FIFO Watermarks*/
668 REG_WRITE(DSPARB, 0x3F3E);
669}
670
671static void cdv_intel_crtc_prepare(struct drm_crtc *crtc)
672{
673 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
674 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
675}
676
677static void cdv_intel_crtc_commit(struct drm_crtc *crtc)
678{
679 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
680 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
681}
682
683void cdv_intel_encoder_prepare(struct drm_encoder *encoder)
684{
685 struct drm_encoder_helper_funcs *encoder_funcs =
686 encoder->helper_private;
687 /* lvds has its own version of prepare see cdv_intel_lvds_prepare */
688 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
689}
690
691void cdv_intel_encoder_commit(struct drm_encoder *encoder)
692{
693 struct drm_encoder_helper_funcs *encoder_funcs =
694 encoder->helper_private;
695 /* lvds has its own version of commit see cdv_intel_lvds_commit */
696 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
697}
698
699static bool cdv_intel_crtc_mode_fixup(struct drm_crtc *crtc,
700 struct drm_display_mode *mode,
701 struct drm_display_mode *adjusted_mode)
702{
703 return true;
704}
705
706
707/**
708 * Return the pipe currently connected to the panel fitter,
709 * or -1 if the panel fitter is not present or not in use
710 */
711static int cdv_intel_panel_fitter_pipe(struct drm_device *dev)
712{
713 u32 pfit_control;
714
715 pfit_control = REG_READ(PFIT_CONTROL);
716
717 /* See if the panel fitter is in use */
718 if ((pfit_control & PFIT_ENABLE) == 0)
719 return -1;
720 return (pfit_control >> 29) & 0x3;
721}
722
723static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
724 struct drm_display_mode *mode,
725 struct drm_display_mode *adjusted_mode,
726 int x, int y,
727 struct drm_framebuffer *old_fb)
728{
729 struct drm_device *dev = crtc->dev;
730 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
731 int pipe = psb_intel_crtc->pipe;
732 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
733 int dpll_md_reg = (psb_intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
734 int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
735 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
736 int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
737 int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
738 int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
739 int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
740 int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
741 int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
742 int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
743 int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
744 int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
745 int refclk;
746 struct cdv_intel_clock_t clock;
747 u32 dpll = 0, dspcntr, pipeconf;
748 bool ok, is_sdvo = false, is_dvo = false;
749 bool is_crt = false, is_lvds = false, is_tv = false;
750 bool is_hdmi = false;
751 struct drm_mode_config *mode_config = &dev->mode_config;
752 struct drm_connector *connector;
753
754 list_for_each_entry(connector, &mode_config->connector_list, head) {
755 struct psb_intel_output *psb_intel_output =
756 to_psb_intel_output(connector);
757
758 if (!connector->encoder
759 || connector->encoder->crtc != crtc)
760 continue;
761
762 switch (psb_intel_output->type) {
763 case INTEL_OUTPUT_LVDS:
764 is_lvds = true;
765 break;
766 case INTEL_OUTPUT_SDVO:
767 is_sdvo = true;
768 break;
769 case INTEL_OUTPUT_DVO:
770 is_dvo = true;
771 break;
772 case INTEL_OUTPUT_TVOUT:
773 is_tv = true;
774 break;
775 case INTEL_OUTPUT_ANALOG:
776 is_crt = true;
777 break;
778 case INTEL_OUTPUT_HDMI:
779 is_hdmi = true;
780 break;
781 }
782 }
783
784 refclk = 96000;
785
786 /* Hack selection about ref clk for CRT */
787 /* Select 27MHz as the reference clk for HDMI */
788 if (is_crt || is_hdmi)
789 refclk = 27000;
790
791 drm_mode_debug_printmodeline(adjusted_mode);
792
793 ok = cdv_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk,
794 &clock);
795 if (!ok) {
796 dev_err(dev->dev, "Couldn't find PLL settings for mode!\n");
797 return 0;
798 }
799
800 dpll = DPLL_VGA_MODE_DIS;
801 if (is_tv) {
802 /* XXX: just matching BIOS for now */
803/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
804 dpll |= 3;
805 }
806 dpll |= PLL_REF_INPUT_DREFCLK;
807
808 dpll |= DPLL_SYNCLOCK_ENABLE;
809 dpll |= DPLL_VGA_MODE_DIS;
810 if (is_lvds)
811 dpll |= DPLLB_MODE_LVDS;
812 else
813 dpll |= DPLLB_MODE_DAC_SERIAL;
814 /* dpll |= (2 << 11); */
815
816 /* setup pipeconf */
817 pipeconf = REG_READ(pipeconf_reg);
818
819 /* Set up the display plane register */
820 dspcntr = DISPPLANE_GAMMA_ENABLE;
821
822 if (pipe == 0)
823 dspcntr |= DISPPLANE_SEL_PIPE_A;
824 else
825 dspcntr |= DISPPLANE_SEL_PIPE_B;
826
827 dspcntr |= DISPLAY_PLANE_ENABLE;
828 pipeconf |= PIPEACONF_ENABLE;
829
830 REG_WRITE(dpll_reg, dpll | DPLL_VGA_MODE_DIS | DPLL_SYNCLOCK_ENABLE);
831 REG_READ(dpll_reg);
832
833 cdv_dpll_set_clock_cdv(dev, crtc, &clock);
834
835 udelay(150);
836
837
838 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
839 * This is an exception to the general rule that mode_set doesn't turn
840 * things on.
841 */
842 if (is_lvds) {
843 u32 lvds = REG_READ(LVDS);
844
845 lvds |=
846 LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP |
847 LVDS_PIPEB_SELECT;
848 /* Set the B0-B3 data pairs corresponding to
849 * whether we're going to
850 * set the DPLLs for dual-channel mode or not.
851 */
852 if (clock.p2 == 7)
853 lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
854 else
855 lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
856
857 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
858 * appropriately here, but we need to look more
859 * thoroughly into how panels behave in the two modes.
860 */
861
862 REG_WRITE(LVDS, lvds);
863 REG_READ(LVDS);
864 }
865
866 dpll |= DPLL_VCO_ENABLE;
867
868 /* Disable the panel fitter if it was on our pipe */
869 if (cdv_intel_panel_fitter_pipe(dev) == pipe)
870 REG_WRITE(PFIT_CONTROL, 0);
871
872 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
873 drm_mode_debug_printmodeline(mode);
874
875 REG_WRITE(dpll_reg,
876 (REG_READ(dpll_reg) & ~DPLL_LOCK) | DPLL_VCO_ENABLE);
877 REG_READ(dpll_reg);
878 /* Wait for the clocks to stabilize. */
879 udelay(150); /* 42 usec w/o calibration, 110 with. rounded up. */
880
881 if (!(REG_READ(dpll_reg) & DPLL_LOCK)) {
882 dev_err(dev->dev, "Failed to get DPLL lock\n");
883 return -EBUSY;
884 }
885
886 {
887 int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
888 REG_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
889 }
890
891 REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
892 ((adjusted_mode->crtc_htotal - 1) << 16));
893 REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
894 ((adjusted_mode->crtc_hblank_end - 1) << 16));
895 REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
896 ((adjusted_mode->crtc_hsync_end - 1) << 16));
897 REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
898 ((adjusted_mode->crtc_vtotal - 1) << 16));
899 REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
900 ((adjusted_mode->crtc_vblank_end - 1) << 16));
901 REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
902 ((adjusted_mode->crtc_vsync_end - 1) << 16));
903 /* pipesrc and dspsize control the size that is scaled from,
904 * which should always be the user's requested size.
905 */
906 REG_WRITE(dspsize_reg,
907 ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
908 REG_WRITE(dsppos_reg, 0);
909 REG_WRITE(pipesrc_reg,
910 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
911 REG_WRITE(pipeconf_reg, pipeconf);
912 REG_READ(pipeconf_reg);
913
914 cdv_intel_wait_for_vblank(dev);
915
916 REG_WRITE(dspcntr_reg, dspcntr);
917
918 /* Flush the plane changes */
919 {
920 struct drm_crtc_helper_funcs *crtc_funcs =
921 crtc->helper_private;
922 crtc_funcs->mode_set_base(crtc, x, y, old_fb);
923 }
924
925 cdv_intel_wait_for_vblank(dev);
926
927 return 0;
928}
929
930/** Loads the palette/gamma unit for the CRTC with the prepared values */
931void cdv_intel_crtc_load_lut(struct drm_crtc *crtc)
932{
933 struct drm_device *dev = crtc->dev;
934 struct drm_psb_private *dev_priv =
935 (struct drm_psb_private *)dev->dev_private;
936 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
937 int palreg = PALETTE_A;
938 int i;
939
940 /* The clocks have to be on to load the palette. */
941 if (!crtc->enabled)
942 return;
943
944 switch (psb_intel_crtc->pipe) {
945 case 0:
946 break;
947 case 1:
948 palreg = PALETTE_B;
949 break;
950 case 2:
951 palreg = PALETTE_C;
952 break;
953 default:
954 dev_err(dev->dev, "Illegal Pipe Number.\n");
955 return;
956 }
957
958 if (gma_power_begin(dev, false)) {
959 for (i = 0; i < 256; i++) {
960 REG_WRITE(palreg + 4 * i,
961 ((psb_intel_crtc->lut_r[i] +
962 psb_intel_crtc->lut_adj[i]) << 16) |
963 ((psb_intel_crtc->lut_g[i] +
964 psb_intel_crtc->lut_adj[i]) << 8) |
965 (psb_intel_crtc->lut_b[i] +
966 psb_intel_crtc->lut_adj[i]));
967 }
968 gma_power_end(dev);
969 } else {
970 for (i = 0; i < 256; i++) {
971 dev_priv->save_palette_a[i] =
972 ((psb_intel_crtc->lut_r[i] +
973 psb_intel_crtc->lut_adj[i]) << 16) |
974 ((psb_intel_crtc->lut_g[i] +
975 psb_intel_crtc->lut_adj[i]) << 8) |
976 (psb_intel_crtc->lut_b[i] +
977 psb_intel_crtc->lut_adj[i]);
978 }
979
980 }
981}
982
983/**
984 * Save the HW state of the given crtc
985 */
986static void cdv_intel_crtc_save(struct drm_crtc *crtc)
987{
988 struct drm_device *dev = crtc->dev;
989 /* struct drm_psb_private *dev_priv =
990 (struct drm_psb_private *)dev->dev_private; */
991 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
992 struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
993 int pipeA = (psb_intel_crtc->pipe == 0);
994 uint32_t paletteReg;
995 int i;
996
997 if (!crtc_state) {
998 dev_dbg(dev->dev, "No CRTC state found\n");
999 return;
1000 }
1001
1002 crtc_state->saveDSPCNTR = REG_READ(pipeA ? DSPACNTR : DSPBCNTR);
1003 crtc_state->savePIPECONF = REG_READ(pipeA ? PIPEACONF : PIPEBCONF);
1004 crtc_state->savePIPESRC = REG_READ(pipeA ? PIPEASRC : PIPEBSRC);
1005 crtc_state->saveFP0 = REG_READ(pipeA ? FPA0 : FPB0);
1006 crtc_state->saveFP1 = REG_READ(pipeA ? FPA1 : FPB1);
1007 crtc_state->saveDPLL = REG_READ(pipeA ? DPLL_A : DPLL_B);
1008 crtc_state->saveHTOTAL = REG_READ(pipeA ? HTOTAL_A : HTOTAL_B);
1009 crtc_state->saveHBLANK = REG_READ(pipeA ? HBLANK_A : HBLANK_B);
1010 crtc_state->saveHSYNC = REG_READ(pipeA ? HSYNC_A : HSYNC_B);
1011 crtc_state->saveVTOTAL = REG_READ(pipeA ? VTOTAL_A : VTOTAL_B);
1012 crtc_state->saveVBLANK = REG_READ(pipeA ? VBLANK_A : VBLANK_B);
1013 crtc_state->saveVSYNC = REG_READ(pipeA ? VSYNC_A : VSYNC_B);
1014 crtc_state->saveDSPSTRIDE = REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE);
1015
1016 /*NOTE: DSPSIZE DSPPOS only for psb*/
1017 crtc_state->saveDSPSIZE = REG_READ(pipeA ? DSPASIZE : DSPBSIZE);
1018 crtc_state->saveDSPPOS = REG_READ(pipeA ? DSPAPOS : DSPBPOS);
1019
1020 crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE);
1021
1022 DRM_DEBUG("(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
1023 crtc_state->saveDSPCNTR,
1024 crtc_state->savePIPECONF,
1025 crtc_state->savePIPESRC,
1026 crtc_state->saveFP0,
1027 crtc_state->saveFP1,
1028 crtc_state->saveDPLL,
1029 crtc_state->saveHTOTAL,
1030 crtc_state->saveHBLANK,
1031 crtc_state->saveHSYNC,
1032 crtc_state->saveVTOTAL,
1033 crtc_state->saveVBLANK,
1034 crtc_state->saveVSYNC,
1035 crtc_state->saveDSPSTRIDE,
1036 crtc_state->saveDSPSIZE,
1037 crtc_state->saveDSPPOS,
1038 crtc_state->saveDSPBASE
1039 );
1040
1041 paletteReg = pipeA ? PALETTE_A : PALETTE_B;
1042 for (i = 0; i < 256; ++i)
1043 crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
1044}
1045
1046/**
1047 * Restore the HW state of the given crtc
1048 */
1049static void cdv_intel_crtc_restore(struct drm_crtc *crtc)
1050{
1051 struct drm_device *dev = crtc->dev;
1052 /* struct drm_psb_private * dev_priv =
1053 (struct drm_psb_private *)dev->dev_private; */
1054 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1055 struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
1056 /* struct drm_crtc_helper_funcs * crtc_funcs = crtc->helper_private; */
1057 int pipeA = (psb_intel_crtc->pipe == 0);
1058 uint32_t paletteReg;
1059 int i;
1060
1061 if (!crtc_state) {
1062 dev_dbg(dev->dev, "No crtc state\n");
1063 return;
1064 }
1065
1066 DRM_DEBUG(
1067 "current:(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
1068 REG_READ(pipeA ? DSPACNTR : DSPBCNTR),
1069 REG_READ(pipeA ? PIPEACONF : PIPEBCONF),
1070 REG_READ(pipeA ? PIPEASRC : PIPEBSRC),
1071 REG_READ(pipeA ? FPA0 : FPB0),
1072 REG_READ(pipeA ? FPA1 : FPB1),
1073 REG_READ(pipeA ? DPLL_A : DPLL_B),
1074 REG_READ(pipeA ? HTOTAL_A : HTOTAL_B),
1075 REG_READ(pipeA ? HBLANK_A : HBLANK_B),
1076 REG_READ(pipeA ? HSYNC_A : HSYNC_B),
1077 REG_READ(pipeA ? VTOTAL_A : VTOTAL_B),
1078 REG_READ(pipeA ? VBLANK_A : VBLANK_B),
1079 REG_READ(pipeA ? VSYNC_A : VSYNC_B),
1080 REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE),
1081 REG_READ(pipeA ? DSPASIZE : DSPBSIZE),
1082 REG_READ(pipeA ? DSPAPOS : DSPBPOS),
1083 REG_READ(pipeA ? DSPABASE : DSPBBASE)
1084 );
1085
1086 DRM_DEBUG(
1087 "saved: (%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
1088 crtc_state->saveDSPCNTR,
1089 crtc_state->savePIPECONF,
1090 crtc_state->savePIPESRC,
1091 crtc_state->saveFP0,
1092 crtc_state->saveFP1,
1093 crtc_state->saveDPLL,
1094 crtc_state->saveHTOTAL,
1095 crtc_state->saveHBLANK,
1096 crtc_state->saveHSYNC,
1097 crtc_state->saveVTOTAL,
1098 crtc_state->saveVBLANK,
1099 crtc_state->saveVSYNC,
1100 crtc_state->saveDSPSTRIDE,
1101 crtc_state->saveDSPSIZE,
1102 crtc_state->saveDSPPOS,
1103 crtc_state->saveDSPBASE
1104 );
1105
1106
1107 if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
1108 REG_WRITE(pipeA ? DPLL_A : DPLL_B,
1109 crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
1110 REG_READ(pipeA ? DPLL_A : DPLL_B);
1111 DRM_DEBUG("write dpll: %x\n",
1112 REG_READ(pipeA ? DPLL_A : DPLL_B));
1113 udelay(150);
1114 }
1115
1116 REG_WRITE(pipeA ? FPA0 : FPB0, crtc_state->saveFP0);
1117 REG_READ(pipeA ? FPA0 : FPB0);
1118
1119 REG_WRITE(pipeA ? FPA1 : FPB1, crtc_state->saveFP1);
1120 REG_READ(pipeA ? FPA1 : FPB1);
1121
1122 REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL);
1123 REG_READ(pipeA ? DPLL_A : DPLL_B);
1124 udelay(150);
1125
1126 REG_WRITE(pipeA ? HTOTAL_A : HTOTAL_B, crtc_state->saveHTOTAL);
1127 REG_WRITE(pipeA ? HBLANK_A : HBLANK_B, crtc_state->saveHBLANK);
1128 REG_WRITE(pipeA ? HSYNC_A : HSYNC_B, crtc_state->saveHSYNC);
1129 REG_WRITE(pipeA ? VTOTAL_A : VTOTAL_B, crtc_state->saveVTOTAL);
1130 REG_WRITE(pipeA ? VBLANK_A : VBLANK_B, crtc_state->saveVBLANK);
1131 REG_WRITE(pipeA ? VSYNC_A : VSYNC_B, crtc_state->saveVSYNC);
1132 REG_WRITE(pipeA ? DSPASTRIDE : DSPBSTRIDE, crtc_state->saveDSPSTRIDE);
1133
1134 REG_WRITE(pipeA ? DSPASIZE : DSPBSIZE, crtc_state->saveDSPSIZE);
1135 REG_WRITE(pipeA ? DSPAPOS : DSPBPOS, crtc_state->saveDSPPOS);
1136
1137 REG_WRITE(pipeA ? PIPEASRC : PIPEBSRC, crtc_state->savePIPESRC);
1138 REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
1139 REG_WRITE(pipeA ? PIPEACONF : PIPEBCONF, crtc_state->savePIPECONF);
1140
1141 cdv_intel_wait_for_vblank(dev);
1142
1143 REG_WRITE(pipeA ? DSPACNTR : DSPBCNTR, crtc_state->saveDSPCNTR);
1144 REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
1145
1146 cdv_intel_wait_for_vblank(dev);
1147
1148 paletteReg = pipeA ? PALETTE_A : PALETTE_B;
1149 for (i = 0; i < 256; ++i)
1150 REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
1151}
1152
1153static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
1154 struct drm_file *file_priv,
1155 uint32_t handle,
1156 uint32_t width, uint32_t height)
1157{
1158 struct drm_device *dev = crtc->dev;
1159 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1160 int pipe = psb_intel_crtc->pipe;
1161 uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
1162 uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
1163 uint32_t temp;
1164 size_t addr = 0;
1165 struct gtt_range *gt;
1166 struct drm_gem_object *obj;
1167 int ret;
1168
1169	/* If we want to turn off the cursor, ignore width and height */
1170 if (!handle) {
1171 /* turn off the cursor */
1172 temp = CURSOR_MODE_DISABLE;
1173
1174 if (gma_power_begin(dev, false)) {
1175 REG_WRITE(control, temp);
1176 REG_WRITE(base, 0);
1177 gma_power_end(dev);
1178 }
1179
1180 /* unpin the old GEM object */
1181 if (psb_intel_crtc->cursor_obj) {
1182 gt = container_of(psb_intel_crtc->cursor_obj,
1183 struct gtt_range, gem);
1184 psb_gtt_unpin(gt);
1185 drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
1186 psb_intel_crtc->cursor_obj = NULL;
1187 }
1188
1189 return 0;
1190 }
1191
1192 /* Currently we only support 64x64 cursors */
1193 if (width != 64 || height != 64) {
1194 dev_dbg(dev->dev, "we currently only support 64x64 cursors\n");
1195 return -EINVAL;
1196 }
1197
1198 obj = drm_gem_object_lookup(dev, file_priv, handle);
1199 if (!obj)
1200 return -ENOENT;
1201
1202 if (obj->size < width * height * 4) {
1203		dev_dbg(dev->dev, "buffer is too small\n");
1204 return -ENOMEM;
1205 }
1206
1207 gt = container_of(obj, struct gtt_range, gem);
1208
1209 /* Pin the memory into the GTT */
1210 ret = psb_gtt_pin(gt);
1211 if (ret) {
1212 dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
1213 return ret;
1214 }
1215
1216 addr = gt->offset; /* Or resource.start ??? */
1217
1218 psb_intel_crtc->cursor_addr = addr;
1219
1220 temp = 0;
1221 /* set the pipe for the cursor */
1222 temp |= (pipe << 28);
1223 temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
1224
1225 if (gma_power_begin(dev, false)) {
1226 REG_WRITE(control, temp);
1227 REG_WRITE(base, addr);
1228 gma_power_end(dev);
1229 }
1230
1231	/* unpin the old GEM object, then track the new one */
1232	if (psb_intel_crtc->cursor_obj) {
1233		gt = container_of(psb_intel_crtc->cursor_obj,
1234					struct gtt_range, gem);
1235		psb_gtt_unpin(gt);
1236		drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
1237	}
1238	psb_intel_crtc->cursor_obj = obj;
1239	return 0;
1240}
1241
1242static int cdv_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
1243{
1244 struct drm_device *dev = crtc->dev;
1245 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1246 int pipe = psb_intel_crtc->pipe;
1247 uint32_t temp = 0;
1248 uint32_t adder;
1249
1250
1251 if (x < 0) {
1252 temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
1253 x = -x;
1254 }
1255 if (y < 0) {
1256 temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
1257 y = -y;
1258 }
1259
1260 temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
1261 temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
1262
1263 adder = psb_intel_crtc->cursor_addr;
1264
1265 if (gma_power_begin(dev, false)) {
1266 REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
1267 REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder);
1268 gma_power_end(dev);
1269 }
1270 return 0;
1271}
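
The position packing above stores each axis as sign-plus-magnitude in its own 16-bit half of the position register. The following is a minimal host-side sketch of that packing; the EX_CURSOR_* values are assumptions modelled on the usual layout (11-bit magnitude, sign at bit 15, X in the low half, Y in the high half), not quotes from psb_intel_reg.h.

#include <stdio.h>
#include <stdint.h>

/* Assumed layout for illustration only; see psb_intel_reg.h for the
 * authoritative CURSOR_POS_* definitions. */
#define EX_CURSOR_POS_MASK	0x07ffu
#define EX_CURSOR_POS_SIGN	0x8000u
#define EX_CURSOR_X_SHIFT	0
#define EX_CURSOR_Y_SHIFT	16

static uint32_t example_pack_cursor_pos(int x, int y)
{
	uint32_t temp = 0;

	if (x < 0) {
		temp |= (EX_CURSOR_POS_SIGN << EX_CURSOR_X_SHIFT);
		x = -x;
	}
	if (y < 0) {
		temp |= (EX_CURSOR_POS_SIGN << EX_CURSOR_Y_SHIFT);
		y = -y;
	}
	temp |= ((x & EX_CURSOR_POS_MASK) << EX_CURSOR_X_SHIFT);
	temp |= ((y & EX_CURSOR_POS_MASK) << EX_CURSOR_Y_SHIFT);
	return temp;
}

int main(void)
{
	/* x = -10, y = 20: sign bit set in the X field only */
	printf("0x%08x\n", (unsigned int)example_pack_cursor_pos(-10, 20));
	return 0;
}
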
1272
1273static void cdv_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
1274 u16 *green, u16 *blue, uint32_t start, uint32_t size)
1275{
1276 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1277 int i;
1278 int end = (start + size > 256) ? 256 : start + size;
1279
1280 for (i = start; i < end; i++) {
1281 psb_intel_crtc->lut_r[i] = red[i] >> 8;
1282 psb_intel_crtc->lut_g[i] = green[i] >> 8;
1283 psb_intel_crtc->lut_b[i] = blue[i] >> 8;
1284 }
1285
1286 cdv_intel_crtc_load_lut(crtc);
1287}
1288
1289static int cdv_crtc_set_config(struct drm_mode_set *set)
1290{
1291 int ret = 0;
1292 struct drm_device *dev = set->crtc->dev;
1293 struct drm_psb_private *dev_priv = dev->dev_private;
1294
1295 if (!dev_priv->rpm_enabled)
1296 return drm_crtc_helper_set_config(set);
1297
1298 pm_runtime_forbid(&dev->pdev->dev);
1299
1300 ret = drm_crtc_helper_set_config(set);
1301
1302 pm_runtime_allow(&dev->pdev->dev);
1303
1304 return ret;
1305}
1306
1307/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
1308
1309/* FIXME: why are we using this, should it be cdv_ in this tree ? */
1310
1311static void i8xx_clock(int refclk, struct cdv_intel_clock_t *clock)
1312{
1313 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
1314 clock->p = clock->p1 * clock->p2;
1315 clock->vco = refclk * clock->m / (clock->n + 2);
1316 clock->dot = clock->vco / clock->p;
1317}
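
The divisor arithmetic in i8xx_clock() is easy to sanity-check on its own. Below is a standalone host-side sketch that mirrors the formula above; the divisor values are invented for illustration and are not taken from any real register dump.

/*
 * Host-side check of the i8xx pixel clock formula:
 *   m   = 5 * (m1 + 2) + (m2 + 2)
 *   p   = p1 * p2
 *   vco = refclk * m / (n + 2)
 *   dot = vco / p
 */
#include <stdio.h>

struct example_clock {
	int m1, m2, n, p1, p2;
	int m, p, vco, dot;
};

static void example_i8xx_clock(int refclk, struct example_clock *clock)
{
	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / (clock->n + 2);
	clock->dot = clock->vco / clock->p;
}

int main(void)
{
	/* hypothetical divisors; refclk given in kHz as in the driver */
	struct example_clock c = { .m1 = 12, .m2 = 8, .n = 3, .p1 = 2, .p2 = 4 };

	example_i8xx_clock(96000, &c);
	printf("m=%d p=%d vco=%d kHz dot=%d kHz\n", c.m, c.p, c.vco, c.dot);
	return 0;
}
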
1318
1319/* Returns the clock of the currently programmed mode of the given pipe. */
1320static int cdv_intel_crtc_clock_get(struct drm_device *dev,
1321 struct drm_crtc *crtc)
1322{
1323 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1324 int pipe = psb_intel_crtc->pipe;
1325 u32 dpll;
1326 u32 fp;
1327 struct cdv_intel_clock_t clock;
1328 bool is_lvds;
1329 struct drm_psb_private *dev_priv = dev->dev_private;
1330
1331 if (gma_power_begin(dev, false)) {
1332 dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B);
1333 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
1334 fp = REG_READ((pipe == 0) ? FPA0 : FPB0);
1335 else
1336 fp = REG_READ((pipe == 0) ? FPA1 : FPB1);
1337 is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
1338 gma_power_end(dev);
1339 } else {
1340 dpll = (pipe == 0) ?
1341 dev_priv->saveDPLL_A : dev_priv->saveDPLL_B;
1342
1343 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
1344 fp = (pipe == 0) ?
1345 dev_priv->saveFPA0 :
1346 dev_priv->saveFPB0;
1347 else
1348 fp = (pipe == 0) ?
1349 dev_priv->saveFPA1 :
1350 dev_priv->saveFPB1;
1351
1352 is_lvds = (pipe == 1) && (dev_priv->saveLVDS & LVDS_PORT_EN);
1353 }
1354
1355 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
1356 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
1357 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
1358
1359 if (is_lvds) {
1360 clock.p1 =
1361 ffs((dpll &
1362 DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
1363 DPLL_FPA01_P1_POST_DIV_SHIFT);
1364 if (clock.p1 == 0) {
1365 clock.p1 = 4;
1366 dev_err(dev->dev, "PLL %d\n", dpll);
1367 }
1368 clock.p2 = 14;
1369
1370 if ((dpll & PLL_REF_INPUT_MASK) ==
1371 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
1372 /* XXX: might not be 66MHz */
1373 i8xx_clock(66000, &clock);
1374 } else
1375 i8xx_clock(48000, &clock);
1376 } else {
1377 if (dpll & PLL_P1_DIVIDE_BY_TWO)
1378 clock.p1 = 2;
1379 else {
1380 clock.p1 =
1381 ((dpll &
1382 DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
1383 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
1384 }
1385 if (dpll & PLL_P2_DIVIDE_BY_4)
1386 clock.p2 = 4;
1387 else
1388 clock.p2 = 2;
1389
1390 i8xx_clock(48000, &clock);
1391 }
1392
1393 /* XXX: It would be nice to validate the clocks, but we can't reuse
1394 * i830PllIsValid() because it relies on the xf86_config connector
1395 * configuration being accurate, which it isn't necessarily.
1396 */
1397
1398 return clock.dot;
1399}
1400
1401/** Returns the currently programmed mode of the given pipe. */
1402struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
1403 struct drm_crtc *crtc)
1404{
1405 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1406 int pipe = psb_intel_crtc->pipe;
1407 struct drm_display_mode *mode;
1408 int htot;
1409 int hsync;
1410 int vtot;
1411 int vsync;
1412 struct drm_psb_private *dev_priv = dev->dev_private;
1413
1414 if (gma_power_begin(dev, false)) {
1415 htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
1416 hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
1417 vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
1418 vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
1419 gma_power_end(dev);
1420 } else {
1421 htot = (pipe == 0) ?
1422 dev_priv->saveHTOTAL_A : dev_priv->saveHTOTAL_B;
1423 hsync = (pipe == 0) ?
1424 dev_priv->saveHSYNC_A : dev_priv->saveHSYNC_B;
1425 vtot = (pipe == 0) ?
1426 dev_priv->saveVTOTAL_A : dev_priv->saveVTOTAL_B;
1427 vsync = (pipe == 0) ?
1428 dev_priv->saveVSYNC_A : dev_priv->saveVSYNC_B;
1429 }
1430
1431 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
1432 if (!mode)
1433 return NULL;
1434
1435 mode->clock = cdv_intel_crtc_clock_get(dev, crtc);
1436 mode->hdisplay = (htot & 0xffff) + 1;
1437 mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
1438 mode->hsync_start = (hsync & 0xffff) + 1;
1439 mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
1440 mode->vdisplay = (vtot & 0xffff) + 1;
1441 mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
1442 mode->vsync_start = (vsync & 0xffff) + 1;
1443 mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
1444
1445 drm_mode_set_name(mode);
1446 drm_mode_set_crtcinfo(mode, 0);
1447
1448 return mode;
1449}
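
The decode above depends on each timing register holding (active or sync-start value - 1) in its low 16 bits and (total or sync-end value - 1) in its high 16 bits. A small host-side check of that unpacking, using an invented HTOTAL-style value for a 1024-wide mode:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* invented register value: hdisplay 1024, htotal 1344 */
	uint32_t htot = ((1344u - 1) << 16) | (1024u - 1);

	int hdisplay = (htot & 0xffff) + 1;
	int htotal = ((htot & 0xffff0000) >> 16) + 1;

	printf("hdisplay=%d htotal=%d\n", hdisplay, htotal);	/* 1024 1344 */
	return 0;
}
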
1450
1451static void cdv_intel_crtc_destroy(struct drm_crtc *crtc)
1452{
1453 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1454
1455 kfree(psb_intel_crtc->crtc_state);
1456 drm_crtc_cleanup(crtc);
1457 kfree(psb_intel_crtc);
1458}
1459
1460const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
1461 .dpms = cdv_intel_crtc_dpms,
1462 .mode_fixup = cdv_intel_crtc_mode_fixup,
1463 .mode_set = cdv_intel_crtc_mode_set,
1464 .mode_set_base = cdv_intel_pipe_set_base,
1465 .prepare = cdv_intel_crtc_prepare,
1466 .commit = cdv_intel_crtc_commit,
1467};
1468
1469const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
1470 .save = cdv_intel_crtc_save,
1471 .restore = cdv_intel_crtc_restore,
1472 .cursor_set = cdv_intel_crtc_cursor_set,
1473 .cursor_move = cdv_intel_crtc_cursor_move,
1474 .gamma_set = cdv_intel_crtc_gamma_set,
1475 .set_config = cdv_crtc_set_config,
1476 .destroy = cdv_intel_crtc_destroy,
1477};
1478
1479/*
1480 * Set the default value of cursor control and base register
1481 * to zero. This is a workaround for h/w defect on oaktrail
1482 */
1483void cdv_intel_cursor_init(struct drm_device *dev, int pipe)
1484{
1485 uint32_t control;
1486 uint32_t base;
1487
1488 switch (pipe) {
1489 case 0:
1490 control = CURACNTR;
1491 base = CURABASE;
1492 break;
1493 case 1:
1494 control = CURBCNTR;
1495 base = CURBBASE;
1496 break;
1497 case 2:
1498 control = CURCCNTR;
1499 base = CURCBASE;
1500 break;
1501 default:
1502 return;
1503 }
1504
1505 REG_WRITE(control, 0);
1506 REG_WRITE(base, 0);
1507}
1508
diff --git a/drivers/staging/gma500/cdv_intel_hdmi.c b/drivers/staging/gma500/cdv_intel_hdmi.c
deleted file mode 100644
index cbca2b0c7d58..000000000000
--- a/drivers/staging/gma500/cdv_intel_hdmi.c
+++ /dev/null
@@ -1,376 +0,0 @@
1/*
2 * Copyright © 2006-2011 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * jim liu <jim.liu@intel.com>
25 *
26 * FIXME:
27 * We should probably make this generic and share it with Medfield
28 */
29
30#include <drm/drmP.h>
31#include <drm/drm.h>
32#include <drm/drm_crtc.h>
33#include <drm/drm_edid.h>
34#include "psb_intel_drv.h"
35#include "psb_drv.h"
36#include "psb_intel_reg.h"
37#include <linux/pm_runtime.h>
38
39/* hdmi control bits */
40#define HDMI_NULL_PACKETS_DURING_VSYNC (1 << 9)
41#define HDMI_BORDER_ENABLE (1 << 7)
42#define HDMI_AUDIO_ENABLE (1 << 6)
43#define HDMI_VSYNC_ACTIVE_HIGH (1 << 4)
44#define HDMI_HSYNC_ACTIVE_HIGH (1 << 3)
45/* hdmi-b control bits */
46#define HDMIB_PIPE_B_SELECT (1 << 30)
47
48
49struct mid_intel_hdmi_priv {
50 u32 hdmi_reg;
51 u32 save_HDMIB;
52 bool has_hdmi_sink;
53 bool has_hdmi_audio;
54	/* Should be set when a hotplug event is detected */
55 bool hdmi_device_connected;
56 struct mdfld_hdmi_i2c *i2c_bus;
57 struct i2c_adapter *hdmi_i2c_adapter; /* for control functions */
58 struct drm_device *dev;
59};
60
61static void cdv_hdmi_mode_set(struct drm_encoder *encoder,
62 struct drm_display_mode *mode,
63 struct drm_display_mode *adjusted_mode)
64{
65 struct drm_device *dev = encoder->dev;
66 struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
67 struct mid_intel_hdmi_priv *hdmi_priv = output->dev_priv;
68 u32 hdmib;
69 struct drm_crtc *crtc = encoder->crtc;
70 struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
71
72 hdmib = (2 << 10);
73
74 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
75 hdmib |= HDMI_VSYNC_ACTIVE_HIGH;
76 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
77 hdmib |= HDMI_HSYNC_ACTIVE_HIGH;
78
79 if (intel_crtc->pipe == 1)
80 hdmib |= HDMIB_PIPE_B_SELECT;
81
82 if (hdmi_priv->has_hdmi_audio) {
83 hdmib |= HDMI_AUDIO_ENABLE;
84 hdmib |= HDMI_NULL_PACKETS_DURING_VSYNC;
85 }
86
87 REG_WRITE(hdmi_priv->hdmi_reg, hdmib);
88 REG_READ(hdmi_priv->hdmi_reg);
89}
90
91static bool cdv_hdmi_mode_fixup(struct drm_encoder *encoder,
92 struct drm_display_mode *mode,
93 struct drm_display_mode *adjusted_mode)
94{
95 return true;
96}
97
98static void cdv_hdmi_dpms(struct drm_encoder *encoder, int mode)
99{
100 struct drm_device *dev = encoder->dev;
101 struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
102 struct mid_intel_hdmi_priv *hdmi_priv = output->dev_priv;
103 u32 hdmib;
104
105 hdmib = REG_READ(hdmi_priv->hdmi_reg);
106
107 if (mode != DRM_MODE_DPMS_ON)
108 REG_WRITE(hdmi_priv->hdmi_reg, hdmib & ~HDMIB_PORT_EN);
109 else
110 REG_WRITE(hdmi_priv->hdmi_reg, hdmib | HDMIB_PORT_EN);
111 REG_READ(hdmi_priv->hdmi_reg);
112}
113
114static void cdv_hdmi_save(struct drm_connector *connector)
115{
116 struct drm_device *dev = connector->dev;
117 struct psb_intel_output *output = to_psb_intel_output(connector);
118 struct mid_intel_hdmi_priv *hdmi_priv = output->dev_priv;
119
120 hdmi_priv->save_HDMIB = REG_READ(hdmi_priv->hdmi_reg);
121}
122
123static void cdv_hdmi_restore(struct drm_connector *connector)
124{
125 struct drm_device *dev = connector->dev;
126 struct psb_intel_output *output = to_psb_intel_output(connector);
127 struct mid_intel_hdmi_priv *hdmi_priv = output->dev_priv;
128
129 REG_WRITE(hdmi_priv->hdmi_reg, hdmi_priv->save_HDMIB);
130 REG_READ(hdmi_priv->hdmi_reg);
131}
132
133static enum drm_connector_status cdv_hdmi_detect(
134 struct drm_connector *connector, bool force)
135{
136 struct psb_intel_output *psb_intel_output =
137 to_psb_intel_output(connector);
138 struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_output->dev_priv;
139 struct edid *edid = NULL;
140 enum drm_connector_status status = connector_status_disconnected;
141
142 edid = drm_get_edid(&psb_intel_output->base,
143 psb_intel_output->hdmi_i2c_adapter);
144
145 hdmi_priv->has_hdmi_sink = false;
146 hdmi_priv->has_hdmi_audio = false;
147 if (edid) {
148 if (edid->input & DRM_EDID_INPUT_DIGITAL) {
149 status = connector_status_connected;
150 hdmi_priv->has_hdmi_sink =
151 drm_detect_hdmi_monitor(edid);
152 hdmi_priv->has_hdmi_audio =
153 drm_detect_monitor_audio(edid);
154 }
155
156 psb_intel_output->base.display_info.raw_edid = NULL;
157 kfree(edid);
158 }
159 return status;
160}
161
162static int cdv_hdmi_set_property(struct drm_connector *connector,
163 struct drm_property *property,
164 uint64_t value)
165{
166 struct drm_encoder *encoder = connector->encoder;
167
168 if (!strcmp(property->name, "scaling mode") && encoder) {
169 struct psb_intel_crtc *crtc = to_psb_intel_crtc(encoder->crtc);
170 bool centre;
171 uint64_t curValue;
172
173 if (!crtc)
174 return -1;
175
176 switch (value) {
177 case DRM_MODE_SCALE_FULLSCREEN:
178 break;
179 case DRM_MODE_SCALE_NO_SCALE:
180 break;
181 case DRM_MODE_SCALE_ASPECT:
182 break;
183 default:
184 return -1;
185 }
186
187 if (drm_connector_property_get_value(connector,
188 property, &curValue))
189 return -1;
190
191 if (curValue == value)
192 return 0;
193
194 if (drm_connector_property_set_value(connector,
195 property, value))
196 return -1;
197
198 centre = (curValue == DRM_MODE_SCALE_NO_SCALE) ||
199 (value == DRM_MODE_SCALE_NO_SCALE);
200
201 if (crtc->saved_mode.hdisplay != 0 &&
202 crtc->saved_mode.vdisplay != 0) {
203 if (centre) {
204 if (!drm_crtc_helper_set_mode(encoder->crtc, &crtc->saved_mode,
205 encoder->crtc->x, encoder->crtc->y, encoder->crtc->fb))
206 return -1;
207 } else {
208 struct drm_encoder_helper_funcs *helpers
209 = encoder->helper_private;
210 helpers->mode_set(encoder, &crtc->saved_mode,
211 &crtc->saved_adjusted_mode);
212 }
213 }
214 }
215 return 0;
216}
217
218/*
219 * Return the list of HDMI DDC modes if available.
220 */
221static int cdv_hdmi_get_modes(struct drm_connector *connector)
222{
223 struct psb_intel_output *psb_intel_output =
224 to_psb_intel_output(connector);
225 struct edid *edid = NULL;
226 int ret = 0;
227
228 edid = drm_get_edid(&psb_intel_output->base,
229 psb_intel_output->hdmi_i2c_adapter);
230 if (edid) {
231 drm_mode_connector_update_edid_property(&psb_intel_output->
232 base, edid);
233 ret = drm_add_edid_modes(&psb_intel_output->base, edid);
234 kfree(edid);
235 }
236 return ret;
237}
238
239static int cdv_hdmi_mode_valid(struct drm_connector *connector,
240 struct drm_display_mode *mode)
241{
242
243 if (mode->clock > 165000)
244 return MODE_CLOCK_HIGH;
245 if (mode->clock < 20000)
246		return MODE_CLOCK_LOW;
247
248 /* just in case */
249 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
250 return MODE_NO_DBLESCAN;
251
252 /* just in case */
253 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
254 return MODE_NO_INTERLACE;
255
256 /*
257 * FIXME: for now we limit the size to 1680x1050 on CDV, otherwise it
258 * will go beyond the stolen memory size allocated to the framebuffer
259 */
260 if (mode->hdisplay > 1680)
261 return MODE_PANEL;
262 if (mode->vdisplay > 1050)
263 return MODE_PANEL;
264 return MODE_OK;
265}
266
267static void cdv_hdmi_destroy(struct drm_connector *connector)
268{
269 struct psb_intel_output *psb_intel_output =
270 to_psb_intel_output(connector);
271
272 if (psb_intel_output->ddc_bus)
273 psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
274 drm_sysfs_connector_remove(connector);
275 drm_connector_cleanup(connector);
276 kfree(connector);
277}
278
279static const struct drm_encoder_helper_funcs cdv_hdmi_helper_funcs = {
280 .dpms = cdv_hdmi_dpms,
281 .mode_fixup = cdv_hdmi_mode_fixup,
282 .prepare = psb_intel_encoder_prepare,
283 .mode_set = cdv_hdmi_mode_set,
284 .commit = psb_intel_encoder_commit,
285};
286
287static const struct drm_connector_helper_funcs
288 cdv_hdmi_connector_helper_funcs = {
289 .get_modes = cdv_hdmi_get_modes,
290 .mode_valid = cdv_hdmi_mode_valid,
291 .best_encoder = psb_intel_best_encoder,
292};
293
294static const struct drm_connector_funcs cdv_hdmi_connector_funcs = {
295 .dpms = drm_helper_connector_dpms,
296 .save = cdv_hdmi_save,
297 .restore = cdv_hdmi_restore,
298 .detect = cdv_hdmi_detect,
299 .fill_modes = drm_helper_probe_single_connector_modes,
300 .set_property = cdv_hdmi_set_property,
301 .destroy = cdv_hdmi_destroy,
302};
303
304void cdv_hdmi_init(struct drm_device *dev,
305 struct psb_intel_mode_device *mode_dev, int reg)
306{
307 struct psb_intel_output *psb_intel_output;
308 struct drm_connector *connector;
309 struct drm_encoder *encoder;
310 struct mid_intel_hdmi_priv *hdmi_priv;
311 int ddc_bus;
312
313 psb_intel_output = kzalloc(sizeof(struct psb_intel_output) +
314 sizeof(struct mid_intel_hdmi_priv), GFP_KERNEL);
315 if (!psb_intel_output)
316 return;
317
318 hdmi_priv = (struct mid_intel_hdmi_priv *)(psb_intel_output + 1);
319 psb_intel_output->mode_dev = mode_dev;
320 connector = &psb_intel_output->base;
321 encoder = &psb_intel_output->enc;
322 drm_connector_init(dev, &psb_intel_output->base,
323 &cdv_hdmi_connector_funcs,
324 DRM_MODE_CONNECTOR_DVID);
325
326 drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_lvds_enc_funcs,
327 DRM_MODE_ENCODER_TMDS);
328
329 drm_mode_connector_attach_encoder(&psb_intel_output->base,
330 &psb_intel_output->enc);
331 psb_intel_output->type = INTEL_OUTPUT_HDMI;
332 hdmi_priv->hdmi_reg = reg;
333 hdmi_priv->has_hdmi_sink = false;
334 psb_intel_output->dev_priv = hdmi_priv;
335
336 drm_encoder_helper_add(encoder, &cdv_hdmi_helper_funcs);
337 drm_connector_helper_add(connector,
338 &cdv_hdmi_connector_helper_funcs);
339 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
340 connector->interlace_allowed = false;
341 connector->doublescan_allowed = false;
342
343 drm_connector_attach_property(connector,
344 dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN);
345
346 switch (reg) {
347 case SDVOB:
348 ddc_bus = GPIOE;
349 break;
350 case SDVOC:
351 ddc_bus = GPIOD;
352 break;
353 default:
354 DRM_ERROR("unknown reg 0x%x for HDMI\n", reg);
355 goto failed_ddc;
356 break;
357 }
358
359 psb_intel_output->ddc_bus = psb_intel_i2c_create(dev,
360 ddc_bus, (reg == SDVOB) ? "HDMIB" : "HDMIC");
361
362 if (!psb_intel_output->ddc_bus) {
363 dev_err(dev->dev, "No ddc adapter available!\n");
364 goto failed_ddc;
365 }
366 psb_intel_output->hdmi_i2c_adapter =
367 &(psb_intel_output->ddc_bus->adapter);
368 hdmi_priv->dev = dev;
369 drm_sysfs_connector_add(connector);
370 return;
371
372failed_ddc:
373 drm_encoder_cleanup(&psb_intel_output->enc);
374 drm_connector_cleanup(&psb_intel_output->base);
375 kfree(psb_intel_output);
376}
diff --git a/drivers/staging/gma500/cdv_intel_lvds.c b/drivers/staging/gma500/cdv_intel_lvds.c
deleted file mode 100644
index 988b2d0acf43..000000000000
--- a/drivers/staging/gma500/cdv_intel_lvds.c
+++ /dev/null
@@ -1,721 +0,0 @@
1/*
2 * Copyright © 2006-2011 Intel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Authors:
18 * Eric Anholt <eric@anholt.net>
19 * Dave Airlie <airlied@linux.ie>
20 * Jesse Barnes <jesse.barnes@intel.com>
21 */
22
23#include <linux/i2c.h>
24#include <linux/dmi.h>
25#include <drm/drmP.h>
26
27#include "intel_bios.h"
28#include "psb_drv.h"
29#include "psb_intel_drv.h"
30#include "psb_intel_reg.h"
31#include "power.h"
32#include <linux/pm_runtime.h>
33#include "cdv_device.h"
34
35/**
36 * LVDS I2C backlight control macros
37 */
38#define BRIGHTNESS_MAX_LEVEL 100
39#define BRIGHTNESS_MASK 0xFF
40#define BLC_I2C_TYPE 0x01
41#define BLC_PWM_TYPT 0x02
42
43#define BLC_POLARITY_NORMAL 0
44#define BLC_POLARITY_INVERSE 1
45
46#define PSB_BLC_MAX_PWM_REG_FREQ (0xFFFE)
47#define PSB_BLC_MIN_PWM_REG_FREQ (0x2)
48#define PSB_BLC_PWM_PRECISION_FACTOR (10)
49#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
50#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
51
52struct cdv_intel_lvds_priv {
53 /**
54	 * Saved LVDS output states
55 */
56 uint32_t savePP_ON;
57 uint32_t savePP_OFF;
58 uint32_t saveLVDS;
59 uint32_t savePP_CONTROL;
60 uint32_t savePP_CYCLE;
61 uint32_t savePFIT_CONTROL;
62 uint32_t savePFIT_PGM_RATIOS;
63 uint32_t saveBLC_PWM_CTL;
64};
65
66/*
67 * Returns the maximum level of the backlight duty cycle field.
68 */
69static u32 cdv_intel_lvds_get_max_backlight(struct drm_device *dev)
70{
71 struct drm_psb_private *dev_priv = dev->dev_private;
72 u32 retval;
73
74 if (gma_power_begin(dev, false)) {
75 retval = ((REG_READ(BLC_PWM_CTL) &
76 BACKLIGHT_MODULATION_FREQ_MASK) >>
77 BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
78
79 gma_power_end(dev);
80 } else
81 retval = ((dev_priv->saveBLC_PWM_CTL &
82 BACKLIGHT_MODULATION_FREQ_MASK) >>
83 BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
84
85 return retval;
86}
87
88/*
89 * Set LVDS backlight level by I2C command
90 */
91static int cdv_lvds_i2c_set_brightness(struct drm_device *dev,
92 unsigned int level)
93{
94 struct drm_psb_private *dev_priv = dev->dev_private;
95 struct psb_intel_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus;
96 u8 out_buf[2];
97 unsigned int blc_i2c_brightness;
98
99 struct i2c_msg msgs[] = {
100 {
101 .addr = lvds_i2c_bus->slave_addr,
102 .flags = 0,
103 .len = 2,
104 .buf = out_buf,
105 }
106 };
107
108 blc_i2c_brightness = BRIGHTNESS_MASK & ((unsigned int)level *
109 BRIGHTNESS_MASK /
110 BRIGHTNESS_MAX_LEVEL);
111
112 if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
113 blc_i2c_brightness = BRIGHTNESS_MASK - blc_i2c_brightness;
114
115 out_buf[0] = dev_priv->lvds_bl->brightnesscmd;
116 out_buf[1] = (u8)blc_i2c_brightness;
117
118 if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1)
119 return 0;
120
121 DRM_ERROR("I2C transfer error\n");
122 return -1;
123}
124
125
126static int cdv_lvds_pwm_set_brightness(struct drm_device *dev, int level)
127{
128 struct drm_psb_private *dev_priv = dev->dev_private;
129
130 u32 max_pwm_blc;
131 u32 blc_pwm_duty_cycle;
132
133 max_pwm_blc = cdv_intel_lvds_get_max_backlight(dev);
134
135	/* BLC_PWM_CTL should be initialized during backlight device init */
136 BUG_ON((max_pwm_blc & PSB_BLC_MAX_PWM_REG_FREQ) == 0);
137
138 blc_pwm_duty_cycle = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
139
140 if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
141 blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle;
142
143 blc_pwm_duty_cycle &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
144 REG_WRITE(BLC_PWM_CTL,
145 (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
146 (blc_pwm_duty_cycle));
147
148 return 0;
149}
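
Both brightness paths above reduce to scaling a 0..BRIGHTNESS_MAX_LEVEL request into the controller's range: an 8-bit value for the I2C command and a duty cycle bounded by cdv_intel_lvds_get_max_backlight() for PWM. A host-side sketch of that scaling with example numbers (the max_pwm_blc value is invented):

#include <stdio.h>

#define EX_BRIGHTNESS_MAX_LEVEL	100
#define EX_BRIGHTNESS_MASK	0xFF

int main(void)
{
	unsigned int level = 60;		/* requested level, 0..100 */
	unsigned int max_pwm_blc = 0x1d4c;	/* example PWM range */

	unsigned int i2c_val = EX_BRIGHTNESS_MASK &
		(level * EX_BRIGHTNESS_MASK / EX_BRIGHTNESS_MAX_LEVEL);
	unsigned int pwm_duty = level * max_pwm_blc / EX_BRIGHTNESS_MAX_LEVEL;

	printf("i2c byte 0x%02x, pwm duty 0x%04x of 0x%04x\n",
	       i2c_val, pwm_duty, max_pwm_blc);
	return 0;
}
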
150
151/*
152 * Set LVDS backlight level either by I2C or PWM
153 */
154void cdv_intel_lvds_set_brightness(struct drm_device *dev, int level)
155{
156 struct drm_psb_private *dev_priv = dev->dev_private;
157
158 if (!dev_priv->lvds_bl) {
159 DRM_ERROR("NO LVDS Backlight Info\n");
160 return;
161 }
162
163 if (dev_priv->lvds_bl->type == BLC_I2C_TYPE)
164 cdv_lvds_i2c_set_brightness(dev, level);
165 else
166 cdv_lvds_pwm_set_brightness(dev, level);
167}
168
169/**
170 * Sets the backlight level.
171 *
172 * level backlight level, from 0 to cdv_intel_lvds_get_max_backlight().
173 */
174static void cdv_intel_lvds_set_backlight(struct drm_device *dev, int level)
175{
176 struct drm_psb_private *dev_priv = dev->dev_private;
177 u32 blc_pwm_ctl;
178
179 if (gma_power_begin(dev, false)) {
180 blc_pwm_ctl =
181 REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
182 REG_WRITE(BLC_PWM_CTL,
183 (blc_pwm_ctl |
184 (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
185 gma_power_end(dev);
186 } else {
187 blc_pwm_ctl = dev_priv->saveBLC_PWM_CTL &
188 ~BACKLIGHT_DUTY_CYCLE_MASK;
189 dev_priv->saveBLC_PWM_CTL = (blc_pwm_ctl |
190 (level << BACKLIGHT_DUTY_CYCLE_SHIFT));
191 }
192}
193
194/**
195 * Sets the power state for the panel.
196 */
197static void cdv_intel_lvds_set_power(struct drm_device *dev,
198 struct psb_intel_output *output, bool on)
199{
200 u32 pp_status;
201
202 if (!gma_power_begin(dev, true))
203 return;
204
205 if (on) {
206 REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
207 POWER_TARGET_ON);
208 do {
209 pp_status = REG_READ(PP_STATUS);
210 } while ((pp_status & PP_ON) == 0);
211
212 cdv_intel_lvds_set_backlight(dev,
213 output->
214 mode_dev->backlight_duty_cycle);
215 } else {
216 cdv_intel_lvds_set_backlight(dev, 0);
217
218 REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
219 ~POWER_TARGET_ON);
220 do {
221 pp_status = REG_READ(PP_STATUS);
222 } while (pp_status & PP_ON);
223 }
224 gma_power_end(dev);
225}
226
227static void cdv_intel_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
228{
229 struct drm_device *dev = encoder->dev;
230 struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
231 if (mode == DRM_MODE_DPMS_ON)
232 cdv_intel_lvds_set_power(dev, output, true);
233 else
234 cdv_intel_lvds_set_power(dev, output, false);
235 /* XXX: We never power down the LVDS pairs. */
236}
237
238static void cdv_intel_lvds_save(struct drm_connector *connector)
239{
240}
241
242static void cdv_intel_lvds_restore(struct drm_connector *connector)
243{
244}
245
246int cdv_intel_lvds_mode_valid(struct drm_connector *connector,
247 struct drm_display_mode *mode)
248{
249 struct psb_intel_output *psb_intel_output =
250 to_psb_intel_output(connector);
251 struct drm_display_mode *fixed_mode =
252 psb_intel_output->mode_dev->panel_fixed_mode;
253
254 /* just in case */
255 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
256 return MODE_NO_DBLESCAN;
257
258 /* just in case */
259 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
260 return MODE_NO_INTERLACE;
261
262 if (fixed_mode) {
263 if (mode->hdisplay > fixed_mode->hdisplay)
264 return MODE_PANEL;
265 if (mode->vdisplay > fixed_mode->vdisplay)
266 return MODE_PANEL;
267 }
268 return MODE_OK;
269}
270
271bool cdv_intel_lvds_mode_fixup(struct drm_encoder *encoder,
272 struct drm_display_mode *mode,
273 struct drm_display_mode *adjusted_mode)
274{
275 struct psb_intel_mode_device *mode_dev =
276 enc_to_psb_intel_output(encoder)->mode_dev;
277 struct drm_device *dev = encoder->dev;
278 struct drm_encoder *tmp_encoder;
279 struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode;
280
281 /* Should never happen!! */
282 list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list,
283 head) {
284 if (tmp_encoder != encoder
285 && tmp_encoder->crtc == encoder->crtc) {
286 printk(KERN_ERR "Can't enable LVDS and another "
287 "encoder on the same pipe\n");
288 return false;
289 }
290 }
291
292 /*
293 * If we have timings from the BIOS for the panel, put them in
294 * to the adjusted mode. The CRTC will be set up for this mode,
295 * with the panel scaling set up to source from the H/VDisplay
296 * of the original mode.
297 */
298 if (panel_fixed_mode != NULL) {
299 adjusted_mode->hdisplay = panel_fixed_mode->hdisplay;
300 adjusted_mode->hsync_start = panel_fixed_mode->hsync_start;
301 adjusted_mode->hsync_end = panel_fixed_mode->hsync_end;
302 adjusted_mode->htotal = panel_fixed_mode->htotal;
303 adjusted_mode->vdisplay = panel_fixed_mode->vdisplay;
304 adjusted_mode->vsync_start = panel_fixed_mode->vsync_start;
305 adjusted_mode->vsync_end = panel_fixed_mode->vsync_end;
306 adjusted_mode->vtotal = panel_fixed_mode->vtotal;
307 adjusted_mode->clock = panel_fixed_mode->clock;
308 drm_mode_set_crtcinfo(adjusted_mode,
309 CRTC_INTERLACE_HALVE_V);
310 }
311
312 /*
313 * XXX: It would be nice to support lower refresh rates on the
314 * panels to reduce power consumption, and perhaps match the
315 * user's requested refresh rate.
316 */
317
318 return true;
319}
320
321static void cdv_intel_lvds_prepare(struct drm_encoder *encoder)
322{
323 struct drm_device *dev = encoder->dev;
324 struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
325 struct psb_intel_mode_device *mode_dev = output->mode_dev;
326
327 if (!gma_power_begin(dev, true))
328 return;
329
330 mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
331 mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
332 BACKLIGHT_DUTY_CYCLE_MASK);
333
334 cdv_intel_lvds_set_power(dev, output, false);
335
336 gma_power_end(dev);
337}
338
339static void cdv_intel_lvds_commit(struct drm_encoder *encoder)
340{
341 struct drm_device *dev = encoder->dev;
342 struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
343 struct psb_intel_mode_device *mode_dev = output->mode_dev;
344
345 if (mode_dev->backlight_duty_cycle == 0)
346 mode_dev->backlight_duty_cycle =
347 cdv_intel_lvds_get_max_backlight(dev);
348
349 cdv_intel_lvds_set_power(dev, output, true);
350}
351
352static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
353 struct drm_display_mode *mode,
354 struct drm_display_mode *adjusted_mode)
355{
356 struct drm_device *dev = encoder->dev;
357 struct drm_psb_private *dev_priv = dev->dev_private;
358 u32 pfit_control;
359
360 /*
361 * The LVDS pin pair will already have been turned on in the
362 * cdv_intel_crtc_mode_set since it has a large impact on the DPLL
363 * settings.
364 */
365
366 /*
367 * Enable automatic panel scaling so that non-native modes fill the
368 * screen. Should be enabled before the pipe is enabled, according to
369 * register description and PRM.
370 */
371 if (mode->hdisplay != adjusted_mode->hdisplay ||
372 mode->vdisplay != adjusted_mode->vdisplay)
373 pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
374 HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
375 HORIZ_INTERP_BILINEAR);
376 else
377 pfit_control = 0;
378
379 if (dev_priv->lvds_dither)
380 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
381
382 REG_WRITE(PFIT_CONTROL, pfit_control);
383}
384
385/**
386 * Detect the LVDS connection.
387 *
388 * This always returns CONNECTOR_STATUS_CONNECTED.
389 * This connector should only have
390 * been set up if the LVDS was actually connected anyway.
391 */
392static enum drm_connector_status cdv_intel_lvds_detect(
393 struct drm_connector *connector, bool force)
394{
395 return connector_status_connected;
396}
397
398/**
399 * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
400 */
401static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
402{
403 struct drm_device *dev = connector->dev;
404 struct psb_intel_output *psb_intel_output =
405 to_psb_intel_output(connector);
406 struct psb_intel_mode_device *mode_dev =
407 psb_intel_output->mode_dev;
408 int ret;
409
410 ret = psb_intel_ddc_get_modes(psb_intel_output);
411
412 if (ret)
413 return ret;
414
415	/* Didn't get an EDID, so
416	 * set wide sync ranges so that all modes get
417	 * handed to valid_mode for checking
418 */
419 connector->display_info.min_vfreq = 0;
420 connector->display_info.max_vfreq = 200;
421 connector->display_info.min_hfreq = 0;
422 connector->display_info.max_hfreq = 200;
423 if (mode_dev->panel_fixed_mode != NULL) {
424 struct drm_display_mode *mode =
425 drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
426 drm_mode_probed_add(connector, mode);
427 return 1;
428 }
429
430 return 0;
431}
432
433/**
434 * cdv_intel_lvds_destroy - unregister and free LVDS structures
435 * @connector: connector to free
436 *
437 * Unregister the DDC bus for this connector then free the driver private
438 * structure.
439 */
440void cdv_intel_lvds_destroy(struct drm_connector *connector)
441{
442 struct psb_intel_output *psb_intel_output =
443 to_psb_intel_output(connector);
444
445 if (psb_intel_output->ddc_bus)
446 psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
447 drm_sysfs_connector_remove(connector);
448 drm_connector_cleanup(connector);
449 kfree(connector);
450}
451
452int cdv_intel_lvds_set_property(struct drm_connector *connector,
453 struct drm_property *property,
454 uint64_t value)
455{
456 struct drm_encoder *encoder = connector->encoder;
457
458 if (!strcmp(property->name, "scaling mode") && encoder) {
459 struct psb_intel_crtc *crtc =
460 to_psb_intel_crtc(encoder->crtc);
461 uint64_t curValue;
462
463 if (!crtc)
464 return -1;
465
466 switch (value) {
467 case DRM_MODE_SCALE_FULLSCREEN:
468 break;
469 case DRM_MODE_SCALE_NO_SCALE:
470 break;
471 case DRM_MODE_SCALE_ASPECT:
472 break;
473 default:
474 return -1;
475 }
476
477 if (drm_connector_property_get_value(connector,
478 property,
479 &curValue))
480 return -1;
481
482 if (curValue == value)
483 return 0;
484
485 if (drm_connector_property_set_value(connector,
486 property,
487 value))
488 return -1;
489
490 if (crtc->saved_mode.hdisplay != 0 &&
491 crtc->saved_mode.vdisplay != 0) {
492 if (!drm_crtc_helper_set_mode(encoder->crtc,
493 &crtc->saved_mode,
494 encoder->crtc->x,
495 encoder->crtc->y,
496 encoder->crtc->fb))
497 return -1;
498 }
499 } else if (!strcmp(property->name, "backlight") && encoder) {
500 if (drm_connector_property_set_value(connector,
501 property,
502 value))
503 return -1;
504 else {
505#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
506 struct drm_psb_private *dev_priv =
507 encoder->dev->dev_private;
508 struct backlight_device *bd =
509 dev_priv->backlight_device;
510 bd->props.brightness = value;
511 backlight_update_status(bd);
512#endif
513 }
514 } else if (!strcmp(property->name, "DPMS") && encoder) {
515 struct drm_encoder_helper_funcs *helpers =
516 encoder->helper_private;
517 helpers->dpms(encoder, value);
518 }
519 return 0;
520}
521
522static const struct drm_encoder_helper_funcs
523 cdv_intel_lvds_helper_funcs = {
524 .dpms = cdv_intel_lvds_encoder_dpms,
525 .mode_fixup = cdv_intel_lvds_mode_fixup,
526 .prepare = cdv_intel_lvds_prepare,
527 .mode_set = cdv_intel_lvds_mode_set,
528 .commit = cdv_intel_lvds_commit,
529};
530
531static const struct drm_connector_helper_funcs
532 cdv_intel_lvds_connector_helper_funcs = {
533 .get_modes = cdv_intel_lvds_get_modes,
534 .mode_valid = cdv_intel_lvds_mode_valid,
535 .best_encoder = psb_intel_best_encoder,
536};
537
538static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = {
539 .dpms = drm_helper_connector_dpms,
540 .save = cdv_intel_lvds_save,
541 .restore = cdv_intel_lvds_restore,
542 .detect = cdv_intel_lvds_detect,
543 .fill_modes = drm_helper_probe_single_connector_modes,
544 .set_property = cdv_intel_lvds_set_property,
545 .destroy = cdv_intel_lvds_destroy,
546};
547
548
549static void cdv_intel_lvds_enc_destroy(struct drm_encoder *encoder)
550{
551 drm_encoder_cleanup(encoder);
552}
553
554const struct drm_encoder_funcs cdv_intel_lvds_enc_funcs = {
555 .destroy = cdv_intel_lvds_enc_destroy,
556};
557
558/**
559 * cdv_intel_lvds_init - setup LVDS connectors on this device
560 * @dev: drm device
561 *
562 * Create the connector, register the LVDS DDC bus, and try to figure out what
563 * modes we can display on the LVDS panel (if present).
564 */
565void cdv_intel_lvds_init(struct drm_device *dev,
566 struct psb_intel_mode_device *mode_dev)
567{
568 struct psb_intel_output *psb_intel_output;
569 struct cdv_intel_lvds_priv *lvds_priv;
570 struct drm_connector *connector;
571 struct drm_encoder *encoder;
572 struct drm_display_mode *scan;
573 struct drm_crtc *crtc;
574 struct drm_psb_private *dev_priv = dev->dev_private;
575 u32 lvds;
576 int pipe;
577
578 psb_intel_output = kzalloc(sizeof(struct psb_intel_output) +
579 sizeof(struct cdv_intel_lvds_priv), GFP_KERNEL);
580 if (!psb_intel_output)
581 return;
582
583 lvds_priv = (struct cdv_intel_lvds_priv *)(psb_intel_output + 1);
584
585 psb_intel_output->dev_priv = lvds_priv;
586
587 psb_intel_output->mode_dev = mode_dev;
588 connector = &psb_intel_output->base;
589 encoder = &psb_intel_output->enc;
590
591
592 drm_connector_init(dev, &psb_intel_output->base,
593 &cdv_intel_lvds_connector_funcs,
594 DRM_MODE_CONNECTOR_LVDS);
595
596 drm_encoder_init(dev, &psb_intel_output->enc,
597 &cdv_intel_lvds_enc_funcs,
598 DRM_MODE_ENCODER_LVDS);
599
600
601 drm_mode_connector_attach_encoder(&psb_intel_output->base,
602 &psb_intel_output->enc);
603 psb_intel_output->type = INTEL_OUTPUT_LVDS;
604
605 drm_encoder_helper_add(encoder, &cdv_intel_lvds_helper_funcs);
606 drm_connector_helper_add(connector,
607 &cdv_intel_lvds_connector_helper_funcs);
608 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
609 connector->interlace_allowed = false;
610 connector->doublescan_allowed = false;
611
612	/* Attach connector properties */
613 drm_connector_attach_property(connector,
614 dev->mode_config.scaling_mode_property,
615 DRM_MODE_SCALE_FULLSCREEN);
616 drm_connector_attach_property(connector,
617 dev_priv->backlight_property,
618 BRIGHTNESS_MAX_LEVEL);
619
620 /**
621 * Set up I2C bus
622	 * FIXME: destroy i2c_bus on exit
623 */
624 psb_intel_output->i2c_bus = psb_intel_i2c_create(dev,
625 GPIOB,
626 "LVDSBLC_B");
627 if (!psb_intel_output->i2c_bus) {
628 dev_printk(KERN_ERR,
629 &dev->pdev->dev, "I2C bus registration failed.\n");
630 goto failed_blc_i2c;
631 }
632 psb_intel_output->i2c_bus->slave_addr = 0x2C;
633 dev_priv->lvds_i2c_bus = psb_intel_output->i2c_bus;
634
635 /*
636 * LVDS discovery:
637 * 1) check for EDID on DDC
638 * 2) check for VBT data
639 * 3) check to see if LVDS is already on
640 * if none of the above, no panel
641 * 4) make sure lid is open
642 * if closed, act like it's not there for now
643 */
644
645 /* Set up the DDC bus. */
646 psb_intel_output->ddc_bus = psb_intel_i2c_create(dev,
647 GPIOC,
648 "LVDSDDC_C");
649 if (!psb_intel_output->ddc_bus) {
650 dev_printk(KERN_ERR, &dev->pdev->dev,
651 "DDC bus registration " "failed.\n");
652 goto failed_ddc;
653 }
654
655 /*
656 * Attempt to get the fixed panel mode from DDC. Assume that the
657 * preferred mode is the right one.
658 */
659 psb_intel_ddc_get_modes(psb_intel_output);
660 list_for_each_entry(scan, &connector->probed_modes, head) {
661 if (scan->type & DRM_MODE_TYPE_PREFERRED) {
662 mode_dev->panel_fixed_mode =
663 drm_mode_duplicate(dev, scan);
664 goto out; /* FIXME: check for quirks */
665 }
666 }
667
668	/* Failed to get an EDID; what about VBT? Do we need this? */
669 if (dev_priv->lfp_lvds_vbt_mode) {
670 mode_dev->panel_fixed_mode =
671 drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
672 if (mode_dev->panel_fixed_mode) {
673 mode_dev->panel_fixed_mode->type |=
674 DRM_MODE_TYPE_PREFERRED;
675 goto out; /* FIXME: check for quirks */
676 }
677 }
678 /*
679 * If we didn't get EDID, try checking if the panel is already turned
680 * on. If so, assume that whatever is currently programmed is the
681 * correct mode.
682 */
683 lvds = REG_READ(LVDS);
684 pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
685 crtc = psb_intel_get_crtc_from_pipe(dev, pipe);
686
687 if (crtc && (lvds & LVDS_PORT_EN)) {
688 mode_dev->panel_fixed_mode =
689 cdv_intel_crtc_mode_get(dev, crtc);
690 if (mode_dev->panel_fixed_mode) {
691 mode_dev->panel_fixed_mode->type |=
692 DRM_MODE_TYPE_PREFERRED;
693 goto out; /* FIXME: check for quirks */
694 }
695 }
696
697 /* If we still don't have a mode after all that, give up. */
698 if (!mode_dev->panel_fixed_mode) {
699 DRM_DEBUG
700 ("Found no modes on the lvds, ignoring the LVDS\n");
701 goto failed_find;
702 }
703
704out:
705 drm_sysfs_connector_add(connector);
706 return;
707
708failed_find:
709 printk(KERN_ERR "Failed find\n");
710 if (psb_intel_output->ddc_bus)
711 psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
712failed_ddc:
713 printk(KERN_ERR "Failed DDC\n");
714 if (psb_intel_output->i2c_bus)
715 psb_intel_i2c_destroy(psb_intel_output->i2c_bus);
716failed_blc_i2c:
717 printk(KERN_ERR "Failed BLC\n");
718 drm_encoder_cleanup(encoder);
719 drm_connector_cleanup(connector);
720 kfree(connector);
721}
diff --git a/drivers/staging/gma500/displays/hdmi.h b/drivers/staging/gma500/displays/hdmi.h
deleted file mode 100644
index d58ba9bd010f..000000000000
--- a/drivers/staging/gma500/displays/hdmi.h
+++ /dev/null
@@ -1,33 +0,0 @@
1/*
2 * Copyright (c) 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Thomas Eaton <thomas.g.eaton@intel.com>
25 * Scott Rowe <scott.m.rowe@intel.com>
26 */
27
28#ifndef HDMI_H
29#define HDMI_H
30
31extern void hdmi_init(struct drm_device *dev);
32
33#endif
diff --git a/drivers/staging/gma500/displays/pyr_cmd.h b/drivers/staging/gma500/displays/pyr_cmd.h
deleted file mode 100644
index 84bae5c8c552..000000000000
--- a/drivers/staging/gma500/displays/pyr_cmd.h
+++ /dev/null
@@ -1,34 +0,0 @@
1/*
2 * Copyright (c) 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Thomas Eaton <thomas.g.eaton@intel.com>
25 * Scott Rowe <scott.m.rowe@intel.com>
26 */
27
28#ifndef PYR_CMD_H
29#define PYR_CMD_H
30
31extern void pyr_cmd_init(struct drm_device *dev, struct panel_funcs *p_funcs);
32
33#endif
34
diff --git a/drivers/staging/gma500/displays/pyr_vid.h b/drivers/staging/gma500/displays/pyr_vid.h
deleted file mode 100644
index ce98860fa68a..000000000000
--- a/drivers/staging/gma500/displays/pyr_vid.h
+++ /dev/null
@@ -1,34 +0,0 @@
1/*
2 * Copyright (c) 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Thomas Eaton <thomas.g.eaton@intel.com>
25 * Scott Rowe <scott.m.rowe@intel.com>
26*/
27
28#ifndef PYR_VID_H
29#define PYR_VID_H
30
31extern void pyr_vid_init(struct drm_device *dev, struct panel_funcs *p_funcs);
32extern struct drm_display_mode *pyr_vid_get_config_mode(struct drm_device* dev);
33
34#endif
diff --git a/drivers/staging/gma500/displays/tmd_cmd.h b/drivers/staging/gma500/displays/tmd_cmd.h
deleted file mode 100644
index 641e85eedece..000000000000
--- a/drivers/staging/gma500/displays/tmd_cmd.h
+++ /dev/null
@@ -1,34 +0,0 @@
1/*
2 * Copyright (c) 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Thomas Eaton <thomas.g.eaton@intel.com>
25 * Scott Rowe <scott.m.rowe@intel.com>
26 */
27
28#ifndef TMD_CMD_H
29#define TMD_CMD_H
30
31extern void tmd_cmd_init(struct drm_device *dev, struct panel_funcs *p_funcs);
32extern struct drm_display_mode *tmd_cmd_get_config_mode(struct drm_device *dev);
33
34#endif
diff --git a/drivers/staging/gma500/displays/tmd_vid.h b/drivers/staging/gma500/displays/tmd_vid.h
deleted file mode 100644
index 7a5fa3b935e3..000000000000
--- a/drivers/staging/gma500/displays/tmd_vid.h
+++ /dev/null
@@ -1,34 +0,0 @@
1/*
2 * Copyright (c) 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Thomas Eaton <thomas.g.eaton@intel.com>
25 * Scott Rowe <scott.m.rowe@intel.com>
26*/
27
28#ifndef TMD_VID_H
29#define TMD_VID_H
30
31extern void tmd_vid_init(struct drm_device *dev, struct panel_funcs *p_funcs);
32extern struct drm_display_mode *tmd_vid_get_config_mode(struct drm_device *dev);
33
34#endif
diff --git a/drivers/staging/gma500/displays/tpo_cmd.h b/drivers/staging/gma500/displays/tpo_cmd.h
deleted file mode 100644
index 610552730d71..000000000000
--- a/drivers/staging/gma500/displays/tpo_cmd.h
+++ /dev/null
@@ -1,35 +0,0 @@
1/*
2 * Copyright (c) 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Thomas Eaton <thomas.g.eaton@intel.com>
25 * Scott Rowe <scott.m.rowe@intel.com>
26*/
27
28#ifndef TPO_CMD_H
29#define TPO_CMD_H
30
31extern void tpo_cmd_init(struct drm_device *dev, struct panel_funcs *p_funcs);
32/* extern struct drm_display_mode * */
33/* tpo_cmd_get_config_mode(struct drm_device *dev); */
34
35#endif
diff --git a/drivers/staging/gma500/displays/tpo_vid.h b/drivers/staging/gma500/displays/tpo_vid.h
deleted file mode 100644
index c24f05722de1..000000000000
--- a/drivers/staging/gma500/displays/tpo_vid.h
+++ /dev/null
@@ -1,33 +0,0 @@
1/*
2 * Copyright (c) 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Thomas Eaton <thomas.g.eaton@intel.com>
25 * Scott Rowe <scott.m.rowe@intel.com>
26 */
27
28#ifndef TPO_VID_H
29#define TPO_VID_H
30
31extern void tpo_vid_init(struct drm_device *dev, struct panel_funcs *p_funcs);
32
33#endif
diff --git a/drivers/staging/gma500/framebuffer.c b/drivers/staging/gma500/framebuffer.c
deleted file mode 100644
index b00761cba144..000000000000
--- a/drivers/staging/gma500/framebuffer.c
+++ /dev/null
@@ -1,856 +0,0 @@
1/**************************************************************************
2 * Copyright (c) 2007-2011, Intel Corporation.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 **************************************************************************/
19
20#include <linux/module.h>
21#include <linux/kernel.h>
22#include <linux/errno.h>
23#include <linux/string.h>
24#include <linux/mm.h>
25#include <linux/tty.h>
26#include <linux/slab.h>
27#include <linux/delay.h>
28#include <linux/fb.h>
29#include <linux/init.h>
30#include <linux/console.h>
31
32#include <drm/drmP.h>
33#include <drm/drm.h>
34#include <drm/drm_crtc.h>
35#include <drm/drm_fb_helper.h>
36
37#include "psb_drv.h"
38#include "psb_intel_reg.h"
39#include "psb_intel_drv.h"
40#include "framebuffer.h"
41#include "gtt.h"
42
43#include "mdfld_output.h"
44
45static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb);
46static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
47 struct drm_file *file_priv,
48 unsigned int *handle);
49
50static const struct drm_framebuffer_funcs psb_fb_funcs = {
51 .destroy = psb_user_framebuffer_destroy,
52 .create_handle = psb_user_framebuffer_create_handle,
53};
54
55#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
56
57static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
58 unsigned blue, unsigned transp,
59 struct fb_info *info)
60{
61 struct psb_fbdev *fbdev = info->par;
62 struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb;
63 uint32_t v;
64
65 if (!fb)
66 return -ENOMEM;
67
68 if (regno > 255)
69 return 1;
70
71 red = CMAP_TOHW(red, info->var.red.length);
72 blue = CMAP_TOHW(blue, info->var.blue.length);
73 green = CMAP_TOHW(green, info->var.green.length);
74 transp = CMAP_TOHW(transp, info->var.transp.length);
75
76 v = (red << info->var.red.offset) |
77 (green << info->var.green.offset) |
78 (blue << info->var.blue.offset) |
79 (transp << info->var.transp.offset);
80
81 if (regno < 16) {
82 switch (fb->bits_per_pixel) {
83 case 16:
84 ((uint32_t *) info->pseudo_palette)[regno] = v;
85 break;
86 case 24:
87 case 32:
88 ((uint32_t *) info->pseudo_palette)[regno] = v;
89 break;
90 }
91 }
92
93 return 0;
94}
95
96static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
97{
98 struct psb_fbdev *fbdev = info->par;
99 struct psb_framebuffer *psbfb = &fbdev->pfb;
100 struct drm_device *dev = psbfb->base.dev;
101
102 /*
103 * We have to poke our nose in here. The core fb code assumes
104 * panning is part of the hardware that can be invoked before
105 * the actual fb is mapped. In our case that isn't quite true.
106 */
107 if (psbfb->gtt->npage)
108 psb_gtt_roll(dev, psbfb->gtt, var->yoffset);
109 return 0;
110}
111
112void psbfb_suspend(struct drm_device *dev)
113{
114	struct drm_framebuffer *fb;
115	struct fb_info *info;
116
117 console_lock();
118 mutex_lock(&dev->mode_config.mutex);
119 list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
120		info = to_psb_fb(fb)->fbdev;
121 fb_set_suspend(info, 1);
122 drm_fb_helper_blank(FB_BLANK_POWERDOWN, info);
123 }
124 mutex_unlock(&dev->mode_config.mutex);
125 console_unlock();
126}
127
128void psbfb_resume(struct drm_device *dev)
129{
130	struct drm_framebuffer *fb;
131	struct fb_info *info;
132
133 console_lock();
134 mutex_lock(&dev->mode_config.mutex);
135 list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
136		info = to_psb_fb(fb)->fbdev;
137 fb_set_suspend(info, 0);
138 drm_fb_helper_blank(FB_BLANK_UNBLANK, info);
139 }
140 mutex_unlock(&dev->mode_config.mutex);
141 console_unlock();
142 drm_helper_disable_unused_functions(dev);
143}
144
145static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
146{
147 struct psb_framebuffer *psbfb = vma->vm_private_data;
148 struct drm_device *dev = psbfb->base.dev;
149 struct drm_psb_private *dev_priv = dev->dev_private;
150 int page_num;
151 int i;
152 unsigned long address;
153 int ret;
154 unsigned long pfn;
155 /* FIXME: assumes fb at stolen base which may not be true */
156 unsigned long phys_addr = (unsigned long)dev_priv->stolen_base;
157
158 page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
159 address = (unsigned long)vmf->virtual_address;
160
161 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
162
163 for (i = 0; i < page_num; i++) {
164 pfn = (phys_addr >> PAGE_SHIFT);
165
166 ret = vm_insert_mixed(vma, address, pfn);
167 if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
168 break;
169 else if (unlikely(ret != 0)) {
170 ret = (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
171 return ret;
172 }
173 address += PAGE_SIZE;
174 phys_addr += PAGE_SIZE;
175 }
176 return VM_FAULT_NOPAGE;
177}
178
179static void psbfb_vm_open(struct vm_area_struct *vma)
180{
181}
182
183static void psbfb_vm_close(struct vm_area_struct *vma)
184{
185}
186
187static struct vm_operations_struct psbfb_vm_ops = {
188 .fault = psbfb_vm_fault,
189 .open = psbfb_vm_open,
190 .close = psbfb_vm_close
191};
192
193static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
194{
195 struct psb_fbdev *fbdev = info->par;
196 struct psb_framebuffer *psbfb = &fbdev->pfb;
197
198 if (vma->vm_pgoff != 0)
199 return -EINVAL;
200 if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
201 return -EINVAL;
202
203 if (!psbfb->addr_space)
204 psbfb->addr_space = vma->vm_file->f_mapping;
205 /*
206 * If this is a GEM object then info->screen_base is the virtual
207 * kernel remapping of the object. FIXME: Review if this is
208 * suitable for our mmap work
209 */
210 vma->vm_ops = &psbfb_vm_ops;
211 vma->vm_private_data = (void *)psbfb;
212 vma->vm_flags |= VM_RESERVED | VM_IO |
213 VM_MIXEDMAP | VM_DONTEXPAND;
214 return 0;
215}
216
217static int psbfb_ioctl(struct fb_info *info, unsigned int cmd,
218 unsigned long arg)
219{
220 return -ENOTTY;
221}
222
223static struct fb_ops psbfb_ops = {
224 .owner = THIS_MODULE,
225 .fb_check_var = drm_fb_helper_check_var,
226 .fb_set_par = drm_fb_helper_set_par,
227 .fb_blank = drm_fb_helper_blank,
228 .fb_setcolreg = psbfb_setcolreg,
229 .fb_fillrect = cfb_fillrect,
230 .fb_copyarea = psbfb_copyarea,
231 .fb_imageblit = cfb_imageblit,
232 .fb_mmap = psbfb_mmap,
233 .fb_sync = psbfb_sync,
234 .fb_ioctl = psbfb_ioctl,
235};
236
237static struct fb_ops psbfb_roll_ops = {
238 .owner = THIS_MODULE,
239 .fb_check_var = drm_fb_helper_check_var,
240 .fb_set_par = drm_fb_helper_set_par,
241 .fb_blank = drm_fb_helper_blank,
242 .fb_setcolreg = psbfb_setcolreg,
243 .fb_fillrect = cfb_fillrect,
244 .fb_copyarea = cfb_copyarea,
245 .fb_imageblit = cfb_imageblit,
246 .fb_pan_display = psbfb_pan,
247 .fb_mmap = psbfb_mmap,
248 .fb_sync = psbfb_sync,
249 .fb_ioctl = psbfb_ioctl,
250};
251
252static struct fb_ops psbfb_unaccel_ops = {
253 .owner = THIS_MODULE,
254 .fb_check_var = drm_fb_helper_check_var,
255 .fb_set_par = drm_fb_helper_set_par,
256 .fb_blank = drm_fb_helper_blank,
257 .fb_setcolreg = psbfb_setcolreg,
258 .fb_fillrect = cfb_fillrect,
259 .fb_copyarea = cfb_copyarea,
260 .fb_imageblit = cfb_imageblit,
261 .fb_mmap = psbfb_mmap,
262 .fb_ioctl = psbfb_ioctl,
263};
264
265/**
266 * psb_framebuffer_init - initialize a framebuffer
267 * @dev: our DRM device
268 * @fb: framebuffer to set up
269 * @mode_cmd: mode description
270 * @gt: backing object
271 *
272 * Configure and fill in the boilerplate for our frame buffer. Return
273 * 0 on success or an error code if we fail.
274 */
275static int psb_framebuffer_init(struct drm_device *dev,
276 struct psb_framebuffer *fb,
277 struct drm_mode_fb_cmd2 *mode_cmd,
278 struct gtt_range *gt)
279{
280 u32 bpp, depth;
281 int ret;
282
283 drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
284
285 if (mode_cmd->pitches[0] & 63)
286 return -EINVAL;
287 switch (bpp) {
288 case 8:
289 case 16:
290 case 24:
291 case 32:
292 break;
293 default:
294 return -EINVAL;
295 }
296 ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
297 if (ret) {
298 dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
299 return ret;
300 }
301 drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
302 fb->gtt = gt;
303 return 0;
304}
305
306/**
307 * psb_framebuffer_create - create a framebuffer backed by gt
308 * @dev: our DRM device
309 * @mode_cmd: the description of the requested mode
310 * @gt: the backing object
311 *
312 * Create a framebuffer object backed by the gt, and fill in the
313 * boilerplate required
314 *
315 * TODO: review object references
316 */
317
318static struct drm_framebuffer *psb_framebuffer_create
319 (struct drm_device *dev,
320 struct drm_mode_fb_cmd2 *mode_cmd,
321 struct gtt_range *gt)
322{
323 struct psb_framebuffer *fb;
324 int ret;
325
326 fb = kzalloc(sizeof(*fb), GFP_KERNEL);
327 if (!fb)
328 return ERR_PTR(-ENOMEM);
329
330 ret = psb_framebuffer_init(dev, fb, mode_cmd, gt);
331 if (ret) {
332 kfree(fb);
333 return ERR_PTR(ret);
334 }
335 return &fb->base;
336}
337
338/**
339 * psbfb_alloc - allocate frame buffer memory
340 * @dev: the DRM device
341 * @aligned_size: space needed
342 * @force: fall back to GEM buffers if need be
343 *
344 * Allocate the frame buffer. In the usual case we get a GTT range that
345 * is stolen memory backed and life is simple. If there isn't sufficient
346 * stolen memory or the system has no stolen memory we allocate a range
347 * and back it with a GEM object.
348 *
349 * In this case the GEM object has no handle.
350 */
351static struct gtt_range *psbfb_alloc(struct drm_device *dev,
352 int aligned_size, int force)
353{
354 struct gtt_range *backing;
355 /* Begin by trying to use stolen memory backing */
356 backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1);
357 if (backing) {
358 if (drm_gem_private_object_init(dev,
359 &backing->gem, aligned_size) == 0)
360 return backing;
361 psb_gtt_free_range(dev, backing);
362 }
363 if (!force)
364 return NULL;
365
366 /* Next try using GEM host memory */
367 backing = psb_gtt_alloc_range(dev, aligned_size, "fb(gem)", 0);
368 if (backing == NULL)
369 return NULL;
370
371 /* Now back it with an object */
372 if (drm_gem_object_init(dev, &backing->gem, aligned_size) != 0) {
373 psb_gtt_free_range(dev, backing);
374 return NULL;
375 }
376 return backing;
377}
378
379/**
380 * psbfb_create - create a framebuffer
381 * @fbdev: the framebuffer device
382 * @sizes: specification of the layout
383 *
384 * Create a framebuffer to the specifications provided
385 */
386static int psbfb_create(struct psb_fbdev *fbdev,
387 struct drm_fb_helper_surface_size *sizes)
388{
389 struct drm_device *dev = fbdev->psb_fb_helper.dev;
390 struct drm_psb_private *dev_priv = dev->dev_private;
391 struct fb_info *info;
392 struct drm_framebuffer *fb;
393 struct psb_framebuffer *psbfb = &fbdev->pfb;
394 struct drm_mode_fb_cmd2 mode_cmd;
395 struct device *device = &dev->pdev->dev;
396 int size;
397 int ret;
398 struct gtt_range *backing;
399 int gtt_roll = 1;
400 u32 bpp, depth;
401
402 mode_cmd.width = sizes->surface_width;
403 mode_cmd.height = sizes->surface_height;
404 bpp = sizes->surface_bpp;
405
406 /* No 24bit packed */
407 if (bpp == 24)
408 bpp = 32;
409
410 /* Acceleration via the GTT requires pitch to be 4096 byte aligned
411 (ie 1024 or 2048 pixels in normal use) */
412 mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 4096);
413 depth = sizes->surface_depth;
414
415 size = mode_cmd.pitches[0] * mode_cmd.height;
416 size = ALIGN(size, PAGE_SIZE);
417
418 /* Allocate the framebuffer in the GTT with stolen page backing */
419 backing = psbfb_alloc(dev, size, 0);
420 if (backing == NULL) {
421 /*
422 * We couldn't get the space we wanted, fall back to the
423 * display engine requirement instead. The HW requires
424 * the pitch to be 64 byte aligned
425 */
426
427 gtt_roll = 0; /* Don't use GTT accelerated scrolling */
428
429 mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 64);
430 depth = sizes->surface_depth;
431
432 size = mode_cmd.pitches[0] * mode_cmd.height;
433 size = ALIGN(size, PAGE_SIZE);
434
435 /* Allocate the framebuffer in the GTT with stolen page
436 backing when there is room */
437 backing = psbfb_alloc(dev, size, 1);
438 if (backing == NULL)
439 return -ENOMEM;
440 }
441
442 mutex_lock(&dev->struct_mutex);
443
444 info = framebuffer_alloc(0, device);
445 if (!info) {
446 ret = -ENOMEM;
447 goto out_err1;
448 }
449 info->par = fbdev;
450
451 mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
452
453 ret = psb_framebuffer_init(dev, psbfb, &mode_cmd, backing);
454 if (ret)
455 goto out_unref;
456
457 fb = &psbfb->base;
458 psbfb->fbdev = info;
459
460 fbdev->psb_fb_helper.fb = fb;
461 fbdev->psb_fb_helper.fbdev = info;
462
463 strcpy(info->fix.id, "psbfb");
464
465 info->flags = FBINFO_DEFAULT;
466 if (gtt_roll) { /* GTT rolling seems best */
467 info->fbops = &psbfb_roll_ops;
468 info->flags |= FBINFO_HWACCEL_YPAN;
469 }
470 else if (dev_priv->ops->accel_2d) /* 2D engine */
471 info->fbops = &psbfb_ops;
472 else /* Software */
473 info->fbops = &psbfb_unaccel_ops;
474
475 ret = fb_alloc_cmap(&info->cmap, 256, 0);
476 if (ret) {
477 ret = -ENOMEM;
478 goto out_unref;
479 }
480
481 info->fix.smem_start = dev->mode_config.fb_base;
482 info->fix.smem_len = size;
483 info->fix.ywrapstep = gtt_roll;
484 info->fix.ypanstep = gtt_roll;
485
486 if (backing->stolen) {
487 /* Accessed stolen memory directly */
488 info->screen_base = (char *)dev_priv->vram_addr +
489 backing->offset;
490 } else {
491 /* Pin the pages into the GTT and create a mapping to them */
492 psb_gtt_pin(backing);
493 info->screen_base = vm_map_ram(backing->pages, backing->npage,
494 -1, PAGE_KERNEL);
495 if (info->screen_base == NULL) {
496 psb_gtt_unpin(backing);
497 ret = -ENOMEM;
498 goto out_unref;
499 }
500 psbfb->vm_map = 1;
501 }
502 info->screen_size = size;
503
504 if (dev_priv->gtt.stolen_size) {
505 info->apertures = alloc_apertures(1);
506 if (!info->apertures) {
507 ret = -ENOMEM;
508 goto out_unref;
509 }
510 info->apertures->ranges[0].base = dev->mode_config.fb_base;
511 info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
512 }
513
514 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
515 drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper,
516 sizes->fb_width, sizes->fb_height);
517
518 info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
519 info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
520
521 info->pixmap.size = 64 * 1024;
522 info->pixmap.buf_align = 8;
523 info->pixmap.access_align = 32;
524 info->pixmap.flags = FB_PIXMAP_SYSTEM;
525 info->pixmap.scan_align = 1;
526
527 dev_info(dev->dev, "allocated %dx%d fb\n",
528 psbfb->base.width, psbfb->base.height);
529
530 mutex_unlock(&dev->struct_mutex);
531 return 0;
532out_unref:
533 if (backing->stolen)
534 psb_gtt_free_range(dev, backing);
535 else {
536 if (psbfb->vm_map)
537 vm_unmap_ram(info->screen_base, backing->npage);
538 drm_gem_object_unreference(&backing->gem);
539 }
540out_err1:
541 mutex_unlock(&dev->struct_mutex);
542 psb_gtt_free_range(dev, backing);
543 return ret;
544}
545
546/**
547 * psb_user_framebuffer_create - create framebuffer
548 * @dev: our DRM device
549 * @filp: client file
550 * @cmd: mode request
551 *
552 * Create a new framebuffer backed by a userspace GEM object
553 */
554static struct drm_framebuffer *psb_user_framebuffer_create
555 (struct drm_device *dev, struct drm_file *filp,
556 struct drm_mode_fb_cmd2 *cmd)
557{
558 struct gtt_range *r;
559 struct drm_gem_object *obj;
560
561 /*
562 * Find the GEM object and thus the gtt range object that is
563 * to back this space
564 */
565 obj = drm_gem_object_lookup(dev, filp, cmd->handles[0]);
566 if (obj == NULL)
567 return ERR_PTR(-ENOENT);
568
569 /* Let the core code do all the work */
570 r = container_of(obj, struct gtt_range, gem);
571 return psb_framebuffer_create(dev, cmd, r);
572}
573
574static void psbfb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
575 u16 blue, int regno)
576{
577}
578
579static void psbfb_gamma_get(struct drm_crtc *crtc, u16 *red,
580 u16 *green, u16 *blue, int regno)
581{
582}
583
584static int psbfb_probe(struct drm_fb_helper *helper,
585 struct drm_fb_helper_surface_size *sizes)
586{
587 struct psb_fbdev *psb_fbdev = (struct psb_fbdev *)helper;
588 int new_fb = 0;
589 int ret;
590
591 if (!helper->fb) {
592 ret = psbfb_create(psb_fbdev, sizes);
593 if (ret)
594 return ret;
595 new_fb = 1;
596 }
597 return new_fb;
598}
599
600struct drm_fb_helper_funcs psb_fb_helper_funcs = {
601 .gamma_set = psbfb_gamma_set,
602 .gamma_get = psbfb_gamma_get,
603 .fb_probe = psbfb_probe,
604};
605
606int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
607{
608 struct fb_info *info;
609 struct psb_framebuffer *psbfb = &fbdev->pfb;
610
611 if (fbdev->psb_fb_helper.fbdev) {
612 info = fbdev->psb_fb_helper.fbdev;
613
614 /* If this is our base framebuffer then kill any virtual map
615 for the framebuffer layer and unpin it */
616 if (psbfb->vm_map) {
617 vm_unmap_ram(info->screen_base, psbfb->gtt->npage);
618 psb_gtt_unpin(psbfb->gtt);
619 }
620 unregister_framebuffer(info);
621 if (info->cmap.len)
622 fb_dealloc_cmap(&info->cmap);
623 framebuffer_release(info);
624 }
625 drm_fb_helper_fini(&fbdev->psb_fb_helper);
626 drm_framebuffer_cleanup(&psbfb->base);
627
628 if (psbfb->gtt)
629 drm_gem_object_unreference(&psbfb->gtt->gem);
630 return 0;
631}
632
633int psb_fbdev_init(struct drm_device *dev)
634{
635 struct psb_fbdev *fbdev;
636 struct drm_psb_private *dev_priv = dev->dev_private;
637
638 fbdev = kzalloc(sizeof(struct psb_fbdev), GFP_KERNEL);
639 if (!fbdev) {
640 dev_err(dev->dev, "no memory\n");
641 return -ENOMEM;
642 }
643
644 dev_priv->fbdev = fbdev;
645 fbdev->psb_fb_helper.funcs = &psb_fb_helper_funcs;
646
647 drm_fb_helper_init(dev, &fbdev->psb_fb_helper, dev_priv->ops->crtcs,
648 INTELFB_CONN_LIMIT);
649
650 drm_fb_helper_single_add_all_connectors(&fbdev->psb_fb_helper);
651 drm_fb_helper_initial_config(&fbdev->psb_fb_helper, 32);
652 return 0;
653}
654
655void psb_fbdev_fini(struct drm_device *dev)
656{
657 struct drm_psb_private *dev_priv = dev->dev_private;
658
659 if (!dev_priv->fbdev)
660 return;
661
662 psb_fbdev_destroy(dev, dev_priv->fbdev);
663 kfree(dev_priv->fbdev);
664 dev_priv->fbdev = NULL;
665}
666
667static void psbfb_output_poll_changed(struct drm_device *dev)
668{
669 struct drm_psb_private *dev_priv = dev->dev_private;
670 struct psb_fbdev *fbdev = (struct psb_fbdev *)dev_priv->fbdev;
671 drm_fb_helper_hotplug_event(&fbdev->psb_fb_helper);
672}
673
674/**
675 * psb_user_framebuffer_create_handle - add handle to a framebuffer
676 * @fb: framebuffer
677 * @file_priv: our DRM file
678 * @handle: returned handle
679 *
680 * Our framebuffer object is a GTT range which also contains a GEM
681 * object. We need to turn it into a handle for userspace. GEM will do
682 * the work for us
683 */
684static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
685 struct drm_file *file_priv,
686 unsigned int *handle)
687{
688 struct psb_framebuffer *psbfb = to_psb_fb(fb);
689 struct gtt_range *r = psbfb->gtt;
690 return drm_gem_handle_create(file_priv, &r->gem, handle);
691}
692
693/**
694 * psb_user_framebuffer_destroy - destruct user created fb
695 * @fb: framebuffer
696 *
697 * User framebuffers are backed by GEM objects so all we have to do is
698 * clean up a bit and drop the reference, GEM will handle the fallout
699 */
700static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
701{
702 struct psb_framebuffer *psbfb = to_psb_fb(fb);
703 struct gtt_range *r = psbfb->gtt;
704 struct drm_device *dev = fb->dev;
705 struct drm_psb_private *dev_priv = dev->dev_private;
706 struct psb_fbdev *fbdev = dev_priv->fbdev;
707 struct drm_crtc *crtc;
708 int reset = 0;
709
710 /* Should never get stolen memory for a user fb */
711 WARN_ON(r->stolen);
712
713 /* Check if we are erroneously live */
714 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
715 if (crtc->fb == fb)
716 reset = 1;
717
718 if (reset)
719 /*
720 * Now force a sane response before we permit the DRM CRTC
721 * layer to do stupid things like blank the display. Instead
722 * we reset this framebuffer as if the user had forced a reset.
723 * We must do this before the cleanup so that the DRM layer
724 * doesn't get a chance to stick its oar in where it isn't
725 * wanted.
726 */
727 drm_fb_helper_restore_fbdev_mode(&fbdev->psb_fb_helper);
728
729 /* Let DRM do its clean up */
730 drm_framebuffer_cleanup(fb);
731 /* We are no longer using the resource in GEM */
732 drm_gem_object_unreference_unlocked(&r->gem);
733 kfree(fb);
734}
735
736static const struct drm_mode_config_funcs psb_mode_funcs = {
737 .fb_create = psb_user_framebuffer_create,
738 .output_poll_changed = psbfb_output_poll_changed,
739};
740
741static int psb_create_backlight_property(struct drm_device *dev)
742{
743 struct drm_psb_private *dev_priv = dev->dev_private;
744 struct drm_property *backlight;
745
746 if (dev_priv->backlight_property)
747 return 0;
748
749 backlight = drm_property_create(dev, DRM_MODE_PROP_RANGE,
750 "backlight", 2);
751 backlight->values[0] = 0;
752 backlight->values[1] = 100;
753
754 dev_priv->backlight_property = backlight;
755
756 return 0;
757}
758
759static void psb_setup_outputs(struct drm_device *dev)
760{
761 struct drm_psb_private *dev_priv = dev->dev_private;
762 struct drm_connector *connector;
763
764 drm_mode_create_scaling_mode_property(dev);
765 psb_create_backlight_property(dev);
766
767 dev_priv->ops->output_init(dev);
768
769 list_for_each_entry(connector, &dev->mode_config.connector_list,
770 head) {
771 struct psb_intel_output *psb_intel_output =
772 to_psb_intel_output(connector);
773 struct drm_encoder *encoder = &psb_intel_output->enc;
774 int crtc_mask = 0, clone_mask = 0;
775
776 /* valid crtcs */
777 switch (psb_intel_output->type) {
778 case INTEL_OUTPUT_ANALOG:
779 crtc_mask = (1 << 0);
780 clone_mask = (1 << INTEL_OUTPUT_ANALOG);
781 break;
782 case INTEL_OUTPUT_SDVO:
783 crtc_mask = ((1 << 0) | (1 << 1));
784 clone_mask = (1 << INTEL_OUTPUT_SDVO);
785 break;
786 case INTEL_OUTPUT_LVDS:
787 if (IS_MRST(dev))
788 crtc_mask = (1 << 0);
789 else
790 crtc_mask = (1 << 1);
791 clone_mask = (1 << INTEL_OUTPUT_LVDS);
792 break;
793 case INTEL_OUTPUT_MIPI:
794 crtc_mask = (1 << 0);
795 clone_mask = (1 << INTEL_OUTPUT_MIPI);
796 break;
797 case INTEL_OUTPUT_MIPI2:
798 crtc_mask = (1 << 2);
799 clone_mask = (1 << INTEL_OUTPUT_MIPI2);
800 break;
801 case INTEL_OUTPUT_HDMI:
802 /* HDMI on crtc 1 for SoC devices and crtc 0 for
803 Cedarview. HDMI on Poulsbo is only via external
804 logic */
805 if (IS_MFLD(dev) || IS_MRST(dev))
806 crtc_mask = (1 << 1);
807 else
808 crtc_mask = (1 << 0); /* Cedarview */
809 clone_mask = (1 << INTEL_OUTPUT_HDMI);
810 break;
811 }
812 encoder->possible_crtcs = crtc_mask;
813 encoder->possible_clones =
814 psb_intel_connector_clones(dev, clone_mask);
815 }
816}
817
818void psb_modeset_init(struct drm_device *dev)
819{
820 struct drm_psb_private *dev_priv =
821 (struct drm_psb_private *) dev->dev_private;
822 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
823 int i;
824
825 drm_mode_config_init(dev);
826
827 dev->mode_config.min_width = 0;
828 dev->mode_config.min_height = 0;
829
830 dev->mode_config.funcs = (void *) &psb_mode_funcs;
831
832 /* set memory base */
833 /* MRST and PSB should use BAR 2*/
834 pci_read_config_dword(dev->pdev, PSB_BSM, (u32 *)
835 &(dev->mode_config.fb_base));
836
837 /* num pipes is 2 for PSB but 1 for Mrst */
838 for (i = 0; i < dev_priv->num_pipe; i++)
839 psb_intel_crtc_init(dev, i, mode_dev);
840
841 dev->mode_config.max_width = 2048;
842 dev->mode_config.max_height = 2048;
843
844 psb_setup_outputs(dev);
845}
846
847void psb_modeset_cleanup(struct drm_device *dev)
848{
849 mutex_lock(&dev->struct_mutex);
850
851 drm_kms_helper_poll_fini(dev);
852 psb_fbdev_fini(dev);
853 drm_mode_config_cleanup(dev);
854
855 mutex_unlock(&dev->struct_mutex);
856}
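
For reference, a minimal standalone sketch of the pitch and size arithmetic that psbfb_create() performs above: the GTT-accelerated path aligns the pitch to 4096 bytes, while the fallback path only needs the 64-byte alignment the display engine requires. The 800x600 at 32 bpp surface is an invented example, and the ALIGN/PAGE_SIZE definitions are local stand-ins for the kernel macros.

#include <stdio.h>

#define PAGE_SIZE 4096UL
/* Round x up to the next multiple of a (a must be a power of two) */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long width = 800, height = 600, bpp = 32; /* example surface */

	/* GTT-accelerated path: pitch aligned to 4096 bytes */
	unsigned long pitch = ALIGN(width * ((bpp + 7) / 8), 4096UL);
	unsigned long size  = ALIGN(pitch * height, PAGE_SIZE);
	printf("GTT path:  pitch=%lu size=%lu\n", pitch, size);

	/* Fallback path: hardware only requires 64-byte pitch alignment */
	pitch = ALIGN(width * ((bpp + 7) / 8), 64UL);
	size  = ALIGN(pitch * height, PAGE_SIZE);
	printf("fallback:  pitch=%lu size=%lu\n", pitch, size);
	return 0;
}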
diff --git a/drivers/staging/gma500/framebuffer.h b/drivers/staging/gma500/framebuffer.h
deleted file mode 100644
index d1b2289447f0..000000000000
--- a/drivers/staging/gma500/framebuffer.h
+++ /dev/null
@@ -1,48 +0,0 @@
1/*
2 * Copyright (c) 2008-2011, Intel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Authors:
18 * Eric Anholt <eric@anholt.net>
19 *
20 */
21
22#ifndef _FRAMEBUFFER_H_
23#define _FRAMEBUFFER_H_
24
25#include <drm/drmP.h>
26#include <drm/drm_fb_helper.h>
27
28#include "psb_drv.h"
29
30struct psb_framebuffer {
31 struct drm_framebuffer base;
32 struct address_space *addr_space;
33 struct fb_info *fbdev;
34 struct gtt_range *gtt;
35 bool vm_map; /* True if we must undo a vm_map_ram */
36};
37
38struct psb_fbdev {
39 struct drm_fb_helper psb_fb_helper;
40 struct psb_framebuffer pfb;
41};
42
43#define to_psb_fb(x) container_of(x, struct psb_framebuffer, base)
44
45extern int psb_intel_connector_clones(struct drm_device *dev, int type_mask);
46
47#endif
48
diff --git a/drivers/staging/gma500/gem.c b/drivers/staging/gma500/gem.c
deleted file mode 100644
index f6433c037d24..000000000000
--- a/drivers/staging/gma500/gem.c
+++ /dev/null
@@ -1,292 +0,0 @@
1/*
2 * psb GEM interface
3 *
4 * Copyright (c) 2011, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Authors: Alan Cox
20 *
21 * TODO:
22 * - we need to work out if the MMU is relevant (eg for
23 * accelerated operations on a GEM object)
24 */
25
26#include <drm/drmP.h>
27#include <drm/drm.h>
28#include "psb_drm.h"
29#include "psb_drv.h"
30
31int psb_gem_init_object(struct drm_gem_object *obj)
32{
33 return -EINVAL;
34}
35
36void psb_gem_free_object(struct drm_gem_object *obj)
37{
38 struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
39 drm_gem_object_release_wrap(obj);
40 /* This must occur last as it frees up the memory of the GEM object */
41 psb_gtt_free_range(obj->dev, gtt);
42}
43
44int psb_gem_get_aperture(struct drm_device *dev, void *data,
45 struct drm_file *file)
46{
47 return -EINVAL;
48}
49
50/**
51 * psb_gem_dumb_map_gtt - buffer mapping for dumb interface
52 * @file: our drm client file
53 * @dev: drm device
54 * @handle: GEM handle to the object (from dumb_create)
55 *
56 * Do the necessary setup to allow the mapping of the frame buffer
57 * into user memory. We don't have to do much here at the moment.
58 */
59int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
60 uint32_t handle, uint64_t *offset)
61{
62 int ret = 0;
63 struct drm_gem_object *obj;
64
65 if (!(dev->driver->driver_features & DRIVER_GEM))
66 return -ENODEV;
67
68 mutex_lock(&dev->struct_mutex);
69
70 /* GEM does all our handle to object mapping */
71 obj = drm_gem_object_lookup(dev, file, handle);
72 if (obj == NULL) {
73 ret = -ENOENT;
74 goto unlock;
75 }
76 /* What validation is needed here ? */
77
78 /* Make it mmapable */
79 if (!obj->map_list.map) {
80 ret = gem_create_mmap_offset(obj);
81 if (ret)
82 goto out;
83 }
84 /* GEM should really work out the hash offsets for us */
85 *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
86out:
87 drm_gem_object_unreference(obj);
88unlock:
89 mutex_unlock(&dev->struct_mutex);
90 return ret;
91}
92
93/**
94 * psb_gem_create - create a mappable object
95 * @file: the DRM file of the client
96 * @dev: our device
97 * @size: the size requested
98 * @handlep: returned handle (opaque number)
99 *
100 * Create a GEM object, fill in the boilerplate and attach a handle to
101 * it so that userspace can speak about it. This does the core work
102 * for the various methods that do/will create GEM objects for things
103 */
104static int psb_gem_create(struct drm_file *file,
105 struct drm_device *dev, uint64_t size, uint32_t *handlep)
106{
107 struct gtt_range *r;
108 int ret;
109 u32 handle;
110
111 size = roundup(size, PAGE_SIZE);
112
113 /* Allocate our object - for now a direct gtt range which is not
114 stolen memory backed */
115 r = psb_gtt_alloc_range(dev, size, "gem", 0);
116 if (r == NULL) {
117 dev_err(dev->dev, "no memory for %lld byte GEM object\n", size);
118 return -ENOSPC;
119 }
120 /* Initialize the extra goodies GEM needs to do all the hard work */
121 if (drm_gem_object_init(dev, &r->gem, size) != 0) {
122 psb_gtt_free_range(dev, r);
123 /* GEM doesn't give an error code so use -ENOMEM */
124 dev_err(dev->dev, "GEM init failed for %lld\n", size);
125 return -ENOMEM;
126 }
127 /* Give the object a handle so we can carry it more easily */
128 ret = drm_gem_handle_create(file, &r->gem, &handle);
129 if (ret) {
130 dev_err(dev->dev, "GEM handle failed for %p, %lld\n",
131 &r->gem, size);
132 drm_gem_object_release(&r->gem);
133 psb_gtt_free_range(dev, r);
134 return ret;
135 }
136 /* We have the initial and handle reference but need only one now */
137 drm_gem_object_unreference(&r->gem);
138 *handlep = handle;
139 return 0;
140}
141
142/**
143 * psb_gem_dumb_create - create a dumb buffer
144 * @drm_file: our client file
145 * @dev: our device
146 * @args: the requested arguments copied from userspace
147 *
148 * Allocate a buffer suitable for use for a frame buffer of the
149 * form described by user space. Give userspace a handle by which
150 * to reference it.
151 */
152int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
153 struct drm_mode_create_dumb *args)
154{
155 args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
156 args->size = args->pitch * args->height;
157 return psb_gem_create(file, dev, args->size, &args->handle);
158}
159
160/**
161 * psb_gem_dumb_destroy - destroy a dumb buffer
162 * @file: client file
163 * @dev: our DRM device
164 * @handle: the object handle
165 *
166 * Destroy a handle that was created via psb_gem_dumb_create, at least
167 * we hope it was created that way. i915 seems to assume the caller
168 * does the checking but that might be worth review ! FIXME
169 */
170int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
171 uint32_t handle)
172{
173 /* No special work needed, drop the reference and see what falls out */
174 return drm_gem_handle_delete(file, handle);
175}
176
177/**
178 * psb_gem_fault - pagefault handler for GEM objects
179 * @vma: the VMA of the GEM object
180 * @vmf: fault detail
181 *
182 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
183 * does most of the work for us including the actual map/unmap calls
184 * but we need to do the actual page work.
185 *
186 * This code eventually needs to handle faulting objects in and out
187 * of the GTT and repacking it when we run out of space. We can put
188 * that off for now and for our simple uses
189 *
190 * The VMA was set up by GEM. In doing so it also ensured that the
191 * vma->vm_private_data points to the GEM object that is backing this
192 * mapping.
193 */
194int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
195{
196 struct drm_gem_object *obj;
197 struct gtt_range *r;
198 int ret;
199 unsigned long pfn;
200 pgoff_t page_offset;
201 struct drm_device *dev;
202 struct drm_psb_private *dev_priv;
203
204 obj = vma->vm_private_data; /* GEM object */
205 dev = obj->dev;
206 dev_priv = dev->dev_private;
207
208 r = container_of(obj, struct gtt_range, gem); /* Get the gtt range */
209
210 /* Make sure we don't parallel update on a fault, nor move or remove
211 something from beneath our feet */
212 mutex_lock(&dev->struct_mutex);
213
214 /* For now the mmap pins the object and it stays pinned. As things
215 stand that will do us no harm */
216 if (r->mmapping == 0) {
217 ret = psb_gtt_pin(r);
218 if (ret < 0) {
219 dev_err(dev->dev, "gma500: pin failed: %d\n", ret);
220 goto fail;
221 }
222 r->mmapping = 1;
223 }
224
225 /* Page relative to the VMA start - we must calculate this ourselves
226 because vmf->pgoff is the fake GEM offset */
227 page_offset = ((unsigned long) vmf->virtual_address - vma->vm_start)
228 >> PAGE_SHIFT;
229
230 /* CPU view of the page, don't go via the GART for CPU writes */
231 if (r->stolen)
232 pfn = (dev_priv->stolen_base + r->offset) >> PAGE_SHIFT;
233 else
234 pfn = page_to_pfn(r->pages[page_offset]);
235 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
236
237fail:
238 mutex_unlock(&dev->struct_mutex);
239 switch (ret) {
240 case 0:
241 case -ERESTARTSYS:
242 case -EINTR:
243 return VM_FAULT_NOPAGE;
244 case -ENOMEM:
245 return VM_FAULT_OOM;
246 default:
247 return VM_FAULT_SIGBUS;
248 }
249}
250
251static int psb_gem_create_stolen(struct drm_file *file, struct drm_device *dev,
252 int size, u32 *handle)
253{
254 struct gtt_range *gtt = psb_gtt_alloc_range(dev, size, "gem", 1);
255 if (gtt == NULL)
256 return -ENOMEM;
257 if (drm_gem_private_object_init(dev, &gtt->gem, size) != 0)
258 goto free_gtt;
259 if (drm_gem_handle_create(file, &gtt->gem, handle) == 0)
260 return 0;
261free_gtt:
262 psb_gtt_free_range(dev, gtt);
263 return -ENOMEM;
264}
265
266/*
267 * GEM interfaces for our specific client
268 */
269int psb_gem_create_ioctl(struct drm_device *dev, void *data,
270 struct drm_file *file)
271{
272 struct drm_psb_gem_create *args = data;
273 int ret;
274 if (args->flags & PSB_GEM_CREATE_STOLEN) {
275 ret = psb_gem_create_stolen(file, dev, args->size,
276 &args->handle);
277 if (ret == 0)
278 return 0;
279		/* Fall through */
280 args->flags &= ~PSB_GEM_CREATE_STOLEN;
281 }
282 return psb_gem_create(file, dev, args->size, &args->handle);
283}
284
285int psb_gem_mmap_ioctl(struct drm_device *dev, void *data,
286 struct drm_file *file)
287{
288 struct drm_psb_gem_mmap *args = data;
289 return dev->driver->dumb_map_offset(file, dev,
290 args->handle, &args->offset);
291}
292
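
As a rough illustration of the fault arithmetic in psb_gem_fault() above: the faulting page index is computed relative to the VMA start (vmf->pgoff holds the fake GEM offset, so it cannot be used directly), and the pfn is then taken either from the stolen area or from the backing page array. The sketch below uses invented addresses and mirrors only the stolen-backed branch.

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long vm_start    = 0x40000000UL; /* hypothetical mmap base */
	unsigned long fault_addr  = 0x40003000UL; /* hypothetical faulting address */
	unsigned long stolen_base = 0x3f800000UL; /* hypothetical stolen RAM base */
	unsigned long obj_offset  = 0x00100000UL; /* analogue of r->offset */

	/* Page index relative to the VMA start, as in psb_gem_fault() */
	unsigned long page_offset = (fault_addr - vm_start) >> PAGE_SHIFT;

	/* Stolen-backed branch: pfn comes straight from physical RAM */
	unsigned long pfn = (stolen_base + obj_offset) >> PAGE_SHIFT;

	printf("page_offset=%lu pfn=0x%lx\n", page_offset, pfn);
	return 0;
}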
diff --git a/drivers/staging/gma500/gem_glue.c b/drivers/staging/gma500/gem_glue.c
deleted file mode 100644
index daac12120653..000000000000
--- a/drivers/staging/gma500/gem_glue.c
+++ /dev/null
@@ -1,89 +0,0 @@
1/**************************************************************************
2 * Copyright (c) 2011, Intel Corporation.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 **************************************************************************/
19
20#include <drm/drmP.h>
21#include <drm/drm.h>
22
23void drm_gem_object_release_wrap(struct drm_gem_object *obj)
24{
25 /* Remove the list map if one is present */
26 if (obj->map_list.map) {
27 struct drm_gem_mm *mm = obj->dev->mm_private;
28 struct drm_map_list *list = &obj->map_list;
29 drm_ht_remove_item(&mm->offset_hash, &list->hash);
30 drm_mm_put_block(list->file_offset_node);
31 kfree(list->map);
32 list->map = NULL;
33 }
34 drm_gem_object_release(obj);
35}
36
37/**
38 * gem_create_mmap_offset - invent an mmap offset
39 * @obj: our object
40 *
41 * Standard implementation of offset generation for mmap as is
42 * duplicated in several drivers. This belongs in GEM.
43 */
44int gem_create_mmap_offset(struct drm_gem_object *obj)
45{
46 struct drm_device *dev = obj->dev;
47 struct drm_gem_mm *mm = dev->mm_private;
48 struct drm_map_list *list;
49 struct drm_local_map *map;
50 int ret;
51
52 list = &obj->map_list;
53 list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
54 if (list->map == NULL)
55 return -ENOMEM;
56 map = list->map;
57 map->type = _DRM_GEM;
58 map->size = obj->size;
59 map->handle = obj;
60
61 list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
62 obj->size / PAGE_SIZE, 0, 0);
63 if (!list->file_offset_node) {
64 dev_err(dev->dev, "failed to allocate offset for bo %d\n",
65 obj->name);
66 ret = -ENOSPC;
67 goto free_it;
68 }
69 list->file_offset_node = drm_mm_get_block(list->file_offset_node,
70 obj->size / PAGE_SIZE, 0);
71 if (!list->file_offset_node) {
72 ret = -ENOMEM;
73 goto free_it;
74 }
75 list->hash.key = list->file_offset_node->start;
76 ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
77 if (ret) {
78 dev_err(dev->dev, "failed to add to map hash\n");
79 goto free_mm;
80 }
81 return 0;
82
83free_mm:
84 drm_mm_put_block(list->file_offset_node);
85free_it:
86 kfree(list->map);
87 list->map = NULL;
88 return ret;
89}
diff --git a/drivers/staging/gma500/gem_glue.h b/drivers/staging/gma500/gem_glue.h
deleted file mode 100644
index ce5ce30f74db..000000000000
--- a/drivers/staging/gma500/gem_glue.h
+++ /dev/null
@@ -1,2 +0,0 @@
1extern void drm_gem_object_release_wrap(struct drm_gem_object *obj);
2extern int gem_create_mmap_offset(struct drm_gem_object *obj);
diff --git a/drivers/staging/gma500/gtt.c b/drivers/staging/gma500/gtt.c
deleted file mode 100644
index e770bd190a5c..000000000000
--- a/drivers/staging/gma500/gtt.c
+++ /dev/null
@@ -1,553 +0,0 @@
1/*
2 * Copyright (c) 2007, Intel Corporation.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
19 * Alan Cox <alan@linux.intel.com>
20 */
21
22#include <drm/drmP.h>
23#include "psb_drv.h"
24
25
26/*
27 * GTT resource allocator - manage page mappings in GTT space
28 */
29
30/**
31 * psb_gtt_mask_pte - generate GTT pte entry
32 * @pfn: page number to encode
33 * @type: type of memory in the GTT
34 *
35 * Set the GTT entry for the appropriate memory type.
36 */
37static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
38{
39 uint32_t mask = PSB_PTE_VALID;
40
41 if (type & PSB_MMU_CACHED_MEMORY)
42 mask |= PSB_PTE_CACHED;
43 if (type & PSB_MMU_RO_MEMORY)
44 mask |= PSB_PTE_RO;
45 if (type & PSB_MMU_WO_MEMORY)
46 mask |= PSB_PTE_WO;
47
48 return (pfn << PAGE_SHIFT) | mask;
49}
50
51/**
52 * psb_gtt_entry - find the GTT entries for a gtt_range
53 * @dev: our DRM device
54 * @r: our GTT range
55 *
56 * Given a gtt_range object return the GTT offset of the page table
57 * entries for this gtt_range
58 */
59u32 *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
60{
61 struct drm_psb_private *dev_priv = dev->dev_private;
62 unsigned long offset;
63
64 offset = r->resource.start - dev_priv->gtt_mem->start;
65
66 return dev_priv->gtt_map + (offset >> PAGE_SHIFT);
67}
68
69/**
70 * psb_gtt_insert - put an object into the GTT
71 * @dev: our DRM device
72 * @r: our GTT range
73 *
74 * Take our preallocated GTT range and insert the GEM object into
75 * the GTT. This is protected via the gtt mutex which the caller
76 * must hold.
77 */
78static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
79{
80 u32 *gtt_slot, pte;
81 struct page **pages;
82 int i;
83
84 if (r->pages == NULL) {
85 WARN_ON(1);
86 return -EINVAL;
87 }
88
89 WARN_ON(r->stolen); /* refcount these maybe ? */
90
91 gtt_slot = psb_gtt_entry(dev, r);
92 pages = r->pages;
93
94 /* Make sure changes are visible to the GPU */
95 set_pages_array_uc(pages, r->npage);
96
97 /* Write our page entries into the GTT itself */
98 for (i = r->roll; i < r->npage; i++) {
99 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
100 iowrite32(pte, gtt_slot++);
101 }
102 for (i = 0; i < r->roll; i++) {
103 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
104 iowrite32(pte, gtt_slot++);
105 }
106 /* Make sure all the entries are set before we return */
107 ioread32(gtt_slot - 1);
108
109 return 0;
110}
111
112/**
113 * psb_gtt_remove - remove an object from the GTT
114 * @dev: our DRM device
115 * @r: our GTT range
116 *
117 * Remove a preallocated GTT range from the GTT. Overwrite all the
118 * page table entries with the dummy page. This is protected via the gtt
119 * mutex which the caller must hold.
120 */
121static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
122{
123 struct drm_psb_private *dev_priv = dev->dev_private;
124 u32 *gtt_slot, pte;
125 int i;
126
127 WARN_ON(r->stolen);
128
129 gtt_slot = psb_gtt_entry(dev, r);
130 pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page), 0);
131
132 for (i = 0; i < r->npage; i++)
133 iowrite32(pte, gtt_slot++);
134 ioread32(gtt_slot - 1);
135 set_pages_array_wb(r->pages, r->npage);
136}
137
138/**
139 * psb_gtt_roll - set scrolling position
140 * @dev: our DRM device
141 * @r: the gtt mapping we are using
142 * @roll: roll offset
143 *
144 * Roll an existing pinned mapping by moving the pages through the GTT.
145 * This allows us to implement hardware scrolling on the consoles without
146 * a 2D engine
147 */
148void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
149{
150 u32 *gtt_slot, pte;
151 int i;
152
153 if (roll >= r->npage) {
154 WARN_ON(1);
155 return;
156 }
157
158 r->roll = roll;
159
160 /* Not currently in the GTT - no worry we will write the mapping at
161 the right position when it gets pinned */
162 if (!r->stolen && !r->in_gart)
163 return;
164
165 gtt_slot = psb_gtt_entry(dev, r);
166
167 for (i = r->roll; i < r->npage; i++) {
168 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
169 iowrite32(pte, gtt_slot++);
170 }
171 for (i = 0; i < r->roll; i++) {
172 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
173 iowrite32(pte, gtt_slot++);
174 }
175 ioread32(gtt_slot - 1);
176}
177
178/**
179 * psb_gtt_attach_pages - attach and pin GEM pages
180 * @gt: the gtt range
181 *
182 * Pin and build an in kernel list of the pages that back our GEM object.
183 * While we hold this the pages cannot be swapped out. This is protected
184 * via the gtt mutex which the caller must hold.
185 */
186static int psb_gtt_attach_pages(struct gtt_range *gt)
187{
188 struct inode *inode;
189 struct address_space *mapping;
190 int i;
191 struct page *p;
192 int pages = gt->gem.size / PAGE_SIZE;
193
194 WARN_ON(gt->pages);
195
196 /* This is the shared memory object that backs the GEM resource */
197 inode = gt->gem.filp->f_path.dentry->d_inode;
198 mapping = inode->i_mapping;
199
200 gt->pages = kmalloc(pages * sizeof(struct page *), GFP_KERNEL);
201 if (gt->pages == NULL)
202 return -ENOMEM;
203 gt->npage = pages;
204
205 for (i = 0; i < pages; i++) {
206 /* FIXME: needs updating as per mail from Hugh Dickins */
207 p = read_cache_page_gfp(mapping, i,
208 __GFP_COLD | GFP_KERNEL);
209 if (IS_ERR(p))
210 goto err;
211 gt->pages[i] = p;
212 }
213 return 0;
214
215err:
216 while (i--)
217 page_cache_release(gt->pages[i]);
218 kfree(gt->pages);
219 gt->pages = NULL;
220 return PTR_ERR(p);
221}
222
223/**
224 * psb_gtt_detach_pages - detach and unpin GEM pages
225 * @gt: the gtt range
226 *
227 * Undo the effect of psb_gtt_attach_pages. At this point the pages
228 * must have been removed from the GTT as they could now be paged out
229 * and move bus address. This is protected via the gtt mutex which the
230 * caller must hold.
231 */
232static void psb_gtt_detach_pages(struct gtt_range *gt)
233{
234 int i;
235 for (i = 0; i < gt->npage; i++) {
236 /* FIXME: do we need to force dirty */
237 set_page_dirty(gt->pages[i]);
238 page_cache_release(gt->pages[i]);
239 }
240 kfree(gt->pages);
241 gt->pages = NULL;
242}
243
244/**
245 * psb_gtt_pin - pin pages into the GTT
246 * @gt: range to pin
247 *
248 * Pin a set of pages into the GTT. The pins are refcounted so that
249 * multiple pins need multiple unpins to undo.
250 *
251 * Non GEM backed objects treat this as a no-op as they are always GTT
252 * backed objects.
253 */
254int psb_gtt_pin(struct gtt_range *gt)
255{
256 int ret = 0;
257 struct drm_device *dev = gt->gem.dev;
258 struct drm_psb_private *dev_priv = dev->dev_private;
259
260 mutex_lock(&dev_priv->gtt_mutex);
261
262 if (gt->in_gart == 0 && gt->stolen == 0) {
263 ret = psb_gtt_attach_pages(gt);
264 if (ret < 0)
265 goto out;
266 ret = psb_gtt_insert(dev, gt);
267 if (ret < 0) {
268 psb_gtt_detach_pages(gt);
269 goto out;
270 }
271 }
272 gt->in_gart++;
273out:
274 mutex_unlock(&dev_priv->gtt_mutex);
275 return ret;
276}
277
278/**
279 * psb_gtt_unpin - Drop a GTT pin requirement
280 * @gt: range to unpin
281 *
282 * Undoes the effect of psb_gtt_pin. On the last drop the GEM object
283 * will be removed from the GTT which will also drop the page references
284 * and allow the VM to clean up or page stuff.
285 *
286 * Non GEM backed objects treat this as a no-op as they are always GTT
287 * backed objects.
288 */
289void psb_gtt_unpin(struct gtt_range *gt)
290{
291 struct drm_device *dev = gt->gem.dev;
292 struct drm_psb_private *dev_priv = dev->dev_private;
293
294 mutex_lock(&dev_priv->gtt_mutex);
295
296 WARN_ON(!gt->in_gart);
297
298 gt->in_gart--;
299 if (gt->in_gart == 0 && gt->stolen == 0) {
300 psb_gtt_remove(dev, gt);
301 psb_gtt_detach_pages(gt);
302 }
303 mutex_unlock(&dev_priv->gtt_mutex);
304}
305
306/*
307 * GTT resource allocator - allocate and manage GTT address space
308 */
309
310/**
311 * psb_gtt_alloc_range - allocate GTT address space
312 * @dev: Our DRM device
313 * @len: length (bytes) of address space required
314 * @name: resource name
315 * @backed: resource should be backed by stolen pages
316 *
317 * Ask the kernel core to find us a suitable range of addresses
318 * to use for a GTT mapping.
319 *
320 * Returns a gtt_range structure describing the object, or NULL on
321 * error. On successful return the resource is both allocated and marked
322 * as in use.
323 */
324struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
325 const char *name, int backed)
326{
327 struct drm_psb_private *dev_priv = dev->dev_private;
328 struct gtt_range *gt;
329 struct resource *r = dev_priv->gtt_mem;
330 int ret;
331 unsigned long start, end;
332
333 if (backed) {
334 /* The start of the GTT is the stolen pages */
335 start = r->start;
336 end = r->start + dev_priv->gtt.stolen_size - 1;
337 } else {
338 /* The rest we will use for GEM backed objects */
339 start = r->start + dev_priv->gtt.stolen_size;
340 end = r->end;
341 }
342
343 gt = kzalloc(sizeof(struct gtt_range), GFP_KERNEL);
344 if (gt == NULL)
345 return NULL;
346 gt->resource.name = name;
347 gt->stolen = backed;
348 gt->in_gart = backed;
349 gt->roll = 0;
350 /* Ensure this is set for non GEM objects */
351 gt->gem.dev = dev;
352 ret = allocate_resource(dev_priv->gtt_mem, &gt->resource,
353 len, start, end, PAGE_SIZE, NULL, NULL);
354 if (ret == 0) {
355 gt->offset = gt->resource.start - r->start;
356 return gt;
357 }
358 kfree(gt);
359 return NULL;
360}
361
362/**
363 * psb_gtt_free_range - release GTT address space
364 * @dev: our DRM device
365 * @gt: a mapping created with psb_gtt_alloc_range
366 *
367 * Release a resource that was allocated with psb_gtt_alloc_range. If the
368 * object has been pinned by mmap users we clean this up here currently.
369 */
370void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt)
371{
372 /* Undo the mmap pin if we are destroying the object */
373 if (gt->mmapping) {
374 psb_gtt_unpin(gt);
375 gt->mmapping = 0;
376 }
377 WARN_ON(gt->in_gart && !gt->stolen);
378 release_resource(&gt->resource);
379 kfree(gt);
380}
381
382void psb_gtt_alloc(struct drm_device *dev)
383{
384 struct drm_psb_private *dev_priv = dev->dev_private;
385 init_rwsem(&dev_priv->gtt.sem);
386}
387
388void psb_gtt_takedown(struct drm_device *dev)
389{
390 struct drm_psb_private *dev_priv = dev->dev_private;
391
392 if (dev_priv->gtt_map) {
393 iounmap(dev_priv->gtt_map);
394 dev_priv->gtt_map = NULL;
395 }
396 if (dev_priv->gtt_initialized) {
397 pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
398 dev_priv->gmch_ctrl);
399 PSB_WVDC32(dev_priv->pge_ctl, PSB_PGETBL_CTL);
400 (void) PSB_RVDC32(PSB_PGETBL_CTL);
401 }
402 if (dev_priv->vram_addr)
403 iounmap(dev_priv->gtt_map);
404}
405
406int psb_gtt_init(struct drm_device *dev, int resume)
407{
408 struct drm_psb_private *dev_priv = dev->dev_private;
409 unsigned gtt_pages;
410 unsigned long stolen_size, vram_stolen_size;
411 unsigned i, num_pages;
412 unsigned pfn_base;
413 uint32_t vram_pages;
414 uint32_t dvmt_mode = 0;
415 struct psb_gtt *pg;
416
417 int ret = 0;
418 uint32_t pte;
419
420 mutex_init(&dev_priv->gtt_mutex);
421
422 psb_gtt_alloc(dev);
423 pg = &dev_priv->gtt;
424
425 /* Enable the GTT */
426 pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl);
427 pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
428 dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);
429
430 dev_priv->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
431 PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
432 (void) PSB_RVDC32(PSB_PGETBL_CTL);
433
434 /* The root resource we allocate address space from */
435 dev_priv->gtt_initialized = 1;
436
437 pg->gtt_phys_start = dev_priv->pge_ctl & PAGE_MASK;
438
439 /*
440 * The video mmu has a hw bug when accessing 0x0D0000000.
441 * Make gatt start at 0x0e000,0000. This doesn't actually
442 * matter for us but may do if the video acceleration ever
443 * gets opened up.
444 */
445 pg->mmu_gatt_start = 0xE0000000;
446
447 pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
448 gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE)
449 >> PAGE_SHIFT;
450 /* Some CDV firmware doesn't report this currently. In which case the
451 system has 64 gtt pages */
452 if (pg->gtt_start == 0 || gtt_pages == 0) {
453 dev_err(dev->dev, "GTT PCI BAR not initialized.\n");
454 gtt_pages = 64;
455 pg->gtt_start = dev_priv->pge_ctl;
456 }
457
458 pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
459 pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
460 >> PAGE_SHIFT;
461 dev_priv->gtt_mem = &dev->pdev->resource[PSB_GATT_RESOURCE];
462
463 if (pg->gatt_pages == 0 || pg->gatt_start == 0) {
464 static struct resource fudge; /* Preferably peppermint */
465 /* This can occur on CDV SDV systems. Fudge it in this case.
466 We really don't care what imaginary space is being allocated
467 at this point */
468 dev_err(dev->dev, "GATT PCI BAR not initialized.\n");
469 pg->gatt_start = 0x40000000;
470 pg->gatt_pages = (128 * 1024 * 1024) >> PAGE_SHIFT;
471 /* This is a little confusing but in fact the GTT is providing
472 a view from the GPU into memory and not vice versa. As such
473 this is really allocating space that is not the same as the
474 CPU address space on CDV */
475 fudge.start = 0x40000000;
476 fudge.end = 0x40000000 + 128 * 1024 * 1024 - 1;
477 fudge.name = "fudge";
478 fudge.flags = IORESOURCE_MEM;
479 dev_priv->gtt_mem = &fudge;
480 }
481
482 pci_read_config_dword(dev->pdev, PSB_BSM, &dev_priv->stolen_base);
483 vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base
484 - PAGE_SIZE;
485
486 stolen_size = vram_stolen_size;
487
488 printk(KERN_INFO "Stolen memory information\n");
489 printk(KERN_INFO " base in RAM: 0x%x\n", dev_priv->stolen_base);
490 printk(KERN_INFO " size: %luK, calculated by (GTT RAM base) - (Stolen base), seems wrong\n",
491 vram_stolen_size/1024);
492 dvmt_mode = (dev_priv->gmch_ctrl >> 4) & 0x7;
493 printk(KERN_INFO " the correct size should be: %dM(dvmt mode=%d)\n",
494 (dvmt_mode == 1) ? 1 : (2 << (dvmt_mode - 1)), dvmt_mode);
495
496 if (resume && (gtt_pages != pg->gtt_pages) &&
497 (stolen_size != pg->stolen_size)) {
498 dev_err(dev->dev, "GTT resume error.\n");
499 ret = -EINVAL;
500 goto out_err;
501 }
502
503 pg->gtt_pages = gtt_pages;
504 pg->stolen_size = stolen_size;
505 dev_priv->vram_stolen_size = vram_stolen_size;
506
507 /*
508 * Map the GTT and the stolen memory area
509 */
510 dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start,
511 gtt_pages << PAGE_SHIFT);
512 if (!dev_priv->gtt_map) {
513 dev_err(dev->dev, "Failure to map gtt.\n");
514 ret = -ENOMEM;
515 goto out_err;
516 }
517
518 dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base, stolen_size);
519 if (!dev_priv->vram_addr) {
520 dev_err(dev->dev, "Failure to map stolen base.\n");
521 ret = -ENOMEM;
522 goto out_err;
523 }
524
525 /*
526 * Insert vram stolen pages into the GTT
527 */
528
529 pfn_base = dev_priv->stolen_base >> PAGE_SHIFT;
530 vram_pages = num_pages = vram_stolen_size >> PAGE_SHIFT;
531 printk(KERN_INFO"Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
532 num_pages, pfn_base << PAGE_SHIFT, 0);
533 for (i = 0; i < num_pages; ++i) {
534 pte = psb_gtt_mask_pte(pfn_base + i, 0);
535 iowrite32(pte, dev_priv->gtt_map + i);
536 }
537
538 /*
539 * Init rest of GTT to the scratch page to avoid accidents or scribbles
540 */
541
542 pfn_base = page_to_pfn(dev_priv->scratch_page);
543 pte = psb_gtt_mask_pte(pfn_base, 0);
544 for (; i < gtt_pages; ++i)
545 iowrite32(pte, dev_priv->gtt_map + i);
546
547 (void) ioread32(dev_priv->gtt_map + i - 1);
548 return 0;
549
550out_err:
551 psb_gtt_takedown(dev);
552 return ret;
553}
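
A small sketch of the PTE encoding done by psb_gtt_mask_pte() above: the page frame number occupies the bits above PAGE_SHIFT and the attribute flags sit in the low bits. The PTE_VALID/PTE_CACHED values below are illustrative stand-ins, not the real PSB_PTE_* definitions.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PTE_VALID  0x1u  /* stand-in for PSB_PTE_VALID  */
#define PTE_CACHED 0x2u  /* stand-in for PSB_PTE_CACHED */

static uint32_t mask_pte(uint32_t pfn, int cached)
{
	uint32_t mask = PTE_VALID;

	if (cached)
		mask |= PTE_CACHED;
	/* Page frame number in the high bits, attribute flags in the low bits */
	return (pfn << PAGE_SHIFT) | mask;
}

int main(void)
{
	printf("pte=0x%08x\n", mask_pte(0x12345, 1)); /* prints pte=0x12345003 */
	return 0;
}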
diff --git a/drivers/staging/gma500/gtt.h b/drivers/staging/gma500/gtt.h
deleted file mode 100644
index aa1742387f5a..000000000000
--- a/drivers/staging/gma500/gtt.h
+++ /dev/null
@@ -1,64 +0,0 @@
1/**************************************************************************
2 * Copyright (c) 2007-2008, Intel Corporation.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 **************************************************************************/
19
20#ifndef _PSB_GTT_H_
21#define _PSB_GTT_H_
22
23#include <drm/drmP.h>
24
25/* This wants cleaning up with respect to the psb_dev and un-needed stuff */
26struct psb_gtt {
27 uint32_t gatt_start;
28 uint32_t mmu_gatt_start;
29 uint32_t gtt_start;
30 uint32_t gtt_phys_start;
31 unsigned gtt_pages;
32 unsigned gatt_pages;
33 unsigned long stolen_size;
34 unsigned long vram_stolen_size;
35 struct rw_semaphore sem;
36};
37
38/* Exported functions */
39extern int psb_gtt_init(struct drm_device *dev, int resume);
40extern void psb_gtt_takedown(struct drm_device *dev);
41
42/* Each gtt_range describes an allocation in the GTT area */
43struct gtt_range {
44 struct resource resource; /* Resource for our allocation */
45 u32 offset; /* GTT offset of our object */
46 struct drm_gem_object gem; /* GEM high level stuff */
47 int in_gart; /* Currently in the GART (ref ct) */
48 bool stolen; /* Backed from stolen RAM */
49 bool mmapping; /* Is mmappable */
50 struct page **pages; /* Backing pages if present */
51 int npage; /* Number of backing pages */
52 int roll; /* Roll applied to the GTT entries */
53};
54
55extern struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
56 const char *name, int backed);
57extern void psb_gtt_kref_put(struct gtt_range *gt);
58extern void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt);
59extern int psb_gtt_pin(struct gtt_range *gt);
60extern void psb_gtt_unpin(struct gtt_range *gt);
61extern void psb_gtt_roll(struct drm_device *dev,
62 struct gtt_range *gt, int roll);
63
64#endif
diff --git a/drivers/staging/gma500/intel_bios.c b/drivers/staging/gma500/intel_bios.c
deleted file mode 100644
index 096757f9bc89..000000000000
--- a/drivers/staging/gma500/intel_bios.c
+++ /dev/null
@@ -1,303 +0,0 @@
1/*
2 * Copyright (c) 2006 Intel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Authors:
18 * Eric Anholt <eric@anholt.net>
19 *
20 */
21#include <drm/drmP.h>
22#include <drm/drm.h>
23#include "psb_drm.h"
24#include "psb_drv.h"
25#include "psb_intel_drv.h"
26#include "psb_intel_reg.h"
27#include "intel_bios.h"
28
29
30static void *find_section(struct bdb_header *bdb, int section_id)
31{
32 u8 *base = (u8 *)bdb;
33 int index = 0;
34 u16 total, current_size;
35 u8 current_id;
36
37 /* skip to first section */
38 index += bdb->header_size;
39 total = bdb->bdb_size;
40
41 /* walk the sections looking for section_id */
42 while (index < total) {
43 current_id = *(base + index);
44 index++;
45 current_size = *((u16 *)(base + index));
46 index += 2;
47 if (current_id == section_id)
48 return base + index;
49 index += current_size;
50 }
51
52 return NULL;
53}
54
55static void fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
56 struct lvds_dvo_timing *dvo_timing)
57{
58 panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
59 dvo_timing->hactive_lo;
60 panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
61 ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
62 panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
63 dvo_timing->hsync_pulse_width;
64 panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
65 ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);
66
67 panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) |
68 dvo_timing->vactive_lo;
69 panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
70 dvo_timing->vsync_off;
71 panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
72 dvo_timing->vsync_pulse_width;
73 panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
74 ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
75 panel_fixed_mode->clock = dvo_timing->clock * 10;
76 panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
77
78 /* Some VBTs have bogus h/vtotal values */
79 if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
80 panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
81 if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal)
82 panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1;
83
84 drm_mode_set_name(panel_fixed_mode);
85}
86
87static void parse_backlight_data(struct drm_psb_private *dev_priv,
88 struct bdb_header *bdb)
89{
90 struct bdb_lvds_backlight *vbt_lvds_bl = NULL;
91 struct bdb_lvds_backlight *lvds_bl;
92 u8 p_type = 0;
93 void *bl_start = NULL;
94 struct bdb_lvds_options *lvds_opts
95 = find_section(bdb, BDB_LVDS_OPTIONS);
96
97 dev_priv->lvds_bl = NULL;
98
99 if (lvds_opts)
100 p_type = lvds_opts->panel_type;
101 else
102 return;
103
104 bl_start = find_section(bdb, BDB_LVDS_BACKLIGHT);
105 vbt_lvds_bl = (struct bdb_lvds_backlight *)(bl_start + 1) + p_type;
106
107 lvds_bl = kzalloc(sizeof(*vbt_lvds_bl), GFP_KERNEL);
108 if (!lvds_bl) {
109 dev_err(dev_priv->dev->dev, "out of memory for backlight data\n");
110 return;
111 }
112 memcpy(lvds_bl, vbt_lvds_bl, sizeof(*vbt_lvds_bl));
113 dev_priv->lvds_bl = lvds_bl;
114}
115
116/* Try to find integrated panel data */
117static void parse_lfp_panel_data(struct drm_psb_private *dev_priv,
118 struct bdb_header *bdb)
119{
120 struct bdb_lvds_options *lvds_options;
121 struct bdb_lvds_lfp_data *lvds_lfp_data;
122 struct bdb_lvds_lfp_data_entry *entry;
123 struct lvds_dvo_timing *dvo_timing;
124 struct drm_display_mode *panel_fixed_mode;
125
126 /* Defaults if we can't find VBT info */
127 dev_priv->lvds_dither = 0;
128 dev_priv->lvds_vbt = 0;
129
130 lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
131 if (!lvds_options)
132 return;
133
134 dev_priv->lvds_dither = lvds_options->pixel_dither;
135 if (lvds_options->panel_type == 0xff)
136 return;
137
138 lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
139 if (!lvds_lfp_data)
140 return;
141
142
143 entry = &lvds_lfp_data->data[lvds_options->panel_type];
144 dvo_timing = &entry->dvo_timing;
145
146 panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode),
147 GFP_KERNEL);
148 if (panel_fixed_mode == NULL) {
149 dev_err(dev_priv->dev->dev, "out of memory for fixed panel mode\n");
150 return;
151 }
152
153 dev_priv->lvds_vbt = 1;
154 fill_detail_timing_data(panel_fixed_mode, dvo_timing);
155
156 if (panel_fixed_mode->htotal > 0 && panel_fixed_mode->vtotal > 0) {
157 dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
158 drm_mode_debug_printmodeline(panel_fixed_mode);
159 } else {
160 dev_dbg(dev_priv->dev->dev, "ignoring invalid LVDS VBT\n");
161 dev_priv->lvds_vbt = 0;
162 kfree(panel_fixed_mode);
163 }
164 return;
165}
166
167/* Try to find sdvo panel data */
168static void parse_sdvo_panel_data(struct drm_psb_private *dev_priv,
169 struct bdb_header *bdb)
170{
171 struct bdb_sdvo_lvds_options *sdvo_lvds_options;
172 struct lvds_dvo_timing *dvo_timing;
173 struct drm_display_mode *panel_fixed_mode;
174
175 dev_priv->sdvo_lvds_vbt_mode = NULL;
176
177 sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
178 if (!sdvo_lvds_options)
179 return;
180
181 dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS);
182 if (!dvo_timing)
183 return;
184
185 panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
186
187 if (!panel_fixed_mode)
188 return;
189
190 fill_detail_timing_data(panel_fixed_mode,
191 dvo_timing + sdvo_lvds_options->panel_type);
192
193 dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode;
194
195 return;
196}
197
198static void parse_general_features(struct drm_psb_private *dev_priv,
199 struct bdb_header *bdb)
200{
201 struct bdb_general_features *general;
202
203 /* Set sensible defaults in case we can't find the general block */
204 dev_priv->int_tv_support = 1;
205 dev_priv->int_crt_support = 1;
206
207 general = find_section(bdb, BDB_GENERAL_FEATURES);
208 if (general) {
209 dev_priv->int_tv_support = general->int_tv_support;
210 dev_priv->int_crt_support = general->int_crt_support;
211 dev_priv->lvds_use_ssc = general->enable_ssc;
212
213 if (dev_priv->lvds_use_ssc) {
214 dev_priv->lvds_ssc_freq
215 = general->ssc_freq ? 100 : 96;
216 }
217 }
218}
219
220/**
221 * psb_intel_init_bios - initialize VBIOS settings & find VBT
222 * @dev: DRM device
223 *
224 * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers
225 * to appropriate values.
226 *
227 * VBT existence is a sanity check that is relied on by other i830_bios.c code.
228 * Note that it would be better to use a BIOS call to get the VBT, as BIOSes may
229 * feed an updated VBT back through that, compared to what we'll fetch using
230 * this method of groping around in the BIOS data.
231 *
232 * Returns 0 on success, nonzero on failure.
233 */
234bool psb_intel_init_bios(struct drm_device *dev)
235{
236 struct drm_psb_private *dev_priv = dev->dev_private;
237 struct pci_dev *pdev = dev->pdev;
238 struct vbt_header *vbt = NULL;
239 struct bdb_header *bdb;
240 u8 __iomem *bios;
241 size_t size;
242 int i;
243
244 bios = pci_map_rom(pdev, &size);
245 if (!bios)
246 return -1;
247
248 /* Scour memory looking for the VBT signature */
249 for (i = 0; i + 4 < size; i++) {
250 if (!memcmp(bios + i, "$VBT", 4)) {
251 vbt = (struct vbt_header *)(bios + i);
252 break;
253 }
254 }
255
256 if (!vbt) {
257 dev_err(dev->dev, "VBT signature missing\n");
258 pci_unmap_rom(pdev, bios);
259 return -1;
260 }
261
262 bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
263
264 /* Grab useful general definitions */
265 parse_general_features(dev_priv, bdb);
266 parse_lfp_panel_data(dev_priv, bdb);
267 parse_sdvo_panel_data(dev_priv, bdb);
268 parse_backlight_data(dev_priv, bdb);
269
270 pci_unmap_rom(pdev, bios);
271
272 return 0;
273}
274
275/**
276 * Destroy and free VBT data
277 */
278void psb_intel_destroy_bios(struct drm_device *dev)
279{
280 struct drm_psb_private *dev_priv = dev->dev_private;
281 struct drm_display_mode *sdvo_lvds_vbt_mode =
282 dev_priv->sdvo_lvds_vbt_mode;
283 struct drm_display_mode *lfp_lvds_vbt_mode =
284 dev_priv->lfp_lvds_vbt_mode;
285 struct bdb_lvds_backlight *lvds_bl =
286 dev_priv->lvds_bl;
287
288 /*free sdvo panel mode*/
289 if (sdvo_lvds_vbt_mode) {
290 dev_priv->sdvo_lvds_vbt_mode = NULL;
291 kfree(sdvo_lvds_vbt_mode);
292 }
293
294 if (lfp_lvds_vbt_mode) {
295 dev_priv->lfp_lvds_vbt_mode = NULL;
296 kfree(lfp_lvds_vbt_mode);
297 }
298
299 if (lvds_bl) {
300 dev_priv->lvds_bl = NULL;
301 kfree(lvds_bl);
302 }
303}
diff --git a/drivers/staging/gma500/intel_bios.h b/drivers/staging/gma500/intel_bios.h
deleted file mode 100644
index 70f1bf018183..000000000000
--- a/drivers/staging/gma500/intel_bios.h
+++ /dev/null
@@ -1,430 +0,0 @@
1/*
2 * Copyright (c) 2006 Intel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Authors:
18 * Eric Anholt <eric@anholt.net>
19 *
20 */
21
22#ifndef _I830_BIOS_H_
23#define _I830_BIOS_H_
24
25#include <drm/drmP.h>
26
27struct vbt_header {
28 u8 signature[20]; /**< Always starts with '$VBT' */
29 u16 version; /**< decimal */
30 u16 header_size; /**< in bytes */
31 u16 vbt_size; /**< in bytes */
32 u8 vbt_checksum;
33 u8 reserved0;
34 u32 bdb_offset; /**< from beginning of VBT */
35 u32 aim_offset[4]; /**< from beginning of VBT */
36} __attribute__((packed));
37
38
39struct bdb_header {
40 u8 signature[16]; /**< Always 'BIOS_DATA_BLOCK' */
41 u16 version; /**< decimal */
42 u16 header_size; /**< in bytes */
43 u16 bdb_size; /**< in bytes */
44};
45
46/* strictly speaking, this is a "skip" block, but it has interesting info */
47struct vbios_data {
48 u8 type; /* 0 == desktop, 1 == mobile */
49 u8 relstage;
50 u8 chipset;
51 u8 lvds_present:1;
52 u8 tv_present:1;
53 u8 rsvd2:6; /* finish byte */
54 u8 rsvd3[4];
55 u8 signon[155];
56 u8 copyright[61];
57 u16 code_segment;
58 u8 dos_boot_mode;
59 u8 bandwidth_percent;
60 u8 rsvd4; /* popup memory size */
61 u8 resize_pci_bios;
62 u8 rsvd5; /* is crt already on ddc2 */
63} __attribute__((packed));
64
65/*
66 * There are several types of BIOS data blocks (BDBs), each block has
67 * an ID and size in the first 3 bytes (ID in first, size in next 2).
68 * Known types are listed below.
69 */
70#define BDB_GENERAL_FEATURES 1
71#define BDB_GENERAL_DEFINITIONS 2
72#define BDB_OLD_TOGGLE_LIST 3
73#define BDB_MODE_SUPPORT_LIST 4
74#define BDB_GENERIC_MODE_TABLE 5
75#define BDB_EXT_MMIO_REGS 6
76#define BDB_SWF_IO 7
77#define BDB_SWF_MMIO 8
78#define BDB_DOT_CLOCK_TABLE 9
79#define BDB_MODE_REMOVAL_TABLE 10
80#define BDB_CHILD_DEVICE_TABLE 11
81#define BDB_DRIVER_FEATURES 12
82#define BDB_DRIVER_PERSISTENCE 13
83#define BDB_EXT_TABLE_PTRS 14
84#define BDB_DOT_CLOCK_OVERRIDE 15
85#define BDB_DISPLAY_SELECT 16
86/* 17 rsvd */
87#define BDB_DRIVER_ROTATION 18
88#define BDB_DISPLAY_REMOVE 19
89#define BDB_OEM_CUSTOM 20
90#define BDB_EFP_LIST 21 /* workarounds for VGA hsync/vsync */
91#define BDB_SDVO_LVDS_OPTIONS 22
92#define BDB_SDVO_PANEL_DTDS 23
93#define BDB_SDVO_LVDS_PNP_IDS 24
94#define BDB_SDVO_LVDS_POWER_SEQ 25
95#define BDB_TV_OPTIONS 26
96#define BDB_LVDS_OPTIONS 40
97#define BDB_LVDS_LFP_DATA_PTRS 41
98#define BDB_LVDS_LFP_DATA 42
99#define BDB_LVDS_BACKLIGHT 43
100#define BDB_LVDS_POWER 44
101#define BDB_SKIP 254 /* VBIOS private block, ignore */
102
103struct bdb_general_features {
104 /* bits 1 */
105 u8 panel_fitting:2;
106 u8 flexaim:1;
107 u8 msg_enable:1;
108 u8 clear_screen:3;
109 u8 color_flip:1;
110
111 /* bits 2 */
112 u8 download_ext_vbt:1;
113 u8 enable_ssc:1;
114 u8 ssc_freq:1;
115 u8 enable_lfp_on_override:1;
116 u8 disable_ssc_ddt:1;
117 u8 rsvd8:3; /* finish byte */
118
119 /* bits 3 */
120 u8 disable_smooth_vision:1;
121 u8 single_dvi:1;
122 u8 rsvd9:6; /* finish byte */
123
124 /* bits 4 */
125 u8 legacy_monitor_detect;
126
127 /* bits 5 */
128 u8 int_crt_support:1;
129 u8 int_tv_support:1;
130 u8 rsvd11:6; /* finish byte */
131} __attribute__((packed));
132
133struct bdb_general_definitions {
134 /* DDC GPIO */
135 u8 crt_ddc_gmbus_pin;
136
137 /* DPMS bits */
138 u8 dpms_acpi:1;
139 u8 skip_boot_crt_detect:1;
140 u8 dpms_aim:1;
141 u8 rsvd1:5; /* finish byte */
142
143 /* boot device bits */
144 u8 boot_display[2];
145 u8 child_dev_size;
146
147 /* device info */
148 u8 tv_or_lvds_info[33];
149 u8 dev1[33];
150 u8 dev2[33];
151 u8 dev3[33];
152 u8 dev4[33];
153 /* may be another device block here on some platforms */
154};
155
156struct bdb_lvds_options {
157 u8 panel_type;
158 u8 rsvd1;
159 /* LVDS capabilities, stored in a dword */
160 u8 pfit_mode:2;
161 u8 pfit_text_mode_enhanced:1;
162 u8 pfit_gfx_mode_enhanced:1;
163 u8 pfit_ratio_auto:1;
164 u8 pixel_dither:1;
165 u8 lvds_edid:1;
166 u8 rsvd2:1;
167 u8 rsvd4;
168} __attribute__((packed));
169
170struct bdb_lvds_backlight {
171 u8 type:2;
172 u8 pol:1;
173 u8 gpio:3;
174 u8 gmbus:2;
175 u16 freq;
176 u8 minbrightness;
177 u8 i2caddr;
178 u8 brightnesscmd;
179 /*FIXME: more...*/
180} __attribute__((packed));
181
182/* LFP pointer table contains entries to the struct below */
183struct bdb_lvds_lfp_data_ptr {
184 u16 fp_timing_offset; /* offsets are from start of bdb */
185 u8 fp_table_size;
186 u16 dvo_timing_offset;
187 u8 dvo_table_size;
188 u16 panel_pnp_id_offset;
189 u8 pnp_table_size;
190} __attribute__((packed));
191
192struct bdb_lvds_lfp_data_ptrs {
193 u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
194 struct bdb_lvds_lfp_data_ptr ptr[16];
195} __attribute__((packed));
196
197/* LFP data has 3 blocks per entry */
198struct lvds_fp_timing {
199 u16 x_res;
200 u16 y_res;
201 u32 lvds_reg;
202 u32 lvds_reg_val;
203 u32 pp_on_reg;
204 u32 pp_on_reg_val;
205 u32 pp_off_reg;
206 u32 pp_off_reg_val;
207 u32 pp_cycle_reg;
208 u32 pp_cycle_reg_val;
209 u32 pfit_reg;
210 u32 pfit_reg_val;
211 u16 terminator;
212} __attribute__((packed));
213
214struct lvds_dvo_timing {
215 u16 clock; /**< In 10khz */
216 u8 hactive_lo;
217 u8 hblank_lo;
218 u8 hblank_hi:4;
219 u8 hactive_hi:4;
220 u8 vactive_lo;
221 u8 vblank_lo;
222 u8 vblank_hi:4;
223 u8 vactive_hi:4;
224 u8 hsync_off_lo;
225 u8 hsync_pulse_width;
226 u8 vsync_pulse_width:4;
227 u8 vsync_off:4;
228 u8 rsvd0:6;
229 u8 hsync_off_hi:2;
230 u8 h_image;
231 u8 v_image;
232 u8 max_hv;
233 u8 h_border;
234 u8 v_border;
235 u8 rsvd1:3;
236 u8 digital:2;
237 u8 vsync_positive:1;
238 u8 hsync_positive:1;
239 u8 rsvd2:1;
240} __attribute__((packed));
241
242struct lvds_pnp_id {
243 u16 mfg_name;
244 u16 product_code;
245 u32 serial;
246 u8 mfg_week;
247 u8 mfg_year;
248} __attribute__((packed));
249
250struct bdb_lvds_lfp_data_entry {
251 struct lvds_fp_timing fp_timing;
252 struct lvds_dvo_timing dvo_timing;
253 struct lvds_pnp_id pnp_id;
254} __attribute__((packed));
255
256struct bdb_lvds_lfp_data {
257 struct bdb_lvds_lfp_data_entry data[16];
258} __attribute__((packed));
259
260struct aimdb_header {
261 char signature[16];
262 char oem_device[20];
263 u16 aimdb_version;
264 u16 aimdb_header_size;
265 u16 aimdb_size;
266} __attribute__((packed));
267
268struct aimdb_block {
269 u8 aimdb_id;
270 u16 aimdb_size;
271} __attribute__((packed));
272
273struct vch_panel_data {
274 u16 fp_timing_offset;
275 u8 fp_timing_size;
276 u16 dvo_timing_offset;
277 u8 dvo_timing_size;
278 u16 text_fitting_offset;
279 u8 text_fitting_size;
280 u16 graphics_fitting_offset;
281 u8 graphics_fitting_size;
282} __attribute__((packed));
283
284struct vch_bdb_22 {
285 struct aimdb_block aimdb_block;
286 struct vch_panel_data panels[16];
287} __attribute__((packed));
288
289struct bdb_sdvo_lvds_options {
290 u8 panel_backlight;
291 u8 h40_set_panel_type;
292 u8 panel_type;
293 u8 ssc_clk_freq;
294 u16 als_low_trip;
295 u16 als_high_trip;
296 u8 sclalarcoeff_tab_row_num;
297 u8 sclalarcoeff_tab_row_size;
298 u8 coefficient[8];
299 u8 panel_misc_bits_1;
300 u8 panel_misc_bits_2;
301 u8 panel_misc_bits_3;
302 u8 panel_misc_bits_4;
303} __attribute__((packed));
304
305
306extern bool psb_intel_init_bios(struct drm_device *dev);
307extern void psb_intel_destroy_bios(struct drm_device *dev);
308
309/*
310 * Driver<->VBIOS interaction occurs through scratch bits in
311 * GR18 & SWF*.
312 */
313
314/* GR18 bits are set on display switch and hotkey events */
315#define GR18_DRIVER_SWITCH_EN (1<<7) /* 0: VBIOS control, 1: driver control */
316#define GR18_HOTKEY_MASK 0x78 /* See also SWF4 15:0 */
317#define GR18_HK_NONE (0x0<<3)
318#define GR18_HK_LFP_STRETCH (0x1<<3)
319#define GR18_HK_TOGGLE_DISP (0x2<<3)
320#define GR18_HK_DISP_SWITCH (0x4<<3) /* see SWF14 15:0 for what to enable */
321#define GR18_HK_POPUP_DISABLED (0x6<<3)
322#define GR18_HK_POPUP_ENABLED (0x7<<3)
323#define GR18_HK_PFIT (0x8<<3)
324#define GR18_HK_APM_CHANGE (0xa<<3)
325#define GR18_HK_MULTIPLE (0xc<<3)
326#define GR18_USER_INT_EN (1<<2)
327#define GR18_A0000_FLUSH_EN (1<<1)
328#define GR18_SMM_EN (1<<0)
329
330/* Set by driver, cleared by VBIOS */
331#define SWF00_YRES_SHIFT 16
332#define SWF00_XRES_SHIFT 0
333#define SWF00_RES_MASK 0xffff
334
335/* Set by VBIOS at boot time and driver at runtime */
336#define SWF01_TV2_FORMAT_SHIFT 8
337#define SWF01_TV1_FORMAT_SHIFT 0
338#define SWF01_TV_FORMAT_MASK 0xffff
339
340#define SWF10_VBIOS_BLC_I2C_EN (1<<29)
341#define SWF10_GTT_OVERRIDE_EN (1<<28)
342#define SWF10_LFP_DPMS_OVR (1<<27) /* override DPMS on display switch */
343#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24)
344#define SWF10_OLD_TOGGLE 0x0
345#define SWF10_TOGGLE_LIST_1 0x1
346#define SWF10_TOGGLE_LIST_2 0x2
347#define SWF10_TOGGLE_LIST_3 0x3
348#define SWF10_TOGGLE_LIST_4 0x4
349#define SWF10_PANNING_EN (1<<23)
350#define SWF10_DRIVER_LOADED (1<<22)
351#define SWF10_EXTENDED_DESKTOP (1<<21)
352#define SWF10_EXCLUSIVE_MODE (1<<20)
353#define SWF10_OVERLAY_EN (1<<19)
354#define SWF10_PLANEB_HOLDOFF (1<<18)
355#define SWF10_PLANEA_HOLDOFF (1<<17)
356#define SWF10_VGA_HOLDOFF (1<<16)
357#define SWF10_ACTIVE_DISP_MASK 0xffff
358#define SWF10_PIPEB_LFP2 (1<<15)
359#define SWF10_PIPEB_EFP2 (1<<14)
360#define SWF10_PIPEB_TV2 (1<<13)
361#define SWF10_PIPEB_CRT2 (1<<12)
362#define SWF10_PIPEB_LFP (1<<11)
363#define SWF10_PIPEB_EFP (1<<10)
364#define SWF10_PIPEB_TV (1<<9)
365#define SWF10_PIPEB_CRT (1<<8)
366#define SWF10_PIPEA_LFP2 (1<<7)
367#define SWF10_PIPEA_EFP2 (1<<6)
368#define SWF10_PIPEA_TV2 (1<<5)
369#define SWF10_PIPEA_CRT2 (1<<4)
370#define SWF10_PIPEA_LFP (1<<3)
371#define SWF10_PIPEA_EFP (1<<2)
372#define SWF10_PIPEA_TV (1<<1)
373#define SWF10_PIPEA_CRT (1<<0)
374
375#define SWF11_MEMORY_SIZE_SHIFT 16
376#define SWF11_SV_TEST_EN (1<<15)
377#define SWF11_IS_AGP (1<<14)
378#define SWF11_DISPLAY_HOLDOFF (1<<13)
379#define SWF11_DPMS_REDUCED (1<<12)
380#define SWF11_IS_VBE_MODE (1<<11)
381#define SWF11_PIPEB_ACCESS (1<<10) /* 0 here means pipe a */
382#define SWF11_DPMS_MASK 0x07
383#define SWF11_DPMS_OFF (1<<2)
384#define SWF11_DPMS_SUSPEND (1<<1)
385#define SWF11_DPMS_STANDBY (1<<0)
386#define SWF11_DPMS_ON 0
387
388#define SWF14_GFX_PFIT_EN (1<<31)
389#define SWF14_TEXT_PFIT_EN (1<<30)
390#define SWF14_LID_STATUS_CLOSED (1<<29) /* 0 here means open */
391#define SWF14_POPUP_EN (1<<28)
392#define SWF14_DISPLAY_HOLDOFF (1<<27)
393#define SWF14_DISP_DETECT_EN (1<<26)
394#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */
395#define SWF14_DRIVER_STATUS (1<<24)
396#define SWF14_OS_TYPE_WIN9X (1<<23)
397#define SWF14_OS_TYPE_WINNT (1<<22)
398/* 21:19 rsvd */
399#define SWF14_PM_TYPE_MASK 0x00070000
400#define SWF14_PM_ACPI_VIDEO (0x4 << 16)
401#define SWF14_PM_ACPI (0x3 << 16)
402#define SWF14_PM_APM_12 (0x2 << 16)
403#define SWF14_PM_APM_11 (0x1 << 16)
404#define SWF14_HK_REQUEST_MASK 0x0000ffff /* see GR18 6:3 for event type */
405 /* if GR18 indicates a display switch */
406#define SWF14_DS_PIPEB_LFP2_EN (1<<15)
407#define SWF14_DS_PIPEB_EFP2_EN (1<<14)
408#define SWF14_DS_PIPEB_TV2_EN (1<<13)
409#define SWF14_DS_PIPEB_CRT2_EN (1<<12)
410#define SWF14_DS_PIPEB_LFP_EN (1<<11)
411#define SWF14_DS_PIPEB_EFP_EN (1<<10)
412#define SWF14_DS_PIPEB_TV_EN (1<<9)
413#define SWF14_DS_PIPEB_CRT_EN (1<<8)
414#define SWF14_DS_PIPEA_LFP2_EN (1<<7)
415#define SWF14_DS_PIPEA_EFP2_EN (1<<6)
416#define SWF14_DS_PIPEA_TV2_EN (1<<5)
417#define SWF14_DS_PIPEA_CRT2_EN (1<<4)
418#define SWF14_DS_PIPEA_LFP_EN (1<<3)
419#define SWF14_DS_PIPEA_EFP_EN (1<<2)
420#define SWF14_DS_PIPEA_TV_EN (1<<1)
421#define SWF14_DS_PIPEA_CRT_EN (1<<0)
422 /* if GR18 indicates a panel fitting request */
423#define SWF14_PFIT_EN (1<<0) /* 0 means disable */
424 /* if GR18 indicates an APM change request */
425#define SWF14_APM_HIBERNATE 0x4
426#define SWF14_APM_SUSPEND 0x3
427#define SWF14_APM_STANDBY 0x1
428#define SWF14_APM_RESTORE 0x0
429
430#endif /* _I830_BIOS_H_ */
diff --git a/drivers/staging/gma500/intel_i2c.c b/drivers/staging/gma500/intel_i2c.c
deleted file mode 100644
index 51cbf65268e6..000000000000
--- a/drivers/staging/gma500/intel_i2c.c
+++ /dev/null
@@ -1,170 +0,0 @@
1/*
2 * Copyright © 2006-2007 Intel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Authors:
18 * Eric Anholt <eric@anholt.net>
19 */
20
21#include <linux/i2c.h>
22#include <linux/i2c-algo-bit.h>
23#include <linux/export.h>
24
25#include "psb_drv.h"
26#include "psb_intel_reg.h"
27
28/*
29 * Intel GPIO access functions
30 */
31
32#define I2C_RISEFALL_TIME 20
33
34static int get_clock(void *data)
35{
36 struct psb_intel_i2c_chan *chan = data;
37 struct drm_device *dev = chan->drm_dev;
38 u32 val;
39
40 val = REG_READ(chan->reg);
41 return (val & GPIO_CLOCK_VAL_IN) != 0;
42}
43
44static int get_data(void *data)
45{
46 struct psb_intel_i2c_chan *chan = data;
47 struct drm_device *dev = chan->drm_dev;
48 u32 val;
49
50 val = REG_READ(chan->reg);
51 return (val & GPIO_DATA_VAL_IN) != 0;
52}
53
54static void set_clock(void *data, int state_high)
55{
56 struct psb_intel_i2c_chan *chan = data;
57 struct drm_device *dev = chan->drm_dev;
58 u32 reserved = 0, clock_bits;
59
60 /* On most chips, these bits must be preserved in software. */
61 reserved =
62 REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
63 GPIO_CLOCK_PULLUP_DISABLE);
64
65 if (state_high)
66 clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
67 else
68 clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
69 GPIO_CLOCK_VAL_MASK;
70 REG_WRITE(chan->reg, reserved | clock_bits);
71 udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
72}
73
74static void set_data(void *data, int state_high)
75{
76 struct psb_intel_i2c_chan *chan = data;
77 struct drm_device *dev = chan->drm_dev;
78 u32 reserved = 0, data_bits;
79
80 /* On most chips, these bits must be preserved in software. */
81 reserved =
82 REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
83 GPIO_CLOCK_PULLUP_DISABLE);
84
85 if (state_high)
86 data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
87 else
88 data_bits =
89 GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
90 GPIO_DATA_VAL_MASK;
91
92 REG_WRITE(chan->reg, reserved | data_bits);
93 udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
94}
95
96/**
97 * psb_intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
98 * @dev: DRM device
99 * @output: driver specific output device
100 * @reg: GPIO reg to use
101 * @name: name for this bus
102 *
103 * Creates and registers a new i2c bus with the Linux i2c layer, for use
104 * in output probing and control (e.g. DDC or SDVO control functions).
105 *
106 * Possible values for @reg include:
107 * %GPIOA
108 * %GPIOB
109 * %GPIOC
110 * %GPIOD
111 * %GPIOE
112 * %GPIOF
113 * %GPIOG
114 * %GPIOH
115 * see PRM for details on how these different busses are used.
116 */
117struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
118 const u32 reg, const char *name)
119{
120 struct psb_intel_i2c_chan *chan;
121
122 chan = kzalloc(sizeof(struct psb_intel_i2c_chan), GFP_KERNEL);
123 if (!chan)
124 goto out_free;
125
126 chan->drm_dev = dev;
127 chan->reg = reg;
128 snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
129 chan->adapter.owner = THIS_MODULE;
130 chan->adapter.algo_data = &chan->algo;
131 chan->adapter.dev.parent = &dev->pdev->dev;
132 chan->algo.setsda = set_data;
133 chan->algo.setscl = set_clock;
134 chan->algo.getsda = get_data;
135 chan->algo.getscl = get_clock;
136 chan->algo.udelay = 20;
137 chan->algo.timeout = usecs_to_jiffies(2200);
138 chan->algo.data = chan;
139
140 i2c_set_adapdata(&chan->adapter, chan);
141
142 if (i2c_bit_add_bus(&chan->adapter))
143 goto out_free;
144
145 /* JJJ: raise SCL and SDA? */
146 set_data(chan, 1);
147 set_clock(chan, 1);
148 udelay(20);
149
150 return chan;
151
152out_free:
153 kfree(chan);
154 return NULL;
155}
156
157/**
158 * psb_intel_i2c_destroy - unregister and free i2c bus resources
159 * @chan: channel to free
160 *
161 * Unregister the adapter from the i2c layer, then free the structure.
162 */
163void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan)
164{
165 if (!chan)
166 return;
167
168 i2c_del_adapter(&chan->adapter);
169 kfree(chan);
170}
diff --git a/drivers/staging/gma500/intel_opregion.c b/drivers/staging/gma500/intel_opregion.c
deleted file mode 100644
index d946bc1b17bf..000000000000
--- a/drivers/staging/gma500/intel_opregion.c
+++ /dev/null
@@ -1,81 +0,0 @@
1/*
2 * Copyright 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * FIXME: resolve with the i915 version
24 */
25
26#include "psb_drv.h"
27
28struct opregion_header {
29 u8 signature[16];
30 u32 size;
31 u32 opregion_ver;
32 u8 bios_ver[32];
33 u8 vbios_ver[16];
34 u8 driver_ver[16];
35 u32 mboxes;
36 u8 reserved[164];
37} __packed;
38
39struct opregion_apci {
40 /*FIXME: add it later*/
41} __packed;
42
43struct opregion_swsci {
44 /*FIXME: add it later*/
45} __packed;
46
47struct opregion_acpi {
48 /*FIXME: add it later*/
49} __packed;
50
51int gma_intel_opregion_init(struct drm_device *dev)
52{
53 struct drm_psb_private *dev_priv = dev->dev_private;
54 u32 opregion_phy;
55 void *base;
56 u32 *lid_state;
57
58 dev_priv->lid_state = NULL;
59
60 pci_read_config_dword(dev->pdev, 0xfc, &opregion_phy);
61 if (opregion_phy == 0)
62 return -ENOTSUPP;
63
64 base = ioremap(opregion_phy, 8*1024);
65 if (!base)
66 return -ENOMEM;
67
68 lid_state = base + 0x01ac;
69
70 dev_priv->lid_state = lid_state;
71 dev_priv->lid_last_state = readl(lid_state);
72 return 0;
73}
74
75int gma_intel_opregion_exit(struct drm_device *dev)
76{
77 struct drm_psb_private *dev_priv = dev->dev_private;
78 if (dev_priv->lid_state)
79 iounmap(dev_priv->lid_state);
80 return 0;
81}
diff --git a/drivers/staging/gma500/mdfld_device.c b/drivers/staging/gma500/mdfld_device.c
deleted file mode 100644
index f47aeb7a2039..000000000000
--- a/drivers/staging/gma500/mdfld_device.c
+++ /dev/null
@@ -1,714 +0,0 @@
1/**************************************************************************
2 * Copyright (c) 2011, Intel Corporation.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 **************************************************************************/
19
20#include <linux/backlight.h>
21#include <drm/drmP.h>
22#include <drm/drm.h>
23#include "psb_reg.h"
24#include "psb_intel_reg.h"
25#include "psb_drm.h"
26#include "psb_drv.h"
27#include "mdfld_output.h"
28#include "mdfld_dsi_output.h"
29#include "mid_bios.h"
30
31/*
32 * Provide the Medfield specific backlight management
33 */
34
35#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
36
37static int mdfld_brightness;
38struct backlight_device *mdfld_backlight_device;
39
40static int mfld_set_brightness(struct backlight_device *bd)
41{
42 struct drm_device *dev = bl_get_data(mdfld_backlight_device);
43 struct drm_psb_private *dev_priv = dev->dev_private;
44 int level = bd->props.brightness;
45
46 /* Percentages from 1-100% are valid */
47 if (level < 1)
48 level = 1;
49
50 if (gma_power_begin(dev, 0)) {
51 /* Calculate and set the brightness value */
52 u32 adjusted_level;
53
54 /* Adjust the backlight level with the percent in
55 * dev_priv->blc_adj2;
56 */
57 adjusted_level = level * dev_priv->blc_adj2;
58 adjusted_level = adjusted_level / 100;
59#if 0
60#ifndef CONFIG_MDFLD_DSI_DPU
61 if(!(dev_priv->dsr_fb_update & MDFLD_DSR_MIPI_CONTROL) &&
62 (dev_priv->dbi_panel_on || dev_priv->dbi_panel_on2)){
63 mdfld_dsi_dbi_exit_dsr(dev,MDFLD_DSR_MIPI_CONTROL, 0, 0);
64 dev_dbg(dev->dev, "Out of DSR before set brightness to %d.\n",adjusted_level);
65 }
66#endif
67 mdfld_dsi_brightness_control(dev, 0, adjusted_level);
68
69 if ((dev_priv->dbi_panel_on2) || (dev_priv->dpi_panel_on2))
70 mdfld_dsi_brightness_control(dev, 2, adjusted_level);
71#endif
72 gma_power_end(dev);
73 }
74 mdfld_brightness = level;
75 return 0;
76}
77
78int psb_get_brightness(struct backlight_device *bd)
79{
80 /* return locally cached var instead of HW read (due to DPST etc.) */
81 /* FIXME: ideally return actual value in case firmware fiddled with
82 it */
83 return mdfld_brightness;
84}
85
86static const struct backlight_ops mfld_ops = {
87 .get_brightness = psb_get_brightness,
88 .update_status = mfld_set_brightness,
89};
90
91static int mdfld_backlight_init(struct drm_device *dev)
92{
93 struct drm_psb_private *dev_priv = dev->dev_private;
94 struct backlight_properties props;
95 memset(&props, 0, sizeof(struct backlight_properties));
96 props.max_brightness = 100;
97 props.type = BACKLIGHT_PLATFORM;
98
99 mdfld_backlight_device = backlight_device_register("mfld-bl",
100 NULL, (void *)dev, &mfld_ops, &props);
101
102 if (IS_ERR(mdfld_backlight_device))
103 return PTR_ERR(mdfld_backlight_device);
104
105 dev_priv->blc_adj1 = 100;
106 dev_priv->blc_adj2 = 100;
107 mdfld_backlight_device->props.brightness = 100;
108 mdfld_backlight_device->props.max_brightness = 100;
109 backlight_update_status(mdfld_backlight_device);
110 dev_priv->backlight_device = mdfld_backlight_device;
111 return 0;
112}
113
114#endif
115
116/*
117 * Provide the Medfield specific chip logic and low level methods for
118 * power management.
119 */
120
121static void mdfld_init_pm(struct drm_device *dev)
122{
123 /* No work needed here yet */
124}
125
126/**
127 * mdfld_save_display_registers - save registers for pipe
128 * @dev: our device
129 * @pipe: pipe to save
130 *
131 * Save the pipe state of the device before we power it off. Keep everything
132 * we need to put it back again
133 */
134static int mdfld_save_display_registers(struct drm_device *dev, int pipe)
135{
136 struct drm_psb_private *dev_priv = dev->dev_private;
137 int i;
138
139 /* register */
140 u32 dpll_reg = MRST_DPLL_A;
141 u32 fp_reg = MRST_FPA0;
142 u32 pipeconf_reg = PIPEACONF;
143 u32 htot_reg = HTOTAL_A;
144 u32 hblank_reg = HBLANK_A;
145 u32 hsync_reg = HSYNC_A;
146 u32 vtot_reg = VTOTAL_A;
147 u32 vblank_reg = VBLANK_A;
148 u32 vsync_reg = VSYNC_A;
149 u32 pipesrc_reg = PIPEASRC;
150 u32 dspstride_reg = DSPASTRIDE;
151 u32 dsplinoff_reg = DSPALINOFF;
152 u32 dsptileoff_reg = DSPATILEOFF;
153 u32 dspsize_reg = DSPASIZE;
154 u32 dsppos_reg = DSPAPOS;
155 u32 dspsurf_reg = DSPASURF;
156 u32 mipi_reg = MIPI;
157 u32 dspcntr_reg = DSPACNTR;
158 u32 dspstatus_reg = PIPEASTAT;
159 u32 palette_reg = PALETTE_A;
160
161 /* pointer to values */
162 u32 *dpll_val = &dev_priv->saveDPLL_A;
163 u32 *fp_val = &dev_priv->saveFPA0;
164 u32 *pipeconf_val = &dev_priv->savePIPEACONF;
165 u32 *htot_val = &dev_priv->saveHTOTAL_A;
166 u32 *hblank_val = &dev_priv->saveHBLANK_A;
167 u32 *hsync_val = &dev_priv->saveHSYNC_A;
168 u32 *vtot_val = &dev_priv->saveVTOTAL_A;
169 u32 *vblank_val = &dev_priv->saveVBLANK_A;
170 u32 *vsync_val = &dev_priv->saveVSYNC_A;
171 u32 *pipesrc_val = &dev_priv->savePIPEASRC;
172 u32 *dspstride_val = &dev_priv->saveDSPASTRIDE;
173 u32 *dsplinoff_val = &dev_priv->saveDSPALINOFF;
174 u32 *dsptileoff_val = &dev_priv->saveDSPATILEOFF;
175 u32 *dspsize_val = &dev_priv->saveDSPASIZE;
176 u32 *dsppos_val = &dev_priv->saveDSPAPOS;
177 u32 *dspsurf_val = &dev_priv->saveDSPASURF;
178 u32 *mipi_val = &dev_priv->saveMIPI;
179 u32 *dspcntr_val = &dev_priv->saveDSPACNTR;
180 u32 *dspstatus_val = &dev_priv->saveDSPASTATUS;
181 u32 *palette_val = dev_priv->save_palette_a;
182
183 switch (pipe) {
184 case 0:
185 break;
186 case 1:
187 /* register */
188 dpll_reg = MDFLD_DPLL_B;
189 fp_reg = MDFLD_DPLL_DIV0;
190 pipeconf_reg = PIPEBCONF;
191 htot_reg = HTOTAL_B;
192 hblank_reg = HBLANK_B;
193 hsync_reg = HSYNC_B;
194 vtot_reg = VTOTAL_B;
195 vblank_reg = VBLANK_B;
196 vsync_reg = VSYNC_B;
197 pipesrc_reg = PIPEBSRC;
198 dspstride_reg = DSPBSTRIDE;
199 dsplinoff_reg = DSPBLINOFF;
200 dsptileoff_reg = DSPBTILEOFF;
201 dspsize_reg = DSPBSIZE;
202 dsppos_reg = DSPBPOS;
203 dspsurf_reg = DSPBSURF;
204 dspcntr_reg = DSPBCNTR;
205 dspstatus_reg = PIPEBSTAT;
206 palette_reg = PALETTE_B;
207
208 /* values */
209 dpll_val = &dev_priv->saveDPLL_B;
210 fp_val = &dev_priv->saveFPB0;
211 pipeconf_val = &dev_priv->savePIPEBCONF;
212 htot_val = &dev_priv->saveHTOTAL_B;
213 hblank_val = &dev_priv->saveHBLANK_B;
214 hsync_val = &dev_priv->saveHSYNC_B;
215 vtot_val = &dev_priv->saveVTOTAL_B;
216 vblank_val = &dev_priv->saveVBLANK_B;
217 vsync_val = &dev_priv->saveVSYNC_B;
218 pipesrc_val = &dev_priv->savePIPEBSRC;
219 dspstride_val = &dev_priv->saveDSPBSTRIDE;
220 dsplinoff_val = &dev_priv->saveDSPBLINOFF;
221 dsptileoff_val = &dev_priv->saveDSPBTILEOFF;
222 dspsize_val = &dev_priv->saveDSPBSIZE;
223 dsppos_val = &dev_priv->saveDSPBPOS;
224 dspsurf_val = &dev_priv->saveDSPBSURF;
225 dspcntr_val = &dev_priv->saveDSPBCNTR;
226 dspstatus_val = &dev_priv->saveDSPBSTATUS;
227 palette_val = dev_priv->save_palette_b;
228 break;
229 case 2:
230 /* register */
231 pipeconf_reg = PIPECCONF;
232 htot_reg = HTOTAL_C;
233 hblank_reg = HBLANK_C;
234 hsync_reg = HSYNC_C;
235 vtot_reg = VTOTAL_C;
236 vblank_reg = VBLANK_C;
237 vsync_reg = VSYNC_C;
238 pipesrc_reg = PIPECSRC;
239 dspstride_reg = DSPCSTRIDE;
240 dsplinoff_reg = DSPCLINOFF;
241 dsptileoff_reg = DSPCTILEOFF;
242 dspsize_reg = DSPCSIZE;
243 dsppos_reg = DSPCPOS;
244 dspsurf_reg = DSPCSURF;
245 mipi_reg = MIPI_C;
246 dspcntr_reg = DSPCCNTR;
247 dspstatus_reg = PIPECSTAT;
248 palette_reg = PALETTE_C;
249
250 /* pointer to values */
251 pipeconf_val = &dev_priv->savePIPECCONF;
252 htot_val = &dev_priv->saveHTOTAL_C;
253 hblank_val = &dev_priv->saveHBLANK_C;
254 hsync_val = &dev_priv->saveHSYNC_C;
255 vtot_val = &dev_priv->saveVTOTAL_C;
256 vblank_val = &dev_priv->saveVBLANK_C;
257 vsync_val = &dev_priv->saveVSYNC_C;
258 pipesrc_val = &dev_priv->savePIPECSRC;
259 dspstride_val = &dev_priv->saveDSPCSTRIDE;
260 dsplinoff_val = &dev_priv->saveDSPCLINOFF;
261 dsptileoff_val = &dev_priv->saveDSPCTILEOFF;
262 dspsize_val = &dev_priv->saveDSPCSIZE;
263 dsppos_val = &dev_priv->saveDSPCPOS;
264 dspsurf_val = &dev_priv->saveDSPCSURF;
265 mipi_val = &dev_priv->saveMIPI_C;
266 dspcntr_val = &dev_priv->saveDSPCCNTR;
267 dspstatus_val = &dev_priv->saveDSPCSTATUS;
268 palette_val = dev_priv->save_palette_c;
269 break;
270 default:
271 DRM_ERROR("%s, invalid pipe number.\n", __func__);
272 return -EINVAL;
273 }
274
275 /* Pipe & plane A info */
276 *dpll_val = PSB_RVDC32(dpll_reg);
277 *fp_val = PSB_RVDC32(fp_reg);
278 *pipeconf_val = PSB_RVDC32(pipeconf_reg);
279 *htot_val = PSB_RVDC32(htot_reg);
280 *hblank_val = PSB_RVDC32(hblank_reg);
281 *hsync_val = PSB_RVDC32(hsync_reg);
282 *vtot_val = PSB_RVDC32(vtot_reg);
283 *vblank_val = PSB_RVDC32(vblank_reg);
284 *vsync_val = PSB_RVDC32(vsync_reg);
285 *pipesrc_val = PSB_RVDC32(pipesrc_reg);
286 *dspstride_val = PSB_RVDC32(dspstride_reg);
287 *dsplinoff_val = PSB_RVDC32(dsplinoff_reg);
288 *dsptileoff_val = PSB_RVDC32(dsptileoff_reg);
289 *dspsize_val = PSB_RVDC32(dspsize_reg);
290 *dsppos_val = PSB_RVDC32(dsppos_reg);
291 *dspsurf_val = PSB_RVDC32(dspsurf_reg);
292 *dspcntr_val = PSB_RVDC32(dspcntr_reg);
293 *dspstatus_val = PSB_RVDC32(dspstatus_reg);
294
295 /*save palette (gamma) */
296 for (i = 0; i < 256; i++)
297 palette_val[i] = PSB_RVDC32(palette_reg + (i<<2));
298
299 if (pipe == 1) {
300 dev_priv->savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL);
301 dev_priv->savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS);
302 dev_priv->saveHDMIPHYMISCCTL = PSB_RVDC32(HDMIPHYMISCCTL);
303 dev_priv->saveHDMIB_CONTROL = PSB_RVDC32(HDMIB_CONTROL);
304 return 0;
305 }
306 *mipi_val = PSB_RVDC32(mipi_reg);
307 return 0;
308}
309
310/**
311 * mdfld_save_cursor_overlay_registers - save cursor overlay info
312 * @dev: our device
313 *
314 * Save the cursor and overlay register state
315 */
316static int mdfld_save_cursor_overlay_registers(struct drm_device *dev)
317{
318 struct drm_psb_private *dev_priv = dev->dev_private;
319
320 /* Save cursor regs */
321 dev_priv->saveDSPACURSOR_CTRL = PSB_RVDC32(CURACNTR);
322 dev_priv->saveDSPACURSOR_BASE = PSB_RVDC32(CURABASE);
323 dev_priv->saveDSPACURSOR_POS = PSB_RVDC32(CURAPOS);
324
325 dev_priv->saveDSPBCURSOR_CTRL = PSB_RVDC32(CURBCNTR);
326 dev_priv->saveDSPBCURSOR_BASE = PSB_RVDC32(CURBBASE);
327 dev_priv->saveDSPBCURSOR_POS = PSB_RVDC32(CURBPOS);
328
329 dev_priv->saveDSPCCURSOR_CTRL = PSB_RVDC32(CURCCNTR);
330 dev_priv->saveDSPCCURSOR_BASE = PSB_RVDC32(CURCBASE);
331 dev_priv->saveDSPCCURSOR_POS = PSB_RVDC32(CURCPOS);
332
333 /* HW overlay */
334 dev_priv->saveOV_OVADD = PSB_RVDC32(OV_OVADD);
335 dev_priv->saveOV_OGAMC0 = PSB_RVDC32(OV_OGAMC0);
336 dev_priv->saveOV_OGAMC1 = PSB_RVDC32(OV_OGAMC1);
337 dev_priv->saveOV_OGAMC2 = PSB_RVDC32(OV_OGAMC2);
338 dev_priv->saveOV_OGAMC3 = PSB_RVDC32(OV_OGAMC3);
339 dev_priv->saveOV_OGAMC4 = PSB_RVDC32(OV_OGAMC4);
340 dev_priv->saveOV_OGAMC5 = PSB_RVDC32(OV_OGAMC5);
341
342 dev_priv->saveOV_OVADD_C = PSB_RVDC32(OV_OVADD + OV_C_OFFSET);
343 dev_priv->saveOV_OGAMC0_C = PSB_RVDC32(OV_OGAMC0 + OV_C_OFFSET);
344 dev_priv->saveOV_OGAMC1_C = PSB_RVDC32(OV_OGAMC1 + OV_C_OFFSET);
345 dev_priv->saveOV_OGAMC2_C = PSB_RVDC32(OV_OGAMC2 + OV_C_OFFSET);
346 dev_priv->saveOV_OGAMC3_C = PSB_RVDC32(OV_OGAMC3 + OV_C_OFFSET);
347 dev_priv->saveOV_OGAMC4_C = PSB_RVDC32(OV_OGAMC4 + OV_C_OFFSET);
348 dev_priv->saveOV_OGAMC5_C = PSB_RVDC32(OV_OGAMC5 + OV_C_OFFSET);
349
350 return 0;
351}
352/**
353 * mdfld_restore_display_registers - restore the state of a pipe
354 * @dev: our device
355 * @pipe: the pipe to restore
356 *
357 * Restore the state of a pipe to that which was saved by the register save
358 * functions.
359 */
360static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
361{
362 /* To get panel out of ULPS mode */
363 struct drm_psb_private *dev_priv = dev->dev_private;
364 struct mdfld_dsi_config *dsi_config = NULL;
365 u32 i = 0;
366 u32 dpll = 0;
367 u32 timeout = 0;
368 u32 reg_offset = 0;
369
370 /* register */
371 u32 dpll_reg = MRST_DPLL_A;
372 u32 fp_reg = MRST_FPA0;
373 u32 pipeconf_reg = PIPEACONF;
374 u32 htot_reg = HTOTAL_A;
375 u32 hblank_reg = HBLANK_A;
376 u32 hsync_reg = HSYNC_A;
377 u32 vtot_reg = VTOTAL_A;
378 u32 vblank_reg = VBLANK_A;
379 u32 vsync_reg = VSYNC_A;
380 u32 pipesrc_reg = PIPEASRC;
381 u32 dspstride_reg = DSPASTRIDE;
382 u32 dsplinoff_reg = DSPALINOFF;
383 u32 dsptileoff_reg = DSPATILEOFF;
384 u32 dspsize_reg = DSPASIZE;
385 u32 dsppos_reg = DSPAPOS;
386 u32 dspsurf_reg = DSPASURF;
387 u32 dspstatus_reg = PIPEASTAT;
388 u32 mipi_reg = MIPI;
389 u32 dspcntr_reg = DSPACNTR;
390 u32 palette_reg = PALETTE_A;
391
392 /* values */
393 u32 dpll_val = dev_priv->saveDPLL_A & ~DPLL_VCO_ENABLE;
394 u32 fp_val = dev_priv->saveFPA0;
395 u32 pipeconf_val = dev_priv->savePIPEACONF;
396 u32 htot_val = dev_priv->saveHTOTAL_A;
397 u32 hblank_val = dev_priv->saveHBLANK_A;
398 u32 hsync_val = dev_priv->saveHSYNC_A;
399 u32 vtot_val = dev_priv->saveVTOTAL_A;
400 u32 vblank_val = dev_priv->saveVBLANK_A;
401 u32 vsync_val = dev_priv->saveVSYNC_A;
402 u32 pipesrc_val = dev_priv->savePIPEASRC;
403 u32 dspstride_val = dev_priv->saveDSPASTRIDE;
404 u32 dsplinoff_val = dev_priv->saveDSPALINOFF;
405 u32 dsptileoff_val = dev_priv->saveDSPATILEOFF;
406 u32 dspsize_val = dev_priv->saveDSPASIZE;
407 u32 dsppos_val = dev_priv->saveDSPAPOS;
408 u32 dspsurf_val = dev_priv->saveDSPASURF;
409 u32 dspstatus_val = dev_priv->saveDSPASTATUS;
410 u32 mipi_val = dev_priv->saveMIPI;
411 u32 dspcntr_val = dev_priv->saveDSPACNTR;
412 u32 *palette_val = dev_priv->save_palette_a;
413
414 switch (pipe) {
415 case 0:
416 dsi_config = dev_priv->dsi_configs[0];
417 break;
418 case 1:
419 /* register */
420 dpll_reg = MDFLD_DPLL_B;
421 fp_reg = MDFLD_DPLL_DIV0;
422 pipeconf_reg = PIPEBCONF;
423 htot_reg = HTOTAL_B;
424 hblank_reg = HBLANK_B;
425 hsync_reg = HSYNC_B;
426 vtot_reg = VTOTAL_B;
427 vblank_reg = VBLANK_B;
428 vsync_reg = VSYNC_B;
429 pipesrc_reg = PIPEBSRC;
430 dspstride_reg = DSPBSTRIDE;
431 dsplinoff_reg = DSPBLINOFF;
432 dsptileoff_reg = DSPBTILEOFF;
433 dspsize_reg = DSPBSIZE;
434 dsppos_reg = DSPBPOS;
435 dspsurf_reg = DSPBSURF;
436 dspcntr_reg = DSPBCNTR;
437 palette_reg = PALETTE_B;
438 dspstatus_reg = PIPEBSTAT;
439
440 /* values */
441 dpll_val = dev_priv->saveDPLL_B & ~DPLL_VCO_ENABLE;
442 fp_val = dev_priv->saveFPB0;
443 pipeconf_val = dev_priv->savePIPEBCONF;
444 htot_val = dev_priv->saveHTOTAL_B;
445 hblank_val = dev_priv->saveHBLANK_B;
446 hsync_val = dev_priv->saveHSYNC_B;
447 vtot_val = dev_priv->saveVTOTAL_B;
448 vblank_val = dev_priv->saveVBLANK_B;
449 vsync_val = dev_priv->saveVSYNC_B;
450 pipesrc_val = dev_priv->savePIPEBSRC;
451 dspstride_val = dev_priv->saveDSPBSTRIDE;
452 dsplinoff_val = dev_priv->saveDSPBLINOFF;
453 dsptileoff_val = dev_priv->saveDSPBTILEOFF;
454 dspsize_val = dev_priv->saveDSPBSIZE;
455 dsppos_val = dev_priv->saveDSPBPOS;
456 dspsurf_val = dev_priv->saveDSPBSURF;
457 dspcntr_val = dev_priv->saveDSPBCNTR;
458 dspstatus_val = dev_priv->saveDSPBSTATUS;
459 palette_val = dev_priv->save_palette_b;
460 break;
461 case 2:
462 reg_offset = MIPIC_REG_OFFSET;
463
464 /* register */
465 pipeconf_reg = PIPECCONF;
466 htot_reg = HTOTAL_C;
467 hblank_reg = HBLANK_C;
468 hsync_reg = HSYNC_C;
469 vtot_reg = VTOTAL_C;
470 vblank_reg = VBLANK_C;
471 vsync_reg = VSYNC_C;
472 pipesrc_reg = PIPECSRC;
473 dspstride_reg = DSPCSTRIDE;
474 dsplinoff_reg = DSPCLINOFF;
475 dsptileoff_reg = DSPCTILEOFF;
476 dspsize_reg = DSPCSIZE;
477 dsppos_reg = DSPCPOS;
478 dspsurf_reg = DSPCSURF;
479 mipi_reg = MIPI_C;
480 dspcntr_reg = DSPCCNTR;
481 palette_reg = PALETTE_C;
482 dspstatus_reg = PIPECSTAT;
483
484 /* values */
485 pipeconf_val = dev_priv->savePIPECCONF;
486 htot_val = dev_priv->saveHTOTAL_C;
487 hblank_val = dev_priv->saveHBLANK_C;
488 hsync_val = dev_priv->saveHSYNC_C;
489 vtot_val = dev_priv->saveVTOTAL_C;
490 vblank_val = dev_priv->saveVBLANK_C;
491 vsync_val = dev_priv->saveVSYNC_C;
492 pipesrc_val = dev_priv->savePIPECSRC;
493 dspstride_val = dev_priv->saveDSPCSTRIDE;
494 dsplinoff_val = dev_priv->saveDSPCLINOFF;
495 dsptileoff_val = dev_priv->saveDSPCTILEOFF;
496 dspsize_val = dev_priv->saveDSPCSIZE;
497 dsppos_val = dev_priv->saveDSPCPOS;
498 dspsurf_val = dev_priv->saveDSPCSURF;
499 dspstatus_val = dev_priv->saveDSPCSTATUS;
500 mipi_val = dev_priv->saveMIPI_C;
501 dspcntr_val = dev_priv->saveDSPCCNTR;
502 palette_val = dev_priv->save_palette_c;
503
504 dsi_config = dev_priv->dsi_configs[1];
505 break;
506 default:
507 DRM_ERROR("%s, invalid pipe number.\n", __func__);
508 return -EINVAL;
509 }
510
511 /* Make sure the VGA plane is off; it initializes to on after reset! */
512 PSB_WVDC32(0x80000000, VGACNTRL);
513 if (pipe == 1) {
514 PSB_WVDC32(dpll_val & ~DPLL_VCO_ENABLE, dpll_reg);
515 PSB_RVDC32(dpll_reg);
516
517 PSB_WVDC32(fp_val, fp_reg);
518 } else {
519 dpll = PSB_RVDC32(dpll_reg);
520
521 if (!(dpll & DPLL_VCO_ENABLE)) {
522
523 /* When ungating the DPLL power, we need to wait 0.5us before enabling the VCO */
524 if (dpll & MDFLD_PWR_GATE_EN) {
525 dpll &= ~MDFLD_PWR_GATE_EN;
526 PSB_WVDC32(dpll, dpll_reg);
527 udelay(500); /* FIXME: 1 ? */
528 }
529
530 PSB_WVDC32(fp_val, fp_reg);
531 PSB_WVDC32(dpll_val, dpll_reg);
532 /* FIXME_MDFLD PO - change 500 to 1 after PO */
533 udelay(500);
534
535 dpll_val |= DPLL_VCO_ENABLE;
536 PSB_WVDC32(dpll_val, dpll_reg);
537 PSB_RVDC32(dpll_reg);
538
539 /* wait for DSI PLL to lock */
540 while ((timeout < 20000) && !(PSB_RVDC32(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) {
541 udelay(150);
542 timeout++;
543 }
544
545 if (timeout == 20000) {
546 DRM_ERROR("%s, can't lock DSIPLL.\n",
547 __func__);
548 return -EINVAL;
549 }
550 }
551 }
552 /* Restore mode */
553 PSB_WVDC32(htot_val, htot_reg);
554 PSB_WVDC32(hblank_val, hblank_reg);
555 PSB_WVDC32(hsync_val, hsync_reg);
556 PSB_WVDC32(vtot_val, vtot_reg);
557 PSB_WVDC32(vblank_val, vblank_reg);
558 PSB_WVDC32(vsync_val, vsync_reg);
559 PSB_WVDC32(pipesrc_val, pipesrc_reg);
560 PSB_WVDC32(dspstatus_val, dspstatus_reg);
561
562 /* Set up the plane */
563 PSB_WVDC32(dspstride_val, dspstride_reg);
564 PSB_WVDC32(dsplinoff_val, dsplinoff_reg);
565 PSB_WVDC32(dsptileoff_val, dsptileoff_reg);
566 PSB_WVDC32(dspsize_val, dspsize_reg);
567 PSB_WVDC32(dsppos_val, dsppos_reg);
568 PSB_WVDC32(dspsurf_val, dspsurf_reg);
569
570 if (pipe == 1) {
571 PSB_WVDC32(dev_priv->savePFIT_CONTROL, PFIT_CONTROL);
572 PSB_WVDC32(dev_priv->savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS);
573 PSB_WVDC32(dev_priv->saveHDMIPHYMISCCTL, HDMIPHYMISCCTL);
574 PSB_WVDC32(dev_priv->saveHDMIB_CONTROL, HDMIB_CONTROL);
575
576 } else {
577 /* Set up pipe related registers */
578 PSB_WVDC32(mipi_val, mipi_reg);
579 /* Setup MIPI adapter + MIPI IP registers */
580 mdfld_dsi_controller_init(dsi_config, pipe);
581 msleep(20);
582 }
583 /* Enable the plane */
584 PSB_WVDC32(dspcntr_val, dspcntr_reg);
585 msleep(20);
586 /* Enable the pipe */
587 PSB_WVDC32(pipeconf_val, pipeconf_reg);
588
589 for (i = 0; i < 256; i++)
590 PSB_WVDC32(palette_val[i], palette_reg + (i<<2));
591 if (pipe == 1)
592 return 0;
593 if (!mdfld_panel_dpi(dev))
594 mdfld_enable_te(dev, pipe);
595 return 0;
596}
597
598/**
599 * mdfld_restore_cursor_overlay_registers - restore cursor and overlay state
600 * @dev: our device
601 *
602 * Restore the cursor and overlay state that was saved earlier
603 */
604static int mdfld_restore_cursor_overlay_registers(struct drm_device *dev)
605{
606 struct drm_psb_private *dev_priv = dev->dev_private;
607
608 /* Enable Cursor A */
609 PSB_WVDC32(dev_priv->saveDSPACURSOR_CTRL, CURACNTR);
610 PSB_WVDC32(dev_priv->saveDSPACURSOR_POS, CURAPOS);
611 PSB_WVDC32(dev_priv->saveDSPACURSOR_BASE, CURABASE);
612
613 PSB_WVDC32(dev_priv->saveDSPBCURSOR_CTRL, CURBCNTR);
614 PSB_WVDC32(dev_priv->saveDSPBCURSOR_POS, CURBPOS);
615 PSB_WVDC32(dev_priv->saveDSPBCURSOR_BASE, CURBBASE);
616
617 PSB_WVDC32(dev_priv->saveDSPCCURSOR_CTRL, CURCCNTR);
618 PSB_WVDC32(dev_priv->saveDSPCCURSOR_POS, CURCPOS);
619 PSB_WVDC32(dev_priv->saveDSPCCURSOR_BASE, CURCBASE);
620
621 /* Restore HW overlay */
622 PSB_WVDC32(dev_priv->saveOV_OVADD, OV_OVADD);
623 PSB_WVDC32(dev_priv->saveOV_OGAMC0, OV_OGAMC0);
624 PSB_WVDC32(dev_priv->saveOV_OGAMC1, OV_OGAMC1);
625 PSB_WVDC32(dev_priv->saveOV_OGAMC2, OV_OGAMC2);
626 PSB_WVDC32(dev_priv->saveOV_OGAMC3, OV_OGAMC3);
627 PSB_WVDC32(dev_priv->saveOV_OGAMC4, OV_OGAMC4);
628 PSB_WVDC32(dev_priv->saveOV_OGAMC5, OV_OGAMC5);
629
630 PSB_WVDC32(dev_priv->saveOV_OVADD_C, OV_OVADD + OV_C_OFFSET);
631 PSB_WVDC32(dev_priv->saveOV_OGAMC0_C, OV_OGAMC0 + OV_C_OFFSET);
632 PSB_WVDC32(dev_priv->saveOV_OGAMC1_C, OV_OGAMC1 + OV_C_OFFSET);
633 PSB_WVDC32(dev_priv->saveOV_OGAMC2_C, OV_OGAMC2 + OV_C_OFFSET);
634 PSB_WVDC32(dev_priv->saveOV_OGAMC3_C, OV_OGAMC3 + OV_C_OFFSET);
635 PSB_WVDC32(dev_priv->saveOV_OGAMC4_C, OV_OGAMC4 + OV_C_OFFSET);
636 PSB_WVDC32(dev_priv->saveOV_OGAMC5_C, OV_OGAMC5 + OV_C_OFFSET);
637
638 return 0;
639}
640
641/**
642 * mdfld_save_registers - save registers lost on suspend
643 * @dev: our DRM device
644 *
645 * Save the state we need in order to be able to restore the interface
646 * upon resume from suspend
647 */
648static int mdfld_save_registers(struct drm_device *dev)
649{
650 /* FIXME: We need to shut down the panels here if they are in use,
651 once the right bits are merged */
652 mdfld_save_cursor_overlay_registers(dev);
653 mdfld_save_display_registers(dev, 0);
654 mdfld_save_display_registers(dev, 0);
655 mdfld_save_display_registers(dev, 2);
656 mdfld_save_display_registers(dev, 1);
657 mdfld_disable_crtc(dev, 0);
658 mdfld_disable_crtc(dev, 2);
659 mdfld_disable_crtc(dev, 1);
660 return 0;
661}
662
663/**
664 * mdfld_restore_registers - restore lost register state
665 * @dev: our DRM device
666 *
667 * Restore register state that was lost during suspend and resume.
668 */
669static int mdfld_restore_registers(struct drm_device *dev)
670{
671 mdfld_restore_display_registers(dev, 1);
672 mdfld_restore_display_registers(dev, 0);
673 mdfld_restore_display_registers(dev, 2);
674 mdfld_restore_cursor_overlay_registers(dev);
675 return 0;
676}
677
678static int mdfld_power_down(struct drm_device *dev)
679{
680 /* FIXME */
681 return 0;
682}
683
684static int mdfld_power_up(struct drm_device *dev)
685{
686 /* FIXME */
687 return 0;
688}
689
690const struct psb_ops mdfld_chip_ops = {
691 .name = "Medfield",
692 .accel_2d = 0,
693 .pipes = 3,
694 .crtcs = 2,
695 .sgx_offset = MRST_SGX_OFFSET,
696
697 .chip_setup = mid_chip_setup,
698
699 .crtc_helper = &mdfld_helper_funcs,
700 .crtc_funcs = &mdfld_intel_crtc_funcs,
701
702 .output_init = mdfld_output_init,
703
704#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
705 .backlight_init = mdfld_backlight_init,
706#endif
707
708 .init_pm = mdfld_init_pm,
709 .save_regs = mdfld_save_registers,
710 .restore_regs = mdfld_restore_registers,
711 .power_down = mdfld_power_down,
712 .power_up = mdfld_power_up,
713};
714
diff --git a/drivers/staging/gma500/mdfld_dsi_dbi.c b/drivers/staging/gma500/mdfld_dsi_dbi.c
deleted file mode 100644
index fd211f3467c4..000000000000
--- a/drivers/staging/gma500/mdfld_dsi_dbi.c
+++ /dev/null
@@ -1,761 +0,0 @@
1/*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * jim liu <jim.liu@intel.com>
25 * Jackie Li <yaodong.li@intel.com>
26 */
27
28#include "mdfld_dsi_dbi.h"
29#include "mdfld_dsi_dbi_dpu.h"
30#include "mdfld_dsi_pkg_sender.h"
31
32#include "power.h"
33#include <linux/pm_runtime.h>
34
35int enable_gfx_rtpm;
36
37extern struct drm_device *gpDrmDevice;
38extern int gfxrtdelay;
39int enter_dsr;
40struct mdfld_dsi_dbi_output *gdbi_output;
41extern bool gbgfxsuspended;
42extern int enable_gfx_rtpm;
43extern int gfxrtdelay;
44
45#define MDFLD_DSR_MAX_IDLE_COUNT 2
46
47/*
48 * set refreshing area
49 */
50int mdfld_dsi_dbi_update_area(struct mdfld_dsi_dbi_output *dbi_output,
51 u16 x1, u16 y1, u16 x2, u16 y2)
52{
53 struct mdfld_dsi_pkg_sender *sender =
54 mdfld_dsi_encoder_get_pkg_sender(&dbi_output->base);
55 u8 param[4];
56 u8 cmd;
57 int err;
58
59 if (!sender) {
60 WARN_ON(1);
61 return -EINVAL;
62 }
63
64 /* Set column */
65 cmd = DCS_SET_COLUMN_ADDRESS;
66 param[0] = x1 >> 8;
67 param[1] = x1;
68 param[2] = x2 >> 8;
69 param[3] = x2;
70
71 err = mdfld_dsi_send_dcs(sender,
72 cmd,
73 param,
74 4,
75 CMD_DATA_SRC_SYSTEM_MEM,
76 MDFLD_DSI_QUEUE_PACKAGE);
77 if (err) {
78 dev_err(sender->dev->dev, "DCS 0x%x sent failed\n", cmd);
79 goto err_out;
80 }
81
82 /* Set page */
83 cmd = DCS_SET_PAGE_ADDRESS;
84 param[0] = y1 >> 8;
85 param[1] = y1;
86 param[2] = y2 >> 8;
87 param[3] = y2;
88
89 err = mdfld_dsi_send_dcs(sender,
90 cmd,
91 param,
92 4,
93 CMD_DATA_SRC_SYSTEM_MEM,
94 MDFLD_DSI_QUEUE_PACKAGE);
95 if (err) {
96 dev_err(sender->dev->dev, "DCS 0x%x sent failed\n", cmd);
97 goto err_out;
98 }
99
100 /*update screen*/
101 err = mdfld_dsi_send_dcs(sender,
102 DCS_WRITE_MEM_START,
103 NULL,
104 0,
105 CMD_DATA_SRC_PIPE,
106 MDFLD_DSI_QUEUE_PACKAGE);
107 if (err) {
107 dev_err(sender->dev->dev, "DCS 0x%x sent failed\n", DCS_WRITE_MEM_START);
109 goto err_out;
110 }
111 mdfld_dsi_cmds_kick_out(sender);
112err_out:
113 return err;
114}
115
116/*
117 * set panel's power state
118 */
119int mdfld_dsi_dbi_update_power(struct mdfld_dsi_dbi_output *dbi_output,
120 int mode)
121{
122 struct drm_device *dev = dbi_output->dev;
123 struct mdfld_dsi_pkg_sender *sender =
124 mdfld_dsi_encoder_get_pkg_sender(&dbi_output->base);
125 u8 param = 0;
126 u32 err = 0;
127
128 if (!sender) {
129 WARN_ON(1);
130 return -EINVAL;
131 }
132
133 if (mode == DRM_MODE_DPMS_ON) {
134 /* Exit sleep mode */
135 err = mdfld_dsi_send_dcs(sender,
136 DCS_EXIT_SLEEP_MODE,
137 NULL,
138 0,
139 CMD_DATA_SRC_SYSTEM_MEM,
140 MDFLD_DSI_QUEUE_PACKAGE);
141 if (err) {
142 dev_err(dev->dev, "DCS 0x%x sent failed\n",
143 DCS_EXIT_SLEEP_MODE);
144 goto power_err;
145 }
146
147 /* Set display on */
148 err = mdfld_dsi_send_dcs(sender,
149 DCS_SET_DISPLAY_ON,
150 NULL,
151 0,
152 CMD_DATA_SRC_SYSTEM_MEM,
153 MDFLD_DSI_QUEUE_PACKAGE);
154 if (err) {
155 dev_err(dev->dev, "DCS 0x%x sent failed\n",
156 DCS_SET_DISPLAY_ON);
157 goto power_err;
158 }
159
160 /* set tear effect on */
161 err = mdfld_dsi_send_dcs(sender,
162 DCS_SET_TEAR_ON,
163 &param,
164 1,
165 CMD_DATA_SRC_SYSTEM_MEM,
166 MDFLD_DSI_QUEUE_PACKAGE);
167 if (err) {
168 dev_err(dev->dev, "DCS 0x%x sent failed\n",
169 DCS_SET_TEAR_ON);
170 goto power_err;
171 }
172
173 /**
174 * FIXME: remove this later
175 */
176 err = mdfld_dsi_send_dcs(sender,
177 DCS_WRITE_MEM_START,
178 NULL,
179 0,
180 CMD_DATA_SRC_PIPE,
181 MDFLD_DSI_QUEUE_PACKAGE);
182 if (err) {
183 dev_err(dev->dev, "DCS 0x%x sent failed\n",
184 DCS_WRITE_MEM_START);
185 goto power_err;
186 }
187 } else {
188 /* Set tear effect off */
189 err = mdfld_dsi_send_dcs(sender,
190 DCS_SET_TEAR_OFF,
191 NULL,
192 0,
193 CMD_DATA_SRC_SYSTEM_MEM,
194 MDFLD_DSI_QUEUE_PACKAGE);
195 if (err) {
196 dev_err(dev->dev, "DCS 0x%x sent failed\n",
197 DCS_SET_TEAR_OFF);
198 goto power_err;
199 }
200
201 /* Turn display off */
202 err = mdfld_dsi_send_dcs(sender,
203 DCS_SET_DISPLAY_OFF,
204 NULL,
205 0,
206 CMD_DATA_SRC_SYSTEM_MEM,
207 MDFLD_DSI_QUEUE_PACKAGE);
208 if (err) {
209 dev_err(dev->dev, "DCS 0x%x sent failed\n",
210 DCS_SET_DISPLAY_OFF);
211 goto power_err;
212 }
213
214 /* Now enter sleep mode */
215 err = mdfld_dsi_send_dcs(sender,
216 DCS_ENTER_SLEEP_MODE,
217 NULL,
218 0,
219 CMD_DATA_SRC_SYSTEM_MEM,
220 MDFLD_DSI_QUEUE_PACKAGE);
221 if (err) {
222 dev_err(dev->dev, "DCS 0x%x sent failed\n",
223 DCS_ENTER_SLEEP_MODE);
224 goto power_err;
225 }
226 }
227 mdfld_dsi_cmds_kick_out(sender);
228power_err:
229 return err;
230}
231
232/*
233 * send a generic DCS command with a parameter list
234 */
235int mdfld_dsi_dbi_send_dcs(struct mdfld_dsi_dbi_output *dbi_output,
236 u8 dcs, u8 *param, u32 num, u8 data_src)
237{
238 struct mdfld_dsi_pkg_sender *sender =
239 mdfld_dsi_encoder_get_pkg_sender(&dbi_output->base);
240 int ret;
241
242 if (!sender) {
243 WARN_ON(1);
244 return -EINVAL;
245 }
246
247 ret = mdfld_dsi_send_dcs(sender,
248 dcs,
249 param,
250 num,
251 data_src,
252 MDFLD_DSI_SEND_PACKAGE);
253
254 return ret;
255}
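A minimal usage sketch of the wrapper above: sending a parameter-less DCS command. DCS_SET_DISPLAY_ON and CMD_DATA_SRC_SYSTEM_MEM are the constants already used by the DPMS path earlier in this file; the function name here is only illustrative:

/* Turn the panel on through the generic DCS helper. */
static int panel_set_display_on(struct mdfld_dsi_dbi_output *dbi_output)
{
	return mdfld_dsi_dbi_send_dcs(dbi_output,
				DCS_SET_DISPLAY_ON,
				NULL,	/* no parameters */
				0,	/* parameter count */
				CMD_DATA_SRC_SYSTEM_MEM);
}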
256
257/*
258 * Enter DSR
259 */
260void mdfld_dsi_dbi_enter_dsr(struct mdfld_dsi_dbi_output *dbi_output, int pipe)
261{
262 u32 reg_val;
263 struct drm_device *dev = dbi_output->dev;
264 struct drm_psb_private *dev_priv = dev->dev_private;
265 struct drm_crtc *crtc = dbi_output->base.base.crtc;
266 struct psb_intel_crtc *psb_crtc = (crtc) ?
267 to_psb_intel_crtc(crtc) : NULL;
268 u32 dpll_reg = MRST_DPLL_A;
269 u32 pipeconf_reg = PIPEACONF;
270 u32 dspcntr_reg = DSPACNTR;
271
272 if (!dbi_output)
273 return;
274
275 /* FIXME check if can go */
276 dev_priv->is_in_idle = true;
277
278 gdbi_output = dbi_output;
279 if ((dbi_output->mode_flags & MODE_SETTING_ON_GOING) ||
280 (psb_crtc && psb_crtc->mode_flags & MODE_SETTING_ON_GOING))
281 return;
282
283 if (pipe == 2) {
284 dpll_reg = MRST_DPLL_A;
285 pipeconf_reg = PIPECCONF;
286 dspcntr_reg = DSPCCNTR;
287 }
288
289 if (!gma_power_begin(dev, true)) {
290 dev_err(dev->dev, "hw begin failed\n");
291 return;
292 }
293 /* Disable TE interrupts */
294 mdfld_disable_te(dev, pipe);
295
296 /* Disable plane */
297 reg_val = REG_READ(dspcntr_reg);
298 if (reg_val & DISPLAY_PLANE_ENABLE) {
299 REG_WRITE(dspcntr_reg, reg_val & ~DISPLAY_PLANE_ENABLE);
300 REG_READ(dspcntr_reg);
301 }
302
303 /* Disable pipe */
304 reg_val = REG_READ(pipeconf_reg);
305 if (reg_val & PIPEACONF_ENABLE) {
306 reg_val &= ~PIPEACONF_ENABLE;
307 reg_val |= (PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF);
308 REG_WRITE(pipeconf_reg, reg_val);
309 REG_READ(pipeconf_reg);
310 mdfldWaitForPipeDisable(dev, pipe);
311 }
312
313 /* Disable DPLL */
314 reg_val = REG_READ(dpll_reg);
315 if (reg_val & DPLL_VCO_ENABLE) {
316 reg_val &= ~DPLL_VCO_ENABLE;
317 REG_WRITE(dpll_reg, reg_val);
318 REG_READ(dpll_reg);
319 udelay(500);
320 }
321
322 gma_power_end(dev);
323 dbi_output->mode_flags |= MODE_SETTING_IN_DSR;
324 if (pipe == 2) {
325 enter_dsr = 1;
326 /* pm_schedule_suspend(&dev->pdev->dev, gfxrtdelay); */
327 }
328}
329
330static void mdfld_dbi_output_exit_dsr(struct mdfld_dsi_dbi_output *dbi_output,
331 int pipe)
332{
333 struct drm_device *dev = dbi_output->dev;
334 struct drm_crtc *crtc = dbi_output->base.base.crtc;
335 struct psb_intel_crtc *psb_crtc = (crtc) ?
336 to_psb_intel_crtc(crtc) : NULL;
337 u32 reg_val;
338 u32 dpll_reg = MRST_DPLL_A;
339 u32 pipeconf_reg = PIPEACONF;
340 u32 dspcntr_reg = DSPACNTR;
341 u32 reg_offset = 0;
342
343 /*if mode setting on-going, back off*/
344 if ((dbi_output->mode_flags & MODE_SETTING_ON_GOING) ||
345 (psb_crtc && psb_crtc->mode_flags & MODE_SETTING_ON_GOING))
346 return;
347
348 if (pipe == 2) {
349 dpll_reg = MRST_DPLL_A;
350 pipeconf_reg = PIPECCONF;
351 dspcntr_reg = DSPCCNTR;
352 reg_offset = MIPIC_REG_OFFSET;
353 }
354
355 if (!gma_power_begin(dev, true)) {
356 dev_err(dev->dev, "hw begin failed\n");
357 return;
358 }
359
360 /* Enable DPLL */
361 reg_val = REG_READ(dpll_reg);
362 if (!(reg_val & DPLL_VCO_ENABLE)) {
363 if (reg_val & MDFLD_PWR_GATE_EN) {
364 reg_val &= ~MDFLD_PWR_GATE_EN;
365 REG_WRITE(dpll_reg, reg_val);
366 REG_READ(dpll_reg);
367 udelay(500);
368 }
369
370 reg_val |= DPLL_VCO_ENABLE;
371 REG_WRITE(dpll_reg, reg_val);
372 REG_READ(dpll_reg);
373 udelay(500);
374
375 /* Add timeout */
376 while (!(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK))
377 cpu_relax();
378 }
379
380 /* Enable pipe */
381 reg_val = REG_READ(pipeconf_reg);
382 if (!(reg_val & PIPEACONF_ENABLE)) {
383 reg_val |= PIPEACONF_ENABLE;
384 REG_WRITE(pipeconf_reg, reg_val);
385 REG_READ(pipeconf_reg);
386 udelay(500);
387 mdfldWaitForPipeEnable(dev, pipe);
388 }
389
390 /* Enable plane */
391 reg_val = REG_READ(dspcntr_reg);
392 if (!(reg_val & DISPLAY_PLANE_ENABLE)) {
393 reg_val |= DISPLAY_PLANE_ENABLE;
394 REG_WRITE(dspcntr_reg, reg_val);
395 REG_READ(dspcntr_reg);
396 udelay(500);
397 }
398
399 /* Enable TE interrupt on this pipe */
400 mdfld_enable_te(dev, pipe);
401 gma_power_end(dev);
402
403 /*clean IN_DSR flag*/
404 dbi_output->mode_flags &= ~MODE_SETTING_IN_DSR;
405}
406
407/*
408 * Exit from DSR
409 */
410void mdfld_dsi_dbi_exit_dsr(struct drm_device *dev, u32 update_src)
411{
412 struct drm_psb_private *dev_priv = dev->dev_private;
413 struct mdfld_dbi_dsr_info *dsr_info = dev_priv->dbi_dsr_info;
414 struct mdfld_dsi_dbi_output **dbi_output;
415 int i;
416 int pipe;
417
418 /* FIXME can go ? */
419 dev_priv->is_in_idle = false;
420 dbi_output = dsr_info->dbi_outputs;
421
422#ifdef CONFIG_PM_RUNTIME
423 if (!enable_gfx_rtpm) {
424/* pm_runtime_allow(&gpDrmDevice->pdev->dev); */
425/* schedule_delayed_work(&rtpm_work, 30 * 1000);*/ /* FIXME: HZ ? */
426 }
427#endif
428
429 /* For each output, exit dsr */
430 for (i = 0; i < dsr_info->dbi_output_num; i++) {
431 /* If panel has been turned off, skip */
432 if (!dbi_output[i] || !dbi_output[i]->dbi_panel_on)
433 continue;
434 pipe = dbi_output[i]->channel_num ? 2 : 0;
435 enter_dsr = 0;
436 mdfld_dbi_output_exit_dsr(dbi_output[i], pipe);
437 }
438 dev_priv->dsr_fb_update |= update_src;
439}
440
441static bool mdfld_dbi_is_in_dsr(struct drm_device *dev)
442{
443 if (REG_READ(MRST_DPLL_A) & DPLL_VCO_ENABLE)
444 return false;
445 if ((REG_READ(PIPEACONF) & PIPEACONF_ENABLE) ||
446 (REG_READ(PIPECCONF) & PIPEACONF_ENABLE))
447 return false;
448 if ((REG_READ(DSPACNTR) & DISPLAY_PLANE_ENABLE) ||
449 (REG_READ(DSPCCNTR) & DISPLAY_PLANE_ENABLE))
450 return false;
451
452 return true;
453}
454
455/* Periodically update dbi panel */
456void mdfld_dbi_update_panel(struct drm_device *dev, int pipe)
457{
458 struct drm_psb_private *dev_priv = dev->dev_private;
459 struct mdfld_dbi_dsr_info *dsr_info = dev_priv->dbi_dsr_info;
460 struct mdfld_dsi_dbi_output **dbi_outputs;
461 struct mdfld_dsi_dbi_output *dbi_output;
462 int i;
463 int can_enter_dsr = 0;
464 u32 damage_mask;
465
466 dbi_outputs = dsr_info->dbi_outputs;
467 dbi_output = pipe ? dbi_outputs[1] : dbi_outputs[0];
468
469 if (!dbi_output)
470 return;
471
472 if (pipe == 0)
473 damage_mask = dev_priv->dsr_fb_update & MDFLD_DSR_DAMAGE_MASK_0;
474 else if (pipe == 2)
475 damage_mask = dev_priv->dsr_fb_update & MDFLD_DSR_DAMAGE_MASK_2;
476 else
477 return;
478
479 /* If the FB is damaged and the panel is on, update the on-panel FB */
480 if (damage_mask && dbi_output->dbi_panel_on) {
481 dbi_output->dsr_fb_update_done = false;
482
483 if (dbi_output->p_funcs->update_fb)
484 dbi_output->p_funcs->update_fb(dbi_output, pipe);
485
486 if (dev_priv->dsr_enable && dbi_output->dsr_fb_update_done)
487 dev_priv->dsr_fb_update &= ~damage_mask;
488
489 /*clean IN_DSR flag*/
490 dbi_output->mode_flags &= ~MODE_SETTING_IN_DSR;
491
492 dbi_output->dsr_idle_count = 0;
493 } else {
494 dbi_output->dsr_idle_count++;
495 }
496
497 switch (dsr_info->dbi_output_num) {
498 case 1:
499 if (dbi_output->dsr_idle_count > MDFLD_DSR_MAX_IDLE_COUNT)
500 can_enter_dsr = 1;
501 break;
502 case 2:
503 if (dbi_outputs[0]->dsr_idle_count > MDFLD_DSR_MAX_IDLE_COUNT
504 && dbi_outputs[1]->dsr_idle_count > MDFLD_DSR_MAX_IDLE_COUNT)
505 can_enter_dsr = 1;
506 break;
507 default:
508 DRM_ERROR("Wrong DBI output number\n");
509 }
510
511 /* Try to enter DSR */
512 if (can_enter_dsr) {
513 for (i = 0; i < dsr_info->dbi_output_num; i++) {
514 if (!mdfld_dbi_is_in_dsr(dev) && dbi_outputs[i] &&
515 !(dbi_outputs[i]->mode_flags & MODE_SETTING_ON_GOING)) {
516 mdfld_dsi_dbi_enter_dsr(dbi_outputs[i],
517 dbi_outputs[i]->channel_num ? 2 : 0);
518#if 0
519 enter_dsr = 1;
520 pr_err("%s: enter_dsr = 1\n", __func__);
521#endif
522 }
523 }
524 /*schedule rpm suspend after gfxrtdelay*/
525#ifdef CONFIG_GFX_RTPM
526 if (!dev_priv->rpm_enabled
527 || !enter_dsr
528 /* || (REG_READ(HDMIB_CONTROL) & HDMIB_PORT_EN) */
529 || pm_schedule_suspend(&dev->pdev->dev, gfxrtdelay))
530 dev_warn(dev->dev,
531 "Runtime PM schedule suspend failed, rpm %d\n",
532 dev_priv->rpm_enabled);
533#endif
534 }
535}
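The DSR entry decision above depends only on per-output idle counters. A standalone restatement of the threshold test, assuming the MDFLD_DSR_MAX_IDLE_COUNT value of 2 defined near the top of this file:

/* DSR entry is attempted only after more than MDFLD_DSR_MAX_IDLE_COUNT
 * (i.e. at least three) consecutive panel updates with no FB damage. */
static inline int dsr_idle_threshold_reached(u32 dsr_idle_count)
{
	return dsr_idle_count > MDFLD_DSR_MAX_IDLE_COUNT;
}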
536
537int mdfld_dbi_dsr_init(struct drm_device *dev)
538{
539 struct drm_psb_private *dev_priv = dev->dev_private;
540 struct mdfld_dbi_dsr_info *dsr_info = dev_priv->dbi_dsr_info;
541
542 if (!dsr_info || IS_ERR(dsr_info)) {
543 dsr_info = kzalloc(sizeof(struct mdfld_dbi_dsr_info),
544 GFP_KERNEL);
545 if (!dsr_info) {
546 dev_err(dev->dev, "No memory\n");
547 return -ENOMEM;
548 }
549 dev_priv->dbi_dsr_info = dsr_info;
550 }
551 return 0;
552}
553
554void mdfld_dbi_dsr_exit(struct drm_device *dev)
555{
556 struct drm_psb_private *dev_priv = dev->dev_private;
557 struct mdfld_dbi_dsr_info *dsr_info = dev_priv->dbi_dsr_info;
558
559 if (dsr_info) {
560 kfree(dsr_info);
561 dev_priv->dbi_dsr_info = NULL;
562 }
563}
564
565void mdfld_dsi_controller_dbi_init(struct mdfld_dsi_config *dsi_config,
566 int pipe)
567{
568 struct drm_device *dev = dsi_config->dev;
569 u32 reg_offset = pipe ? MIPIC_REG_OFFSET : 0;
570 int lane_count = dsi_config->lane_count;
571 u32 val = 0;
572
573 dev_dbg(dev->dev, "Init DBI interface on pipe %d...\n", pipe);
574
575 /* Un-ready device */
576 REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000000);
577
578 /* Init dsi adapter before kicking off */
579 REG_WRITE((MIPIA_CONTROL_REG + reg_offset), 0x00000018);
580
581 /* TODO: figure out how to setup these registers */
582 REG_WRITE((MIPIA_DPHY_PARAM_REG + reg_offset), 0x150c3408);
583 REG_WRITE((MIPIA_CLK_LANE_SWITCH_TIME_CNT_REG + reg_offset),
584 0x000a0014);
585 REG_WRITE((MIPIA_DBI_BW_CTRL_REG + reg_offset), 0x00000400);
586 REG_WRITE((MIPIA_DBI_FIFO_THROTTLE_REG + reg_offset), 0x00000001);
587 REG_WRITE((MIPIA_HS_LS_DBI_ENABLE_REG + reg_offset), 0x00000000);
588
589 /* Enable all interrupts */
590 REG_WRITE((MIPIA_INTR_EN_REG + reg_offset), 0xffffffff);
591 /* Max value: 20 clock cycles of txclkesc */
592 REG_WRITE((MIPIA_TURN_AROUND_TIMEOUT_REG + reg_offset), 0x0000001f);
593 /* Min 21 txclkesc, max: ffffh */
594 REG_WRITE((MIPIA_DEVICE_RESET_TIMER_REG + reg_offset), 0x0000ffff);
595 /* Min: 7d0 max: 4e20 */
596 REG_WRITE((MIPIA_INIT_COUNT_REG + reg_offset), 0x00000fa0);
597
598 /* Set up func_prg */
599 val |= lane_count;
600 val |= (dsi_config->channel_num << DSI_DBI_VIRT_CHANNEL_OFFSET);
601 val |= DSI_DBI_COLOR_FORMAT_OPTION2;
602 REG_WRITE((MIPIA_DSI_FUNC_PRG_REG + reg_offset), val);
603
604 REG_WRITE((MIPIA_HS_TX_TIMEOUT_REG + reg_offset), 0x3fffff);
605 REG_WRITE((MIPIA_LP_RX_TIMEOUT_REG + reg_offset), 0xffff);
606
607 /* De-assert dbi_stall when half of DBI FIFO is empty */
608 /* REG_WRITE((MIPIA_DBI_FIFO_THROTTLE_REG + reg_offset), 0x00000000); */
609
610 REG_WRITE((MIPIA_HIGH_LOW_SWITCH_COUNT_REG + reg_offset), 0x46);
611 REG_WRITE((MIPIA_EOT_DISABLE_REG + reg_offset), 0x00000000);
612 REG_WRITE((MIPIA_LP_BYTECLK_REG + reg_offset), 0x00000004);
613 REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000001);
614}
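The func_prg write above packs the lane count, virtual channel and colour format into a single register value. A short sketch of the same composition for a hypothetical 2-lane, virtual-channel-0 panel; the two macros are the ones used by the function above:

/* MIPIA_DSI_FUNC_PRG value for a hypothetical 2-lane, channel-0 DBI panel. */
u32 func_prg = 2				/* lane count */
	| (0 << DSI_DBI_VIRT_CHANNEL_OFFSET)	/* virtual channel 0 */
	| DSI_DBI_COLOR_FORMAT_OPTION2;		/* colour format */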
615
616#if 0
617/*DBI encoder helper funcs*/
618static const struct drm_encoder_helper_funcs mdfld_dsi_dbi_helper_funcs = {
619 .dpms = mdfld_dsi_dbi_dpms,
620 .mode_fixup = mdfld_dsi_dbi_mode_fixup,
621 .prepare = mdfld_dsi_dbi_prepare,
622 .mode_set = mdfld_dsi_dbi_mode_set,
623 .commit = mdfld_dsi_dbi_commit,
624};
625
626/*DBI encoder funcs*/
627static const struct drm_encoder_funcs mdfld_dsi_dbi_encoder_funcs = {
628 .destroy = drm_encoder_cleanup,
629};
630
631#endif
632
633/*
634 * Init DSI DBI encoder.
635 * Allocate an mdfld_dsi_encoder and attach it to given @dsi_connector
636 * return pointer of newly allocated DBI encoder, NULL on error
637 */
638struct mdfld_dsi_encoder *mdfld_dsi_dbi_init(struct drm_device *dev,
639 struct mdfld_dsi_connector *dsi_connector,
640 struct panel_funcs *p_funcs)
641{
642 struct drm_psb_private *dev_priv = dev->dev_private;
643 struct mdfld_dsi_dbi_output *dbi_output = NULL;
644 struct mdfld_dsi_config *dsi_config;
645 struct drm_connector *connector = NULL;
646 struct drm_encoder *encoder = NULL;
647 struct drm_display_mode *fixed_mode = NULL;
648 struct psb_gtt *pg = dev_priv ? (&dev_priv->gtt) : NULL;
649 struct mdfld_dbi_dpu_info *dpu_info = dev_priv ? (dev_priv->dbi_dpu_info) : NULL;
650 struct mdfld_dbi_dsr_info *dsr_info = dev_priv ? (dev_priv->dbi_dsr_info) : NULL;
651 u32 data = 0;
652 int pipe;
653 int ret;
654
655 if (!pg || !dsi_connector || !p_funcs) {
656 WARN_ON(1);
657 return NULL;
658 }
659
660 dsi_config = mdfld_dsi_get_config(dsi_connector);
661 pipe = dsi_connector->pipe;
662
663 /*panel hard-reset*/
664 if (p_funcs->reset) {
665 ret = p_funcs->reset(pipe);
666 if (ret) {
667 DRM_ERROR("Panel %d hard-reset failed\n", pipe);
668 return NULL;
669 }
670 }
671 /* Panel drvIC init */
672 if (p_funcs->drv_ic_init)
673 p_funcs->drv_ic_init(dsi_config, pipe);
674
675 /* Panel power mode detect */
676 ret = mdfld_dsi_get_power_mode(dsi_config,
677 &data,
678 MDFLD_DSI_HS_TRANSMISSION);
679 if (ret) {
680 DRM_ERROR("Panel %d get power mode failed\n", pipe);
681 dsi_connector->status = connector_status_disconnected;
682 } else {
683 DRM_INFO("pipe %d power mode 0x%x\n", pipe, data);
684 dsi_connector->status = connector_status_connected;
685 }
686
687 /*TODO: get panel info from DDB*/
688
689 dbi_output = kzalloc(sizeof(struct mdfld_dsi_dbi_output), GFP_KERNEL);
690 if (!dbi_output) {
691 dev_err(dev->dev, "No memory\n");
692 return NULL;
693 }
694
695 if (dsi_connector->pipe == 0) {
696 dbi_output->channel_num = 0;
697 dev_priv->dbi_output = dbi_output;
698 } else if (dsi_connector->pipe == 2) {
699 dbi_output->channel_num = 1;
700 dev_priv->dbi_output2 = dbi_output;
701 } else {
702 dev_err(dev->dev, "Only two DSI outputs are supported\n");
703 goto out_err1;
704 }
705
706 dbi_output->dev = dev;
707 dbi_output->p_funcs = p_funcs;
708 fixed_mode = dsi_config->fixed_mode;
709 dbi_output->panel_fixed_mode = fixed_mode;
710
711 /* Create drm encoder object */
712 connector = &dsi_connector->base.base;
713 encoder = &dbi_output->base.base;
714 /* Review this if we ever get MIPI-HDMI bridges or similar */
715 drm_encoder_init(dev,
716 encoder,
717 p_funcs->encoder_funcs,
718 DRM_MODE_ENCODER_LVDS);
719 drm_encoder_helper_add(encoder, p_funcs->encoder_helper_funcs);
720
721 /* Attach to given connector */
722 drm_mode_connector_attach_encoder(connector, encoder);
723
724 /* Set possible CRTCs and clones */
725 if (dsi_connector->pipe) {
726 encoder->possible_crtcs = (1 << 2);
727 encoder->possible_clones = (1 << 1);
728 } else {
729 encoder->possible_crtcs = (1 << 0);
730 encoder->possible_clones = (1 << 0);
731 }
732
733 dev_priv->dsr_fb_update = 0;
734 dev_priv->dsr_enable = false;
735 dev_priv->exit_idle = mdfld_dsi_dbi_exit_dsr;
736
737 dbi_output->first_boot = true;
738 dbi_output->mode_flags = MODE_SETTING_IN_ENCODER;
739
740 /* Add this output to dpu_info if in DPU mode */
741 if (dpu_info && dsi_connector->status == connector_status_connected) {
742 if (dsi_connector->pipe == 0)
743 dpu_info->dbi_outputs[0] = dbi_output;
744 else
745 dpu_info->dbi_outputs[1] = dbi_output;
746
747 dpu_info->dbi_output_num++;
748 } else if (dsi_connector->status == connector_status_connected) {
749 /* Add this output to dsr_info if not */
750 if (dsi_connector->pipe == 0)
751 dsr_info->dbi_outputs[0] = dbi_output;
752 else
753 dsr_info->dbi_outputs[1] = dbi_output;
754
755 dsr_info->dbi_output_num++;
756 }
757 return &dbi_output->base;
758out_err1:
759 kfree(dbi_output);
760 return NULL;
761}
diff --git a/drivers/staging/gma500/mdfld_dsi_dbi.h b/drivers/staging/gma500/mdfld_dsi_dbi.h
deleted file mode 100644
index f0fa986fd934..000000000000
--- a/drivers/staging/gma500/mdfld_dsi_dbi.h
+++ /dev/null
@@ -1,173 +0,0 @@
1/*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * jim liu <jim.liu@intel.com>
25 * Jackie Li<yaodong.li@intel.com>
26 */
27
28#ifndef __MDFLD_DSI_DBI_H__
29#define __MDFLD_DSI_DBI_H__
30
31#include <linux/backlight.h>
32#include <drm/drmP.h>
33#include <drm/drm.h>
34#include <drm/drm_crtc.h>
35#include <drm/drm_edid.h>
36
37#include "psb_drv.h"
38#include "psb_intel_drv.h"
39#include "psb_intel_reg.h"
40#include "power.h"
41
42#include "mdfld_dsi_output.h"
43#include "mdfld_output.h"
44
45/*
46 * DBI encoder which inherits from mdfld_dsi_encoder
47 */
48struct mdfld_dsi_dbi_output {
49 struct mdfld_dsi_encoder base;
50 struct drm_display_mode *panel_fixed_mode;
51 u8 last_cmd;
52 u8 lane_count;
53 u8 channel_num;
54 struct drm_device *dev;
55
56 /* Backlight operations */
57
58 /* DSR timer */
59 u32 dsr_idle_count;
60 bool dsr_fb_update_done;
61
62 /* Mode setting flags */
63 u32 mode_flags;
64
65 /* Panel status */
66 bool dbi_panel_on;
67 bool first_boot;
68 struct panel_funcs *p_funcs;
69
70 /* DPU */
71 u32 *dbi_cb_addr;
72 u32 dbi_cb_phy;
73 spinlock_t cb_lock;
74 u32 cb_write;
75};
76
77#define MDFLD_DSI_DBI_OUTPUT(dsi_encoder) \
78 container_of(dsi_encoder, struct mdfld_dsi_dbi_output, base)
79
80struct mdfld_dbi_dsr_info {
81 int dbi_output_num;
82 struct mdfld_dsi_dbi_output *dbi_outputs[2];
83
84 u32 dsr_idle_count;
85};
86
87#define DBI_CB_TIMEOUT_COUNT 0xffff
88
89/* Offsets */
90#define CMD_MEM_ADDR_OFFSET 0
91
92#define CMD_DATA_SRC_SYSTEM_MEM 0
93#define CMD_DATA_SRC_PIPE 1
94
95static inline int mdfld_dsi_dbi_fifo_ready(struct mdfld_dsi_dbi_output *dbi_output)
96{
97 struct drm_device *dev = dbi_output->dev;
98 u32 retry = DBI_CB_TIMEOUT_COUNT;
99 int reg_offset = (dbi_output->channel_num == 1) ? MIPIC_REG_OFFSET : 0;
100 int ret = 0;
101
102 /* Query the dbi fifo status*/
103 while (--retry) {
104 if (REG_READ(MIPIA_GEN_FIFO_STAT_REG + reg_offset) & (1 << 27))
105 break;
106 }
107
108 if (!retry) {
109 DRM_ERROR("Timeout waiting for DBI FIFO empty\n");
110 ret = -EAGAIN;
111 }
112 return ret;
113}
114
115static inline int mdfld_dsi_dbi_cmd_sent(struct mdfld_dsi_dbi_output *dbi_output)
116{
117 struct drm_device *dev = dbi_output->dev;
118 u32 retry = DBI_CB_TIMEOUT_COUNT;
119 int reg_offset = (dbi_output->channel_num == 1) ? MIPIC_REG_OFFSET : 0;
120 int ret = 0;
121
122 /* Query the command execution status */
123 while (--retry)
124 if (!(REG_READ(MIPIA_CMD_ADD_REG + reg_offset) & (1 << 0)))
125 break;
126
127 if (!retry) {
128 DRM_ERROR("Timeout waiting for DBI command status\n");
129 ret = -EAGAIN;
130 }
131
132 return ret;
133}
134
135static inline int mdfld_dsi_dbi_cb_ready(struct mdfld_dsi_dbi_output *dbi_output)
136{
137 int ret = 0;
138
139 /* Query the command execution status*/
140 ret = mdfld_dsi_dbi_cmd_sent(dbi_output);
141 if (ret) {
142 DRM_ERROR("Peripheral is busy\n");
143 return -EAGAIN;
144 }
145 /* Query the dbi fifo status*/
146 ret = mdfld_dsi_dbi_fifo_ready(dbi_output);
147 if (ret) {
148 DRM_ERROR("DBI FIFO is not empty\n");
149 ret = -EAGAIN;
150 }
151 return ret;
152}
153
154extern void mdfld_dsi_dbi_output_init(struct drm_device *dev,
155 struct psb_intel_mode_device *mode_dev, int pipe);
156extern void mdfld_dsi_dbi_exit_dsr(struct drm_device *dev, u32 update_src);
157extern void mdfld_dsi_dbi_enter_dsr(struct mdfld_dsi_dbi_output *dbi_output,
158 int pipe);
159extern int mdfld_dbi_dsr_init(struct drm_device *dev);
160extern void mdfld_dbi_dsr_exit(struct drm_device *dev);
161extern struct mdfld_dsi_encoder *mdfld_dsi_dbi_init(struct drm_device *dev,
162 struct mdfld_dsi_connector *dsi_connector,
163 struct panel_funcs *p_funcs);
164extern int mdfld_dsi_dbi_send_dcs(struct mdfld_dsi_dbi_output *dbi_output,
165 u8 dcs, u8 *param, u32 num, u8 data_src);
166extern int mdfld_dsi_dbi_update_area(struct mdfld_dsi_dbi_output *dbi_output,
167 u16 x1, u16 y1, u16 x2, u16 y2);
168extern int mdfld_dsi_dbi_update_power(struct mdfld_dsi_dbi_output *dbi_output,
169 int mode);
170extern void mdfld_dsi_controller_dbi_init(struct mdfld_dsi_config *dsi_config,
171 int pipe);
172
173#endif /*__MDFLD_DSI_DBI_H__*/
diff --git a/drivers/staging/gma500/mdfld_dsi_dbi_dpu.c b/drivers/staging/gma500/mdfld_dsi_dbi_dpu.c
deleted file mode 100644
index a4e2ff442b1f..000000000000
--- a/drivers/staging/gma500/mdfld_dsi_dbi_dpu.c
+++ /dev/null
@@ -1,778 +0,0 @@
1/*
2 * Copyright © 2010-2011 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Jim Liu <jim.liu@intel.com>
25 * Jackie Li<yaodong.li@intel.com>
26 */
27
28#include "mdfld_dsi_dbi_dpu.h"
29#include "mdfld_dsi_dbi.h"
30
31/*
32 * NOTE: all mdlfd_x_damage funcs should be called by holding dpu_update_lock
33 */
34
35static int mdfld_cursor_damage(struct mdfld_dbi_dpu_info *dpu_info,
36 mdfld_plane_t plane,
37 struct psb_drm_dpu_rect *damaged_rect)
38{
39 int x, y;
40 int new_x, new_y;
41 struct psb_drm_dpu_rect *rect;
42 struct psb_drm_dpu_rect *pipe_rect;
43 int cursor_size;
44 struct mdfld_cursor_info *cursor;
45 mdfld_plane_t fb_plane;
46
47 if (plane == MDFLD_CURSORA) {
48 cursor = &dpu_info->cursors[0];
49 x = dpu_info->cursors[0].x;
50 y = dpu_info->cursors[0].y;
51 cursor_size = dpu_info->cursors[0].size;
52 pipe_rect = &dpu_info->damage_pipea;
53 fb_plane = MDFLD_PLANEA;
54 } else {
55 cursor = &dpu_info->cursors[1];
56 x = dpu_info->cursors[1].x;
57 y = dpu_info->cursors[1].y;
58 cursor_size = dpu_info->cursors[1].size;
59 pipe_rect = &dpu_info->damage_pipec;
60 fb_plane = MDFLD_PLANEC;
61 }
62 new_x = damaged_rect->x;
63 new_y = damaged_rect->y;
64
65 if (x == new_x && y == new_y)
66 return 0;
67
68 rect = &dpu_info->damaged_rects[plane];
69 /* Move to right */
70 if (new_x >= x) {
71 if (new_y > y) {
72 rect->x = x;
73 rect->y = y;
74 rect->width = (new_x + cursor_size) - x;
75 rect->height = (new_y + cursor_size) - y;
76 goto cursor_out;
77 } else {
78 rect->x = x;
79 rect->y = new_y;
80 rect->width = (new_x + cursor_size) - x;
81 rect->height = (y - new_y);
82 goto cursor_out;
83 }
84 } else {
85 if (new_y > y) {
86 rect->x = new_x;
87 rect->y = y;
88 rect->width = (x + cursor_size) - new_x;
89 rect->height = new_y - y;
90 goto cursor_out;
91 } else {
92 rect->x = new_x;
93 rect->y = new_y;
94 rect->width = (x + cursor_size) - new_x;
95 rect->height = (y + cursor_size) - new_y;
96 }
97 }
98cursor_out:
99 if (new_x < 0)
100 cursor->x = 0;
101 else if (new_x > 864)
102 cursor->x = 864;
103 else
104 cursor->x = new_x;
105
106 if (new_y < 0)
107 cursor->y = 0;
108 else if (new_y > 480)
109 cursor->y = 480;
110 else
111 cursor->y = new_y;
112
113 /*
114 * FIXME: this is a workaround for cursor plane update,
115 * remove it later!
116 */
117 rect->x = 0;
118 rect->y = 0;
119 rect->width = 864;
120 rect->height = 480;
121
122 mdfld_check_boundary(dpu_info, rect);
123 mdfld_dpu_region_extent(pipe_rect, rect);
124
125 /* Update pending status of dpu_info */
126 dpu_info->pending |= (1 << plane);
127 /* Update fb panel as well */
128 dpu_info->pending |= (1 << fb_plane);
129 return 0;
130}
131
132static int mdfld_fb_damage(struct mdfld_dbi_dpu_info *dpu_info,
133 mdfld_plane_t plane,
134 struct psb_drm_dpu_rect *damaged_rect)
135{
136 struct psb_drm_dpu_rect *rect;
137
138 if (plane == MDFLD_PLANEA)
139 rect = &dpu_info->damage_pipea;
140 else
141 rect = &dpu_info->damage_pipec;
142
143 mdfld_check_boundary(dpu_info, damaged_rect);
144
145 /* Add fb damage area to this pipe */
146 mdfld_dpu_region_extent(rect, damaged_rect);
147
148 /* Update pending status of dpu_info */
149 dpu_info->pending |= (1 << plane);
150 return 0;
151}
152
153/* Do nothing here, right now */
154static int mdfld_overlay_damage(struct mdfld_dbi_dpu_info *dpu_info,
155 mdfld_plane_t plane,
156 struct psb_drm_dpu_rect *damaged_rect)
157{
158 return 0;
159}
160
161int mdfld_dbi_dpu_report_damage(struct drm_device *dev,
162 mdfld_plane_t plane,
163 struct psb_drm_dpu_rect *rect)
164{
165 struct drm_psb_private *dev_priv = dev->dev_private;
166 struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
167 int ret = 0;
168
169 /* DPU not in use, no damage reporting needed */
170 if (dpu_info == NULL)
171 return 0;
172
173 spin_lock(&dpu_info->dpu_update_lock);
174
175 switch (plane) {
176 case MDFLD_PLANEA:
177 case MDFLD_PLANEC:
178 mdfld_fb_damage(dpu_info, plane, rect);
179 break;
180 case MDFLD_CURSORA:
181 case MDFLD_CURSORC:
182 mdfld_cursor_damage(dpu_info, plane, rect);
183 break;
184 case MDFLD_OVERLAYA:
185 case MDFLD_OVERLAYC:
186 mdfld_overlay_damage(dpu_info, plane, rect);
187 break;
188 default:
189 DRM_ERROR("Invalid plane type %d\n", plane);
190 ret = -EINVAL;
191 }
192 spin_unlock(&dpu_info->dpu_update_lock);
193 return ret;
194}
195
196int mdfld_dbi_dpu_report_fullscreen_damage(struct drm_device *dev)
197{
198 struct drm_psb_private *dev_priv;
199 struct mdfld_dbi_dpu_info *dpu_info;
200 struct mdfld_dsi_config *dsi_config;
201 struct psb_drm_dpu_rect rect;
202 int i;
203
204 if (!dev) {
205 DRM_ERROR("Invalid parameter\n");
206 return -EINVAL;
207 }
208
209 dev_priv = dev->dev_private;
210 dpu_info = dev_priv->dbi_dpu_info;
211
212 /* This is fine - we may be in non DPU mode */
213 if (!dpu_info)
214 return -EINVAL;
215
216 for (i = 0; i < dpu_info->dbi_output_num; i++) {
217 dsi_config = dev_priv->dsi_configs[i];
218 if (dsi_config) {
219 rect.x = rect.y = 0;
220 rect.width = dsi_config->fixed_mode->hdisplay;
221 rect.height = dsi_config->fixed_mode->vdisplay;
222 mdfld_dbi_dpu_report_damage(dev,
223 i ? (MDFLD_PLANEC) : (MDFLD_PLANEA),
224 &rect);
225 }
226 }
227 /* Exit DSR state */
228 mdfld_dpu_exit_dsr(dev);
229 return 0;
230}
231
232int mdfld_dsi_dbi_dsr_off(struct drm_device *dev,
233 struct psb_drm_dpu_rect *rect)
234{
235 struct drm_psb_private *dev_priv = dev->dev_private;
236 struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
237
238 mdfld_dbi_dpu_report_damage(dev, MDFLD_PLANEA, rect);
239
240 /* If dual display mode */
241 if (dpu_info->dbi_output_num == 2)
242 mdfld_dbi_dpu_report_damage(dev, MDFLD_PLANEC, rect);
243
244 /* Force dsi to exit DSR mode */
245 mdfld_dpu_exit_dsr(dev);
246 return 0;
247}
248
249static void mdfld_dpu_cursor_plane_flush(struct mdfld_dbi_dpu_info *dpu_info,
250 mdfld_plane_t plane)
251{
252 struct drm_device *dev = dpu_info->dev;
253 u32 curpos_reg = CURAPOS;
254 u32 curbase_reg = CURABASE;
255 u32 curcntr_reg = CURACNTR;
256 struct mdfld_cursor_info *cursor = &dpu_info->cursors[0];
257
258 if (plane == MDFLD_CURSORC) {
259 curpos_reg = CURCPOS;
260 curbase_reg = CURCBASE;
261 curcntr_reg = CURCCNTR;
262 cursor = &dpu_info->cursors[1];
263 }
264
265 REG_WRITE(curcntr_reg, REG_READ(curcntr_reg));
266 REG_WRITE(curpos_reg,
267 (((cursor->x & CURSOR_POS_MASK) << CURSOR_X_SHIFT) |
268 ((cursor->y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT)));
269 REG_WRITE(curbase_reg, REG_READ(curbase_reg));
270}
271
272static void mdfld_dpu_fb_plane_flush(struct mdfld_dbi_dpu_info *dpu_info,
273 mdfld_plane_t plane)
274{
275 u32 pipesrc_reg = PIPEASRC;
276 u32 dspsize_reg = DSPASIZE;
277 u32 dspoff_reg = DSPALINOFF;
278 u32 dspsurf_reg = DSPASURF;
279 u32 dspstride_reg = DSPASTRIDE;
280 u32 stride;
281 struct psb_drm_dpu_rect *rect = &dpu_info->damage_pipea;
282 struct drm_device *dev = dpu_info->dev;
283
284 if (plane == MDFLD_PLANEC) {
285 pipesrc_reg = PIPECSRC;
286 dspsize_reg = DSPCSIZE;
287 dspoff_reg = DSPCLINOFF;
288 dspsurf_reg = DSPCSURF;
289 dspstride_reg = DSPCSTRIDE;
290 rect = &dpu_info->damage_pipec;
291 }
292
293 stride = REG_READ(dspstride_reg);
294 /* FIXME: should I do the pipe src update here? */
295 REG_WRITE(pipesrc_reg, ((rect->width - 1) << 16) | (rect->height - 1));
296 /* Flush plane */
297 REG_WRITE(dspsize_reg, ((rect->height - 1) << 16) | (rect->width - 1));
298 REG_WRITE(dspoff_reg, ((rect->x * 4) + (rect->y * stride)));
299 REG_WRITE(dspsurf_reg, REG_READ(dspsurf_reg));
300
301 /*
302 * TODO: wait for flip finished and restore the pipesrc reg,
303 * or cursor will be show at a wrong position
304 */
305}
306
307static void mdfld_dpu_overlay_plane_flush(struct mdfld_dbi_dpu_info *dpu_info,
308 mdfld_plane_t plane)
309{
310}
311
312/*
313 * TODO: we are still in dbi normal mode now, we will try to use partial
314 * mode later.
315 */
316static int mdfld_dbi_prepare_cb(struct mdfld_dsi_dbi_output *dbi_output,
317 struct mdfld_dbi_dpu_info *dpu_info, int pipe)
318{
319 u8 *cb_addr = (u8 *)dbi_output->dbi_cb_addr;
320 u32 *index;
321 struct psb_drm_dpu_rect *rect = pipe ?
322 (&dpu_info->damage_pipec) : (&dpu_info->damage_pipea);
323
324 /* FIXME: lock command buffer, this may lead to a deadlock,
325 as we already hold the dpu_update_lock */
326 if (!spin_trylock(&dbi_output->cb_lock)) {
327 DRM_ERROR("lock command buffer failed, try again\n");
328 return -EAGAIN;
329 }
330
331 index = &dbi_output->cb_write;
332
333 if (*index) {
334 DRM_ERROR("DBI command buffer unclean\n");
335 spin_unlock(&dbi_output->cb_lock);
336 return -EAGAIN;
337 }
338 /* Column address */
339 *(cb_addr + ((*index)++)) = set_column_address;
340 *(cb_addr + ((*index)++)) = rect->x >> 8;
341 *(cb_addr + ((*index)++)) = rect->x;
342 *(cb_addr + ((*index)++)) = (rect->x + rect->width - 1) >> 8;
343 *(cb_addr + ((*index)++)) = (rect->x + rect->width - 1);
344
345 *index = 8;
346
347 /* Page address */
348 *(cb_addr + ((*index)++)) = set_page_addr;
349 *(cb_addr + ((*index)++)) = rect->y >> 8;
350 *(cb_addr + ((*index)++)) = rect->y;
351 *(cb_addr + ((*index)++)) = (rect->y + rect->height - 1) >> 8;
352 *(cb_addr + ((*index)++)) = (rect->y + rect->height - 1);
353
354 *index = 16;
355
356 /*write memory*/
357 *(cb_addr + ((*index)++)) = write_mem_start;
358
359 return 0;
360}
361
362static int mdfld_dbi_flush_cb(struct mdfld_dsi_dbi_output *dbi_output, int pipe)
363{
364 u32 cmd_phy = dbi_output->dbi_cb_phy;
365 u32 *index = &dbi_output->cb_write;
366 int reg_offset = pipe ? MIPIC_REG_OFFSET : 0;
367 struct drm_device *dev = dbi_output->dev;
368
369 if (*index == 0 || !dbi_output)
370 return 0;
371
372 REG_WRITE((MIPIA_CMD_LEN_REG + reg_offset), 0x010505);
373 REG_WRITE((MIPIA_CMD_ADD_REG + reg_offset), cmd_phy | 3);
374
375 *index = 0;
376
377 /* FIXME: unlock command buffer */
378 spin_unlock(&dbi_output->cb_lock);
379 return 0;
380}
381
382static int mdfld_dpu_update_pipe(struct mdfld_dsi_dbi_output *dbi_output,
383 struct mdfld_dbi_dpu_info *dpu_info, int pipe)
384{
385 struct drm_device *dev = dbi_output->dev;
386 struct drm_psb_private *dev_priv = dev->dev_private;
387 mdfld_plane_t cursor_plane = MDFLD_CURSORA;
388 mdfld_plane_t fb_plane = MDFLD_PLANEA;
389 mdfld_plane_t overlay_plane = MDFLD_OVERLAYA;
390 int ret = 0;
391 u32 plane_mask = MDFLD_PIPEA_PLANE_MASK;
392
393 /* Damaged rects on this pipe */
394 if (pipe) {
395 cursor_plane = MDFLD_CURSORC;
396 fb_plane = MDFLD_PLANEC;
397 overlay_plane = MDFLD_OVERLAYC;
398 plane_mask = MDFLD_PIPEC_PLANE_MASK;
399 }
400
401 /*update cursor which assigned to @pipe*/
402 if (dpu_info->pending & (1 << cursor_plane))
403 mdfld_dpu_cursor_plane_flush(dpu_info, cursor_plane);
404
405 /*update fb which assigned to @pipe*/
406 if (dpu_info->pending & (1 << fb_plane))
407 mdfld_dpu_fb_plane_flush(dpu_info, fb_plane);
408
409 /* TODO: update overlay */
410 if (dpu_info->pending & (1 << overlay_plane))
411 mdfld_dpu_overlay_plane_flush(dpu_info, overlay_plane);
412
413 /* Flush damage area to panel fb */
414 if (dpu_info->pending & plane_mask) {
415 ret = mdfld_dbi_prepare_cb(dbi_output, dpu_info, pipe);
416 /*
417 * TODO: remove b_dsr_enable later,
418 * added it so that text console could boot smoothly
419 */
420 /* Clean pending flags on this pipe */
421 if (!ret && dev_priv->dsr_enable) {
422 dpu_info->pending &= ~plane_mask;
423 /* Reset overlay pipe damage rect */
424 mdfld_dpu_init_damage(dpu_info, pipe);
425 }
426 }
427 return ret;
428}
429
430static int mdfld_dpu_update_fb(struct drm_device *dev)
431{
432 struct drm_crtc *crtc;
433 struct psb_intel_crtc *psb_crtc;
434 struct mdfld_dsi_dbi_output **dbi_output;
435 struct drm_psb_private *dev_priv = dev->dev_private;
436 struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
437 bool pipe_updated[2];
438 unsigned long irq_flags;
439 u32 dpll_reg = MRST_DPLL_A;
440 u32 dspcntr_reg = DSPACNTR;
441 u32 pipeconf_reg = PIPEACONF;
442 u32 dsplinoff_reg = DSPALINOFF;
443 u32 dspsurf_reg = DSPASURF;
444 u32 mipi_state_reg = MIPIA_INTR_STAT_REG;
445 u32 reg_offset = 0;
446 int pipe;
447 int i;
448 int ret;
449
450 dbi_output = dpu_info->dbi_outputs;
451 pipe_updated[0] = pipe_updated[1] = false;
452
453 if (!gma_power_begin(dev, true))
454 return -EAGAIN;
455
456 /* Try to prevent any new damage reports */
457 if (!spin_trylock_irqsave(&dpu_info->dpu_update_lock, irq_flags))
458 return -EAGAIN;
459
460 for (i = 0; i < dpu_info->dbi_output_num; i++) {
461 crtc = dbi_output[i]->base.base.crtc;
462 psb_crtc = (crtc) ? to_psb_intel_crtc(crtc) : NULL;
463
464 pipe = dbi_output[i]->channel_num ? 2 : 0;
465
466 if (pipe == 2) {
467 dspcntr_reg = DSPCCNTR;
468 pipeconf_reg = PIPECCONF;
469 dsplinoff_reg = DSPCLINOFF;
470 dspsurf_reg = DSPCSURF;
471 reg_offset = MIPIC_REG_OFFSET;
472 }
473
474 if (!(REG_READ((MIPIA_GEN_FIFO_STAT_REG + reg_offset))
475 & (1 << 27)) ||
476 !(REG_READ(dpll_reg) & DPLL_VCO_ENABLE) ||
477 !(REG_READ(dspcntr_reg) & DISPLAY_PLANE_ENABLE) ||
478 !(REG_READ(pipeconf_reg) & DISPLAY_PLANE_ENABLE)) {
479 dev_err(dev->dev,
480 "DBI FIFO is busy, DSI %d state %x\n",
481 pipe,
482 REG_READ(mipi_state_reg + reg_offset));
483 continue;
484 }
485
486 /*
487 * If DBI output is in an exclusive state then the pipe
488 * change won't be updated
489 */
490 if (dbi_output[i]->dbi_panel_on &&
491 !(dbi_output[i]->mode_flags & MODE_SETTING_ON_GOING) &&
492 !(psb_crtc &&
493 psb_crtc->mode_flags & MODE_SETTING_ON_GOING) &&
494 !(dbi_output[i]->mode_flags & MODE_SETTING_IN_DSR)) {
495 ret = mdfld_dpu_update_pipe(dbi_output[i],
496 dpu_info, dbi_output[i]->channel_num ? 2 : 0);
497 if (!ret)
498 pipe_updated[i] = true;
499 }
500 }
501
502 for (i = 0; i < dpu_info->dbi_output_num; i++)
503 if (pipe_updated[i])
504 mdfld_dbi_flush_cb(dbi_output[i],
505 dbi_output[i]->channel_num ? 2 : 0);
506
507 spin_unlock_irqrestore(&dpu_info->dpu_update_lock, irq_flags);
508 gma_power_end(dev);
509 return 0;
510}
511
512static int __mdfld_dbi_exit_dsr(struct mdfld_dsi_dbi_output *dbi_output,
513 int pipe)
514{
515 struct drm_device *dev = dbi_output->dev;
516 struct drm_crtc *crtc = dbi_output->base.base.crtc;
517 struct psb_intel_crtc *psb_crtc = (crtc) ? to_psb_intel_crtc(crtc)
518 : NULL;
519 u32 reg_val;
520 u32 dpll_reg = MRST_DPLL_A;
521 u32 pipeconf_reg = PIPEACONF;
522 u32 dspcntr_reg = DSPACNTR;
523 u32 dspbase_reg = DSPABASE;
524 u32 dspsurf_reg = DSPASURF;
525 u32 reg_offset = 0;
526
527 if (!dbi_output)
528 return 0;
529
530 /* If mode setting on-going, back off */
531 if ((dbi_output->mode_flags & MODE_SETTING_ON_GOING) ||
532 (psb_crtc && psb_crtc->mode_flags & MODE_SETTING_ON_GOING))
533 return -EAGAIN;
534
535 if (pipe == 2) {
536 dpll_reg = MRST_DPLL_A;
537 pipeconf_reg = PIPECCONF;
538 dspcntr_reg = DSPCCNTR;
539 dspbase_reg = MDFLD_DSPCBASE;
540 dspsurf_reg = DSPCSURF;
541
542 reg_offset = MIPIC_REG_OFFSET;
543 }
544
545 if (!gma_power_begin(dev, true))
546 return -EAGAIN;
547
548 /* Enable DPLL */
549 reg_val = REG_READ(dpll_reg);
550 if (!(reg_val & DPLL_VCO_ENABLE)) {
551
552 if (reg_val & MDFLD_PWR_GATE_EN) {
553 reg_val &= ~MDFLD_PWR_GATE_EN;
554 REG_WRITE(dpll_reg, reg_val);
555 REG_READ(dpll_reg);
556 udelay(500);
557 }
558
559 reg_val |= DPLL_VCO_ENABLE;
560 REG_WRITE(dpll_reg, reg_val);
561 REG_READ(dpll_reg);
562 udelay(500);
563
564 /* FIXME: add timeout */
565 while (!(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK))
566 cpu_relax();
567 }
568
569 /* Enable pipe */
570 reg_val = REG_READ(pipeconf_reg);
571 if (!(reg_val & PIPEACONF_ENABLE)) {
572 reg_val |= PIPEACONF_ENABLE;
573 REG_WRITE(pipeconf_reg, reg_val);
574 REG_READ(pipeconf_reg);
575 udelay(500);
576 mdfldWaitForPipeEnable(dev, pipe);
577 }
578
579 /* Enable plane */
580 reg_val = REG_READ(dspcntr_reg);
581 if (!(reg_val & DISPLAY_PLANE_ENABLE)) {
582 reg_val |= DISPLAY_PLANE_ENABLE;
583 REG_WRITE(dspcntr_reg, reg_val);
584 REG_READ(dspcntr_reg);
585 udelay(500);
586 }
587
588 gma_power_end(dev);
589
590 /* Clean IN_DSR flag */
591 dbi_output->mode_flags &= ~MODE_SETTING_IN_DSR;
592
593 return 0;
594}
595
596int mdfld_dpu_exit_dsr(struct drm_device *dev)
597{
598 struct mdfld_dsi_dbi_output **dbi_output;
599 struct drm_psb_private *dev_priv = dev->dev_private;
600 struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
601 int i;
602 int pipe;
603
604 dbi_output = dpu_info->dbi_outputs;
605
606 for (i = 0; i < dpu_info->dbi_output_num; i++) {
607 /* If this output is not in DSR mode, don't call exit dsr */
608 if (dbi_output[i]->mode_flags & MODE_SETTING_IN_DSR)
609 __mdfld_dbi_exit_dsr(dbi_output[i],
610 dbi_output[i]->channel_num ? 2 : 0);
611 }
612
613 /* Enable TE interrupt */
614 for (i = 0; i < dpu_info->dbi_output_num; i++) {
615 /* Route TE to the pipe whose panel is currently powered on */
616 pipe = dbi_output[i]->channel_num ? 2 : 0;
617 if (dbi_output[i]->dbi_panel_on && pipe) {
618 mdfld_disable_te(dev, 0);
619 mdfld_enable_te(dev, 2);
620 } else if (dbi_output[i]->dbi_panel_on && !pipe) {
621 mdfld_disable_te(dev, 2);
622 mdfld_enable_te(dev, 0);
623 }
624 }
625 return 0;
626}
627
628static int mdfld_dpu_enter_dsr(struct drm_device *dev)
629{
630 struct drm_psb_private *dev_priv = dev->dev_private;
631 struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
632 struct mdfld_dsi_dbi_output **dbi_output;
633 int i;
634
635 dbi_output = dpu_info->dbi_outputs;
636
637 for (i = 0; i < dpu_info->dbi_output_num; i++) {
638 /* If output is off or already in DSR state, don't re-enter */
639 if (dbi_output[i]->dbi_panel_on &&
640 !(dbi_output[i]->mode_flags & MODE_SETTING_IN_DSR)) {
641 mdfld_dsi_dbi_enter_dsr(dbi_output[i],
642 dbi_output[i]->channel_num ? 2 : 0);
643 }
644 }
645
646 return 0;
647}
648
649static void mdfld_dbi_dpu_timer_func(unsigned long data)
650{
651 struct drm_device *dev = (struct drm_device *)data;
652 struct drm_psb_private *dev_priv = dev->dev_private;
653 struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
654 struct timer_list *dpu_timer = &dpu_info->dpu_timer;
655 unsigned long flags;
656
657 if (dpu_info->pending) {
658 dpu_info->idle_count = 0;
659 /* Update panel fb with damaged area */
660 mdfld_dpu_update_fb(dev);
661 } else {
662 dpu_info->idle_count++;
663 }
664
665 if (dpu_info->idle_count >= MDFLD_MAX_IDLE_COUNT) {
666 mdfld_dpu_enter_dsr(dev);
667 /* Stop timer by return */
668 return;
669 }
670
671 spin_lock_irqsave(&dpu_info->dpu_timer_lock, flags);
672 if (!timer_pending(dpu_timer)) {
673 dpu_timer->expires = jiffies + MDFLD_DSR_DELAY;
674 add_timer(dpu_timer);
675 }
676 spin_unlock_irqrestore(&dpu_info->dpu_timer_lock, flags);
677}
678
679void mdfld_dpu_update_panel(struct drm_device *dev)
680{
681 struct drm_psb_private *dev_priv = dev->dev_private;
682 struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
683
684 if (dpu_info->pending) {
685 dpu_info->idle_count = 0;
686
687 /*update panel fb with damaged area*/
688 mdfld_dpu_update_fb(dev);
689 } else {
690 dpu_info->idle_count++;
691 }
692
693 if (dpu_info->idle_count >= MDFLD_MAX_IDLE_COUNT) {
694 /*enter dsr*/
695 mdfld_dpu_enter_dsr(dev);
696 }
697}
698
699static int mdfld_dbi_dpu_timer_init(struct drm_device *dev,
700 struct mdfld_dbi_dpu_info *dpu_info)
701{
702 struct timer_list *dpu_timer = &dpu_info->dpu_timer;
703 unsigned long flags;
704
705 spin_lock_init(&dpu_info->dpu_timer_lock);
706 spin_lock_irqsave(&dpu_info->dpu_timer_lock, flags);
707
708 init_timer(dpu_timer);
709
710 dpu_timer->data = (unsigned long)dev;
711 dpu_timer->function = mdfld_dbi_dpu_timer_func;
712 dpu_timer->expires = jiffies + MDFLD_DSR_DELAY;
713
714 spin_unlock_irqrestore(&dpu_info->dpu_timer_lock, flags);
715
716 return 0;
717}
718
719void mdfld_dbi_dpu_timer_start(struct mdfld_dbi_dpu_info *dpu_info)
720{
721 struct timer_list *dpu_timer = &dpu_info->dpu_timer;
722 unsigned long flags;
723
724 spin_lock_irqsave(&dpu_info->dpu_timer_lock, flags);
725 if (!timer_pending(dpu_timer)) {
726 dpu_timer->expires = jiffies + MDFLD_DSR_DELAY;
727 add_timer(dpu_timer);
728 }
729 spin_unlock_irqrestore(&dpu_info->dpu_timer_lock, flags);
730}
731
732int mdfld_dbi_dpu_init(struct drm_device *dev)
733{
734 struct drm_psb_private *dev_priv = dev->dev_private;
735 struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
736
737 if (!dpu_info || IS_ERR(dpu_info)) {
738 dpu_info = kzalloc(sizeof(struct mdfld_dbi_dpu_info),
739 GFP_KERNEL);
740 if (!dpu_info) {
741 DRM_ERROR("No memory\n");
742 return -ENOMEM;
743 }
744 dev_priv->dbi_dpu_info = dpu_info;
745 }
746
747 dpu_info->dev = dev;
748
749 dpu_info->cursors[0].size = MDFLD_CURSOR_SIZE;
750 dpu_info->cursors[1].size = MDFLD_CURSOR_SIZE;
751
752 /*init dpu_update_lock*/
753 spin_lock_init(&dpu_info->dpu_update_lock);
754
755 /*init dpu refresh timer*/
756 mdfld_dbi_dpu_timer_init(dev, dpu_info);
757
758 /*init pipe damage area*/
759 mdfld_dpu_init_damage(dpu_info, 0);
760 mdfld_dpu_init_damage(dpu_info, 2);
761
762 return 0;
763}
764
765void mdfld_dbi_dpu_exit(struct drm_device *dev)
766{
767 struct drm_psb_private *dev_priv = dev->dev_private;
768 struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
769
770 if (!dpu_info)
771 return;
772
773 del_timer_sync(&dpu_info->dpu_timer);
774 kfree(dpu_info);
775 dev_priv->dbi_dpu_info = NULL;
776}
777
778
diff --git a/drivers/staging/gma500/mdfld_dsi_dbi_dpu.h b/drivers/staging/gma500/mdfld_dsi_dbi_dpu.h
deleted file mode 100644
index 42367ed48c08..000000000000
--- a/drivers/staging/gma500/mdfld_dsi_dbi_dpu.h
+++ /dev/null
@@ -1,154 +0,0 @@
1/*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * jim liu <jim.liu@intel.com>
25 * Jackie Li<yaodong.li@intel.com>
26 */
27
28#ifndef __MDFLD_DSI_DBI_DPU_H__
29#define __MDFLD_DSI_DBI_DPU_H__
30
31#include "mdfld_dsi_dbi.h"
32
33typedef enum {
34 MDFLD_PLANEA,
35 MDFLD_PLANEC,
36 MDFLD_CURSORA,
37 MDFLD_CURSORC,
38 MDFLD_OVERLAYA,
39 MDFLD_OVERLAYC,
40 MDFLD_PLANE_NUM,
41} mdfld_plane_t;
42
43#define MDFLD_PIPEA_PLANE_MASK 0x15
44#define MDFLD_PIPEC_PLANE_MASK 0x2A
45
46struct mdfld_cursor_info {
47 int x, y;
48 int size;
49};
50
51#define MDFLD_CURSOR_SIZE 64
52
53/*
54 * enter DSR mode if screen has no update for 2 frames.
55 */
56#define MDFLD_MAX_IDLE_COUNT 2
57
58struct mdfld_dbi_dpu_info {
59 struct drm_device *dev;
60 /* Lock */
61 spinlock_t dpu_update_lock;
62
63 /* Cursor position */
64 struct mdfld_cursor_info cursors[2];
65
66 /* Damaged area for each plane */
67 struct psb_drm_dpu_rect damaged_rects[MDFLD_PLANE_NUM];
68
69 /* Final damaged area */
70 struct psb_drm_dpu_rect damage_pipea;
71 struct psb_drm_dpu_rect damage_pipec;
72
73 /* Pending */
74 u32 pending;
75
76 /* DPU timer */
77 struct timer_list dpu_timer;
78 spinlock_t dpu_timer_lock;
79
80 /* DPU idle count */
81 u32 idle_count;
82
83 /* DSI outputs */
84 struct mdfld_dsi_dbi_output *dbi_outputs[2];
85 int dbi_output_num;
86};
87
88static inline int mdfld_dpu_region_extent(struct psb_drm_dpu_rect *origin,
89 struct psb_drm_dpu_rect *rect)
90{
91 int x1, y1, x2, y2;
92
93 x1 = origin->x + origin->width;
94 y1 = origin->y + origin->height;
95
96 x2 = rect->x + rect->width;
97 y2 = rect->y + rect->height;
98
99 origin->x = min(origin->x, rect->x);
100 origin->y = min(origin->y, rect->y);
101 origin->width = max(x1, x2) - origin->x;
102 origin->height = max(y1, y2) - origin->y;
103
104 return 0;
105}
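The inline above computes the bounding union of two damage rectangles. A standalone mirror with a worked example, using a local type so it can be read without the driver headers:

struct dpu_rect_example { int x, y, width, height; };

static void dpu_rect_extend(struct dpu_rect_example *origin,
			const struct dpu_rect_example *rect)
{
	int x1 = origin->x + origin->width, y1 = origin->y + origin->height;
	int x2 = rect->x + rect->width, y2 = rect->y + rect->height;

	origin->x = origin->x < rect->x ? origin->x : rect->x;
	origin->y = origin->y < rect->y ? origin->y : rect->y;
	origin->width = (x1 > x2 ? x1 : x2) - origin->x;
	origin->height = (y1 > y2 ? y1 : y2) - origin->y;
}

/* Example: extending {10, 20, 30, 40} by {25, 5, 50, 10} yields {10, 5, 65, 55}. */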
106
107static inline void mdfld_check_boundary(struct mdfld_dbi_dpu_info *dpu_info,
108 struct psb_drm_dpu_rect *rect)
109{
110 if (rect->x < 0)
111 rect->x = 0;
112 if (rect->y < 0)
113 rect->y = 0;
114
115 if (rect->x + rect->width > 864)
116 rect->width = 864 - rect->x;
117 if (rect->y + rect->height > 480)
118 rect->height = 480 - rect->y;
119
120 if (!rect->width)
121 rect->width = 1;
122 if (!rect->height)
123 rect->height = 1;
124}
125
126static inline void mdfld_dpu_init_damage(struct mdfld_dbi_dpu_info *dpu_info,
127 int pipe)
128{
129 struct psb_drm_dpu_rect *rect;
130
131 if (pipe == 0)
132 rect = &dpu_info->damage_pipea;
133 else
134 rect = &dpu_info->damage_pipec;
135
136 rect->x = 864;
137 rect->y = 480;
138 rect->width = -864;
139 rect->height = -480;
140}
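The "inverted" reset values above (x = 864, width = -864, and likewise for y/height) are chosen so that x + width and y + height are both 0; the first mdfld_dpu_region_extent() call after a reset therefore collapses the damage area to exactly the first reported rectangle. A quick check, reusing the example type from the sketch above:

/* After reset origin = {864, 480, -864, -480}, so x1 = y1 = 0.
 * Extending by a first damage rect {100, 50, 20, 10} gives
 * x = min(864, 100) = 100, y = min(480, 50) = 50,
 * width = max(0, 120) - 100 = 20, height = max(0, 60) - 50 = 10,
 * i.e. the damage area becomes the first rectangle itself. */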
141
142extern int mdfld_dsi_dbi_dsr_off(struct drm_device *dev,
143 struct psb_drm_dpu_rect *rect);
144extern int mdfld_dbi_dpu_report_damage(struct drm_device *dev,
145 mdfld_plane_t plane,
146 struct psb_drm_dpu_rect *rect);
147extern int mdfld_dbi_dpu_report_fullscreen_damage(struct drm_device *dev);
148extern int mdfld_dpu_exit_dsr(struct drm_device *dev);
149extern void mdfld_dbi_dpu_timer_start(struct mdfld_dbi_dpu_info *dpu_info);
150extern int mdfld_dbi_dpu_init(struct drm_device *dev);
151extern void mdfld_dbi_dpu_exit(struct drm_device *dev);
152extern void mdfld_dpu_update_panel(struct drm_device *dev);
153
154#endif /*__MDFLD_DSI_DBI_DPU_H__*/
diff --git a/drivers/staging/gma500/mdfld_dsi_dpi.c b/drivers/staging/gma500/mdfld_dsi_dpi.c
deleted file mode 100644
index e685f1217baa..000000000000
--- a/drivers/staging/gma500/mdfld_dsi_dpi.c
+++ /dev/null
@@ -1,805 +0,0 @@
1/*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * jim liu <jim.liu@intel.com>
25 * Jackie Li<yaodong.li@intel.com>
26 */
27
28#include "mdfld_dsi_dpi.h"
29#include "mdfld_output.h"
30#include "mdfld_dsi_pkg_sender.h"
31
32
33static void mdfld_wait_for_HS_DATA_FIFO(struct drm_device *dev, u32 pipe)
34{
35 u32 gen_fifo_stat_reg = MIPIA_GEN_FIFO_STAT_REG;
36 int timeout = 0;
37
38 if (pipe == 2)
39 gen_fifo_stat_reg += MIPIC_REG_OFFSET;
40
41 udelay(500);
42
43 /* This will time out after approximately 2+ seconds */
44 while ((timeout < 20000) && (REG_READ(gen_fifo_stat_reg) & DSI_FIFO_GEN_HS_DATA_FULL)) {
45 udelay(100);
46 timeout++;
47 }
48
49 if (timeout == 20000)
50 dev_warn(dev->dev, "MIPI: HS Data FIFO was never cleared!\n");
51}
52
53static void mdfld_wait_for_HS_CTRL_FIFO(struct drm_device *dev, u32 pipe)
54{
55 u32 gen_fifo_stat_reg = MIPIA_GEN_FIFO_STAT_REG;
56 int timeout = 0;
57
58 if (pipe == 2)
59 gen_fifo_stat_reg += MIPIC_REG_OFFSET;
60
61 udelay(500);
62
63 /* This will time out after approximately 2+ seconds */
64 while ((timeout < 20000) && (REG_READ(gen_fifo_stat_reg) & DSI_FIFO_GEN_HS_CTRL_FULL)) {
65 udelay(100);
66 timeout++;
67 }
68 if (timeout == 20000)
69 dev_warn(dev->dev, "MIPI: HS CMD FIFO was never cleared!\n");
70}
71
72static void mdfld_wait_for_DPI_CTRL_FIFO(struct drm_device *dev, u32 pipe)
73{
74 u32 gen_fifo_stat_reg = MIPIA_GEN_FIFO_STAT_REG;
75 int timeout = 0;
76
77 if (pipe == 2)
78 gen_fifo_stat_reg += MIPIC_REG_OFFSET;
79
80 udelay(500);
81
82 /* This will time out after approximately 2+ seconds */
83 while ((timeout < 20000) && ((REG_READ(gen_fifo_stat_reg) & DPI_FIFO_EMPTY)
84 != DPI_FIFO_EMPTY)) {
85 udelay(100);
86 timeout++;
87 }
88
89 if (timeout == 20000)
90 dev_warn(dev->dev, "MIPI: DPI FIFO was never cleared!\n");
91}
92
93static void mdfld_wait_for_SPL_PKG_SENT(struct drm_device *dev, u32 pipe)
94{
95 u32 intr_stat_reg = MIPIA_INTR_STAT_REG;
96 int timeout = 0;
97
98 if (pipe == 2)
99 intr_stat_reg += MIPIC_REG_OFFSET;
100
101 udelay(500);
102
103 /* This will time out after approximately 2+ seconds */
104 while ((timeout < 20000) && (!(REG_READ(intr_stat_reg) & DSI_INTR_STATE_SPL_PKG_SENT))) {
105 udelay(100);
106 timeout++;
107 }
108
109 if (timeout == 20000)
110 dev_warn(dev->dev, "MIPI: SPL_PKT_SENT_INTERRUPT was not sent successfully!\n");
111}
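The four wait helpers above share the same poll-with-timeout shape (500us settle, then up to 20000 polls of 100us, roughly two seconds). A possible consolidation, shown only as a sketch; the helper name and the set/clear flag are not part of the original driver:

/* Poll a status register until (value & mask) reaches the wanted state,
 * or roughly two seconds elapse. Returns true on success. */
static bool mdfld_dsi_poll_bit(struct drm_device *dev, u32 reg, u32 mask, bool set)
{
	int timeout = 0;

	udelay(500);
	while (timeout < 20000 && !!(REG_READ(reg) & mask) != set) {
		udelay(100);
		timeout++;
	}
	return timeout < 20000;
}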
112
113
114/* ************************************************************************* *\
115 * FUNCTION: mdfld_dsi_tpo_ic_init
116 *
117 * DESCRIPTION: This function is called only by mrst_dsi_mode_set and
118 * restore_display_registers. Since this function does not
119 * acquire the mutex, it is important that the calling function
120 * does!
121\* ************************************************************************* */
122void mdfld_dsi_tpo_ic_init(struct mdfld_dsi_config *dsi_config, u32 pipe)
123{
124 struct drm_device *dev = dsi_config->dev;
125 u32 dcsChannelNumber = dsi_config->channel_num;
126 u32 gen_data_reg = MIPIA_HS_GEN_DATA_REG;
127 u32 gen_ctrl_reg = MIPIA_HS_GEN_CTRL_REG;
128 u32 gen_ctrl_val = GEN_LONG_WRITE;
129
130 if (pipe == 2) {
131 gen_data_reg = HS_GEN_DATA_REG + MIPIC_REG_OFFSET;
132 gen_ctrl_reg = HS_GEN_CTRL_REG + MIPIC_REG_OFFSET;
133 }
134
135 gen_ctrl_val |= dcsChannelNumber << DCS_CHANNEL_NUMBER_POS;
136
137 /* Flip page order */
138 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
139 REG_WRITE(gen_data_reg, 0x00008036);
140 mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
141 REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x02 << WORD_COUNTS_POS));
142
143 /* 0xF0 */
144 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
145 REG_WRITE(gen_data_reg, 0x005a5af0);
146 mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
147 REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
148
149 /* Write protection key */
150 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
151 REG_WRITE(gen_data_reg, 0x005a5af1);
152 mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
153 REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
154
155 /* 0xFC */
156 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
157 REG_WRITE(gen_data_reg, 0x005a5afc);
158 mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
159 REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
160
161 /* 0xB7 */
162 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
163 REG_WRITE(gen_data_reg, 0x770000b7);
164 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
165 REG_WRITE(gen_data_reg, 0x00000044);
166 mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
167 REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x05 << WORD_COUNTS_POS));
168
169 /* 0xB6 */
170 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
171 REG_WRITE(gen_data_reg, 0x000a0ab6);
172 mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
173 REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
174
175 /* 0xF2 */
176 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
177 REG_WRITE(gen_data_reg, 0x081010f2);
178 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
179 REG_WRITE(gen_data_reg, 0x4a070708);
180 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
181 REG_WRITE(gen_data_reg, 0x000000c5);
182 mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
183 REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS));
184
185 /* 0xF8 */
186 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
187 REG_WRITE(gen_data_reg, 0x024003f8);
188 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
189 REG_WRITE(gen_data_reg, 0x01030a04);
190 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
191 REG_WRITE(gen_data_reg, 0x0e020220);
192 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
193 REG_WRITE(gen_data_reg, 0x00000004);
194 mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
195 REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x0d << WORD_COUNTS_POS));
196
197 /* 0xE2 */
198 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
199 REG_WRITE(gen_data_reg, 0x398fc3e2);
200 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
201 REG_WRITE(gen_data_reg, 0x0000916f);
202 mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
203 REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x06 << WORD_COUNTS_POS));
204
205 /* 0xB0 */
206 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
207 REG_WRITE(gen_data_reg, 0x000000b0);
208 mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
209 REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x02 << WORD_COUNTS_POS));
210
211 /* 0xF4 */
212 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
213 REG_WRITE(gen_data_reg, 0x240242f4);
214 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
215 REG_WRITE(gen_data_reg, 0x78ee2002);
216 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
217 REG_WRITE(gen_data_reg, 0x2a071050);
218 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
219 REG_WRITE(gen_data_reg, 0x507fee10);
220 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
221 REG_WRITE(gen_data_reg, 0x10300710);
222 mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
223 REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x14 << WORD_COUNTS_POS));
224
225 /* 0xBA */
226 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
227 REG_WRITE(gen_data_reg, 0x19fe07ba);
228 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
229 REG_WRITE(gen_data_reg, 0x101c0a31);
230 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
231 REG_WRITE(gen_data_reg, 0x00000010);
232 mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
233 REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS));
234
235 /* 0xBB */
236 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
237 REG_WRITE(gen_data_reg, 0x28ff07bb);
238 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
239 REG_WRITE(gen_data_reg, 0x24280a31);
240 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
241 REG_WRITE(gen_data_reg, 0x00000034);
242 mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
243 REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS));
244
245 /* 0xFB */
246 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
247 REG_WRITE(gen_data_reg, 0x535d05fb);
248 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
249 REG_WRITE(gen_data_reg, 0x1b1a2130);
250 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
251 REG_WRITE(gen_data_reg, 0x221e180e);
252 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
253 REG_WRITE(gen_data_reg, 0x131d2120);
254 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
255 REG_WRITE(gen_data_reg, 0x535d0508);
256 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
257 REG_WRITE(gen_data_reg, 0x1c1a2131);
258 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
259 REG_WRITE(gen_data_reg, 0x231f160d);
260 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
261 REG_WRITE(gen_data_reg, 0x111b2220);
262 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
263 REG_WRITE(gen_data_reg, 0x535c2008);
264 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
265 REG_WRITE(gen_data_reg, 0x1f1d2433);
266 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
267 REG_WRITE(gen_data_reg, 0x2c251a10);
268 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
269 REG_WRITE(gen_data_reg, 0x2c34372d);
270 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
271 REG_WRITE(gen_data_reg, 0x00000023);
272 mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
273 REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x31 << WORD_COUNTS_POS));
274
275 /* 0xFA */
276 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
277 REG_WRITE(gen_data_reg, 0x525c0bfa);
278 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
279 REG_WRITE(gen_data_reg, 0x1c1c232f);
280 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
281 REG_WRITE(gen_data_reg, 0x2623190e);
282 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
283 REG_WRITE(gen_data_reg, 0x18212625);
284 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
285 REG_WRITE(gen_data_reg, 0x545d0d0e);
286 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
287 REG_WRITE(gen_data_reg, 0x1e1d2333);
288 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
289 REG_WRITE(gen_data_reg, 0x26231a10);
290 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
291 REG_WRITE(gen_data_reg, 0x1a222725);
292 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
293 REG_WRITE(gen_data_reg, 0x545d280f);
294 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
295 REG_WRITE(gen_data_reg, 0x21202635);
296 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
297 REG_WRITE(gen_data_reg, 0x31292013);
298 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
299 REG_WRITE(gen_data_reg, 0x31393d33);
300 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
301 REG_WRITE(gen_data_reg, 0x00000029);
302 mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
303 REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x31 << WORD_COUNTS_POS));
304
305 /* Set DM */
306 mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
307 REG_WRITE(gen_data_reg, 0x000100f7);
308 mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
309 REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
310}
311
312static u16 mdfld_dsi_dpi_to_byte_clock_count(int pixel_clock_count,
313 int num_lane, int bpp)
314{
315 return (u16)((pixel_clock_count * bpp) / (num_lane * 8));
316}
317
318/*
319 * Calculate the DPI timings based on the given drm mode @mode.
320 * Returns 0 on success.
321 * FIXME: this uses the proposed mode values for the calculation; it may
322 * need to use the crtc mode values later.
323 */
324int mdfld_dsi_dpi_timing_calculation(struct drm_display_mode *mode,
325 struct mdfld_dsi_dpi_timing *dpi_timing,
326 int num_lane, int bpp)
327{
328 int pclk_hsync, pclk_hfp, pclk_hbp, pclk_hactive;
329 int pclk_vsync, pclk_vfp, pclk_vbp, pclk_vactive;
330
331 if(!mode || !dpi_timing) {
332 DRM_ERROR("Invalid parameter\n");
333 return -EINVAL;
334 }
335
336 pclk_hactive = mode->hdisplay;
337 pclk_hfp = mode->hsync_start - mode->hdisplay;
338 pclk_hsync = mode->hsync_end - mode->hsync_start;
339 pclk_hbp = mode->htotal - mode->hsync_end;
340
341 pclk_vactive = mode->vdisplay;
342 pclk_vfp = mode->vsync_start - mode->vdisplay;
343 pclk_vsync = mode->vsync_end - mode->vsync_start;
344 pclk_vbp = mode->vtotal - mode->vsync_end;
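	/*
	 * e.g. for the 480x854 TMD mode used later in this driver
	 * (hsync_start 487, hsync_end 490, htotal 499) this gives
	 * hfp = 7, hsync = 3 and hbp = 9 pixel clocks.
	 */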
345
346 /*
347 * byte clock counts are calculated with the following formula:
348 * bclock_count = pclk_count * bpp / num_lane / 8
349 */
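	/*
	 * Worked example (assuming 24 bpp over 2 lanes): a 480 pixel-clock
	 * hactive becomes 480 * 24 / (2 * 8) = 720 byte clocks.
	 */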
350 dpi_timing->hsync_count = mdfld_dsi_dpi_to_byte_clock_count(pclk_hsync, num_lane, bpp);
351 dpi_timing->hbp_count = mdfld_dsi_dpi_to_byte_clock_count(pclk_hbp, num_lane, bpp);
352 dpi_timing->hfp_count = mdfld_dsi_dpi_to_byte_clock_count(pclk_hfp, num_lane, bpp);
353 dpi_timing->hactive_count = mdfld_dsi_dpi_to_byte_clock_count(pclk_hactive, num_lane, bpp);
354 dpi_timing->vsync_count = mdfld_dsi_dpi_to_byte_clock_count(pclk_vsync, num_lane, bpp);
355 dpi_timing->vbp_count = mdfld_dsi_dpi_to_byte_clock_count(pclk_vbp, num_lane, bpp);
356 dpi_timing->vfp_count = mdfld_dsi_dpi_to_byte_clock_count(pclk_vfp, num_lane, bpp);
357
358 return 0;
359}
360
361void mdfld_dsi_dpi_controller_init(struct mdfld_dsi_config *dsi_config, int pipe)
362{
363 struct drm_device *dev = dsi_config->dev;
364 u32 reg_offset = pipe ? MIPIC_REG_OFFSET : 0;
365 int lane_count = dsi_config->lane_count;
366 struct mdfld_dsi_dpi_timing dpi_timing;
367 struct drm_display_mode *mode = dsi_config->mode;
368 u32 val = 0;
369
370 /*un-ready device*/
371 REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000000);
372
373 /*init dsi adapter before kicking off*/
374 REG_WRITE((MIPIA_CONTROL_REG + reg_offset), 0x00000018);
375
376 /*enable all interrupts*/
377 REG_WRITE((MIPIA_INTR_EN_REG + reg_offset), 0xffffffff);
378
379
380 /*set up func_prg*/
381 val |= lane_count;
382 val |= dsi_config->channel_num << DSI_DPI_VIRT_CHANNEL_OFFSET;
383
384 switch(dsi_config->bpp) {
385 case 16:
386 val |= DSI_DPI_COLOR_FORMAT_RGB565;
387 break;
388 case 18:
389 val |= DSI_DPI_COLOR_FORMAT_RGB666;
390 break;
391 case 24:
392 val |= DSI_DPI_COLOR_FORMAT_RGB888;
393 break;
394 default:
395 DRM_ERROR("unsupported color format, bpp = %d\n", dsi_config->bpp);
396 }
397 REG_WRITE((MIPIA_DSI_FUNC_PRG_REG + reg_offset), val);
398
399 REG_WRITE((MIPIA_HS_TX_TIMEOUT_REG + reg_offset),
400 (mode->vtotal * mode->htotal * dsi_config->bpp / (8 * lane_count)) & DSI_HS_TX_TIMEOUT_MASK);
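	/* the HS TX timeout above is roughly one full frame expressed in byte clocks */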
401 REG_WRITE((MIPIA_LP_RX_TIMEOUT_REG + reg_offset), 0xffff & DSI_LP_RX_TIMEOUT_MASK);
402
403 /*max value: 20 clock cycles of txclkesc*/
404 REG_WRITE((MIPIA_TURN_AROUND_TIMEOUT_REG + reg_offset), 0x14 & DSI_TURN_AROUND_TIMEOUT_MASK);
405
406 /*min 21 txclkesc, max: ffffh*/
407 REG_WRITE((MIPIA_DEVICE_RESET_TIMER_REG + reg_offset), 0xffff & DSI_RESET_TIMER_MASK);
408
409 REG_WRITE((MIPIA_DPI_RESOLUTION_REG + reg_offset), mode->vdisplay << 16 | mode->hdisplay);
410
411 /*set DPI timing registers*/
412 mdfld_dsi_dpi_timing_calculation(mode, &dpi_timing, dsi_config->lane_count, dsi_config->bpp);
413
414 REG_WRITE((MIPIA_HSYNC_COUNT_REG + reg_offset), dpi_timing.hsync_count & DSI_DPI_TIMING_MASK);
415 REG_WRITE((MIPIA_HBP_COUNT_REG + reg_offset), dpi_timing.hbp_count & DSI_DPI_TIMING_MASK);
416 REG_WRITE((MIPIA_HFP_COUNT_REG + reg_offset), dpi_timing.hfp_count & DSI_DPI_TIMING_MASK);
417 REG_WRITE((MIPIA_HACTIVE_COUNT_REG + reg_offset), dpi_timing.hactive_count & DSI_DPI_TIMING_MASK);
418 REG_WRITE((MIPIA_VSYNC_COUNT_REG + reg_offset), dpi_timing.vsync_count & DSI_DPI_TIMING_MASK);
419 REG_WRITE((MIPIA_VBP_COUNT_REG + reg_offset), dpi_timing.vbp_count & DSI_DPI_TIMING_MASK);
420 REG_WRITE((MIPIA_VFP_COUNT_REG + reg_offset), dpi_timing.vfp_count & DSI_DPI_TIMING_MASK);
421
422 REG_WRITE((MIPIA_HIGH_LOW_SWITCH_COUNT_REG + reg_offset), 0x46);
423
424 /*min: 7d0 max: 4e20*/
425 REG_WRITE((MIPIA_INIT_COUNT_REG + reg_offset), 0x000007d0);
426
427 /*set up video mode*/
428 val = 0;
429 val = dsi_config->video_mode | DSI_DPI_COMPLETE_LAST_LINE;
430 REG_WRITE((MIPIA_VIDEO_MODE_FORMAT_REG + reg_offset), val);
431
432 REG_WRITE((MIPIA_EOT_DISABLE_REG + reg_offset), 0x00000000);
433
434 REG_WRITE((MIPIA_LP_BYTECLK_REG + reg_offset), 0x00000004);
435
436	/*TODO: figure out how to set up these registers*/
437 REG_WRITE((MIPIA_DPHY_PARAM_REG + reg_offset), 0x150c3408);
438
439 REG_WRITE((MIPIA_CLK_LANE_SWITCH_TIME_CNT_REG + reg_offset), (0xa << 16) | 0x14);
440 /*set device ready*/
441 REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000001);
442}
443
444void mdfld_dsi_dpi_turn_on(struct mdfld_dsi_dpi_output *output, int pipe)
445{
446 struct drm_device *dev = output->dev;
447 u32 reg_offset = 0;
448
449 if(output->panel_on)
450 return;
451
452 if(pipe)
453 reg_offset = MIPIC_REG_OFFSET;
454
455 /* clear special packet sent bit */
456 if(REG_READ(MIPIA_INTR_STAT_REG + reg_offset) & DSI_INTR_STATE_SPL_PKG_SENT) {
457 REG_WRITE((MIPIA_INTR_STAT_REG + reg_offset), DSI_INTR_STATE_SPL_PKG_SENT);
458 }
459
460 /*send turn on package*/
461 REG_WRITE((MIPIA_DPI_CONTROL_REG + reg_offset), DSI_DPI_CTRL_HS_TURN_ON);
462
463 /*wait for SPL_PKG_SENT interrupt*/
464 mdfld_wait_for_SPL_PKG_SENT(dev, pipe);
465
466 if(REG_READ(MIPIA_INTR_STAT_REG + reg_offset) & DSI_INTR_STATE_SPL_PKG_SENT) {
467 REG_WRITE((MIPIA_INTR_STAT_REG + reg_offset), DSI_INTR_STATE_SPL_PKG_SENT);
468 }
469
470 output->panel_on = 1;
471
472	/* FIXME: the following is disabled to work around the X slow start issue on the TMD panel */
473 /* if(pipe == 2) */
474 /* dev_priv->dpi_panel_on2 = true; */
475 /* else if (pipe == 0) */
476 /* dev_priv->dpi_panel_on = true; */
477}
478
479static void mdfld_dsi_dpi_shut_down(struct mdfld_dsi_dpi_output *output, int pipe)
480{
481 struct drm_device *dev = output->dev;
482 u32 reg_offset = 0;
483
484	/* if the output is already off, or mode setting never happened, ignore this */
485 if((!output->panel_on) || output->first_boot) {
486 output->first_boot = 0;
487 return;
488 }
489
490 if(pipe)
491 reg_offset = MIPIC_REG_OFFSET;
492
493 /* Wait for dpi fifo to empty */
494 mdfld_wait_for_DPI_CTRL_FIFO(dev, pipe);
495
496 /* Clear the special packet interrupt bit if set */
497 if(REG_READ(MIPIA_INTR_STAT_REG + reg_offset) & DSI_INTR_STATE_SPL_PKG_SENT) {
498 REG_WRITE((MIPIA_INTR_STAT_REG + reg_offset), DSI_INTR_STATE_SPL_PKG_SENT);
499 }
500
501 if(REG_READ(MIPIA_DPI_CONTROL_REG + reg_offset) == DSI_DPI_CTRL_HS_SHUTDOWN) {
502		dev_warn(dev->dev, "trying to send the same package again, abort!\n");
503 goto shutdown_out;
504 }
505
506 REG_WRITE((MIPIA_DPI_CONTROL_REG + reg_offset), DSI_DPI_CTRL_HS_SHUTDOWN);
507
508shutdown_out:
509 output->panel_on = 0;
510 output->first_boot = 0;
511
512	/* FIXME: the following is disabled to work around the X slow start issue on the TMD panel */
513 /* if(pipe == 2) */
514 /* dev_priv->dpi_panel_on2 = false; */
515 /* else if (pipe == 0) */
516 /* dev_priv->dpi_panel_on = false; */
517 /* #ifdef CONFIG_PM_RUNTIME*/
518 /* if (drm_psb_ospm && !enable_gfx_rtpm) { */
519 /* pm_runtime_allow(&gpDrmDevice->pdev->dev); */
520 /* schedule_delayed_work(&dev_priv->rtpm_work, 30 * 1000); */
521 /* } */
522 /*if (enable_gfx_rtpm) */
523 /* pm_schedule_suspend(&dev->pdev->dev, gfxrtdelay); */
524 /* #endif */
525}
526
527void mdfld_dsi_dpi_set_power(struct drm_encoder *encoder, bool on)
528{
529 struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
530 struct mdfld_dsi_dpi_output *dpi_output = MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
531 struct mdfld_dsi_config *dsi_config = mdfld_dsi_encoder_get_config(dsi_encoder);
532 int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);
533 struct drm_device *dev = dsi_config->dev;
534 struct drm_psb_private *dev_priv = dev->dev_private;
535 u32 mipi_reg = MIPI;
536 u32 pipeconf_reg = PIPEACONF;
537
538 if(pipe) {
539 mipi_reg = MIPI_C;
540 pipeconf_reg = PIPECCONF;
541 }
542
543 /* Start up display island if it was shutdown */
544 if (!gma_power_begin(dev, true))
545 return;
546
547 if(on) {
548 if (mdfld_get_panel_type(dev, pipe) == TMD_VID){
549 mdfld_dsi_dpi_turn_on(dpi_output, pipe);
550 } else {
551 /* Enable mipi port */
552 REG_WRITE(mipi_reg, (REG_READ(mipi_reg) | (1 << 31)));
553 REG_READ(mipi_reg);
554
555 mdfld_dsi_dpi_turn_on(dpi_output, pipe);
556 mdfld_dsi_tpo_ic_init(dsi_config, pipe);
557 }
558
559 if(pipe == 2) {
560 dev_priv->dpi_panel_on2 = true;
561 }
562 else {
563 dev_priv->dpi_panel_on = true;
564 }
565
566 } else {
567 if (mdfld_get_panel_type(dev, pipe) == TMD_VID) {
568 mdfld_dsi_dpi_shut_down(dpi_output, pipe);
569 } else {
570 mdfld_dsi_dpi_shut_down(dpi_output, pipe);
571 /* Disable mipi port */
572 REG_WRITE(mipi_reg, (REG_READ(mipi_reg) & ~(1<<31)));
573 REG_READ(mipi_reg);
574 }
575
576 if(pipe == 2)
577 dev_priv->dpi_panel_on2 = false;
578 else
579 dev_priv->dpi_panel_on = false;
580 }
581 gma_power_end(dev);
582}
583
584void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode)
585{
586 dev_dbg(encoder->dev->dev, "DPMS %s\n",
587 (mode == DRM_MODE_DPMS_ON ? "on":"off"));
588
589 if (mode == DRM_MODE_DPMS_ON)
590 mdfld_dsi_dpi_set_power(encoder, true);
591 else {
592 mdfld_dsi_dpi_set_power(encoder, false);
593#if 0 /* FIXME */
594#ifdef CONFIG_PM_RUNTIME
595 if (enable_gfx_rtpm)
596 pm_schedule_suspend(&gpDrmDevice->pdev->dev, gfxrtdelay);
597#endif
598#endif
599 }
600}
601
602bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder,
603 struct drm_display_mode *mode,
604 struct drm_display_mode *adjusted_mode)
605{
606 struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
607 struct mdfld_dsi_config *dsi_config = mdfld_dsi_encoder_get_config(dsi_encoder);
608 struct drm_display_mode *fixed_mode = dsi_config->fixed_mode;
609
610 if(fixed_mode) {
611 adjusted_mode->hdisplay = fixed_mode->hdisplay;
612 adjusted_mode->hsync_start = fixed_mode->hsync_start;
613 adjusted_mode->hsync_end = fixed_mode->hsync_end;
614 adjusted_mode->htotal = fixed_mode->htotal;
615 adjusted_mode->vdisplay = fixed_mode->vdisplay;
616 adjusted_mode->vsync_start = fixed_mode->vsync_start;
617 adjusted_mode->vsync_end = fixed_mode->vsync_end;
618 adjusted_mode->vtotal = fixed_mode->vtotal;
619 adjusted_mode->clock = fixed_mode->clock;
620 drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
621 }
622
623 return true;
624}
625
626void mdfld_dsi_dpi_prepare(struct drm_encoder *encoder)
627{
628 mdfld_dsi_dpi_set_power(encoder, false);
629}
630
631void mdfld_dsi_dpi_commit(struct drm_encoder *encoder)
632{
633 mdfld_dsi_dpi_set_power(encoder, true);
634}
635
636void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
637 struct drm_display_mode *mode,
638 struct drm_display_mode *adjusted_mode)
639{
640 struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
641 struct mdfld_dsi_dpi_output *dpi_output = MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
642 struct mdfld_dsi_config *dsi_config = mdfld_dsi_encoder_get_config(dsi_encoder);
643 struct drm_device *dev = dsi_config->dev;
644 struct drm_psb_private *dev_priv = dev->dev_private;
645 int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);
646
647 u32 pipeconf_reg = PIPEACONF;
648 u32 dspcntr_reg = DSPACNTR;
649 u32 mipi_reg = MIPI;
650 u32 reg_offset = 0;
651
652 u32 pipeconf = dev_priv->pipeconf;
653 u32 dspcntr = dev_priv->dspcntr;
654 u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
655
656 dev_dbg(dev->dev, "set mode %dx%d on pipe %d\n",
657 mode->hdisplay, mode->vdisplay, pipe);
658
659 if(pipe) {
660 pipeconf_reg = PIPECCONF;
661 dspcntr_reg = DSPCCNTR;
662 mipi_reg = MIPI_C;
663 reg_offset = MIPIC_REG_OFFSET;
664 } else {
665 mipi |= 2;
666 }
667
668 if (!gma_power_begin(dev, true))
669 return;
670
671 /* Set up mipi port FIXME: do at init time */
672 REG_WRITE(mipi_reg, mipi);
673 REG_READ(mipi_reg);
674
675 /* Set up DSI controller DPI interface */
676 mdfld_dsi_dpi_controller_init(dsi_config, pipe);
677
678 if (mdfld_get_panel_type(dev, pipe) != TMD_VID) {
679 /* Turn on DPI interface */
680 mdfld_dsi_dpi_turn_on(dpi_output, pipe);
681 }
682
683 /* Set up pipe */
684 REG_WRITE(pipeconf_reg, pipeconf);
685 REG_READ(pipeconf_reg);
686
687 /* Set up display plane */
688 REG_WRITE(dspcntr_reg, dspcntr);
689 REG_READ(dspcntr_reg);
690
691 msleep(20); /* FIXME: this should wait for vblank */
692
693 dev_dbg(dev->dev, "State %x, power %d\n",
694 REG_READ(MIPIA_INTR_STAT_REG + reg_offset),
695 dpi_output->panel_on);
696
697 if (mdfld_get_panel_type(dev, pipe) != TMD_VID) {
698 /* Init driver ic */
699 mdfld_dsi_tpo_ic_init(dsi_config, pipe);
700 /* Init backlight */
701 mdfld_dsi_brightness_init(dsi_config, pipe);
702 }
703 gma_power_end(dev);
704}
705
706
707/*
708 * Init the DSI DPI encoder.
709 * Allocate an mdfld_dsi_encoder and attach it to the given @dsi_connector.
710 * Returns a pointer to the newly allocated DPI encoder, or NULL on error.
711 */
712struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
713 struct mdfld_dsi_connector *dsi_connector,
714 struct panel_funcs *p_funcs)
715{
716 struct mdfld_dsi_dpi_output *dpi_output = NULL;
717 struct mdfld_dsi_config *dsi_config;
718 struct drm_connector *connector = NULL;
719 struct drm_encoder *encoder = NULL;
720 struct drm_display_mode *fixed_mode = NULL;
721 int pipe;
722 u32 data;
723 int ret;
724
725 if (!dsi_connector || !p_funcs) {
726 WARN_ON(1);
727 return NULL;
728 }
729
730 dsi_config = mdfld_dsi_get_config(dsi_connector);
731 pipe = dsi_connector->pipe;
732
733 /* Panel hard-reset */
734 if (p_funcs->reset) {
735 ret = p_funcs->reset(pipe);
736 if (ret) {
737 DRM_ERROR("Panel %d hard-reset failed\n", pipe);
738 return NULL;
739 }
740 }
741
742 /* Panel drvIC init */
743 if (p_funcs->drv_ic_init)
744 p_funcs->drv_ic_init(dsi_config, pipe);
745
746 /* Panel power mode detect */
747 ret = mdfld_dsi_get_power_mode(dsi_config,
748 &data,
749 MDFLD_DSI_LP_TRANSMISSION);
750 if (ret) {
751 DRM_ERROR("Panel %d get power mode failed\n", pipe);
752 dsi_connector->status = connector_status_disconnected;
753 } else {
754 DRM_INFO("pipe %d power mode 0x%x\n", pipe, data);
755 dsi_connector->status = connector_status_connected;
756 }
757
758 dpi_output = kzalloc(sizeof(struct mdfld_dsi_dpi_output), GFP_KERNEL);
759 if(!dpi_output) {
760 dev_err(dev->dev, "No memory for dsi_dpi_output\n");
761 return NULL;
762 }
763
764 if(dsi_connector->pipe)
765 dpi_output->panel_on = 0;
766 else
767 dpi_output->panel_on = 0;
768
769 dpi_output->dev = dev;
770 dpi_output->p_funcs = p_funcs;
771 dpi_output->first_boot = 1;
772
773 /* Get fixed mode */
774 dsi_config = mdfld_dsi_get_config(dsi_connector);
775 fixed_mode = dsi_config->fixed_mode;
776
777 /* Create drm encoder object */
778 connector = &dsi_connector->base.base;
779 encoder = &dpi_output->base.base;
780 /*
781 * On existing hardware this will be a panel of some form,
782 * if future devices also have HDMI bridges this will need
783 * revisiting
784 */
785 drm_encoder_init(dev,
786 encoder,
787 p_funcs->encoder_funcs,
788 DRM_MODE_ENCODER_LVDS);
789 drm_encoder_helper_add(encoder,
790 p_funcs->encoder_helper_funcs);
791
792 /* Attach to given connector */
793 drm_mode_connector_attach_encoder(connector, encoder);
794
795 /* Set possible crtcs and clones */
796 if(dsi_connector->pipe) {
797 encoder->possible_crtcs = (1 << 2);
798 encoder->possible_clones = (1 << 1);
799 } else {
800 encoder->possible_crtcs = (1 << 0);
801 encoder->possible_clones = (1 << 0);
802 }
803 return &dpi_output->base;
804}
805
diff --git a/drivers/staging/gma500/mdfld_dsi_dpi.h b/drivers/staging/gma500/mdfld_dsi_dpi.h
deleted file mode 100644
index ed92d45ee74a..000000000000
--- a/drivers/staging/gma500/mdfld_dsi_dpi.h
+++ /dev/null
@@ -1,78 +0,0 @@
1/*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * jim liu <jim.liu@intel.com>
25 * Jackie Li<yaodong.li@intel.com>
26 */
27
28#ifndef __MDFLD_DSI_DPI_H__
29#define __MDFLD_DSI_DPI_H__
30
31#include "mdfld_dsi_output.h"
32#include "mdfld_output.h"
33
34struct mdfld_dsi_dpi_timing {
35 u16 hsync_count;
36 u16 hbp_count;
37 u16 hfp_count;
38 u16 hactive_count;
39 u16 vsync_count;
40 u16 vbp_count;
41 u16 vfp_count;
42};
43
44struct mdfld_dsi_dpi_output {
45 struct mdfld_dsi_encoder base;
46 struct drm_device *dev;
47
48 int panel_on;
49 int first_boot;
50
51 struct panel_funcs *p_funcs;
52};
53
54#define MDFLD_DSI_DPI_OUTPUT(dsi_encoder) \
55 container_of(dsi_encoder, struct mdfld_dsi_dpi_output, base)
56
57extern int mdfld_dsi_dpi_timing_calculation(struct drm_display_mode *mode,
58 struct mdfld_dsi_dpi_timing *dpi_timing,
59 int num_lane, int bpp);
60extern struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
61 struct mdfld_dsi_connector *dsi_connector,
62 struct panel_funcs *p_funcs);
63
64/* Medfield DPI helper functions */
65extern void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode);
66extern bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder,
67 struct drm_display_mode *mode,
68 struct drm_display_mode *adjusted_mode);
69extern void mdfld_dsi_dpi_prepare(struct drm_encoder *encoder);
70extern void mdfld_dsi_dpi_commit(struct drm_encoder *encoder);
71extern void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
72 struct drm_display_mode *mode,
73 struct drm_display_mode *adjusted_mode);
74extern void mdfld_dsi_dpi_turn_on(struct mdfld_dsi_dpi_output *output,
75 int pipe);
76extern void mdfld_dsi_dpi_controller_init(struct mdfld_dsi_config *dsi_config,
77 int pipe);
78#endif /*__MDFLD_DSI_DPI_H__*/
diff --git a/drivers/staging/gma500/mdfld_dsi_output.c b/drivers/staging/gma500/mdfld_dsi_output.c
deleted file mode 100644
index 3f979db2c3a5..000000000000
--- a/drivers/staging/gma500/mdfld_dsi_output.c
+++ /dev/null
@@ -1,1014 +0,0 @@
1/*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * jim liu <jim.liu@intel.com>
25 * Jackie Li<yaodong.li@intel.com>
26 */
27
28#include "mdfld_dsi_output.h"
29#include "mdfld_dsi_dbi.h"
30#include "mdfld_dsi_dpi.h"
31#include "mdfld_output.h"
32#include <asm/intel_scu_ipc.h>
33#include "mdfld_dsi_pkg_sender.h"
34#include <linux/pm_runtime.h>
35#include <linux/moduleparam.h>
36
37#define MDFLD_DSI_BRIGHTNESS_MAX_LEVEL 100
38
39static int CABC_control = 1;
40static int LABC_control = 1;
41
42module_param (CABC_control, int, 0644);
43module_param (LABC_control, int, 0644);
44
45/**
46 * Make these MCS commands global so that
47 * we don't need to reload them every time we send them.
48 * FIXME: this data was provided by the OEM; we should get it from the GCT.
49 **/
50static u32 mdfld_dbi_mcs_hysteresis[] = {
51 0x42000f57, 0x8c006400, 0xff00bf00, 0xffffffff,
52 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
53 0x38000aff, 0x82005000, 0xff00ab00, 0xffffffff,
54 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
55 0x000000ff,
56};
57
58static u32 mdfld_dbi_mcs_display_profile[] = {
59 0x50281450, 0x0000c882, 0x00000000, 0x00000000,
60 0x00000000,
61};
62
63static u32 mdfld_dbi_mcs_kbbc_profile[] = {
64 0x00ffcc60, 0x00000000, 0x00000000, 0x00000000,
65};
66
67static u32 mdfld_dbi_mcs_gamma_profile[] = {
68 0x81111158, 0x88888888, 0x88888888,
69};
70
71/*
72 * write hysteresis values.
73 */
74static void mdfld_dsi_write_hysteresis (struct mdfld_dsi_config *dsi_config,
75 int pipe)
76{
77 struct mdfld_dsi_pkg_sender *sender = mdfld_dsi_get_pkg_sender(dsi_config);
78
79 if(!sender) {
80 WARN_ON(1);
81 return;
82 }
83 mdfld_dsi_send_mcs_long_hs(sender,
84 mdfld_dbi_mcs_hysteresis,
85 17,
86 MDFLD_DSI_SEND_PACKAGE);
87}
88
89/*
90 * write display profile values.
91 */
92static void mdfld_dsi_write_display_profile(struct mdfld_dsi_config *dsi_config, int pipe)
93{
94 struct mdfld_dsi_pkg_sender *sender = mdfld_dsi_get_pkg_sender(dsi_config);
95
96 if(!sender) {
97 WARN_ON(1);
98 return;
99 }
100 mdfld_dsi_send_mcs_long_hs(sender,
101 mdfld_dbi_mcs_display_profile,
102 5,
103 MDFLD_DSI_SEND_PACKAGE);
104}
105
106/*
107 * write KBBC profile values.
108 */
109static void mdfld_dsi_write_kbbc_profile (struct mdfld_dsi_config * dsi_config, int pipe)
110{
111 struct mdfld_dsi_pkg_sender *sender = mdfld_dsi_get_pkg_sender(dsi_config);
112
113 if(!sender) {
114 WARN_ON(1);
115 return;
116 }
117 mdfld_dsi_send_mcs_long_hs(sender,
118 mdfld_dbi_mcs_kbbc_profile,
119 4,
120 MDFLD_DSI_SEND_PACKAGE);
121}
122
123/*
124 * write gamma setting.
125 */
126static void mdfld_dsi_write_gamma_setting (struct mdfld_dsi_config *dsi_config, int pipe)
127{
128 struct mdfld_dsi_pkg_sender *sender = mdfld_dsi_get_pkg_sender(dsi_config);
129
130 if(!sender) {
131 WARN_ON(1);
132 return;
133 }
134 mdfld_dsi_send_mcs_long_hs(sender,
135 mdfld_dbi_mcs_gamma_profile,
136 3,
137 MDFLD_DSI_SEND_PACKAGE);
138}
139
140/*
141 * Check and see if the generic control or data buffer is empty and ready.
142 */
143void mdfld_dsi_gen_fifo_ready (struct drm_device *dev, u32 gen_fifo_stat_reg, u32 fifo_stat)
144{
145 u32 GEN_BF_time_out_count = 0;
146
147	/* Check MIPI adapter command registers */
148 for (GEN_BF_time_out_count = 0; GEN_BF_time_out_count < GEN_FB_TIME_OUT; GEN_BF_time_out_count++)
149 {
150 if ((REG_READ(gen_fifo_stat_reg) & fifo_stat) == fifo_stat)
151 break;
152 udelay (100);
153 }
154
155 if (GEN_BF_time_out_count == GEN_FB_TIME_OUT)
156 dev_err(dev->dev,
157 "mdfld_dsi_gen_fifo_ready, Timeout. gen_fifo_stat_reg = 0x%x. \n",
158 gen_fifo_stat_reg);
159}
160
161/*
162 * Manage the DSI MIPI keyboard and display brightness.
163 * FIXME: this is exported to OSPM code; a specific display
164 * interface for OSPM should be worked out.
165 */
166void mdfld_dsi_brightness_init(struct mdfld_dsi_config *dsi_config, int pipe)
167{
168 struct mdfld_dsi_pkg_sender *sender = mdfld_dsi_get_pkg_sender(dsi_config);
169 struct drm_device *dev = sender->dev;
170 struct drm_psb_private *dev_priv = dev->dev_private;
171 u32 gen_ctrl_val;
172
173 if(!sender) {
174 WARN_ON(1);
175 return;
176 }
177 /* Set default display backlight value to 85% (0xd8)*/
178 mdfld_dsi_send_mcs_short_hs(sender,
179 write_display_brightness,
180 0xd8,
181 1,
182 MDFLD_DSI_SEND_PACKAGE);
183
184 /* Set minimum brightness setting of CABC function to 20% (0x33)*/
185 mdfld_dsi_send_mcs_short_hs(sender,
186 write_cabc_min_bright,
187 0x33,
188 1,
189 MDFLD_DSI_SEND_PACKAGE);
190
191 mdfld_dsi_write_hysteresis(dsi_config, pipe);
192 mdfld_dsi_write_display_profile (dsi_config, pipe);
193 mdfld_dsi_write_kbbc_profile (dsi_config, pipe);
194 mdfld_dsi_write_gamma_setting (dsi_config, pipe);
195
196 /* Enable backlight or/and LABC */
197 gen_ctrl_val = BRIGHT_CNTL_BLOCK_ON | DISPLAY_DIMMING_ON| BACKLIGHT_ON;
198 if (LABC_control == 1 || CABC_control == 1)
199 gen_ctrl_val |= DISPLAY_DIMMING_ON| DISPLAY_BRIGHTNESS_AUTO | GAMMA_AUTO;
200
201 if (LABC_control == 1)
202 gen_ctrl_val |= AMBIENT_LIGHT_SENSE_ON;
203
204 dev_priv->mipi_ctrl_display = gen_ctrl_val;
205
206 mdfld_dsi_send_mcs_short_hs(sender,
207 write_ctrl_display,
208 (u8)gen_ctrl_val,
209 1,
210 MDFLD_DSI_SEND_PACKAGE);
211
212 if (CABC_control == 0)
213 return;
214 mdfld_dsi_send_mcs_short_hs(sender,
215 write_ctrl_cabc,
216 UI_IMAGE,
217 1,
218 MDFLD_DSI_SEND_PACKAGE);
219}
220
221/*
222 * Manage the mipi display brightness.
223 * TODO: refine this interface later
224 */
225void mdfld_dsi_brightness_control(struct drm_device *dev, int pipe, int level)
226{
227 struct mdfld_dsi_pkg_sender *sender;
228 struct drm_psb_private *dev_priv;
229 struct mdfld_dsi_config *dsi_config;
230 u32 gen_ctrl_val;
231 int p_type;
232
233 if (!dev || (pipe != 0 && pipe != 2)) {
234		DRM_ERROR("Invalid parameter\n");
235 return;
236 }
237
238 p_type = mdfld_get_panel_type(dev, 0);
239
240 dev_priv = dev->dev_private;
241
242 if(pipe)
243 dsi_config = dev_priv->dsi_configs[1];
244 else
245 dsi_config = dev_priv->dsi_configs[0];
246
247 sender = mdfld_dsi_get_pkg_sender(dsi_config);
248
249 if(!sender) {
250 WARN_ON(1);
251 return;
252 }
253
254 gen_ctrl_val = ((level * 0xff) / MDFLD_DSI_BRIGHTNESS_MAX_LEVEL) & 0xff;
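	/* scale the 0-100 level to an 8-bit DCS value, e.g. level 50 -> (50 * 0xff) / 100 = 0x7f */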
255
256 dev_dbg(dev->dev,
257 "pipe = %d, gen_ctrl_val = %d. \n", pipe, gen_ctrl_val);
258
259 if(p_type == TMD_VID || p_type == TMD_CMD){
260 /* Set display backlight value */
261 mdfld_dsi_send_mcs_short_hs(sender,
262 tmd_write_display_brightness,
263 (u8)gen_ctrl_val,
264 1,
265 MDFLD_DSI_SEND_PACKAGE);
266 } else {
267 /* Set display backlight value */
268 mdfld_dsi_send_mcs_short_hs(sender,
269 write_display_brightness,
270 (u8)gen_ctrl_val,
271 1,
272 MDFLD_DSI_SEND_PACKAGE);
273
274
275 /* Enable backlight control */
276 if (level == 0)
277 gen_ctrl_val = 0;
278 else
279 gen_ctrl_val = dev_priv->mipi_ctrl_display;
280
281 mdfld_dsi_send_mcs_short_hs(sender,
282 write_ctrl_display,
283 (u8)gen_ctrl_val,
284 1,
285 MDFLD_DSI_SEND_PACKAGE);
286 }
287}
288
289/*
290 * shut down DSI controller
291 */
292void mdfld_dsi_controller_shutdown(struct mdfld_dsi_config * dsi_config, int pipe)
293{
294 struct drm_device * dev;
295 u32 reg_offset = pipe ? MIPIC_REG_OFFSET : 0;
296 int retry = 100;
297
298 if (!dsi_config) {
299 WARN_ON(1);
300 return;
301 }
302
303 dev = dsi_config->dev;
304
305 if (!gma_power_begin(dev, true)) {
306 dev_err(dev->dev, "hw begin failed\n");
307 return;
308 }
309
310 if(!(REG_READ(MIPIA_DEVICE_READY_REG + reg_offset) & DSI_DEVICE_READY))
311 goto shutdown_out;
312
313 /* Send shut down package, clean packet send bit first */
314 if(REG_READ(MIPIA_INTR_STAT_REG + reg_offset) & DSI_INTR_STATE_SPL_PKG_SENT) {
315 REG_WRITE((MIPIA_INTR_STAT_REG + reg_offset),
316 (REG_READ(MIPIA_INTR_STAT_REG + reg_offset) | DSI_INTR_STATE_SPL_PKG_SENT));
317 }
318
319 /*send shut down package in HS*/
320 REG_WRITE((MIPIA_DPI_CONTROL_REG + reg_offset), DSI_DPI_CTRL_HS_SHUTDOWN);
321
322
323 /*
324	 * make sure the shutdown package has been sent.
325	 * FIXME: the retry loop below polls without any delay
326 */
327 while(!(REG_READ(MIPIA_INTR_STAT_REG + reg_offset) & DSI_INTR_STATE_SPL_PKG_SENT)) {
328 retry--;
329
330 if(!retry) {
331 dev_err(dev->dev, "timeout\n");
332 break;
333 }
334 }
335
336	/* sleep 100 ms to ensure the shutdown has finished */
337 msleep(100);
338
339 /*un-ready device*/
340 REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset),
341 (REG_READ(MIPIA_DEVICE_READY_REG + reg_offset) & ~DSI_DEVICE_READY));
342
343shutdown_out:
344 gma_power_end(dev);
345}
346
347void mdfld_dsi_controller_startup(struct mdfld_dsi_config * dsi_config, int pipe)
348{
349 struct drm_device * dev;
350 u32 reg_offset = pipe ? MIPIC_REG_OFFSET : 0;
351 int retry = 100;
352
353
354 if (!dsi_config) {
355 WARN_ON(1);
356 return;
357 }
358
359 dev = dsi_config->dev;
360 dev_dbg(dev->dev, "starting up DSI controller on pipe %d...\n", pipe);
361
362 if (!gma_power_begin(dev, true)) {
363 dev_err(dev->dev, "hw begin failed\n");
364 return;
365 }
366
367 if((REG_READ(MIPIA_DEVICE_READY_REG + reg_offset) & DSI_DEVICE_READY))
368 goto startup_out;
369
370 /*if config DPI, turn on DPI interface*/
371 if(dsi_config->type == MDFLD_DSI_ENCODER_DPI) {
372 if(REG_READ(MIPIA_INTR_STAT_REG + reg_offset) & DSI_INTR_STATE_SPL_PKG_SENT) {
373 REG_WRITE((MIPIA_INTR_STAT_REG + reg_offset), DSI_INTR_STATE_SPL_PKG_SENT);
374 }
375
376 REG_WRITE((MIPIA_DPI_CONTROL_REG + reg_offset), DSI_DPI_CTRL_HS_TURN_ON);
377
378 /*
379		 * make sure the turn-on package has been sent.
380		 * FIXME: the retry loop below polls without any delay
381 */
382 while(!(REG_READ(MIPIA_INTR_STAT_REG + reg_offset) & DSI_INTR_STATE_SPL_PKG_SENT)) {
383 retry--;
384 if(!retry) {
385 dev_err(dev->dev, "timeout\n");
386 break;
387 }
388 }
389
390 msleep(100);
391 }
392
393 /*set device ready*/
394 REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset),
395 (REG_READ(MIPIA_DEVICE_READY_REG + reg_offset) | DSI_DEVICE_READY));
396
397startup_out:
398 gma_power_end(dev);
399}
400
401
402static int mdfld_dsi_get_panel_status(struct mdfld_dsi_config *dsi_config,
403 u8 dcs,
404 u32 *data,
405 u8 transmission)
406{
407 struct mdfld_dsi_pkg_sender *sender
408 = mdfld_dsi_get_pkg_sender(dsi_config);
409
410 if (!sender || !data) {
411 DRM_ERROR("Invalid parameter\n");
412 return -EINVAL;
413 }
414
415 if (transmission == MDFLD_DSI_HS_TRANSMISSION)
416 return mdfld_dsi_read_mcs_hs(sender, dcs, data, 1);
417 else if (transmission == MDFLD_DSI_LP_TRANSMISSION)
418 return mdfld_dsi_read_mcs_lp(sender, dcs, data, 1);
419 else
420 return -EINVAL;
421}
422
423int mdfld_dsi_get_power_mode(struct mdfld_dsi_config *dsi_config,
424 u32 *mode,
425 u8 transmission)
426{
427 if (!dsi_config || !mode) {
428 DRM_ERROR("Invalid parameter\n");
429 return -EINVAL;
430 }
431
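	/* 0x0a is the standard DCS get_power_mode command */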
432 return mdfld_dsi_get_panel_status(dsi_config, 0x0a, mode, transmission);
433}
434
435int mdfld_dsi_get_diagnostic_result(struct mdfld_dsi_config *dsi_config,
436 u32 *result,
437 u8 transmission)
438{
439 if (!dsi_config || !result) {
440 DRM_ERROR("Invalid parameter\n");
441 return -EINVAL;
442 }
443
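	/* 0x0f is the standard DCS get_diagnostic_result command */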
444 return mdfld_dsi_get_panel_status(dsi_config, 0x0f, result,
445 transmission);
446}
447
448/*
449 * NOTE: this function was used by OSPM.
450 * TODO: this will be removed later; a proper display interface for OSPM should be worked out
451 */
452void mdfld_dsi_controller_init(struct mdfld_dsi_config * dsi_config, int pipe)
453{
454 if(!dsi_config || ((pipe != 0) && (pipe != 2))) {
455 WARN_ON(1);
456 return;
457 }
458
459 if(dsi_config->type)
460 mdfld_dsi_dpi_controller_init(dsi_config, pipe);
461 else
462 mdfld_dsi_controller_dbi_init(dsi_config, pipe);
463}
464
465static void mdfld_dsi_connector_save(struct drm_connector * connector)
466{
467}
468
469static void mdfld_dsi_connector_restore(struct drm_connector * connector)
470{
471}
472
473static enum drm_connector_status mdfld_dsi_connector_detect(struct drm_connector * connector, bool force)
474{
475 struct psb_intel_output *psb_output
476 = to_psb_intel_output(connector);
477 struct mdfld_dsi_connector *dsi_connector
478 = MDFLD_DSI_CONNECTOR(psb_output);
479 return dsi_connector->status;
480}
481
482static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
483 struct drm_property *property,
484 uint64_t value)
485{
486 struct drm_encoder *encoder = connector->encoder;
487
488 if (!strcmp(property->name, "scaling mode") && encoder) {
489 struct psb_intel_crtc * psb_crtc = to_psb_intel_crtc(encoder->crtc);
490 bool bTransitionFromToCentered;
491 uint64_t curValue;
492
493 if (!psb_crtc)
494 goto set_prop_error;
495
496 switch (value) {
497 case DRM_MODE_SCALE_FULLSCREEN:
498 break;
499 case DRM_MODE_SCALE_NO_SCALE:
500 break;
501 case DRM_MODE_SCALE_ASPECT:
502 break;
503 default:
504 goto set_prop_error;
505 }
506
507 if (drm_connector_property_get_value(connector, property, &curValue))
508 goto set_prop_error;
509
510 if (curValue == value)
511 goto set_prop_done;
512
513 if (drm_connector_property_set_value(connector, property, value))
514 goto set_prop_error;
515
516 bTransitionFromToCentered = (curValue == DRM_MODE_SCALE_NO_SCALE) ||
517 (value == DRM_MODE_SCALE_NO_SCALE);
518
519 if (psb_crtc->saved_mode.hdisplay != 0 &&
520 psb_crtc->saved_mode.vdisplay != 0) {
521 if (bTransitionFromToCentered) {
522 if (!drm_crtc_helper_set_mode(encoder->crtc, &psb_crtc->saved_mode,
523 encoder->crtc->x, encoder->crtc->y, encoder->crtc->fb))
524 goto set_prop_error;
525 } else {
526 struct drm_encoder_helper_funcs *pEncHFuncs = encoder->helper_private;
527 pEncHFuncs->mode_set(encoder, &psb_crtc->saved_mode,
528 &psb_crtc->saved_adjusted_mode);
529 }
530 }
531#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
532 } else if (!strcmp(property->name, "backlight") && encoder) {
533 struct drm_psb_private *dev_priv = encoder->dev->dev_private;
534 struct backlight_device *psb_bd = dev_priv->backlight_device;
535 dev_dbg(encoder->dev->dev, "backlight level = %d\n", (int)value);
536 if (drm_connector_property_set_value(connector, property, value))
537 goto set_prop_error;
538 else {
539 dev_dbg(encoder->dev->dev,
540 "set brightness to %d", (int)value);
541 if (psb_bd) {
542 psb_bd->props.brightness = value;
543 backlight_update_status(psb_bd);
544 }
545 }
546#endif
547 }
548set_prop_done:
549 return 0;
550set_prop_error:
551 return -1;
552}
553
554static void mdfld_dsi_connector_destroy(struct drm_connector *connector)
555{
556 struct psb_intel_output * psb_output = to_psb_intel_output(connector);
557 struct mdfld_dsi_connector * dsi_connector = MDFLD_DSI_CONNECTOR(psb_output);
558 struct mdfld_dsi_pkg_sender * sender;
559
560 if(!dsi_connector)
561 return;
562
563 drm_sysfs_connector_remove(connector);
564 drm_connector_cleanup(connector);
565
566 sender = dsi_connector->pkg_sender;
567
568 mdfld_dsi_pkg_sender_destroy(sender);
569
570 kfree(dsi_connector);
571}
572
573static int mdfld_dsi_connector_get_modes(struct drm_connector * connector)
574{
575 struct psb_intel_output * psb_output = to_psb_intel_output(connector);
576 struct mdfld_dsi_connector * dsi_connector = MDFLD_DSI_CONNECTOR(psb_output);
577 struct mdfld_dsi_config * dsi_config = mdfld_dsi_get_config(dsi_connector);
578 struct drm_display_mode * fixed_mode = dsi_config->fixed_mode;
579 struct drm_display_mode * dup_mode = NULL;
580 struct drm_device * dev = connector->dev;
581
582 connector->display_info.min_vfreq = 0;
583 connector->display_info.max_vfreq = 200;
584 connector->display_info.min_hfreq = 0;
585 connector->display_info.max_hfreq = 200;
586
587 if(fixed_mode) {
588 dev_dbg(dev->dev, "fixed_mode %dx%d\n",
589 fixed_mode->hdisplay, fixed_mode->vdisplay);
590
591 dup_mode = drm_mode_duplicate(dev, fixed_mode);
592 drm_mode_probed_add(connector, dup_mode);
593 return 1;
594 }
595 dev_err(dev->dev, "Didn't get any modes!\n");
596 return 0;
597}
598
599static int mdfld_dsi_connector_mode_valid(struct drm_connector * connector, struct drm_display_mode * mode)
600{
601 struct psb_intel_output * psb_output = to_psb_intel_output(connector);
602 struct mdfld_dsi_connector * dsi_connector = MDFLD_DSI_CONNECTOR(psb_output);
603 struct mdfld_dsi_config * dsi_config = mdfld_dsi_get_config(dsi_connector);
604 struct drm_display_mode * fixed_mode = dsi_config->fixed_mode;
605
606 dev_dbg(connector->dev->dev, "mode %p, fixed mode %p\n",
607 mode, fixed_mode);
608
609 if(mode->flags & DRM_MODE_FLAG_DBLSCAN)
610 return MODE_NO_DBLESCAN;
611
612 if(mode->flags & DRM_MODE_FLAG_INTERLACE)
613 return MODE_NO_INTERLACE;
614
615 /**
616	 * FIXME: the current DC has no fitting unit, so reject any non-matching mode
617	 * setting request; a way to do up-scaling (panel fitting) will be figured out later.
618 **/
619 if(fixed_mode) {
620 if(mode->hdisplay != fixed_mode->hdisplay)
621 return MODE_PANEL;
622
623 if(mode->vdisplay != fixed_mode->vdisplay)
624 return MODE_PANEL;
625 }
626 dev_dbg(connector->dev->dev, "mode ok\n");
627
628 return MODE_OK;
629}
630
631static void mdfld_dsi_connector_dpms(struct drm_connector *connector, int mode)
632{
633#ifdef CONFIG_PM_RUNTIME
634 struct drm_device * dev = connector->dev;
635 struct drm_psb_private * dev_priv = dev->dev_private;
636 bool panel_on, panel_on2;
637#endif
638 /* First, execute DPMS */
639 drm_helper_connector_dpms(connector, mode);
640
641#ifdef CONFIG_PM_RUNTIME
642 if(mdfld_panel_dpi(dev)) {
643 /* DPI panel */
644 panel_on = dev_priv->dpi_panel_on;
645 panel_on2 = dev_priv->dpi_panel_on2;
646 } else {
647 /* DBI panel */
648 panel_on = dev_priv->dbi_panel_on;
649 panel_on2 = dev_priv->dbi_panel_on2;
650 }
651
652 /* Then check all display panels + monitors status */
653	/* Make sure that the Display (B) sub-system isn't in S0i3 when
654	 * reading/writing DC registers, otherwise a "Fabric error" would
655	 * occur. */
656 if(!panel_on && !panel_on2 && !(REG_READ(HDMIB_CONTROL)
657 & HDMIB_PORT_EN)) {
658 /* Request rpm idle */
659 if(dev_priv->rpm_enabled)
660 pm_request_idle(&dev->pdev->dev);
661 }
662 /*
663 * if rpm wasn't enabled yet, try to allow it
664 * FIXME: won't enable rpm for DPI since DPI
665 * CRTC setting is a little messy now.
666 * Enable it later!
667 */
668#if 0
669 if(!dev_priv->rpm_enabled && !mdfld_panel_dpi(dev))
670 ospm_runtime_pm_allow(dev);
671#endif
672#endif
673}
674
675static struct drm_encoder *mdfld_dsi_connector_best_encoder(
676 struct drm_connector *connector)
677{
678 struct psb_intel_output * psb_output = to_psb_intel_output(connector);
679 struct mdfld_dsi_connector * dsi_connector = MDFLD_DSI_CONNECTOR(psb_output);
680 struct mdfld_dsi_config * dsi_config = mdfld_dsi_get_config(dsi_connector);
681 struct mdfld_dsi_encoder * encoder = NULL;
682
683 if(dsi_config->type == MDFLD_DSI_ENCODER_DBI)
684 encoder = dsi_config->encoders[MDFLD_DSI_ENCODER_DBI];
685 else if (dsi_config->type == MDFLD_DSI_ENCODER_DPI)
686 encoder = dsi_config->encoders[MDFLD_DSI_ENCODER_DPI];
687
688 dev_dbg(connector->dev->dev, "get encoder %p\n", encoder);
689
690 if(!encoder) {
691 dev_err(connector->dev->dev,
692 "Invalid encoder for type %d\n", dsi_config->type);
693 return NULL;
694 }
695 dsi_config->encoder = encoder;
696 return &encoder->base;
697}
698
699/* DSI connector funcs */
700static const struct drm_connector_funcs mdfld_dsi_connector_funcs = {
701 .dpms = /*drm_helper_connector_dpms*/mdfld_dsi_connector_dpms,
702 .save = mdfld_dsi_connector_save,
703 .restore = mdfld_dsi_connector_restore,
704 .detect = mdfld_dsi_connector_detect,
705 .fill_modes = drm_helper_probe_single_connector_modes,
706 .set_property = mdfld_dsi_connector_set_property,
707 .destroy = mdfld_dsi_connector_destroy,
708};
709
710/* DSI connector helper funcs */
711static const struct drm_connector_helper_funcs mdfld_dsi_connector_helper_funcs = {
712 .get_modes = mdfld_dsi_connector_get_modes,
713 .mode_valid = mdfld_dsi_connector_mode_valid,
714 .best_encoder = mdfld_dsi_connector_best_encoder,
715};
716
717static int mdfld_dsi_get_default_config(struct drm_device * dev,
718 struct mdfld_dsi_config * config, int pipe)
719{
720 if(!dev || !config) {
721 WARN_ON(1);
722 return -EINVAL;
723 }
724
725 config->bpp = 24;
726 config->type = mdfld_panel_dpi(dev);
727 config->lane_count = 2;
728 config->channel_num = 0;
729 /*NOTE: video mode is ignored when type is MDFLD_DSI_ENCODER_DBI*/
730 if (mdfld_get_panel_type(dev, pipe) == TMD_VID) {
731 config->video_mode = MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_PULSE;
732 } else {
733 config->video_mode = MDFLD_DSI_VIDEO_BURST_MODE;
734 }
735
736 return 0;
737}
738
739/*
740 * Returns the panel fixed mode from configuration.
741 */
742struct drm_display_mode *
743mdfld_dsi_get_configuration_mode(struct mdfld_dsi_config * dsi_config, int pipe)
744{
745 struct drm_device *dev = dsi_config->dev;
746 struct drm_display_mode *mode;
747 struct drm_psb_private *dev_priv = dev->dev_private;
748 struct mrst_timing_info *ti = &dev_priv->gct_data.DTD;
749 bool use_gct = false;
750
751 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
752 if (!mode) {
753 dev_err(dev->dev, "Out of memory for mode\n");
754 return NULL;
755 }
756 if (use_gct) {
757 dev_dbg(dev->dev, "gct find MIPI panel.\n");
758
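		/*
		 * The GCT DTD stores each timing value split into hi/lo bytes;
		 * reassemble the 16-bit values before deriving the sync points.
		 */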
759 mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
760 mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
761 mode->hsync_start = mode->hdisplay + \
762 ((ti->hsync_offset_hi << 8) | \
763 ti->hsync_offset_lo);
764 mode->hsync_end = mode->hsync_start + \
765 ((ti->hsync_pulse_width_hi << 8) | \
766 ti->hsync_pulse_width_lo);
767 mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \
768 ti->hblank_lo);
769 mode->vsync_start = \
770 mode->vdisplay + ((ti->vsync_offset_hi << 8) | \
771 ti->vsync_offset_lo);
772 mode->vsync_end = \
773 mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) | \
774 ti->vsync_pulse_width_lo);
775 mode->vtotal = mode->vdisplay + \
776 ((ti->vblank_hi << 8) | ti->vblank_lo);
777 mode->clock = ti->pixel_clock * 10;
778 } else {
779 if(dsi_config->type == MDFLD_DSI_ENCODER_DPI) {
780 if (mdfld_get_panel_type(dev, pipe) == TMD_VID) {
781 mode->hdisplay = 480;
782 mode->vdisplay = 854;
783 mode->hsync_start = 487;
784 mode->hsync_end = 490;
785 mode->htotal = 499;
786 mode->vsync_start = 861;
787 mode->vsync_end = 865;
788 mode->vtotal = 873;
789 mode->clock = 33264;
790 } else {
791 mode->hdisplay = 864;
792 mode->vdisplay = 480;
793 mode->hsync_start = 873;
794 mode->hsync_end = 876;
795 mode->htotal = 887;
796 mode->vsync_start = 487;
797 mode->vsync_end = 490;
798 mode->vtotal = 499;
799 mode->clock = 33264;
800 }
801 } else if(dsi_config->type == MDFLD_DSI_ENCODER_DBI) {
802 mode->hdisplay = 864;
803 mode->vdisplay = 480;
804 mode->hsync_start = 872;
805 mode->hsync_end = 876;
806 mode->htotal = 884;
807 mode->vsync_start = 482;
808 mode->vsync_end = 494;
809 mode->vtotal = 486;
810 mode->clock = 25777;
811
812 }
813 }
814
815 drm_mode_set_name(mode);
816 drm_mode_set_crtcinfo(mode, 0);
817
818 mode->type |= DRM_MODE_TYPE_PREFERRED;
819
820 return mode;
821}
822
823int mdfld_dsi_panel_reset(int pipe)
824{
825 unsigned gpio;
826 int ret = 0;
827
828 switch (pipe) {
829 case 0:
830 gpio = 128;
831 break;
832 case 2:
833 gpio = 34;
834 break;
835 default:
836 DRM_ERROR("Invalid output\n");
837 return -EINVAL;
838 }
839
840 ret = gpio_request(gpio, "gfx");
841 if (ret) {
842		DRM_ERROR("gpio_request failed\n");
843 return ret;
844 }
845
846 ret = gpio_direction_output(gpio, 1);
847 if (ret) {
848 DRM_ERROR("gpio_direction_output failed\n");
849 goto gpio_error;
850 }
851
852	gpio_get_value(gpio);
853
854gpio_error:
855 if (gpio_is_valid(gpio))
856 gpio_free(gpio);
857
858 return ret;
859}
860
861/*
862 * MIPI output init
863 * @dev: drm device
864 * @pipe: pipe number, 0 or 2
865 * @config: DSI configuration; if NULL a default configuration is allocated
866 *
867 * Do the initialization of a MIPI (DSI) output on @pipe, including the
868 * creation of the DRM mode objects for it.
869 */
870void mdfld_dsi_output_init(struct drm_device *dev,
871 int pipe,
872 struct mdfld_dsi_config *config,
873 struct panel_funcs* p_cmd_funcs,
874 struct panel_funcs* p_vid_funcs)
875{
876 struct mdfld_dsi_config * dsi_config;
877 struct mdfld_dsi_connector * dsi_connector;
878 struct psb_intel_output * psb_output;
879 struct drm_connector * connector;
880 struct mdfld_dsi_encoder * encoder;
881 struct drm_psb_private * dev_priv = dev->dev_private;
882 struct panel_info dsi_panel_info;
883 u32 width_mm, height_mm;
884
885 dev_dbg(dev->dev, "init DSI output on pipe %d\n", pipe);
886
887 if(!dev || ((pipe != 0) && (pipe != 2))) {
888 WARN_ON(1);
889 return;
890 }
891
892	/* create a new connector */
893 dsi_connector = kzalloc(sizeof(struct mdfld_dsi_connector), GFP_KERNEL);
894 if(!dsi_connector) {
895 DRM_ERROR("No memory");
896 return;
897 }
898
899 dsi_connector->pipe = pipe;
900
901 /*set DSI config*/
902 if(config) {
903 dsi_config = config;
904 } else {
905 dsi_config = kzalloc(sizeof(struct mdfld_dsi_config), GFP_KERNEL);
906 if(!dsi_config) {
907 dev_err(dev->dev,
908 "cannot allocate memory for DSI config\n");
909 goto dsi_init_err0;
910 }
911
912 mdfld_dsi_get_default_config(dev, dsi_config, pipe);
913 }
914
915 dsi_connector->private = dsi_config;
916
917 dsi_config->changed = 1;
918 dsi_config->dev = dev;
919
920	/* Init fixed mode based on the DSI config type */
921 if(dsi_config->type == MDFLD_DSI_ENCODER_DBI) {
922 dsi_config->fixed_mode = p_cmd_funcs->get_config_mode(dev);
923 if(p_cmd_funcs->get_panel_info(dev, pipe, &dsi_panel_info))
924 goto dsi_init_err0;
925 } else if(dsi_config->type == MDFLD_DSI_ENCODER_DPI) {
926 dsi_config->fixed_mode = p_vid_funcs->get_config_mode(dev);
927 if(p_vid_funcs->get_panel_info(dev, pipe, &dsi_panel_info))
928 goto dsi_init_err0;
929 }
930
931 width_mm = dsi_panel_info.width_mm;
932 height_mm = dsi_panel_info.height_mm;
933
934 dsi_config->mode = dsi_config->fixed_mode;
935 dsi_config->connector = dsi_connector;
936
937 if(!dsi_config->fixed_mode) {
938		dev_err(dev->dev, "No panel fixed mode was found\n");
939 goto dsi_init_err0;
940 }
941
942 if(pipe && dev_priv->dsi_configs[0]) {
943 dsi_config->dvr_ic_inited = 0;
944 dev_priv->dsi_configs[1] = dsi_config;
945 } else if(pipe == 0) {
946 dsi_config->dvr_ic_inited = 1;
947 dev_priv->dsi_configs[0] = dsi_config;
948 } else {
949 dev_err(dev->dev, "Trying to init MIPI1 before MIPI0\n");
950 goto dsi_init_err0;
951 }
952
953 /*init drm connector object*/
954 psb_output = &dsi_connector->base;
955
956 psb_output->type = (pipe == 0) ? INTEL_OUTPUT_MIPI : INTEL_OUTPUT_MIPI2;
957
958 connector = &psb_output->base;
959 /* Revisit type if MIPI/HDMI bridges ever appear on Medfield */
960 drm_connector_init(dev, connector, &mdfld_dsi_connector_funcs,
961 DRM_MODE_CONNECTOR_LVDS);
962 drm_connector_helper_add(connector, &mdfld_dsi_connector_helper_funcs);
963
964 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
965 connector->display_info.width_mm = width_mm;
966 connector->display_info.height_mm = height_mm;
967 connector->interlace_allowed = false;
968 connector->doublescan_allowed = false;
969
970 /* Attach properties */
971 drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN);
972 drm_connector_attach_property(connector, dev_priv->backlight_property, MDFLD_DSI_BRIGHTNESS_MAX_LEVEL);
973
974 /* Init DSI package sender on this output */
975 if (mdfld_dsi_pkg_sender_init(dsi_connector, pipe)) {
976 DRM_ERROR("Package Sender initialization failed on pipe %d\n", pipe);
977 goto dsi_init_err0;
978 }
979
980 /* Init DBI & DPI encoders */
981 if (p_cmd_funcs) {
982 encoder = mdfld_dsi_dbi_init(dev, dsi_connector, p_cmd_funcs);
983 if(!encoder) {
984 dev_err(dev->dev, "Create DBI encoder failed\n");
985 goto dsi_init_err1;
986 }
987 encoder->private = dsi_config;
988 dsi_config->encoders[MDFLD_DSI_ENCODER_DBI] = encoder;
989 }
990
991 if(p_vid_funcs) {
992 encoder = mdfld_dsi_dpi_init(dev, dsi_connector, p_vid_funcs);
993 if(!encoder) {
994 dev_err(dev->dev, "Create DPI encoder failed\n");
995 goto dsi_init_err1;
996 }
997 encoder->private = dsi_config;
998 dsi_config->encoders[MDFLD_DSI_ENCODER_DPI] = encoder;
999 }
1000
1001 drm_sysfs_connector_add(connector);
1002 return;
1003
1004 /*TODO: add code to destroy outputs on error*/
1005dsi_init_err1:
1006 /*destroy sender*/
1007 mdfld_dsi_pkg_sender_destroy(dsi_connector->pkg_sender);
1008
1009 drm_connector_cleanup(connector);
1010 kfree(dsi_config->fixed_mode);
1011 kfree(dsi_config);
1012dsi_init_err0:
1013 kfree(dsi_connector);
1014}
diff --git a/drivers/staging/gma500/mdfld_dsi_output.h b/drivers/staging/gma500/mdfld_dsi_output.h
deleted file mode 100644
index 4699267efd60..000000000000
--- a/drivers/staging/gma500/mdfld_dsi_output.h
+++ /dev/null
@@ -1,138 +0,0 @@
1/*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * jim liu <jim.liu@intel.com>
25 * Jackie Li<yaodong.li@intel.com>
26 */
27
28#ifndef __MDFLD_DSI_OUTPUT_H__
29#define __MDFLD_DSI_OUTPUT_H__
30
31#include <linux/backlight.h>
32#include <drm/drmP.h>
33#include <drm/drm.h>
34#include <drm/drm_crtc.h>
35#include <drm/drm_edid.h>
36
37#include "psb_drv.h"
38#include "psb_intel_drv.h"
39#include "psb_intel_reg.h"
40#include "power.h"
41#include "mdfld_output.h"
42
43#include <asm/mrst.h>
44
45
46static inline struct mdfld_dsi_config *
47 mdfld_dsi_get_config(struct mdfld_dsi_connector *connector)
48{
49 if (!connector)
50 return NULL;
51 return (struct mdfld_dsi_config *)connector->private;
52}
53
54static inline void *mdfld_dsi_get_pkg_sender(struct mdfld_dsi_config *config)
55{
56 struct mdfld_dsi_connector *dsi_connector;
57
58 if (!config)
59 return NULL;
60
61 dsi_connector = config->connector;
62
63 if (!dsi_connector)
64 return NULL;
65
66 return dsi_connector->pkg_sender;
67}
68
69static inline struct mdfld_dsi_config *
70 mdfld_dsi_encoder_get_config(struct mdfld_dsi_encoder *encoder)
71{
72 if (!encoder)
73 return NULL;
74 return (struct mdfld_dsi_config *)encoder->private;
75}
76
77static inline struct mdfld_dsi_connector *
78 mdfld_dsi_encoder_get_connector(struct mdfld_dsi_encoder *encoder)
79{
80 struct mdfld_dsi_config *config;
81
82 if (!encoder)
83 return NULL;
84
85 config = mdfld_dsi_encoder_get_config(encoder);
86 if (!config)
87 return NULL;
88
89 return config->connector;
90}
91
92static inline void *mdfld_dsi_encoder_get_pkg_sender(
93 struct mdfld_dsi_encoder *encoder)
94{
95 struct mdfld_dsi_config *dsi_config;
96
97 dsi_config = mdfld_dsi_encoder_get_config(encoder);
98 if (!dsi_config)
99 return NULL;
100
101 return mdfld_dsi_get_pkg_sender(dsi_config);
102}
103
104static inline int mdfld_dsi_encoder_get_pipe(struct mdfld_dsi_encoder *encoder)
105{
106 struct mdfld_dsi_connector *connector;
107
108 if (!encoder)
109 return -1;
110
111 connector = mdfld_dsi_encoder_get_connector(encoder);
112 if (!connector)
113 return -1;
114
115 return connector->pipe;
116}
117
118extern void mdfld_dsi_gen_fifo_ready(struct drm_device *dev,
119 u32 gen_fifo_stat_reg, u32 fifo_stat);
120extern void mdfld_dsi_brightness_init(struct mdfld_dsi_config *dsi_config,
121 int pipe);
122extern void mdfld_dsi_brightness_control(struct drm_device *dev, int pipe,
123 int level);
124extern void mdfld_dsi_output_init(struct drm_device *dev, int pipe,
125 struct mdfld_dsi_config *config,
126 struct panel_funcs *p_cmd_funcs,
127 struct panel_funcs *p_vid_funcs);
128extern void mdfld_dsi_controller_init(struct mdfld_dsi_config *dsi_config,
129 int pipe);
130extern int mdfld_dsi_get_power_mode(struct mdfld_dsi_config *dsi_config,
131 u32 *mode,
132 u8 transmission);
133extern int mdfld_dsi_get_diagnostic_result(struct mdfld_dsi_config *dsi_config,
134 u32 *result,
135 u8 transmission);
136extern int mdfld_dsi_panel_reset(int pipe);
137
138#endif /*__MDFLD_DSI_OUTPUT_H__*/
diff --git a/drivers/staging/gma500/mdfld_dsi_pkg_sender.c b/drivers/staging/gma500/mdfld_dsi_pkg_sender.c
deleted file mode 100644
index 9b96a5c9abcd..000000000000
--- a/drivers/staging/gma500/mdfld_dsi_pkg_sender.c
+++ /dev/null
@@ -1,1484 +0,0 @@
1/*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Jackie Li <yaodong.li@intel.com>
25 */
26
27#include <linux/freezer.h>
28
29#include "mdfld_dsi_output.h"
30#include "mdfld_dsi_pkg_sender.h"
31#include "mdfld_dsi_dbi.h"
32#include "mdfld_dsi_dpi.h"
33
34#define MDFLD_DSI_DBI_FIFO_TIMEOUT 100
35#define MDFLD_DSI_MAX_RETURN_PACKET_SIZE 512
36#define MDFLD_DSI_READ_MAX_COUNT 5000
37
38static const char * const dsi_errors[] = {
39 "RX SOT Error",
40 "RX SOT Sync Error",
41 "RX EOT Sync Error",
42 "RX Escape Mode Entry Error",
43 "RX LP TX Sync Error",
44 "RX HS Receive Timeout Error",
45 "RX False Control Error",
46 "RX ECC Single Bit Error",
47 "RX ECC Multibit Error",
48 "RX Checksum Error",
49 "RX DSI Data Type Not Recognised",
50 "RX DSI VC ID Invalid",
51 "TX False Control Error",
52 "TX ECC Single Bit Error",
53 "TX ECC Multibit Error",
54 "TX Checksum Error",
55 "TX DSI Data Type Not Recognised",
56 "TX DSI VC ID invalid",
57 "High Contention",
58 "Low contention",
59 "DPI FIFO Under run",
60 "HS TX Timeout",
61 "LP RX Timeout",
62 "Turn Around ACK Timeout",
63 "ACK With No Error",
64 "RX Invalid TX Length",
65 "RX Prot Violation",
66 "HS Generic Write FIFO Full",
67 "LP Generic Write FIFO Full",
68 "Generic Read Data Avail",
69 "Special Packet Sent",
70 "Tearing Effect",
71};
72
73static int wait_for_gen_fifo_empty(struct mdfld_dsi_pkg_sender *sender,
74 u32 mask)
75{
76 struct drm_device *dev = sender->dev;
77 u32 gen_fifo_stat_reg = sender->mipi_gen_fifo_stat_reg;
78 int retry = 0xffff;
79
80 while (retry--) {
81 if ((mask & REG_READ(gen_fifo_stat_reg)) == mask)
82 return 0;
83 udelay(100);
84 }
85 dev_err(dev->dev, "fifo is NOT empty 0x%08x\n",
86 REG_READ(gen_fifo_stat_reg));
87 return -EIO;
88}
89
90static int wait_for_all_fifos_empty(struct mdfld_dsi_pkg_sender *sender)
91{
92 return wait_for_gen_fifo_empty(sender, (1 << 2) | (1 << 10) | (1 << 18)
93 | (1 << 26) | (1 << 27) | (1 << 28));
94}
95
96static int wait_for_lp_fifos_empty(struct mdfld_dsi_pkg_sender *sender)
97{
98 return wait_for_gen_fifo_empty(sender, (1 << 10) | (1 << 26));
99}
100
101static int wait_for_hs_fifos_empty(struct mdfld_dsi_pkg_sender *sender)
102{
103 return wait_for_gen_fifo_empty(sender, (1 << 2) | (1 << 18));
104}
105
106static int wait_for_dbi_fifo_empty(struct mdfld_dsi_pkg_sender *sender)
107{
108 return wait_for_gen_fifo_empty(sender, (1 << 27));
109}
110
111static int handle_dsi_error(struct mdfld_dsi_pkg_sender *sender, u32 mask)
112{
113 u32 intr_stat_reg = sender->mipi_intr_stat_reg;
114 struct drm_device *dev = sender->dev;
115
116 switch (mask) {
117 case (1 << 0):
118 case (1 << 1):
119 case (1 << 2):
120 case (1 << 3):
121 case (1 << 4):
122 case (1 << 5):
123 case (1 << 6):
124 case (1 << 7):
125 case (1 << 8):
126 case (1 << 9):
127 case (1 << 10):
128 case (1 << 11):
129 case (1 << 12):
130 case (1 << 13):
131 break;
132 case (1 << 14):
133 /*wait for all fifo empty*/
134 /*wait_for_all_fifos_empty(sender)*/;
135 break;
136 case (1 << 15):
137 break;
138 case (1 << 16):
139 break;
140 case (1 << 17):
141 break;
142 case (1 << 18):
143 case (1 << 19):
144 /*wait for contention recovery time*/
145 /*mdelay(10);*/
146 /*wait for all fifo empty*/
147 if (0)
148 wait_for_all_fifos_empty(sender);
149 break;
150 case (1 << 20):
151 break;
152 case (1 << 21):
153 /*wait for all fifo empty*/
154 /*wait_for_all_fifos_empty(sender);*/
155 break;
156 case (1 << 22):
157 break;
158 case (1 << 23):
159 case (1 << 24):
160 case (1 << 25):
161 case (1 << 26):
162 case (1 << 27):
163 /* HS Gen fifo full */
164 REG_WRITE(intr_stat_reg, mask);
165 wait_for_hs_fifos_empty(sender);
166 break;
167 case (1 << 28):
168		/* LP Gen fifo full */
169 REG_WRITE(intr_stat_reg, mask);
170 wait_for_lp_fifos_empty(sender);
171 break;
172 case (1 << 29):
173 case (1 << 30):
174 case (1 << 31):
175 break;
176 }
177
178 if (mask & REG_READ(intr_stat_reg))
179		dev_warn(dev->dev, "Cannot clear interrupt 0x%08x\n", mask);
180
181 return 0;
182}
183
184static int dsi_error_handler(struct mdfld_dsi_pkg_sender *sender)
185{
186 struct drm_device *dev = sender->dev;
187 u32 intr_stat_reg = sender->mipi_intr_stat_reg;
188 u32 mask;
189 u32 intr_stat;
190 int i;
191 int err = 0;
192
193 intr_stat = REG_READ(intr_stat_reg);
194
195 for (i = 0; i < 32; i++) {
196 mask = (0x00000001UL) << i;
197 if (intr_stat & mask) {
198 dev_dbg(dev->dev, "[DSI]: %s\n", dsi_errors[i]);
199 err = handle_dsi_error(sender, mask);
200 if (err)
201 dev_err(dev->dev, "Cannot handle error\n");
202 }
203 }
204 return err;
205}
206
207static inline int dbi_cmd_sent(struct mdfld_dsi_pkg_sender *sender)
208{
209 struct drm_device *dev = sender->dev;
210 u32 retry = 0xffff;
211 u32 dbi_cmd_addr_reg = sender->mipi_cmd_addr_reg;
212
213 /* Query the command execution status */
214	while (--retry) {
215 if (!(REG_READ(dbi_cmd_addr_reg) & (1 << 0)))
216 break;
217 }
218
219 if (!retry) {
220 dev_err(dev->dev, "Timeout waiting for DBI Command status\n");
221 return -EAGAIN;
222 }
223 return 0;
224}
225
226/*
227 * NOTE: this interface is abandoned except for the write_mem_start DCS;
228 * other DCS commands are sent via the generic pkg interfaces
229 */
230static int send_dcs_pkg(struct mdfld_dsi_pkg_sender *sender,
231 struct mdfld_dsi_pkg *pkg)
232{
233 struct drm_device *dev = sender->dev;
234 struct mdfld_dsi_dcs_pkg *dcs_pkg = &pkg->pkg.dcs_pkg;
235 u32 dbi_cmd_len_reg = sender->mipi_cmd_len_reg;
236 u32 dbi_cmd_addr_reg = sender->mipi_cmd_addr_reg;
237 u32 cb_phy = sender->dbi_cb_phy;
238 u32 index = 0;
239 u8 *cb = (u8 *)sender->dbi_cb_addr;
240 int i;
241 int ret;
242
243 if (!sender->dbi_pkg_support) {
244		dev_err(dev->dev, "Trying to send DCS on a non-DBI output, abort!\n");
245 return -ENOTSUPP;
246 }
247
248 /*wait for DBI fifo empty*/
249 wait_for_dbi_fifo_empty(sender);
250
251 *(cb + (index++)) = dcs_pkg->cmd;
252 if (dcs_pkg->param_num) {
253 for (i = 0; i < dcs_pkg->param_num; i++)
254 *(cb + (index++)) = *(dcs_pkg->param + i);
255 }
256
257 REG_WRITE(dbi_cmd_len_reg, (1 + dcs_pkg->param_num));
258 REG_WRITE(dbi_cmd_addr_reg,
259 (cb_phy << CMD_MEM_ADDR_OFFSET)
260 | (1 << 0)
261 | ((dcs_pkg->data_src == CMD_DATA_SRC_PIPE) ? (1 << 1) : 0));
262
263 ret = dbi_cmd_sent(sender);
264 if (ret) {
265 dev_err(dev->dev, "command 0x%x not complete\n", dcs_pkg->cmd);
266 return -EAGAIN;
267 }
268 return 0;
269}
270
271static int __send_short_pkg(struct mdfld_dsi_pkg_sender *sender,
272 struct mdfld_dsi_pkg *pkg)
273{
274 struct drm_device *dev = sender->dev;
275 u32 hs_gen_ctrl_reg = sender->mipi_hs_gen_ctrl_reg;
276 u32 lp_gen_ctrl_reg = sender->mipi_lp_gen_ctrl_reg;
277 u32 gen_ctrl_val = 0;
278 struct mdfld_dsi_gen_short_pkg *short_pkg = &pkg->pkg.short_pkg;
279
280 gen_ctrl_val |= short_pkg->cmd << MCS_COMMANDS_POS;
281 gen_ctrl_val |= 0 << DCS_CHANNEL_NUMBER_POS;
282 gen_ctrl_val |= pkg->pkg_type;
283 gen_ctrl_val |= short_pkg->param << MCS_PARAMETER_POS;
284
285 if (pkg->transmission_type == MDFLD_DSI_HS_TRANSMISSION) {
286 /* wait for hs fifo empty */
287 /* wait_for_hs_fifos_empty(sender); */
288 /* Send pkg */
289 REG_WRITE(hs_gen_ctrl_reg, gen_ctrl_val);
290 } else if (pkg->transmission_type == MDFLD_DSI_LP_TRANSMISSION) {
291 /* wait_for_lp_fifos_empty(sender); */
292 /* Send pkg*/
293 REG_WRITE(lp_gen_ctrl_reg, gen_ctrl_val);
294 } else {
295 dev_err(dev->dev, "Unknown transmission type %d\n",
296 pkg->transmission_type);
297 return -EINVAL;
298 }
299
300 return 0;
301}
302
303static int __send_long_pkg(struct mdfld_dsi_pkg_sender *sender,
304 struct mdfld_dsi_pkg *pkg)
305{
306 struct drm_device *dev = sender->dev;
307 u32 hs_gen_ctrl_reg = sender->mipi_hs_gen_ctrl_reg;
308 u32 hs_gen_data_reg = sender->mipi_hs_gen_data_reg;
309 u32 lp_gen_ctrl_reg = sender->mipi_lp_gen_ctrl_reg;
310 u32 lp_gen_data_reg = sender->mipi_lp_gen_data_reg;
311 u32 gen_ctrl_val = 0;
312 u32 *dp;
313 int i;
314 struct mdfld_dsi_gen_long_pkg *long_pkg = &pkg->pkg.long_pkg;
315
316 dp = long_pkg->data;
317
318 /*
319 * Set up word count for long pkg
320 * FIXME: double check word count field.
321 * currently, using the byte counts of the payload as the word count.
322 * ------------------------------------------------------------
323 * | DI | WC | ECC| PAYLOAD |CHECKSUM|
324 * ------------------------------------------------------------
325 */
326 gen_ctrl_val |= (long_pkg->len << 2) << WORD_COUNTS_POS;
327 gen_ctrl_val |= 0 << DCS_CHANNEL_NUMBER_POS;
328 gen_ctrl_val |= pkg->pkg_type;
329
330 if (pkg->transmission_type == MDFLD_DSI_HS_TRANSMISSION) {
331 /* Wait for hs ctrl and data fifos to be empty */
332 /* wait_for_hs_fifos_empty(sender); */
333 for (i = 0; i < long_pkg->len; i++)
334 REG_WRITE(hs_gen_data_reg, *(dp + i));
335 REG_WRITE(hs_gen_ctrl_reg, gen_ctrl_val);
336 } else if (pkg->transmission_type == MDFLD_DSI_LP_TRANSMISSION) {
337 /* wait_for_lp_fifos_empty(sender); */
338 for (i = 0; i < long_pkg->len; i++)
339 REG_WRITE(lp_gen_data_reg, *(dp + i));
340 REG_WRITE(lp_gen_ctrl_reg, gen_ctrl_val);
341 } else {
342 dev_err(dev->dev, "Unknown transmission type %d\n",
343 pkg->transmission_type);
344 return -EINVAL;
345 }
346
347 return 0;
348
349}
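
(Aside: the layout comment inside __send_long_pkg is the only place the long-packet header packing is spelled out. Below is a rough, standalone sketch of that packing, not driver code; the real WORD_COUNTS_POS and DCS_CHANNEL_NUMBER_POS values live elsewhere in the driver, so the EXAMPLE_* shifts here are placeholders.)

/*
 * Standalone illustration of packing a DSI generic long-packet header:
 * data type in the low bits, then virtual channel, then the word count,
 * which the driver carries as the payload byte count (len << 2 above).
 * EXAMPLE_* shifts are placeholders, not the driver's real macro values.
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_VC_SHIFT		6	/* placeholder for DCS_CHANNEL_NUMBER_POS */
#define EXAMPLE_WORD_COUNT_SHIFT	8	/* placeholder for WORD_COUNTS_POS */

static uint32_t example_long_pkg_header(uint8_t data_type, uint8_t vc,
					uint32_t payload_dwords)
{
	uint32_t byte_count = payload_dwords * 4;	/* same as len << 2 */

	return (uint32_t)data_type |
	       ((uint32_t)vc << EXAMPLE_VC_SHIFT) |
	       (byte_count << EXAMPLE_WORD_COUNT_SHIFT);
}

int main(void)
{
	/* e.g. an MCS long write (0x39) of two dwords on virtual channel 0 */
	printf("header = 0x%08x\n",
	       (unsigned)example_long_pkg_header(0x39, 0, 2));
	return 0;
}
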
350
351static int send_mcs_short_pkg(struct mdfld_dsi_pkg_sender *sender,
352 struct mdfld_dsi_pkg *pkg)
353{
354 return __send_short_pkg(sender, pkg);
355}
356
357static int send_mcs_long_pkg(struct mdfld_dsi_pkg_sender *sender,
358 struct mdfld_dsi_pkg *pkg)
359{
360 return __send_long_pkg(sender, pkg);
361}
362
363static int send_gen_short_pkg(struct mdfld_dsi_pkg_sender *sender,
364 struct mdfld_dsi_pkg *pkg)
365{
366 return __send_short_pkg(sender, pkg);
367}
368
369static int send_gen_long_pkg(struct mdfld_dsi_pkg_sender *sender,
370 struct mdfld_dsi_pkg *pkg)
371{
372 return __send_long_pkg(sender, pkg);
373}
374
375static int send_pkg_prepare(struct mdfld_dsi_pkg_sender *sender,
376 struct mdfld_dsi_pkg *pkg)
377{
378 u8 cmd;
379 u8 *data;
380
381 switch (pkg->pkg_type) {
382 case MDFLD_DSI_PKG_DCS:
383 cmd = pkg->pkg.dcs_pkg.cmd;
384 break;
385 case MDFLD_DSI_PKG_MCS_SHORT_WRITE_0:
386 case MDFLD_DSI_PKG_MCS_SHORT_WRITE_1:
387 cmd = pkg->pkg.short_pkg.cmd;
388 break;
389 case MDFLD_DSI_PKG_MCS_LONG_WRITE:
390 data = (u8 *)pkg->pkg.long_pkg.data;
391 cmd = *data;
392 break;
393 default:
394 return 0;
395 }
396
397	/* This prevents other packages from being sent while doing msleep */
398 sender->status = MDFLD_DSI_PKG_SENDER_BUSY;
399
400	/* Check panel mode vs. the command being sent */
401 if ((sender->panel_mode & MDFLD_DSI_PANEL_MODE_SLEEP) &&
402 cmd != exit_sleep_mode) {
403 dev_err(sender->dev->dev,
404			"sending 0x%x while panel is in sleep mode\n", cmd);
405 sender->status = MDFLD_DSI_PKG_SENDER_FREE;
406 return -EINVAL;
407 }
408
409	/* Wait 120 milliseconds in case exit_sleep_mode was just sent */
410 if (cmd == DCS_ENTER_SLEEP_MODE) {
411 /*TODO: replace it with msleep later*/
412 mdelay(120);
413 }
414 return 0;
415}
416
417static int send_pkg_done(struct mdfld_dsi_pkg_sender *sender,
418 struct mdfld_dsi_pkg *pkg)
419{
420 u8 cmd;
421 u8 *data;
422
423 switch (pkg->pkg_type) {
424 case MDFLD_DSI_PKG_DCS:
425 cmd = pkg->pkg.dcs_pkg.cmd;
426 break;
427 case MDFLD_DSI_PKG_MCS_SHORT_WRITE_0:
428 case MDFLD_DSI_PKG_MCS_SHORT_WRITE_1:
429 cmd = pkg->pkg.short_pkg.cmd;
430 break;
431 case MDFLD_DSI_PKG_MCS_LONG_WRITE:
432 data = (u8 *)pkg->pkg.long_pkg.data;
433 cmd = *data;
434 break;
435 default:
436 return 0;
437 }
438
439 /* Update panel status */
440 if (cmd == DCS_ENTER_SLEEP_MODE) {
441 sender->panel_mode |= MDFLD_DSI_PANEL_MODE_SLEEP;
442 /*TODO: replace it with msleep later*/
443 mdelay(120);
444 } else if (cmd == DCS_EXIT_SLEEP_MODE) {
445 sender->panel_mode &= ~MDFLD_DSI_PANEL_MODE_SLEEP;
446 /*TODO: replace it with msleep later*/
447 mdelay(120);
448 } else if (unlikely(cmd == DCS_SOFT_RESET)) {
449 /*TODO: replace it with msleep later*/
450 mdelay(5);
451 }
452 sender->status = MDFLD_DSI_PKG_SENDER_FREE;
453 return 0;
454
455}
456
457static int do_send_pkg(struct mdfld_dsi_pkg_sender *sender,
458 struct mdfld_dsi_pkg *pkg)
459{
460 int ret;
461
462 if (sender->status == MDFLD_DSI_PKG_SENDER_BUSY) {
463 dev_err(sender->dev->dev, "sender is busy\n");
464 return -EAGAIN;
465 }
466
467 ret = send_pkg_prepare(sender, pkg);
468 if (ret) {
469 dev_err(sender->dev->dev, "send_pkg_prepare error\n");
470 return ret;
471 }
472
473 switch (pkg->pkg_type) {
474 case MDFLD_DSI_PKG_DCS:
475 ret = send_dcs_pkg(sender, pkg);
476 break;
477 case MDFLD_DSI_PKG_GEN_SHORT_WRITE_0:
478 case MDFLD_DSI_PKG_GEN_SHORT_WRITE_1:
479 case MDFLD_DSI_PKG_GEN_SHORT_WRITE_2:
480 case MDFLD_DSI_PKG_GEN_READ_0:
481 case MDFLD_DSI_PKG_GEN_READ_1:
482 case MDFLD_DSI_PKG_GEN_READ_2:
483 ret = send_gen_short_pkg(sender, pkg);
484 break;
485 case MDFLD_DSI_PKG_GEN_LONG_WRITE:
486 ret = send_gen_long_pkg(sender, pkg);
487 break;
488 case MDFLD_DSI_PKG_MCS_SHORT_WRITE_0:
489 case MDFLD_DSI_PKG_MCS_SHORT_WRITE_1:
490 case MDFLD_DSI_PKG_MCS_READ:
491 ret = send_mcs_short_pkg(sender, pkg);
492 break;
493 case MDFLD_DSI_PKG_MCS_LONG_WRITE:
494 ret = send_mcs_long_pkg(sender, pkg);
495 break;
496 default:
497 dev_err(sender->dev->dev, "Invalid pkg type 0x%x\n",
498 pkg->pkg_type);
499 ret = -EINVAL;
500 }
501 send_pkg_done(sender, pkg);
502 return ret;
503}
504
505static int send_pkg(struct mdfld_dsi_pkg_sender *sender,
506 struct mdfld_dsi_pkg *pkg)
507{
508	int err;
509
510 /* Handle DSI error */
511 err = dsi_error_handler(sender);
512 if (err) {
513 dev_err(sender->dev->dev, "Error handling failed\n");
514 err = -EAGAIN;
515 goto send_pkg_err;
516 }
517
518 /* Send pkg */
519 err = do_send_pkg(sender, pkg);
520 if (err) {
521		dev_err(sender->dev->dev, "sending pkg failed\n");
522 err = -EAGAIN;
523 goto send_pkg_err;
524 }
525
526 /* FIXME: should I query complete and fifo empty here? */
527send_pkg_err:
528 return err;
529}
530
531static struct mdfld_dsi_pkg *pkg_sender_get_pkg_locked(
532 struct mdfld_dsi_pkg_sender *sender)
533{
534 struct mdfld_dsi_pkg *pkg;
535
536 if (list_empty(&sender->free_list)) {
537 dev_err(sender->dev->dev, "No free pkg left\n");
538 return NULL;
539 }
540 pkg = list_first_entry(&sender->free_list, struct mdfld_dsi_pkg, entry);
541 /* Detach from free list */
542 list_del_init(&pkg->entry);
543 return pkg;
544}
545
546static void pkg_sender_put_pkg_locked(struct mdfld_dsi_pkg_sender *sender,
547 struct mdfld_dsi_pkg *pkg)
548{
549 memset(pkg, 0, sizeof(struct mdfld_dsi_pkg));
550 INIT_LIST_HEAD(&pkg->entry);
551 list_add_tail(&pkg->entry, &sender->free_list);
552}
553
554static int mdfld_dbi_cb_init(struct mdfld_dsi_pkg_sender *sender,
555 struct psb_gtt *pg, int pipe)
556{
557 unsigned long phys;
558 void *virt_addr = NULL;
559
560 switch (pipe) {
561 case 0:
562 /* FIXME: Doesn't this collide with stolen space ? */
563 phys = pg->gtt_phys_start - 0x1000;
564 break;
565 case 2:
566 phys = pg->gtt_phys_start - 0x800;
567 break;
568 default:
569 dev_err(sender->dev->dev, "Unsupported channel %d\n", pipe);
570 return -EINVAL;
571 }
572
573 virt_addr = ioremap_nocache(phys, 0x800);
574 if (!virt_addr) {
575 dev_err(sender->dev->dev, "Map DBI command buffer error\n");
576 return -ENOMEM;
577 }
578 sender->dbi_cb_phy = phys;
579 sender->dbi_cb_addr = virt_addr;
580 return 0;
581}
582
583static void mdfld_dbi_cb_destroy(struct mdfld_dsi_pkg_sender *sender)
584{
585 if (sender && sender->dbi_cb_addr)
586 iounmap(sender->dbi_cb_addr);
587}
588
589static void pkg_sender_queue_pkg(struct mdfld_dsi_pkg_sender *sender,
590 struct mdfld_dsi_pkg *pkg,
591 int delay)
592{
593 unsigned long flags;
594
595 spin_lock_irqsave(&sender->lock, flags);
596
597 if (!delay) {
598 send_pkg(sender, pkg);
599 pkg_sender_put_pkg_locked(sender, pkg);
600 } else {
601 /* Queue it */
602 list_add_tail(&pkg->entry, &sender->pkg_list);
603 }
604 spin_unlock_irqrestore(&sender->lock, flags);
605}
606
607static void process_pkg_list(struct mdfld_dsi_pkg_sender *sender)
608{
609 struct mdfld_dsi_pkg *pkg;
610 unsigned long flags;
611
612 spin_lock_irqsave(&sender->lock, flags);
613
614 while (!list_empty(&sender->pkg_list)) {
615 pkg = list_first_entry(&sender->pkg_list,
616 struct mdfld_dsi_pkg, entry);
617 send_pkg(sender, pkg);
618 list_del_init(&pkg->entry);
619 pkg_sender_put_pkg_locked(sender, pkg);
620 }
621
622 spin_unlock_irqrestore(&sender->lock, flags);
623}
624
625static int mdfld_dsi_send_mcs_long(struct mdfld_dsi_pkg_sender *sender,
626 u32 *data, u32 len, u8 transmission, int delay)
627{
628 struct mdfld_dsi_pkg *pkg;
629 unsigned long flags;
630
631 spin_lock_irqsave(&sender->lock, flags);
632 pkg = pkg_sender_get_pkg_locked(sender);
633 spin_unlock_irqrestore(&sender->lock, flags);
634
635 if (!pkg) {
636 dev_err(sender->dev->dev, "No memory\n");
637 return -ENOMEM;
638 }
639 pkg->pkg_type = MDFLD_DSI_PKG_MCS_LONG_WRITE;
640 pkg->transmission_type = transmission;
641 pkg->pkg.long_pkg.data = data;
642 pkg->pkg.long_pkg.len = len;
643 INIT_LIST_HEAD(&pkg->entry);
644
645 pkg_sender_queue_pkg(sender, pkg, delay);
646 return 0;
647}
648
649static int mdfld_dsi_send_mcs_short(struct mdfld_dsi_pkg_sender *sender,
650 u8 cmd, u8 param, u8 param_num,
651 u8 transmission,
652 int delay)
653{
654 struct mdfld_dsi_pkg *pkg;
655 unsigned long flags;
656
657 spin_lock_irqsave(&sender->lock, flags);
658 pkg = pkg_sender_get_pkg_locked(sender);
659 spin_unlock_irqrestore(&sender->lock, flags);
660
661 if (!pkg) {
662 dev_err(sender->dev->dev, "No memory\n");
663 return -ENOMEM;
664 }
665
666 if (param_num) {
667 pkg->pkg_type = MDFLD_DSI_PKG_MCS_SHORT_WRITE_1;
668 pkg->pkg.short_pkg.param = param;
669 } else {
670 pkg->pkg_type = MDFLD_DSI_PKG_MCS_SHORT_WRITE_0;
671 pkg->pkg.short_pkg.param = 0;
672 }
673 pkg->transmission_type = transmission;
674 pkg->pkg.short_pkg.cmd = cmd;
675 INIT_LIST_HEAD(&pkg->entry);
676
677 pkg_sender_queue_pkg(sender, pkg, delay);
678 return 0;
679}
680
681static int mdfld_dsi_send_gen_short(struct mdfld_dsi_pkg_sender *sender,
682 u8 param0, u8 param1, u8 param_num,
683 u8 transmission,
684 int delay)
685{
686 struct mdfld_dsi_pkg *pkg;
687 unsigned long flags;
688
689 spin_lock_irqsave(&sender->lock, flags);
690 pkg = pkg_sender_get_pkg_locked(sender);
691 spin_unlock_irqrestore(&sender->lock, flags);
692
693 if (!pkg) {
694 dev_err(sender->dev->dev, "No pkg memory\n");
695 return -ENOMEM;
696 }
697
698 switch (param_num) {
699 case 0:
700 pkg->pkg_type = MDFLD_DSI_PKG_GEN_SHORT_WRITE_0;
701 pkg->pkg.short_pkg.cmd = 0;
702 pkg->pkg.short_pkg.param = 0;
703 break;
704 case 1:
705 pkg->pkg_type = MDFLD_DSI_PKG_GEN_SHORT_WRITE_1;
706 pkg->pkg.short_pkg.cmd = param0;
707 pkg->pkg.short_pkg.param = 0;
708 break;
709 case 2:
710 pkg->pkg_type = MDFLD_DSI_PKG_GEN_SHORT_WRITE_2;
711 pkg->pkg.short_pkg.cmd = param0;
712 pkg->pkg.short_pkg.param = param1;
713 break;
714 }
715
716 pkg->transmission_type = transmission;
717 INIT_LIST_HEAD(&pkg->entry);
718
719 pkg_sender_queue_pkg(sender, pkg, delay);
720 return 0;
721}
722
723static int mdfld_dsi_send_gen_long(struct mdfld_dsi_pkg_sender *sender,
724 u32 *data, u32 len, u8 transmission, int delay)
725{
726 struct mdfld_dsi_pkg *pkg;
727 unsigned long flags;
728
729 spin_lock_irqsave(&sender->lock, flags);
730 pkg = pkg_sender_get_pkg_locked(sender);
731 spin_unlock_irqrestore(&sender->lock, flags);
732
733 if (!pkg) {
734 dev_err(sender->dev->dev, "No pkg memory\n");
735 return -ENOMEM;
736 }
737
738 pkg->pkg_type = MDFLD_DSI_PKG_GEN_LONG_WRITE;
739 pkg->transmission_type = transmission;
740 pkg->pkg.long_pkg.data = data;
741 pkg->pkg.long_pkg.len = len;
742
743 INIT_LIST_HEAD(&pkg->entry);
744
745 pkg_sender_queue_pkg(sender, pkg, delay);
746
747 return 0;
748}
749
750static int __read_panel_data(struct mdfld_dsi_pkg_sender *sender,
751 struct mdfld_dsi_pkg *pkg,
752 u32 *data,
753 u16 len)
754{
755 unsigned long flags;
756 struct drm_device *dev = sender->dev;
757 int i;
758 u32 gen_data_reg;
759 int retry = MDFLD_DSI_READ_MAX_COUNT;
760 u8 transmission = pkg->transmission_type;
761
762 /*
763 * do reading.
764 * 0) send out generic read request
765 * 1) polling read data avail interrupt
766 * 2) read data
767 */
768 spin_lock_irqsave(&sender->lock, flags);
769
770 REG_WRITE(sender->mipi_intr_stat_reg, 1 << 29);
771
772 if ((REG_READ(sender->mipi_intr_stat_reg) & (1 << 29)))
773		DRM_ERROR("Cannot clear read data valid interrupt\n");
774
775 /*send out read request*/
776 send_pkg(sender, pkg);
777
778 pkg_sender_put_pkg_locked(sender, pkg);
779
780 /*polling read data avail interrupt*/
781 while (retry && !(REG_READ(sender->mipi_intr_stat_reg) & (1 << 29))) {
782 udelay(100);
783 retry--;
784 }
785
786 if (!retry) {
787 spin_unlock_irqrestore(&sender->lock, flags);
788 return -ETIMEDOUT;
789 }
790
791 REG_WRITE(sender->mipi_intr_stat_reg, (1 << 29));
792
793 /*read data*/
794 if (transmission == MDFLD_DSI_HS_TRANSMISSION)
795 gen_data_reg = sender->mipi_hs_gen_data_reg;
796 else if (transmission == MDFLD_DSI_LP_TRANSMISSION)
797 gen_data_reg = sender->mipi_lp_gen_data_reg;
798 else {
799 DRM_ERROR("Unknown transmission");
800 spin_unlock_irqrestore(&sender->lock, flags);
801 return -EINVAL;
802 }
803
804	for (i = 0; i < len; i++)
805 *(data + i) = REG_READ(gen_data_reg);
806
807 spin_unlock_irqrestore(&sender->lock, flags);
808
809 return 0;
810}
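
(Aside: the numbered comment at the top of __read_panel_data, send the read request, poll the read-data-avail interrupt, then drain the data register, is essentially a bounded polling loop with a retry budget. A self-contained sketch of that pattern follows; the simulated status register and the EXAMPLE_* names are stand-ins, not the driver's REG_READ or its real interrupt-status layout.)

/*
 * Bounded-poll sketch: wait for a "read data avail" style status bit with
 * a fixed retry budget, mirroring the retry/-ETIMEDOUT handling above.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_READ_DATA_AVAIL	(1u << 29)	/* placeholder bit position */
#define EXAMPLE_MAX_POLLS	5000

/* Simulated status register: reports "data available" after a few polls. */
static uint32_t fake_intr_stat(void)
{
	static int polls;

	return (++polls > 3) ? EXAMPLE_READ_DATA_AVAIL : 0;
}

static bool example_wait_read_data_avail(void)
{
	int retry = EXAMPLE_MAX_POLLS;

	while (retry-- > 0) {
		if (fake_intr_stat() & EXAMPLE_READ_DATA_AVAIL)
			return true;	/* data ready, caller drains the FIFO */
		/* the driver udelay()s 100us between polls; omitted here */
	}
	return false;			/* retry budget exhausted, -ETIMEDOUT path */
}

int main(void)
{
	printf("read data avail: %s\n",
	       example_wait_read_data_avail() ? "yes" : "timeout");
	return 0;
}
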
811
812static int mdfld_dsi_read_gen(struct mdfld_dsi_pkg_sender *sender,
813 u8 param0,
814 u8 param1,
815 u8 param_num,
816 u32 *data,
817 u16 len,
818 u8 transmission)
819{
820 struct mdfld_dsi_pkg *pkg;
821 unsigned long flags;
822
823 spin_lock_irqsave(&sender->lock, flags);
824
825 pkg = pkg_sender_get_pkg_locked(sender);
826
827	spin_unlock_irqrestore(&sender->lock, flags);
828
829 if (!pkg) {
830 dev_err(sender->dev->dev, "No pkg memory\n");
831 return -ENOMEM;
832 }
833
834 switch (param_num) {
835 case 0:
836 pkg->pkg_type = MDFLD_DSI_PKG_GEN_READ_0;
837 pkg->pkg.short_pkg.cmd = 0;
838 pkg->pkg.short_pkg.param = 0;
839 break;
840 case 1:
841 pkg->pkg_type = MDFLD_DSI_PKG_GEN_READ_1;
842 pkg->pkg.short_pkg.cmd = param0;
843 pkg->pkg.short_pkg.param = 0;
844 break;
845 case 2:
846 pkg->pkg_type = MDFLD_DSI_PKG_GEN_READ_2;
847 pkg->pkg.short_pkg.cmd = param0;
848 pkg->pkg.short_pkg.param = param1;
849 break;
850 }
851
852 pkg->transmission_type = transmission;
853
854 INIT_LIST_HEAD(&pkg->entry);
855
856 return __read_panel_data(sender, pkg, data, len);
857}
858
859static int mdfld_dsi_read_mcs(struct mdfld_dsi_pkg_sender *sender,
860 u8 cmd,
861 u32 *data,
862 u16 len,
863 u8 transmission)
864{
865 struct mdfld_dsi_pkg *pkg;
866 unsigned long flags;
867
868 spin_lock_irqsave(&sender->lock, flags);
869
870 pkg = pkg_sender_get_pkg_locked(sender);
871
872 spin_unlock_irqrestore(&sender->lock, flags);
873
874 if (!pkg) {
875 dev_err(sender->dev->dev, "No pkg memory\n");
876 return -ENOMEM;
877 }
878
879 pkg->pkg_type = MDFLD_DSI_PKG_MCS_READ;
880 pkg->pkg.short_pkg.cmd = cmd;
881 pkg->pkg.short_pkg.param = 0;
882
883 pkg->transmission_type = transmission;
884
885 INIT_LIST_HEAD(&pkg->entry);
886
887 return __read_panel_data(sender, pkg, data, len);
888}
889
890void dsi_controller_dbi_init(struct mdfld_dsi_config *dsi_config, int pipe)
891{
892	struct drm_device *dev = dsi_config->dev;
893 u32 reg_offset = pipe ? MIPIC_REG_OFFSET : 0;
894 int lane_count = dsi_config->lane_count;
895 u32 val = 0;
896
897 /*un-ready device*/
898 REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000000);
899
900 /*init dsi adapter before kicking off*/
901 REG_WRITE((MIPIA_CONTROL_REG + reg_offset), 0x00000018);
902
903 /*TODO: figure out how to setup these registers*/
904 REG_WRITE((MIPIA_DPHY_PARAM_REG + reg_offset), 0x150c3408);
905 REG_WRITE((MIPIA_CLK_LANE_SWITCH_TIME_CNT_REG + reg_offset), 0x000a0014);
906 REG_WRITE((MIPIA_DBI_BW_CTRL_REG + reg_offset), 0x00000400);
907 REG_WRITE((MIPIA_DBI_FIFO_THROTTLE_REG + reg_offset), 0x00000001);
908 REG_WRITE((MIPIA_HS_LS_DBI_ENABLE_REG + reg_offset), 0x00000000);
909
910 /*enable all interrupts*/
911 REG_WRITE((MIPIA_INTR_EN_REG + reg_offset), 0xffffffff);
912 /*max value: 20 clock cycles of txclkesc*/
913 REG_WRITE((MIPIA_TURN_AROUND_TIMEOUT_REG + reg_offset), 0x0000001f);
914 /*min 21 txclkesc, max: ffffh*/
915 REG_WRITE((MIPIA_DEVICE_RESET_TIMER_REG + reg_offset), 0x0000ffff);
916 /*min: 7d0 max: 4e20*/
917 REG_WRITE((MIPIA_INIT_COUNT_REG + reg_offset), 0x00000fa0);
918
919 /*set up max return packet size*/
920 REG_WRITE((MIPIA_MAX_RETURN_PACK_SIZE_REG + reg_offset),
921 MDFLD_DSI_MAX_RETURN_PACKET_SIZE);
922
923 /*set up func_prg*/
924 val |= lane_count;
925 val |= (dsi_config->channel_num << DSI_DBI_VIRT_CHANNEL_OFFSET);
926 val |= DSI_DBI_COLOR_FORMAT_OPTION2;
927 REG_WRITE((MIPIA_DSI_FUNC_PRG_REG + reg_offset), val);
928
929 REG_WRITE((MIPIA_HS_TX_TIMEOUT_REG + reg_offset), 0x3fffff);
930 REG_WRITE((MIPIA_LP_RX_TIMEOUT_REG + reg_offset), 0xffff);
931
932 REG_WRITE((MIPIA_HIGH_LOW_SWITCH_COUNT_REG + reg_offset), 0x46);
933 REG_WRITE((MIPIA_EOT_DISABLE_REG + reg_offset), 0x00000000);
934 REG_WRITE((MIPIA_LP_BYTECLK_REG + reg_offset), 0x00000004);
935 REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000001);
936}
937
938void dsi_controller_dpi_init(struct mdfld_dsi_config *dsi_config, int pipe)
939{
940	struct drm_device *dev = dsi_config->dev;
941 u32 reg_offset = pipe ? MIPIC_REG_OFFSET : 0;
942 int lane_count = dsi_config->lane_count;
943 struct mdfld_dsi_dpi_timing dpi_timing;
944	struct drm_display_mode *mode = dsi_config->mode;
945 u32 val = 0;
946
947 /*un-ready device*/
948 REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000000);
949
950 /*init dsi adapter before kicking off*/
951 REG_WRITE((MIPIA_CONTROL_REG + reg_offset), 0x00000018);
952
953 /*enable all interrupts*/
954 REG_WRITE((MIPIA_INTR_EN_REG + reg_offset), 0xffffffff);
955
956 /*set up func_prg*/
957 val |= lane_count;
958 val |= dsi_config->channel_num << DSI_DPI_VIRT_CHANNEL_OFFSET;
959
960	switch (dsi_config->bpp) {
961 case 16:
962 val |= DSI_DPI_COLOR_FORMAT_RGB565;
963 break;
964 case 18:
965 val |= DSI_DPI_COLOR_FORMAT_RGB666;
966 break;
967 case 24:
968 val |= DSI_DPI_COLOR_FORMAT_RGB888;
969 break;
970 default:
971 DRM_ERROR("unsupported color format, bpp = %d\n", dsi_config->bpp);
972 }
973
974 REG_WRITE((MIPIA_DSI_FUNC_PRG_REG + reg_offset), val);
975
976 REG_WRITE((MIPIA_HS_TX_TIMEOUT_REG + reg_offset),
977 (mode->vtotal * mode->htotal * dsi_config->bpp / (8 * lane_count)) & DSI_HS_TX_TIMEOUT_MASK);
978 REG_WRITE((MIPIA_LP_RX_TIMEOUT_REG + reg_offset), 0xffff & DSI_LP_RX_TIMEOUT_MASK);
979
980 /*max value: 20 clock cycles of txclkesc*/
981 REG_WRITE((MIPIA_TURN_AROUND_TIMEOUT_REG + reg_offset), 0x14 & DSI_TURN_AROUND_TIMEOUT_MASK);
982
983 /*min 21 txclkesc, max: ffffh*/
984 REG_WRITE((MIPIA_DEVICE_RESET_TIMER_REG + reg_offset), 0xffff & DSI_RESET_TIMER_MASK);
985
986 REG_WRITE((MIPIA_DPI_RESOLUTION_REG + reg_offset), mode->vdisplay << 16 | mode->hdisplay);
987
988 /*set DPI timing registers*/
989 mdfld_dsi_dpi_timing_calculation(mode, &dpi_timing, dsi_config->lane_count, dsi_config->bpp);
990
991 REG_WRITE((MIPIA_HSYNC_COUNT_REG + reg_offset), dpi_timing.hsync_count & DSI_DPI_TIMING_MASK);
992 REG_WRITE((MIPIA_HBP_COUNT_REG + reg_offset), dpi_timing.hbp_count & DSI_DPI_TIMING_MASK);
993 REG_WRITE((MIPIA_HFP_COUNT_REG + reg_offset), dpi_timing.hfp_count & DSI_DPI_TIMING_MASK);
994 REG_WRITE((MIPIA_HACTIVE_COUNT_REG + reg_offset), dpi_timing.hactive_count & DSI_DPI_TIMING_MASK);
995 REG_WRITE((MIPIA_VSYNC_COUNT_REG + reg_offset), dpi_timing.vsync_count & DSI_DPI_TIMING_MASK);
996 REG_WRITE((MIPIA_VBP_COUNT_REG + reg_offset), dpi_timing.vbp_count & DSI_DPI_TIMING_MASK);
997 REG_WRITE((MIPIA_VFP_COUNT_REG + reg_offset), dpi_timing.vfp_count & DSI_DPI_TIMING_MASK);
998
999 REG_WRITE((MIPIA_HIGH_LOW_SWITCH_COUNT_REG + reg_offset), 0x46);
1000
1001 /*min: 7d0 max: 4e20*/
1002 REG_WRITE((MIPIA_INIT_COUNT_REG + reg_offset), 0x000007d0);
1003
1004 /*set up video mode*/
1005 val = dsi_config->video_mode | DSI_DPI_COMPLETE_LAST_LINE;
1006 REG_WRITE((MIPIA_VIDEO_MODE_FORMAT_REG + reg_offset), val);
1007
1008 REG_WRITE((MIPIA_EOT_DISABLE_REG + reg_offset), 0x00000000);
1009
1010 REG_WRITE((MIPIA_LP_BYTECLK_REG + reg_offset), 0x00000004);
1011
1012 /*TODO: figure out how to setup these registers*/
1013 REG_WRITE((MIPIA_DPHY_PARAM_REG + reg_offset), 0x150c3408);
1014
1015 REG_WRITE((MIPIA_CLK_LANE_SWITCH_TIME_CNT_REG + reg_offset), (0xa << 16) | 0x14);
1016
1017 /*set device ready*/
1018 REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000001);
1019}
1020
1021static void dsi_controller_init(struct mdfld_dsi_config *dsi_config, int pipe)
1022{
1023 if (!dsi_config || ((pipe != 0) && (pipe != 2))) {
1024 DRM_ERROR("Invalid parameters\n");
1025 return;
1026 }
1027
1028 if (dsi_config->type == MDFLD_DSI_ENCODER_DPI)
1029 dsi_controller_dpi_init(dsi_config, pipe);
1030 else if (dsi_config->type == MDFLD_DSI_ENCODER_DBI)
1031 dsi_controller_dbi_init(dsi_config, pipe);
1032 else
1033 DRM_ERROR("Bad DSI encoder type\n");
1034}
1035
1036void mdfld_dsi_cmds_kick_out(struct mdfld_dsi_pkg_sender *sender)
1037{
1038 process_pkg_list(sender);
1039}
1040
1041int mdfld_dsi_send_dcs(struct mdfld_dsi_pkg_sender *sender,
1042 u8 dcs, u8 *param, u32 param_num, u8 data_src,
1043 int delay)
1044{
1045 struct mdfld_dsi_pkg *pkg;
1046 u32 cb_phy = sender->dbi_cb_phy;
1047 struct drm_device *dev = sender->dev;
1048 u32 index = 0;
1049 u8 *cb = (u8 *)sender->dbi_cb_addr;
1050 unsigned long flags;
1051 int retry;
1052 u8 *dst = NULL;
1053 u32 len;
1054
1055 if (!sender) {
1056 WARN_ON(1);
1057 return -EINVAL;
1058 }
1059
1060 if (!sender->dbi_pkg_support) {
1061 dev_err(dev->dev, "No DBI pkg sending on this sender\n");
1062 return -ENOTSUPP;
1063 }
1064
1065 if (param_num > MDFLD_MAX_DCS_PARAM) {
1066 dev_err(dev->dev, "Sender only supports up to %d DCS params\n",
1067 MDFLD_MAX_DCS_PARAM);
1068 return -EINVAL;
1069 }
1070
1071 /*
1072 * If dcs is write_mem_start, send it directly using DSI adapter
1073 * interface
1074 */
1075 if (dcs == DCS_WRITE_MEM_START) {
1076 if (!spin_trylock(&sender->lock))
1077 return -EAGAIN;
1078
1079 /*
1080 * query whether DBI FIFO is empty,
1081 * if not wait it becoming empty
1082 */
1083 retry = MDFLD_DSI_DBI_FIFO_TIMEOUT;
1084 while (retry &&
1085 !(REG_READ(sender->mipi_gen_fifo_stat_reg) & (1 << 27))) {
1086 udelay(500);
1087 retry--;
1088 }
1089
1090 /* If DBI FIFO timeout, drop this frame */
1091 if (!retry) {
1092 spin_unlock(&sender->lock);
1093 return 0;
1094 }
1095
1096 *(cb + (index++)) = write_mem_start;
1097
1098 REG_WRITE(sender->mipi_cmd_len_reg, 1);
1099 REG_WRITE(sender->mipi_cmd_addr_reg,
1100 cb_phy | (1 << 0) | (1 << 1));
1101
1102 retry = MDFLD_DSI_DBI_FIFO_TIMEOUT;
1103 while (retry &&
1104 (REG_READ(sender->mipi_cmd_addr_reg) & (1 << 0))) {
1105 udelay(1);
1106 retry--;
1107 }
1108
1109 spin_unlock(&sender->lock);
1110 return 0;
1111 }
1112
1113 /* Get a free pkg */
1114 spin_lock_irqsave(&sender->lock, flags);
1115 pkg = pkg_sender_get_pkg_locked(sender);
1116 spin_unlock_irqrestore(&sender->lock, flags);
1117
1118 if (!pkg) {
1119		dev_err(dev->dev, "No pkg memory\n");
1120 return -ENOMEM;
1121 }
1122
1123 dst = pkg->pkg.dcs_pkg.param;
1124 memcpy(dst, param, param_num);
1125
1126 pkg->pkg_type = MDFLD_DSI_PKG_DCS;
1127 pkg->transmission_type = MDFLD_DSI_DCS;
1128 pkg->pkg.dcs_pkg.cmd = dcs;
1129 pkg->pkg.dcs_pkg.param_num = param_num;
1130 pkg->pkg.dcs_pkg.data_src = data_src;
1131
1132 INIT_LIST_HEAD(&pkg->entry);
1133
1134 if (param_num == 0)
1135 return mdfld_dsi_send_mcs_short_hs(sender, dcs, 0, 0, delay);
1136 else if (param_num == 1)
1137 return mdfld_dsi_send_mcs_short_hs(sender, dcs,
1138 param[0], 1, delay);
1139 else if (param_num > 1) {
1140 len = (param_num + 1) / 4;
1141 if ((param_num + 1) % 4)
1142 len++;
1143 return mdfld_dsi_send_mcs_long_hs(sender,
1144 (u32 *)&pkg->pkg.dcs_pkg, len, delay);
1145 }
1146 return 0;
1147}
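
(Aside: when a DCS command carries more than one parameter, mdfld_dsi_send_dcs above rounds the command byte plus its parameters up to whole 32-bit words before handing them to the MCS long-write path. A standalone sketch of that length calculation, equivalent to the len = (param_num + 1) / 4 plus remainder bump in the code:)

#include <stdint.h>
#include <stdio.h>

/* Round one command byte plus param_num parameter bytes up to whole dwords. */
static uint32_t dcs_payload_dwords(uint32_t param_num)
{
	uint32_t bytes = param_num + 1;		/* command byte + parameters */

	return (bytes + 3) / 4;			/* ceiling division by 4 */
}

int main(void)
{
	uint32_t n;

	for (n = 0; n <= 8; n++)
		printf("param_num=%u -> %u dword(s)\n",
		       (unsigned)n, (unsigned)dcs_payload_dwords(n));
	return 0;
}
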
1148
1149int mdfld_dsi_send_mcs_short_hs(struct mdfld_dsi_pkg_sender *sender,
1150 u8 cmd, u8 param, u8 param_num, int delay)
1151{
1152 if (!sender) {
1153 WARN_ON(1);
1154 return -EINVAL;
1155 }
1156 return mdfld_dsi_send_mcs_short(sender, cmd, param, param_num,
1157 MDFLD_DSI_HS_TRANSMISSION, delay);
1158}
1159
1160int mdfld_dsi_send_mcs_short_lp(struct mdfld_dsi_pkg_sender *sender,
1161 u8 cmd, u8 param, u8 param_num, int delay)
1162{
1163 if (!sender) {
1164 WARN_ON(1);
1165 return -EINVAL;
1166 }
1167 return mdfld_dsi_send_mcs_short(sender, cmd, param, param_num,
1168 MDFLD_DSI_LP_TRANSMISSION, delay);
1169}
1170
1171int mdfld_dsi_send_mcs_long_hs(struct mdfld_dsi_pkg_sender *sender,
1172 u32 *data,
1173 u32 len,
1174 int delay)
1175{
1176 if (!sender || !data || !len) {
1177 DRM_ERROR("Invalid parameters\n");
1178 return -EINVAL;
1179 }
1180 return mdfld_dsi_send_mcs_long(sender, data, len,
1181 MDFLD_DSI_HS_TRANSMISSION, delay);
1182}
1183
1184int mdfld_dsi_send_mcs_long_lp(struct mdfld_dsi_pkg_sender *sender,
1185 u32 *data,
1186 u32 len,
1187 int delay)
1188{
1189 if (!sender || !data || !len) {
1190 WARN_ON(1);
1191 return -EINVAL;
1192 }
1193 return mdfld_dsi_send_mcs_long(sender, data, len,
1194 MDFLD_DSI_LP_TRANSMISSION, delay);
1195}
1196
1197int mdfld_dsi_send_gen_short_hs(struct mdfld_dsi_pkg_sender *sender,
1198 u8 param0, u8 param1, u8 param_num, int delay)
1199{
1200 if (!sender) {
1201 WARN_ON(1);
1202 return -EINVAL;
1203 }
1204 return mdfld_dsi_send_gen_short(sender, param0, param1, param_num,
1205 MDFLD_DSI_HS_TRANSMISSION, delay);
1206}
1207
1208int mdfld_dsi_send_gen_short_lp(struct mdfld_dsi_pkg_sender *sender,
1209 u8 param0, u8 param1, u8 param_num, int delay)
1210{
1211	if (!sender || param_num > 2) {
1212 WARN_ON(1);
1213 return -EINVAL;
1214 }
1215 return mdfld_dsi_send_gen_short(sender, param0, param1, param_num,
1216 MDFLD_DSI_LP_TRANSMISSION, delay);
1217}
1218
1219int mdfld_dsi_send_gen_long_hs(struct mdfld_dsi_pkg_sender *sender,
1220 u32 *data,
1221 u32 len,
1222 int delay)
1223{
1224 if (!sender || !data || !len) {
1225 WARN_ON(1);
1226 return -EINVAL;
1227 }
1228 return mdfld_dsi_send_gen_long(sender, data, len,
1229 MDFLD_DSI_HS_TRANSMISSION, delay);
1230}
1231
1232int mdfld_dsi_send_gen_long_lp(struct mdfld_dsi_pkg_sender *sender,
1233 u32 *data,
1234 u32 len,
1235 int delay)
1236{
1237 if (!sender || !data || !len) {
1238 WARN_ON(1);
1239 return -EINVAL;
1240 }
1241 return mdfld_dsi_send_gen_long(sender, data, len,
1242 MDFLD_DSI_LP_TRANSMISSION, delay);
1243}
1244
1245int mdfld_dsi_read_gen_hs(struct mdfld_dsi_pkg_sender *sender,
1246 u8 param0,
1247 u8 param1,
1248 u8 param_num,
1249 u32 *data,
1250 u16 len)
1251{
1252	if (!sender || !data || param_num > 2
1253		|| !len) {
1254 DRM_ERROR("Invalid parameters\n");
1255 return -EINVAL;
1256 }
1257
1258 return mdfld_dsi_read_gen(sender, param0, param1, param_num,
1259 data, len, MDFLD_DSI_HS_TRANSMISSION);
1260
1261}
1262
1263int mdfld_dsi_read_gen_lp(struct mdfld_dsi_pkg_sender *sender,
1264 u8 param0,
1265 u8 param1,
1266 u8 param_num,
1267 u32 *data,
1268 u16 len)
1269{
1270	if (!sender || !data || param_num > 2
1271		|| !len) {
1272 DRM_ERROR("Invalid parameters\n");
1273 return -EINVAL;
1274 }
1275
1276 return mdfld_dsi_read_gen(sender, param0, param1, param_num,
1277 data, len, MDFLD_DSI_LP_TRANSMISSION);
1278}
1279
1280int mdfld_dsi_read_mcs_hs(struct mdfld_dsi_pkg_sender *sender,
1281 u8 cmd,
1282 u32 *data,
1283 u16 len)
1284{
1285 if (!sender || !data || !len) {
1286 DRM_ERROR("Invalid parameters\n");
1287 return -EINVAL;
1288 }
1289
1290 return mdfld_dsi_read_mcs(sender, cmd, data, len,
1291 MDFLD_DSI_HS_TRANSMISSION);
1292}
1293
1294int mdfld_dsi_read_mcs_lp(struct mdfld_dsi_pkg_sender *sender,
1295 u8 cmd,
1296 u32 *data,
1297 u16 len)
1298{
1299 if (!sender || !data || !len) {
1300 WARN_ON(1);
1301 return -EINVAL;
1302 }
1303
1304 return mdfld_dsi_read_mcs(sender, cmd, data, len,
1305 MDFLD_DSI_LP_TRANSMISSION);
1306}
1307
1308int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector,
1309 int pipe)
1310{
1311 int ret;
1312 struct mdfld_dsi_pkg_sender *pkg_sender;
1313 struct mdfld_dsi_config *dsi_config =
1314 mdfld_dsi_get_config(dsi_connector);
1315 struct drm_device *dev = dsi_config->dev;
1316 struct drm_psb_private *dev_priv = dev->dev_private;
1317 struct psb_gtt *pg = &dev_priv->gtt;
1318 int i;
1319 struct mdfld_dsi_pkg *pkg, *tmp;
1320 u32 mipi_val = 0;
1321
1322 if (!dsi_connector) {
1323 WARN_ON(1);
1324 return -EINVAL;
1325 }
1326
1327 pkg_sender = dsi_connector->pkg_sender;
1328
1329 if (!pkg_sender || IS_ERR(pkg_sender)) {
1330 pkg_sender = kzalloc(sizeof(struct mdfld_dsi_pkg_sender),
1331 GFP_KERNEL);
1332 if (!pkg_sender) {
1333 dev_err(dev->dev, "Create DSI pkg sender failed\n");
1334 return -ENOMEM;
1335 }
1336
1337 dsi_connector->pkg_sender = (void *)pkg_sender;
1338 }
1339
1340 pkg_sender->dev = dev;
1341 pkg_sender->dsi_connector = dsi_connector;
1342 pkg_sender->pipe = pipe;
1343 pkg_sender->pkg_num = 0;
1344 pkg_sender->panel_mode = 0;
1345 pkg_sender->status = MDFLD_DSI_PKG_SENDER_FREE;
1346
1347 /* Init dbi command buffer*/
1348
1349 if (dsi_config->type == MDFLD_DSI_ENCODER_DBI) {
1350 pkg_sender->dbi_pkg_support = 1;
1351 ret = mdfld_dbi_cb_init(pkg_sender, pg, pipe);
1352 if (ret) {
1353 dev_err(dev->dev, "DBI command buffer map failed\n");
1354 goto mapping_err;
1355 }
1356 }
1357
1358 /* Init regs */
1359 if (pipe == 0) {
1360 pkg_sender->dpll_reg = MRST_DPLL_A;
1361 pkg_sender->dspcntr_reg = DSPACNTR;
1362 pkg_sender->pipeconf_reg = PIPEACONF;
1363 pkg_sender->dsplinoff_reg = DSPALINOFF;
1364 pkg_sender->dspsurf_reg = DSPASURF;
1365 pkg_sender->pipestat_reg = PIPEASTAT;
1366
1367 pkg_sender->mipi_intr_stat_reg = MIPIA_INTR_STAT_REG;
1368 pkg_sender->mipi_lp_gen_data_reg = MIPIA_LP_GEN_DATA_REG;
1369 pkg_sender->mipi_hs_gen_data_reg = MIPIA_HS_GEN_DATA_REG;
1370 pkg_sender->mipi_lp_gen_ctrl_reg = MIPIA_LP_GEN_CTRL_REG;
1371 pkg_sender->mipi_hs_gen_ctrl_reg = MIPIA_HS_GEN_CTRL_REG;
1372 pkg_sender->mipi_gen_fifo_stat_reg = MIPIA_GEN_FIFO_STAT_REG;
1373 pkg_sender->mipi_data_addr_reg = MIPIA_DATA_ADD_REG;
1374 pkg_sender->mipi_data_len_reg = MIPIA_DATA_LEN_REG;
1375 pkg_sender->mipi_cmd_addr_reg = MIPIA_CMD_ADD_REG;
1376 pkg_sender->mipi_cmd_len_reg = MIPIA_CMD_LEN_REG;
1377 } else if (pipe == 2) {
1378 pkg_sender->dpll_reg = MRST_DPLL_A;
1379 pkg_sender->dspcntr_reg = DSPCCNTR;
1380 pkg_sender->pipeconf_reg = PIPECCONF;
1381 pkg_sender->dsplinoff_reg = DSPCLINOFF;
1382 pkg_sender->dspsurf_reg = DSPCSURF;
1383 pkg_sender->pipestat_reg = PIPECSTAT;
1384
1385 pkg_sender->mipi_intr_stat_reg =
1386 MIPIA_INTR_STAT_REG + MIPIC_REG_OFFSET;
1387 pkg_sender->mipi_lp_gen_data_reg =
1388 MIPIA_LP_GEN_DATA_REG + MIPIC_REG_OFFSET;
1389 pkg_sender->mipi_hs_gen_data_reg =
1390 MIPIA_HS_GEN_DATA_REG + MIPIC_REG_OFFSET;
1391 pkg_sender->mipi_lp_gen_ctrl_reg =
1392 MIPIA_LP_GEN_CTRL_REG + MIPIC_REG_OFFSET;
1393 pkg_sender->mipi_hs_gen_ctrl_reg =
1394 MIPIA_HS_GEN_CTRL_REG + MIPIC_REG_OFFSET;
1395 pkg_sender->mipi_gen_fifo_stat_reg =
1396 MIPIA_GEN_FIFO_STAT_REG + MIPIC_REG_OFFSET;
1397 pkg_sender->mipi_data_addr_reg =
1398 MIPIA_DATA_ADD_REG + MIPIC_REG_OFFSET;
1399 pkg_sender->mipi_data_len_reg =
1400 MIPIA_DATA_LEN_REG + MIPIC_REG_OFFSET;
1401 pkg_sender->mipi_cmd_addr_reg =
1402 MIPIA_CMD_ADD_REG + MIPIC_REG_OFFSET;
1403 pkg_sender->mipi_cmd_len_reg =
1404 MIPIA_CMD_LEN_REG + MIPIC_REG_OFFSET;
1405 }
1406
1407 /* Init pkg list */
1408 INIT_LIST_HEAD(&pkg_sender->pkg_list);
1409 INIT_LIST_HEAD(&pkg_sender->free_list);
1410
1411 spin_lock_init(&pkg_sender->lock);
1412
1413 /* Allocate free pkg pool */
1414 for (i = 0; i < MDFLD_MAX_PKG_NUM; i++) {
1415 pkg = kzalloc(sizeof(struct mdfld_dsi_pkg), GFP_KERNEL);
1416 if (!pkg) {
1417 dev_err(dev->dev, "Out of memory allocating pkg pool");
1418 ret = -ENOMEM;
1419 goto pkg_alloc_err;
1420 }
1421 INIT_LIST_HEAD(&pkg->entry);
1422 list_add_tail(&pkg->entry, &pkg_sender->free_list);
1423 }
1424
1425 /*
1426 * For video mode, don't enable DPI timing output here,
1427 * will init the DPI timing output during mode setting.
1428 */
1429 if (dsi_config->type == MDFLD_DSI_ENCODER_DPI)
1430 mipi_val = PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
1431 else if (dsi_config->type == MDFLD_DSI_ENCODER_DBI)
1432 mipi_val = PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX
1433 | TE_TRIGGER_GPIO_PIN;
1434 else
1435 DRM_ERROR("Bad DSI encoder type\n");
1436
1437 if (pipe == 0) {
1438 mipi_val |= 0x2;
1439 REG_WRITE(MIPI, mipi_val);
1440 REG_READ(MIPI);
1441 } else if (pipe == 2) {
1442 REG_WRITE(MIPI_C, mipi_val);
1443 REG_READ(MIPI_C);
1444 }
1445
1446 /*do dsi controller init*/
1447 dsi_controller_init(dsi_config, pipe);
1448
1449 return 0;
1450
1451pkg_alloc_err:
1452 list_for_each_entry_safe(pkg, tmp, &pkg_sender->free_list, entry) {
1453 list_del(&pkg->entry);
1454 kfree(pkg);
1455 }
1456
1457 /* Free mapped command buffer */
1458 mdfld_dbi_cb_destroy(pkg_sender);
1459mapping_err:
1460 kfree(pkg_sender);
1461 dsi_connector->pkg_sender = NULL;
1462 return ret;
1463}
1464
1465void mdfld_dsi_pkg_sender_destroy(struct mdfld_dsi_pkg_sender *sender)
1466{
1467 struct mdfld_dsi_pkg *pkg, *tmp;
1468
1469 if (!sender || IS_ERR(sender))
1470 return;
1471
1472 /* Free pkg pool */
1473 list_for_each_entry_safe(pkg, tmp, &sender->free_list, entry) {
1474 list_del(&pkg->entry);
1475 kfree(pkg);
1476 }
1477 /* Free pkg list */
1478 list_for_each_entry_safe(pkg, tmp, &sender->pkg_list, entry) {
1479 list_del(&pkg->entry);
1480 kfree(pkg);
1481 }
1482 mdfld_dbi_cb_destroy(sender); /* free mapped command buffer */
1483 kfree(sender);
1484}
diff --git a/drivers/staging/gma500/mdfld_dsi_pkg_sender.h b/drivers/staging/gma500/mdfld_dsi_pkg_sender.h
deleted file mode 100644
index f24abc700684..000000000000
--- a/drivers/staging/gma500/mdfld_dsi_pkg_sender.h
+++ /dev/null
@@ -1,184 +0,0 @@
1/*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Jackie Li <yaodong.li@intel.com>
25 */
26#ifndef __MDFLD_DSI_PKG_SENDER_H__
27#define __MDFLD_DSI_PKG_SENDER_H__
28
29#include <linux/kthread.h>
30
31#define MDFLD_MAX_DCS_PARAM 8
32#define MDFLD_MAX_PKG_NUM 2048
33
34enum {
35 MDFLD_DSI_PKG_DCS,
36 MDFLD_DSI_PKG_GEN_SHORT_WRITE_0 = 0x03,
37 MDFLD_DSI_PKG_GEN_SHORT_WRITE_1 = 0x13,
38 MDFLD_DSI_PKG_GEN_SHORT_WRITE_2 = 0x23,
39 MDFLD_DSI_PKG_GEN_READ_0 = 0x04,
40 MDFLD_DSI_PKG_GEN_READ_1 = 0x14,
41 MDFLD_DSI_PKG_GEN_READ_2 = 0x24,
42 MDFLD_DSI_PKG_GEN_LONG_WRITE = 0x29,
43 MDFLD_DSI_PKG_MCS_SHORT_WRITE_0 = 0x05,
44 MDFLD_DSI_PKG_MCS_SHORT_WRITE_1 = 0x15,
45 MDFLD_DSI_PKG_MCS_READ = 0x06,
46 MDFLD_DSI_PKG_MCS_LONG_WRITE = 0x39,
47};
48
49enum {
50 MDFLD_DSI_LP_TRANSMISSION,
51 MDFLD_DSI_HS_TRANSMISSION,
52 MDFLD_DSI_DCS,
53};
54
55enum {
56 MDFLD_DSI_PANEL_MODE_SLEEP = 0x1,
57};
58
59enum {
60 MDFLD_DSI_PKG_SENDER_FREE = 0x0,
61 MDFLD_DSI_PKG_SENDER_BUSY = 0x1,
62};
63
64enum {
65 MDFLD_DSI_SEND_PACKAGE,
66 MDFLD_DSI_QUEUE_PACKAGE,
67};
68
69struct mdfld_dsi_gen_short_pkg {
70 u8 cmd;
71 u8 param;
72};
73
74struct mdfld_dsi_gen_long_pkg {
75 u32 *data;
76 u32 len;
77};
78
79struct mdfld_dsi_dcs_pkg {
80 u8 cmd;
81 u8 param[MDFLD_MAX_DCS_PARAM];
82 u32 param_num;
83 u8 data_src;
84};
85
86struct mdfld_dsi_pkg {
87 u8 pkg_type;
88 u8 transmission_type;
89
90 union {
91 struct mdfld_dsi_gen_short_pkg short_pkg;
92 struct mdfld_dsi_gen_long_pkg long_pkg;
93 struct mdfld_dsi_dcs_pkg dcs_pkg;
94 } pkg;
95
96 struct list_head entry;
97};
98
99struct mdfld_dsi_pkg_sender {
100 struct drm_device *dev;
101 struct mdfld_dsi_connector *dsi_connector;
102 u32 status;
103
104 u32 panel_mode;
105
106 int pipe;
107
108 spinlock_t lock;
109 struct list_head pkg_list;
110 struct list_head free_list;
111
112 u32 pkg_num;
113
114 int dbi_pkg_support;
115
116 u32 dbi_cb_phy;
117 void *dbi_cb_addr;
118
119 /* Registers */
120 u32 dpll_reg;
121 u32 dspcntr_reg;
122 u32 pipeconf_reg;
123 u32 pipestat_reg;
124 u32 dsplinoff_reg;
125 u32 dspsurf_reg;
126
127 u32 mipi_intr_stat_reg;
128 u32 mipi_lp_gen_data_reg;
129 u32 mipi_hs_gen_data_reg;
130 u32 mipi_lp_gen_ctrl_reg;
131 u32 mipi_hs_gen_ctrl_reg;
132 u32 mipi_gen_fifo_stat_reg;
133 u32 mipi_data_addr_reg;
134 u32 mipi_data_len_reg;
135 u32 mipi_cmd_addr_reg;
136 u32 mipi_cmd_len_reg;
137};
138
139/* DCS definitions */
140#define DCS_SOFT_RESET 0x01
141#define DCS_ENTER_SLEEP_MODE 0x10
142#define DCS_EXIT_SLEEP_MODE 0x11
143#define DCS_SET_DISPLAY_OFF 0x28
144#define DCS_SET_DISPLAY_ON 0x29
145#define DCS_SET_COLUMN_ADDRESS 0x2a
146#define DCS_SET_PAGE_ADDRESS 0x2b
147#define DCS_WRITE_MEM_START 0x2c
148#define DCS_SET_TEAR_OFF 0x34
149#define DCS_SET_TEAR_ON 0x35
150
151extern int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector,
152 int pipe);
153extern void mdfld_dsi_pkg_sender_destroy(struct mdfld_dsi_pkg_sender *sender);
154extern int mdfld_dsi_send_dcs(struct mdfld_dsi_pkg_sender *sender, u8 dcs,
155 u8 *param, u32 param_num, u8 data_src, int delay);
156extern int mdfld_dsi_send_mcs_short_hs(struct mdfld_dsi_pkg_sender *sender,
157 u8 cmd, u8 param, u8 param_num, int delay);
158extern int mdfld_dsi_send_mcs_short_lp(struct mdfld_dsi_pkg_sender *sender,
159 u8 cmd, u8 param, u8 param_num, int delay);
160extern int mdfld_dsi_send_mcs_long_hs(struct mdfld_dsi_pkg_sender *sender,
161 u32 *data, u32 len, int delay);
162extern int mdfld_dsi_send_mcs_long_lp(struct mdfld_dsi_pkg_sender *sender,
163 u32 *data, u32 len, int delay);
164extern int mdfld_dsi_send_gen_short_hs(struct mdfld_dsi_pkg_sender *sender,
165 u8 param0, u8 param1, u8 param_num, int delay);
166extern int mdfld_dsi_send_gen_short_lp(struct mdfld_dsi_pkg_sender *sender,
167 u8 param0, u8 param1, u8 param_num, int delay);
168extern int mdfld_dsi_send_gen_long_hs(struct mdfld_dsi_pkg_sender *sender,
169 u32 *data, u32 len, int delay);
170extern int mdfld_dsi_send_gen_long_lp(struct mdfld_dsi_pkg_sender *sender,
171 u32 *data, u32 len, int delay);
172
173extern int mdfld_dsi_read_gen_hs(struct mdfld_dsi_pkg_sender *sender,
174 u8 param0, u8 param1, u8 param_num, u32 *data, u16 len);
175extern int mdfld_dsi_read_gen_lp(struct mdfld_dsi_pkg_sender *sender,
176 u8 param0, u8 param1, u8 param_num, u32 *data, u16 len);
177extern int mdfld_dsi_read_mcs_hs(struct mdfld_dsi_pkg_sender *sender,
178 u8 cmd, u32 *data, u16 len);
179extern int mdfld_dsi_read_mcs_lp(struct mdfld_dsi_pkg_sender *sender,
180 u8 cmd, u32 *data, u16 len);
181
182extern void mdfld_dsi_cmds_kick_out(struct mdfld_dsi_pkg_sender *sender);
183
184#endif /* __MDFLD_DSI_PKG_SENDER_H__ */
diff --git a/drivers/staging/gma500/mdfld_intel_display.c b/drivers/staging/gma500/mdfld_intel_display.c
deleted file mode 100644
index 0b37b7b6b02a..000000000000
--- a/drivers/staging/gma500/mdfld_intel_display.c
+++ /dev/null
@@ -1,1404 +0,0 @@
1/*
2 * Copyright © 2006-2011 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 */
26
27#include "framebuffer.h"
28#include "psb_intel_display.h"
29#include "mdfld_dsi_dbi.h"
30#include "mdfld_dsi_dpi.h"
31#include "mdfld_dsi_dbi_dpu.h"
32
33#include <linux/pm_runtime.h>
34
35#ifdef MIN
36#undef MIN
37#endif
38
39#define MIN(x, y) (((x) < (y)) ? (x) : (y))
40
41/* Hardcoded currently */
42static int ksel = KSEL_CRYSTAL_19;
43
44extern void mdfld_save_display(struct drm_device *dev);
45extern bool gbgfxsuspended;
46
47struct psb_intel_range_t {
48 int min, max;
49};
50
51struct mdfld_limit_t {
52 struct psb_intel_range_t dot, m, p1;
53};
54
55struct mdfld_intel_clock_t {
56 /* given values */
57 int n;
58 int m1, m2;
59 int p1, p2;
60 /* derived values */
61 int dot;
62 int vco;
63 int m;
64 int p;
65};
66
67
68
69#define COUNT_MAX 0x10000000
70
71void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe)
72{
73 int count, temp;
74 u32 pipeconf_reg = PIPEACONF;
75
76 switch (pipe) {
77 case 0:
78 break;
79 case 1:
80 pipeconf_reg = PIPEBCONF;
81 break;
82 case 2:
83 pipeconf_reg = PIPECCONF;
84 break;
85 default:
86		DRM_ERROR("Illegal Pipe Number.\n");
87 return;
88 }
89
90 /* FIXME JLIU7_PO */
91 psb_intel_wait_for_vblank(dev);
92 return;
93
94	/* Wait for the pipe disable to take effect. */
95 for (count = 0; count < COUNT_MAX; count++) {
96 temp = REG_READ(pipeconf_reg);
97 if ((temp & PIPEACONF_PIPE_STATE) == 0)
98 break;
99 }
100}
101
102void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
103{
104 int count, temp;
105 u32 pipeconf_reg = PIPEACONF;
106
107 switch (pipe) {
108 case 0:
109 break;
110 case 1:
111 pipeconf_reg = PIPEBCONF;
112 break;
113 case 2:
114 pipeconf_reg = PIPECCONF;
115 break;
116 default:
117 dev_err(dev->dev, "Illegal Pipe Number.\n");
118 return;
119 }
120
121 /* FIXME JLIU7_PO */
122 psb_intel_wait_for_vblank(dev);
123 return;
124
125	/* Wait for the pipe enable to take effect. */
126 for (count = 0; count < COUNT_MAX; count++) {
127 temp = REG_READ(pipeconf_reg);
128 if ((temp & PIPEACONF_PIPE_STATE) == 1)
129 break;
130 }
131}
132
133
134static int mdfld_intel_crtc_cursor_set(struct drm_crtc *crtc,
135 struct drm_file *file_priv,
136 uint32_t handle,
137 uint32_t width, uint32_t height)
138{
139 struct drm_device *dev = crtc->dev;
140 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
141 int pipe = psb_intel_crtc->pipe;
142 uint32_t control = CURACNTR;
143 uint32_t base = CURABASE;
144 uint32_t temp;
145 size_t addr = 0;
146 struct gtt_range *gt;
147 struct drm_gem_object *obj;
148 int ret;
149
150 switch (pipe) {
151 case 0:
152 break;
153 case 1:
154 control = CURBCNTR;
155 base = CURBBASE;
156 break;
157 case 2:
158 control = CURCCNTR;
159 base = CURCBASE;
160 break;
161 default:
162		dev_err(dev->dev, "Illegal Pipe Number.\n");
163 return -EINVAL;
164 }
165
166#if 1 /* FIXME_JLIU7 can't enable cursor B/C due to a HW issue; remove after HW fix */
167 if (pipe != 0)
168 return 0;
169#endif
170	/* if we want to turn off the cursor, ignore width and height */
171 if (!handle) {
172 dev_dbg(dev->dev, "cursor off\n");
173 /* turn off the cursor */
174 temp = 0;
175 temp |= CURSOR_MODE_DISABLE;
176
177 if (gma_power_begin(dev, true)) {
178 REG_WRITE(control, temp);
179 REG_WRITE(base, 0);
180 gma_power_end(dev);
181 }
182 /* Unpin the old GEM object */
183 if (psb_intel_crtc->cursor_obj) {
184 gt = container_of(psb_intel_crtc->cursor_obj,
185 struct gtt_range, gem);
186 psb_gtt_unpin(gt);
187 drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
188 psb_intel_crtc->cursor_obj = NULL;
189 }
190 return 0;
191 }
192
193 /* Currently we only support 64x64 cursors */
194 if (width != 64 || height != 64) {
195 DRM_ERROR("we currently only support 64x64 cursors\n");
196 return -EINVAL;
197 }
198
199 obj = drm_gem_object_lookup(dev, file_priv, handle);
200 if (!obj)
201 return -ENOENT;
202
203 if (obj->size < width * height * 4) {
204		dev_dbg(dev->dev, "buffer is too small\n");
205 return -ENOMEM;
206 }
207
208 gt = container_of(obj, struct gtt_range, gem);
209
210 /* Pin the memory into the GTT */
211 ret = psb_gtt_pin(gt);
212 if (ret) {
213 dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
214 return ret;
215 }
216
217
218 addr = gt->offset; /* Or resource.start ??? */
219
220 psb_intel_crtc->cursor_addr = addr;
221
222 temp = 0;
223 /* set the pipe for the cursor */
224 temp |= (pipe << 28);
225 temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
226
227 if (gma_power_begin(dev, true)) {
228 REG_WRITE(control, temp);
229 REG_WRITE(base, addr);
230 gma_power_end(dev);
231 }
232 /* unpin the old GEM object */
233 if (psb_intel_crtc->cursor_obj) {
234 gt = container_of(psb_intel_crtc->cursor_obj,
235 struct gtt_range, gem);
236 psb_gtt_unpin(gt);
237 drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
238 psb_intel_crtc->cursor_obj = obj;
239 }
240 return 0;
241}
242
243static int mdfld_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
244{
245 struct drm_device *dev = crtc->dev;
246	struct drm_psb_private *dev_priv = (struct drm_psb_private *)dev->dev_private;
247 struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
248 struct psb_drm_dpu_rect rect;
249 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
250 int pipe = psb_intel_crtc->pipe;
251 uint32_t pos = CURAPOS;
252 uint32_t base = CURABASE;
253 uint32_t temp = 0;
254 uint32_t addr;
255
256 switch (pipe) {
257 case 0:
258 if (dpu_info) {
259 rect.x = x;
260 rect.y = y;
261
262 mdfld_dbi_dpu_report_damage(dev, MDFLD_CURSORA, &rect);
263 mdfld_dpu_exit_dsr(dev);
264 } else if (!(dev_priv->dsr_fb_update & MDFLD_DSR_CURSOR_0))
265 mdfld_dsi_dbi_exit_dsr(dev, MDFLD_DSR_CURSOR_0);
266 break;
267 case 1:
268 pos = CURBPOS;
269 base = CURBBASE;
270 break;
271 case 2:
272 if (dpu_info) {
273 mdfld_dbi_dpu_report_damage(dev, MDFLD_CURSORC, &rect);
274 mdfld_dpu_exit_dsr(dev);
275 } else if (!(dev_priv->dsr_fb_update & MDFLD_DSR_CURSOR_2))
276 mdfld_dsi_dbi_exit_dsr(dev, MDFLD_DSR_CURSOR_2);
277 pos = CURCPOS;
278 base = CURCBASE;
279 break;
280 default:
281		DRM_ERROR("Illegal Pipe Number.\n");
282 return -EINVAL;
283 }
284
285#if 1 /* FIXME_JLIU7 can't enable cursor B/C due to a HW issue; remove after HW fix */
286 if (pipe != 0)
287 return 0;
288#endif
289 if (x < 0) {
290 temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
291 x = -x;
292 }
293 if (y < 0) {
294 temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
295 y = -y;
296 }
297
298 temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
299 temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
300
301 addr = psb_intel_crtc->cursor_addr;
302
303 if (gma_power_begin(dev, true)) {
304 REG_WRITE(pos, temp);
305 REG_WRITE(base, addr);
306 gma_power_end(dev);
307 }
308
309 return 0;
310}
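
The move handler above packs the signed cursor coordinates into a single sign/magnitude register word: one sign bit and a masked magnitude per axis, with X in the low half and Y in the high half. Below is a minimal user-space sketch of that packing; the mask, sign-bit and shift values are illustrative assumptions, not the driver's actual CURSOR_* register definitions.

#include <stdint.h>
#include <stdio.h>

/* Assumed register layout, for illustration only: an 11-bit magnitude
 * plus a sign bit per axis, X in bits 15:0 and Y in bits 31:16. The
 * driver takes the real values from its CURSOR_* definitions. */
#define POS_MASK  0x07ffu
#define POS_SIGN  0x8000u
#define X_SHIFT   0
#define Y_SHIFT   16

static uint32_t pack_cursor_pos(int x, int y)
{
	uint32_t temp = 0;

	if (x < 0) {
		temp |= POS_SIGN << X_SHIFT;	/* negative X: set sign, keep magnitude */
		x = -x;
	}
	if (y < 0) {
		temp |= POS_SIGN << Y_SHIFT;	/* negative Y: set sign, keep magnitude */
		y = -y;
	}
	temp |= ((uint32_t)x & POS_MASK) << X_SHIFT;
	temp |= ((uint32_t)y & POS_MASK) << Y_SHIFT;
	return temp;
}

int main(void)
{
	/* (-5, 10) -> sign bit set in the X field, magnitudes 5 and 10 */
	printf("position word: 0x%08x\n", pack_cursor_pos(-5, 10));
	return 0;
}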
311
312const struct drm_crtc_funcs mdfld_intel_crtc_funcs = {
313 .cursor_set = mdfld_intel_crtc_cursor_set,
314 .cursor_move = mdfld_intel_crtc_cursor_move,
315 .gamma_set = psb_intel_crtc_gamma_set,
316 .set_config = drm_crtc_helper_set_config,
317 .destroy = psb_intel_crtc_destroy,
318};
319
320static struct drm_device globle_dev;
321
322void mdfld__intel_plane_set_alpha(int enable)
323{
324 struct drm_device *dev = &globle_dev;
325 int dspcntr_reg = DSPACNTR;
326 u32 dspcntr;
327
328 dspcntr = REG_READ(dspcntr_reg);
329
330 if (enable) {
331 dspcntr &= ~DISPPLANE_32BPP_NO_ALPHA;
332 dspcntr |= DISPPLANE_32BPP;
333 } else {
334 dspcntr &= ~DISPPLANE_32BPP;
335 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
336 }
337
338 REG_WRITE(dspcntr_reg, dspcntr);
339}
340
341int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb)
342{
343 struct drm_device *dev = crtc->dev;
344 /* struct drm_i915_master_private *master_priv; */
345 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
346 struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
347 int pipe = psb_intel_crtc->pipe;
348 unsigned long start, offset;
349 int dsplinoff = DSPALINOFF;
350 int dspsurf = DSPASURF;
351 int dspstride = DSPASTRIDE;
352 int dspcntr_reg = DSPACNTR;
353 u32 dspcntr;
354 int ret = 0;
355
356 memcpy(&globle_dev, dev, sizeof(struct drm_device));
357
358 if (!gma_power_begin(dev, true))
359 return 0;
360
361 /* no fb bound */
362 if (!crtc->fb) {
363 dev_err(dev->dev, "No FB bound\n");
364 goto psb_intel_pipe_cleaner;
365 }
366
367 switch (pipe) {
368 case 0:
369 dsplinoff = DSPALINOFF;
370 break;
371 case 1:
372 dsplinoff = DSPBLINOFF;
373 dspsurf = DSPBSURF;
374 dspstride = DSPBSTRIDE;
375 dspcntr_reg = DSPBCNTR;
376 break;
377 case 2:
378 dsplinoff = DSPCLINOFF;
379 dspsurf = DSPCSURF;
380 dspstride = DSPCSTRIDE;
381 dspcntr_reg = DSPCCNTR;
382 break;
383 default:
384 dev_err(dev->dev, "Illegal Pipe Number.\n");
385 return -EINVAL;
386 }
387
388 ret = psb_gtt_pin(psbfb->gtt);
389 if (ret < 0)
390 goto psb_intel_pipe_set_base_exit;
391
392 start = psbfb->gtt->offset;
393 offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
394
395 REG_WRITE(dspstride, crtc->fb->pitches[0]);
396 dspcntr = REG_READ(dspcntr_reg);
397 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
398
399 switch (crtc->fb->bits_per_pixel) {
400 case 8:
401 dspcntr |= DISPPLANE_8BPP;
402 break;
403 case 16:
404 if (crtc->fb->depth == 15)
405 dspcntr |= DISPPLANE_15_16BPP;
406 else
407 dspcntr |= DISPPLANE_16BPP;
408 break;
409 case 24:
410 case 32:
411 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
412 break;
413 default:
414 dev_err(dev->dev, "Unknown color depth\n");
415 ret = -EINVAL;
416 goto psb_intel_pipe_set_base_exit;
417 }
418 REG_WRITE(dspcntr_reg, dspcntr);
419
420 dev_dbg(dev->dev, "Writing base %08lX %08lX %d %d\n",
421 start, offset, x, y);
422
423 REG_WRITE(dsplinoff, offset);
424 REG_READ(dsplinoff);
425 REG_WRITE(dspsurf, start);
426 REG_READ(dspsurf);
427
428psb_intel_pipe_cleaner:
429 /* If there was a previous display we can now unpin it */
430 if (old_fb)
431 psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
432
433psb_intel_pipe_set_base_exit:
434 gma_power_end(dev);
435 return ret;
436}
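
The pan offset written to DSPxLINOFF above is plain pitch arithmetic: offset = y * pitch + x * bytes_per_pixel. A small standalone sketch with made-up pitch and depth values, purely to show the calculation:

#include <stdio.h>

int main(void)
{
	/* Illustrative framebuffer geometry, not real hardware values */
	unsigned int pitch = 2048;	/* bytes per scanline */
	unsigned int bpp = 32;		/* bits per pixel */
	unsigned int x = 100, y = 50;
	unsigned long offset;

	/* Same arithmetic as the DSPxLINOFF value computed above */
	offset = (unsigned long)y * pitch + x * (bpp / 8);
	printf("panning to (%u,%u) -> byte offset %lu\n", x, y, offset);
	return 0;
}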
437
438/**
439 * Disable the pipe, plane and pll.
440 *
441 */
442void mdfld_disable_crtc (struct drm_device *dev, int pipe)
443{
444 int dpll_reg = MRST_DPLL_A;
445 int dspcntr_reg = DSPACNTR;
446 int dspbase_reg = MRST_DSPABASE;
447 int pipeconf_reg = PIPEACONF;
448 u32 gen_fifo_stat_reg = GEN_FIFO_STAT_REG;
449 u32 temp;
450
451 switch (pipe) {
452 case 0:
453 break;
454 case 1:
455 dpll_reg = MDFLD_DPLL_B;
456 dspcntr_reg = DSPBCNTR;
457 dspbase_reg = DSPBSURF;
458 pipeconf_reg = PIPEBCONF;
459 break;
460 case 2:
461 dpll_reg = MRST_DPLL_A;
462 dspcntr_reg = DSPCCNTR;
463 dspbase_reg = MDFLD_DSPCBASE;
464 pipeconf_reg = PIPECCONF;
465 gen_fifo_stat_reg = GEN_FIFO_STAT_REG + MIPIC_REG_OFFSET;
466 break;
467 default:
468		dev_err(dev->dev, "Illegal Pipe Number.\n");
469 return;
470 }
471
472 if (pipe != 1)
473 mdfld_dsi_gen_fifo_ready (dev, gen_fifo_stat_reg, HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
474
475 /* Disable display plane */
476 temp = REG_READ(dspcntr_reg);
477 if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
478 REG_WRITE(dspcntr_reg,
479 temp & ~DISPLAY_PLANE_ENABLE);
480 /* Flush the plane changes */
481 REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
482 REG_READ(dspbase_reg);
483 }
484
485 /* FIXME_JLIU7 MDFLD_PO revisit */
486 /* Wait for vblank for the disable to take effect */
487/* MDFLD_PO_JLIU7 psb_intel_wait_for_vblank(dev); */
488
489 /* Next, disable display pipes */
490 temp = REG_READ(pipeconf_reg);
491 if ((temp & PIPEACONF_ENABLE) != 0) {
492 temp &= ~PIPEACONF_ENABLE;
493 temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
494 REG_WRITE(pipeconf_reg, temp);
495 REG_READ(pipeconf_reg);
496
497		/* Wait for the pipe disable to take effect. */
498 mdfldWaitForPipeDisable(dev, pipe);
499 }
500
501 temp = REG_READ(dpll_reg);
502 if (temp & DPLL_VCO_ENABLE) {
503 if (((pipe != 1) && !((REG_READ(PIPEACONF) | REG_READ(PIPECCONF)) & PIPEACONF_ENABLE))
504 || (pipe == 1)){
505 temp &= ~(DPLL_VCO_ENABLE);
506 REG_WRITE(dpll_reg, temp);
507 REG_READ(dpll_reg);
508 /* Wait for the clocks to turn off. */
509 /* FIXME_MDFLD PO may need more delay */
510 udelay(500);
511
512 if (!(temp & MDFLD_PWR_GATE_EN)) {
513 /* gating power of DPLL */
514 REG_WRITE(dpll_reg, temp | MDFLD_PWR_GATE_EN);
515 /* FIXME_MDFLD PO - change 500 to 1 after PO */
516 udelay(5000);
517 }
518 }
519 }
520
521}
522
523/**
524 * Sets the power management mode of the pipe and plane.
525 *
526 * This code should probably grow support for turning the cursor off and back
527 * on appropriately at the same time as we're turning the pipe off/on.
528 */
529static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
530{
531 struct drm_device *dev = crtc->dev;
532 struct drm_psb_private *dev_priv = dev->dev_private;
533 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
534 int pipe = psb_intel_crtc->pipe;
535 int dpll_reg = MRST_DPLL_A;
536 int dspcntr_reg = DSPACNTR;
537 int dspbase_reg = MRST_DSPABASE;
538 int pipeconf_reg = PIPEACONF;
539 u32 pipestat_reg = PIPEASTAT;
540 u32 gen_fifo_stat_reg = GEN_FIFO_STAT_REG;
541 u32 pipeconf = dev_priv->pipeconf;
542 u32 dspcntr = dev_priv->dspcntr;
543 u32 mipi_enable_reg = MIPIA_DEVICE_READY_REG;
544 u32 temp;
545 bool enabled;
546 int timeout = 0;
547
548 if (!gma_power_begin(dev, true))
549 return;
550
551 /* Ignore if system is already in DSR and in suspended state. */
552 if(/*gbgfxsuspended */0 && dev_priv->dispstatus == false && mode == 3){
553 if(dev_priv->rpm_enabled && pipe == 1){
554 // dev_priv->is_mipi_on = false;
555 pm_request_idle(&dev->pdev->dev);
556 }
557 return;
558 }else if(mode == 0) {
559 //do not need to set gbdispstatus=true in crtc.
560 //this will be set in encoder such as mdfld_dsi_dbi_dpms
561 //gbdispstatus = true;
562 }
563
564/* FIXME_JLIU7 MDFLD_PO replaced w/ the following function */
565/* mdfld_dbi_dpms (struct drm_device *dev, int pipe, bool enabled) */
566
567 switch (pipe) {
568 case 0:
569 break;
570 case 1:
571 dpll_reg = DPLL_B;
572 dspcntr_reg = DSPBCNTR;
573 dspbase_reg = MRST_DSPBBASE;
574 pipeconf_reg = PIPEBCONF;
575 pipeconf = dev_priv->pipeconf1;
576 dspcntr = dev_priv->dspcntr1;
577 dpll_reg = MDFLD_DPLL_B;
578 break;
579 case 2:
580 dpll_reg = MRST_DPLL_A;
581 dspcntr_reg = DSPCCNTR;
582 dspbase_reg = MDFLD_DSPCBASE;
583 pipeconf_reg = PIPECCONF;
584 pipestat_reg = PIPECSTAT;
585 pipeconf = dev_priv->pipeconf2;
586 dspcntr = dev_priv->dspcntr2;
587 gen_fifo_stat_reg = GEN_FIFO_STAT_REG + MIPIC_REG_OFFSET;
588 mipi_enable_reg = MIPIA_DEVICE_READY_REG + MIPIC_REG_OFFSET;
589 break;
590 default:
591 dev_err(dev->dev, "Illegal Pipe Number.\n");
592 return;
593 }
594
595 /* XXX: When our outputs are all unaware of DPMS modes other than off
596 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
597 */
598 switch (mode) {
599 case DRM_MODE_DPMS_ON:
600 case DRM_MODE_DPMS_STANDBY:
601 case DRM_MODE_DPMS_SUSPEND:
602 /* Enable the DPLL */
603 temp = REG_READ(dpll_reg);
604
605 if ((temp & DPLL_VCO_ENABLE) == 0) {
606			/* When ungating the DPLL power, we need to wait 0.5us before enabling the VCO */
607 if (temp & MDFLD_PWR_GATE_EN) {
608 temp &= ~MDFLD_PWR_GATE_EN;
609 REG_WRITE(dpll_reg, temp);
610 /* FIXME_MDFLD PO - change 500 to 1 after PO */
611 udelay(500);
612 }
613
614 REG_WRITE(dpll_reg, temp);
615 REG_READ(dpll_reg);
616 /* FIXME_MDFLD PO - change 500 to 1 after PO */
617 udelay(500);
618
619 REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
620 REG_READ(dpll_reg);
621
622 /**
623 * wait for DSI PLL to lock
624 * NOTE: only need to poll status of pipe 0 and pipe 1,
625 * since both MIPI pipes share the same PLL.
626 */
627 while ((pipe != 2) && (timeout < 20000) && !(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) {
628 udelay(150);
629 timeout ++;
630 }
631 }
632
633 /* Enable the plane */
634 temp = REG_READ(dspcntr_reg);
635 if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
636 REG_WRITE(dspcntr_reg,
637 temp | DISPLAY_PLANE_ENABLE);
638 /* Flush the plane changes */
639 REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
640 }
641
642 /* Enable the pipe */
643 temp = REG_READ(pipeconf_reg);
644 if ((temp & PIPEACONF_ENABLE) == 0) {
645 REG_WRITE(pipeconf_reg, pipeconf);
646
647			/* Wait for the pipe enable to take effect. */
648 mdfldWaitForPipeEnable(dev, pipe);
649 }
650
651 /*workaround for sighting 3741701 Random X blank display*/
652 /*perform w/a in video mode only on pipe A or C*/
653 if ((pipe == 0 || pipe == 2) &&
654 (mdfld_panel_dpi(dev) == true)) {
655 REG_WRITE(pipestat_reg, REG_READ(pipestat_reg));
656 msleep(100);
657 if(PIPE_VBLANK_STATUS & REG_READ(pipestat_reg)) {
658 printk(KERN_ALERT "OK");
659 } else {
660 printk(KERN_ALERT "STUCK!!!!");
661 /*shutdown controller*/
662 temp = REG_READ(dspcntr_reg);
663 REG_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
664 REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
665 /*mdfld_dsi_dpi_shut_down(dev, pipe);*/
666 REG_WRITE(0xb048, 1);
667 msleep(100);
668 temp = REG_READ(pipeconf_reg);
669 temp &= ~PIPEACONF_ENABLE;
670 REG_WRITE(pipeconf_reg, temp);
671 msleep(100); /*wait for pipe disable*/
672 /*printk(KERN_ALERT "70008 is %x\n", REG_READ(0x70008));
673 printk(KERN_ALERT "b074 is %x\n", REG_READ(0xb074));*/
674 REG_WRITE(mipi_enable_reg, 0);
675 msleep(100);
676 printk(KERN_ALERT "70008 is %x\n", REG_READ(0x70008));
677 printk(KERN_ALERT "b074 is %x\n", REG_READ(0xb074));
678 REG_WRITE(0xb004, REG_READ(0xb004));
679 /* try to bring the controller back up again*/
680 REG_WRITE(mipi_enable_reg, 1);
681 temp = REG_READ(dspcntr_reg);
682 REG_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE);
683 REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
684 /*mdfld_dsi_dpi_turn_on(dev, pipe);*/
685 REG_WRITE(0xb048, 2);
686 msleep(100);
687 temp = REG_READ(pipeconf_reg);
688 temp |= PIPEACONF_ENABLE;
689 REG_WRITE(pipeconf_reg, temp);
690 }
691 }
692
693 psb_intel_crtc_load_lut(crtc);
694
695 /* Give the overlay scaler a chance to enable
696 if it's on this pipe */
697 /* psb_intel_crtc_dpms_video(crtc, true); TODO */
698
699 break;
700 case DRM_MODE_DPMS_OFF:
701 /* Give the overlay scaler a chance to disable
702 * if it's on this pipe */
703 /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
704 if (pipe != 1)
705 mdfld_dsi_gen_fifo_ready (dev, gen_fifo_stat_reg, HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
706
707 /* Disable the VGA plane that we never use */
708 REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
709
710 /* Disable display plane */
711 temp = REG_READ(dspcntr_reg);
712 if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
713 REG_WRITE(dspcntr_reg,
714 temp & ~DISPLAY_PLANE_ENABLE);
715 /* Flush the plane changes */
716 REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
717 REG_READ(dspbase_reg);
718 }
719
720 /* FIXME_JLIU7 MDFLD_PO revisit */
721 /* Wait for vblank for the disable to take effect */
722// MDFLD_PO_JLIU7 psb_intel_wait_for_vblank(dev);
723
724 /* Next, disable display pipes */
725 temp = REG_READ(pipeconf_reg);
726 if ((temp & PIPEACONF_ENABLE) != 0) {
727 temp &= ~PIPEACONF_ENABLE;
728 temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
729 REG_WRITE(pipeconf_reg, temp);
730// REG_WRITE(pipeconf_reg, 0);
731 REG_READ(pipeconf_reg);
732
733			/* Wait for the pipe disable to take effect. */
734 mdfldWaitForPipeDisable(dev, pipe);
735 }
736
737 temp = REG_READ(dpll_reg);
738 if (temp & DPLL_VCO_ENABLE) {
739 if (((pipe != 1) && !((REG_READ(PIPEACONF) | REG_READ(PIPECCONF)) & PIPEACONF_ENABLE))
740 || (pipe == 1)){
741 temp &= ~(DPLL_VCO_ENABLE);
742 REG_WRITE(dpll_reg, temp);
743 REG_READ(dpll_reg);
744 /* Wait for the clocks to turn off. */
745 /* FIXME_MDFLD PO may need more delay */
746 udelay(500);
747#if 0 /* MDFLD_PO_JLIU7 */
748 if (!(temp & MDFLD_PWR_GATE_EN)) {
749 /* gating power of DPLL */
750 REG_WRITE(dpll_reg, temp | MDFLD_PWR_GATE_EN);
751 /* FIXME_MDFLD PO - change 500 to 1 after PO */
752 udelay(5000);
753 }
754#endif /* MDFLD_PO_JLIU7 */
755 }
756 }
757 break;
758 }
759
760 enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
761
762#if 0 /* JB: Add vblank support later */
763 if (enabled)
764 dev_priv->vblank_pipe |= (1 << pipe);
765 else
766 dev_priv->vblank_pipe &= ~(1 << pipe);
767#endif
768
769 gma_power_end(dev);
770}
771
772
773#define MDFLD_LIMT_DPLL_19 0
774#define MDFLD_LIMT_DPLL_25 1
775#define MDFLD_LIMT_DPLL_83 2
776#define MDFLD_LIMT_DPLL_100 3
777#define MDFLD_LIMT_DSIPLL_19 4
778#define MDFLD_LIMT_DSIPLL_25 5
779#define MDFLD_LIMT_DSIPLL_83 6
780#define MDFLD_LIMT_DSIPLL_100 7
781
782#define MDFLD_DOT_MIN 19750 /* FIXME_MDFLD JLIU7 need to find out min & max for MDFLD */
783#define MDFLD_DOT_MAX 120000
784#define MDFLD_DPLL_M_MIN_19 113
785#define MDFLD_DPLL_M_MAX_19 155
786#define MDFLD_DPLL_P1_MIN_19 2
787#define MDFLD_DPLL_P1_MAX_19 10
788#define MDFLD_DPLL_M_MIN_25 101
789#define MDFLD_DPLL_M_MAX_25 130
790#define MDFLD_DPLL_P1_MIN_25 2
791#define MDFLD_DPLL_P1_MAX_25 10
792#define MDFLD_DPLL_M_MIN_83 64
793#define MDFLD_DPLL_M_MAX_83 64
794#define MDFLD_DPLL_P1_MIN_83 2
795#define MDFLD_DPLL_P1_MAX_83 2
796#define MDFLD_DPLL_M_MIN_100 64
797#define MDFLD_DPLL_M_MAX_100 64
798#define MDFLD_DPLL_P1_MIN_100 2
799#define MDFLD_DPLL_P1_MAX_100 2
800#define MDFLD_DSIPLL_M_MIN_19 131
801#define MDFLD_DSIPLL_M_MAX_19 175
802#define MDFLD_DSIPLL_P1_MIN_19 3
803#define MDFLD_DSIPLL_P1_MAX_19 8
804#define MDFLD_DSIPLL_M_MIN_25 97
805#define MDFLD_DSIPLL_M_MAX_25 140
806#define MDFLD_DSIPLL_P1_MIN_25 3
807#define MDFLD_DSIPLL_P1_MAX_25 9
808#define MDFLD_DSIPLL_M_MIN_83 33
809#define MDFLD_DSIPLL_M_MAX_83 92
810#define MDFLD_DSIPLL_P1_MIN_83 2
811#define MDFLD_DSIPLL_P1_MAX_83 3
812#define MDFLD_DSIPLL_M_MIN_100 97
813#define MDFLD_DSIPLL_M_MAX_100 140
814#define MDFLD_DSIPLL_P1_MIN_100 3
815#define MDFLD_DSIPLL_P1_MAX_100 9
816
817static const struct mdfld_limit_t mdfld_limits[] = {
818 { /* MDFLD_LIMT_DPLL_19 */
819 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
820 .m = {.min = MDFLD_DPLL_M_MIN_19, .max = MDFLD_DPLL_M_MAX_19},
821 .p1 = {.min = MDFLD_DPLL_P1_MIN_19, .max = MDFLD_DPLL_P1_MAX_19},
822 },
823 { /* MDFLD_LIMT_DPLL_25 */
824 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
825 .m = {.min = MDFLD_DPLL_M_MIN_25, .max = MDFLD_DPLL_M_MAX_25},
826 .p1 = {.min = MDFLD_DPLL_P1_MIN_25, .max = MDFLD_DPLL_P1_MAX_25},
827 },
828 { /* MDFLD_LIMT_DPLL_83 */
829 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
830 .m = {.min = MDFLD_DPLL_M_MIN_83, .max = MDFLD_DPLL_M_MAX_83},
831 .p1 = {.min = MDFLD_DPLL_P1_MIN_83, .max = MDFLD_DPLL_P1_MAX_83},
832 },
833 { /* MDFLD_LIMT_DPLL_100 */
834 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
835 .m = {.min = MDFLD_DPLL_M_MIN_100, .max = MDFLD_DPLL_M_MAX_100},
836 .p1 = {.min = MDFLD_DPLL_P1_MIN_100, .max = MDFLD_DPLL_P1_MAX_100},
837 },
838 { /* MDFLD_LIMT_DSIPLL_19 */
839 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
840 .m = {.min = MDFLD_DSIPLL_M_MIN_19, .max = MDFLD_DSIPLL_M_MAX_19},
841 .p1 = {.min = MDFLD_DSIPLL_P1_MIN_19, .max = MDFLD_DSIPLL_P1_MAX_19},
842 },
843 { /* MDFLD_LIMT_DSIPLL_25 */
844 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
845 .m = {.min = MDFLD_DSIPLL_M_MIN_25, .max = MDFLD_DSIPLL_M_MAX_25},
846 .p1 = {.min = MDFLD_DSIPLL_P1_MIN_25, .max = MDFLD_DSIPLL_P1_MAX_25},
847 },
848 { /* MDFLD_LIMT_DSIPLL_83 */
849 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
850 .m = {.min = MDFLD_DSIPLL_M_MIN_83, .max = MDFLD_DSIPLL_M_MAX_83},
851 .p1 = {.min = MDFLD_DSIPLL_P1_MIN_83, .max = MDFLD_DSIPLL_P1_MAX_83},
852 },
853 { /* MDFLD_LIMT_DSIPLL_100 */
854 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
855 .m = {.min = MDFLD_DSIPLL_M_MIN_100, .max = MDFLD_DSIPLL_M_MAX_100},
856 .p1 = {.min = MDFLD_DSIPLL_P1_MIN_100, .max = MDFLD_DSIPLL_P1_MAX_100},
857 },
858};
859
860#define MDFLD_M_MIN 21
861#define MDFLD_M_MAX 180
862static const u32 mdfld_m_converts[] = {
863/* M configuration table from 9-bit LFSR table */
864 224, 368, 440, 220, 366, 439, 219, 365, 182, 347, /* 21 - 30 */
865 173, 342, 171, 85, 298, 149, 74, 37, 18, 265, /* 31 - 40 */
866 388, 194, 353, 432, 216, 108, 310, 155, 333, 166, /* 41 - 50 */
867 83, 41, 276, 138, 325, 162, 337, 168, 340, 170, /* 51 - 60 */
868 341, 426, 469, 234, 373, 442, 221, 110, 311, 411, /* 61 - 70 */
869 461, 486, 243, 377, 188, 350, 175, 343, 427, 213, /* 71 - 80 */
870 106, 53, 282, 397, 354, 227, 113, 56, 284, 142, /* 81 - 90 */
871 71, 35, 273, 136, 324, 418, 465, 488, 500, 506, /* 91 - 100 */
872 253, 126, 63, 287, 399, 455, 483, 241, 376, 444, /* 101 - 110 */
873 478, 495, 503, 251, 381, 446, 479, 239, 375, 443, /* 111 - 120 */
874 477, 238, 119, 315, 157, 78, 295, 147, 329, 420, /* 121 - 130 */
875 210, 105, 308, 154, 77, 38, 275, 137, 68, 290, /* 131 - 140 */
876 145, 328, 164, 82, 297, 404, 458, 485, 498, 249, /* 141 - 150 */
877 380, 190, 351, 431, 471, 235, 117, 314, 413, 206, /* 151 - 160 */
878 103, 51, 25, 12, 262, 387, 193, 96, 48, 280, /* 161 - 170 */
879 396, 198, 99, 305, 152, 76, 294, 403, 457, 228, /* 171 - 180 */
880};
881
882static const struct mdfld_limit_t *mdfld_limit(struct drm_crtc *crtc)
883{
884 const struct mdfld_limit_t *limit = NULL;
885 struct drm_device *dev = crtc->dev;
886 struct drm_psb_private *dev_priv = dev->dev_private;
887
888 if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)
889 || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI2)) {
890 if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19))
891 limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_19];
892 else if (ksel == KSEL_BYPASS_25)
893 limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_25];
894 else if ((ksel == KSEL_BYPASS_83_100) && (dev_priv->core_freq == 166))
895 limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_83];
896 else if ((ksel == KSEL_BYPASS_83_100) &&
897 (dev_priv->core_freq == 100 || dev_priv->core_freq == 200))
898 limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_100];
899 } else if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
900 if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19))
901 limit = &mdfld_limits[MDFLD_LIMT_DPLL_19];
902 else if (ksel == KSEL_BYPASS_25)
903 limit = &mdfld_limits[MDFLD_LIMT_DPLL_25];
904 else if ((ksel == KSEL_BYPASS_83_100) && (dev_priv->core_freq == 166))
905 limit = &mdfld_limits[MDFLD_LIMT_DPLL_83];
906 else if ((ksel == KSEL_BYPASS_83_100) &&
907 (dev_priv->core_freq == 100 || dev_priv->core_freq == 200))
908 limit = &mdfld_limits[MDFLD_LIMT_DPLL_100];
909 } else {
910 limit = NULL;
911 dev_err(dev->dev, "mdfld_limit Wrong display type.\n");
912 }
913
914 return limit;
915}
916
917/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
918static void mdfld_clock(int refclk, struct mdfld_intel_clock_t *clock)
919{
920 clock->dot = (refclk * clock->m) / clock->p1;
921}
922
923/**
924 * Returns a set of divisors for the desired target clock with the given refclk,
925 * or FALSE. Divisor values are the actual divisors for the computed dot clock.
926 */
927static bool
928mdfldFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
929 struct mdfld_intel_clock_t *best_clock)
930{
931 struct mdfld_intel_clock_t clock;
932 const struct mdfld_limit_t *limit = mdfld_limit(crtc);
933 int err = target;
934
935 memset(best_clock, 0, sizeof(*best_clock));
936
937 for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
938 for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
939 clock.p1++) {
940 int this_err;
941
942 mdfld_clock(refclk, &clock);
943
944 this_err = abs(clock.dot - target);
945 if (this_err < err) {
946 *best_clock = clock;
947 err = this_err;
948 }
949 }
950 }
951 return err != target;
952}
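
mdfldFindBestPLL() is a brute-force search over the selected limit table, using dot = refclk * m / p1 from mdfld_clock() and keeping the m/p1 pair with the smallest error against the target. A self-contained sketch of the same search, using the 19.2 MHz DSI PLL limits from mdfld_limits[] above; the target value is an arbitrary example:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* 19.2 MHz DSI PLL limits taken from mdfld_limits[] above */
	const int refclk = 19200;		/* kHz */
	const int m_min = 131, m_max = 175;
	const int p1_min = 3, p1_max = 8;
	/* Arbitrary example target; the driver derives clk_tmp from the mode */
	const int target = 412432;
	int best_m = 0, best_p1 = 0, err = target;
	int m, p1;

	for (m = m_min; m <= m_max; m++) {
		for (p1 = p1_min; p1 <= p1_max; p1++) {
			int dot = refclk * m / p1;	/* mdfld_clock() */
			int this_err = abs(dot - target);

			if (this_err < err) {
				err = this_err;
				best_m = m;
				best_p1 = p1;
			}
		}
	}
	printf("m=%d p1=%d dot=%d kHz (err %d kHz)\n",
	       best_m, best_p1, refclk * best_m / best_p1, err);
	return 0;
}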
953
954/**
955 * Return the pipe currently connected to the panel fitter,
956 * or -1 if the panel fitter is not present or not in use
957 */
958static int mdfld_panel_fitter_pipe(struct drm_device *dev)
959{
960 u32 pfit_control;
961
962 pfit_control = REG_READ(PFIT_CONTROL);
963
964 /* See if the panel fitter is in use */
965 if ((pfit_control & PFIT_ENABLE) == 0)
966 return -1;
967 return (pfit_control >> 29) & 3;
968}
969
970static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
971 struct drm_display_mode *mode,
972 struct drm_display_mode *adjusted_mode,
973 int x, int y,
974 struct drm_framebuffer *old_fb)
975{
976 struct drm_device *dev = crtc->dev;
977 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
978 struct drm_psb_private *dev_priv = dev->dev_private;
979 int pipe = psb_intel_crtc->pipe;
980 int fp_reg = MRST_FPA0;
981 int dpll_reg = MRST_DPLL_A;
982 int dspcntr_reg = DSPACNTR;
983 int pipeconf_reg = PIPEACONF;
984 int htot_reg = HTOTAL_A;
985 int hblank_reg = HBLANK_A;
986 int hsync_reg = HSYNC_A;
987 int vtot_reg = VTOTAL_A;
988 int vblank_reg = VBLANK_A;
989 int vsync_reg = VSYNC_A;
990 int dspsize_reg = DSPASIZE;
991 int dsppos_reg = DSPAPOS;
992 int pipesrc_reg = PIPEASRC;
993 u32 *pipeconf = &dev_priv->pipeconf;
994 u32 *dspcntr = &dev_priv->dspcntr;
995 int refclk = 0;
996 int clk_n = 0, clk_p2 = 0, clk_byte = 1, clk = 0, m_conv = 0, clk_tmp = 0;
997 struct mdfld_intel_clock_t clock;
998 bool ok;
999 u32 dpll = 0, fp = 0;
1000 bool is_crt = false, is_lvds = false, is_tv = false;
1001 bool is_mipi = false, is_mipi2 = false, is_hdmi = false;
1002 struct drm_mode_config *mode_config = &dev->mode_config;
1003 struct psb_intel_output *psb_intel_output = NULL;
1004 uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
1005 struct drm_encoder *encoder;
1006 struct drm_connector *connector;
1007 int timeout = 0;
1008
1009 dev_dbg(dev->dev, "pipe = 0x%x \n", pipe);
1010
1011 switch (pipe) {
1012 case 0:
1013 break;
1014 case 1:
1015 fp_reg = FPB0;
1016 dpll_reg = DPLL_B;
1017 dspcntr_reg = DSPBCNTR;
1018 pipeconf_reg = PIPEBCONF;
1019 htot_reg = HTOTAL_B;
1020 hblank_reg = HBLANK_B;
1021 hsync_reg = HSYNC_B;
1022 vtot_reg = VTOTAL_B;
1023 vblank_reg = VBLANK_B;
1024 vsync_reg = VSYNC_B;
1025 dspsize_reg = DSPBSIZE;
1026 dsppos_reg = DSPBPOS;
1027 pipesrc_reg = PIPEBSRC;
1028 pipeconf = &dev_priv->pipeconf1;
1029 dspcntr = &dev_priv->dspcntr1;
1030 fp_reg = MDFLD_DPLL_DIV0;
1031 dpll_reg = MDFLD_DPLL_B;
1032 break;
1033 case 2:
1034 dpll_reg = MRST_DPLL_A;
1035 dspcntr_reg = DSPCCNTR;
1036 pipeconf_reg = PIPECCONF;
1037 htot_reg = HTOTAL_C;
1038 hblank_reg = HBLANK_C;
1039 hsync_reg = HSYNC_C;
1040 vtot_reg = VTOTAL_C;
1041 vblank_reg = VBLANK_C;
1042 vsync_reg = VSYNC_C;
1043 dspsize_reg = DSPCSIZE;
1044 dsppos_reg = DSPCPOS;
1045 pipesrc_reg = PIPECSRC;
1046 pipeconf = &dev_priv->pipeconf2;
1047 dspcntr = &dev_priv->dspcntr2;
1048 break;
1049 default:
1050		DRM_ERROR("Illegal Pipe Number.\n");
1051 return 0;
1052 }
1053
1054 dev_dbg(dev->dev, "adjusted_hdisplay = %d\n",
1055 adjusted_mode->hdisplay);
1056 dev_dbg(dev->dev, "adjusted_vdisplay = %d\n",
1057 adjusted_mode->vdisplay);
1058 dev_dbg(dev->dev, "adjusted_hsync_start = %d\n",
1059 adjusted_mode->hsync_start);
1060 dev_dbg(dev->dev, "adjusted_hsync_end = %d\n",
1061 adjusted_mode->hsync_end);
1062 dev_dbg(dev->dev, "adjusted_htotal = %d\n",
1063 adjusted_mode->htotal);
1064 dev_dbg(dev->dev, "adjusted_vsync_start = %d\n",
1065 adjusted_mode->vsync_start);
1066 dev_dbg(dev->dev, "adjusted_vsync_end = %d\n",
1067 adjusted_mode->vsync_end);
1068 dev_dbg(dev->dev, "adjusted_vtotal = %d\n",
1069 adjusted_mode->vtotal);
1070 dev_dbg(dev->dev, "adjusted_clock = %d\n",
1071 adjusted_mode->clock);
1072 dev_dbg(dev->dev, "hdisplay = %d\n",
1073 mode->hdisplay);
1074 dev_dbg(dev->dev, "vdisplay = %d\n",
1075 mode->vdisplay);
1076
1077 if (!gma_power_begin(dev, true))
1078 return 0;
1079
1080 memcpy(&psb_intel_crtc->saved_mode, mode, sizeof(struct drm_display_mode));
1081 memcpy(&psb_intel_crtc->saved_adjusted_mode, adjusted_mode, sizeof(struct drm_display_mode));
1082
1083 list_for_each_entry(connector, &mode_config->connector_list, head) {
1084
1085 encoder = connector->encoder;
1086
1087 if(!encoder)
1088 continue;
1089
1090 if (encoder->crtc != crtc)
1091 continue;
1092
1093 psb_intel_output = to_psb_intel_output(connector);
1094
1095 dev_dbg(dev->dev, "output->type = 0x%x \n", psb_intel_output->type);
1096
1097 switch (psb_intel_output->type) {
1098 case INTEL_OUTPUT_LVDS:
1099 is_lvds = true;
1100 break;
1101 case INTEL_OUTPUT_TVOUT:
1102 is_tv = true;
1103 break;
1104 case INTEL_OUTPUT_ANALOG:
1105 is_crt = true;
1106 break;
1107 case INTEL_OUTPUT_MIPI:
1108 is_mipi = true;
1109 break;
1110 case INTEL_OUTPUT_MIPI2:
1111 is_mipi2 = true;
1112 break;
1113 case INTEL_OUTPUT_HDMI:
1114 is_hdmi = true;
1115 break;
1116 }
1117 }
1118
1119 /* Disable the VGA plane that we never use */
1120 REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
1121
1122 /* Disable the panel fitter if it was on our pipe */
1123 if (mdfld_panel_fitter_pipe(dev) == pipe)
1124 REG_WRITE(PFIT_CONTROL, 0);
1125
1126 /* pipesrc and dspsize control the size that is scaled from,
1127 * which should always be the user's requested size.
1128 */
1129 if (pipe == 1) {
1130 /* FIXME: To make HDMI display with 864x480 (TPO), 480x864 (PYR) or 480x854 (TMD), set the sprite
1131		 * width/height and source image size registers with the adjusted mode for pipe B. */
1132
1133 /* The defined sprite rectangle must always be completely contained within the displayable
1134 * area of the screen image (frame buffer). */
1135 REG_WRITE(dspsize_reg, ((MIN(mode->crtc_vdisplay, adjusted_mode->crtc_vdisplay) - 1) << 16)
1136 | (MIN(mode->crtc_hdisplay, adjusted_mode->crtc_hdisplay) - 1));
1137 /* Set the CRTC with encoder mode. */
1138 REG_WRITE(pipesrc_reg, ((mode->crtc_hdisplay - 1) << 16)
1139 | (mode->crtc_vdisplay - 1));
1140 } else {
1141 REG_WRITE(dspsize_reg, ((mode->crtc_vdisplay - 1) << 16) | (mode->crtc_hdisplay - 1));
1142 REG_WRITE(pipesrc_reg, ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1));
1143 }
1144
1145 REG_WRITE(dsppos_reg, 0);
1146
1147 if (psb_intel_output)
1148 drm_connector_property_get_value(&psb_intel_output->base,
1149 dev->mode_config.scaling_mode_property, &scalingType);
1150
1151 if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
1152 /*
1153 * Medfield doesn't have register support for centering so
1154 * we need to mess with the h/vblank and h/vsync start and
1155 * ends to get central
1156 */
1157 int offsetX = 0, offsetY = 0;
1158
1159 offsetX = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
1160 offsetY = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
1161
1162 REG_WRITE(htot_reg, (mode->crtc_hdisplay - 1) |
1163 ((adjusted_mode->crtc_htotal - 1) << 16));
1164 REG_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) |
1165 ((adjusted_mode->crtc_vtotal - 1) << 16));
1166 REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - offsetX - 1) |
1167 ((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
1168 REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - offsetX - 1) |
1169 ((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
1170 REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - offsetY - 1) |
1171 ((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
1172 REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - offsetY - 1) |
1173 ((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
1174 } else {
1175 REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
1176 ((adjusted_mode->crtc_htotal - 1) << 16));
1177 REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
1178 ((adjusted_mode->crtc_vtotal - 1) << 16));
1179 REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
1180 ((adjusted_mode->crtc_hblank_end - 1) << 16));
1181 REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
1182 ((adjusted_mode->crtc_hsync_end - 1) << 16));
1183 REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
1184 ((adjusted_mode->crtc_vblank_end - 1) << 16));
1185 REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
1186 ((adjusted_mode->crtc_vsync_end - 1) << 16));
1187 }
1188
1189 /* Flush the plane changes */
1190 {
1191 struct drm_crtc_helper_funcs *crtc_funcs =
1192 crtc->helper_private;
1193 crtc_funcs->mode_set_base(crtc, x, y, old_fb);
1194 }
1195
1196 /* setup pipeconf */
1197 *pipeconf = PIPEACONF_ENABLE; /* FIXME_JLIU7 REG_READ(pipeconf_reg); */
1198
1199 /* Set up the display plane register */
1200 *dspcntr = REG_READ(dspcntr_reg);
1201 *dspcntr |= pipe << DISPPLANE_SEL_PIPE_POS;
1202 *dspcntr |= DISPLAY_PLANE_ENABLE;
1203/* MDFLD_PO_JLIU7 dspcntr |= DISPPLANE_BOTTOM; */
1204/* MDFLD_PO_JLIU7 dspcntr |= DISPPLANE_GAMMA_ENABLE; */
1205
1206 if (is_mipi2)
1207 {
1208 goto mrst_crtc_mode_set_exit;
1209 }
1210/* FIXME JLIU7 Add MDFLD HDMI supports */
1211/* FIXME_MDFLD JLIU7 DSIPLL clock *= 8? */
1212/* FIXME_MDFLD JLIU7 need to revist for dual MIPI supports */
1213 clk = adjusted_mode->clock;
1214
1215 if (is_hdmi) {
1216 if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19))
1217 {
1218 refclk = 19200;
1219
1220 if (is_mipi || is_mipi2)
1221 {
1222 clk_n = 1, clk_p2 = 8;
1223 } else if (is_hdmi) {
1224 clk_n = 1, clk_p2 = 10;
1225 }
1226 } else if (ksel == KSEL_BYPASS_25) {
1227 refclk = 25000;
1228
1229 if (is_mipi || is_mipi2)
1230 {
1231 clk_n = 1, clk_p2 = 8;
1232 } else if (is_hdmi) {
1233 clk_n = 1, clk_p2 = 10;
1234 }
1235 } else if ((ksel == KSEL_BYPASS_83_100) && (dev_priv->core_freq == 166)) {
1236 refclk = 83000;
1237
1238 if (is_mipi || is_mipi2)
1239 {
1240 clk_n = 4, clk_p2 = 8;
1241 } else if (is_hdmi) {
1242 clk_n = 4, clk_p2 = 10;
1243 }
1244 } else if ((ksel == KSEL_BYPASS_83_100) &&
1245 (dev_priv->core_freq == 100 || dev_priv->core_freq == 200)) {
1246 refclk = 100000;
1247 if (is_mipi || is_mipi2)
1248 {
1249 clk_n = 4, clk_p2 = 8;
1250 } else if (is_hdmi) {
1251 clk_n = 4, clk_p2 = 10;
1252 }
1253 }
1254
1255 if (is_mipi)
1256 clk_byte = dev_priv->bpp / 8;
1257 else if (is_mipi2)
1258 clk_byte = dev_priv->bpp2 / 8;
1259
1260 clk_tmp = clk * clk_n * clk_p2 * clk_byte;
1261
1262 dev_dbg(dev->dev, "clk = %d, clk_n = %d, clk_p2 = %d. \n", clk, clk_n, clk_p2);
1263 dev_dbg(dev->dev, "adjusted_mode->clock = %d, clk_tmp = %d. \n", adjusted_mode->clock, clk_tmp);
1264
1265 ok = mdfldFindBestPLL(crtc, clk_tmp, refclk, &clock);
1266
1267 if (!ok) {
1268 dev_err(dev->dev,
1269			"mdfldFindBestPLL failed in mdfld_crtc_mode_set.\n");
1270 } else {
1271 m_conv = mdfld_m_converts[(clock.m - MDFLD_M_MIN)];
1272
1273		dev_dbg(dev->dev, "dot clock = %d, "
1274 "m = %d, p1 = %d, m_conv = %d. \n", clock.dot, clock.m,
1275 clock.p1, m_conv);
1276 }
1277
1278 dpll = REG_READ(dpll_reg);
1279
1280 if (dpll & DPLL_VCO_ENABLE) {
1281 dpll &= ~DPLL_VCO_ENABLE;
1282 REG_WRITE(dpll_reg, dpll);
1283 REG_READ(dpll_reg);
1284
1285 /* FIXME jliu7 check the DPLL lock bit PIPEACONF[29] */
1286 /* FIXME_MDFLD PO - change 500 to 1 after PO */
1287 udelay(500);
1288
1289 /* reset M1, N1 & P1 */
1290 REG_WRITE(fp_reg, 0);
1291 dpll &= ~MDFLD_P1_MASK;
1292 REG_WRITE(dpll_reg, dpll);
1293 /* FIXME_MDFLD PO - change 500 to 1 after PO */
1294 udelay(500);
1295 }
1296
1297	/* When ungating the DPLL power, we need to wait 0.5us before enabling the VCO */
1298 if (dpll & MDFLD_PWR_GATE_EN) {
1299 dpll &= ~MDFLD_PWR_GATE_EN;
1300 REG_WRITE(dpll_reg, dpll);
1301 /* FIXME_MDFLD PO - change 500 to 1 after PO */
1302 udelay(500);
1303 }
1304
1305 dpll = 0;
1306
1307#if 0 /* FIXME revisit later */
1308 if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19) || (ksel == KSEL_BYPASS_25)) {
1309 dpll &= ~MDFLD_INPUT_REF_SEL;
1310 } else if (ksel == KSEL_BYPASS_83_100) {
1311 dpll |= MDFLD_INPUT_REF_SEL;
1312 }
1313#endif /* FIXME revisit later */
1314
1315 if (is_hdmi)
1316 dpll |= MDFLD_VCO_SEL;
1317
1318 fp = (clk_n / 2) << 16;
1319 fp |= m_conv;
1320
1321 /* compute bitmask from p1 value */
1322 dpll |= (1 << (clock.p1 - 2)) << 17;
1323
1324#if 0 /* 1080p30 & 720p */
1325 dpll = 0x00050000;
1326 fp = 0x000001be;
1327#endif
1328#if 0 /* 480p */
1329 dpll = 0x02010000;
1330 fp = 0x000000d2;
1331#endif
1332 } else {
1333#if 0 /*DBI_TPO_480x864*/
1334 dpll = 0x00020000;
1335 fp = 0x00000156;
1336#endif /* DBI_TPO_480x864 */ /* get from spec. */
1337
1338 dpll = 0x00800000;
1339 fp = 0x000000c1;
1340}
1341
1342 REG_WRITE(fp_reg, fp);
1343 REG_WRITE(dpll_reg, dpll);
1344 /* FIXME_MDFLD PO - change 500 to 1 after PO */
1345 udelay(500);
1346
1347 dpll |= DPLL_VCO_ENABLE;
1348 REG_WRITE(dpll_reg, dpll);
1349 REG_READ(dpll_reg);
1350
1351 /* wait for DSI PLL to lock */
1352 while ((timeout < 20000) && !(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) {
1353 udelay(150);
1354 timeout ++;
1355 }
1356
1357 if (is_mipi)
1358 goto mrst_crtc_mode_set_exit;
1359
1360 dev_dbg(dev->dev, "is_mipi = 0x%x \n", is_mipi);
1361
1362 REG_WRITE(pipeconf_reg, *pipeconf);
1363 REG_READ(pipeconf_reg);
1364
1365	/* Wait for the pipe enable to take effect. */
1366//FIXME_JLIU7 HDMI mrstWaitForPipeEnable(dev);
1367
1368 REG_WRITE(dspcntr_reg, *dspcntr);
1369 psb_intel_wait_for_vblank(dev);
1370
1371mrst_crtc_mode_set_exit:
1372
1373 gma_power_end(dev);
1374
1375 return 0;
1376}
1377
1378static void mdfld_crtc_prepare(struct drm_crtc *crtc)
1379{
1380 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
1381 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
1382}
1383
1384static void mdfld_crtc_commit(struct drm_crtc *crtc)
1385{
1386 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
1387 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
1388}
1389
1390static bool mdfld_crtc_mode_fixup(struct drm_crtc *crtc,
1391 struct drm_display_mode *mode,
1392 struct drm_display_mode *adjusted_mode)
1393{
1394 return true;
1395}
1396
1397const struct drm_crtc_helper_funcs mdfld_helper_funcs = {
1398 .dpms = mdfld_crtc_dpms,
1399 .mode_fixup = mdfld_crtc_mode_fixup,
1400 .mode_set = mdfld_crtc_mode_set,
1401 .mode_set_base = mdfld__intel_pipe_set_base,
1402 .prepare = mdfld_crtc_prepare,
1403 .commit = mdfld_crtc_commit,
1404};
diff --git a/drivers/staging/gma500/mdfld_msic.h b/drivers/staging/gma500/mdfld_msic.h
deleted file mode 100644
index a7ad65472491..000000000000
--- a/drivers/staging/gma500/mdfld_msic.h
+++ /dev/null
@@ -1,31 +0,0 @@
1/*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Jim Liu <jim.liu@intel.com>
25 */
26
27#define MSIC_PCI_DEVICE_ID 0x831
28
29int msic_regsiter_driver(void);
30int msic_unregister_driver(void);
31extern void hpd_notify_um(void);
diff --git a/drivers/staging/gma500/mdfld_output.c b/drivers/staging/gma500/mdfld_output.c
deleted file mode 100644
index eabf53d58f92..000000000000
--- a/drivers/staging/gma500/mdfld_output.c
+++ /dev/null
@@ -1,171 +0,0 @@
1/*
2 * Copyright (c) 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Thomas Eaton <thomas.g.eaton@intel.com>
25 * Scott Rowe <scott.m.rowe@intel.com>
26*/
27
28#include <linux/init.h>
29#include <linux/moduleparam.h>
30#include "mdfld_dsi_dbi.h"
31#include "mdfld_dsi_dpi.h"
32#include "mdfld_dsi_output.h"
33#include "mdfld_output.h"
34#include "mdfld_dsi_dbi_dpu.h"
35
36#include "displays/tpo_cmd.h"
37#include "displays/tpo_vid.h"
38#include "displays/tmd_cmd.h"
39#include "displays/tmd_vid.h"
40#include "displays/pyr_cmd.h"
41#include "displays/pyr_vid.h"
42/* #include "displays/hdmi.h" */
43
44static int mdfld_dual_mipi;
45static int mdfld_hdmi;
46static int mdfld_dpu;
47
48module_param(mdfld_dual_mipi, int, 0600);
49MODULE_PARM_DESC(mdfld_dual_mipi, "Enable dual MIPI configuration");
50module_param(mdfld_hdmi, int, 0600);
51MODULE_PARM_DESC(mdfld_hdmi, "Enable Medfield HDMI");
52module_param(mdfld_dpu, int, 0600);
53MODULE_PARM_DESC(mdfld_dpu, "Enable Medfield DPU");
54
55/* For now a single type per device is all we cope with */
56int mdfld_get_panel_type(struct drm_device *dev, int pipe)
57{
58 struct drm_psb_private *dev_priv = dev->dev_private;
59 return dev_priv->panel_id;
60}
61
62int mdfld_panel_dpi(struct drm_device *dev)
63{
64 struct drm_psb_private *dev_priv = dev->dev_private;
65
66 switch (dev_priv->panel_id) {
67 case TMD_VID:
68 case TPO_VID:
69 case PYR_VID:
70 return true;
71 case TMD_CMD:
72 case TPO_CMD:
73 case PYR_CMD:
74 default:
75 return false;
76 }
77}
78
79static int init_panel(struct drm_device *dev, int mipi_pipe, int p_type)
80{
81 struct panel_funcs *p_cmd_funcs;
82 struct panel_funcs *p_vid_funcs;
83
84 /* Oh boy ... FIXME */
85 p_cmd_funcs = kzalloc(sizeof(struct panel_funcs), GFP_KERNEL);
86 if (p_cmd_funcs == NULL)
87 return -ENODEV;
88 p_vid_funcs = kzalloc(sizeof(struct panel_funcs), GFP_KERNEL);
89 if (p_vid_funcs == NULL) {
90 kfree(p_cmd_funcs);
91 return -ENODEV;
92 }
93
94 switch (p_type) {
95 case TPO_CMD:
96 tpo_cmd_init(dev, p_cmd_funcs);
97 mdfld_dsi_output_init(dev, mipi_pipe, NULL, p_cmd_funcs, NULL);
98 break;
99 case TPO_VID:
100 tpo_vid_init(dev, p_vid_funcs);
101 mdfld_dsi_output_init(dev, mipi_pipe, NULL, NULL, p_vid_funcs);
102 break;
103 case TMD_CMD:
104 /*tmd_cmd_init(dev, p_cmd_funcs); */
105 mdfld_dsi_output_init(dev, mipi_pipe, NULL, p_cmd_funcs, NULL);
106 break;
107 case TMD_VID:
108 tmd_vid_init(dev, p_vid_funcs);
109 mdfld_dsi_output_init(dev, mipi_pipe, NULL, NULL, p_vid_funcs);
110 break;
111 case PYR_CMD:
112 pyr_cmd_init(dev, p_cmd_funcs);
113 mdfld_dsi_output_init(dev, mipi_pipe, NULL, p_cmd_funcs, NULL);
114 break;
115 case PYR_VID:
116 mdfld_dsi_output_init(dev, mipi_pipe, NULL, NULL, p_vid_funcs);
117 break;
118 case TPO: /* TPO panel supports both cmd & vid interfaces */
119 tpo_cmd_init(dev, p_cmd_funcs);
120 tpo_vid_init(dev, p_vid_funcs);
121 mdfld_dsi_output_init(dev, mipi_pipe, NULL, p_cmd_funcs,
122 p_vid_funcs);
123 break;
124 case TMD:
125 break;
126 case PYR:
127 break;
128#if 0
129 case HDMI:
130 dev_dbg(dev->dev, "Initializing HDMI");
131 mdfld_hdmi_init(dev, &dev_priv->mode_dev);
132 break;
133#endif
134 default:
135 dev_err(dev->dev, "Unsupported interface %d", p_type);
136 return -ENODEV;
137 }
138 return 0;
139}
140
141int mdfld_output_init(struct drm_device *dev)
142{
143 int type;
144
145 /* MIPI panel 1 */
146 type = mdfld_get_panel_type(dev, 0);
147 dev_info(dev->dev, "panel 1: type is %d\n", type);
148 init_panel(dev, 0, type);
149
150 if (mdfld_dual_mipi) {
151 /* MIPI panel 2 */
152 type = mdfld_get_panel_type(dev, 2);
153 dev_info(dev->dev, "panel 2: type is %d\n", type);
154 init_panel(dev, 2, type);
155 }
156 if (mdfld_hdmi)
157 /* HDMI panel */
158 init_panel(dev, 0, HDMI);
159 return 0;
160}
161
162void mdfld_output_setup(struct drm_device *dev)
163{
164 /* FIXME: this is not the right place for this stuff ! */
165 if (IS_MFLD(dev)) {
166 if (mdfld_dpu)
167 mdfld_dbi_dpu_init(dev);
168 else
169 mdfld_dbi_dsr_init(dev);
170 }
171}
diff --git a/drivers/staging/gma500/mdfld_output.h b/drivers/staging/gma500/mdfld_output.h
deleted file mode 100644
index daf33e7df9d5..000000000000
--- a/drivers/staging/gma500/mdfld_output.h
+++ /dev/null
@@ -1,41 +0,0 @@
1/*
2 * Copyright (c) 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Thomas Eaton <thomas.g.eaton@intel.com>
25 * Scott Rowe <scott.m.rowe@intel.com>
26*/
27
28#ifndef MDFLD_OUTPUT_H
29#define MDFLD_OUTPUT_H
30
31int mdfld_output_init(struct drm_device *dev);
32int mdfld_panel_dpi(struct drm_device *dev);
33int mdfld_get_panel_type(struct drm_device *dev, int pipe);
34void mdfld_disable_crtc (struct drm_device *dev, int pipe);
35
36extern const struct drm_crtc_helper_funcs mdfld_helper_funcs;
37extern const struct drm_crtc_funcs mdfld_intel_crtc_funcs;
38
39extern void mdfld_output_setup(struct drm_device *dev);
40
41#endif
diff --git a/drivers/staging/gma500/mdfld_pyr_cmd.c b/drivers/staging/gma500/mdfld_pyr_cmd.c
deleted file mode 100644
index 523f2d8fe4f1..000000000000
--- a/drivers/staging/gma500/mdfld_pyr_cmd.c
+++ /dev/null
@@ -1,558 +0,0 @@
1/*
2 * Copyright (c) 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Thomas Eaton <thomas.g.eaton@intel.com>
25 * Scott Rowe <scott.m.rowe@intel.com>
26*/
27
28#include "mdfld_dsi_dbi.h"
29#include "mdfld_dsi_dpi.h"
30#include "mdfld_dsi_output.h"
31#include "mdfld_output.h"
32#include "mdfld_dsi_dbi_dpu.h"
33#include "mdfld_dsi_pkg_sender.h"
34
35#include "displays/pyr_cmd.h"
36
37static struct drm_display_mode *pyr_cmd_get_config_mode(struct drm_device *dev)
38{
39 struct drm_display_mode *mode;
40
41 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
42 if (!mode) {
43 dev_err(dev->dev, "Out of memory\n");
44 return NULL;
45 }
46
47	mode->hdisplay = 480;
48	mode->vdisplay = 864;
49	mode->hsync_start = 487;
50	mode->hsync_end = 490;
51	mode->htotal = 499;
52	mode->vsync_start = 874;
53	mode->vsync_end = 878;
54	mode->vtotal = 886;
55	mode->clock = 25777;
56
57	dev_dbg(dev->dev, "hdisplay is %d\n", mode->hdisplay);
58	dev_dbg(dev->dev, "vdisplay is %d\n", mode->vdisplay);
59	dev_dbg(dev->dev, "HSS is %d\n", mode->hsync_start);
60	dev_dbg(dev->dev, "HSE is %d\n", mode->hsync_end);
61	dev_dbg(dev->dev, "htotal is %d\n", mode->htotal);
62	dev_dbg(dev->dev, "VSS is %d\n", mode->vsync_start);
63	dev_dbg(dev->dev, "VSE is %d\n", mode->vsync_end);
64	dev_dbg(dev->dev, "vtotal is %d\n", mode->vtotal);
65	dev_dbg(dev->dev, "clock is %d\n", mode->clock);
66
67 drm_mode_set_name(mode);
68 drm_mode_set_crtcinfo(mode, 0);
69
70 mode->type |= DRM_MODE_TYPE_PREFERRED;
71
72 return mode;
73}
74
75static bool pyr_dsi_dbi_mode_fixup(struct drm_encoder *encoder,
76 struct drm_display_mode *mode,
77 struct drm_display_mode *adjusted_mode)
78{
79 struct drm_device *dev = encoder->dev;
80 struct drm_display_mode *fixed_mode = pyr_cmd_get_config_mode(dev);
81
82 if (fixed_mode) {
83 adjusted_mode->hdisplay = fixed_mode->hdisplay;
84 adjusted_mode->hsync_start = fixed_mode->hsync_start;
85 adjusted_mode->hsync_end = fixed_mode->hsync_end;
86 adjusted_mode->htotal = fixed_mode->htotal;
87 adjusted_mode->vdisplay = fixed_mode->vdisplay;
88 adjusted_mode->vsync_start = fixed_mode->vsync_start;
89 adjusted_mode->vsync_end = fixed_mode->vsync_end;
90 adjusted_mode->vtotal = fixed_mode->vtotal;
91 adjusted_mode->clock = fixed_mode->clock;
92 drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
93 kfree(fixed_mode);
94 }
95 return true;
96}
97
98static void pyr_dsi_dbi_set_power(struct drm_encoder *encoder, bool on)
99{
100 int ret = 0;
101 struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
102 struct mdfld_dsi_dbi_output *dbi_output =
103 MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
104 struct drm_device *dev = encoder->dev;
105 struct drm_psb_private *dev_priv = dev->dev_private;
106 u32 reg_offset = 0;
107 int pipe = (dbi_output->channel_num == 0) ? 0 : 2;
108
109 dev_dbg(dev->dev, "pipe %d : %s, panel on: %s\n", pipe,
110 on ? "On" : "Off",
111 dbi_output->dbi_panel_on ? "True" : "False");
112
113 if (pipe == 2) {
114 if (on)
115 dev_priv->dual_mipi = true;
116 else
117 dev_priv->dual_mipi = false;
118
119 reg_offset = MIPIC_REG_OFFSET;
120 } else {
121 if (!on)
122 dev_priv->dual_mipi = false;
123 }
124
125 if (!gma_power_begin(dev, true)) {
126 dev_err(dev->dev, "hw begin failed\n");
127 return;
128 }
129
130
131 if (on) {
132 if (dbi_output->dbi_panel_on)
133 goto out_err;
134
135 ret = mdfld_dsi_dbi_update_power(dbi_output, DRM_MODE_DPMS_ON);
136 if (ret) {
137 dev_err(dev->dev, "power on error\n");
138 goto out_err;
139 }
140
141 dbi_output->dbi_panel_on = true;
142
143 if (pipe == 2) {
144 dev_priv->dbi_panel_on2 = true;
145 } else {
146 dev_priv->dbi_panel_on = true;
147 mdfld_enable_te(dev, 0);
148 }
149 } else {
150 if (!dbi_output->dbi_panel_on && !dbi_output->first_boot)
151 goto out_err;
152
153 dbi_output->dbi_panel_on = false;
154 dbi_output->first_boot = false;
155
156 if (pipe == 2) {
157 dev_priv->dbi_panel_on2 = false;
158 mdfld_disable_te(dev, 2);
159 } else {
160 dev_priv->dbi_panel_on = false;
161 mdfld_disable_te(dev, 0);
162
163 if (dev_priv->dbi_panel_on2)
164 mdfld_enable_te(dev, 2);
165 }
166
167 ret = mdfld_dsi_dbi_update_power(dbi_output, DRM_MODE_DPMS_OFF);
168 if (ret) {
169			dev_err(dev->dev, "power off error\n");
170 goto out_err;
171 }
172 }
173
174out_err:
175 gma_power_end(dev);
176
177 if (ret)
178 dev_err(dev->dev, "failed\n");
179}
180
181static void pyr_dsi_controller_dbi_init(struct mdfld_dsi_config *dsi_config,
182 int pipe)
183{
184 struct drm_device *dev = dsi_config->dev;
185 u32 reg_offset = pipe ? MIPIC_REG_OFFSET : 0;
186 int lane_count = dsi_config->lane_count;
187 u32 val = 0;
188
189 dev_dbg(dev->dev, "Init DBI interface on pipe %d...\n", pipe);
190
191 /* Un-ready device */
192 REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000000);
193
194 /* Init dsi adapter before kicking off */
195 REG_WRITE((MIPIA_CONTROL_REG + reg_offset), 0x00000018);
196
197 /* TODO: figure out how to setup these registers */
198 REG_WRITE((MIPIA_DPHY_PARAM_REG + reg_offset), 0x150c600F);
199 REG_WRITE((MIPIA_CLK_LANE_SWITCH_TIME_CNT_REG + reg_offset),
200 0x000a0014);
201 REG_WRITE((MIPIA_DBI_BW_CTRL_REG + reg_offset), 0x00000400);
202 REG_WRITE((MIPIA_HS_LS_DBI_ENABLE_REG + reg_offset), 0x00000000);
203
204 /* Enable all interrupts */
205 REG_WRITE((MIPIA_INTR_EN_REG + reg_offset), 0xffffffff);
206 /* Max value: 20 clock cycles of txclkesc */
207 REG_WRITE((MIPIA_TURN_AROUND_TIMEOUT_REG + reg_offset), 0x0000001f);
208 /* Min 21 txclkesc, max: ffffh */
209 REG_WRITE((MIPIA_DEVICE_RESET_TIMER_REG + reg_offset), 0x0000ffff);
210 /* Min: 7d0 max: 4e20 */
211 REG_WRITE((MIPIA_INIT_COUNT_REG + reg_offset), 0x00000fa0);
212
213 /* Set up func_prg */
214 val |= lane_count;
215 val |= (dsi_config->channel_num << DSI_DBI_VIRT_CHANNEL_OFFSET);
216 val |= DSI_DBI_COLOR_FORMAT_OPTION2;
217 REG_WRITE((MIPIA_DSI_FUNC_PRG_REG + reg_offset), val);
218
219 REG_WRITE((MIPIA_HS_TX_TIMEOUT_REG + reg_offset), 0x3fffff);
220 REG_WRITE((MIPIA_LP_RX_TIMEOUT_REG + reg_offset), 0xffff);
221
222 /* De-assert dbi_stall when half of DBI FIFO is empty */
223 /* REG_WRITE((MIPIA_DBI_FIFO_THROTTLE_REG + reg_offset), 0x00000000); */
224
225 REG_WRITE((MIPIA_HIGH_LOW_SWITCH_COUNT_REG + reg_offset), 0x46);
226 REG_WRITE((MIPIA_EOT_DISABLE_REG + reg_offset), 0x00000002);
227 REG_WRITE((MIPIA_LP_BYTECLK_REG + reg_offset), 0x00000004);
228 REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000001);
229}
230
231static void pyr_dsi_dbi_mode_set(struct drm_encoder *encoder,
232 struct drm_display_mode *mode,
233 struct drm_display_mode *adjusted_mode)
234{
235 int ret = 0;
236 struct drm_device *dev = encoder->dev;
237 struct drm_psb_private *dev_priv = dev->dev_private;
238 struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
239 struct mdfld_dsi_dbi_output *dsi_output =
240 MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
241 struct mdfld_dsi_config *dsi_config =
242 mdfld_dsi_encoder_get_config(dsi_encoder);
243 struct mdfld_dsi_connector *dsi_connector = dsi_config->connector;
244 int pipe = dsi_connector->pipe;
245 u8 param = 0;
246
247 /* Regs */
248 u32 mipi_reg = MIPI;
249 u32 dspcntr_reg = DSPACNTR;
250 u32 pipeconf_reg = PIPEACONF;
251 u32 reg_offset = 0;
252
253 /* Values */
254 u32 dspcntr_val = dev_priv->dspcntr;
255 u32 pipeconf_val = dev_priv->pipeconf;
256 u32 h_active_area = mode->hdisplay;
257 u32 v_active_area = mode->vdisplay;
258 u32 mipi_val = (PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX |
259 TE_TRIGGER_GPIO_PIN);
260
261 dev_dbg(dev->dev, "mipi_val =0x%x\n", mipi_val);
262
263 dev_dbg(dev->dev, "type %s\n", (pipe == 2) ? "MIPI2" : "MIPI");
264 dev_dbg(dev->dev, "h %d v %d\n", mode->hdisplay, mode->vdisplay);
265
266 if (pipe == 2) {
267 mipi_reg = MIPI_C;
268 dspcntr_reg = DSPCCNTR;
269 pipeconf_reg = PIPECCONF;
270
271 reg_offset = MIPIC_REG_OFFSET;
272
273 dspcntr_val = dev_priv->dspcntr2;
274 pipeconf_val = dev_priv->pipeconf2;
275 } else {
276 mipi_val |= 0x2; /* Two lanes for port A and C respectively */
277 }
278
279 if (!gma_power_begin(dev, true)) {
280 dev_err(dev->dev, "hw begin failed\n");
281 return;
282 }
283
284 /* Set up pipe related registers */
285 REG_WRITE(mipi_reg, mipi_val);
286 REG_READ(mipi_reg);
287
288 pyr_dsi_controller_dbi_init(dsi_config, pipe);
289
290 msleep(20);
291
292 REG_WRITE(dspcntr_reg, dspcntr_val);
293 REG_READ(dspcntr_reg);
294
295 /* 20ms delay before sending exit_sleep_mode */
296 msleep(20);
297
298 /* Send exit_sleep_mode DCS */
299 ret = mdfld_dsi_dbi_send_dcs(dsi_output, exit_sleep_mode, NULL,
300 0, CMD_DATA_SRC_SYSTEM_MEM);
301 if (ret) {
302		dev_err(dev->dev, "sending exit_sleep_mode failed\n");
303 goto out_err;
304 }
305
306 /*send set_tear_on DCS*/
307 ret = mdfld_dsi_dbi_send_dcs(dsi_output, set_tear_on,
308 &param, 1, CMD_DATA_SRC_SYSTEM_MEM);
309 if (ret) {
310		dev_err(dev->dev, "%s - sending set_tear_on failed\n", __func__);
311 goto out_err;
312 }
313
314 /* Do some init stuff */
315 mdfld_dsi_brightness_init(dsi_config, pipe);
316 mdfld_dsi_gen_fifo_ready(dev, (MIPIA_GEN_FIFO_STAT_REG + reg_offset),
317 HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
318
319 REG_WRITE(pipeconf_reg, pipeconf_val | PIPEACONF_DSR);
320 REG_READ(pipeconf_reg);
321
322 /* TODO: this looks ugly, try to move it to CRTC mode setting */
323 if (pipe == 2)
324 dev_priv->pipeconf2 |= PIPEACONF_DSR;
325 else
326 dev_priv->pipeconf |= PIPEACONF_DSR;
327
328 dev_dbg(dev->dev, "pipeconf %x\n", REG_READ(pipeconf_reg));
329
330 ret = mdfld_dsi_dbi_update_area(dsi_output, 0, 0,
331 h_active_area - 1, v_active_area - 1);
332 if (ret) {
333 dev_err(dev->dev, "update area failed\n");
334 goto out_err;
335 }
336
337out_err:
338 gma_power_end(dev);
339
340 if (ret)
341 dev_err(dev->dev, "mode set failed\n");
342 else
343 dev_dbg(dev->dev, "mode set done successfully\n");
344}
345
346static void pyr_dsi_dbi_prepare(struct drm_encoder *encoder)
347{
348 struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
349 struct mdfld_dsi_dbi_output *dbi_output =
350 MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
351
352 dbi_output->mode_flags |= MODE_SETTING_IN_ENCODER;
353 dbi_output->mode_flags &= ~MODE_SETTING_ENCODER_DONE;
354
355 pyr_dsi_dbi_set_power(encoder, false);
356}
357
358static void pyr_dsi_dbi_commit(struct drm_encoder *encoder)
359{
360 struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
361 struct mdfld_dsi_dbi_output *dbi_output =
362 MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
363 struct drm_device *dev = dbi_output->dev;
364 struct drm_psb_private *dev_priv = dev->dev_private;
365 struct psb_drm_dpu_rect rect;
366
367 pyr_dsi_dbi_set_power(encoder, true);
368
369 dbi_output->mode_flags &= ~MODE_SETTING_IN_ENCODER;
370
371 rect.x = rect.y = 0;
372 rect.width = 864;
373 rect.height = 480;
374
375 if (dbi_output->channel_num == 1) {
376 dev_priv->dsr_fb_update |= MDFLD_DSR_2D_3D_2;
377 /* If DPU enabled report a fullscreen damage */
378 mdfld_dbi_dpu_report_damage(dev, MDFLD_PLANEC, &rect);
379 } else {
380 dev_priv->dsr_fb_update |= MDFLD_DSR_2D_3D_0;
381 mdfld_dbi_dpu_report_damage(dev, MDFLD_PLANEA, &rect);
382 }
383 dbi_output->mode_flags |= MODE_SETTING_ENCODER_DONE;
384}
385
386static void pyr_dsi_dbi_dpms(struct drm_encoder *encoder, int mode)
387{
388 struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
389 struct mdfld_dsi_dbi_output *dbi_output =
390 MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
391 struct drm_device *dev = dbi_output->dev;
392
393 dev_dbg(dev->dev, "%s\n", (mode == DRM_MODE_DPMS_ON ? "on" : "off"));
394
395 if (mode == DRM_MODE_DPMS_ON)
396 pyr_dsi_dbi_set_power(encoder, true);
397 else
398 pyr_dsi_dbi_set_power(encoder, false);
399}
400
401/*
402 * Update the DBI MIPI Panel Frame Buffer.
403 */
404static void pyr_dsi_dbi_update_fb(struct mdfld_dsi_dbi_output *dbi_output,
405 int pipe)
406{
407 struct mdfld_dsi_pkg_sender *sender =
408 mdfld_dsi_encoder_get_pkg_sender(&dbi_output->base);
409 struct drm_device *dev = dbi_output->dev;
410 struct drm_crtc *crtc = dbi_output->base.base.crtc;
411 struct psb_intel_crtc *psb_crtc = (crtc) ?
412 to_psb_intel_crtc(crtc) : NULL;
413
414 u32 dpll_reg = MRST_DPLL_A;
415 u32 dspcntr_reg = DSPACNTR;
416 u32 pipeconf_reg = PIPEACONF;
417 u32 dsplinoff_reg = DSPALINOFF;
418 u32 dspsurf_reg = DSPASURF;
419 u32 hs_gen_ctrl_reg = HS_GEN_CTRL_REG;
420 u32 gen_fifo_stat_reg = GEN_FIFO_STAT_REG;
421 u32 reg_offset = 0;
422
423 u32 intr_status;
424 u32 fifo_stat_reg_val;
425 u32 dpll_reg_val;
426 u32 dspcntr_reg_val;
427 u32 pipeconf_reg_val;
428
429 /* If mode setting on-going, back off */
430 if ((dbi_output->mode_flags & MODE_SETTING_ON_GOING) ||
431 (psb_crtc && psb_crtc->mode_flags & MODE_SETTING_ON_GOING) ||
432 !(dbi_output->mode_flags & MODE_SETTING_ENCODER_DONE))
433 return;
434
435 /*
436 * Look for errors here. In particular we're checking for whatever
437 * error status might have appeared during the last frame transmit
438 * (memory write).
439 *
440 * Normally, the bits we're testing here would be set infrequently,
441 * if at all. However, one panel (at least) returns at least one
442 * error bit on most frames. So we've disabled the kernel message
443 * for now.
444 *
445 * Still clear whatever error bits are set, except don't clear the
446 * ones that would make the Penwell DSI controller reset if we
447 * cleared them.
448 */
449 intr_status = REG_READ(INTR_STAT_REG);
450 if ((intr_status & 0x26FFFFFF) != 0) {
451 /* dev_err(dev->dev, "DSI status: 0x%08X\n", intr_status); */
452 intr_status &= 0x26F3FFFF;
453 REG_WRITE(INTR_STAT_REG, intr_status);
454 }
455
456 if (pipe == 2) {
457 dspcntr_reg = DSPCCNTR;
458 pipeconf_reg = PIPECCONF;
459 dsplinoff_reg = DSPCLINOFF;
460 dspsurf_reg = DSPCSURF;
461
462 hs_gen_ctrl_reg = HS_GEN_CTRL_REG + MIPIC_REG_OFFSET;
463 gen_fifo_stat_reg = GEN_FIFO_STAT_REG + MIPIC_REG_OFFSET;
464
465 reg_offset = MIPIC_REG_OFFSET;
466 }
467
468 if (!gma_power_begin(dev, true)) {
469 dev_err(dev->dev, "hw begin failed\n");
470 return;
471 }
472
473 fifo_stat_reg_val = REG_READ(MIPIA_GEN_FIFO_STAT_REG + reg_offset);
474 dpll_reg_val = REG_READ(dpll_reg);
475 dspcntr_reg_val = REG_READ(dspcntr_reg);
476 pipeconf_reg_val = REG_READ(pipeconf_reg);
477
478 if (!(fifo_stat_reg_val & (1 << 27)) ||
479 (dpll_reg_val & DPLL_VCO_ENABLE) ||
480 !(dspcntr_reg_val & DISPLAY_PLANE_ENABLE) ||
481 !(pipeconf_reg_val & DISPLAY_PLANE_ENABLE)) {
482 goto update_fb_out0;
483 }
484
485 /* Refresh plane changes */
486 REG_WRITE(dsplinoff_reg, REG_READ(dsplinoff_reg));
487 REG_WRITE(dspsurf_reg, REG_READ(dspsurf_reg));
488 REG_READ(dspsurf_reg);
489
490 mdfld_dsi_send_dcs(sender,
491 write_mem_start,
492 NULL,
493 0,
494 CMD_DATA_SRC_PIPE,
495 MDFLD_DSI_SEND_PACKAGE);
496
497 /*
498 * The idea here is to transmit a Generic Read command after the
499 * Write Memory Start/Continue commands finish. This asks for
500 * the panel to return an "ACK No Errors," or (if it has errors
501 * to report) an Error Report. This allows us to monitor the
502 * panel's perception of the health of the DSI.
503 */
504 mdfld_dsi_gen_fifo_ready(dev, gen_fifo_stat_reg,
505 HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
506 REG_WRITE(hs_gen_ctrl_reg, (1 << WORD_COUNTS_POS) | GEN_READ_0);
507
508 dbi_output->dsr_fb_update_done = true;
509update_fb_out0:
510 gma_power_end(dev);
511}
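The generic read issued above is fire-and-forget: the driver asks the panel for an "ACK No Errors"/error report but never collects the reply. As a rough illustration of how that reply could be pulled back, here is a minimal sketch, assuming it sits next to pyr_dsi_dbi_update_fb() so the existing includes and the REG_READ() macro are in scope; the helper name, retry count and sleep interval are invented for illustration, and MIPIA_HS_GEN_DATA_REG / DSI_FIFO_GEN_HS_DATA_EMPTY are taken from the medfield.h definitions later in this patch.

/*
 * Illustrative sketch only (not part of the original driver): poll the
 * generic FIFO status until the HS data FIFO holds the panel's reply to
 * the GEN_READ_0 issued in pyr_dsi_dbi_update_fb(), then return it.
 */
static u32 pyr_dsi_collect_gen_read(struct drm_device *dev, u32 reg_offset)
{
	int retry = 100;

	/* Wait for the HS generic data FIFO to become non-empty */
	while (retry-- &&
	       (REG_READ(MIPIA_GEN_FIFO_STAT_REG + reg_offset) &
		DSI_FIFO_GEN_HS_DATA_EMPTY))
		usleep_range(500, 1000);

	if (retry < 0)
		return 0;	/* timed out: no reply from the panel */

	/* The word carries "ACK No Errors" or the panel's error report */
	return REG_READ(MIPIA_HS_GEN_DATA_REG + reg_offset);
}

A caller could, for example, dev_dbg() a non-zero return value right after dsr_fb_update_done is set, matching the health-monitoring intent described in the comment above.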
512
513/*
514 * TODO: will be removed later, should work out display interfaces for power
515 */
516void pyr_dsi_adapter_init(struct mdfld_dsi_config *dsi_config, int pipe)
517{
518 if (!dsi_config || (pipe != 0 && pipe != 2)) {
519 WARN_ON(1);
520 return;
521 }
522 pyr_dsi_controller_dbi_init(dsi_config, pipe);
523}
524
525static int pyr_cmd_get_panel_info(struct drm_device *dev, int pipe,
526 struct panel_info *pi)
527{
528 if (!dev || !pi)
529 return -EINVAL;
530
531 pi->width_mm = PYR_PANEL_WIDTH;
532 pi->height_mm = PYR_PANEL_HEIGHT;
533
534 return 0;
535}
536
537/* PYR DBI encoder helper funcs */
538static const struct drm_encoder_helper_funcs pyr_dsi_dbi_helper_funcs = {
539 .dpms = pyr_dsi_dbi_dpms,
540 .mode_fixup = pyr_dsi_dbi_mode_fixup,
541 .prepare = pyr_dsi_dbi_prepare,
542 .mode_set = pyr_dsi_dbi_mode_set,
543 .commit = pyr_dsi_dbi_commit,
544};
545
546/* PYR DBI encoder funcs */
547static const struct drm_encoder_funcs mdfld_dsi_dbi_encoder_funcs = {
548 .destroy = drm_encoder_cleanup,
549};
550
551void pyr_cmd_init(struct drm_device *dev, struct panel_funcs *p_funcs)
552{
553 p_funcs->encoder_funcs = &mdfld_dsi_dbi_encoder_funcs;
554 p_funcs->encoder_helper_funcs = &pyr_dsi_dbi_helper_funcs;
555 p_funcs->get_config_mode = &pyr_cmd_get_config_mode;
556 p_funcs->update_fb = pyr_dsi_dbi_update_fb;
557 p_funcs->get_panel_info = pyr_cmd_get_panel_info;
558}
diff --git a/drivers/staging/gma500/mdfld_tmd_vid.c b/drivers/staging/gma500/mdfld_tmd_vid.c
deleted file mode 100644
index affdc09c6769..000000000000
--- a/drivers/staging/gma500/mdfld_tmd_vid.c
+++ /dev/null
@@ -1,206 +0,0 @@
1/*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Jim Liu <jim.liu@intel.com>
25 * Jackie Li <yaodong.li@intel.com>
26 * Gideon Eaton <eaton.
27 * Scott Rowe <scott.m.rowe@intel.com>
28 */
29
30#include "mdfld_dsi_dbi.h"
31#include "mdfld_dsi_dpi.h"
32#include "mdfld_dsi_output.h"
33#include "mdfld_output.h"
34
35#include "mdfld_dsi_pkg_sender.h"
36
37#include "displays/tmd_vid.h"
38
39/* FIXME: static ? */
40struct drm_display_mode *tmd_vid_get_config_mode(struct drm_device *dev)
41{
42 struct drm_display_mode *mode;
43 struct drm_psb_private *dev_priv = dev->dev_private;
44 struct mrst_timing_info *ti = &dev_priv->gct_data.DTD;
45 bool use_gct = false; /*Disable GCT for now*/
46
47 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
48 if (!mode) {
49 dev_err(dev->dev, "Out of memory\n");
50 return NULL;
51 }
52
53 if (use_gct) {
54 dev_dbg(dev->dev, "GCT found MIPI panel.\n");
55
56 mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
57 mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
58 mode->hsync_start = mode->hdisplay +
59 ((ti->hsync_offset_hi << 8) |
60 ti->hsync_offset_lo);
61 mode->hsync_end = mode->hsync_start +
62 ((ti->hsync_pulse_width_hi << 8) |
63 ti->hsync_pulse_width_lo);
64 mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) |
65 ti->hblank_lo);
66 mode->vsync_start = \
67 mode->vdisplay + ((ti->vsync_offset_hi << 8) |
68 ti->vsync_offset_lo);
69 mode->vsync_end = \
70 mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) | \
71 ti->vsync_pulse_width_lo);
72 mode->vtotal = mode->vdisplay +
73 ((ti->vblank_hi << 8) | ti->vblank_lo);
74 mode->clock = ti->pixel_clock * 10;
75
76 dev_dbg(dev->dev, "hdisplay is %d\n", mode->hdisplay);
77 dev_dbg(dev->dev, "vdisplay is %d\n", mode->vdisplay);
78 dev_dbg(dev->dev, "HSS is %d\n", mode->hsync_start);
79 dev_dbg(dev->dev, "HSE is %d\n", mode->hsync_end);
80 dev_dbg(dev->dev, "htotal is %d\n", mode->htotal);
81 dev_dbg(dev->dev, "VSS is %d\n", mode->vsync_start);
82 dev_dbg(dev->dev, "VSE is %d\n", mode->vsync_end);
83 dev_dbg(dev->dev, "vtotal is %d\n", mode->vtotal);
84 dev_dbg(dev->dev, "clock is %d\n", mode->clock);
85 } else {
86 mode->hdisplay = 480;
87 mode->vdisplay = 854;
88 mode->hsync_start = 487;
89 mode->hsync_end = 490;
90 mode->htotal = 499;
91 mode->vsync_start = 861;
92 mode->vsync_end = 865;
93 mode->vtotal = 873;
94 mode->clock = 33264;
95 }
96 drm_mode_set_name(mode);
97 drm_mode_set_crtcinfo(mode, 0);
98
99 mode->type |= DRM_MODE_TYPE_PREFERRED;
100
101 return mode;
102}
103
104static int tmd_vid_get_panel_info(struct drm_device *dev,
105 int pipe,
106 struct panel_info *pi)
107{
108 if (!dev || !pi)
109 return -EINVAL;
110
111 pi->width_mm = TMD_PANEL_WIDTH;
112 pi->height_mm = TMD_PANEL_HEIGHT;
113
114 return 0;
115}
116
117/*
118 * mdfld_dsi_tmd_drv_ic_init - initialise a TMD interface
119 * @dsi_config: configuration
120 * @pipe: pipe to configure
121 *
122 * This function is called only by mrst_dsi_mode_set and
123 * restore_display_registers. Since this function does not
124 * acquire the mutex, it is important that the calling function
125 * does!
126 */
127
128
129static void mdfld_dsi_tmd_drv_ic_init(struct mdfld_dsi_config *dsi_config,
130 int pipe)
131{
132 static u32 tmd_cmd_mcap_off[] = {0x000000b2};
133 static u32 tmd_cmd_enable_lane_switch[] = {0x000101ef};
134 static u32 tmd_cmd_set_lane_num[] = {0x006360ef};
135 static u32 tmd_cmd_pushing_clock0[] = {0x00cc2fef};
136 static u32 tmd_cmd_pushing_clock1[] = {0x00dd6eef};
137 static u32 tmd_cmd_set_mode[] = {0x000000b3};
138 static u32 tmd_cmd_set_sync_pulse_mode[] = {0x000961ef};
139 static u32 tmd_cmd_set_column[] = {0x0100002a, 0x000000df};
140 static u32 tmd_cmd_set_page[] = {0x0300002b, 0x00000055};
141 static u32 tmd_cmd_set_video_mode[] = {0x00000153};
142 /* no auto_bl, need to add in future */
143 static u32 tmd_cmd_enable_backlight[] = {0x00005ab4};
144 static u32 tmd_cmd_set_backlight_dimming[] = {0x00000ebd};
145
146 struct mdfld_dsi_pkg_sender *sender
147 = mdfld_dsi_get_pkg_sender(dsi_config);
148
149 DRM_INFO("Enter mdfld init TMD MIPI display.\n");
150
151 if (!sender) {
152 DRM_ERROR("Cannot get sender\n");
153 return;
154 }
155
156 if (dsi_config->dvr_ic_inited)
157 return;
158
159 msleep(3);
160
161 mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_mcap_off, 1, 0);
162 mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_enable_lane_switch, 1, 0);
163 mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_set_lane_num, 1, 0);
164 mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_pushing_clock0, 1, 0);
165 mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_pushing_clock1, 1, 0);
166 mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_set_mode, 1, 0);
167 mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_set_sync_pulse_mode, 1, 0);
168 mdfld_dsi_send_mcs_long_lp(sender, tmd_cmd_set_column, 2, 0);
169 mdfld_dsi_send_mcs_long_lp(sender, tmd_cmd_set_page, 2, 0);
170 mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_set_video_mode, 1, 0);
171 mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_enable_backlight, 1, 0);
172 mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_set_backlight_dimming, 1, 0);
173
174 dsi_config->dvr_ic_inited = 1;
175}
176
177/* TMD DPI encoder helper funcs */
178static const struct drm_encoder_helper_funcs
179 mdfld_tpo_dpi_encoder_helper_funcs = {
180 .dpms = mdfld_dsi_dpi_dpms,
181 .mode_fixup = mdfld_dsi_dpi_mode_fixup,
182 .prepare = mdfld_dsi_dpi_prepare,
183 .mode_set = mdfld_dsi_dpi_mode_set,
184 .commit = mdfld_dsi_dpi_commit,
185};
186
187/* TMD DPI encoder funcs */
188static const struct drm_encoder_funcs mdfld_tpo_dpi_encoder_funcs = {
189 .destroy = drm_encoder_cleanup,
190};
191
192void tmd_vid_init(struct drm_device *dev, struct panel_funcs *p_funcs)
193{
194 if (!dev || !p_funcs) {
195 DRM_ERROR("Invalid parameters\n"); /* dev may be NULL here */
196 return;
197 }
198
199 p_funcs->encoder_funcs = &mdfld_tpo_dpi_encoder_funcs;
200 p_funcs->encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs;
201 p_funcs->get_config_mode = &tmd_vid_get_config_mode;
202 p_funcs->update_fb = NULL;
203 p_funcs->get_panel_info = tmd_vid_get_panel_info;
204 p_funcs->reset = mdfld_dsi_panel_reset;
205 p_funcs->drv_ic_init = mdfld_dsi_tmd_drv_ic_init;
206}
diff --git a/drivers/staging/gma500/mdfld_tpo_cmd.c b/drivers/staging/gma500/mdfld_tpo_cmd.c
deleted file mode 100644
index c7f7c9c19bc1..000000000000
--- a/drivers/staging/gma500/mdfld_tpo_cmd.c
+++ /dev/null
@@ -1,509 +0,0 @@
1/*
2 * Copyright (c) 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Thomas Eaton <thomas.g.eaton@intel.com>
25 * Scott Rowe <scott.m.rowe@intel.com>
26 */
27
28#include "mdfld_dsi_dbi.h"
29#include "mdfld_dsi_dpi.h"
30#include "mdfld_dsi_output.h"
31#include "mdfld_output.h"
32#include "mdfld_dsi_dbi_dpu.h"
33#include "mdfld_dsi_pkg_sender.h"
34
35#include "displays/tpo_cmd.h"
36
37static struct drm_display_mode *tpo_cmd_get_config_mode(struct drm_device *dev)
38{
39 struct drm_display_mode *mode;
40 struct drm_psb_private *dev_priv = dev->dev_private;
41 struct mrst_timing_info *ti = &dev_priv->gct_data.DTD;
42 bool use_gct = false;
43
44 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
45 if (!mode)
46 return NULL;
47
48 if (use_gct) {
49 dev_dbg(dev->dev, "GCT found MIPI panel.\n");
50
51 mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
52 mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
53 mode->hsync_start = mode->hdisplay + \
54 ((ti->hsync_offset_hi << 8) | \
55 ti->hsync_offset_lo);
56 mode->hsync_end = mode->hsync_start + \
57 ((ti->hsync_pulse_width_hi << 8) | \
58 ti->hsync_pulse_width_lo);
59 mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \
60 ti->hblank_lo);
61 mode->vsync_start = \
62 mode->vdisplay + ((ti->vsync_offset_hi << 8) | \
63 ti->vsync_offset_lo);
64 mode->vsync_end = \
65 mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) | \
66 ti->vsync_pulse_width_lo);
67 mode->vtotal = mode->vdisplay + \
68 ((ti->vblank_hi << 8) | ti->vblank_lo);
69 mode->clock = ti->pixel_clock * 10;
70
71 dev_dbg(dev->dev, "hdisplay is %d\n", mode->hdisplay);
72 dev_dbg(dev->dev, "vdisplay is %d\n", mode->vdisplay);
73 dev_dbg(dev->dev, "HSS is %d\n", mode->hsync_start);
74 dev_dbg(dev->dev, "HSE is %d\n", mode->hsync_end);
75 dev_dbg(dev->dev, "htotal is %d\n", mode->htotal);
76 dev_dbg(dev->dev, "VSS is %d\n", mode->vsync_start);
77 dev_dbg(dev->dev, "VSE is %d\n", mode->vsync_end);
78 dev_dbg(dev->dev, "vtotal is %d\n", mode->vtotal);
79 dev_dbg(dev->dev, "clock is %d\n", mode->clock);
80 } else {
81 mode->hdisplay = 864;
82 mode->vdisplay = 480;
83 mode->hsync_start = 872;
84 mode->hsync_end = 876;
85 mode->htotal = 884;
86 mode->vsync_start = 482;
87 mode->vsync_end = 494;
88 mode->vtotal = 486;
89 mode->clock = 25777;
90 }
91
92 drm_mode_set_name(mode);
93 drm_mode_set_crtcinfo(mode, 0);
94
95 mode->type |= DRM_MODE_TYPE_PREFERRED;
96
97 return mode;
98}
99
100static bool mdfld_dsi_dbi_mode_fixup(struct drm_encoder *encoder,
101 struct drm_display_mode *mode,
102 struct drm_display_mode *adjusted_mode)
103{
104 struct drm_device *dev = encoder->dev;
105 struct drm_display_mode *fixed_mode = tpo_cmd_get_config_mode(dev);
106
107 if (fixed_mode) {
108 adjusted_mode->hdisplay = fixed_mode->hdisplay;
109 adjusted_mode->hsync_start = fixed_mode->hsync_start;
110 adjusted_mode->hsync_end = fixed_mode->hsync_end;
111 adjusted_mode->htotal = fixed_mode->htotal;
112 adjusted_mode->vdisplay = fixed_mode->vdisplay;
113 adjusted_mode->vsync_start = fixed_mode->vsync_start;
114 adjusted_mode->vsync_end = fixed_mode->vsync_end;
115 adjusted_mode->vtotal = fixed_mode->vtotal;
116 adjusted_mode->clock = fixed_mode->clock;
117 drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
118 kfree(fixed_mode);
119 }
120 return true;
121}
122
123static void mdfld_dsi_dbi_set_power(struct drm_encoder *encoder, bool on)
124{
125 int ret = 0;
126 struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
127 struct mdfld_dsi_dbi_output *dbi_output =
128 MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
129 struct mdfld_dsi_config *dsi_config =
130 mdfld_dsi_encoder_get_config(dsi_encoder);
131 struct mdfld_dsi_pkg_sender *sender =
132 mdfld_dsi_encoder_get_pkg_sender(dsi_encoder);
133 struct drm_device *dev = encoder->dev;
134 struct drm_psb_private *dev_priv = dev->dev_private;
135 u32 reg_offset = 0;
136 int pipe = (dbi_output->channel_num == 0) ? 0 : 2;
137 u32 data = 0;
138
139 dev_dbg(dev->dev, "pipe %d : %s, panel on: %s\n",
140 pipe, on ? "On" : "Off",
141 dbi_output->dbi_panel_on ? "True" : "False");
142
143 if (pipe == 2) {
144 if (on)
145 dev_priv->dual_mipi = true;
146 else
147 dev_priv->dual_mipi = false;
148 reg_offset = MIPIC_REG_OFFSET;
149 } else {
150 if (!on)
151 dev_priv->dual_mipi = false;
152 }
153
154 if (!gma_power_begin(dev, true)) {
155 dev_err(dev->dev, "hw begin failed\n");
156 return;
157 }
158
159 if (on) {
160 if (dbi_output->dbi_panel_on)
161 goto out_err;
162
163 ret = mdfld_dsi_dbi_update_power(dbi_output, DRM_MODE_DPMS_ON);
164 if (ret) {
165 dev_err(dev->dev, "power on error\n");
166 goto out_err;
167 }
168
169 dbi_output->dbi_panel_on = true;
170
171 if (pipe == 2)
172 dev_priv->dbi_panel_on2 = true;
173 else
174 dev_priv->dbi_panel_on = true;
175 mdfld_enable_te(dev, pipe);
176 } else {
177 if (!dbi_output->dbi_panel_on && !dbi_output->first_boot)
178 goto out_err;
179
180 dbi_output->dbi_panel_on = false;
181 dbi_output->first_boot = false;
182
183 if (pipe == 2)
184 dev_priv->dbi_panel_on2 = false;
185 else
186 dev_priv->dbi_panel_on = false;
187
188 mdfld_disable_te(dev, pipe);
189
190 ret = mdfld_dsi_dbi_update_power(dbi_output, DRM_MODE_DPMS_OFF);
191 if (ret) {
192 dev_err(dev->dev, "power off error\n");
193 goto out_err;
194 }
195 }
196
197 /*
198 * FIXME: this is a workaround for a TPO panel crash after roughly 83
199 * DPMS on/off cycles. The root cause is that the booster in the
200 * driver IC crashes. Apply this workaround so that we can resume the
201 * driver IC once we find that the booster has a fault.
202 */
203 mdfld_dsi_get_power_mode(dsi_config,
204 &data,
205 MDFLD_DSI_HS_TRANSMISSION);
206
207 if (on && data && !(data & (1 << 7))) {
208 /* Soft reset */
209 mdfld_dsi_send_dcs(sender,
210 DCS_SOFT_RESET,
211 NULL,
212 0,
213 CMD_DATA_SRC_PIPE,
214 MDFLD_DSI_SEND_PACKAGE);
215
216 /* Init drvIC */
217 if (dbi_output->p_funcs->drv_ic_init)
218 dbi_output->p_funcs->drv_ic_init(dsi_config,
219 pipe);
220 }
221
222out_err:
223 gma_power_end(dev);
224 if (ret)
225 dev_err(dev->dev, "failed\n");
226}
227
228
229static void mdfld_dsi_dbi_mode_set(struct drm_encoder *encoder,
230 struct drm_display_mode *mode,
231 struct drm_display_mode *adjusted_mode)
232{
233 int ret = 0;
234 struct drm_device *dev = encoder->dev;
235 struct drm_psb_private *dev_priv = dev->dev_private;
236 struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
237 struct mdfld_dsi_dbi_output *dsi_output =
238 MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
239 struct mdfld_dsi_config *dsi_config =
240 mdfld_dsi_encoder_get_config(dsi_encoder);
241 struct mdfld_dsi_connector *dsi_connector = dsi_config->connector;
242 int pipe = dsi_connector->pipe;
243 u8 param = 0;
244
245 /* Regs */
246 u32 mipi_reg = MIPI;
247 u32 dspcntr_reg = DSPACNTR;
248 u32 pipeconf_reg = PIPEACONF;
249 u32 reg_offset = 0;
250
251 /* Values */
252 u32 dspcntr_val = dev_priv->dspcntr;
253 u32 pipeconf_val = dev_priv->pipeconf;
254 u32 h_active_area = mode->hdisplay;
255 u32 v_active_area = mode->vdisplay;
256 u32 mipi_val;
257
258 mipi_val = (PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX |
259 TE_TRIGGER_GPIO_PIN);
260
261 dev_dbg(dev->dev, "mipi_val = 0x%x\n", mipi_val);
262
263 dev_dbg(dev->dev, "type %s\n", (pipe == 2) ? "MIPI2" : "MIPI");
264 dev_dbg(dev->dev, "h %d v %d\n", mode->hdisplay, mode->vdisplay);
265
266 if (pipe == 2) {
267 mipi_reg = MIPI_C;
268 dspcntr_reg = DSPCCNTR;
269 pipeconf_reg = PIPECCONF;
270
271 reg_offset = MIPIC_REG_OFFSET;
272
273 dspcntr_val = dev_priv->dspcntr2;
274 pipeconf_val = dev_priv->pipeconf2;
275 } else {
276 mipi_val |= 0x2; /* Two lanes for port A and C respectively */
277 }
278
279 if (!gma_power_begin(dev, true)) {
280 dev_err(dev->dev, "hw begin failed\n");
281 return;
282 }
283
284 REG_WRITE(dspcntr_reg, dspcntr_val);
285 REG_READ(dspcntr_reg);
286
287 /* 20ms delay before sending exit_sleep_mode */
288 msleep(20);
289
290 /* Send exit_sleep_mode DCS */
291 ret = mdfld_dsi_dbi_send_dcs(dsi_output, DCS_EXIT_SLEEP_MODE,
292 NULL, 0, CMD_DATA_SRC_SYSTEM_MEM);
293 if (ret) {
294 dev_err(dev->dev, "failed to send exit_sleep_mode\n");
295 goto out_err;
296 }
297
298 /* Send set_tear_on DCS */
299 ret = mdfld_dsi_dbi_send_dcs(dsi_output, DCS_SET_TEAR_ON,
300 &param, 1, CMD_DATA_SRC_SYSTEM_MEM);
301 if (ret) {
302 dev_err(dev->dev, "%s - failed to send set_tear_on\n", __func__);
303 goto out_err;
304 }
305
306 /* Do some init stuff */
307 REG_WRITE(pipeconf_reg, pipeconf_val | PIPEACONF_DSR);
308 REG_READ(pipeconf_reg);
309
310 /* TODO: this looks ugly, try to move it to CRTC mode setting */
311 if (pipe == 2)
312 dev_priv->pipeconf2 |= PIPEACONF_DSR;
313 else
314 dev_priv->pipeconf |= PIPEACONF_DSR;
315
316 dev_dbg(dev->dev, "pipeconf %x\n", REG_READ(pipeconf_reg));
317
318 ret = mdfld_dsi_dbi_update_area(dsi_output, 0, 0,
319 h_active_area - 1, v_active_area - 1);
320 if (ret) {
321 dev_err(dev->dev, "update area failed\n");
322 goto out_err;
323 }
324
325out_err:
326 gma_power_end(dev);
327
328 if (ret)
329 dev_err(dev->dev, "mode set failed\n");
330}
331
332static void mdfld_dsi_dbi_prepare(struct drm_encoder *encoder)
333{
334 struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
335 struct mdfld_dsi_dbi_output *dbi_output
336 = MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
337
338 dbi_output->mode_flags |= MODE_SETTING_IN_ENCODER;
339 dbi_output->mode_flags &= ~MODE_SETTING_ENCODER_DONE;
340
341 mdfld_dsi_dbi_set_power(encoder, false);
342}
343
344static void mdfld_dsi_dbi_commit(struct drm_encoder *encoder)
345{
346 struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
347 struct mdfld_dsi_dbi_output *dbi_output =
348 MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
349 struct drm_device *dev = dbi_output->dev;
350 struct drm_psb_private *dev_priv = dev->dev_private;
351 struct psb_drm_dpu_rect rect;
352
353 mdfld_dsi_dbi_set_power(encoder, true);
354 dbi_output->mode_flags &= ~MODE_SETTING_IN_ENCODER;
355
356 rect.x = rect.y = 0;
357 rect.width = 864;
358 rect.height = 480;
359
360 if (dbi_output->channel_num == 1) {
361 dev_priv->dsr_fb_update |= MDFLD_DSR_2D_3D_2;
362 /* If DPU enabled report a fullscreen damage */
363 mdfld_dbi_dpu_report_damage(dev, MDFLD_PLANEC, &rect);
364 } else {
365 dev_priv->dsr_fb_update |= MDFLD_DSR_2D_3D_0;
366 mdfld_dbi_dpu_report_damage(dev, MDFLD_PLANEA, &rect);
367 }
368 dbi_output->mode_flags |= MODE_SETTING_ENCODER_DONE;
369}
370
371static void mdfld_dsi_dbi_dpms(struct drm_encoder *encoder, int mode)
372{
373 struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
374 struct mdfld_dsi_dbi_output *dbi_output
375 = MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
376 struct drm_device *dev = dbi_output->dev;
377 struct drm_psb_private *dev_priv = dev->dev_private;
378 static bool bdispoff;
379
380 dev_dbg(dev->dev, "%s\n", (mode == DRM_MODE_DPMS_ON ? "on" : "off"));
381
382 if (mode == DRM_MODE_DPMS_ON) {
383 /*
384 * FIXME: in case I am wrong!
385 * We don't need to exit DSR here to wake up plane/pipe/PLL;
386 * if everything goes right, hw_begin will resume them all
387 * during set_power.
388 */
389 if (bdispoff /* FIXME && gbgfxsuspended */) {
390 mdfld_dsi_dbi_exit_dsr(dev, MDFLD_DSR_2D_3D);
391 bdispoff = false;
392 dev_priv->dispstatus = true;
393 }
394
395 mdfld_dsi_dbi_set_power(encoder, true);
396 /* FIXME if (gbgfxsuspended)
397 gbgfxsuspended = false; */
398 } else {
399 /*
400 * I am not sure whether this is the perfect place to
401 * turn rpm on, since we still have a lot of CRTC turning-on
402 * work to do.
403 */
404 bdispoff = true;
405 dev_priv->dispstatus = false;
406 mdfld_dsi_dbi_set_power(encoder, false);
407 }
408}
409
410
411/*
412 * Update the DBI MIPI Panel Frame Buffer.
413 */
414static void mdfld_dsi_dbi_update_fb(struct mdfld_dsi_dbi_output *dbi_output,
415 int pipe)
416{
417 struct mdfld_dsi_pkg_sender *sender =
418 mdfld_dsi_encoder_get_pkg_sender(&dbi_output->base);
419 struct drm_device *dev = dbi_output->dev;
420 struct drm_crtc *crtc = dbi_output->base.base.crtc;
421 struct psb_intel_crtc *psb_crtc = (crtc) ?
422 to_psb_intel_crtc(crtc) : NULL;
423 u32 dpll_reg = MRST_DPLL_A;
424 u32 dspcntr_reg = DSPACNTR;
425 u32 pipeconf_reg = PIPEACONF;
426 u32 dsplinoff_reg = DSPALINOFF;
427 u32 dspsurf_reg = DSPASURF;
428 u32 reg_offset = 0;
429
430 /* If mode setting on-going, back off */
431 if ((dbi_output->mode_flags & MODE_SETTING_ON_GOING) ||
432 (psb_crtc && psb_crtc->mode_flags & MODE_SETTING_ON_GOING) ||
433 !(dbi_output->mode_flags & MODE_SETTING_ENCODER_DONE))
434 return;
435
436 if (pipe == 2) {
437 dspcntr_reg = DSPCCNTR;
438 pipeconf_reg = PIPECCONF;
439 dsplinoff_reg = DSPCLINOFF;
440 dspsurf_reg = DSPCSURF;
441 reg_offset = MIPIC_REG_OFFSET;
442 }
443
444 if (!gma_power_begin(dev, true)) {
445 dev_err(dev->dev, "hw begin failed\n");
446 return;
447 }
448
449 /* Check that the DPLL, display plane and pipe are enabled */
450 if (!(REG_READ(dpll_reg) & DPLL_VCO_ENABLE) ||
451 !(REG_READ(dspcntr_reg) & DISPLAY_PLANE_ENABLE) ||
452 !(REG_READ(pipeconf_reg) & DISPLAY_PLANE_ENABLE))
453 goto update_fb_out0;
454
455 /* Refresh plane changes */
456 REG_WRITE(dsplinoff_reg, REG_READ(dsplinoff_reg));
457 REG_WRITE(dspsurf_reg, REG_READ(dspsurf_reg));
458 REG_READ(dspsurf_reg);
459
460 mdfld_dsi_send_dcs(sender,
461 DCS_WRITE_MEM_START,
462 NULL,
463 0,
464 CMD_DATA_SRC_PIPE,
465 MDFLD_DSI_SEND_PACKAGE);
466
467 dbi_output->dsr_fb_update_done = true;
468update_fb_out0:
469 gma_power_end(dev);
470}
471
472static int tpo_cmd_get_panel_info(struct drm_device *dev,
473 int pipe,
474 struct panel_info *pi)
475{
476 if (!dev || !pi)
477 return -EINVAL;
478
479 pi->width_mm = TPO_PANEL_WIDTH;
480 pi->height_mm = TPO_PANEL_HEIGHT;
481
482 return 0;
483}
484
485
486/* TPO DBI encoder helper funcs */
487static const struct drm_encoder_helper_funcs mdfld_dsi_dbi_helper_funcs = {
488 .dpms = mdfld_dsi_dbi_dpms,
489 .mode_fixup = mdfld_dsi_dbi_mode_fixup,
490 .prepare = mdfld_dsi_dbi_prepare,
491 .mode_set = mdfld_dsi_dbi_mode_set,
492 .commit = mdfld_dsi_dbi_commit,
493};
494
495/* TPO DBI encoder funcs */
496static const struct drm_encoder_funcs mdfld_dsi_dbi_encoder_funcs = {
497 .destroy = drm_encoder_cleanup,
498};
499
500void tpo_cmd_init(struct drm_device *dev, struct panel_funcs *p_funcs)
501{
502 p_funcs->encoder_funcs = &mdfld_dsi_dbi_encoder_funcs;
503 p_funcs->encoder_helper_funcs = &mdfld_dsi_dbi_helper_funcs;
504 p_funcs->get_config_mode = &tpo_cmd_get_config_mode;
505 p_funcs->update_fb = mdfld_dsi_dbi_update_fb;
506 p_funcs->get_panel_info = tpo_cmd_get_panel_info;
507 p_funcs->reset = mdfld_dsi_panel_reset;
508 p_funcs->drv_ic_init = mdfld_dsi_brightness_init;
509}
diff --git a/drivers/staging/gma500/mdfld_tpo_vid.c b/drivers/staging/gma500/mdfld_tpo_vid.c
deleted file mode 100644
index 954901751760..000000000000
--- a/drivers/staging/gma500/mdfld_tpo_vid.c
+++ /dev/null
@@ -1,140 +0,0 @@
1/*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Jim Liu <jim.liu@intel.com>
25 * Jackie Li <yaodong.li@intel.com>
26 */
27
28#include "mdfld_dsi_dbi.h"
29#include "mdfld_dsi_dpi.h"
30#include "mdfld_dsi_output.h"
31#include "mdfld_output.h"
32
33#include "mdfld_dsi_pkg_sender.h"
34
35#include "displays/tpo_vid.h"
36
37static struct drm_display_mode *tpo_vid_get_config_mode(struct drm_device *dev)
38{
39 struct drm_display_mode *mode;
40 struct drm_psb_private *dev_priv = dev->dev_private;
41 struct mrst_timing_info *ti = &dev_priv->gct_data.DTD;
42 bool use_gct = false;
43
44 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
45 if (!mode) {
46 dev_err(dev->dev, "out of memory\n");
47 return NULL;
48 }
49
50 if (use_gct) {
51 mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
52 mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
53 mode->hsync_start = mode->hdisplay + \
54 ((ti->hsync_offset_hi << 8) | \
55 ti->hsync_offset_lo);
56 mode->hsync_end = mode->hsync_start + \
57 ((ti->hsync_pulse_width_hi << 8) | \
58 ti->hsync_pulse_width_lo);
59 mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \
60 ti->hblank_lo);
61 mode->vsync_start = \
62 mode->vdisplay + ((ti->vsync_offset_hi << 8) | \
63 ti->vsync_offset_lo);
64 mode->vsync_end = \
65 mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) | \
66 ti->vsync_pulse_width_lo);
67 mode->vtotal = mode->vdisplay + \
68 ((ti->vblank_hi << 8) | ti->vblank_lo);
69 mode->clock = ti->pixel_clock * 10;
70
71 dev_dbg(dev->dev, "hdisplay is %d\n", mode->hdisplay);
72 dev_dbg(dev->dev, "vdisplay is %d\n", mode->vdisplay);
73 dev_dbg(dev->dev, "HSS is %d\n", mode->hsync_start);
74 dev_dbg(dev->dev, "HSE is %d\n", mode->hsync_end);
75 dev_dbg(dev->dev, "htotal is %d\n", mode->htotal);
76 dev_dbg(dev->dev, "VSS is %d\n", mode->vsync_start);
77 dev_dbg(dev->dev, "VSE is %d\n", mode->vsync_end);
78 dev_dbg(dev->dev, "vtotal is %d\n", mode->vtotal);
79 dev_dbg(dev->dev, "clock is %d\n", mode->clock);
80 } else {
81 mode->hdisplay = 864;
82 mode->vdisplay = 480;
83 mode->hsync_start = 873;
84 mode->hsync_end = 876;
85 mode->htotal = 887;
86 mode->vsync_start = 487;
87 mode->vsync_end = 490;
88 mode->vtotal = 499;
89 mode->clock = 33264;
90 }
91
92 drm_mode_set_name(mode);
93 drm_mode_set_crtcinfo(mode, 0);
94
95 mode->type |= DRM_MODE_TYPE_PREFERRED;
96
97 return mode;
98}
99
100static int tpo_vid_get_panel_info(struct drm_device *dev,
101 int pipe,
102 struct panel_info *pi)
103{
104 if (!dev || !pi)
105 return -EINVAL;
106
107 pi->width_mm = TPO_PANEL_WIDTH;
108 pi->height_mm = TPO_PANEL_HEIGHT;
109
110 return 0;
111}
112
113/* TPO DPI encoder helper funcs */
114static const struct drm_encoder_helper_funcs
115 mdfld_tpo_dpi_encoder_helper_funcs = {
116 .dpms = mdfld_dsi_dpi_dpms,
117 .mode_fixup = mdfld_dsi_dpi_mode_fixup,
118 .prepare = mdfld_dsi_dpi_prepare,
119 .mode_set = mdfld_dsi_dpi_mode_set,
120 .commit = mdfld_dsi_dpi_commit,
121};
122
123/* TPO DPI encoder funcs */
124static const struct drm_encoder_funcs mdfld_tpo_dpi_encoder_funcs = {
125 .destroy = drm_encoder_cleanup,
126};
127
128void tpo_vid_init(struct drm_device *dev, struct panel_funcs *p_funcs)
129{
130 if (!dev || !p_funcs) {
131 DRM_ERROR("tpo_vid_init: Invalid parameters\n"); /* dev may be NULL here */
132 return;
133 }
134
135 p_funcs->encoder_funcs = &mdfld_tpo_dpi_encoder_funcs;
136 p_funcs->encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs;
137 p_funcs->get_config_mode = &tpo_vid_get_config_mode;
138 p_funcs->update_fb = NULL;
139 p_funcs->get_panel_info = tpo_vid_get_panel_info;
140}
diff --git a/drivers/staging/gma500/medfield.h b/drivers/staging/gma500/medfield.h
deleted file mode 100644
index 09e9687431f1..000000000000
--- a/drivers/staging/gma500/medfield.h
+++ /dev/null
@@ -1,268 +0,0 @@
1/*
2 * Copyright © 2011 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24/* Medfield DSI controller registers */
25
26#define MIPIA_DEVICE_READY_REG 0xb000
27#define MIPIA_INTR_STAT_REG 0xb004
28#define MIPIA_INTR_EN_REG 0xb008
29#define MIPIA_DSI_FUNC_PRG_REG 0xb00c
30#define MIPIA_HS_TX_TIMEOUT_REG 0xb010
31#define MIPIA_LP_RX_TIMEOUT_REG 0xb014
32#define MIPIA_TURN_AROUND_TIMEOUT_REG 0xb018
33#define MIPIA_DEVICE_RESET_TIMER_REG 0xb01c
34#define MIPIA_DPI_RESOLUTION_REG 0xb020
35#define MIPIA_DBI_FIFO_THROTTLE_REG 0xb024
36#define MIPIA_HSYNC_COUNT_REG 0xb028
37#define MIPIA_HBP_COUNT_REG 0xb02c
38#define MIPIA_HFP_COUNT_REG 0xb030
39#define MIPIA_HACTIVE_COUNT_REG 0xb034
40#define MIPIA_VSYNC_COUNT_REG 0xb038
41#define MIPIA_VBP_COUNT_REG 0xb03c
42#define MIPIA_VFP_COUNT_REG 0xb040
43#define MIPIA_HIGH_LOW_SWITCH_COUNT_REG 0xb044
44#define MIPIA_DPI_CONTROL_REG 0xb048
45#define MIPIA_DPI_DATA_REG 0xb04c
46#define MIPIA_INIT_COUNT_REG 0xb050
47#define MIPIA_MAX_RETURN_PACK_SIZE_REG 0xb054
48#define MIPIA_VIDEO_MODE_FORMAT_REG 0xb058
49#define MIPIA_EOT_DISABLE_REG 0xb05c
50#define MIPIA_LP_BYTECLK_REG 0xb060
51#define MIPIA_LP_GEN_DATA_REG 0xb064
52#define MIPIA_HS_GEN_DATA_REG 0xb068
53#define MIPIA_LP_GEN_CTRL_REG 0xb06c
54#define MIPIA_HS_GEN_CTRL_REG 0xb070
55#define MIPIA_GEN_FIFO_STAT_REG 0xb074
56#define MIPIA_HS_LS_DBI_ENABLE_REG 0xb078
57#define MIPIA_DPHY_PARAM_REG 0xb080
58#define MIPIA_DBI_BW_CTRL_REG 0xb084
59#define MIPIA_CLK_LANE_SWITCH_TIME_CNT_REG 0xb088
60
61#define DSI_DEVICE_READY (0x1)
62#define DSI_POWER_STATE_ULPS_ENTER (0x2 << 1)
63#define DSI_POWER_STATE_ULPS_EXIT (0x1 << 1)
64#define DSI_POWER_STATE_ULPS_OFFSET (0x1)
65
66
67#define DSI_ONE_DATA_LANE (0x1)
68#define DSI_TWO_DATA_LANE (0x2)
69#define DSI_THREE_DATA_LANE (0x3)
70#define DSI_FOUR_DATA_LANE (0x4)
71#define DSI_DPI_VIRT_CHANNEL_OFFSET (0x3)
72#define DSI_DBI_VIRT_CHANNEL_OFFSET (0x5)
73#define DSI_DPI_COLOR_FORMAT_RGB565 (0x01 << 7)
74#define DSI_DPI_COLOR_FORMAT_RGB666 (0x02 << 7)
75#define DSI_DPI_COLOR_FORMAT_RGB666_UNPACK (0x03 << 7)
76#define DSI_DPI_COLOR_FORMAT_RGB888 (0x04 << 7)
77#define DSI_DBI_COLOR_FORMAT_OPTION2 (0x05 << 13)
78
79#define DSI_INTR_STATE_RXSOTERROR 1
80
81#define DSI_INTR_STATE_SPL_PKG_SENT (1 << 30)
82#define DSI_INTR_STATE_TE (1 << 31)
83
84#define DSI_HS_TX_TIMEOUT_MASK (0xffffff)
85
86#define DSI_LP_RX_TIMEOUT_MASK (0xffffff)
87
88#define DSI_TURN_AROUND_TIMEOUT_MASK (0x3f)
89
90#define DSI_RESET_TIMER_MASK (0xffff)
91
92#define DSI_DBI_FIFO_WM_HALF (0x0)
93#define DSI_DBI_FIFO_WM_QUARTER (0x1)
94#define DSI_DBI_FIFO_WM_LOW (0x2)
95
96#define DSI_DPI_TIMING_MASK (0xffff)
97
98#define DSI_INIT_TIMER_MASK (0xffff)
99
100#define DSI_DBI_RETURN_PACK_SIZE_MASK (0x3ff)
101
102#define DSI_LP_BYTECLK_MASK (0x0ffff)
103
104#define DSI_HS_CTRL_GEN_SHORT_W0 (0x03)
105#define DSI_HS_CTRL_GEN_SHORT_W1 (0x13)
106#define DSI_HS_CTRL_GEN_SHORT_W2 (0x23)
107#define DSI_HS_CTRL_GEN_R0 (0x04)
108#define DSI_HS_CTRL_GEN_R1 (0x14)
109#define DSI_HS_CTRL_GEN_R2 (0x24)
110#define DSI_HS_CTRL_GEN_LONG_W (0x29)
111#define DSI_HS_CTRL_MCS_SHORT_W0 (0x05)
112#define DSI_HS_CTRL_MCS_SHORT_W1 (0x15)
113#define DSI_HS_CTRL_MCS_R0 (0x06)
114#define DSI_HS_CTRL_MCS_LONG_W (0x39)
115#define DSI_HS_CTRL_VC_OFFSET (0x06)
116#define DSI_HS_CTRL_WC_OFFSET (0x08)
117
118#define DSI_FIFO_GEN_HS_DATA_FULL (1 << 0)
119#define DSI_FIFO_GEN_HS_DATA_HALF_EMPTY (1 << 1)
120#define DSI_FIFO_GEN_HS_DATA_EMPTY (1 << 2)
121#define DSI_FIFO_GEN_LP_DATA_FULL (1 << 8)
122#define DSI_FIFO_GEN_LP_DATA_HALF_EMPTY (1 << 9)
123#define DSI_FIFO_GEN_LP_DATA_EMPTY (1 << 10)
124#define DSI_FIFO_GEN_HS_CTRL_FULL (1 << 16)
125#define DSI_FIFO_GEN_HS_CTRL_HALF_EMPTY (1 << 17)
126#define DSI_FIFO_GEN_HS_CTRL_EMPTY (1 << 18)
127#define DSI_FIFO_GEN_LP_CTRL_FULL (1 << 24)
128#define DSI_FIFO_GEN_LP_CTRL_HALF_EMPTY (1 << 25)
129#define DSI_FIFO_GEN_LP_CTRL_EMPTY (1 << 26)
130#define DSI_FIFO_DBI_EMPTY (1 << 27)
131#define DSI_FIFO_DPI_EMPTY (1 << 28)
132
133#define DSI_DBI_HS_LP_SWITCH_MASK (0x1)
134
135#define DSI_HS_LP_SWITCH_COUNTER_OFFSET (0x0)
136#define DSI_LP_HS_SWITCH_COUNTER_OFFSET (0x16)
137
138#define DSI_DPI_CTRL_HS_SHUTDOWN (0x00000001)
139#define DSI_DPI_CTRL_HS_TURN_ON (0x00000002)
140
141/* Medfield DSI adapter registers */
142#define MIPIA_CONTROL_REG 0xb104
143#define MIPIA_DATA_ADD_REG 0xb108
144#define MIPIA_DATA_LEN_REG 0xb10c
145#define MIPIA_CMD_ADD_REG 0xb110
146#define MIPIA_CMD_LEN_REG 0xb114
147
148/* DSI power modes */
149#define DSI_POWER_MODE_DISPLAY_ON (1 << 2)
150#define DSI_POWER_MODE_NORMAL_ON (1 << 3)
151#define DSI_POWER_MODE_SLEEP_OUT (1 << 4)
152#define DSI_POWER_MODE_PARTIAL_ON (1 << 5)
153#define DSI_POWER_MODE_IDLE_ON (1 << 6)
154
155enum {
156 MDFLD_DSI_ENCODER_DBI = 0,
157 MDFLD_DSI_ENCODER_DPI,
158};
159
160enum {
161 MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_PULSE = 1,
162 MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_EVENTS = 2,
163 MDFLD_DSI_VIDEO_BURST_MODE = 3,
164};
165
166#define DSI_DPI_COMPLETE_LAST_LINE (1 << 2)
167#define DSI_DPI_DISABLE_BTA (1 << 3)
168/* Panel types */
169enum {
170 TPO_CMD,
171 TPO_VID,
172 TMD_CMD,
173 TMD_VID,
174 PYR_CMD,
175 PYR_VID,
176 TPO,
177 TMD,
178 PYR,
179 HDMI,
180 GCT_DETECT
181};
182
183/* Junk that belongs elsewhere */
184#define TPO_PANEL_WIDTH 84
185#define TPO_PANEL_HEIGHT 46
186#define TMD_PANEL_WIDTH 39
187#define TMD_PANEL_HEIGHT 71
188#define PYR_PANEL_WIDTH 53
189#define PYR_PANEL_HEIGHT 95
190
191/* Panel interface */
192struct panel_info {
193 u32 width_mm;
194 u32 height_mm;
195};
196
197struct mdfld_dsi_dbi_output;
198
199struct mdfld_dsi_connector_state {
200 u32 mipi_ctrl_reg;
201};
202
203struct mdfld_dsi_encoder_state {
204
205};
206
207struct mdfld_dsi_connector {
208 /*
209 * This is ugly, but I have to use connector in it! :-(
210 * FIXME: use drm_connector instead.
211 */
212 struct psb_intel_output base;
213
214 int pipe;
215 void *private;
216 void *pkg_sender;
217
218 /* Connection status */
219 enum drm_connector_status status;
220};
221
222struct mdfld_dsi_encoder {
223 struct drm_encoder base;
224 void *private;
225};
226
227/*
228 * DSI config: consists of one DSI connector and two DSI encoders.
229 * DRM will pick up a DSI encoder based on the different configs.
230 */
231struct mdfld_dsi_config {
232 struct drm_device *dev;
233 struct drm_display_mode *fixed_mode;
234 struct drm_display_mode *mode;
235
236 struct mdfld_dsi_connector *connector;
237 struct mdfld_dsi_encoder *encoders[DRM_CONNECTOR_MAX_ENCODER];
238 struct mdfld_dsi_encoder *encoder;
239
240 int changed;
241
242 int bpp;
243 int type;
244 int lane_count;
245 /* Virtual channel number for this encoder */
246 int channel_num;
247 /* Video mode configuration */
248 int video_mode;
249
250 int dvr_ic_inited;
251};
252
253#define MDFLD_DSI_CONNECTOR(psb_output) \
254 (container_of(psb_output, struct mdfld_dsi_connector, base))
255
256#define MDFLD_DSI_ENCODER(encoder) \
257 (container_of(encoder, struct mdfld_dsi_encoder, base))
258
259struct panel_funcs {
260 const struct drm_encoder_funcs *encoder_funcs;
261 const struct drm_encoder_helper_funcs *encoder_helper_funcs;
262 struct drm_display_mode *(*get_config_mode) (struct drm_device *);
263 void (*update_fb) (struct mdfld_dsi_dbi_output *, int);
264 int (*get_panel_info) (struct drm_device *, int, struct panel_info *);
265 int (*reset)(int pipe);
266 void (*drv_ic_init)(struct mdfld_dsi_config *dsi_config, int pipe);
267};
268
diff --git a/drivers/staging/gma500/mid_bios.c b/drivers/staging/gma500/mid_bios.c
deleted file mode 100644
index ee3c0368e320..000000000000
--- a/drivers/staging/gma500/mid_bios.c
+++ /dev/null
@@ -1,270 +0,0 @@
1/**************************************************************************
2 * Copyright (c) 2011, Intel Corporation.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 **************************************************************************/
19
20/* TODO
21 * - Split functions by vbt type
22 * - Make them all take drm_device
23 * - Check ioremap failures
24 */
25
26#include <linux/moduleparam.h>
27#include <drm/drmP.h>
28#include <drm/drm.h>
29#include "psb_drm.h"
30#include "psb_drv.h"
31#include "mid_bios.h"
32#include "mdfld_output.h"
33
34static int panel_id = GCT_DETECT;
35module_param_named(panel_id, panel_id, int, 0600);
36MODULE_PARM_DESC(panel_id, "Panel Identifier");
37
38
39static void mid_get_fuse_settings(struct drm_device *dev)
40{
41 struct drm_psb_private *dev_priv = dev->dev_private;
42 struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
43 uint32_t fuse_value = 0;
44 uint32_t fuse_value_tmp = 0;
45
46#define FB_REG06 0xD0810600
47#define FB_MIPI_DISABLE (1 << 11)
48#define FB_REG09 0xD0810900
50#define FB_SKU_MASK 0x7000
51#define FB_SKU_SHIFT 12
52#define FB_SKU_100 0
53#define FB_SKU_100L 1
54#define FB_SKU_83 2
55 pci_write_config_dword(pci_root, 0xD0, FB_REG06);
56 pci_read_config_dword(pci_root, 0xD4, &fuse_value);
57
58 /* FB_MIPI_DISABLE doesn't mean LVDS on with Medfield */
59 if (IS_MRST(dev))
60 dev_priv->iLVDS_enable = fuse_value & FB_MIPI_DISABLE;
61
62 DRM_INFO("internal display is %s\n",
63 dev_priv->iLVDS_enable ? "LVDS display" : "MIPI display");
64
65 /* Prevent runtime suspend at start*/
66 if (dev_priv->iLVDS_enable) {
67 dev_priv->is_lvds_on = true;
68 dev_priv->is_mipi_on = false;
69 } else {
70 dev_priv->is_mipi_on = true;
71 dev_priv->is_lvds_on = false;
72 }
73
74 dev_priv->video_device_fuse = fuse_value;
75
76 pci_write_config_dword(pci_root, 0xD0, FB_REG09);
77 pci_read_config_dword(pci_root, 0xD4, &fuse_value);
78
79 dev_dbg(dev->dev, "SKU value is 0x%x.\n", fuse_value);
80 fuse_value_tmp = (fuse_value & FB_SKU_MASK) >> FB_SKU_SHIFT;
81
82 dev_priv->fuse_reg_value = fuse_value;
83
84 switch (fuse_value_tmp) {
85 case FB_SKU_100:
86 dev_priv->core_freq = 200;
87 break;
88 case FB_SKU_100L:
89 dev_priv->core_freq = 100;
90 break;
91 case FB_SKU_83:
92 dev_priv->core_freq = 166;
93 break;
94 default:
95 dev_warn(dev->dev, "Invalid SKU values, SKU value = 0x%08x\n",
96 fuse_value_tmp);
97 dev_priv->core_freq = 0;
98 }
99 dev_dbg(dev->dev, "LNC core clk is %dMHz.\n", dev_priv->core_freq);
100 pci_dev_put(pci_root);
101}
102
103/*
104 * Get the revision ID, B0:D2:F0;0x08
105 */
106static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
107{
108 uint32_t platform_rev_id = 0;
109 struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
110
111 pci_read_config_dword(pci_gfx_root, 0x08, &platform_rev_id);
112 dev_priv->platform_rev_id = (uint8_t) platform_rev_id;
113 pci_dev_put(pci_gfx_root);
114 dev_dbg(dev_priv->dev->dev, "platform_rev_id is %x\n",
115 dev_priv->platform_rev_id);
116}
117
118static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
119{
120 struct drm_device *dev = dev_priv->dev;
121 struct mrst_vbt *vbt = &dev_priv->vbt_data;
122 u32 addr;
123 u16 new_size;
124 u8 *vbt_virtual;
125 u8 bpi;
126 u8 number_desc = 0;
127 struct mrst_timing_info *dp_ti = &dev_priv->gct_data.DTD;
128 struct gct_r10_timing_info ti;
129 void *pGCT;
130 struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
131
132 /* Get the address of the platform config vbt, B0:D2:F0;0xFC */
133 pci_read_config_dword(pci_gfx_root, 0xFC, &addr);
134 pci_dev_put(pci_gfx_root);
135
136 dev_dbg(dev->dev, "drm platform config address is %x\n", addr);
137
138 /* Check for platform config address == 0; */
139 /* this means the firmware doesn't support a VBT */
140
141 if (addr == 0) {
142 vbt->size = 0;
143 return;
144 }
145
146 /* get the virtual address of the vbt */
147 vbt_virtual = ioremap(addr, sizeof(*vbt));
148
149 memcpy(vbt, vbt_virtual, sizeof(*vbt));
150 iounmap(vbt_virtual); /* Free virtual address space */
151
152 dev_dbg(dev->dev, "GCT revision is %x\n", vbt->revision);
153
154 switch (vbt->revision) {
155 case 0:
156 vbt->mrst_gct = ioremap(addr + sizeof(*vbt) - 4,
157 vbt->size - sizeof(*vbt) + 4);
158 pGCT = vbt->mrst_gct;
159 bpi = ((struct mrst_gct_v1 *)pGCT)->PD.BootPanelIndex;
160 dev_priv->gct_data.bpi = bpi;
161 dev_priv->gct_data.pt =
162 ((struct mrst_gct_v1 *)pGCT)->PD.PanelType;
163 memcpy(&dev_priv->gct_data.DTD,
164 &((struct mrst_gct_v1 *)pGCT)->panel[bpi].DTD,
165 sizeof(struct mrst_timing_info));
166 dev_priv->gct_data.Panel_Port_Control =
167 ((struct mrst_gct_v1 *)pGCT)->panel[bpi].Panel_Port_Control;
168 dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
169 ((struct mrst_gct_v1 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
170 break;
171 case 1:
172 vbt->mrst_gct = ioremap(addr + sizeof(*vbt) - 4,
173 vbt->size - sizeof(*vbt) + 4);
174 pGCT = vbt->mrst_gct;
175 bpi = ((struct mrst_gct_v2 *)pGCT)->PD.BootPanelIndex;
176 dev_priv->gct_data.bpi = bpi;
177 dev_priv->gct_data.pt =
178 ((struct mrst_gct_v2 *)pGCT)->PD.PanelType;
179 memcpy(&dev_priv->gct_data.DTD,
180 &((struct mrst_gct_v2 *)pGCT)->panel[bpi].DTD,
181 sizeof(struct mrst_timing_info));
182 dev_priv->gct_data.Panel_Port_Control =
183 ((struct mrst_gct_v2 *)pGCT)->panel[bpi].Panel_Port_Control;
184 dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
185 ((struct mrst_gct_v2 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
186 break;
187 case 0x10:
188 /* Header definition changed from rev 01 (v2) to rev 10h, */
189 /* so some values have changed location */
190 new_size = vbt->checksum; /*checksum contains lo size byte*/
191 /*LSB of mrst_gct contains hi size byte*/
192 new_size |= ((0xff & (unsigned int)vbt->mrst_gct)) << 8;
193
194 vbt->checksum = vbt->size; /*size contains the checksum*/
195 if (new_size > 0xff)
196 vbt->size = 0xff; /*restrict size to 255*/
197 else
198 vbt->size = new_size;
199
200 /* number of descriptors defined in the GCT */
201 number_desc = ((0xff00 & (unsigned int)vbt->mrst_gct)) >> 8;
202 bpi = ((0xff0000 & (unsigned int)vbt->mrst_gct)) >> 16;
203 vbt->mrst_gct = ioremap(addr + GCT_R10_HEADER_SIZE,
204 GCT_R10_DISPLAY_DESC_SIZE * number_desc);
205 pGCT = vbt->mrst_gct;
206 pGCT = (u8 *)pGCT + (bpi*GCT_R10_DISPLAY_DESC_SIZE);
207 dev_priv->gct_data.bpi = bpi; /*save boot panel id*/
208
209 /*copy the GCT display timings into a temp structure*/
210 memcpy(&ti, pGCT, sizeof(struct gct_r10_timing_info));
211
212 /*now copy the temp struct into the dev_priv->gct_data*/
213 dp_ti->pixel_clock = ti.pixel_clock;
214 dp_ti->hactive_hi = ti.hactive_hi;
215 dp_ti->hactive_lo = ti.hactive_lo;
216 dp_ti->hblank_hi = ti.hblank_hi;
217 dp_ti->hblank_lo = ti.hblank_lo;
218 dp_ti->hsync_offset_hi = ti.hsync_offset_hi;
219 dp_ti->hsync_offset_lo = ti.hsync_offset_lo;
220 dp_ti->hsync_pulse_width_hi = ti.hsync_pulse_width_hi;
221 dp_ti->hsync_pulse_width_lo = ti.hsync_pulse_width_lo;
222 dp_ti->vactive_hi = ti.vactive_hi;
223 dp_ti->vactive_lo = ti.vactive_lo;
224 dp_ti->vblank_hi = ti.vblank_hi;
225 dp_ti->vblank_lo = ti.vblank_lo;
226 dp_ti->vsync_offset_hi = ti.vsync_offset_hi;
227 dp_ti->vsync_offset_lo = ti.vsync_offset_lo;
228 dp_ti->vsync_pulse_width_hi = ti.vsync_pulse_width_hi;
229 dp_ti->vsync_pulse_width_lo = ti.vsync_pulse_width_lo;
230
231 /* Move the MIPI_Display_Descriptor data from GCT to dev priv */
232 dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
233 *((u8 *)pGCT + 0x0d);
234 dev_priv->gct_data.Panel_MIPI_Display_Descriptor |=
235 (*((u8 *)pGCT + 0x0e)) << 8;
236 break;
237 default:
238 dev_err(dev->dev, "Unknown revision of GCT!\n");
239 vbt->size = 0;
240 }
241 if (IS_MFLD(dev_priv->dev)) {
242 if (panel_id == GCT_DETECT) {
243 if (dev_priv->gct_data.bpi == 2) {
244 dev_info(dev->dev, "[GFX] PYR Panel Detected\n");
245 dev_priv->panel_id = PYR_CMD;
246 panel_id = PYR_CMD;
247 } else if (dev_priv->gct_data.bpi == 0) {
248 dev_info(dev->dev, "[GFX] TMD Panel Detected.\n");
249 dev_priv->panel_id = TMD_VID;
250 panel_id = TMD_VID;
251 } else {
252 dev_info(dev->dev, "[GFX] Default Panel (TPO)\n");
253 dev_priv->panel_id = TPO_CMD;
254 panel_id = TPO_CMD;
255 }
256 } else {
257 dev_info(dev->dev, "[GFX] Panel Parameter Passed in through cmd line\n");
258 dev_priv->panel_id = panel_id;
259 }
260 }
261}
262
263int mid_chip_setup(struct drm_device *dev)
264{
265 struct drm_psb_private *dev_priv = dev->dev_private;
266 mid_get_fuse_settings(dev);
267 mid_get_vbt_data(dev_priv);
268 mid_get_pci_revID(dev_priv);
269 return 0;
270}
diff --git a/drivers/staging/gma500/mid_bios.h b/drivers/staging/gma500/mid_bios.h
deleted file mode 100644
index 00e7d564b7eb..000000000000
--- a/drivers/staging/gma500/mid_bios.h
+++ /dev/null
@@ -1,21 +0,0 @@
1/**************************************************************************
2 * Copyright (c) 2011, Intel Corporation.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 **************************************************************************/
19
20extern int mid_chip_setup(struct drm_device *dev);
21
diff --git a/drivers/staging/gma500/mmu.c b/drivers/staging/gma500/mmu.c
deleted file mode 100644
index c904d73b1de3..000000000000
--- a/drivers/staging/gma500/mmu.c
+++ /dev/null
@@ -1,858 +0,0 @@
1/**************************************************************************
2 * Copyright (c) 2007, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 **************************************************************************/
18#include <drm/drmP.h>
19#include "psb_drv.h"
20#include "psb_reg.h"
21
22/*
23 * Code for the SGX MMU:
24 */
25
26/*
27 * clflush on one processor only:
28 * clflush should apparently flush the cache line on all processors in an
29 * SMP system.
30 */
31
32/*
33 * kmap atomic:
34 * The usage of the slots must be completely encapsulated within a spinlock, and
35 * no other functions that may be using the locks for other purposes may be
36 * called from within the locked region.
37 * Since the slots are per processor, this will guarantee that we are the only
38 * user.
39 */
40
41/*
42 * TODO: Inserting ptes from an interrupt handler:
43 * This may be desirable for some SGX functionality where the GPU can fault in
44 * needed pages. For that, we need to make an atomic insert_pages function that
45 * may fail.
46 * If it fails, the caller needs to insert the page using a workqueue function,
47 * but on average it should be fast.
48 */
49
50struct psb_mmu_driver {
51 /* protects driver- and pd structures. Always take in read mode
52 * before taking the page table spinlock.
53 */
54 struct rw_semaphore sem;
55
56 * protects page tables, page directory tables
57 * and pt structures.
58 */
59 spinlock_t lock;
60
61 atomic_t needs_tlbflush;
62
63 uint8_t __iomem *register_map;
64 struct psb_mmu_pd *default_pd;
65 /*uint32_t bif_ctrl;*/
66 int has_clflush;
67 int clflush_add;
68 unsigned long clflush_mask;
69
70 struct drm_psb_private *dev_priv;
71};
72
73struct psb_mmu_pd;
74
75struct psb_mmu_pt {
76 struct psb_mmu_pd *pd;
77 uint32_t index;
78 uint32_t count;
79 struct page *p;
80 uint32_t *v;
81};
82
83struct psb_mmu_pd {
84 struct psb_mmu_driver *driver;
85 int hw_context;
86 struct psb_mmu_pt **tables;
87 struct page *p;
88 struct page *dummy_pt;
89 struct page *dummy_page;
90 uint32_t pd_mask;
91 uint32_t invalid_pde;
92 uint32_t invalid_pte;
93};
94
95static inline uint32_t psb_mmu_pt_index(uint32_t offset)
96{
97 return (offset >> PSB_PTE_SHIFT) & 0x3FF;
98}
99
100static inline uint32_t psb_mmu_pd_index(uint32_t offset)
101{
102 return offset >> PSB_PDE_SHIFT;
103}
104
105static inline void psb_clflush(void *addr)
106{
107 __asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
108}
109
110static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
111 void *addr)
112{
113 if (!driver->has_clflush)
114 return;
115
116 mb();
117 psb_clflush(addr);
118 mb();
119}
120
121static void psb_page_clflush(struct psb_mmu_driver *driver, struct page* page)
122{
123 uint32_t clflush_add = driver->clflush_add >> PAGE_SHIFT;
124 uint32_t clflush_count = PAGE_SIZE / clflush_add;
125 int i;
126 uint8_t *clf;
127
128 clf = kmap_atomic(page, KM_USER0);
129 mb();
130 for (i = 0; i < clflush_count; ++i) {
131 psb_clflush(clf);
132 clf += clflush_add;
133 }
134 mb();
135 kunmap_atomic(clf, KM_USER0);
136}
137
138static void psb_pages_clflush(struct psb_mmu_driver *driver,
139 struct page *page[], unsigned long num_pages)
140{
141 int i;
142
143 if (!driver->has_clflush)
144		return;
145
146 for (i = 0; i < num_pages; i++)
147 psb_page_clflush(driver, *page++);
148}
149
150static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver,
151 int force)
152{
153 atomic_set(&driver->needs_tlbflush, 0);
154}
155
156static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
157{
158 down_write(&driver->sem);
159 psb_mmu_flush_pd_locked(driver, force);
160 up_write(&driver->sem);
161}
162
163void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot)
164{
165 if (rc_prot)
166 down_write(&driver->sem);
167 if (rc_prot)
168 up_write(&driver->sem);
169}
170
171void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
172{
173 /*ttm_tt_cache_flush(&pd->p, 1);*/
174 psb_pages_clflush(pd->driver, &pd->p, 1);
175 down_write(&pd->driver->sem);
176 wmb();
177 psb_mmu_flush_pd_locked(pd->driver, 1);
178 pd->hw_context = hw_context;
179 up_write(&pd->driver->sem);
180
181}
182
183static inline unsigned long psb_pd_addr_end(unsigned long addr,
184 unsigned long end)
185{
186
187 addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
188 return (addr < end) ? addr : end;
189}
190
191static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
192{
193 uint32_t mask = PSB_PTE_VALID;
194
195 if (type & PSB_MMU_CACHED_MEMORY)
196 mask |= PSB_PTE_CACHED;
197 if (type & PSB_MMU_RO_MEMORY)
198 mask |= PSB_PTE_RO;
199 if (type & PSB_MMU_WO_MEMORY)
200 mask |= PSB_PTE_WO;
201
202 return (pfn << PAGE_SHIFT) | mask;
203}
204
205struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
206 int trap_pagefaults, int invalid_type)
207{
208 struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
209 uint32_t *v;
210 int i;
211
212 if (!pd)
213 return NULL;
214
215 pd->p = alloc_page(GFP_DMA32);
216 if (!pd->p)
217 goto out_err1;
218 pd->dummy_pt = alloc_page(GFP_DMA32);
219 if (!pd->dummy_pt)
220 goto out_err2;
221 pd->dummy_page = alloc_page(GFP_DMA32);
222 if (!pd->dummy_page)
223 goto out_err3;
224
225 if (!trap_pagefaults) {
226 pd->invalid_pde =
227 psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
228 invalid_type);
229 pd->invalid_pte =
230 psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
231 invalid_type);
232 } else {
233 pd->invalid_pde = 0;
234 pd->invalid_pte = 0;
235 }
236
237 v = kmap(pd->dummy_pt);
238 for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
239 v[i] = pd->invalid_pte;
240
241 kunmap(pd->dummy_pt);
242
243 v = kmap(pd->p);
244 for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
245 v[i] = pd->invalid_pde;
246
247 kunmap(pd->p);
248
249 clear_page(kmap(pd->dummy_page));
250 kunmap(pd->dummy_page);
251
252 pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
253 if (!pd->tables)
254 goto out_err4;
255
256 pd->hw_context = -1;
257 pd->pd_mask = PSB_PTE_VALID;
258 pd->driver = driver;
259
260 return pd;
261
262out_err4:
263 __free_page(pd->dummy_page);
264out_err3:
265 __free_page(pd->dummy_pt);
266out_err2:
267 __free_page(pd->p);
268out_err1:
269 kfree(pd);
270 return NULL;
271}
272
273void psb_mmu_free_pt(struct psb_mmu_pt *pt)
274{
275 __free_page(pt->p);
276 kfree(pt);
277}
278
279void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
280{
281 struct psb_mmu_driver *driver = pd->driver;
282 struct psb_mmu_pt *pt;
283 int i;
284
285 down_write(&driver->sem);
286 if (pd->hw_context != -1)
287 psb_mmu_flush_pd_locked(driver, 1);
288
289 /* Should take the spinlock here, but we don't need to do that
290 since we have the semaphore in write mode. */
291
292 for (i = 0; i < 1024; ++i) {
293 pt = pd->tables[i];
294 if (pt)
295 psb_mmu_free_pt(pt);
296 }
297
298 vfree(pd->tables);
299 __free_page(pd->dummy_page);
300 __free_page(pd->dummy_pt);
301 __free_page(pd->p);
302 kfree(pd);
303 up_write(&driver->sem);
304}
305
306static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
307{
308 struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
309 void *v;
310 uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
311 uint32_t clflush_count = PAGE_SIZE / clflush_add;
312 spinlock_t *lock = &pd->driver->lock;
313 uint8_t *clf;
314 uint32_t *ptes;
315 int i;
316
317 if (!pt)
318 return NULL;
319
320 pt->p = alloc_page(GFP_DMA32);
321 if (!pt->p) {
322 kfree(pt);
323 return NULL;
324 }
325
326 spin_lock(lock);
327
328 v = kmap_atomic(pt->p, KM_USER0);
329 clf = (uint8_t *) v;
330 ptes = (uint32_t *) v;
331 for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
332 *ptes++ = pd->invalid_pte;
333
334
335 if (pd->driver->has_clflush && pd->hw_context != -1) {
336 mb();
337 for (i = 0; i < clflush_count; ++i) {
338 psb_clflush(clf);
339 clf += clflush_add;
340 }
341 mb();
342 }
343
344 kunmap_atomic(v, KM_USER0);
345 spin_unlock(lock);
346
347 pt->count = 0;
348 pt->pd = pd;
349 pt->index = 0;
350
351 return pt;
352}
353
354struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
355 unsigned long addr)
356{
357 uint32_t index = psb_mmu_pd_index(addr);
358 struct psb_mmu_pt *pt;
359 uint32_t *v;
360 spinlock_t *lock = &pd->driver->lock;
361
362 spin_lock(lock);
363 pt = pd->tables[index];
364 while (!pt) {
365 spin_unlock(lock);
366 pt = psb_mmu_alloc_pt(pd);
367 if (!pt)
368 return NULL;
369 spin_lock(lock);
370
371 if (pd->tables[index]) {
372 spin_unlock(lock);
373 psb_mmu_free_pt(pt);
374 spin_lock(lock);
375 pt = pd->tables[index];
376 continue;
377 }
378
379 v = kmap_atomic(pd->p, KM_USER0);
380 pd->tables[index] = pt;
381 v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
382 pt->index = index;
383 kunmap_atomic((void *) v, KM_USER0);
384
385 if (pd->hw_context != -1) {
386 psb_mmu_clflush(pd->driver, (void *) &v[index]);
387 atomic_set(&pd->driver->needs_tlbflush, 1);
388 }
389 }
390 pt->v = kmap_atomic(pt->p, KM_USER0);
391 return pt;
392}
393
394static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
395 unsigned long addr)
396{
397 uint32_t index = psb_mmu_pd_index(addr);
398 struct psb_mmu_pt *pt;
399 spinlock_t *lock = &pd->driver->lock;
400
401 spin_lock(lock);
402 pt = pd->tables[index];
403 if (!pt) {
404 spin_unlock(lock);
405 return NULL;
406 }
407 pt->v = kmap_atomic(pt->p, KM_USER0);
408 return pt;
409}
410
411static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
412{
413 struct psb_mmu_pd *pd = pt->pd;
414 uint32_t *v;
415
416 kunmap_atomic(pt->v, KM_USER0);
417 if (pt->count == 0) {
418 v = kmap_atomic(pd->p, KM_USER0);
419 v[pt->index] = pd->invalid_pde;
420 pd->tables[pt->index] = NULL;
421
422 if (pd->hw_context != -1) {
423 psb_mmu_clflush(pd->driver,
424 (void *) &v[pt->index]);
425 atomic_set(&pd->driver->needs_tlbflush, 1);
426 }
427 kunmap_atomic(pt->v, KM_USER0);
428 spin_unlock(&pd->driver->lock);
429 psb_mmu_free_pt(pt);
430 return;
431 }
432 spin_unlock(&pd->driver->lock);
433}
434
435static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt,
436 unsigned long addr, uint32_t pte)
437{
438 pt->v[psb_mmu_pt_index(addr)] = pte;
439}
440
441static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
442 unsigned long addr)
443{
444 pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
445}
446
447
448void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
449 uint32_t mmu_offset, uint32_t gtt_start,
450 uint32_t gtt_pages)
451{
452 uint32_t *v;
453 uint32_t start = psb_mmu_pd_index(mmu_offset);
454 struct psb_mmu_driver *driver = pd->driver;
455 int num_pages = gtt_pages;
456
457 down_read(&driver->sem);
458 spin_lock(&driver->lock);
459
460 v = kmap_atomic(pd->p, KM_USER0);
461 v += start;
462
463 while (gtt_pages--) {
464 *v++ = gtt_start | pd->pd_mask;
465 gtt_start += PAGE_SIZE;
466 }
467
468 /*ttm_tt_cache_flush(&pd->p, num_pages);*/
469 psb_pages_clflush(pd->driver, &pd->p, num_pages);
470 kunmap_atomic(v, KM_USER0);
471 spin_unlock(&driver->lock);
472
473 if (pd->hw_context != -1)
474 atomic_set(&pd->driver->needs_tlbflush, 1);
475
476 up_read(&pd->driver->sem);
477 psb_mmu_flush_pd(pd->driver, 0);
478}
479
480struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
481{
482 struct psb_mmu_pd *pd;
483
484 /* down_read(&driver->sem); */
485 pd = driver->default_pd;
486 /* up_read(&driver->sem); */
487
488 return pd;
489}
490
491/* Returns the physical address of the PD shared by sgx/msvdx */
492uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
493{
494 struct psb_mmu_pd *pd;
495
496 pd = psb_mmu_get_default_pd(driver);
497 return page_to_pfn(pd->p) << PAGE_SHIFT;
498}
499
500void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
501{
502 psb_mmu_free_pagedir(driver->default_pd);
503 kfree(driver);
504}
505
506struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
507 int trap_pagefaults,
508 int invalid_type,
509 struct drm_psb_private *dev_priv)
510{
511 struct psb_mmu_driver *driver;
512
513 driver = kmalloc(sizeof(*driver), GFP_KERNEL);
514
515 if (!driver)
516 return NULL;
517 driver->dev_priv = dev_priv;
518
519 driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
520 invalid_type);
521 if (!driver->default_pd)
522 goto out_err1;
523
524 spin_lock_init(&driver->lock);
525 init_rwsem(&driver->sem);
526 down_write(&driver->sem);
527 driver->register_map = registers;
528 atomic_set(&driver->needs_tlbflush, 1);
529
530 driver->has_clflush = 0;
531
532 if (boot_cpu_has(X86_FEATURE_CLFLSH)) {
533 uint32_t tfms, misc, cap0, cap4, clflush_size;
534
535 /*
536 * clflush size is determined at kernel setup for x86_64
537 * but not for i386. We have to do it here.
538 */
539
540 cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
541 clflush_size = ((misc >> 8) & 0xff) * 8;
542 driver->has_clflush = 1;
543 driver->clflush_add =
544 PAGE_SIZE * clflush_size / sizeof(uint32_t);
545 driver->clflush_mask = driver->clflush_add - 1;
546 driver->clflush_mask = ~driver->clflush_mask;
547 }
548
549 up_write(&driver->sem);
550 return driver;
551
552out_err1:
553 kfree(driver);
554 return NULL;
555}
556
557static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
558 unsigned long address, uint32_t num_pages,
559 uint32_t desired_tile_stride,
560 uint32_t hw_tile_stride)
561{
562 struct psb_mmu_pt *pt;
563 uint32_t rows = 1;
564 uint32_t i;
565 unsigned long addr;
566 unsigned long end;
567 unsigned long next;
568 unsigned long add;
569 unsigned long row_add;
570 unsigned long clflush_add = pd->driver->clflush_add;
571 unsigned long clflush_mask = pd->driver->clflush_mask;
572
573 if (!pd->driver->has_clflush) {
574 /*ttm_tt_cache_flush(&pd->p, num_pages);*/
575 psb_pages_clflush(pd->driver, &pd->p, num_pages);
576 return;
577 }
578
579 if (hw_tile_stride)
580 rows = num_pages / desired_tile_stride;
581 else
582 desired_tile_stride = num_pages;
583
584 add = desired_tile_stride << PAGE_SHIFT;
585 row_add = hw_tile_stride << PAGE_SHIFT;
586 mb();
587 for (i = 0; i < rows; ++i) {
588
589 addr = address;
590 end = addr + add;
591
592 do {
593 next = psb_pd_addr_end(addr, end);
594 pt = psb_mmu_pt_map_lock(pd, addr);
595 if (!pt)
596 continue;
597 do {
598 psb_clflush(&pt->v
599 [psb_mmu_pt_index(addr)]);
600 } while (addr +=
601 clflush_add,
602 (addr & clflush_mask) < next);
603
604 psb_mmu_pt_unmap_unlock(pt);
605 } while (addr = next, next != end);
606 address += row_add;
607 }
608 mb();
609}
610
611void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
612 unsigned long address, uint32_t num_pages)
613{
614 struct psb_mmu_pt *pt;
615 unsigned long addr;
616 unsigned long end;
617 unsigned long next;
618 unsigned long f_address = address;
619
620 down_read(&pd->driver->sem);
621
622 addr = address;
623 end = addr + (num_pages << PAGE_SHIFT);
624
625 do {
626 next = psb_pd_addr_end(addr, end);
627 pt = psb_mmu_pt_alloc_map_lock(pd, addr);
628 if (!pt)
629 goto out;
630 do {
631 psb_mmu_invalidate_pte(pt, addr);
632 --pt->count;
633 } while (addr += PAGE_SIZE, addr < next);
634 psb_mmu_pt_unmap_unlock(pt);
635
636 } while (addr = next, next != end);
637
638out:
639 if (pd->hw_context != -1)
640 psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
641
642 up_read(&pd->driver->sem);
643
644 if (pd->hw_context != -1)
645 psb_mmu_flush(pd->driver, 0);
646
647 return;
648}
649
650void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
651 uint32_t num_pages, uint32_t desired_tile_stride,
652 uint32_t hw_tile_stride)
653{
654 struct psb_mmu_pt *pt;
655 uint32_t rows = 1;
656 uint32_t i;
657 unsigned long addr;
658 unsigned long end;
659 unsigned long next;
660 unsigned long add;
661 unsigned long row_add;
662 unsigned long f_address = address;
663
664 if (hw_tile_stride)
665 rows = num_pages / desired_tile_stride;
666 else
667 desired_tile_stride = num_pages;
668
669 add = desired_tile_stride << PAGE_SHIFT;
670 row_add = hw_tile_stride << PAGE_SHIFT;
671
672 /* down_read(&pd->driver->sem); */
673
674 /* Make sure we only need to flush this processor's cache */
675
676 for (i = 0; i < rows; ++i) {
677
678 addr = address;
679 end = addr + add;
680
681 do {
682 next = psb_pd_addr_end(addr, end);
683 pt = psb_mmu_pt_map_lock(pd, addr);
684 if (!pt)
685 continue;
686 do {
687 psb_mmu_invalidate_pte(pt, addr);
688 --pt->count;
689
690 } while (addr += PAGE_SIZE, addr < next);
691 psb_mmu_pt_unmap_unlock(pt);
692
693 } while (addr = next, next != end);
694 address += row_add;
695 }
696 if (pd->hw_context != -1)
697 psb_mmu_flush_ptes(pd, f_address, num_pages,
698 desired_tile_stride, hw_tile_stride);
699
700 /* up_read(&pd->driver->sem); */
701
702 if (pd->hw_context != -1)
703 psb_mmu_flush(pd->driver, 0);
704}
705
706int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
707 unsigned long address, uint32_t num_pages,
708 int type)
709{
710 struct psb_mmu_pt *pt;
711 uint32_t pte;
712 unsigned long addr;
713 unsigned long end;
714 unsigned long next;
715 unsigned long f_address = address;
716 int ret = 0;
717
718 down_read(&pd->driver->sem);
719
720 addr = address;
721 end = addr + (num_pages << PAGE_SHIFT);
722
723 do {
724 next = psb_pd_addr_end(addr, end);
725 pt = psb_mmu_pt_alloc_map_lock(pd, addr);
726 if (!pt) {
727 ret = -ENOMEM;
728 goto out;
729 }
730 do {
731 pte = psb_mmu_mask_pte(start_pfn++, type);
732 psb_mmu_set_pte(pt, addr, pte);
733 pt->count++;
734 } while (addr += PAGE_SIZE, addr < next);
735 psb_mmu_pt_unmap_unlock(pt);
736
737 } while (addr = next, next != end);
738
739out:
740 if (pd->hw_context != -1)
741 psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
742
743 up_read(&pd->driver->sem);
744
745 if (pd->hw_context != -1)
746 psb_mmu_flush(pd->driver, 1);
747
748 return ret;
749}
750
751int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
752 unsigned long address, uint32_t num_pages,
753 uint32_t desired_tile_stride,
754 uint32_t hw_tile_stride, int type)
755{
756 struct psb_mmu_pt *pt;
757 uint32_t rows = 1;
758 uint32_t i;
759 uint32_t pte;
760 unsigned long addr;
761 unsigned long end;
762 unsigned long next;
763 unsigned long add;
764 unsigned long row_add;
765 unsigned long f_address = address;
766 int ret = 0;
767
768 if (hw_tile_stride) {
769 if (num_pages % desired_tile_stride != 0)
770 return -EINVAL;
771 rows = num_pages / desired_tile_stride;
772 } else {
773 desired_tile_stride = num_pages;
774 }
775
776 add = desired_tile_stride << PAGE_SHIFT;
777 row_add = hw_tile_stride << PAGE_SHIFT;
778
779 down_read(&pd->driver->sem);
780
781 for (i = 0; i < rows; ++i) {
782
783 addr = address;
784 end = addr + add;
785
786 do {
787 next = psb_pd_addr_end(addr, end);
788 pt = psb_mmu_pt_alloc_map_lock(pd, addr);
789 if (!pt) {
790 ret = -ENOMEM;
791 goto out;
792 }
793 do {
794 pte =
795 psb_mmu_mask_pte(page_to_pfn(*pages++),
796 type);
797 psb_mmu_set_pte(pt, addr, pte);
798 pt->count++;
799 } while (addr += PAGE_SIZE, addr < next);
800 psb_mmu_pt_unmap_unlock(pt);
801
802 } while (addr = next, next != end);
803
804 address += row_add;
805 }
806out:
807 if (pd->hw_context != -1)
808 psb_mmu_flush_ptes(pd, f_address, num_pages,
809 desired_tile_stride, hw_tile_stride);
810
811 up_read(&pd->driver->sem);
812
813 if (pd->hw_context != -1)
814 psb_mmu_flush(pd->driver, 1);
815
816 return ret;
817}
818
819int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
820 unsigned long *pfn)
821{
822 int ret;
823 struct psb_mmu_pt *pt;
824 uint32_t tmp;
825 spinlock_t *lock = &pd->driver->lock;
826
827 down_read(&pd->driver->sem);
828 pt = psb_mmu_pt_map_lock(pd, virtual);
829 if (!pt) {
830 uint32_t *v;
831
832 spin_lock(lock);
833 v = kmap_atomic(pd->p, KM_USER0);
834 tmp = v[psb_mmu_pd_index(virtual)];
835 kunmap_atomic(v, KM_USER0);
836 spin_unlock(lock);
837
838 if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
839 !(pd->invalid_pte & PSB_PTE_VALID)) {
840 ret = -EINVAL;
841 goto out;
842 }
843 ret = 0;
844 *pfn = pd->invalid_pte >> PAGE_SHIFT;
845 goto out;
846 }
847 tmp = pt->v[psb_mmu_pt_index(virtual)];
848 if (!(tmp & PSB_PTE_VALID)) {
849 ret = -EINVAL;
850 } else {
851 ret = 0;
852 *pfn = tmp >> PAGE_SHIFT;
853 }
854 psb_mmu_pt_unmap_unlock(pt);
855out:
856 up_read(&pd->driver->sem);
857 return ret;
858}
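For reference, the address decomposition used throughout the removed mmu.c is a conventional two-level walk: the top bits of a GPU virtual offset pick one of 1024 page-directory entries and the next 10 bits (the 0x3FF mask above) pick a PTE within that table, with psb_mmu_mask_pte() composing a PTE from a page frame number plus attribute bits. Below is a minimal stand-alone sketch of that arithmetic; the shift values and flag bit values are assumptions chosen for illustration (the real definitions live in psb_reg.h), not the driver's constants.

#include <stdint.h>
#include <stdio.h>

/* Assumed values for illustration only: 4 KiB pages, 1024-entry tables. */
#define PAGE_SHIFT      12
#define PSB_PTE_SHIFT   12
#define PSB_PDE_SHIFT   22
#define PSB_PTE_VALID   0x0001
#define PSB_PTE_CACHED  0x0002

/* Index of the page-directory entry covering a GPU virtual offset. */
static uint32_t pd_index(uint32_t offset)
{
	return offset >> PSB_PDE_SHIFT;
}

/* Index of the page-table entry within that directory's page table. */
static uint32_t pt_index(uint32_t offset)
{
	return (offset >> PSB_PTE_SHIFT) & 0x3FF;
}

/* Compose a PTE from a page frame number plus attribute bits. */
static uint32_t mask_pte(uint32_t pfn, uint32_t flags)
{
	return (pfn << PAGE_SHIFT) | PSB_PTE_VALID | flags;
}

int main(void)
{
	uint32_t offset = 0x00ABC123;

	printf("pd=%u pt=%u pte=0x%08x\n",
	       pd_index(offset), pt_index(offset),
	       mask_pte(0x1234, PSB_PTE_CACHED));
	return 0;
}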
diff --git a/drivers/staging/gma500/mrst.h b/drivers/staging/gma500/mrst.h
deleted file mode 100644
index b563dbc73104..000000000000
--- a/drivers/staging/gma500/mrst.h
+++ /dev/null
@@ -1,252 +0,0 @@
1/**************************************************************************
2 * Copyright (c) 2007-2011, Intel Corporation.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 **************************************************************************/
19
20/* MID device specific descriptors */
21
22struct mrst_vbt {
23 s8 signature[4]; /*4 bytes,"$GCT" */
24 u8 revision;
25 u8 size;
26 u8 checksum;
27 void *mrst_gct;
28} __packed;
29
30struct mrst_timing_info {
31 u16 pixel_clock;
32 u8 hactive_lo;
33 u8 hblank_lo;
34 u8 hblank_hi:4;
35 u8 hactive_hi:4;
36 u8 vactive_lo;
37 u8 vblank_lo;
38 u8 vblank_hi:4;
39 u8 vactive_hi:4;
40 u8 hsync_offset_lo;
41 u8 hsync_pulse_width_lo;
42 u8 vsync_pulse_width_lo:4;
43 u8 vsync_offset_lo:4;
44 u8 vsync_pulse_width_hi:2;
45 u8 vsync_offset_hi:2;
46 u8 hsync_pulse_width_hi:2;
47 u8 hsync_offset_hi:2;
48 u8 width_mm_lo;
49 u8 height_mm_lo;
50 u8 height_mm_hi:4;
51 u8 width_mm_hi:4;
52 u8 hborder;
53 u8 vborder;
54 u8 unknown0:1;
55 u8 hsync_positive:1;
56 u8 vsync_positive:1;
57 u8 separate_sync:2;
58 u8 stereo:1;
59 u8 unknown6:1;
60 u8 interlaced:1;
61} __packed;
62
63struct gct_r10_timing_info {
64 u16 pixel_clock;
65 u32 hactive_lo:8;
66 u32 hactive_hi:4;
67 u32 hblank_lo:8;
68 u32 hblank_hi:4;
69 u32 hsync_offset_lo:8;
70 u16 hsync_offset_hi:2;
71 u16 hsync_pulse_width_lo:8;
72 u16 hsync_pulse_width_hi:2;
73 u16 hsync_positive:1;
74 u16 rsvd_1:3;
75 u8 vactive_lo:8;
76 u16 vactive_hi:4;
77 u16 vblank_lo:8;
78 u16 vblank_hi:4;
79 u16 vsync_offset_lo:4;
80 u16 vsync_offset_hi:2;
81 u16 vsync_pulse_width_lo:4;
82 u16 vsync_pulse_width_hi:2;
83 u16 vsync_positive:1;
84 u16 rsvd_2:3;
85} __packed;
86
87struct mrst_panel_descriptor_v1 {
88 u32 Panel_Port_Control; /* 1 dword, Register 0x61180 if LVDS */
89 /* 0x61190 if MIPI */
90 u32 Panel_Power_On_Sequencing;/*1 dword,Register 0x61208,*/
91 u32 Panel_Power_Off_Sequencing;/*1 dword,Register 0x6120C,*/
92 u32 Panel_Power_Cycle_Delay_and_Reference_Divisor;/* 1 dword */
93 /* Register 0x61210 */
94 struct mrst_timing_info DTD;/*18 bytes, Standard definition */
95 u16 Panel_Backlight_Inverter_Descriptor;/* 16 bits, as follows */
96 /* Bit 0, Frequency, 15 bits,0 - 32767Hz */
97 /* Bit 15, Polarity, 1 bit, 0: Normal, 1: Inverted */
98 u16 Panel_MIPI_Display_Descriptor;
99 /*16 bits, Defined as follows: */
100 /* if MIPI, 0x0000 if LVDS */
101 /* Bit 0, Type, 2 bits, */
102 /* 0: Type-1, */
103 /* 1: Type-2, */
104 /* 2: Type-3, */
105 /* 3: Type-4 */
106 /* Bit 2, Pixel Format, 4 bits */
107 /* Bit0: 16bpp (not supported in LNC), */
108 /* Bit1: 18bpp loosely packed, */
109 /* Bit2: 18bpp packed, */
110 /* Bit3: 24bpp */
111 /* Bit 6, Reserved, 2 bits, 00b */
112 /* Bit 8, Minimum Supported Frame Rate, 6 bits, 0 - 63Hz */
113 /* Bit 14, Reserved, 2 bits, 00b */
114} __packed;
115
116struct mrst_panel_descriptor_v2 {
117 u32 Panel_Port_Control; /* 1 dword, Register 0x61180 if LVDS */
118 /* 0x61190 if MIPI */
119 u32 Panel_Power_On_Sequencing;/*1 dword,Register 0x61208,*/
120 u32 Panel_Power_Off_Sequencing;/*1 dword,Register 0x6120C,*/
121 u8 Panel_Power_Cycle_Delay_and_Reference_Divisor;/* 1 byte */
122 /* Register 0x61210 */
123 struct mrst_timing_info DTD;/*18 bytes, Standard definition */
124 u16 Panel_Backlight_Inverter_Descriptor;/*16 bits, as follows*/
125 /*Bit 0, Frequency, 16 bits, 0 - 32767Hz*/
126 u8 Panel_Initial_Brightness;/* [7:0] 0 - 100% */
127 /*Bit 7, Polarity, 1 bit,0: Normal, 1: Inverted*/
128 u16 Panel_MIPI_Display_Descriptor;
129 /*16 bits, Defined as follows: */
130 /* if MIPI, 0x0000 if LVDS */
131 /* Bit 0, Type, 2 bits, */
132 /* 0: Type-1, */
133 /* 1: Type-2, */
134 /* 2: Type-3, */
135 /* 3: Type-4 */
136 /* Bit 2, Pixel Format, 4 bits */
137 /* Bit0: 16bpp (not supported in LNC), */
138 /* Bit1: 18bpp loosely packed, */
139 /* Bit2: 18bpp packed, */
140 /* Bit3: 24bpp */
141 /* Bit 6, Reserved, 2 bits, 00b */
142 /* Bit 8, Minimum Supported Frame Rate, 6 bits, 0 - 63Hz */
143 /* Bit 14, Reserved, 2 bits, 00b */
144} __packed;
145
146union mrst_panel_rx {
147 struct {
148 u16 NumberOfLanes:2; /*Num of Lanes, 2 bits,0 = 1 lane,*/
149 /* 1 = 2 lanes, 2 = 3 lanes, 3 = 4 lanes. */
150 u16 MaxLaneFreq:3; /* 0: 100MHz, 1: 200MHz, 2: 300MHz, */
151 /*3: 400MHz, 4: 500MHz, 5: 600MHz, 6: 700MHz, 7: 800MHz.*/
152 u16 SupportedVideoTransferMode:2; /*0: Non-burst only */
153 /* 1: Burst and non-burst */
154 /* 2/3: Reserved */
155 u16 HSClkBehavior:1; /*0: Continuous, 1: Non-continuous*/
156 u16 DuoDisplaySupport:1; /*1 bit,0: No, 1: Yes*/
157 u16 ECC_ChecksumCapabilities:1;/*1 bit,0: No, 1: Yes*/
158 u16 BidirectionalCommunication:1;/*1 bit,0: No, 1: Yes */
159 u16 Rsvd:5;/*5 bits,00000b */
160 } panelrx;
161 u16 panel_receiver;
162} __packed;
163
164struct mrst_gct_v1 {
165 union { /*8 bits,Defined as follows: */
166 struct {
167 u8 PanelType:4; /*4 bits, Bit field for panels*/
168 /* 0 - 3: 0 = LVDS, 1 = MIPI*/
169 /*2 bits,Specifies which of the*/
170 u8 BootPanelIndex:2;
171 /* 4 panels to use by default*/
172 u8 BootMIPI_DSI_RxIndex:2;/*Specifies which of*/
173 /* the 4 MIPI DSI receivers to use*/
174 } PD;
175 u8 PanelDescriptor;
176 };
177 struct mrst_panel_descriptor_v1 panel[4];/*panel descrs,38 bytes each*/
178 union mrst_panel_rx panelrx[4]; /* panel receivers*/
179} __packed;
180
181struct mrst_gct_v2 {
182 union { /*8 bits,Defined as follows: */
183 struct {
184 u8 PanelType:4; /*4 bits, Bit field for panels*/
185 /* 0 - 3: 0 = LVDS, 1 = MIPI*/
186 /*2 bits,Specifies which of the*/
187 u8 BootPanelIndex:2;
188 /* 4 panels to use by default*/
189 u8 BootMIPI_DSI_RxIndex:2;/*Specifies which of*/
190 /* the 4 MIPI DSI receivers to use*/
191 } PD;
192 u8 PanelDescriptor;
193 };
194 struct mrst_panel_descriptor_v2 panel[4];/*panel descrs,38 bytes each*/
195 union mrst_panel_rx panelrx[4]; /* panel receivers*/
196} __packed;
197
198struct mrst_gct_data {
199 u8 bpi; /* boot panel index, number of panel used during boot */
200 u8 pt; /* panel type, 4 bit field, 0=lvds, 1=mipi */
201 struct mrst_timing_info DTD; /* timing info for the selected panel */
202 u32 Panel_Port_Control;
203 u32 PP_On_Sequencing;/*1 dword,Register 0x61208,*/
204 u32 PP_Off_Sequencing;/*1 dword,Register 0x6120C,*/
205 u32 PP_Cycle_Delay;
206 u16 Panel_Backlight_Inverter_Descriptor;
207 u16 Panel_MIPI_Display_Descriptor;
208} __packed;
209
210#define MODE_SETTING_IN_CRTC 0x1
211#define MODE_SETTING_IN_ENCODER 0x2
212#define MODE_SETTING_ON_GOING 0x3
213#define MODE_SETTING_IN_DSR 0x4
214#define MODE_SETTING_ENCODER_DONE 0x8
215
216#define GCT_R10_HEADER_SIZE 16
217#define GCT_R10_DISPLAY_DESC_SIZE 28
218
219/*
220 * Moorestown HDMI interfaces
221 */
222
223struct mrst_hdmi_dev {
224 struct pci_dev *dev;
225 void __iomem *regs;
226 unsigned int mmio, mmio_len;
227 int dpms_mode;
228 struct hdmi_i2c_dev *i2c_dev;
229
230 /* register state */
231 u32 saveDPLL_CTRL;
232 u32 saveDPLL_DIV_CTRL;
233 u32 saveDPLL_ADJUST;
234 u32 saveDPLL_UPDATE;
235 u32 saveDPLL_CLK_ENABLE;
236 u32 savePCH_HTOTAL_B;
237 u32 savePCH_HBLANK_B;
238 u32 savePCH_HSYNC_B;
239 u32 savePCH_VTOTAL_B;
240 u32 savePCH_VBLANK_B;
241 u32 savePCH_VSYNC_B;
242 u32 savePCH_PIPEBCONF;
243 u32 savePCH_PIPEBSRC;
244};
245
246extern void mrst_hdmi_setup(struct drm_device *dev);
247extern void mrst_hdmi_teardown(struct drm_device *dev);
248extern int mrst_hdmi_i2c_init(struct pci_dev *dev);
249extern void mrst_hdmi_i2c_exit(struct pci_dev *dev);
250extern void mrst_hdmi_save(struct drm_device *dev);
251extern void mrst_hdmi_restore(struct drm_device *dev);
252extern void mrst_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev);
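The timing descriptors above split every count into a low byte plus a high nibble, in the same spirit as an EDID detailed timing descriptor, so consumers have to stitch the pairs back together. The sketch below shows how such lo/hi pairs would be recombined; the helper, field subset, and example values are hypothetical and are not part of the removed header.

#include <stdint.h>
#include <stdio.h>

/* Illustrative subset of the lo/hi split used by mrst_timing_info. */
struct timing_fields {
	uint8_t hactive_lo;
	uint8_t hblank_lo;
	uint8_t hblank_hi:4;
	uint8_t hactive_hi:4;
};

/* Rebuild a full 12-bit count: the high nibble holds bits 8..11. */
static unsigned int rebuild(uint8_t lo, uint8_t hi)
{
	return ((unsigned int)hi << 8) | lo;
}

int main(void)
{
	struct timing_fields t = {
		.hactive_lo = 0x00, .hactive_hi = 0x5,	/* 1280 */
		.hblank_lo  = 0xA0, .hblank_hi  = 0x0,	/* 160  */
	};

	printf("hactive=%u hblank=%u\n",
	       rebuild(t.hactive_lo, t.hactive_hi),
	       rebuild(t.hblank_lo, t.hblank_hi));
	return 0;
}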
diff --git a/drivers/staging/gma500/mrst_crtc.c b/drivers/staging/gma500/mrst_crtc.c
deleted file mode 100644
index 980837e37d80..000000000000
--- a/drivers/staging/gma500/mrst_crtc.c
+++ /dev/null
@@ -1,604 +0,0 @@
1/*
2 * Copyright © 2009 Intel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17
18#include <linux/i2c.h>
19#include <linux/pm_runtime.h>
20
21#include <drm/drmP.h>
22#include "framebuffer.h"
23#include "psb_drv.h"
24#include "psb_intel_drv.h"
25#include "psb_intel_reg.h"
26#include "psb_intel_display.h"
27#include "power.h"
28
29struct psb_intel_range_t {
30 int min, max;
31};
32
33struct mrst_limit_t {
34 struct psb_intel_range_t dot, m, p1;
35};
36
37struct mrst_clock_t {
38 /* derived values */
39 int dot;
40 int m;
41 int p1;
42};
43
44#define MRST_LIMIT_LVDS_100L 0
45#define MRST_LIMIT_LVDS_83 1
46#define MRST_LIMIT_LVDS_100 2
47
48#define MRST_DOT_MIN 19750
49#define MRST_DOT_MAX 120000
50#define MRST_M_MIN_100L 20
51#define MRST_M_MIN_100 10
52#define MRST_M_MIN_83 12
53#define MRST_M_MAX_100L 34
54#define MRST_M_MAX_100 17
55#define MRST_M_MAX_83 20
56#define MRST_P1_MIN 2
57#define MRST_P1_MAX_0 7
58#define MRST_P1_MAX_1 8
59
60static const struct mrst_limit_t mrst_limits[] = {
61 { /* MRST_LIMIT_LVDS_100L */
62 .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
63 .m = {.min = MRST_M_MIN_100L, .max = MRST_M_MAX_100L},
64 .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
65 },
66	{ /* MRST_LIMIT_LVDS_83 */
67 .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
68 .m = {.min = MRST_M_MIN_83, .max = MRST_M_MAX_83},
69 .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_0},
70 },
71 { /* MRST_LIMIT_LVDS_100 */
72 .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
73 .m = {.min = MRST_M_MIN_100, .max = MRST_M_MAX_100},
74 .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
75 },
76};
77
78#define MRST_M_MIN 10
79static const u32 mrst_m_converts[] = {
80 0x2B, 0x15, 0x2A, 0x35, 0x1A, 0x0D, 0x26, 0x33, 0x19, 0x2C,
81 0x36, 0x3B, 0x1D, 0x2E, 0x37, 0x1B, 0x2D, 0x16, 0x0B, 0x25,
82 0x12, 0x09, 0x24, 0x32, 0x39, 0x1c,
83};
84
85static const struct mrst_limit_t *mrst_limit(struct drm_crtc *crtc)
86{
87 const struct mrst_limit_t *limit = NULL;
88 struct drm_device *dev = crtc->dev;
89 struct drm_psb_private *dev_priv = dev->dev_private;
90
91 if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
92 || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
93 switch (dev_priv->core_freq) {
94 case 100:
95 limit = &mrst_limits[MRST_LIMIT_LVDS_100L];
96 break;
97 case 166:
98 limit = &mrst_limits[MRST_LIMIT_LVDS_83];
99 break;
100 case 200:
101 limit = &mrst_limits[MRST_LIMIT_LVDS_100];
102 break;
103 }
104 } else {
105 limit = NULL;
106 dev_err(dev->dev, "mrst_limit Wrong display type.\n");
107 }
108
109 return limit;
110}
111
112/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
113static void mrst_clock(int refclk, struct mrst_clock_t *clock)
114{
115 clock->dot = (refclk * clock->m) / (14 * clock->p1);
116}
117
118void mrstPrintPll(char *prefix, struct mrst_clock_t *clock)
119{
120 pr_debug("%s: dotclock = %d, m = %d, p1 = %d.\n",
121 prefix, clock->dot, clock->m, clock->p1);
122}
123
124/**
125 * Returns a set of divisors for the desired target clock with the given
126 * refclk, or false if no suitable divisors can be found.
127 */
128static bool
129mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
130 struct mrst_clock_t *best_clock)
131{
132 struct mrst_clock_t clock;
133 const struct mrst_limit_t *limit = mrst_limit(crtc);
134 int err = target;
135
136 memset(best_clock, 0, sizeof(*best_clock));
137
138 for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
139 for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
140 clock.p1++) {
141 int this_err;
142
143 mrst_clock(refclk, &clock);
144
145 this_err = abs(clock.dot - target);
146 if (this_err < err) {
147 *best_clock = clock;
148 err = this_err;
149 }
150 }
151 }
152 dev_dbg(crtc->dev->dev, "mrstFindBestPLL err = %d.\n", err);
153 return err != target;
154}
155
156/**
157 * Sets the power management mode of the pipe and plane.
158 *
159 * This code should probably grow support for turning the cursor off and back
160 * on appropriately at the same time as we're turning the pipe off/on.
161 */
162static void mrst_crtc_dpms(struct drm_crtc *crtc, int mode)
163{
164 struct drm_device *dev = crtc->dev;
165 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
166 int pipe = psb_intel_crtc->pipe;
167 int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
168 int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
169 int dspbase_reg = (pipe == 0) ? MRST_DSPABASE : DSPBBASE;
170 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
171 u32 temp;
172 bool enabled;
173
174 if (!gma_power_begin(dev, true))
175 return;
176
177 /* XXX: When our outputs are all unaware of DPMS modes other than off
178 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
179 */
180 switch (mode) {
181 case DRM_MODE_DPMS_ON:
182 case DRM_MODE_DPMS_STANDBY:
183 case DRM_MODE_DPMS_SUSPEND:
184 /* Enable the DPLL */
185 temp = REG_READ(dpll_reg);
186 if ((temp & DPLL_VCO_ENABLE) == 0) {
187 REG_WRITE(dpll_reg, temp);
188 REG_READ(dpll_reg);
189 /* Wait for the clocks to stabilize. */
190 udelay(150);
191 REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
192 REG_READ(dpll_reg);
193 /* Wait for the clocks to stabilize. */
194 udelay(150);
195 REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
196 REG_READ(dpll_reg);
197 /* Wait for the clocks to stabilize. */
198 udelay(150);
199 }
200 /* Enable the pipe */
201 temp = REG_READ(pipeconf_reg);
202 if ((temp & PIPEACONF_ENABLE) == 0)
203 REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
204 /* Enable the plane */
205 temp = REG_READ(dspcntr_reg);
206 if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
207 REG_WRITE(dspcntr_reg,
208 temp | DISPLAY_PLANE_ENABLE);
209 /* Flush the plane changes */
210 REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
211 }
212
213 psb_intel_crtc_load_lut(crtc);
214
215 /* Give the overlay scaler a chance to enable
216 if it's on this pipe */
217 /* psb_intel_crtc_dpms_video(crtc, true); TODO */
218 break;
219 case DRM_MODE_DPMS_OFF:
220 /* Give the overlay scaler a chance to disable
221 * if it's on this pipe */
222 /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
223
224 /* Disable the VGA plane that we never use */
225 REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
226 /* Disable display plane */
227 temp = REG_READ(dspcntr_reg);
228 if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
229 REG_WRITE(dspcntr_reg,
230 temp & ~DISPLAY_PLANE_ENABLE);
231 /* Flush the plane changes */
232 REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
233 REG_READ(dspbase_reg);
234 }
235
236 /* Next, disable display pipes */
237 temp = REG_READ(pipeconf_reg);
238 if ((temp & PIPEACONF_ENABLE) != 0) {
239 REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
240 REG_READ(pipeconf_reg);
241 }
242	/* Wait for the pipe disable to take effect. */
243 psb_intel_wait_for_vblank(dev);
244
245 temp = REG_READ(dpll_reg);
246 if ((temp & DPLL_VCO_ENABLE) != 0) {
247 REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
248 REG_READ(dpll_reg);
249 }
250
251 /* Wait for the clocks to turn off. */
252 udelay(150);
253 break;
254 }
255
256 enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
257
258 /*Set FIFO Watermarks*/
259 REG_WRITE(DSPARB, 0x3FFF);
260 REG_WRITE(DSPFW1, 0x3F88080A);
261 REG_WRITE(DSPFW2, 0x0b060808);
262 REG_WRITE(DSPFW3, 0x0);
263 REG_WRITE(DSPFW4, 0x08030404);
264 REG_WRITE(DSPFW5, 0x04040404);
265 REG_WRITE(DSPFW6, 0x78);
266 REG_WRITE(0x70400, REG_READ(0x70400) | 0x4000);
267 /* Must write Bit 14 of the Chicken Bit Register */
268
269 gma_power_end(dev);
270}
271
272/**
273 * Return the pipe currently connected to the panel fitter,
274 * or -1 if the panel fitter is not present or not in use
275 */
276static int mrst_panel_fitter_pipe(struct drm_device *dev)
277{
278 u32 pfit_control;
279
280 pfit_control = REG_READ(PFIT_CONTROL);
281
282 /* See if the panel fitter is in use */
283 if ((pfit_control & PFIT_ENABLE) == 0)
284 return -1;
285 return (pfit_control >> 29) & 3;
286}
287
288static int mrst_crtc_mode_set(struct drm_crtc *crtc,
289 struct drm_display_mode *mode,
290 struct drm_display_mode *adjusted_mode,
291 int x, int y,
292 struct drm_framebuffer *old_fb)
293{
294 struct drm_device *dev = crtc->dev;
295 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
296 struct drm_psb_private *dev_priv = dev->dev_private;
297 int pipe = psb_intel_crtc->pipe;
298 int fp_reg = (pipe == 0) ? MRST_FPA0 : FPB0;
299 int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
300 int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
301 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
302 int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
303 int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
304 int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
305 int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
306 int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
307 int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
308 int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
309 int refclk = 0;
310 struct mrst_clock_t clock;
311 u32 dpll = 0, fp = 0, dspcntr, pipeconf;
312 bool ok, is_sdvo = false;
313 bool is_crt = false, is_lvds = false, is_tv = false;
314 bool is_mipi = false;
315 struct drm_mode_config *mode_config = &dev->mode_config;
316 struct psb_intel_output *psb_intel_output = NULL;
317 uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
318 struct drm_encoder *encoder;
319
320 if (!gma_power_begin(dev, true))
321 return 0;
322
323 memcpy(&psb_intel_crtc->saved_mode,
324 mode,
325 sizeof(struct drm_display_mode));
326 memcpy(&psb_intel_crtc->saved_adjusted_mode,
327 adjusted_mode,
328 sizeof(struct drm_display_mode));
329
330 list_for_each_entry(encoder, &mode_config->encoder_list, head) {
331
332 if (encoder->crtc != crtc)
333 continue;
334
335 psb_intel_output = enc_to_psb_intel_output(encoder);
336 switch (psb_intel_output->type) {
337 case INTEL_OUTPUT_LVDS:
338 is_lvds = true;
339 break;
340 case INTEL_OUTPUT_SDVO:
341 is_sdvo = true;
342 break;
343 case INTEL_OUTPUT_TVOUT:
344 is_tv = true;
345 break;
346 case INTEL_OUTPUT_ANALOG:
347 is_crt = true;
348 break;
349 case INTEL_OUTPUT_MIPI:
350 is_mipi = true;
351 break;
352 }
353 }
354
355 /* Disable the VGA plane that we never use */
356 REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
357
358 /* Disable the panel fitter if it was on our pipe */
359 if (mrst_panel_fitter_pipe(dev) == pipe)
360 REG_WRITE(PFIT_CONTROL, 0);
361
362 REG_WRITE(pipesrc_reg,
363 ((mode->crtc_hdisplay - 1) << 16) |
364 (mode->crtc_vdisplay - 1));
365
366 if (psb_intel_output)
367 drm_connector_property_get_value(&psb_intel_output->base,
368 dev->mode_config.scaling_mode_property, &scalingType);
369
370 if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
371 /* Moorestown doesn't have register support for centering so
372 * we need to mess with the h/vblank and h/vsync start and
373 * ends to get centering */
374 int offsetX = 0, offsetY = 0;
375
376 offsetX = (adjusted_mode->crtc_hdisplay -
377 mode->crtc_hdisplay) / 2;
378 offsetY = (adjusted_mode->crtc_vdisplay -
379 mode->crtc_vdisplay) / 2;
380
381 REG_WRITE(htot_reg, (mode->crtc_hdisplay - 1) |
382 ((adjusted_mode->crtc_htotal - 1) << 16));
383 REG_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) |
384 ((adjusted_mode->crtc_vtotal - 1) << 16));
385 REG_WRITE(hblank_reg,
386 (adjusted_mode->crtc_hblank_start - offsetX - 1) |
387 ((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
388 REG_WRITE(hsync_reg,
389 (adjusted_mode->crtc_hsync_start - offsetX - 1) |
390 ((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
391 REG_WRITE(vblank_reg,
392 (adjusted_mode->crtc_vblank_start - offsetY - 1) |
393 ((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
394 REG_WRITE(vsync_reg,
395 (adjusted_mode->crtc_vsync_start - offsetY - 1) |
396 ((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
397 } else {
398 REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
399 ((adjusted_mode->crtc_htotal - 1) << 16));
400 REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
401 ((adjusted_mode->crtc_vtotal - 1) << 16));
402 REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
403 ((adjusted_mode->crtc_hblank_end - 1) << 16));
404 REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
405 ((adjusted_mode->crtc_hsync_end - 1) << 16));
406 REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
407 ((adjusted_mode->crtc_vblank_end - 1) << 16));
408 REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
409 ((adjusted_mode->crtc_vsync_end - 1) << 16));
410 }
411
412 /* Flush the plane changes */
413 {
414 struct drm_crtc_helper_funcs *crtc_funcs =
415 crtc->helper_private;
416 crtc_funcs->mode_set_base(crtc, x, y, old_fb);
417 }
418
419 /* setup pipeconf */
420 pipeconf = REG_READ(pipeconf_reg);
421
422 /* Set up the display plane register */
423 dspcntr = REG_READ(dspcntr_reg);
424 dspcntr |= DISPPLANE_GAMMA_ENABLE;
425
426 if (pipe == 0)
427 dspcntr |= DISPPLANE_SEL_PIPE_A;
428 else
429 dspcntr |= DISPPLANE_SEL_PIPE_B;
430
431 dev_priv->dspcntr = dspcntr |= DISPLAY_PLANE_ENABLE;
432 dev_priv->pipeconf = pipeconf |= PIPEACONF_ENABLE;
433
434 if (is_mipi)
435 goto mrst_crtc_mode_set_exit;
436
437 refclk = dev_priv->core_freq * 1000;
438
439 dpll = 0; /*BIT16 = 0 for 100MHz reference */
440
441 ok = mrstFindBestPLL(crtc, adjusted_mode->clock, refclk, &clock);
442
443 if (!ok) {
444 dev_dbg(dev->dev, "mrstFindBestPLL fail in mrst_crtc_mode_set.\n");
445 } else {
446		dev_dbg(dev->dev, "mrst_crtc_mode_set pixel clock = %d, "
447 "m = %x, p1 = %x.\n", clock.dot, clock.m,
448 clock.p1);
449 }
450
451 fp = mrst_m_converts[(clock.m - MRST_M_MIN)] << 8;
452
453 dpll |= DPLL_VGA_MODE_DIS;
454
455
456 dpll |= DPLL_VCO_ENABLE;
457
458 if (is_lvds)
459 dpll |= DPLLA_MODE_LVDS;
460 else
461 dpll |= DPLLB_MODE_DAC_SERIAL;
462
463 if (is_sdvo) {
464 int sdvo_pixel_multiply =
465 adjusted_mode->clock / mode->clock;
466
467 dpll |= DPLL_DVO_HIGH_SPEED;
468 dpll |=
469 (sdvo_pixel_multiply -
470 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
471 }
472
473
474 /* compute bitmask from p1 value */
475 dpll |= (1 << (clock.p1 - 2)) << 17;
476
477 dpll |= DPLL_VCO_ENABLE;
478
479 mrstPrintPll("chosen", &clock);
480
481 if (dpll & DPLL_VCO_ENABLE) {
482 REG_WRITE(fp_reg, fp);
483 REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
484 REG_READ(dpll_reg);
485 /* Check the DPLLA lock bit PIPEACONF[29] */
486 udelay(150);
487 }
488
489 REG_WRITE(fp_reg, fp);
490 REG_WRITE(dpll_reg, dpll);
491 REG_READ(dpll_reg);
492 /* Wait for the clocks to stabilize. */
493 udelay(150);
494
495 /* write it again -- the BIOS does, after all */
496 REG_WRITE(dpll_reg, dpll);
497 REG_READ(dpll_reg);
498 /* Wait for the clocks to stabilize. */
499 udelay(150);
500
501 REG_WRITE(pipeconf_reg, pipeconf);
502 REG_READ(pipeconf_reg);
503 psb_intel_wait_for_vblank(dev);
504
505 REG_WRITE(dspcntr_reg, dspcntr);
506 psb_intel_wait_for_vblank(dev);
507
508mrst_crtc_mode_set_exit:
509 gma_power_end(dev);
510 return 0;
511}
512
513static bool mrst_crtc_mode_fixup(struct drm_crtc *crtc,
514 struct drm_display_mode *mode,
515 struct drm_display_mode *adjusted_mode)
516{
517 return true;
518}
519
520int mrst_pipe_set_base(struct drm_crtc *crtc,
521 int x, int y, struct drm_framebuffer *old_fb)
522{
523 struct drm_device *dev = crtc->dev;
524 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
525 struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
526 int pipe = psb_intel_crtc->pipe;
527 unsigned long start, offset;
528
529 int dspbase = (pipe == 0 ? DSPALINOFF : DSPBBASE);
530 int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
531 int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
532 int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
533 u32 dspcntr;
534 int ret = 0;
535
536 /* no fb bound */
537 if (!crtc->fb) {
538 dev_dbg(dev->dev, "No FB bound\n");
539 return 0;
540 }
541
542 if (!gma_power_begin(dev, true))
543 return 0;
544
545 start = psbfb->gtt->offset;
546 offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
547
548 REG_WRITE(dspstride, crtc->fb->pitches[0]);
549
550 dspcntr = REG_READ(dspcntr_reg);
551 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
552
553 switch (crtc->fb->bits_per_pixel) {
554 case 8:
555 dspcntr |= DISPPLANE_8BPP;
556 break;
557 case 16:
558 if (crtc->fb->depth == 15)
559 dspcntr |= DISPPLANE_15_16BPP;
560 else
561 dspcntr |= DISPPLANE_16BPP;
562 break;
563 case 24:
564 case 32:
565 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
566 break;
567 default:
568 dev_err(dev->dev, "Unknown color depth\n");
569 ret = -EINVAL;
570 goto pipe_set_base_exit;
571 }
572 REG_WRITE(dspcntr_reg, dspcntr);
573
574 REG_WRITE(dspbase, offset);
575 REG_READ(dspbase);
576 REG_WRITE(dspsurf, start);
577 REG_READ(dspsurf);
578
579pipe_set_base_exit:
580 gma_power_end(dev);
581 return ret;
582}
583
584static void mrst_crtc_prepare(struct drm_crtc *crtc)
585{
586 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
587 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
588}
589
590static void mrst_crtc_commit(struct drm_crtc *crtc)
591{
592 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
593 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
594}
595
596const struct drm_crtc_helper_funcs mrst_helper_funcs = {
597 .dpms = mrst_crtc_dpms,
598 .mode_fixup = mrst_crtc_mode_fixup,
599 .mode_set = mrst_crtc_mode_set,
600 .mode_set_base = mrst_pipe_set_base,
601 .prepare = mrst_crtc_prepare,
602 .commit = mrst_crtc_commit,
603};
604
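To make the clock math above concrete: mrst_clock() derives the dot clock as (refclk * m) / (14 * p1), and mrstFindBestPLL() simply brute-forces the (m, p1) pairs permitted by the limit table, keeping the pair with the smallest error against the target. The stand-alone sketch below mirrors that loop under the 100 MHz LVDS limits shown in mrst_limits[] (m 20..34, p1 2..8); the inputs in main() are made-up example numbers, not values from the driver.

#include <stdio.h>
#include <stdlib.h>

struct clk { int dot, m, p1; };

/* Brute-force the divisor pair whose dot clock is closest to target (kHz). */
static int find_best(int target, int refclk, struct clk *best)
{
	int err = target;

	for (int m = 20; m <= 34; m++) {
		for (int p1 = 2; p1 <= 8; p1++) {
			int dot = (refclk * m) / (14 * p1);
			int this_err = abs(dot - target);

			if (this_err < err) {
				best->dot = dot;
				best->m = m;
				best->p1 = p1;
				err = this_err;
			}
		}
	}
	return err != target;	/* non-zero when an improvement was found */
}

int main(void)
{
	struct clk best = { 0, 0, 0 };

	/* Example: 65 MHz panel clock, 100 MHz reference, both in kHz. */
	if (find_best(65000, 100000, &best))
		printf("dot=%d m=%d p1=%d\n", best.dot, best.m, best.p1);
	return 0;
}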
diff --git a/drivers/staging/gma500/mrst_device.c b/drivers/staging/gma500/mrst_device.c
deleted file mode 100644
index 6707fafbfa1e..000000000000
--- a/drivers/staging/gma500/mrst_device.c
+++ /dev/null
@@ -1,634 +0,0 @@
1/**************************************************************************
2 * Copyright (c) 2011, Intel Corporation.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 **************************************************************************/
19
20#include <linux/backlight.h>
21#include <linux/module.h>
22#include <linux/dmi.h>
23#include <drm/drmP.h>
24#include <drm/drm.h>
25#include "psb_drm.h"
26#include "psb_drv.h"
27#include "psb_reg.h"
28#include "psb_intel_reg.h"
29#include <asm/mrst.h>
30#include <asm/intel_scu_ipc.h>
31#include "mid_bios.h"
32
33static int devtype;
34
35module_param_named(type, devtype, int, 0600);
36MODULE_PARM_DESC(type, "Moorestown/Oaktrail device type");
37
38#define DEVICE_MOORESTOWN 1
39#define DEVICE_OAKTRAIL 2
40#define DEVICE_MOORESTOWN_MM 3
41
42static int mrst_device_ident(struct drm_device *dev)
43{
44 /* User forced */
45 if (devtype)
46 return devtype;
47 if (dmi_match(DMI_PRODUCT_NAME, "OakTrail") ||
48 dmi_match(DMI_PRODUCT_NAME, "OakTrail platform"))
49 return DEVICE_OAKTRAIL;
50#if defined(CONFIG_X86_MRST)
51 if (dmi_match(DMI_PRODUCT_NAME, "MM") ||
52 dmi_match(DMI_PRODUCT_NAME, "MM 10"))
53 return DEVICE_MOORESTOWN_MM;
54 if (mrst_identify_cpu())
55 return DEVICE_MOORESTOWN;
56#endif
57 return DEVICE_OAKTRAIL;
58}
59
60
61/* IPC message and command defines used to enable/disable mipi panel voltages */
62#define IPC_MSG_PANEL_ON_OFF 0xE9
63#define IPC_CMD_PANEL_ON 1
64#define IPC_CMD_PANEL_OFF 0
65
66static int mrst_output_init(struct drm_device *dev)
67{
68 struct drm_psb_private *dev_priv = dev->dev_private;
69 if (dev_priv->iLVDS_enable)
70 mrst_lvds_init(dev, &dev_priv->mode_dev);
71 else
72 dev_err(dev->dev, "DSI is not supported\n");
73 if (dev_priv->hdmi_priv)
74 mrst_hdmi_init(dev, &dev_priv->mode_dev);
75 return 0;
76}
77
78/*
79 * Provide the low level interfaces for the Moorestown backlight
80 */
81
82#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
83
84#define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF
85#define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */
86#define BLC_PWM_FREQ_CALC_CONSTANT 32
87#define MHz 1000000
88#define BLC_ADJUSTMENT_MAX 100
89
90static struct backlight_device *mrst_backlight_device;
91static int mrst_brightness;
92
93static int mrst_set_brightness(struct backlight_device *bd)
94{
95 struct drm_device *dev = bl_get_data(mrst_backlight_device);
96 struct drm_psb_private *dev_priv = dev->dev_private;
97 int level = bd->props.brightness;
98 u32 blc_pwm_ctl;
99 u32 max_pwm_blc;
100
101	/* Only percentages from 1-100% are valid */
102 if (level < 1)
103 level = 1;
104
105 if (gma_power_begin(dev, 0)) {
106 /* Calculate and set the brightness value */
107 max_pwm_blc = REG_READ(BLC_PWM_CTL) >> 16;
108 blc_pwm_ctl = level * max_pwm_blc / 100;
109
110 /* Adjust the backlight level with the percent in
111 * dev_priv->blc_adj1;
112 */
113 blc_pwm_ctl = blc_pwm_ctl * dev_priv->blc_adj1;
114 blc_pwm_ctl = blc_pwm_ctl / 100;
115
116 /* Adjust the backlight level with the percent in
117 * dev_priv->blc_adj2;
118 */
119 blc_pwm_ctl = blc_pwm_ctl * dev_priv->blc_adj2;
120 blc_pwm_ctl = blc_pwm_ctl / 100;
121
122 /* force PWM bit on */
123 REG_WRITE(BLC_PWM_CTL2, (0x80000000 | REG_READ(BLC_PWM_CTL2)));
124 REG_WRITE(BLC_PWM_CTL, (max_pwm_blc << 16) | blc_pwm_ctl);
125 gma_power_end(dev);
126 }
127 mrst_brightness = level;
128 return 0;
129}
130
131static int mrst_get_brightness(struct backlight_device *bd)
132{
133 /* return locally cached var instead of HW read (due to DPST etc.) */
134 /* FIXME: ideally return actual value in case firmware fiddled with
135 it */
136 return mrst_brightness;
137}
138
139static int device_backlight_init(struct drm_device *dev)
140{
141 struct drm_psb_private *dev_priv = dev->dev_private;
142 unsigned long core_clock;
143 u16 bl_max_freq;
144 uint32_t value;
145 uint32_t blc_pwm_precision_factor;
146
147 dev_priv->blc_adj1 = BLC_ADJUSTMENT_MAX;
148 dev_priv->blc_adj2 = BLC_ADJUSTMENT_MAX;
149 bl_max_freq = 256;
150 /* this needs to be set elsewhere */
151 blc_pwm_precision_factor = BLC_PWM_PRECISION_FACTOR;
152
153 core_clock = dev_priv->core_freq;
154
155 value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
156 value *= blc_pwm_precision_factor;
157 value /= bl_max_freq;
158 value /= blc_pwm_precision_factor;
159
160 if (value > (unsigned long long)MRST_BLC_MAX_PWM_REG_FREQ)
161 return -ERANGE;
162
163 if (gma_power_begin(dev, false)) {
164 REG_WRITE(BLC_PWM_CTL2, (0x80000000 | REG_READ(BLC_PWM_CTL2)));
165 REG_WRITE(BLC_PWM_CTL, value | (value << 16));
166 gma_power_end(dev);
167 }
168 return 0;
169}
170
171static const struct backlight_ops mrst_ops = {
172 .get_brightness = mrst_get_brightness,
173 .update_status = mrst_set_brightness,
174};
175
176int mrst_backlight_init(struct drm_device *dev)
177{
178 struct drm_psb_private *dev_priv = dev->dev_private;
179 int ret;
180 struct backlight_properties props;
181
182 memset(&props, 0, sizeof(struct backlight_properties));
183 props.max_brightness = 100;
184 props.type = BACKLIGHT_PLATFORM;
185
186 mrst_backlight_device = backlight_device_register("mrst-bl",
187 NULL, (void *)dev, &mrst_ops, &props);
188
189 if (IS_ERR(mrst_backlight_device))
190 return PTR_ERR(mrst_backlight_device);
191
192 ret = device_backlight_init(dev);
193 if (ret < 0) {
194 backlight_device_unregister(mrst_backlight_device);
195 return ret;
196 }
197 mrst_backlight_device->props.brightness = 100;
198 mrst_backlight_device->props.max_brightness = 100;
199 backlight_update_status(mrst_backlight_device);
200 dev_priv->backlight_device = mrst_backlight_device;
201 return 0;
202}
203
204#endif
205
206/*
207 * Provide the Moorestown specific chip logic and low level methods
208 * for power management
209 */
210
211static void mrst_init_pm(struct drm_device *dev)
212{
213}
214
215/**
216 * mrst_save_display_registers - save registers lost on suspend
217 * @dev: our DRM device
218 *
219 * Save the state we need in order to be able to restore the interface
220 * upon resume from suspend
221 */
222static int mrst_save_display_registers(struct drm_device *dev)
223{
224 struct drm_psb_private *dev_priv = dev->dev_private;
225 int i;
226 u32 pp_stat;
227
228 /* Display arbitration control + watermarks */
229 dev_priv->saveDSPARB = PSB_RVDC32(DSPARB);
230 dev_priv->saveDSPFW1 = PSB_RVDC32(DSPFW1);
231 dev_priv->saveDSPFW2 = PSB_RVDC32(DSPFW2);
232 dev_priv->saveDSPFW3 = PSB_RVDC32(DSPFW3);
233 dev_priv->saveDSPFW4 = PSB_RVDC32(DSPFW4);
234 dev_priv->saveDSPFW5 = PSB_RVDC32(DSPFW5);
235 dev_priv->saveDSPFW6 = PSB_RVDC32(DSPFW6);
236 dev_priv->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
237
238 /* Pipe & plane A info */
239 dev_priv->savePIPEACONF = PSB_RVDC32(PIPEACONF);
240 dev_priv->savePIPEASRC = PSB_RVDC32(PIPEASRC);
241 dev_priv->saveFPA0 = PSB_RVDC32(MRST_FPA0);
242 dev_priv->saveFPA1 = PSB_RVDC32(MRST_FPA1);
243 dev_priv->saveDPLL_A = PSB_RVDC32(MRST_DPLL_A);
244 dev_priv->saveHTOTAL_A = PSB_RVDC32(HTOTAL_A);
245 dev_priv->saveHBLANK_A = PSB_RVDC32(HBLANK_A);
246 dev_priv->saveHSYNC_A = PSB_RVDC32(HSYNC_A);
247 dev_priv->saveVTOTAL_A = PSB_RVDC32(VTOTAL_A);
248 dev_priv->saveVBLANK_A = PSB_RVDC32(VBLANK_A);
249 dev_priv->saveVSYNC_A = PSB_RVDC32(VSYNC_A);
250 dev_priv->saveBCLRPAT_A = PSB_RVDC32(BCLRPAT_A);
251 dev_priv->saveDSPACNTR = PSB_RVDC32(DSPACNTR);
252 dev_priv->saveDSPASTRIDE = PSB_RVDC32(DSPASTRIDE);
253 dev_priv->saveDSPAADDR = PSB_RVDC32(DSPABASE);
254 dev_priv->saveDSPASURF = PSB_RVDC32(DSPASURF);
255 dev_priv->saveDSPALINOFF = PSB_RVDC32(DSPALINOFF);
256 dev_priv->saveDSPATILEOFF = PSB_RVDC32(DSPATILEOFF);
257
258 /* Save cursor regs */
259 dev_priv->saveDSPACURSOR_CTRL = PSB_RVDC32(CURACNTR);
260 dev_priv->saveDSPACURSOR_BASE = PSB_RVDC32(CURABASE);
261 dev_priv->saveDSPACURSOR_POS = PSB_RVDC32(CURAPOS);
262
263 /* Save palette (gamma) */
264 for (i = 0; i < 256; i++)
265 dev_priv->save_palette_a[i] = PSB_RVDC32(PALETTE_A + (i << 2));
266
267 if (dev_priv->hdmi_priv)
268 mrst_hdmi_save(dev);
269
270 /* Save performance state */
271 dev_priv->savePERF_MODE = PSB_RVDC32(MRST_PERF_MODE);
272
273 /* LVDS state */
274 dev_priv->savePP_CONTROL = PSB_RVDC32(PP_CONTROL);
275 dev_priv->savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS);
276 dev_priv->savePFIT_AUTO_RATIOS = PSB_RVDC32(PFIT_AUTO_RATIOS);
277 dev_priv->saveBLC_PWM_CTL = PSB_RVDC32(BLC_PWM_CTL);
278 dev_priv->saveBLC_PWM_CTL2 = PSB_RVDC32(BLC_PWM_CTL2);
279 dev_priv->saveLVDS = PSB_RVDC32(LVDS);
280 dev_priv->savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL);
281 dev_priv->savePP_ON_DELAYS = PSB_RVDC32(LVDSPP_ON);
282 dev_priv->savePP_OFF_DELAYS = PSB_RVDC32(LVDSPP_OFF);
283 dev_priv->savePP_DIVISOR = PSB_RVDC32(PP_CYCLE);
284
285 /* HW overlay */
286 dev_priv->saveOV_OVADD = PSB_RVDC32(OV_OVADD);
287 dev_priv->saveOV_OGAMC0 = PSB_RVDC32(OV_OGAMC0);
288 dev_priv->saveOV_OGAMC1 = PSB_RVDC32(OV_OGAMC1);
289 dev_priv->saveOV_OGAMC2 = PSB_RVDC32(OV_OGAMC2);
290 dev_priv->saveOV_OGAMC3 = PSB_RVDC32(OV_OGAMC3);
291 dev_priv->saveOV_OGAMC4 = PSB_RVDC32(OV_OGAMC4);
292 dev_priv->saveOV_OGAMC5 = PSB_RVDC32(OV_OGAMC5);
293
294 /* DPST registers */
295 dev_priv->saveHISTOGRAM_INT_CONTROL_REG =
296 PSB_RVDC32(HISTOGRAM_INT_CONTROL);
297 dev_priv->saveHISTOGRAM_LOGIC_CONTROL_REG =
298 PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
299 dev_priv->savePWM_CONTROL_LOGIC = PSB_RVDC32(PWM_CONTROL_LOGIC);
300
301 if (dev_priv->iLVDS_enable) {
302 /* Shut down the panel */
303 PSB_WVDC32(0, PP_CONTROL);
304
305 do {
306 pp_stat = PSB_RVDC32(PP_STATUS);
307 } while (pp_stat & 0x80000000);
308
309 /* Turn off the plane */
310 PSB_WVDC32(0x58000000, DSPACNTR);
311 /* Trigger the plane disable */
312 PSB_WVDC32(0, DSPASURF);
313
314 /* Wait ~4 ticks */
315 msleep(4);
316
317 /* Turn off pipe */
318 PSB_WVDC32(0x0, PIPEACONF);
319 /* Wait ~8 ticks */
320 msleep(8);
321
322 /* Turn off PLLs */
323 PSB_WVDC32(0, MRST_DPLL_A);
324 }
325 return 0;
326}
327
328/**
329 * mrst_restore_display_registers - restore lost register state
330 * @dev: our DRM device
331 *
332 * Restore register state that was lost during suspend and resume.
333 */
334static int mrst_restore_display_registers(struct drm_device *dev)
335{
336 struct drm_psb_private *dev_priv = dev->dev_private;
337 u32 pp_stat;
338 int i;
339
340 /* Display arbitration + watermarks */
341 PSB_WVDC32(dev_priv->saveDSPARB, DSPARB);
342 PSB_WVDC32(dev_priv->saveDSPFW1, DSPFW1);
343 PSB_WVDC32(dev_priv->saveDSPFW2, DSPFW2);
344 PSB_WVDC32(dev_priv->saveDSPFW3, DSPFW3);
345 PSB_WVDC32(dev_priv->saveDSPFW4, DSPFW4);
346 PSB_WVDC32(dev_priv->saveDSPFW5, DSPFW5);
347 PSB_WVDC32(dev_priv->saveDSPFW6, DSPFW6);
348 PSB_WVDC32(dev_priv->saveCHICKENBIT, DSPCHICKENBIT);
349
350	/* Make sure VGA plane is off. It initializes to on after reset! */
351 PSB_WVDC32(0x80000000, VGACNTRL);
352
353 /* set the plls */
354 PSB_WVDC32(dev_priv->saveFPA0, MRST_FPA0);
355 PSB_WVDC32(dev_priv->saveFPA1, MRST_FPA1);
356
357 /* Actually enable it */
358 PSB_WVDC32(dev_priv->saveDPLL_A, MRST_DPLL_A);
359 DRM_UDELAY(150);
360
361 /* Restore mode */
362 PSB_WVDC32(dev_priv->saveHTOTAL_A, HTOTAL_A);
363 PSB_WVDC32(dev_priv->saveHBLANK_A, HBLANK_A);
364 PSB_WVDC32(dev_priv->saveHSYNC_A, HSYNC_A);
365 PSB_WVDC32(dev_priv->saveVTOTAL_A, VTOTAL_A);
366 PSB_WVDC32(dev_priv->saveVBLANK_A, VBLANK_A);
367 PSB_WVDC32(dev_priv->saveVSYNC_A, VSYNC_A);
368 PSB_WVDC32(dev_priv->savePIPEASRC, PIPEASRC);
369 PSB_WVDC32(dev_priv->saveBCLRPAT_A, BCLRPAT_A);
370
371 /* Restore performance mode*/
372 PSB_WVDC32(dev_priv->savePERF_MODE, MRST_PERF_MODE);
373
374 /* Enable the pipe*/
375 if (dev_priv->iLVDS_enable)
376 PSB_WVDC32(dev_priv->savePIPEACONF, PIPEACONF);
377
378 /* Set up the plane*/
379 PSB_WVDC32(dev_priv->saveDSPALINOFF, DSPALINOFF);
380 PSB_WVDC32(dev_priv->saveDSPASTRIDE, DSPASTRIDE);
381 PSB_WVDC32(dev_priv->saveDSPATILEOFF, DSPATILEOFF);
382
383 /* Enable the plane */
384 PSB_WVDC32(dev_priv->saveDSPACNTR, DSPACNTR);
385 PSB_WVDC32(dev_priv->saveDSPASURF, DSPASURF);
386
387 /* Enable Cursor A */
388 PSB_WVDC32(dev_priv->saveDSPACURSOR_CTRL, CURACNTR);
389 PSB_WVDC32(dev_priv->saveDSPACURSOR_POS, CURAPOS);
390 PSB_WVDC32(dev_priv->saveDSPACURSOR_BASE, CURABASE);
391
392 /* Restore palette (gamma) */
393 for (i = 0; i < 256; i++)
394 PSB_WVDC32(dev_priv->save_palette_a[i], PALETTE_A + (i << 2));
395
396 if (dev_priv->hdmi_priv)
397 mrst_hdmi_restore(dev);
398
399 if (dev_priv->iLVDS_enable) {
400 PSB_WVDC32(dev_priv->saveBLC_PWM_CTL2, BLC_PWM_CTL2);
401 PSB_WVDC32(dev_priv->saveLVDS, LVDS); /*port 61180h*/
402 PSB_WVDC32(dev_priv->savePFIT_CONTROL, PFIT_CONTROL);
403 PSB_WVDC32(dev_priv->savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS);
404 PSB_WVDC32(dev_priv->savePFIT_AUTO_RATIOS, PFIT_AUTO_RATIOS);
405 PSB_WVDC32(dev_priv->saveBLC_PWM_CTL, BLC_PWM_CTL);
406 PSB_WVDC32(dev_priv->savePP_ON_DELAYS, LVDSPP_ON);
407 PSB_WVDC32(dev_priv->savePP_OFF_DELAYS, LVDSPP_OFF);
408 PSB_WVDC32(dev_priv->savePP_DIVISOR, PP_CYCLE);
409 PSB_WVDC32(dev_priv->savePP_CONTROL, PP_CONTROL);
410 }
411
412 /* Wait for cycle delay */
413 do {
414 pp_stat = PSB_RVDC32(PP_STATUS);
415 } while (pp_stat & 0x08000000);
416
417 /* Wait for panel power up */
418 do {
419 pp_stat = PSB_RVDC32(PP_STATUS);
420 } while (pp_stat & 0x10000000);
421
422 /* Restore HW overlay */
423 PSB_WVDC32(dev_priv->saveOV_OVADD, OV_OVADD);
424 PSB_WVDC32(dev_priv->saveOV_OGAMC0, OV_OGAMC0);
425 PSB_WVDC32(dev_priv->saveOV_OGAMC1, OV_OGAMC1);
426 PSB_WVDC32(dev_priv->saveOV_OGAMC2, OV_OGAMC2);
427 PSB_WVDC32(dev_priv->saveOV_OGAMC3, OV_OGAMC3);
428 PSB_WVDC32(dev_priv->saveOV_OGAMC4, OV_OGAMC4);
429 PSB_WVDC32(dev_priv->saveOV_OGAMC5, OV_OGAMC5);
430
431 /* DPST registers */
432 PSB_WVDC32(dev_priv->saveHISTOGRAM_INT_CONTROL_REG,
433 HISTOGRAM_INT_CONTROL);
434 PSB_WVDC32(dev_priv->saveHISTOGRAM_LOGIC_CONTROL_REG,
435 HISTOGRAM_LOGIC_CONTROL);
436 PSB_WVDC32(dev_priv->savePWM_CONTROL_LOGIC, PWM_CONTROL_LOGIC);
437
438 return 0;
439}
440
441/**
442 * mrst_power_down - power down the display island
443 * @dev: our DRM device
444 *
445 * Power down the display interface of our device
446 */
447static int mrst_power_down(struct drm_device *dev)
448{
449 struct drm_psb_private *dev_priv = dev->dev_private;
450 u32 pwr_mask;
451 u32 pwr_sts;
452
453 pwr_mask = PSB_PWRGT_DISPLAY_MASK;
454 outl(pwr_mask, dev_priv->ospm_base + PSB_PM_SSC);
455
456 while (true) {
457 pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
458 if ((pwr_sts & pwr_mask) == pwr_mask)
459 break;
460 else
461 udelay(10);
462 }
463 return 0;
464}
465
466/*
467 * mrst_power_up
468 *
469 * Restore power to the specified island(s) (powergating)
470 */
471static int mrst_power_up(struct drm_device *dev)
472{
473 struct drm_psb_private *dev_priv = dev->dev_private;
474 u32 pwr_mask = PSB_PWRGT_DISPLAY_MASK;
475 u32 pwr_sts, pwr_cnt;
476
477 pwr_cnt = inl(dev_priv->ospm_base + PSB_PM_SSC);
478 pwr_cnt &= ~pwr_mask;
479 outl(pwr_cnt, (dev_priv->ospm_base + PSB_PM_SSC));
480
481 while (true) {
482 pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
483 if ((pwr_sts & pwr_mask) == 0)
484 break;
485 else
486 udelay(10);
487 }
488 return 0;
489}
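/*
 * Note on the pattern used by mrst_power_down()/mrst_power_up() above:
 * mrst_power_down() writes the display power mask to PSB_PM_SSC, while
 * mrst_power_up() clears those bits in it; both then poll PSB_PM_SSS in
 * 10us steps until the masked status bits read back as all set (island
 * gated) or all clear (island powered) respectively.
 */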
490
491#if defined(CONFIG_X86_MRST)
492static void mrst_lvds_cache_bl(struct drm_device *dev)
493{
494 struct drm_psb_private *dev_priv = dev->dev_private;
495
496 intel_scu_ipc_ioread8(0x28, &(dev_priv->saveBKLTCNT));
497 intel_scu_ipc_ioread8(0x29, &(dev_priv->saveBKLTREQ));
498 intel_scu_ipc_ioread8(0x2A, &(dev_priv->saveBKLTBRTL));
499}
500
501static void mrst_mm_bl_power(struct drm_device *dev, bool on)
502{
503 struct drm_psb_private *dev_priv = dev->dev_private;
504
505 if (on) {
506 intel_scu_ipc_iowrite8(0x2A, dev_priv->saveBKLTBRTL);
507 intel_scu_ipc_iowrite8(0x28, dev_priv->saveBKLTCNT);
508 intel_scu_ipc_iowrite8(0x29, dev_priv->saveBKLTREQ);
509 } else {
510 intel_scu_ipc_iowrite8(0x2A, 0);
511 intel_scu_ipc_iowrite8(0x28, 0);
512 intel_scu_ipc_iowrite8(0x29, 0);
513 }
514}
515
516static const struct psb_ops mrst_mm_chip_ops = {
517 .name = "Moorestown MM ",
518 .accel_2d = 1,
519 .pipes = 1,
520 .crtcs = 1,
521 .sgx_offset = MRST_SGX_OFFSET,
522
523 .crtc_helper = &mrst_helper_funcs,
524 .crtc_funcs = &psb_intel_crtc_funcs,
525
526 .output_init = mrst_output_init,
527
528 .lvds_bl_power = mrst_mm_bl_power,
529#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
530 .backlight_init = mrst_backlight_init,
531#endif
532
533 .init_pm = mrst_init_pm,
534 .save_regs = mrst_save_display_registers,
535 .restore_regs = mrst_restore_display_registers,
536 .power_down = mrst_power_down,
537 .power_up = mrst_power_up,
538
539 .i2c_bus = 0,
540};
541
542#endif
543
544static void oaktrail_teardown(struct drm_device *dev)
545{
546 mrst_hdmi_teardown(dev);
547}
548
549static const struct psb_ops oaktrail_chip_ops = {
550 .name = "Oaktrail",
551 .accel_2d = 1,
552 .pipes = 2,
553 .crtcs = 2,
554 .sgx_offset = MRST_SGX_OFFSET,
555
556 .chip_setup = mid_chip_setup,
557 .chip_teardown = oaktrail_teardown,
558 .crtc_helper = &mrst_helper_funcs,
559 .crtc_funcs = &psb_intel_crtc_funcs,
560
561 .output_init = mrst_output_init,
562
563#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
564 .backlight_init = mrst_backlight_init,
565#endif
566
567 .init_pm = mrst_init_pm,
568 .save_regs = mrst_save_display_registers,
569 .restore_regs = mrst_restore_display_registers,
570 .power_down = mrst_power_down,
571 .power_up = mrst_power_up,
572
573 .i2c_bus = 1,
574};
575
576/**
577 * mrst_chip_setup - perform the initial chip init
578 * @dev: Our drm_device
579 *
580 * Figure out which incarnation we are and then scan the firmware for
581 * tables and information.
582 */
583static int mrst_chip_setup(struct drm_device *dev)
584{
585 struct drm_psb_private *dev_priv = dev->dev_private;
586
587 switch (mrst_device_ident(dev)) {
588 case DEVICE_OAKTRAIL:
589 /* Dual CRTC, PC compatible, HDMI, I2C #2 */
590 dev_priv->ops = &oaktrail_chip_ops;
591 mrst_hdmi_setup(dev);
592 return mid_chip_setup(dev);
593#if defined(CONFIG_X86_MRST)
594 case DEVICE_MOORESTOWN_MM:
595 /* Single CRTC, No HDMI, I2C #0, BL control */
596 mrst_lvds_cache_bl(dev);
597 dev_priv->ops = &mrst_mm_chip_ops;
598 return mid_chip_setup(dev);
599 case DEVICE_MOORESTOWN:
600 /* Dual CRTC, No HDMI(?), I2C #1 */
601 return mid_chip_setup(dev);
602#endif
603 default:
604 dev_err(dev->dev, "unsupported device type.\n");
605 return -ENODEV;
606 }
607}
608
609const struct psb_ops mrst_chip_ops = {
610 .name = "Moorestown",
611 .accel_2d = 1,
612 .pipes = 2,
613 .crtcs = 2,
614 .sgx_offset = MRST_SGX_OFFSET,
615
616 .chip_setup = mrst_chip_setup,
617 .crtc_helper = &mrst_helper_funcs,
618 .crtc_funcs = &psb_intel_crtc_funcs,
619
620 .output_init = mrst_output_init,
621
622#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
623 .backlight_init = mrst_backlight_init,
624#endif
625
626 .init_pm = mrst_init_pm,
627 .save_regs = mrst_save_display_registers,
628 .restore_regs = mrst_restore_display_registers,
629 .power_down = mrst_power_down,
630 .power_up = mrst_power_up,
631
632 .i2c_bus = 2,
633};
634
diff --git a/drivers/staging/gma500/mrst_hdmi.c b/drivers/staging/gma500/mrst_hdmi.c
deleted file mode 100644
index e66607eb3d3e..000000000000
--- a/drivers/staging/gma500/mrst_hdmi.c
+++ /dev/null
@@ -1,852 +0,0 @@
1/*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Li Peng <peng.li@intel.com>
25 */
26
27#include <drm/drmP.h>
28#include <drm/drm.h>
29#include "psb_intel_drv.h"
30#include "psb_intel_reg.h"
31#include "psb_drv.h"
32
33#define HDMI_READ(reg) readl(hdmi_dev->regs + (reg))
34#define HDMI_WRITE(reg, val) writel(val, hdmi_dev->regs + (reg))
35
36#define HDMI_HCR 0x1000
37#define HCR_ENABLE_HDCP (1 << 5)
38#define HCR_ENABLE_AUDIO (1 << 2)
39#define HCR_ENABLE_PIXEL (1 << 1)
40#define HCR_ENABLE_TMDS (1 << 0)
41
42#define HDMI_HICR 0x1004
43#define HDMI_HSR 0x1008
44#define HDMI_HISR 0x100C
45#define HDMI_DETECT_HDP (1 << 0)
46
47#define HDMI_VIDEO_REG 0x3000
48#define HDMI_UNIT_EN (1 << 7)
49#define HDMI_MODE_OUTPUT (1 << 0)
50#define HDMI_HBLANK_A 0x3100
51
52#define HDMI_AUDIO_CTRL 0x4000
53#define HDMI_ENABLE_AUDIO (1 << 0)
54
55#define PCH_HTOTAL_B 0x3100
56#define PCH_HBLANK_B 0x3104
57#define PCH_HSYNC_B 0x3108
58#define PCH_VTOTAL_B 0x310C
59#define PCH_VBLANK_B 0x3110
60#define PCH_VSYNC_B 0x3114
61#define PCH_PIPEBSRC 0x311C
62
63#define PCH_PIPEB_DSL 0x3800
64#define PCH_PIPEB_SLC 0x3804
65#define PCH_PIPEBCONF 0x3808
66#define PCH_PIPEBSTAT 0x3824
67
68#define CDVO_DFT 0x5000
69#define CDVO_SLEWRATE 0x5004
70#define CDVO_STRENGTH 0x5008
71#define CDVO_RCOMP 0x500C
72
73#define DPLL_CTRL 0x6000
74#define DPLL_PDIV_SHIFT 16
75#define DPLL_PDIV_MASK (0xf << 16)
76#define DPLL_PWRDN (1 << 4)
77#define DPLL_RESET (1 << 3)
78#define DPLL_FASTEN (1 << 2)
79#define DPLL_ENSTAT (1 << 1)
80#define DPLL_DITHEN (1 << 0)
81
82#define DPLL_DIV_CTRL 0x6004
83#define DPLL_CLKF_MASK 0xffffffc0
84#define DPLL_CLKR_MASK (0x3f)
85
86#define DPLL_CLK_ENABLE 0x6008
87#define DPLL_EN_DISP (1 << 31)
88#define DPLL_SEL_HDMI (1 << 8)
89#define DPLL_EN_HDMI (1 << 1)
90#define DPLL_EN_VGA (1 << 0)
91
92#define DPLL_ADJUST 0x600C
93#define DPLL_STATUS 0x6010
94#define DPLL_UPDATE 0x6014
95#define DPLL_DFT 0x6020
96
97struct intel_range {
98 int min, max;
99};
100
101struct mrst_hdmi_limit {
102 struct intel_range vco, np, nr, nf;
103};
104
105struct mrst_hdmi_clock {
106 int np;
107 int nr;
108 int nf;
109 int dot;
110};
111
112#define VCO_MIN 320000
113#define VCO_MAX 1650000
114#define NP_MIN 1
115#define NP_MAX 15
116#define NR_MIN 1
117#define NR_MAX 64
118#define NF_MIN 2
119#define NF_MAX 4095
120
121static const struct mrst_hdmi_limit mrst_hdmi_limit = {
122 .vco = { .min = VCO_MIN, .max = VCO_MAX },
123 .np = { .min = NP_MIN, .max = NP_MAX },
124 .nr = { .min = NR_MIN, .max = NR_MAX },
125 .nf = { .min = NF_MIN, .max = NF_MAX },
126};
127
128static void wait_for_vblank(struct drm_device *dev)
129{
130 /* FIXME: Can we do this as a sleep ? */
131 /* Wait for 20ms, i.e. one cycle at 50hz. */
132 mdelay(20);
133}
134
135static void scu_busy_loop(void *scu_base)
136{
137 u32 status = 0;
138 u32 loop_count = 0;
139
140 status = readl(scu_base + 0x04);
141 while (status & 1) {
142 udelay(1); /* SCU processing time is a few microseconds */
143 status = readl(scu_base + 0x04);
144 loop_count++;
145 /* break if the SCU doesn't clear the busy bit after many retries */
146 if (loop_count > 1000) {
147 DRM_DEBUG_KMS("SCU IPC timed out");
148 return;
149 }
150 }
151}
152
153static void mrst_hdmi_reset(struct drm_device *dev)
154{
155 void *base;
156 /* FIXME: at least make these defines */
157 unsigned int scu_ipc_mmio = 0xff11c000;
158 int scu_len = 1024;
159
160 base = ioremap((resource_size_t)scu_ipc_mmio, scu_len);
161 if (base == NULL) {
162 DRM_ERROR("failed to map SCU mmio\n");
163 return;
164 }
165
166 /* scu ipc: assert hdmi controller reset */
167 writel(0xff11d118, base + 0x0c);
168 writel(0x7fffffdf, base + 0x80);
169 writel(0x42005, base + 0x0);
170 scu_busy_loop(base);
171
172 /* scu ipc: de-assert hdmi controller reset */
173 writel(0xff11d118, base + 0x0c);
174 writel(0x7fffffff, base + 0x80);
175 writel(0x42005, base + 0x0);
176 scu_busy_loop(base);
177
178 iounmap(base);
179}
180
181static void mrst_hdmi_audio_enable(struct drm_device *dev)
182{
183 struct drm_psb_private *dev_priv = dev->dev_private;
184 struct mrst_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
185
186 HDMI_WRITE(HDMI_HCR, 0x67);
187 HDMI_READ(HDMI_HCR);
188
189 HDMI_WRITE(0x51a8, 0x10);
190 HDMI_READ(0x51a8);
191
192 HDMI_WRITE(HDMI_AUDIO_CTRL, 0x1);
193 HDMI_READ(HDMI_AUDIO_CTRL);
194}
195
196static void mrst_hdmi_audio_disable(struct drm_device *dev)
197{
198 struct drm_psb_private *dev_priv = dev->dev_private;
199 struct mrst_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
200
201 HDMI_WRITE(0x51a8, 0x0);
202 HDMI_READ(0x51a8);
203
204 HDMI_WRITE(HDMI_AUDIO_CTRL, 0x0);
205 HDMI_READ(HDMI_AUDIO_CTRL);
206
207 HDMI_WRITE(HDMI_HCR, 0x47);
208 HDMI_READ(HDMI_HCR);
209}
210
211void mrst_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode)
212{
213 struct drm_device *dev = crtc->dev;
214 u32 temp;
215
216 switch (mode) {
217 case DRM_MODE_DPMS_OFF:
218 /* Disable VGACNTRL */
219 REG_WRITE(VGACNTRL, 0x80000000);
220
221 /* Disable plane */
222 temp = REG_READ(DSPBCNTR);
223 if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
224 REG_WRITE(DSPBCNTR, temp & ~DISPLAY_PLANE_ENABLE);
225 REG_READ(DSPBCNTR);
226 /* Flush the plane changes */
227 REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
228 REG_READ(DSPBSURF);
229 }
230
231 /* Disable pipe B */
232 temp = REG_READ(PIPEBCONF);
233 if ((temp & PIPEACONF_ENABLE) != 0) {
234 REG_WRITE(PIPEBCONF, temp & ~PIPEACONF_ENABLE);
235 REG_READ(PIPEBCONF);
236 }
237
238 /* Disable LNW Pipes, etc */
239 temp = REG_READ(PCH_PIPEBCONF);
240 if ((temp & PIPEACONF_ENABLE) != 0) {
241 REG_WRITE(PCH_PIPEBCONF, temp & ~PIPEACONF_ENABLE);
242 REG_READ(PCH_PIPEBCONF);
243 }
244 /* wait for pipe off */
245 udelay(150);
246 /* Disable dpll */
247 temp = REG_READ(DPLL_CTRL);
248 if ((temp & DPLL_PWRDN) == 0) {
249 REG_WRITE(DPLL_CTRL, temp | (DPLL_PWRDN | DPLL_RESET));
250 REG_WRITE(DPLL_STATUS, 0x1);
251 }
252 /* wait for dpll off */
253 udelay(150);
254 break;
255 case DRM_MODE_DPMS_ON:
256 case DRM_MODE_DPMS_STANDBY:
257 case DRM_MODE_DPMS_SUSPEND:
258 /* Enable dpll */
259 temp = REG_READ(DPLL_CTRL);
260 if ((temp & DPLL_PWRDN) != 0) {
261 REG_WRITE(DPLL_CTRL, temp & ~(DPLL_PWRDN | DPLL_RESET));
262 temp = REG_READ(DPLL_CLK_ENABLE);
263 REG_WRITE(DPLL_CLK_ENABLE, temp | DPLL_EN_DISP | DPLL_SEL_HDMI | DPLL_EN_HDMI);
264 REG_READ(DPLL_CLK_ENABLE);
265 }
266 /* wait for dpll warm up */
267 udelay(150);
268
269 /* Enable pipe B */
270 temp = REG_READ(PIPEBCONF);
271 if ((temp & PIPEACONF_ENABLE) == 0) {
272 REG_WRITE(PIPEBCONF, temp | PIPEACONF_ENABLE);
273 REG_READ(PIPEBCONF);
274 }
275
276 /* Enable LNW Pipe B */
277 temp = REG_READ(PCH_PIPEBCONF);
278 if ((temp & PIPEACONF_ENABLE) == 0) {
279 REG_WRITE(PCH_PIPEBCONF, temp | PIPEACONF_ENABLE);
280 REG_READ(PCH_PIPEBCONF);
281 }
282 wait_for_vblank(dev);
283
284 /* Enable plane */
285 temp = REG_READ(DSPBCNTR);
286 if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
287 REG_WRITE(DSPBCNTR, temp | DISPLAY_PLANE_ENABLE);
288 /* Flush the plane changes */
289 REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
290 REG_READ(DSPBSURF);
291 }
292 psb_intel_crtc_load_lut(crtc);
293 }
294 /* DSPARB */
295 REG_WRITE(DSPARB, 0x00003fbf);
296 /* FW1 */
297 REG_WRITE(0x70034, 0x3f880a0a);
298 /* FW2 */
299 REG_WRITE(0x70038, 0x0b060808);
300 /* FW4 */
301 REG_WRITE(0x70050, 0x08030404);
302 /* FW5 */
303 REG_WRITE(0x70054, 0x04040404);
304 /* LNC Chicken Bits */
305 REG_WRITE(0x70400, 0x4000);
306}
307
308
309static void mrst_hdmi_dpms(struct drm_encoder *encoder, int mode)
310{
311 static int dpms_mode = -1;
312
313 struct drm_device *dev = encoder->dev;
314 struct drm_psb_private *dev_priv = dev->dev_private;
315 struct mrst_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
316 u32 temp;
317
318 if (dpms_mode == mode)
319 return;
320
321 if (mode != DRM_MODE_DPMS_ON)
322 temp = 0x0;
323 else
324 temp = 0x99;
325
326 dpms_mode = mode;
327 HDMI_WRITE(HDMI_VIDEO_REG, temp);
328}
329
330static unsigned int htotal_calculate(struct drm_display_mode *mode)
331{
332 u32 htotal, new_crtc_htotal;
333
334 htotal = (mode->crtc_hdisplay - 1) | ((mode->crtc_htotal - 1) << 16);
335
336 /*
337 * 1024 x 768 new_crtc_htotal = 0x1024;
338 * 1280 x 1024 new_crtc_htotal = 0x0c34;
339 */
340 new_crtc_htotal = (mode->crtc_htotal - 1) * 200 * 1000 / mode->clock;
341
342 return (mode->crtc_hdisplay - 1) | (new_crtc_htotal << 16);
343}
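/*
 * A rough worked check of the values quoted in the comment above, assuming
 * a standard 1024x768@60 timing (crtc_hdisplay = 1024, crtc_htotal = 1344,
 * clock = 65000 kHz):
 *   new_crtc_htotal = (1344 - 1) * 200 * 1000 / 65000 = 4132 = 0x1024
 * so the returned register value would be (0x1024 << 16) | (1024 - 1),
 * i.e. 0x102403ff.
 */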
344
345static void mrst_hdmi_find_dpll(struct drm_crtc *crtc, int target,
346 int refclk, struct mrst_hdmi_clock *best_clock)
347{
348 int np_min, np_max, nr_min, nr_max;
349 int np, nr, nf;
350
351 np_min = DIV_ROUND_UP(mrst_hdmi_limit.vco.min, target * 10);
352 np_max = mrst_hdmi_limit.vco.max / (target * 10);
353 if (np_min < mrst_hdmi_limit.np.min)
354 np_min = mrst_hdmi_limit.np.min;
355 if (np_max > mrst_hdmi_limit.np.max)
356 np_max = mrst_hdmi_limit.np.max;
357
358 nr_min = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_max));
359 nr_max = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_min));
360 if (nr_min < mrst_hdmi_limit.nr.min)
361 nr_min = mrst_hdmi_limit.nr.min;
362 if (nr_max > mrst_hdmi_limit.nr.max)
363 nr_max = mrst_hdmi_limit.nr.max;
364
365 np = DIV_ROUND_UP((refclk * 1000), (target * 10 * nr_max));
366 nr = DIV_ROUND_UP((refclk * 1000), (target * 10 * np));
367 nf = DIV_ROUND_CLOSEST((target * 10 * np * nr), refclk);
368 DRM_DEBUG_KMS("np, nr, nf %d %d %d\n", np, nr, nf);
369
370 /*
371 * 1024 x 768 np = 1; nr = 0x26; nf = 0x0fd8000;
372 * 1280 x 1024 np = 1; nr = 0x17; nf = 0x1034000;
373 */
374 best_clock->np = np;
375 best_clock->nr = nr - 1;
376 best_clock->nf = (nf << 14);
377}
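/*
 * A rough worked check of the 1024x768 case quoted above, assuming
 * target = 65000 (kHz) and refclk = 25000: np_max = 1650000 / 650000 = 2,
 * nr_max = DIV_ROUND_UP(25000000, 650000) = 39, hence np = 1, nr = 39 and
 * nf = DIV_ROUND_CLOSEST(650000 * 1 * 39, 25000) = 1014; the result is
 * best_clock->np = 1, ->nr = 39 - 1 = 0x26 and ->nf = 1014 << 14 =
 * 0x0fd8000, matching the values listed in the comment.
 */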
378
379int mrst_crtc_hdmi_mode_set(struct drm_crtc *crtc,
380 struct drm_display_mode *mode,
381 struct drm_display_mode *adjusted_mode,
382 int x, int y,
383 struct drm_framebuffer *old_fb)
384{
385 struct drm_device *dev = crtc->dev;
386 struct drm_psb_private *dev_priv = dev->dev_private;
387 struct mrst_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
388 int pipe = 1;
389 int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
390 int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
391 int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
392 int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
393 int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
394 int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
395 int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
396 int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
397 int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
398 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
399 int refclk;
400 struct mrst_hdmi_clock clock;
401 u32 dspcntr, pipeconf, dpll, temp;
402 int dspcntr_reg = DSPBCNTR;
403
404 /* Disable the VGA plane that we never use */
405 REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
406
407 /* XXX: Disable the panel fitter if it was on our pipe */
408
409 /* Disable dpll if necessary */
410 dpll = REG_READ(DPLL_CTRL);
411 if ((dpll & DPLL_PWRDN) == 0) {
412 REG_WRITE(DPLL_CTRL, dpll | (DPLL_PWRDN | DPLL_RESET));
413 REG_WRITE(DPLL_DIV_CTRL, 0x00000000);
414 REG_WRITE(DPLL_STATUS, 0x1);
415 }
416 udelay(150);
417
418 /* reset controller: FIXME - can we sort out the ioremap mess ? */
419 iounmap(hdmi_dev->regs);
420 mrst_hdmi_reset(dev);
421
422 /* program and enable dpll */
423 refclk = 25000;
424 mrst_hdmi_find_dpll(crtc, adjusted_mode->clock, refclk, &clock);
425
426 /* Setting DPLL */
427 dpll = REG_READ(DPLL_CTRL);
428 dpll &= ~DPLL_PDIV_MASK;
429 dpll &= ~(DPLL_PWRDN | DPLL_RESET);
430 REG_WRITE(DPLL_CTRL, 0x00000008);
431 REG_WRITE(DPLL_DIV_CTRL, ((clock.nf << 6) | clock.nr));
432 REG_WRITE(DPLL_ADJUST, ((clock.nf >> 14) - 1));
433 REG_WRITE(DPLL_CTRL, (dpll | (clock.np << DPLL_PDIV_SHIFT) | DPLL_ENSTAT | DPLL_DITHEN));
434 REG_WRITE(DPLL_UPDATE, 0x80000000);
435 REG_WRITE(DPLL_CLK_ENABLE, 0x80050102);
436 udelay(150);
437
438 hdmi_dev->regs = ioremap(hdmi_dev->mmio, hdmi_dev->mmio_len);
439 if (hdmi_dev->regs == NULL) {
440 DRM_ERROR("failed to do hdmi mmio mapping\n");
441 return -ENOMEM;
442 }
443
444 /* configure HDMI */
445 HDMI_WRITE(0x1004, 0x1fd);
446 HDMI_WRITE(0x2000, 0x1);
447 HDMI_WRITE(0x2008, 0x0);
448 HDMI_WRITE(0x3130, 0x8);
449 HDMI_WRITE(0x101c, 0x1800810);
450
451 temp = htotal_calculate(adjusted_mode);
452 REG_WRITE(htot_reg, temp);
453 REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
454 REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
455 REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
456 REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
457 REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
458 REG_WRITE(pipesrc_reg,
459 ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1));
460
461 REG_WRITE(PCH_HTOTAL_B, (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
462 REG_WRITE(PCH_HBLANK_B, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
463 REG_WRITE(PCH_HSYNC_B, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
464 REG_WRITE(PCH_VTOTAL_B, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
465 REG_WRITE(PCH_VBLANK_B, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
466 REG_WRITE(PCH_VSYNC_B, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
467 REG_WRITE(PCH_PIPEBSRC,
468 ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1));
469
470 temp = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
471 HDMI_WRITE(HDMI_HBLANK_A, ((adjusted_mode->crtc_hdisplay - 1) << 16) | temp);
472
473 REG_WRITE(dspsize_reg,
474 ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
475 REG_WRITE(dsppos_reg, 0);
476
477 /* Flush the plane changes */
478 {
479 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
480 crtc_funcs->mode_set_base(crtc, x, y, old_fb);
481 }
482
483 /* Set up the display plane register */
484 dspcntr = REG_READ(dspcntr_reg);
485 dspcntr |= DISPPLANE_GAMMA_ENABLE;
486 dspcntr |= DISPPLANE_SEL_PIPE_B;
487 dspcntr |= DISPLAY_PLANE_ENABLE;
488
489 /* setup pipeconf */
490 pipeconf = REG_READ(pipeconf_reg);
491 pipeconf |= PIPEACONF_ENABLE;
492
493 REG_WRITE(pipeconf_reg, pipeconf);
494 REG_READ(pipeconf_reg);
495
496 REG_WRITE(PCH_PIPEBCONF, pipeconf);
497 REG_READ(PCH_PIPEBCONF);
498 wait_for_vblank(dev);
499
500 REG_WRITE(dspcntr_reg, dspcntr);
501 wait_for_vblank(dev);
502
503 return 0;
504}
505
506static int mrst_hdmi_mode_valid(struct drm_connector *connector,
507 struct drm_display_mode *mode)
508{
509 if (mode->clock > 165000)
510 return MODE_CLOCK_HIGH;
511 if (mode->clock < 20000)
512 return MODE_CLOCK_LOW;
513
514 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
515 return MODE_NO_DBLESCAN;
516
517 return MODE_OK;
518}
519
520static bool mrst_hdmi_mode_fixup(struct drm_encoder *encoder,
521 struct drm_display_mode *mode,
522 struct drm_display_mode *adjusted_mode)
523{
524 return true;
525}
526
527static enum drm_connector_status
528mrst_hdmi_detect(struct drm_connector *connector, bool force)
529{
530 enum drm_connector_status status;
531 struct drm_device *dev = connector->dev;
532 struct drm_psb_private *dev_priv = dev->dev_private;
533 struct mrst_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
534 u32 temp;
535
536 temp = HDMI_READ(HDMI_HSR);
537 DRM_DEBUG_KMS("HDMI_HSR %x\n", temp);
538
539 if ((temp & HDMI_DETECT_HDP) != 0)
540 status = connector_status_connected;
541 else
542 status = connector_status_disconnected;
543
544 return status;
545}
546
547static const unsigned char raw_edid[] = {
548 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x10, 0xac, 0x2f, 0xa0,
549 0x53, 0x55, 0x33, 0x30, 0x16, 0x13, 0x01, 0x03, 0x0e, 0x3a, 0x24, 0x78,
550 0xea, 0xe9, 0xf5, 0xac, 0x51, 0x30, 0xb4, 0x25, 0x11, 0x50, 0x54, 0xa5,
551 0x4b, 0x00, 0x81, 0x80, 0xa9, 0x40, 0x71, 0x4f, 0xb3, 0x00, 0x01, 0x01,
552 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0,
553 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x46, 0x6c, 0x21, 0x00, 0x00, 0x1a,
554 0x00, 0x00, 0x00, 0xff, 0x00, 0x47, 0x4e, 0x37, 0x32, 0x31, 0x39, 0x35,
555 0x52, 0x30, 0x33, 0x55, 0x53, 0x0a, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x44,
556 0x45, 0x4c, 0x4c, 0x20, 0x32, 0x37, 0x30, 0x39, 0x57, 0x0a, 0x20, 0x20,
557 0x00, 0x00, 0x00, 0xfd, 0x00, 0x38, 0x4c, 0x1e, 0x53, 0x11, 0x00, 0x0a,
558 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00, 0x8d
559};
560
561static int mrst_hdmi_get_modes(struct drm_connector *connector)
562{
563 struct drm_device *dev = connector->dev;
564 struct drm_psb_private *dev_priv = dev->dev_private;
565 struct i2c_adapter *i2c_adap;
566 struct edid *edid;
567 struct drm_display_mode *mode, *t;
568 int i = 0, ret = 0;
569
570 i2c_adap = i2c_get_adapter(3);
571 if (i2c_adap == NULL) {
572 DRM_ERROR("No ddc adapter available!\n");
573 edid = (struct edid *)raw_edid;
574 } else {
575 edid = (struct edid *)raw_edid;
576 /* FIXME ? edid = drm_get_edid(connector, i2c_adap); */
577 }
578
579 if (edid) {
580 drm_mode_connector_update_edid_property(connector, edid);
581 ret = drm_add_edid_modes(connector, edid);
582 connector->display_info.raw_edid = NULL;
583 }
584
585 /*
586 * Prune modes whose framebuffer would not fit in stolen memory
587 */
588 list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
589 if ((mode->hdisplay * mode->vdisplay * 4) >= dev_priv->vram_stolen_size) {
590 i++;
591 drm_mode_remove(connector, mode);
592 }
593 }
594 return ret - i;
595}
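/*
 * For example, a probed 1920x1080 mode needs 1920 * 1080 * 4 bytes (about
 * 8 MB) of framebuffer; if vram_stolen_size is smaller than that, the loop
 * above drops the mode and the returned count is reduced accordingly.
 */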
596
597static void mrst_hdmi_mode_set(struct drm_encoder *encoder,
598 struct drm_display_mode *mode,
599 struct drm_display_mode *adjusted_mode)
600{
601 struct drm_device *dev = encoder->dev;
602
603 mrst_hdmi_audio_enable(dev);
604 return;
605}
606
607static void mrst_hdmi_destroy(struct drm_connector *connector)
608{
609 return;
610}
611
612static const struct drm_encoder_helper_funcs mrst_hdmi_helper_funcs = {
613 .dpms = mrst_hdmi_dpms,
614 .mode_fixup = mrst_hdmi_mode_fixup,
615 .prepare = psb_intel_encoder_prepare,
616 .mode_set = mrst_hdmi_mode_set,
617 .commit = psb_intel_encoder_commit,
618};
619
620static const struct drm_connector_helper_funcs
621 mrst_hdmi_connector_helper_funcs = {
622 .get_modes = mrst_hdmi_get_modes,
623 .mode_valid = mrst_hdmi_mode_valid,
624 .best_encoder = psb_intel_best_encoder,
625};
626
627static const struct drm_connector_funcs mrst_hdmi_connector_funcs = {
628 .dpms = drm_helper_connector_dpms,
629 .detect = mrst_hdmi_detect,
630 .fill_modes = drm_helper_probe_single_connector_modes,
631 .destroy = mrst_hdmi_destroy,
632};
633
634static void mrst_hdmi_enc_destroy(struct drm_encoder *encoder)
635{
636 drm_encoder_cleanup(encoder);
637}
638
639static const struct drm_encoder_funcs mrst_hdmi_enc_funcs = {
640 .destroy = mrst_hdmi_enc_destroy,
641};
642
643void mrst_hdmi_init(struct drm_device *dev,
644 struct psb_intel_mode_device *mode_dev)
645{
646 struct psb_intel_output *psb_intel_output;
647 struct drm_connector *connector;
648 struct drm_encoder *encoder;
649
650 psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
651 if (!psb_intel_output)
652 return;
653
654 psb_intel_output->mode_dev = mode_dev;
655 connector = &psb_intel_output->base;
656 encoder = &psb_intel_output->enc;
657 drm_connector_init(dev, &psb_intel_output->base,
658 &mrst_hdmi_connector_funcs,
659 DRM_MODE_CONNECTOR_DVID);
660
661 drm_encoder_init(dev, &psb_intel_output->enc,
662 &mrst_hdmi_enc_funcs,
663 DRM_MODE_ENCODER_TMDS);
664
665 drm_mode_connector_attach_encoder(&psb_intel_output->base,
666 &psb_intel_output->enc);
667
668 psb_intel_output->type = INTEL_OUTPUT_HDMI;
669 drm_encoder_helper_add(encoder, &mrst_hdmi_helper_funcs);
670 drm_connector_helper_add(connector, &mrst_hdmi_connector_helper_funcs);
671
672 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
673 connector->interlace_allowed = false;
674 connector->doublescan_allowed = false;
675 drm_sysfs_connector_add(connector);
676
677 return;
678}
679
680static DEFINE_PCI_DEVICE_TABLE(hdmi_ids) = {
681 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080d) },
682 {}
683};
684
685void mrst_hdmi_setup(struct drm_device *dev)
686{
687 struct drm_psb_private *dev_priv = dev->dev_private;
688 struct pci_dev *pdev;
689 struct mrst_hdmi_dev *hdmi_dev;
690 int ret;
691
692 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x080d, NULL);
693 if (!pdev)
694 return;
695
696 hdmi_dev = kzalloc(sizeof(struct mrst_hdmi_dev), GFP_KERNEL);
697 if (!hdmi_dev) {
698 dev_err(dev->dev, "failed to allocate memory\n");
699 goto out;
700 }
701
702
703 ret = pci_enable_device(pdev);
704 if (ret) {
705 dev_err(dev->dev, "failed to enable hdmi controller\n");
706 goto free;
707 }
708
709 hdmi_dev->mmio = pci_resource_start(pdev, 0);
710 hdmi_dev->mmio_len = pci_resource_len(pdev, 0);
711 hdmi_dev->regs = ioremap(hdmi_dev->mmio, hdmi_dev->mmio_len);
712 if (!hdmi_dev->regs) {
713 dev_err(dev->dev, "failed to map hdmi mmio\n");
714 goto free;
715 }
716
717 hdmi_dev->dev = pdev;
718 pci_set_drvdata(pdev, hdmi_dev);
719
720 /* Initialize i2c controller */
721 ret = mrst_hdmi_i2c_init(hdmi_dev->dev);
722 if (ret)
723 dev_err(dev->dev, "HDMI I2C initialization failed\n");
724
725 dev_priv->hdmi_priv = hdmi_dev;
726 mrst_hdmi_audio_disable(dev);
727 return;
728
729free:
730 kfree(hdmi_dev);
731out:
732 return;
733}
734
735void mrst_hdmi_teardown(struct drm_device *dev)
736{
737 struct drm_psb_private *dev_priv = dev->dev_private;
738 struct mrst_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
739 struct pci_dev *pdev;
740
741 if (hdmi_dev) {
742 pdev = hdmi_dev->dev;
743 pci_set_drvdata(pdev, NULL);
744 mrst_hdmi_i2c_exit(pdev);
745 iounmap(hdmi_dev->regs);
746 kfree(hdmi_dev);
747 pci_dev_put(pdev);
748 }
749}
750
751/* save HDMI register state */
752void mrst_hdmi_save(struct drm_device *dev)
753{
754 struct drm_psb_private *dev_priv = dev->dev_private;
755 struct mrst_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
756 int i;
757
758 /* dpll */
759 hdmi_dev->saveDPLL_CTRL = PSB_RVDC32(DPLL_CTRL);
760 hdmi_dev->saveDPLL_DIV_CTRL = PSB_RVDC32(DPLL_DIV_CTRL);
761 hdmi_dev->saveDPLL_ADJUST = PSB_RVDC32(DPLL_ADJUST);
762 hdmi_dev->saveDPLL_UPDATE = PSB_RVDC32(DPLL_UPDATE);
763 hdmi_dev->saveDPLL_CLK_ENABLE = PSB_RVDC32(DPLL_CLK_ENABLE);
764
765 /* pipe B */
766 dev_priv->savePIPEBCONF = PSB_RVDC32(PIPEBCONF);
767 dev_priv->savePIPEBSRC = PSB_RVDC32(PIPEBSRC);
768 dev_priv->saveHTOTAL_B = PSB_RVDC32(HTOTAL_B);
769 dev_priv->saveHBLANK_B = PSB_RVDC32(HBLANK_B);
770 dev_priv->saveHSYNC_B = PSB_RVDC32(HSYNC_B);
771 dev_priv->saveVTOTAL_B = PSB_RVDC32(VTOTAL_B);
772 dev_priv->saveVBLANK_B = PSB_RVDC32(VBLANK_B);
773 dev_priv->saveVSYNC_B = PSB_RVDC32(VSYNC_B);
774
775 hdmi_dev->savePCH_PIPEBCONF = PSB_RVDC32(PCH_PIPEBCONF);
776 hdmi_dev->savePCH_PIPEBSRC = PSB_RVDC32(PCH_PIPEBSRC);
777 hdmi_dev->savePCH_HTOTAL_B = PSB_RVDC32(PCH_HTOTAL_B);
778 hdmi_dev->savePCH_HBLANK_B = PSB_RVDC32(PCH_HBLANK_B);
779 hdmi_dev->savePCH_HSYNC_B = PSB_RVDC32(PCH_HSYNC_B);
780 hdmi_dev->savePCH_VTOTAL_B = PSB_RVDC32(PCH_VTOTAL_B);
781 hdmi_dev->savePCH_VBLANK_B = PSB_RVDC32(PCH_VBLANK_B);
782 hdmi_dev->savePCH_VSYNC_B = PSB_RVDC32(PCH_VSYNC_B);
783
784 /* plane */
785 dev_priv->saveDSPBCNTR = PSB_RVDC32(DSPBCNTR);
786 dev_priv->saveDSPBSTRIDE = PSB_RVDC32(DSPBSTRIDE);
787 dev_priv->saveDSPBADDR = PSB_RVDC32(DSPBBASE);
788 dev_priv->saveDSPBSURF = PSB_RVDC32(DSPBSURF);
789 dev_priv->saveDSPBLINOFF = PSB_RVDC32(DSPBLINOFF);
790 dev_priv->saveDSPBTILEOFF = PSB_RVDC32(DSPBTILEOFF);
791
792 /* cursor B */
793 dev_priv->saveDSPBCURSOR_CTRL = PSB_RVDC32(CURBCNTR);
794 dev_priv->saveDSPBCURSOR_BASE = PSB_RVDC32(CURBBASE);
795 dev_priv->saveDSPBCURSOR_POS = PSB_RVDC32(CURBPOS);
796
797 /* save palette */
798 for (i = 0; i < 256; i++)
799 dev_priv->save_palette_b[i] = PSB_RVDC32(PALETTE_B + (i << 2));
800}
801
802/* restore HDMI register state */
803void mrst_hdmi_restore(struct drm_device *dev)
804{
805 struct drm_psb_private *dev_priv = dev->dev_private;
806 struct mrst_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
807 int i;
808
809 /* dpll */
810 PSB_WVDC32(hdmi_dev->saveDPLL_CTRL, DPLL_CTRL);
811 PSB_WVDC32(hdmi_dev->saveDPLL_DIV_CTRL, DPLL_DIV_CTRL);
812 PSB_WVDC32(hdmi_dev->saveDPLL_ADJUST, DPLL_ADJUST);
813 PSB_WVDC32(hdmi_dev->saveDPLL_UPDATE, DPLL_UPDATE);
814 PSB_WVDC32(hdmi_dev->saveDPLL_CLK_ENABLE, DPLL_CLK_ENABLE);
815 DRM_UDELAY(150);
816
817 /* pipe */
818 PSB_WVDC32(dev_priv->savePIPEBSRC, PIPEBSRC);
819 PSB_WVDC32(dev_priv->saveHTOTAL_B, HTOTAL_B);
820 PSB_WVDC32(dev_priv->saveHBLANK_B, HBLANK_B);
821 PSB_WVDC32(dev_priv->saveHSYNC_B, HSYNC_B);
822 PSB_WVDC32(dev_priv->saveVTOTAL_B, VTOTAL_B);
823 PSB_WVDC32(dev_priv->saveVBLANK_B, VBLANK_B);
824 PSB_WVDC32(dev_priv->saveVSYNC_B, VSYNC_B);
825
826 PSB_WVDC32(hdmi_dev->savePCH_PIPEBSRC, PCH_PIPEBSRC);
827 PSB_WVDC32(hdmi_dev->savePCH_HTOTAL_B, PCH_HTOTAL_B);
828 PSB_WVDC32(hdmi_dev->savePCH_HBLANK_B, PCH_HBLANK_B);
829 PSB_WVDC32(hdmi_dev->savePCH_HSYNC_B, PCH_HSYNC_B);
830 PSB_WVDC32(hdmi_dev->savePCH_VTOTAL_B, PCH_VTOTAL_B);
831 PSB_WVDC32(hdmi_dev->savePCH_VBLANK_B, PCH_VBLANK_B);
832 PSB_WVDC32(hdmi_dev->savePCH_VSYNC_B, PCH_VSYNC_B);
833
834 PSB_WVDC32(dev_priv->savePIPEBCONF, PIPEBCONF);
835 PSB_WVDC32(hdmi_dev->savePCH_PIPEBCONF, PCH_PIPEBCONF);
836
837 /* plane */
838 PSB_WVDC32(dev_priv->saveDSPBLINOFF, DSPBLINOFF);
839 PSB_WVDC32(dev_priv->saveDSPBSTRIDE, DSPBSTRIDE);
840 PSB_WVDC32(dev_priv->saveDSPBTILEOFF, DSPBTILEOFF);
841 PSB_WVDC32(dev_priv->saveDSPBCNTR, DSPBCNTR);
842 PSB_WVDC32(dev_priv->saveDSPBSURF, DSPBSURF);
843
844 /* cursor B */
845 PSB_WVDC32(dev_priv->saveDSPBCURSOR_CTRL, CURBCNTR);
846 PSB_WVDC32(dev_priv->saveDSPBCURSOR_POS, CURBPOS);
847 PSB_WVDC32(dev_priv->saveDSPBCURSOR_BASE, CURBBASE);
848
849 /* restore palette */
850 for (i = 0; i < 256; i++)
851 PSB_WVDC32(dev_priv->save_palette_b[i], PALETTE_B + (i << 2));
852}
diff --git a/drivers/staging/gma500/mrst_hdmi_i2c.c b/drivers/staging/gma500/mrst_hdmi_i2c.c
deleted file mode 100644
index 36e7edc4d14c..000000000000
--- a/drivers/staging/gma500/mrst_hdmi_i2c.c
+++ /dev/null
@@ -1,328 +0,0 @@
1/*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Li Peng <peng.li@intel.com>
25 */
26
27#include <linux/mutex.h>
28#include <linux/pci.h>
29#include <linux/i2c.h>
30#include <linux/interrupt.h>
31#include <linux/delay.h>
32#include <linux/export.h>
33#include "psb_drv.h"
34
35#define HDMI_READ(reg) readl(hdmi_dev->regs + (reg))
36#define HDMI_WRITE(reg, val) writel(val, hdmi_dev->regs + (reg))
37
38#define HDMI_HCR 0x1000
39#define HCR_DETECT_HDP (1 << 6)
40#define HCR_ENABLE_HDCP (1 << 5)
41#define HCR_ENABLE_AUDIO (1 << 2)
42#define HCR_ENABLE_PIXEL (1 << 1)
43#define HCR_ENABLE_TMDS (1 << 0)
44#define HDMI_HICR 0x1004
45#define HDMI_INTR_I2C_ERROR (1 << 4)
46#define HDMI_INTR_I2C_FULL (1 << 3)
47#define HDMI_INTR_I2C_DONE (1 << 2)
48#define HDMI_INTR_HPD (1 << 0)
49#define HDMI_HSR 0x1008
50#define HDMI_HISR 0x100C
51#define HDMI_HI2CRDB0 0x1200
52#define HDMI_HI2CHCR 0x1240
53#define HI2C_HDCP_WRITE (0 << 2)
54#define HI2C_HDCP_RI_READ (1 << 2)
55#define HI2C_HDCP_READ (2 << 2)
56#define HI2C_EDID_READ (3 << 2)
57#define HI2C_READ_CONTINUE (1 << 1)
58#define HI2C_ENABLE_TRANSACTION (1 << 0)
59
60#define HDMI_ICRH 0x1100
61#define HDMI_HI2CTDR0 0x1244
62#define HDMI_HI2CTDR1 0x1248
63
64#define I2C_STAT_INIT 0
65#define I2C_READ_DONE 1
66#define I2C_TRANSACTION_DONE 2
67
68struct hdmi_i2c_dev {
69 struct i2c_adapter *adap;
70 struct mutex i2c_lock;
71 struct completion complete;
72 int status;
73 struct i2c_msg *msg;
74 int buf_offset;
75};
76
77static void hdmi_i2c_irq_enable(struct mrst_hdmi_dev *hdmi_dev)
78{
79 u32 temp;
80
81 temp = HDMI_READ(HDMI_HICR);
82 temp |= (HDMI_INTR_I2C_ERROR | HDMI_INTR_I2C_FULL | HDMI_INTR_I2C_DONE);
83 HDMI_WRITE(HDMI_HICR, temp);
84 HDMI_READ(HDMI_HICR);
85}
86
87static void hdmi_i2c_irq_disable(struct mrst_hdmi_dev *hdmi_dev)
88{
89 HDMI_WRITE(HDMI_HICR, 0x0);
90 HDMI_READ(HDMI_HICR);
91}
92
93static int xfer_read(struct i2c_adapter *adap, struct i2c_msg *pmsg)
94{
95 struct mrst_hdmi_dev *hdmi_dev = i2c_get_adapdata(adap);
96 struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
97 u32 temp;
98
99 i2c_dev->status = I2C_STAT_INIT;
100 i2c_dev->msg = pmsg;
101 i2c_dev->buf_offset = 0;
102 INIT_COMPLETION(i2c_dev->complete);
103
104 /* Enable I2C transaction */
105 temp = ((pmsg->len) << 20) | HI2C_EDID_READ | HI2C_ENABLE_TRANSACTION;
106 HDMI_WRITE(HDMI_HI2CHCR, temp);
107 HDMI_READ(HDMI_HI2CHCR);
108
109 while (i2c_dev->status != I2C_TRANSACTION_DONE)
110 wait_for_completion_interruptible_timeout(&i2c_dev->complete,
111 10 * HZ);
112
113 return 0;
114}
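/*
 * As an illustration, a 128-byte EDID block read programs HDMI_HI2CHCR with
 * (128 << 20) | HI2C_EDID_READ | HI2C_ENABLE_TRANSACTION = 0x0800000d; the
 * interrupt handler then completes i2c_dev->complete as the read-buffer-full
 * and transaction-done interrupts arrive.
 */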
115
116static int xfer_write(struct i2c_adapter *adap, struct i2c_msg *pmsg)
117{
118 /*
119 * XXX: i2c writes don't seem to be needed for the EDID probe, so do nothing
120 */
121 return 0;
122}
123
124static int mrst_hdmi_i2c_access(struct i2c_adapter *adap,
125 struct i2c_msg *pmsg,
126 int num)
127{
128 struct mrst_hdmi_dev *hdmi_dev = i2c_get_adapdata(adap);
129 struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
130 int i, err = 0;
131
132 mutex_lock(&i2c_dev->i2c_lock);
133
134 /* Enable i2c unit */
135 HDMI_WRITE(HDMI_ICRH, 0x00008760);
136
137 /* Enable irq */
138 hdmi_i2c_irq_enable(hdmi_dev);
139 for (i = 0; i < num; i++) {
140 if (pmsg->len && pmsg->buf) {
141 if (pmsg->flags & I2C_M_RD)
142 err = xfer_read(adap, pmsg);
143 else
144 err = xfer_write(adap, pmsg);
145 }
146 pmsg++; /* next message */
147 }
148
149 /* Disable irq */
150 hdmi_i2c_irq_disable(hdmi_dev);
151
152 mutex_unlock(&i2c_dev->i2c_lock);
153
154 return i;
155}
156
157static u32 mrst_hdmi_i2c_func(struct i2c_adapter *adapter)
158{
159 return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR;
160}
161
162static const struct i2c_algorithm mrst_hdmi_i2c_algorithm = {
163 .master_xfer = mrst_hdmi_i2c_access,
164 .functionality = mrst_hdmi_i2c_func,
165};
166
167static struct i2c_adapter mrst_hdmi_i2c_adapter = {
168 .name = "mrst_hdmi_i2c",
169 .nr = 3,
170 .owner = THIS_MODULE,
171 .class = I2C_CLASS_DDC,
172 .algo = &mrst_hdmi_i2c_algorithm,
173};
174
175static void hdmi_i2c_read(struct mrst_hdmi_dev *hdmi_dev)
176{
177 struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
178 struct i2c_msg *msg = i2c_dev->msg;
179 u8 *buf = msg->buf;
180 u32 temp;
181 int i, offset;
182
183 offset = i2c_dev->buf_offset;
184 for (i = 0; i < 0x10; i++) {
185 temp = HDMI_READ(HDMI_HI2CRDB0 + (i * 4));
186 memcpy(buf + (offset + i * 4), &temp, 4);
187 }
188 i2c_dev->buf_offset += (0x10 * 4);
189
190 /* clearing read buffer full intr */
191 temp = HDMI_READ(HDMI_HISR);
192 HDMI_WRITE(HDMI_HISR, temp | HDMI_INTR_I2C_FULL);
193 HDMI_READ(HDMI_HISR);
194
195 /* continue read transaction */
196 temp = HDMI_READ(HDMI_HI2CHCR);
197 HDMI_WRITE(HDMI_HI2CHCR, temp | HI2C_READ_CONTINUE);
198 HDMI_READ(HDMI_HI2CHCR);
199
200 i2c_dev->status = I2C_READ_DONE;
201 return;
202}
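/*
 * Each read-buffer-full interrupt drains 0x10 32-bit read-data registers,
 * i.e. 64 bytes, so a standard 128-byte EDID block is delivered in two such
 * bursts before the final transaction-done interrupt.
 */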
203
204static void hdmi_i2c_transaction_done(struct mrst_hdmi_dev *hdmi_dev)
205{
206 struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
207 u32 temp;
208
209 /* clear transaction done intr */
210 temp = HDMI_READ(HDMI_HISR);
211 HDMI_WRITE(HDMI_HISR, temp | HDMI_INTR_I2C_DONE);
212 HDMI_READ(HDMI_HISR);
213
214
215 temp = HDMI_READ(HDMI_HI2CHCR);
216 HDMI_WRITE(HDMI_HI2CHCR, temp & ~HI2C_ENABLE_TRANSACTION);
217 HDMI_READ(HDMI_HI2CHCR);
218
219 i2c_dev->status = I2C_TRANSACTION_DONE;
220 return;
221}
222
223static irqreturn_t mrst_hdmi_i2c_handler(int this_irq, void *dev)
224{
225 struct mrst_hdmi_dev *hdmi_dev = dev;
226 struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
227 u32 stat;
228
229 stat = HDMI_READ(HDMI_HISR);
230
231 if (stat & HDMI_INTR_HPD) {
232 HDMI_WRITE(HDMI_HISR, stat | HDMI_INTR_HPD);
233 HDMI_READ(HDMI_HISR);
234 }
235
236 if (stat & HDMI_INTR_I2C_FULL)
237 hdmi_i2c_read(hdmi_dev);
238
239 if (stat & HDMI_INTR_I2C_DONE)
240 hdmi_i2c_transaction_done(hdmi_dev);
241
242 complete(&i2c_dev->complete);
243
244 return IRQ_HANDLED;
245}
246
247/*
248 * Choose alternate function 2 of GPIO pins 52 and 53,
249 * which are used by the HDMI I2C logic.
250 */
251static void mrst_hdmi_i2c_gpio_fix(void)
252{
253 void *base;
254 unsigned int gpio_base = 0xff12c000;
255 int gpio_len = 0x1000;
256 u32 temp;
257
258 base = ioremap((resource_size_t)gpio_base, gpio_len);
259 if (base == NULL) {
260 DRM_ERROR("gpio ioremap fail\n");
261 return;
262 }
263
264 temp = readl(base + 0x44);
265 DRM_DEBUG_DRIVER("old gpio val %x\n", temp);
266 writel((temp | 0x00000a00), (base + 0x44));
267 temp = readl(base + 0x44);
268 DRM_DEBUG_DRIVER("new gpio val %x\n", temp);
269
270 iounmap(base);
271}
272
273int mrst_hdmi_i2c_init(struct pci_dev *dev)
274{
275 struct mrst_hdmi_dev *hdmi_dev;
276 struct hdmi_i2c_dev *i2c_dev;
277 int ret;
278
279 hdmi_dev = pci_get_drvdata(dev);
280
281 i2c_dev = kzalloc(sizeof(struct hdmi_i2c_dev), GFP_KERNEL);
282 if (i2c_dev == NULL) {
283 DRM_ERROR("Can't allocate interface\n");
284 ret = -ENOMEM;
285 goto exit;
286 }
287
288 i2c_dev->adap = &mrst_hdmi_i2c_adapter;
289 i2c_dev->status = I2C_STAT_INIT;
290 init_completion(&i2c_dev->complete);
291 mutex_init(&i2c_dev->i2c_lock);
292 i2c_set_adapdata(&mrst_hdmi_i2c_adapter, hdmi_dev);
293 hdmi_dev->i2c_dev = i2c_dev;
294
295 /* Enable HDMI I2C function on gpio */
296 mrst_hdmi_i2c_gpio_fix();
297
298 /* request irq */
299 ret = request_irq(dev->irq, mrst_hdmi_i2c_handler, IRQF_SHARED,
300 mrst_hdmi_i2c_adapter.name, hdmi_dev);
301 if (ret) {
302 DRM_ERROR("Failed to request IRQ for I2C controller\n");
303 goto err;
304 }
305
306 /* Adapter registration */
307 ret = i2c_add_numbered_adapter(&mrst_hdmi_i2c_adapter);
308 return ret;
309
310err:
311 kfree(i2c_dev);
312exit:
313 return ret;
314}
315
316void mrst_hdmi_i2c_exit(struct pci_dev *dev)
317{
318 struct mrst_hdmi_dev *hdmi_dev;
319 struct hdmi_i2c_dev *i2c_dev;
320
321 hdmi_dev = pci_get_drvdata(dev);
322 if (i2c_del_adapter(&mrst_hdmi_i2c_adapter))
323 DRM_DEBUG_DRIVER("Failed to delete hdmi-i2c adapter\n");
324
325 i2c_dev = hdmi_dev->i2c_dev;
326 kfree(i2c_dev);
327 free_irq(dev->irq, hdmi_dev);
328}
diff --git a/drivers/staging/gma500/mrst_lvds.c b/drivers/staging/gma500/mrst_lvds.c
deleted file mode 100644
index e7999a2a3796..000000000000
--- a/drivers/staging/gma500/mrst_lvds.c
+++ /dev/null
@@ -1,407 +0,0 @@
1/*
2 * Copyright © 2006-2009 Intel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Authors:
18 * Eric Anholt <eric@anholt.net>
19 * Dave Airlie <airlied@linux.ie>
20 * Jesse Barnes <jesse.barnes@intel.com>
21 */
22
23#include <linux/i2c.h>
24#include <drm/drmP.h>
25#include <asm/mrst.h>
26
27#include "intel_bios.h"
28#include "psb_drv.h"
29#include "psb_intel_drv.h"
30#include "psb_intel_reg.h"
31#include "power.h"
32#include <linux/pm_runtime.h>
33
34/*
35 * The max/min PWM frequency in BPCR[31:17]: the smallest number that can
36 * fit in the register's 15-bit field is 1 (not 0); it is then shifted
37 * left by one bit to get the actual 16-bit value the 15 bits represent.
38 */
39#define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF
40#define BRIGHTNESS_MAX_LEVEL 100
41
42/**
43 * mrst_lvds_set_power - set the power state for the panel
44 */
45static void mrst_lvds_set_power(struct drm_device *dev,
46 struct psb_intel_output *output, bool on)
47{
48 u32 pp_status;
49 struct drm_psb_private *dev_priv = dev->dev_private;
50
51 if (!gma_power_begin(dev, true))
52 return;
53
54 if (on) {
55 REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
56 POWER_TARGET_ON);
57 do {
58 pp_status = REG_READ(PP_STATUS);
59 } while ((pp_status & (PP_ON | PP_READY)) == PP_READY);
60 dev_priv->is_lvds_on = true;
61 if (dev_priv->ops->lvds_bl_power)
62 dev_priv->ops->lvds_bl_power(dev, true);
63 } else {
64 if (dev_priv->ops->lvds_bl_power)
65 dev_priv->ops->lvds_bl_power(dev, false);
66 REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
67 ~POWER_TARGET_ON);
68 do {
69 pp_status = REG_READ(PP_STATUS);
70 } while (pp_status & PP_ON);
71 dev_priv->is_lvds_on = false;
72 pm_request_idle(&dev->pdev->dev);
73 }
74 gma_power_end(dev);
75}
76
77static void mrst_lvds_dpms(struct drm_encoder *encoder, int mode)
78{
79 struct drm_device *dev = encoder->dev;
80 struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
81
82 if (mode == DRM_MODE_DPMS_ON)
83 mrst_lvds_set_power(dev, output, true);
84 else
85 mrst_lvds_set_power(dev, output, false);
86
87 /* XXX: We never power down the LVDS pairs. */
88}
89
90static void mrst_lvds_mode_set(struct drm_encoder *encoder,
91 struct drm_display_mode *mode,
92 struct drm_display_mode *adjusted_mode)
93{
94 struct psb_intel_mode_device *mode_dev =
95 enc_to_psb_intel_output(encoder)->mode_dev;
96 struct drm_device *dev = encoder->dev;
97 struct drm_psb_private *dev_priv = dev->dev_private;
98 u32 lvds_port;
99 uint64_t v = DRM_MODE_SCALE_FULLSCREEN;
100
101 if (!gma_power_begin(dev, true))
102 return;
103
104 /*
105 * The LVDS pin pair will already have been turned on in the
106 * psb_intel_crtc_mode_set since it has a large impact on the DPLL
107 * settings.
108 */
109 lvds_port = (REG_READ(LVDS) &
110 (~LVDS_PIPEB_SELECT)) |
111 LVDS_PORT_EN |
112 LVDS_BORDER_EN;
113
114 /* If the firmware requests dithering on Moorestown, or the BIOS does
115    on Oaktrail, then enable dithering */
116 if (mode_dev->panel_wants_dither || dev_priv->lvds_dither)
117 lvds_port |= MRST_PANEL_8TO6_DITHER_ENABLE;
118
119 REG_WRITE(LVDS, lvds_port);
120
121 drm_connector_property_get_value(
122 &enc_to_psb_intel_output(encoder)->base,
123 dev->mode_config.scaling_mode_property,
124 &v);
125
126 if (v == DRM_MODE_SCALE_NO_SCALE)
127 REG_WRITE(PFIT_CONTROL, 0);
128 else if (v == DRM_MODE_SCALE_ASPECT) {
129 if ((mode->vdisplay != adjusted_mode->crtc_vdisplay) ||
130 (mode->hdisplay != adjusted_mode->crtc_hdisplay)) {
131 if ((adjusted_mode->crtc_hdisplay * mode->vdisplay) ==
132 (mode->hdisplay * adjusted_mode->crtc_vdisplay))
133 REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
134 else if ((adjusted_mode->crtc_hdisplay *
135 mode->vdisplay) > (mode->hdisplay *
136 adjusted_mode->crtc_vdisplay))
137 REG_WRITE(PFIT_CONTROL, PFIT_ENABLE |
138 PFIT_SCALING_MODE_PILLARBOX);
139 else
140 REG_WRITE(PFIT_CONTROL, PFIT_ENABLE |
141 PFIT_SCALING_MODE_LETTERBOX);
142 } else
143 REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
144 } else /*(v == DRM_MODE_SCALE_FULLSCREEN)*/
145 REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
146
147 gma_power_end(dev);
148}
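/*
 * As a concrete sketch of the DRM_MODE_SCALE_ASPECT branch above: for an
 * 800x480 requested mode on a 1024x600 panel (the adjusted/crtc timing),
 * 1024 * 480 = 491520 exceeds 800 * 600 = 480000, so the panel fitter is
 * programmed for pillarbox scaling; when the two products are equal the
 * plain PFIT_ENABLE path is taken, and letterbox is used when the panel is
 * relatively taller than the requested mode.
 */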
149
150static void mrst_lvds_prepare(struct drm_encoder *encoder)
151{
152 struct drm_device *dev = encoder->dev;
153 struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
154 struct psb_intel_mode_device *mode_dev = output->mode_dev;
155
156 if (!gma_power_begin(dev, true))
157 return;
158
159 mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
160 mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
161 BACKLIGHT_DUTY_CYCLE_MASK);
162 mrst_lvds_set_power(dev, output, false);
163 gma_power_end(dev);
164}
165
166static u32 mrst_lvds_get_max_backlight(struct drm_device *dev)
167{
168 struct drm_psb_private *dev_priv = dev->dev_private;
169 u32 ret;
170
171 if (gma_power_begin(dev, false)) {
172 ret = ((REG_READ(BLC_PWM_CTL) &
173 BACKLIGHT_MODULATION_FREQ_MASK) >>
174 BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
175
176 gma_power_end(dev);
177 } else
178 ret = ((dev_priv->saveBLC_PWM_CTL &
179 BACKLIGHT_MODULATION_FREQ_MASK) >>
180 BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
181
182 return ret;
183}
184
185static void mrst_lvds_commit(struct drm_encoder *encoder)
186{
187 struct drm_device *dev = encoder->dev;
188 struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
189 struct psb_intel_mode_device *mode_dev = output->mode_dev;
190
191 if (mode_dev->backlight_duty_cycle == 0)
192 mode_dev->backlight_duty_cycle =
193 mrst_lvds_get_max_backlight(dev);
194 mrst_lvds_set_power(dev, output, true);
195}
196
197static const struct drm_encoder_helper_funcs mrst_lvds_helper_funcs = {
198 .dpms = mrst_lvds_dpms,
199 .mode_fixup = psb_intel_lvds_mode_fixup,
200 .prepare = mrst_lvds_prepare,
201 .mode_set = mrst_lvds_mode_set,
202 .commit = mrst_lvds_commit,
203};
204
205static struct drm_display_mode lvds_configuration_modes[] = {
206 /* hard coded fixed mode for TPO LTPS LPJ040K001A */
207 { DRM_MODE("800x480", DRM_MODE_TYPE_DRIVER, 33264, 800, 836,
208 846, 1056, 0, 480, 489, 491, 525, 0, 0) },
209 /* hard coded fixed mode for LVDS 800x480 */
210 { DRM_MODE("800x480", DRM_MODE_TYPE_DRIVER, 30994, 800, 801,
211 802, 1024, 0, 480, 481, 482, 525, 0, 0) },
212 /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
213 { DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 53990, 1024, 1072,
214 1104, 1184, 0, 600, 603, 604, 608, 0, 0) },
215 /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
216 { DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 53990, 1024, 1104,
217 1136, 1184, 0, 600, 603, 604, 608, 0, 0) },
218 /* hard coded fixed mode for Sharp wsvga LVDS 1024x600 */
219 { DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 48885, 1024, 1124,
220 1204, 1312, 0, 600, 607, 610, 621, 0, 0) },
221 /* hard coded fixed mode for LVDS 1024x768 */
222 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
223 1184, 1344, 0, 768, 771, 777, 806, 0, 0) },
224 /* hard coded fixed mode for LVDS 1366x768 */
225 { DRM_MODE("1366x768", DRM_MODE_TYPE_DRIVER, 77500, 1366, 1430,
226 1558, 1664, 0, 768, 769, 770, 776, 0, 0) },
227};
228
229/* Returns the panel fixed mode from configuration. */
230
231static struct drm_display_mode *
232mrst_lvds_get_configuration_mode(struct drm_device *dev)
233{
234 struct drm_display_mode *mode = NULL;
235 struct drm_psb_private *dev_priv = dev->dev_private;
236 struct mrst_timing_info *ti = &dev_priv->gct_data.DTD;
237
238 if (dev_priv->vbt_data.size != 0x00) { /* if non-zero, then use VBT */
239 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
240 if (!mode)
241 return NULL;
242
243 mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
244 mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
245 mode->hsync_start = mode->hdisplay + \
246 ((ti->hsync_offset_hi << 8) | \
247 ti->hsync_offset_lo);
248 mode->hsync_end = mode->hsync_start + \
249 ((ti->hsync_pulse_width_hi << 8) | \
250 ti->hsync_pulse_width_lo);
251 mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \
252 ti->hblank_lo);
253 mode->vsync_start = \
254 mode->vdisplay + ((ti->vsync_offset_hi << 4) | \
255 ti->vsync_offset_lo);
256 mode->vsync_end = \
257 mode->vsync_start + ((ti->vsync_pulse_width_hi << 4) | \
258 ti->vsync_pulse_width_lo);
259 mode->vtotal = mode->vdisplay + \
260 ((ti->vblank_hi << 8) | ti->vblank_lo);
261 mode->clock = ti->pixel_clock * 10;
262#if 0
263 printk(KERN_INFO "hdisplay is %d\n", mode->hdisplay);
264 printk(KERN_INFO "vdisplay is %d\n", mode->vdisplay);
265 printk(KERN_INFO "HSS is %d\n", mode->hsync_start);
266 printk(KERN_INFO "HSE is %d\n", mode->hsync_end);
267 printk(KERN_INFO "htotal is %d\n", mode->htotal);
268 printk(KERN_INFO "VSS is %d\n", mode->vsync_start);
269 printk(KERN_INFO "VSE is %d\n", mode->vsync_end);
270 printk(KERN_INFO "vtotal is %d\n", mode->vtotal);
271 printk(KERN_INFO "clock is %d\n", mode->clock);
272#endif
273 } else
274 mode = drm_mode_duplicate(dev, &lvds_configuration_modes[2]);
275
276 drm_mode_set_name(mode);
277 drm_mode_set_crtcinfo(mode, 0);
278
279 return mode;
280}
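/*
 * Sketch of the DTD decoding above for a hypothetical 1024x600 panel:
 * hactive_hi = 0x04, hactive_lo = 0x00 give hdisplay = (0x04 << 8) | 0x00 =
 * 1024, and vactive_hi = 0x02, vactive_lo = 0x58 give vdisplay = 600. The
 * stored pixel_clock is in 10 kHz units, so a value of 5399 yields
 * mode->clock = 53990 kHz.
 */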
281
282/**
283 * mrst_lvds_init - setup LVDS connectors on this device
284 * @dev: drm device
285 *
286 * Create the connector, register the LVDS DDC bus, and try to figure out what
287 * modes we can display on the LVDS panel (if present).
288 */
289void mrst_lvds_init(struct drm_device *dev,
290 struct psb_intel_mode_device *mode_dev)
291{
292 struct psb_intel_output *psb_intel_output;
293 struct drm_connector *connector;
294 struct drm_encoder *encoder;
295 struct drm_psb_private *dev_priv =
296 (struct drm_psb_private *) dev->dev_private;
297 struct edid *edid;
298 int ret = 0;
299 struct i2c_adapter *i2c_adap;
300 struct drm_display_mode *scan; /* *modes, *bios_mode; */
301
302 psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
303 if (!psb_intel_output)
304 return;
305
306 psb_intel_output->mode_dev = mode_dev;
307 connector = &psb_intel_output->base;
308 encoder = &psb_intel_output->enc;
309 dev_priv->is_lvds_on = true;
310 drm_connector_init(dev, &psb_intel_output->base,
311 &psb_intel_lvds_connector_funcs,
312 DRM_MODE_CONNECTOR_LVDS);
313
314 drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_lvds_enc_funcs,
315 DRM_MODE_ENCODER_LVDS);
316
317 drm_mode_connector_attach_encoder(&psb_intel_output->base,
318 &psb_intel_output->enc);
319 psb_intel_output->type = INTEL_OUTPUT_LVDS;
320
321 drm_encoder_helper_add(encoder, &mrst_lvds_helper_funcs);
322 drm_connector_helper_add(connector,
323 &psb_intel_lvds_connector_helper_funcs);
324 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
325 connector->interlace_allowed = false;
326 connector->doublescan_allowed = false;
327
328 drm_connector_attach_property(connector,
329 dev->mode_config.scaling_mode_property,
330 DRM_MODE_SCALE_FULLSCREEN);
331 drm_connector_attach_property(connector,
332 dev_priv->backlight_property,
333 BRIGHTNESS_MAX_LEVEL);
334
335 mode_dev->panel_wants_dither = false;
336 if (dev_priv->vbt_data.size != 0x00)
337 mode_dev->panel_wants_dither = (dev_priv->gct_data.
338 Panel_Port_Control & MRST_PANEL_8TO6_DITHER_ENABLE);
339
340 /*
341 * LVDS discovery:
342 * 1) check for EDID on DDC
343 * 2) check for VBT data
344 * 3) check to see if LVDS is already on
345 * if none of the above, no panel
346 * 4) make sure lid is open
347 * if closed, act like it's not there for now
348 */
349
350 i2c_adap = i2c_get_adapter(dev_priv->ops->i2c_bus);
351
352 if (i2c_adap == NULL)
353 dev_err(dev->dev, "No ddc adapter available!\n");
354 /*
355 * Attempt to get the fixed panel mode from DDC. Assume that the
356 * preferred mode is the right one.
357 */
358 if (i2c_adap) {
359 edid = drm_get_edid(connector, i2c_adap);
360 if (edid) {
361 drm_mode_connector_update_edid_property(connector,
362 edid);
363 ret = drm_add_edid_modes(connector, edid);
364 kfree(edid);
365 }
366
367 list_for_each_entry(scan, &connector->probed_modes, head) {
368 if (scan->type & DRM_MODE_TYPE_PREFERRED) {
369 mode_dev->panel_fixed_mode =
370 drm_mode_duplicate(dev, scan);
371 goto out; /* FIXME: check for quirks */
372 }
373 }
374 }
375 /*
376 * If we didn't get an EDID, try getting the panel timing
377 * from the configuration data
378 */
379 mode_dev->panel_fixed_mode = mrst_lvds_get_configuration_mode(dev);
380
381 if (mode_dev->panel_fixed_mode) {
382 mode_dev->panel_fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
383 goto out; /* FIXME: check for quirks */
384 }
385
386 /* If we still don't have a mode after all that, give up. */
387 if (!mode_dev->panel_fixed_mode) {
388		dev_err(dev->dev, "Found no modes on the LVDS, ignoring it\n");
389 goto failed_find;
390 }
391
392out:
393 drm_sysfs_connector_add(connector);
394 return;
395
396failed_find:
397 dev_dbg(dev->dev, "No LVDS modes found, disabling.\n");
398 if (psb_intel_output->ddc_bus)
399 psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
400
401/* failed_ddc: */
402
403 drm_encoder_cleanup(encoder);
404 drm_connector_cleanup(connector);
405 kfree(connector);
406}
407
diff --git a/drivers/staging/gma500/power.c b/drivers/staging/gma500/power.c
deleted file mode 100644
index 408257038335..000000000000
--- a/drivers/staging/gma500/power.c
+++ /dev/null
@@ -1,318 +0,0 @@
1/**************************************************************************
2 * Copyright (c) 2009-2011, Intel Corporation.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 * Authors:
25 * Benjamin Defnet <benjamin.r.defnet@intel.com>
26 * Rajesh Poornachandran <rajesh.poornachandran@intel.com>
27 * Massively reworked
28 * Alan Cox <alan@linux.intel.com>
29 */
30
31#include "power.h"
32#include "psb_drv.h"
33#include "psb_reg.h"
34#include "psb_intel_reg.h"
35#include <linux/mutex.h>
36#include <linux/pm_runtime.h>
37
38static struct mutex power_mutex; /* Serialize power ops */
39static spinlock_t power_ctrl_lock; /* Serialize power claim */
40
41/**
42 * gma_power_init - initialise power manager
43 * @dev: our device
44 *
45 * Set up for power management tracking of our hardware.
46 */
47void gma_power_init(struct drm_device *dev)
48{
49 struct drm_psb_private *dev_priv = dev->dev_private;
50
51 /* FIXME: Move APM/OSPM base into relevant device code */
52 dev_priv->apm_base = dev_priv->apm_reg & 0xffff;
53 dev_priv->ospm_base &= 0xffff;
54
55 dev_priv->display_power = true; /* We start active */
56 dev_priv->display_count = 0; /* Currently no users */
57 dev_priv->suspended = false; /* And not suspended */
58 spin_lock_init(&power_ctrl_lock);
59 mutex_init(&power_mutex);
60
61 dev_priv->ops->init_pm(dev);
62}
63
64/**
65 * gma_power_uninit - end power manager
66 * @dev: device to end for
67 *
68 * Undo the effects of gma_power_init
69 */
70void gma_power_uninit(struct drm_device *dev)
71{
72 pm_runtime_disable(&dev->pdev->dev);
73 pm_runtime_set_suspended(&dev->pdev->dev);
74}
75
76/**
77 * gma_suspend_display - suspend the display logic
78 * @dev: our DRM device
79 *
80 * Suspend the display logic of the graphics interface
81 */
82static void gma_suspend_display(struct drm_device *dev)
83{
84 struct drm_psb_private *dev_priv = dev->dev_private;
85
86 if (!dev_priv->display_power)
87 return;
88 dev_priv->ops->save_regs(dev);
89 dev_priv->ops->power_down(dev);
90 dev_priv->display_power = false;
91}
92
93/**
94 * gma_resume_display - resume display side logic
95 * @pdev: our PCI device
96 * Resume the display hardware restoring state and enabling
97 * as necessary.
98 */
99static void gma_resume_display(struct pci_dev *pdev)
100{
101 struct drm_device *dev = pci_get_drvdata(pdev);
102 struct drm_psb_private *dev_priv = dev->dev_private;
103
104 if (dev_priv->display_power)
105 return;
106
107 /* turn on the display power island */
108 dev_priv->ops->power_up(dev);
109 dev_priv->suspended = false;
110 dev_priv->display_power = true;
111
112 PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
113 pci_write_config_word(pdev, PSB_GMCH_CTRL,
114 dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);
115 dev_priv->ops->restore_regs(dev);
116}
117
118/**
119 * gma_suspend_pci - suspend PCI side
120 * @pdev: PCI device
121 *
122 * Perform the suspend processing on our PCI device state
123 */
124static void gma_suspend_pci(struct pci_dev *pdev)
125{
126 struct drm_device *dev = pci_get_drvdata(pdev);
127 struct drm_psb_private *dev_priv = dev->dev_private;
128	u32 bsm, vbt;
129
130 if (dev_priv->suspended)
131 return;
132
133 pci_save_state(pdev);
134 pci_read_config_dword(pdev, 0x5C, &bsm);
135 dev_priv->saveBSM = bsm;
136 pci_read_config_dword(pdev, 0xFC, &vbt);
137 dev_priv->saveVBT = vbt;
138 pci_read_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, &dev_priv->msi_addr);
139 pci_read_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, &dev_priv->msi_data);
140
141 pci_disable_device(pdev);
142 pci_set_power_state(pdev, PCI_D3hot);
143
144 dev_priv->suspended = true;
145}
146
147/**
148 * gma_resume_pci - resume helper
149 * @pdev: our PCI device
150 *
151 * Perform the resume processing on our PCI device state - rewrite
152 * register state and re-enable the PCI device
153 */
154static bool gma_resume_pci(struct pci_dev *pdev)
155{
156 struct drm_device *dev = pci_get_drvdata(pdev);
157 struct drm_psb_private *dev_priv = dev->dev_private;
158 int ret;
159
160 if (!dev_priv->suspended)
161 return true;
162
163 pci_set_power_state(pdev, PCI_D0);
164 pci_restore_state(pdev);
165 pci_write_config_dword(pdev, 0x5c, dev_priv->saveBSM);
166 pci_write_config_dword(pdev, 0xFC, dev_priv->saveVBT);
167 /* restoring MSI address and data in PCIx space */
168 pci_write_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, dev_priv->msi_addr);
169 pci_write_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, dev_priv->msi_data);
170 ret = pci_enable_device(pdev);
171
172 if (ret != 0)
173 dev_err(&pdev->dev, "pci_enable failed: %d\n", ret);
174 else
175 dev_priv->suspended = false;
176 return !dev_priv->suspended;
177}
178
179/**
180 * gma_power_suspend - bus callback for suspend
181 * @_dev: our device
183 *
184 * Called back by the PCI layer during a suspend of the system. We
185 * perform the necessary shut down steps and save enough state that
186 * we can undo this when resume is called.
187 */
188int gma_power_suspend(struct device *_dev)
189{
190 struct pci_dev *pdev = container_of(_dev, struct pci_dev, dev);
191 struct drm_device *dev = pci_get_drvdata(pdev);
192 struct drm_psb_private *dev_priv = dev->dev_private;
193
194 mutex_lock(&power_mutex);
195 if (!dev_priv->suspended) {
196 if (dev_priv->display_count) {
197 mutex_unlock(&power_mutex);
198 return -EBUSY;
199 }
200 psb_irq_uninstall(dev);
201 gma_suspend_display(dev);
202 gma_suspend_pci(pdev);
203 }
204 mutex_unlock(&power_mutex);
205 return 0;
206}
207
208/**
209 * gma_power_resume - resume power
210 * @_dev: our device
211 *
212 * Resume the PCI side of the graphics and then the displays
213 */
214int gma_power_resume(struct device *_dev)
215{
216 struct pci_dev *pdev = container_of(_dev, struct pci_dev, dev);
217 struct drm_device *dev = pci_get_drvdata(pdev);
218
219 mutex_lock(&power_mutex);
220 gma_resume_pci(pdev);
221 gma_resume_display(pdev);
222 psb_irq_preinstall(dev);
223 psb_irq_postinstall(dev);
224 mutex_unlock(&power_mutex);
225 return 0;
226}
227
228/**
229 * gma_power_is_on - return true if power is on
230 * @dev: our DRM device
231 *
232 * Returns true if the display island power is on at this moment
233 */
234bool gma_power_is_on(struct drm_device *dev)
235{
236 struct drm_psb_private *dev_priv = dev->dev_private;
237 return dev_priv->display_power;
238}
239
240/**
241 * gma_power_begin - begin requiring power
242 * @dev: our DRM device
243 * @force_on: true to force power on
244 *
245 * Begin an action that requires the display power island to be enabled.
246 * We refcount the islands.
247 */
248bool gma_power_begin(struct drm_device *dev, bool force_on)
249{
250 struct drm_psb_private *dev_priv = dev->dev_private;
251 int ret;
252 unsigned long flags;
253
254 spin_lock_irqsave(&power_ctrl_lock, flags);
255 /* Power already on ? */
256 if (dev_priv->display_power) {
257 dev_priv->display_count++;
258 pm_runtime_get(&dev->pdev->dev);
259 spin_unlock_irqrestore(&power_ctrl_lock, flags);
260 return true;
261 }
262 if (force_on == false)
263 goto out_false;
264
265 /* Ok power up needed */
266 ret = gma_resume_pci(dev->pdev);
267	if (ret) {
268 /* FIXME: we want to defer this for Medfield/Oaktrail */
269 gma_resume_display(dev->pdev);
270 psb_irq_preinstall(dev);
271 psb_irq_postinstall(dev);
272 pm_runtime_get(&dev->pdev->dev);
273 dev_priv->display_count++;
274 spin_unlock_irqrestore(&power_ctrl_lock, flags);
275 return true;
276 }
277out_false:
278 spin_unlock_irqrestore(&power_ctrl_lock, flags);
279 return false;
280}
281
282/**
283 * gma_power_end - end use of power
284 * @dev: Our DRM device
285 *
286 * Indicate that a period during which the display island power was
287 * needed, as requested via gma_power_begin(), has completed.
288 */
289void gma_power_end(struct drm_device *dev)
290{
291 struct drm_psb_private *dev_priv = dev->dev_private;
292 unsigned long flags;
293 spin_lock_irqsave(&power_ctrl_lock, flags);
294 dev_priv->display_count--;
295 WARN_ON(dev_priv->display_count < 0);
296 spin_unlock_irqrestore(&power_ctrl_lock, flags);
297 pm_runtime_put(&dev->pdev->dev);
298}
299
300int psb_runtime_suspend(struct device *dev)
301{
302 return gma_power_suspend(dev);
303}
304
305int psb_runtime_resume(struct device *dev)
306{
307	return gma_power_resume(dev);
308}
309
310int psb_runtime_idle(struct device *dev)
311{
312 struct drm_device *drmdev = pci_get_drvdata(to_pci_dev(dev));
313 struct drm_psb_private *dev_priv = drmdev->dev_private;
314 if (dev_priv->display_count)
315 return 0;
316 else
317 return 1;
318}
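
The three psb_runtime_* helpers above are thin wrappers meant to be plugged into the kernel's runtime PM callbacks for the device. A minimal sketch of that wiring follows, assuming the prototypes from power.h; the table name psb_pm_ops_example and the exact set of populated fields are illustrative rather than taken from this driver.

#include <linux/pm.h>
#include "power.h"

/* Illustrative dev_pm_ops table; the driver's real table may differ. */
static const struct dev_pm_ops psb_pm_ops_example = {
	/* System sleep transitions reuse the same entry points */
	.suspend = gma_power_suspend,
	.resume = gma_power_resume,
	/* Runtime PM hooks provided at the end of power.c */
	.runtime_suspend = psb_runtime_suspend,
	.runtime_resume = psb_runtime_resume,
	.runtime_idle = psb_runtime_idle,
};
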
diff --git a/drivers/staging/gma500/power.h b/drivers/staging/gma500/power.h
deleted file mode 100644
index 1969d2ecb328..000000000000
--- a/drivers/staging/gma500/power.h
+++ /dev/null
@@ -1,67 +0,0 @@
1/**************************************************************************
2 * Copyright (c) 2009-2011, Intel Corporation.
3 * All Rights Reserved.
4
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 * Authors:
25 * Benjamin Defnet <benjamin.r.defnet@intel.com>
26 * Rajesh Poornachandran <rajesh.poornachandran@intel.com>
27 * Massively reworked
28 * Alan Cox <alan@linux.intel.com>
29 */
30#ifndef _PSB_POWERMGMT_H_
31#define _PSB_POWERMGMT_H_
32
33#include <linux/pci.h>
34#include <drm/drmP.h>
35
36void gma_power_init(struct drm_device *dev);
37void gma_power_uninit(struct drm_device *dev);
38
39/*
40 * The kernel bus power management will call these functions
41 */
42int gma_power_suspend(struct device *dev);
43int gma_power_resume(struct device *dev);
44
45/*
46 * These are the functions the driver should use to wrap all hw access
47 * (i.e. register reads and writes)
48 */
49bool gma_power_begin(struct drm_device *dev, bool force);
50void gma_power_end(struct drm_device *dev);
51
52/*
53 * Use this function to do an instantaneous check for if the hw is on.
54 * Only use this in cases where you know the mutex is already held, such
55 * as during IRQ install/uninstall, and you need to avoid a deadlock.
56 * Otherwise use gma_power_begin().
57 */
58bool gma_power_is_on(struct drm_device *dev);
59
60/*
61 * GFX-Runtime PM callbacks
62 */
63int psb_runtime_suspend(struct device *dev);
64int psb_runtime_resume(struct device *dev);
65int psb_runtime_idle(struct device *dev);
66
67#endif /*_PSB_POWERMGMT_H_*/
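
The begin/end pair above is intended to bracket every hardware access so the display island is guaranteed to stay powered for its duration. A minimal sketch of that pattern, assuming the PSB_RVDC32() register-read macro used elsewhere in this driver (which references a local dev_priv); the helper name my_read_vdc_reg is made up for illustration.

static u32 my_read_vdc_reg(struct drm_device *dev, u32 reg)
{
	struct drm_psb_private *dev_priv = dev->dev_private; /* used by PSB_RVDC32() */
	u32 val = 0;

	/* Take a display power reference; false = do not force a power-up */
	if (gma_power_begin(dev, false)) {
		val = PSB_RVDC32(reg);	/* island is powered while the reference is held */
		gma_power_end(dev);	/* drop the reference again */
	}
	return val;
}
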
diff --git a/drivers/staging/gma500/psb_device.c b/drivers/staging/gma500/psb_device.c
deleted file mode 100644
index b97aa78519f2..000000000000
--- a/drivers/staging/gma500/psb_device.c
+++ /dev/null
@@ -1,321 +0,0 @@
1/**************************************************************************
2 * Copyright (c) 2011, Intel Corporation.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 **************************************************************************/
19
20#include <linux/backlight.h>
21#include <drm/drmP.h>
22#include <drm/drm.h>
23#include "psb_drm.h"
24#include "psb_drv.h"
25#include "psb_reg.h"
26#include "psb_intel_reg.h"
27#include "intel_bios.h"
28
29
30static int psb_output_init(struct drm_device *dev)
31{
32 struct drm_psb_private *dev_priv = dev->dev_private;
33 psb_intel_lvds_init(dev, &dev_priv->mode_dev);
34 psb_intel_sdvo_init(dev, SDVOB);
35 return 0;
36}
37
38#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
39
40/*
41 * Poulsbo Backlight Interfaces
42 */
43
44#define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */
45#define BLC_PWM_FREQ_CALC_CONSTANT 32
46#define MHz 1000000
47
48#define PSB_BLC_PWM_PRECISION_FACTOR 10
49#define PSB_BLC_MAX_PWM_REG_FREQ 0xFFFE
50#define PSB_BLC_MIN_PWM_REG_FREQ 0x2
51
52#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
53#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
54
55static int psb_brightness;
56static struct backlight_device *psb_backlight_device;
57
58static int psb_get_brightness(struct backlight_device *bd)
59{
60 /* return locally cached var instead of HW read (due to DPST etc.) */
61 /* FIXME: ideally return actual value in case firmware fiddled with
62 it */
63 return psb_brightness;
64}
65
66
67static int psb_backlight_setup(struct drm_device *dev)
68{
69 struct drm_psb_private *dev_priv = dev->dev_private;
70 unsigned long core_clock;
71 /* u32 bl_max_freq; */
72 /* unsigned long value; */
73 u16 bl_max_freq;
74 uint32_t value;
75 uint32_t blc_pwm_precision_factor;
76
77 /* get bl_max_freq and pol from dev_priv*/
78 if (!dev_priv->lvds_bl) {
79		dev_err(dev->dev, "No valid LVDS backlight info\n");
80 return -ENOENT;
81 }
82 bl_max_freq = dev_priv->lvds_bl->freq;
83 blc_pwm_precision_factor = PSB_BLC_PWM_PRECISION_FACTOR;
84
85 core_clock = dev_priv->core_freq;
86
87 value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
88 value *= blc_pwm_precision_factor;
89 value /= bl_max_freq;
90 value /= blc_pwm_precision_factor;
91
92 if (value > (unsigned long long)PSB_BLC_MAX_PWM_REG_FREQ ||
93 value < (unsigned long long)PSB_BLC_MIN_PWM_REG_FREQ)
94 return -ERANGE;
95 else {
96 value &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
97 REG_WRITE(BLC_PWM_CTL,
98 (value << PSB_BACKLIGHT_PWM_CTL_SHIFT) | (value));
99 }
100 return 0;
101}
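
Worked example of the PWM divisor computed above, using assumed inputs (both values are illustrative, not taken from any particular VBT):

/*
 * Assume core_clock = 200 (MHz) and bl_max_freq = 200 (dev_priv->lvds_bl->freq):
 *
 *	value = (200 * 1000000) / 32	=  6250000
 *	value *= 10			= 62500000
 *	value /= 200			=   312500
 *	value /= 10			=    31250
 *
 * 31250 lies inside [PSB_BLC_MIN_PWM_REG_FREQ, PSB_BLC_MAX_PWM_REG_FREQ],
 * survives the polarity-bit mask unchanged (it is even), and is written to
 * both the high and the low half of BLC_PWM_CTL.
 */
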
102
103static int psb_set_brightness(struct backlight_device *bd)
104{
105 struct drm_device *dev = bl_get_data(psb_backlight_device);
106 int level = bd->props.brightness;
107
108	/* Only percentages from 1 to 100 are valid */
109 if (level < 1)
110 level = 1;
111
112 psb_intel_lvds_set_brightness(dev, level);
113 psb_brightness = level;
114 return 0;
115}
116
117static const struct backlight_ops psb_ops = {
118 .get_brightness = psb_get_brightness,
119 .update_status = psb_set_brightness,
120};
121
122static int psb_backlight_init(struct drm_device *dev)
123{
124 struct drm_psb_private *dev_priv = dev->dev_private;
125 int ret;
126 struct backlight_properties props;
127
128 memset(&props, 0, sizeof(struct backlight_properties));
129 props.max_brightness = 100;
130 props.type = BACKLIGHT_PLATFORM;
131
132 psb_backlight_device = backlight_device_register("psb-bl",
133 NULL, (void *)dev, &psb_ops, &props);
134 if (IS_ERR(psb_backlight_device))
135 return PTR_ERR(psb_backlight_device);
136
137 ret = psb_backlight_setup(dev);
138 if (ret < 0) {
139 backlight_device_unregister(psb_backlight_device);
140 psb_backlight_device = NULL;
141 return ret;
142 }
143 psb_backlight_device->props.brightness = 100;
144 psb_backlight_device->props.max_brightness = 100;
145 backlight_update_status(psb_backlight_device);
146 dev_priv->backlight_device = psb_backlight_device;
147 return 0;
148}
149
150#endif
151
152/*
153 * Provide the Poulsbo specific chip logic and low level methods
154 * for power management
155 */
156
157static void psb_init_pm(struct drm_device *dev)
158{
159 struct drm_psb_private *dev_priv = dev->dev_private;
160
161 u32 gating = PSB_RSGX32(PSB_CR_CLKGATECTL);
162 gating &= ~3; /* Disable 2D clock gating */
163 gating |= 1;
164 PSB_WSGX32(gating, PSB_CR_CLKGATECTL);
165 PSB_RSGX32(PSB_CR_CLKGATECTL);
166}
167
168/**
169 * psb_save_display_registers - save registers lost on suspend
170 * @dev: our DRM device
171 *
172 * Save the state we need in order to be able to restore the interface
173 * upon resume from suspend
174 */
175static int psb_save_display_registers(struct drm_device *dev)
176{
177 struct drm_psb_private *dev_priv = dev->dev_private;
178 struct drm_crtc *crtc;
179 struct drm_connector *connector;
180
181 /* Display arbitration control + watermarks */
182 dev_priv->saveDSPARB = PSB_RVDC32(DSPARB);
183 dev_priv->saveDSPFW1 = PSB_RVDC32(DSPFW1);
184 dev_priv->saveDSPFW2 = PSB_RVDC32(DSPFW2);
185 dev_priv->saveDSPFW3 = PSB_RVDC32(DSPFW3);
186 dev_priv->saveDSPFW4 = PSB_RVDC32(DSPFW4);
187 dev_priv->saveDSPFW5 = PSB_RVDC32(DSPFW5);
188 dev_priv->saveDSPFW6 = PSB_RVDC32(DSPFW6);
189 dev_priv->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
190
191 /* Save crtc and output state */
192 mutex_lock(&dev->mode_config.mutex);
193 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
194 if (drm_helper_crtc_in_use(crtc))
195 crtc->funcs->save(crtc);
196 }
197
198 list_for_each_entry(connector, &dev->mode_config.connector_list, head)
199 connector->funcs->save(connector);
200
201 mutex_unlock(&dev->mode_config.mutex);
202 return 0;
203}
204
205/**
206 * psb_restore_display_registers - restore lost register state
207 * @dev: our DRM device
208 *
209 * Restore register state that was lost during suspend and resume.
210 */
211static int psb_restore_display_registers(struct drm_device *dev)
212{
213 struct drm_psb_private *dev_priv = dev->dev_private;
214 struct drm_crtc *crtc;
215 struct drm_connector *connector;
216
217 /* Display arbitration + watermarks */
218 PSB_WVDC32(dev_priv->saveDSPARB, DSPARB);
219 PSB_WVDC32(dev_priv->saveDSPFW1, DSPFW1);
220 PSB_WVDC32(dev_priv->saveDSPFW2, DSPFW2);
221 PSB_WVDC32(dev_priv->saveDSPFW3, DSPFW3);
222 PSB_WVDC32(dev_priv->saveDSPFW4, DSPFW4);
223 PSB_WVDC32(dev_priv->saveDSPFW5, DSPFW5);
224 PSB_WVDC32(dev_priv->saveDSPFW6, DSPFW6);
225 PSB_WVDC32(dev_priv->saveCHICKENBIT, DSPCHICKENBIT);
226
227 /*make sure VGA plane is off. it initializes to on after reset!*/
228 PSB_WVDC32(0x80000000, VGACNTRL);
229
230 mutex_lock(&dev->mode_config.mutex);
231 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
232 if (drm_helper_crtc_in_use(crtc))
233 crtc->funcs->restore(crtc);
234
235 list_for_each_entry(connector, &dev->mode_config.connector_list, head)
236 connector->funcs->restore(connector);
237
238 mutex_unlock(&dev->mode_config.mutex);
239 return 0;
240}
241
242static int psb_power_down(struct drm_device *dev)
243{
244 return 0;
245}
246
247static int psb_power_up(struct drm_device *dev)
248{
249 return 0;
250}
251
252static void psb_get_core_freq(struct drm_device *dev)
253{
254 uint32_t clock;
255 struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
256 struct drm_psb_private *dev_priv = dev->dev_private;
257
258 /*pci_write_config_dword(pci_root, 0xD4, 0x00C32004);*/
259 /*pci_write_config_dword(pci_root, 0xD0, 0xE0033000);*/
260
261 pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
262 pci_read_config_dword(pci_root, 0xD4, &clock);
263 pci_dev_put(pci_root);
264
265 switch (clock & 0x07) {
266 case 0:
267 dev_priv->core_freq = 100;
268 break;
269 case 1:
270 dev_priv->core_freq = 133;
271 break;
272 case 2:
273 dev_priv->core_freq = 150;
274 break;
275 case 3:
276 dev_priv->core_freq = 178;
277 break;
278 case 4:
279 dev_priv->core_freq = 200;
280 break;
281 case 5:
282 case 6:
283 case 7:
284		dev_priv->core_freq = 266;
		break;
285 default:
286 dev_priv->core_freq = 0;
287 }
288}
289
290static int psb_chip_setup(struct drm_device *dev)
291{
292 psb_get_core_freq(dev);
293 gma_intel_opregion_init(dev);
294 psb_intel_init_bios(dev);
295 return 0;
296}
297
298const struct psb_ops psb_chip_ops = {
299 .name = "Poulsbo",
300 .accel_2d = 1,
301 .pipes = 2,
302 .crtcs = 2,
303 .sgx_offset = PSB_SGX_OFFSET,
304 .chip_setup = psb_chip_setup,
305
306 .crtc_helper = &psb_intel_helper_funcs,
307 .crtc_funcs = &psb_intel_crtc_funcs,
308
309 .output_init = psb_output_init,
310
311#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
312 .backlight_init = psb_backlight_init,
313#endif
314
315 .init_pm = psb_init_pm,
316 .save_regs = psb_save_display_registers,
317 .restore_regs = psb_restore_display_registers,
318 .power_down = psb_power_down,
319 .power_up = psb_power_up,
320};
321
diff --git a/drivers/staging/gma500/psb_drm.h b/drivers/staging/gma500/psb_drm.h
deleted file mode 100644
index 0da846835688..000000000000
--- a/drivers/staging/gma500/psb_drm.h
+++ /dev/null
@@ -1,219 +0,0 @@
1/**************************************************************************
2 * Copyright (c) 2007-2011, Intel Corporation.
3 * All Rights Reserved.
4 * Copyright (c) 2008, Tungsten Graphics Inc. Cedar Park, TX., USA.
5 * All Rights Reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 **************************************************************************/
21
22#ifndef _PSB_DRM_H_
23#define _PSB_DRM_H_
24
25#define PSB_NUM_PIPE 3
26
27#define PSB_GPU_ACCESS_READ (1ULL << 32)
28#define PSB_GPU_ACCESS_WRITE (1ULL << 33)
29#define PSB_GPU_ACCESS_MASK (PSB_GPU_ACCESS_READ | PSB_GPU_ACCESS_WRITE)
30
31#define PSB_BO_FLAG_COMMAND (1ULL << 52)
32
33/*
34 * Feedback components:
35 */
36
37struct drm_psb_sizes_arg {
38 u32 ta_mem_size;
39 u32 mmu_size;
40 u32 pds_size;
41 u32 rastgeom_size;
42 u32 tt_size;
43 u32 vram_size;
44};
45
46struct drm_psb_dpst_lut_arg {
47 uint8_t lut[256];
48 int output_id;
49};
50
51#define PSB_DC_CRTC_SAVE 0x01
52#define PSB_DC_CRTC_RESTORE 0x02
53#define PSB_DC_OUTPUT_SAVE 0x04
54#define PSB_DC_OUTPUT_RESTORE 0x08
55#define PSB_DC_CRTC_MASK 0x03
56#define PSB_DC_OUTPUT_MASK 0x0C
57
58struct drm_psb_dc_state_arg {
59 u32 flags;
60 u32 obj_id;
61};
62
63struct drm_psb_mode_operation_arg {
64 u32 obj_id;
65 u16 operation;
66 struct drm_mode_modeinfo mode;
67 void *data;
68};
69
70struct drm_psb_stolen_memory_arg {
71 u32 base;
72 u32 size;
73};
74
75/*Display Register Bits*/
76#define REGRWBITS_PFIT_CONTROLS (1 << 0)
77#define REGRWBITS_PFIT_AUTOSCALE_RATIOS (1 << 1)
78#define REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS (1 << 2)
79#define REGRWBITS_PIPEASRC (1 << 3)
80#define REGRWBITS_PIPEBSRC (1 << 4)
81#define REGRWBITS_VTOTAL_A (1 << 5)
82#define REGRWBITS_VTOTAL_B (1 << 6)
83#define REGRWBITS_DSPACNTR (1 << 8)
84#define REGRWBITS_DSPBCNTR (1 << 9)
85#define REGRWBITS_DSPCCNTR (1 << 10)
86
87/*Overlay Register Bits*/
88#define OV_REGRWBITS_OVADD (1 << 0)
89#define OV_REGRWBITS_OGAM_ALL (1 << 1)
90
91#define OVC_REGRWBITS_OVADD (1 << 2)
92#define OVC_REGRWBITS_OGAM_ALL (1 << 3)
93
94struct drm_psb_register_rw_arg {
95 u32 b_force_hw_on;
96
97 u32 display_read_mask;
98 u32 display_write_mask;
99
100 struct {
101 u32 pfit_controls;
102 u32 pfit_autoscale_ratios;
103 u32 pfit_programmed_scale_ratios;
104 u32 pipeasrc;
105 u32 pipebsrc;
106 u32 vtotal_a;
107 u32 vtotal_b;
108 } display;
109
110 u32 overlay_read_mask;
111 u32 overlay_write_mask;
112
113 struct {
114 u32 OVADD;
115 u32 OGAMC0;
116 u32 OGAMC1;
117 u32 OGAMC2;
118 u32 OGAMC3;
119 u32 OGAMC4;
120 u32 OGAMC5;
121 u32 IEP_ENABLED;
122 u32 IEP_BLE_MINMAX;
123 u32 IEP_BSSCC_CONTROL;
124 u32 b_wait_vblank;
125 } overlay;
126
127 u32 sprite_enable_mask;
128 u32 sprite_disable_mask;
129
130 struct {
131 u32 dspa_control;
132 u32 dspa_key_value;
133 u32 dspa_key_mask;
134 u32 dspc_control;
135 u32 dspc_stride;
136 u32 dspc_position;
137 u32 dspc_linear_offset;
138 u32 dspc_size;
139 u32 dspc_surface;
140 } sprite;
141
142 u32 subpicture_enable_mask;
143 u32 subpicture_disable_mask;
144};
145
146/* Controlling the kernel modesetting buffers */
147
148#define DRM_PSB_SIZES 0x07
149#define DRM_PSB_FUSE_REG 0x08
150#define DRM_PSB_DC_STATE 0x0A
151#define DRM_PSB_ADB 0x0B
152#define DRM_PSB_MODE_OPERATION 0x0C
153#define DRM_PSB_STOLEN_MEMORY 0x0D
154#define DRM_PSB_REGISTER_RW 0x0E
155
156/*
157 * NOTE: Add new commands here, incrementing the
158 * values below, and update their corresponding
159 * defines wherever else they are defined so the
160 * two stay in sync.
161 */
162
163#define DRM_PSB_GEM_CREATE 0x10
164#define DRM_PSB_2D_OP 0x11
165#define DRM_PSB_GEM_MMAP 0x12
166#define DRM_PSB_DPST 0x1B
167#define DRM_PSB_GAMMA 0x1C
168#define DRM_PSB_DPST_BL 0x1D
169#define DRM_PSB_GET_PIPE_FROM_CRTC_ID 0x1F
170
171#define PSB_MODE_OPERATION_MODE_VALID 0x01
172#define PSB_MODE_OPERATION_SET_DC_BASE 0x02
173
174struct drm_psb_get_pipe_from_crtc_id_arg {
175 /** ID of CRTC being requested **/
176 u32 crtc_id;
177
178 /** pipe of requested CRTC **/
179 u32 pipe;
180};
181
182/* FIXME: move this into a medfield header once we are sure it isn't needed for an
183 ioctl */
184struct psb_drm_dpu_rect {
185 int x, y;
186 int width, height;
187};
188
189struct drm_psb_gem_create {
190 __u64 size;
191 __u32 handle;
192 __u32 flags;
193#define PSB_GEM_CREATE_STOLEN 1 /* Stolen memory can be used */
194};
195
196#define PSB_2D_OP_BUFLEN 16
197
198struct drm_psb_2d_op {
199 __u32 src; /* Handles, only src supported right now */
200 __u32 dst;
201 __u32 mask;
202 __u32 pat;
203 __u32 size; /* In dwords of command */
204 __u32 spare; /* And bumps array to u64 align */
205 __u32 cmd[PSB_2D_OP_BUFLEN];
206};
207
208struct drm_psb_gem_mmap {
209 __u32 handle;
210 __u32 pad;
211 /**
212 * Fake offset to use for subsequent mmap call
213 *
214 * This is a fixed-size type for 32/64 compatibility.
215 */
216 __u64 offset;
217};
218
219#endif
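
The two GEM structures above form the userspace-visible allocation path: create an object, ask for its fake mmap offset, then mmap() that offset on the DRM file descriptor. A hedged userspace sketch follows; the ioctl numbers are rebuilt from the command codes above in the same way psb_drv.c does, psb_map_bo() is a made-up helper name, and error handling is omitted for brevity.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/drm.h>
#include "psb_drm.h"	/* this header, exported to userspace */

#define DRM_IOCTL_PSB_GEM_CREATE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_GEM_CREATE, struct drm_psb_gem_create)
#define DRM_IOCTL_PSB_GEM_MMAP \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_GEM_MMAP, struct drm_psb_gem_mmap)

/* fd is an already-open DRM device node, e.g. /dev/dri/card0 */
static void *psb_map_bo(int fd, uint64_t size)
{
	struct drm_psb_gem_create create;
	struct drm_psb_gem_mmap map;

	memset(&create, 0, sizeof(create));
	create.size = size;
	ioctl(fd, DRM_IOCTL_PSB_GEM_CREATE, &create);	/* fills create.handle */

	memset(&map, 0, sizeof(map));
	map.handle = create.handle;
	ioctl(fd, DRM_IOCTL_PSB_GEM_MMAP, &map);	/* fills map.offset */

	/* The fake offset selects this object within the device's mmap space */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, map.offset);
}
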
diff --git a/drivers/staging/gma500/psb_drv.c b/drivers/staging/gma500/psb_drv.c
deleted file mode 100644
index 95816808f867..000000000000
--- a/drivers/staging/gma500/psb_drv.c
+++ /dev/null
@@ -1,1230 +0,0 @@
1/**************************************************************************
2 * Copyright (c) 2007-2011, Intel Corporation.
3 * All Rights Reserved.
4 * Copyright (c) 2008, Tungsten Graphics, Inc. Cedar Park, TX., USA.
5 * All Rights Reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 **************************************************************************/
21
22#include <drm/drmP.h>
23#include <drm/drm.h>
24#include "psb_drm.h"
25#include "psb_drv.h"
26#include "framebuffer.h"
27#include "psb_reg.h"
28#include "psb_intel_reg.h"
29#include "intel_bios.h"
30#include "mid_bios.h"
31#include "mdfld_dsi_dbi.h"
32#include <drm/drm_pciids.h>
33#include "power.h"
34#include <linux/cpu.h>
35#include <linux/notifier.h>
36#include <linux/spinlock.h>
37#include <linux/pm_runtime.h>
38#include <linux/module.h>
39#include <acpi/video.h>
40
41static int drm_psb_trap_pagefaults;
42
43int drm_psb_no_fb;
44
45static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
46
47MODULE_PARM_DESC(no_fb, "Disable FBdev");
48MODULE_PARM_DESC(trap_pagefaults, "Error and reset on MMU pagefaults");
49module_param_named(no_fb, drm_psb_no_fb, int, 0600);
50module_param_named(trap_pagefaults, drm_psb_trap_pagefaults, int, 0600);
51
52
53static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
54 { 0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
55 { 0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
56#if defined(CONFIG_DRM_PSB_MRST)
57 { 0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mrst_chip_ops},
58 { 0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mrst_chip_ops},
59 { 0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mrst_chip_ops},
60 { 0x8086, 0x4103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mrst_chip_ops},
61 { 0x8086, 0x4104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mrst_chip_ops},
62 { 0x8086, 0x4105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mrst_chip_ops},
63 { 0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mrst_chip_ops},
64 { 0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mrst_chip_ops},
65#endif
66#if defined(CONFIG_DRM_PSB_MFLD)
67 { 0x8086, 0x0130, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
68 { 0x8086, 0x0131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
69 { 0x8086, 0x0132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
70 { 0x8086, 0x0133, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
71 { 0x8086, 0x0134, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
72 { 0x8086, 0x0135, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
73 { 0x8086, 0x0136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
74 { 0x8086, 0x0137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
75#endif
76#if defined(CONFIG_DRM_PSB_CDV)
77 { 0x8086, 0x0be0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
78 { 0x8086, 0x0be1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
79 { 0x8086, 0x0be2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
80 { 0x8086, 0x0be3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
81 { 0x8086, 0x0be4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
82 { 0x8086, 0x0be5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
83 { 0x8086, 0x0be6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
84 { 0x8086, 0x0be7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
85#endif
86 { 0, 0, 0}
87};
88MODULE_DEVICE_TABLE(pci, pciidlist);
89
90/*
91 * Standard IOCTLs.
92 */
93
94#define DRM_IOCTL_PSB_SIZES \
95 DRM_IOR(DRM_PSB_SIZES + DRM_COMMAND_BASE, \
96 struct drm_psb_sizes_arg)
97#define DRM_IOCTL_PSB_FUSE_REG \
98 DRM_IOWR(DRM_PSB_FUSE_REG + DRM_COMMAND_BASE, uint32_t)
99#define DRM_IOCTL_PSB_DC_STATE \
100 DRM_IOW(DRM_PSB_DC_STATE + DRM_COMMAND_BASE, \
101 struct drm_psb_dc_state_arg)
102#define DRM_IOCTL_PSB_ADB \
103 DRM_IOWR(DRM_PSB_ADB + DRM_COMMAND_BASE, uint32_t)
104#define DRM_IOCTL_PSB_MODE_OPERATION \
105 DRM_IOWR(DRM_PSB_MODE_OPERATION + DRM_COMMAND_BASE, \
106 struct drm_psb_mode_operation_arg)
107#define DRM_IOCTL_PSB_STOLEN_MEMORY \
108 DRM_IOWR(DRM_PSB_STOLEN_MEMORY + DRM_COMMAND_BASE, \
109 struct drm_psb_stolen_memory_arg)
110#define DRM_IOCTL_PSB_REGISTER_RW \
111 DRM_IOWR(DRM_PSB_REGISTER_RW + DRM_COMMAND_BASE, \
112 struct drm_psb_register_rw_arg)
113#define DRM_IOCTL_PSB_DPST \
114 DRM_IOWR(DRM_PSB_DPST + DRM_COMMAND_BASE, \
115 uint32_t)
116#define DRM_IOCTL_PSB_GAMMA \
117 DRM_IOWR(DRM_PSB_GAMMA + DRM_COMMAND_BASE, \
118 struct drm_psb_dpst_lut_arg)
119#define DRM_IOCTL_PSB_DPST_BL \
120 DRM_IOWR(DRM_PSB_DPST_BL + DRM_COMMAND_BASE, \
121 uint32_t)
122#define DRM_IOCTL_PSB_GET_PIPE_FROM_CRTC_ID \
123 DRM_IOWR(DRM_PSB_GET_PIPE_FROM_CRTC_ID + DRM_COMMAND_BASE, \
124 struct drm_psb_get_pipe_from_crtc_id_arg)
125#define DRM_IOCTL_PSB_GEM_CREATE \
126 DRM_IOWR(DRM_PSB_GEM_CREATE + DRM_COMMAND_BASE, \
127 struct drm_psb_gem_create)
128#define DRM_IOCTL_PSB_2D_OP \
129 DRM_IOW(DRM_PSB_2D_OP + DRM_COMMAND_BASE, \
130 struct drm_psb_2d_op)
131#define DRM_IOCTL_PSB_GEM_MMAP \
132 DRM_IOWR(DRM_PSB_GEM_MMAP + DRM_COMMAND_BASE, \
133 struct drm_psb_gem_mmap)
134
135static int psb_sizes_ioctl(struct drm_device *dev, void *data,
136 struct drm_file *file_priv);
137static int psb_dc_state_ioctl(struct drm_device *dev, void *data,
138 struct drm_file *file_priv);
139static int psb_adb_ioctl(struct drm_device *dev, void *data,
140 struct drm_file *file_priv);
141static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
142 struct drm_file *file_priv);
143static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
144 struct drm_file *file_priv);
145static int psb_register_rw_ioctl(struct drm_device *dev, void *data,
146 struct drm_file *file_priv);
147static int psb_dpst_ioctl(struct drm_device *dev, void *data,
148 struct drm_file *file_priv);
149static int psb_gamma_ioctl(struct drm_device *dev, void *data,
150 struct drm_file *file_priv);
151static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
152 struct drm_file *file_priv);
153
154#define PSB_IOCTL_DEF(ioctl, func, flags) \
155 [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func}
156
157static struct drm_ioctl_desc psb_ioctls[] = {
158 PSB_IOCTL_DEF(DRM_IOCTL_PSB_SIZES, psb_sizes_ioctl, DRM_AUTH),
159 PSB_IOCTL_DEF(DRM_IOCTL_PSB_DC_STATE, psb_dc_state_ioctl, DRM_AUTH),
160 PSB_IOCTL_DEF(DRM_IOCTL_PSB_ADB, psb_adb_ioctl, DRM_AUTH),
161 PSB_IOCTL_DEF(DRM_IOCTL_PSB_MODE_OPERATION, psb_mode_operation_ioctl,
162 DRM_AUTH),
163 PSB_IOCTL_DEF(DRM_IOCTL_PSB_STOLEN_MEMORY, psb_stolen_memory_ioctl,
164 DRM_AUTH),
165 PSB_IOCTL_DEF(DRM_IOCTL_PSB_REGISTER_RW, psb_register_rw_ioctl,
166 DRM_AUTH),
167 PSB_IOCTL_DEF(DRM_IOCTL_PSB_DPST, psb_dpst_ioctl, DRM_AUTH),
168 PSB_IOCTL_DEF(DRM_IOCTL_PSB_GAMMA, psb_gamma_ioctl, DRM_AUTH),
169 PSB_IOCTL_DEF(DRM_IOCTL_PSB_DPST_BL, psb_dpst_bl_ioctl, DRM_AUTH),
170 PSB_IOCTL_DEF(DRM_IOCTL_PSB_GET_PIPE_FROM_CRTC_ID,
171 psb_intel_get_pipe_from_crtc_id, 0),
172 PSB_IOCTL_DEF(DRM_IOCTL_PSB_GEM_CREATE, psb_gem_create_ioctl,
173 DRM_UNLOCKED | DRM_AUTH),
174 PSB_IOCTL_DEF(DRM_IOCTL_PSB_2D_OP, psb_accel_ioctl,
175						DRM_UNLOCKED | DRM_AUTH),
176 PSB_IOCTL_DEF(DRM_IOCTL_PSB_GEM_MMAP, psb_gem_mmap_ioctl,
177 DRM_UNLOCKED | DRM_AUTH),
178};
179
180static void psb_lastclose(struct drm_device *dev)
181{
182 return;
183}
184
185static void psb_do_takedown(struct drm_device *dev)
186{
187}
188
189static int psb_do_init(struct drm_device *dev)
190{
191 struct drm_psb_private *dev_priv = dev->dev_private;
192 struct psb_gtt *pg = &dev_priv->gtt;
193
194 uint32_t stolen_gtt;
195
196 int ret = -ENOMEM;
197
198 if (pg->mmu_gatt_start & 0x0FFFFFFF) {
199 dev_err(dev->dev, "Gatt must be 256M aligned. This is a bug.\n");
200 ret = -EINVAL;
201 goto out_err;
202 }
203
204
205 stolen_gtt = (pg->stolen_size >> PAGE_SHIFT) * 4;
206 stolen_gtt = (stolen_gtt + PAGE_SIZE - 1) >> PAGE_SHIFT;
207 stolen_gtt =
208 (stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages;
209
210 dev_priv->gatt_free_offset = pg->mmu_gatt_start +
211 (stolen_gtt << PAGE_SHIFT) * 1024;
212
213 if (1 || drm_debug) {
214 uint32_t core_id = PSB_RSGX32(PSB_CR_CORE_ID);
215 uint32_t core_rev = PSB_RSGX32(PSB_CR_CORE_REVISION);
216 DRM_INFO("SGX core id = 0x%08x\n", core_id);
217 DRM_INFO("SGX core rev major = 0x%02x, minor = 0x%02x\n",
218 (core_rev & _PSB_CC_REVISION_MAJOR_MASK) >>
219 _PSB_CC_REVISION_MAJOR_SHIFT,
220 (core_rev & _PSB_CC_REVISION_MINOR_MASK) >>
221 _PSB_CC_REVISION_MINOR_SHIFT);
222 DRM_INFO
223 ("SGX core rev maintenance = 0x%02x, designer = 0x%02x\n",
224 (core_rev & _PSB_CC_REVISION_MAINTENANCE_MASK) >>
225 _PSB_CC_REVISION_MAINTENANCE_SHIFT,
226 (core_rev & _PSB_CC_REVISION_DESIGNER_MASK) >>
227 _PSB_CC_REVISION_DESIGNER_SHIFT);
228 }
229
230
231 spin_lock_init(&dev_priv->irqmask_lock);
232 spin_lock_init(&dev_priv->lock_2d);
233
234 PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
235 PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
236 PSB_RSGX32(PSB_CR_BIF_BANK1);
237 PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_MMU_ER_MASK,
238 PSB_CR_BIF_CTRL);
239 psb_spank(dev_priv);
240
241 /* mmu_gatt ?? */
242 PSB_WSGX32(pg->gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
243 return 0;
244out_err:
245 psb_do_takedown(dev);
246 return ret;
247}
248
249static int psb_driver_unload(struct drm_device *dev)
250{
251 struct drm_psb_private *dev_priv = dev->dev_private;
252
253 /* Kill vblank etc here */
254
255 gma_backlight_exit(dev);
256
257 if (drm_psb_no_fb == 0)
258 psb_modeset_cleanup(dev);
259
260 if (dev_priv) {
261 psb_lid_timer_takedown(dev_priv);
262 gma_intel_opregion_exit(dev);
263
264 if (dev_priv->ops->chip_teardown)
265 dev_priv->ops->chip_teardown(dev);
266 psb_do_takedown(dev);
267
268
269 if (dev_priv->pf_pd) {
270 psb_mmu_free_pagedir(dev_priv->pf_pd);
271 dev_priv->pf_pd = NULL;
272 }
273 if (dev_priv->mmu) {
274 struct psb_gtt *pg = &dev_priv->gtt;
275
276 down_read(&pg->sem);
277 psb_mmu_remove_pfn_sequence(
278 psb_mmu_get_default_pd
279 (dev_priv->mmu),
280 pg->mmu_gatt_start,
281 dev_priv->vram_stolen_size >> PAGE_SHIFT);
282 up_read(&pg->sem);
283 psb_mmu_driver_takedown(dev_priv->mmu);
284 dev_priv->mmu = NULL;
285 }
286 psb_gtt_takedown(dev);
287 if (dev_priv->scratch_page) {
288 __free_page(dev_priv->scratch_page);
289 dev_priv->scratch_page = NULL;
290 }
291 if (dev_priv->vdc_reg) {
292 iounmap(dev_priv->vdc_reg);
293 dev_priv->vdc_reg = NULL;
294 }
295 if (dev_priv->sgx_reg) {
296 iounmap(dev_priv->sgx_reg);
297 dev_priv->sgx_reg = NULL;
298 }
299
300 kfree(dev_priv);
301 dev->dev_private = NULL;
302
303 /*destroy VBT data*/
304 psb_intel_destroy_bios(dev);
305 }
306
307 gma_power_uninit(dev);
308
309 return 0;
310}
311
312
313static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
314{
315 struct drm_psb_private *dev_priv;
316 unsigned long resource_start;
317 struct psb_gtt *pg;
318 unsigned long irqflags;
319 int ret = -ENOMEM;
320 uint32_t tt_pages;
321 struct drm_connector *connector;
322 struct psb_intel_output *psb_intel_output;
323
324 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
325 if (dev_priv == NULL)
326 return -ENOMEM;
327
328 dev_priv->ops = (struct psb_ops *)chipset;
329 dev_priv->dev = dev;
330 dev->dev_private = (void *) dev_priv;
331
332 if (!IS_PSB(dev)) {
333 if (pci_enable_msi(dev->pdev))
334 dev_warn(dev->dev, "Enabling MSI failed!\n");
335 }
336
337 dev_priv->num_pipe = dev_priv->ops->pipes;
338
339 resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
340
341 dev_priv->vdc_reg =
342 ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE);
343 if (!dev_priv->vdc_reg)
344 goto out_err;
345
346 dev_priv->sgx_reg = ioremap(resource_start + dev_priv->ops->sgx_offset,
347 PSB_SGX_SIZE);
348 if (!dev_priv->sgx_reg)
349 goto out_err;
350
351 ret = dev_priv->ops->chip_setup(dev);
352 if (ret)
353 goto out_err;
354
355 /* Init OSPM support */
356 gma_power_init(dev);
357
358 ret = -ENOMEM;
359
360 dev_priv->scratch_page = alloc_page(GFP_DMA32 | __GFP_ZERO);
361 if (!dev_priv->scratch_page)
362 goto out_err;
363
364 set_pages_uc(dev_priv->scratch_page, 1);
365
366 ret = psb_gtt_init(dev, 0);
367 if (ret)
368 goto out_err;
369
370 dev_priv->mmu = psb_mmu_driver_init((void *)0,
371 drm_psb_trap_pagefaults, 0,
372 dev_priv);
373 if (!dev_priv->mmu)
374 goto out_err;
375
376 pg = &dev_priv->gtt;
377
378 tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
379 (pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT;
380
381
382 dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0);
383 if (!dev_priv->pf_pd)
384 goto out_err;
385
386 psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
387 psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
388
389 ret = psb_do_init(dev);
390 if (ret)
391 return ret;
392
393 PSB_WSGX32(0x20000000, PSB_CR_PDS_EXEC_BASE);
394 PSB_WSGX32(0x30000000, PSB_CR_BIF_3D_REQ_BASE);
395
396/* igd_opregion_init(&dev_priv->opregion_dev); */
397 acpi_video_register();
398 if (dev_priv->lid_state)
399 psb_lid_timer_init(dev_priv);
400
401 ret = drm_vblank_init(dev, dev_priv->num_pipe);
402 if (ret)
403 goto out_err;
404
405 /*
406 * Install interrupt handlers prior to powering off SGX or else we will
407 * crash.
408 */
409 dev_priv->vdc_irq_mask = 0;
410 dev_priv->pipestat[0] = 0;
411 dev_priv->pipestat[1] = 0;
412 dev_priv->pipestat[2] = 0;
413 spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
414 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
415 PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
416 PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
417 spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
418 if (IS_PSB(dev) && drm_core_check_feature(dev, DRIVER_MODESET))
419 drm_irq_install(dev);
420
421 dev->vblank_disable_allowed = 1;
422
423 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
424
425 dev->driver->get_vblank_counter = psb_get_vblank_counter;
426
427#if defined(CONFIG_DRM_PSB_MFLD)
428 /* FIXME: this is not the right place for this stuff ! */
429 mdfld_output_setup(dev);
430#endif
431 if (drm_psb_no_fb == 0) {
432 psb_modeset_init(dev);
433 psb_fbdev_init(dev);
434 drm_kms_helper_poll_init(dev);
435 }
436
437 /* Only add backlight support if we have LVDS output */
438 list_for_each_entry(connector, &dev->mode_config.connector_list,
439 head) {
440 psb_intel_output = to_psb_intel_output(connector);
441
442 switch (psb_intel_output->type) {
443 case INTEL_OUTPUT_LVDS:
444 case INTEL_OUTPUT_MIPI:
445 ret = gma_backlight_init(dev);
446 break;
447 }
448 }
449
450 if (ret)
451 return ret;
452
453 /* Enable runtime pm at last */
454 pm_runtime_set_active(&dev->pdev->dev);
455 return 0;
456out_err:
457 psb_driver_unload(dev);
458 return ret;
459}
460
461int psb_driver_device_is_agp(struct drm_device *dev)
462{
463 return 0;
464}
465
466
467static int psb_sizes_ioctl(struct drm_device *dev, void *data,
468 struct drm_file *file_priv)
469{
470 struct drm_psb_private *dev_priv = psb_priv(dev);
471 struct drm_psb_sizes_arg *arg = data;
472
473 *arg = dev_priv->sizes;
474 return 0;
475}
476
477static int psb_dc_state_ioctl(struct drm_device *dev, void *data,
478 struct drm_file *file_priv)
479{
480 uint32_t flags;
481 uint32_t obj_id;
482 struct drm_mode_object *obj;
483 struct drm_connector *connector;
484 struct drm_crtc *crtc;
485 struct drm_psb_dc_state_arg *arg = data;
486
487
488 /* Double check MRST case */
489 if (IS_MRST(dev) || IS_MFLD(dev))
490 return -EOPNOTSUPP;
491
492 flags = arg->flags;
493 obj_id = arg->obj_id;
494
495 if (flags & PSB_DC_CRTC_MASK) {
496 obj = drm_mode_object_find(dev, obj_id,
497 DRM_MODE_OBJECT_CRTC);
498 if (!obj) {
499 dev_dbg(dev->dev, "Invalid CRTC object.\n");
500 return -EINVAL;
501 }
502
503 crtc = obj_to_crtc(obj);
504
505 mutex_lock(&dev->mode_config.mutex);
506 if (drm_helper_crtc_in_use(crtc)) {
507 if (flags & PSB_DC_CRTC_SAVE)
508 crtc->funcs->save(crtc);
509 else
510 crtc->funcs->restore(crtc);
511 }
512 mutex_unlock(&dev->mode_config.mutex);
513
514 return 0;
515 } else if (flags & PSB_DC_OUTPUT_MASK) {
516 obj = drm_mode_object_find(dev, obj_id,
517 DRM_MODE_OBJECT_CONNECTOR);
518 if (!obj) {
519 dev_dbg(dev->dev, "Invalid connector id.\n");
520 return -EINVAL;
521 }
522
523 connector = obj_to_connector(obj);
524 if (flags & PSB_DC_OUTPUT_SAVE)
525 connector->funcs->save(connector);
526 else
527 connector->funcs->restore(connector);
528
529 return 0;
530 }
531 return -EINVAL;
532}
533
534static inline void get_brightness(struct backlight_device *bd)
535{
536#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
537 if (bd) {
538 bd->props.brightness = bd->ops->get_brightness(bd);
539 backlight_update_status(bd);
540 }
541#endif
542}
543
544static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
545 struct drm_file *file_priv)
546{
547 struct drm_psb_private *dev_priv = psb_priv(dev);
548 uint32_t *arg = data;
549
550 dev_priv->blc_adj2 = *arg;
551 get_brightness(dev_priv->backlight_device);
552 return 0;
553}
554
555static int psb_adb_ioctl(struct drm_device *dev, void *data,
556 struct drm_file *file_priv)
557{
558 struct drm_psb_private *dev_priv = psb_priv(dev);
559 uint32_t *arg = data;
560
561 dev_priv->blc_adj1 = *arg;
562 get_brightness(dev_priv->backlight_device);
563 return 0;
564}
565
566/* return the current pipe source size to the dpst module */
567static int psb_dpst_ioctl(struct drm_device *dev, void *data,
568 struct drm_file *file_priv)
569{
570 struct drm_psb_private *dev_priv = psb_priv(dev);
571 uint32_t *arg = data;
572 uint32_t x;
573 uint32_t y;
574 uint32_t reg;
575
576 if (!gma_power_begin(dev, 0))
577 return -EIO;
578
579 reg = PSB_RVDC32(PIPEASRC);
580
581 gma_power_end(dev);
582
583 /* horizontal is the left 16 bits */
584 x = reg >> 16;
585 /* vertical is the right 16 bits */
586 y = reg & 0x0000ffff;
587
588 /* the values are the image size minus one */
589 x++;
590 y++;
591
592 *arg = (x << 16) | y;
593
594 return 0;
595}
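
A quick worked decode of the PIPEASRC value handled above (the register value is illustrative):

/*
 * If PIPEASRC reads 0x031F01DF:
 *	x = (0x031F01DF >> 16) + 1 = 0x31F + 1 = 800
 *	y = (0x031F01DF & 0xffff) + 1 = 0x1DF + 1 = 480
 * so *arg is returned as (800 << 16) | 480 for an 800x480 pipe source.
 */
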
596static int psb_gamma_ioctl(struct drm_device *dev, void *data,
597 struct drm_file *file_priv)
598{
599 struct drm_psb_dpst_lut_arg *lut_arg = data;
600 struct drm_mode_object *obj;
601 struct drm_crtc *crtc;
602 struct drm_connector *connector;
603 struct psb_intel_crtc *psb_intel_crtc;
604 int i = 0;
605 int32_t obj_id;
606
607 obj_id = lut_arg->output_id;
608 obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CONNECTOR);
609 if (!obj) {
610 dev_dbg(dev->dev, "Invalid Connector object.\n");
611 return -EINVAL;
612 }
613
614 connector = obj_to_connector(obj);
615 crtc = connector->encoder->crtc;
616 psb_intel_crtc = to_psb_intel_crtc(crtc);
617
618 for (i = 0; i < 256; i++)
619 psb_intel_crtc->lut_adj[i] = lut_arg->lut[i];
620
621 psb_intel_crtc_load_lut(crtc);
622
623 return 0;
624}
625
626static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
627 struct drm_file *file_priv)
628{
629 uint32_t obj_id;
630 uint16_t op;
631 struct drm_mode_modeinfo *umode;
632 struct drm_display_mode *mode = NULL;
633 struct drm_psb_mode_operation_arg *arg;
634 struct drm_mode_object *obj;
635 struct drm_connector *connector;
636 struct drm_framebuffer *drm_fb;
637 struct psb_framebuffer *psb_fb;
638 struct drm_connector_helper_funcs *connector_funcs;
639 int ret = 0;
640 int resp = MODE_OK;
641 struct drm_psb_private *dev_priv = psb_priv(dev);
642
643 arg = (struct drm_psb_mode_operation_arg *)data;
644 obj_id = arg->obj_id;
645 op = arg->operation;
646
647 switch (op) {
648 case PSB_MODE_OPERATION_SET_DC_BASE:
649 obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_FB);
650 if (!obj) {
651 dev_dbg(dev->dev, "Invalid FB id %d\n", obj_id);
652 return -EINVAL;
653 }
654
655 drm_fb = obj_to_fb(obj);
656 psb_fb = to_psb_fb(drm_fb);
657
658 if (gma_power_begin(dev, 0)) {
659 REG_WRITE(DSPASURF, psb_fb->gtt->offset);
660 REG_READ(DSPASURF);
661 gma_power_end(dev);
662 } else {
663 dev_priv->saveDSPASURF = psb_fb->gtt->offset;
664 }
665
666 return 0;
667 case PSB_MODE_OPERATION_MODE_VALID:
668 umode = &arg->mode;
669
670 mutex_lock(&dev->mode_config.mutex);
671
672 obj = drm_mode_object_find(dev, obj_id,
673 DRM_MODE_OBJECT_CONNECTOR);
674 if (!obj) {
675 ret = -EINVAL;
676 goto mode_op_out;
677 }
678
679 connector = obj_to_connector(obj);
680
681 mode = drm_mode_create(dev);
682 if (!mode) {
683 ret = -ENOMEM;
684 goto mode_op_out;
685 }
686
687 /* drm_crtc_convert_umode(mode, umode); */
688 {
689 mode->clock = umode->clock;
690 mode->hdisplay = umode->hdisplay;
691 mode->hsync_start = umode->hsync_start;
692 mode->hsync_end = umode->hsync_end;
693 mode->htotal = umode->htotal;
694 mode->hskew = umode->hskew;
695 mode->vdisplay = umode->vdisplay;
696 mode->vsync_start = umode->vsync_start;
697 mode->vsync_end = umode->vsync_end;
698 mode->vtotal = umode->vtotal;
699 mode->vscan = umode->vscan;
700 mode->vrefresh = umode->vrefresh;
701 mode->flags = umode->flags;
702 mode->type = umode->type;
703 strncpy(mode->name, umode->name, DRM_DISPLAY_MODE_LEN);
704 mode->name[DRM_DISPLAY_MODE_LEN-1] = 0;
705 }
706
707 connector_funcs = (struct drm_connector_helper_funcs *)
708 connector->helper_private;
709
710 if (connector_funcs->mode_valid) {
711 resp = connector_funcs->mode_valid(connector, mode);
712 arg->data = (void *)resp;
713 }
714
715 /*do some clean up work*/
716 if (mode)
717 drm_mode_destroy(dev, mode);
718mode_op_out:
719 mutex_unlock(&dev->mode_config.mutex);
720 return ret;
721
722 default:
723 dev_dbg(dev->dev, "Unsupported psb mode operation\n");
724 return -EOPNOTSUPP;
725 }
726
727 return 0;
728}
729
730static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
731 struct drm_file *file_priv)
732{
733 struct drm_psb_private *dev_priv = psb_priv(dev);
734 struct drm_psb_stolen_memory_arg *arg = data;
735
736 arg->base = dev_priv->stolen_base;
737 arg->size = dev_priv->vram_stolen_size;
738
739 return 0;
740}
741
742/* FIXME: needs Medfield changes */
743static int psb_register_rw_ioctl(struct drm_device *dev, void *data,
744 struct drm_file *file_priv)
745{
746 struct drm_psb_private *dev_priv = psb_priv(dev);
747 struct drm_psb_register_rw_arg *arg = data;
748 bool usage = arg->b_force_hw_on ? true : false;
749
750 if (arg->display_write_mask != 0) {
751 if (gma_power_begin(dev, usage)) {
752 if (arg->display_write_mask & REGRWBITS_PFIT_CONTROLS)
753 PSB_WVDC32(arg->display.pfit_controls,
754 PFIT_CONTROL);
755 if (arg->display_write_mask &
756 REGRWBITS_PFIT_AUTOSCALE_RATIOS)
757 PSB_WVDC32(arg->display.pfit_autoscale_ratios,
758 PFIT_AUTO_RATIOS);
759 if (arg->display_write_mask &
760 REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS)
761 PSB_WVDC32(
762 arg->display.pfit_programmed_scale_ratios,
763 PFIT_PGM_RATIOS);
764 if (arg->display_write_mask & REGRWBITS_PIPEASRC)
765 PSB_WVDC32(arg->display.pipeasrc,
766 PIPEASRC);
767 if (arg->display_write_mask & REGRWBITS_PIPEBSRC)
768 PSB_WVDC32(arg->display.pipebsrc,
769 PIPEBSRC);
770 if (arg->display_write_mask & REGRWBITS_VTOTAL_A)
771 PSB_WVDC32(arg->display.vtotal_a,
772 VTOTAL_A);
773 if (arg->display_write_mask & REGRWBITS_VTOTAL_B)
774 PSB_WVDC32(arg->display.vtotal_b,
775 VTOTAL_B);
776 gma_power_end(dev);
777 } else {
778 if (arg->display_write_mask & REGRWBITS_PFIT_CONTROLS)
779 dev_priv->savePFIT_CONTROL =
780 arg->display.pfit_controls;
781 if (arg->display_write_mask &
782 REGRWBITS_PFIT_AUTOSCALE_RATIOS)
783 dev_priv->savePFIT_AUTO_RATIOS =
784 arg->display.pfit_autoscale_ratios;
785 if (arg->display_write_mask &
786 REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS)
787 dev_priv->savePFIT_PGM_RATIOS =
788 arg->display.pfit_programmed_scale_ratios;
789 if (arg->display_write_mask & REGRWBITS_PIPEASRC)
790 dev_priv->savePIPEASRC = arg->display.pipeasrc;
791 if (arg->display_write_mask & REGRWBITS_PIPEBSRC)
792 dev_priv->savePIPEBSRC = arg->display.pipebsrc;
793 if (arg->display_write_mask & REGRWBITS_VTOTAL_A)
794 dev_priv->saveVTOTAL_A = arg->display.vtotal_a;
795 if (arg->display_write_mask & REGRWBITS_VTOTAL_B)
796 dev_priv->saveVTOTAL_B = arg->display.vtotal_b;
797 }
798 }
799
800 if (arg->display_read_mask != 0) {
801 if (gma_power_begin(dev, usage)) {
802 if (arg->display_read_mask &
803 REGRWBITS_PFIT_CONTROLS)
804 arg->display.pfit_controls =
805 PSB_RVDC32(PFIT_CONTROL);
806 if (arg->display_read_mask &
807 REGRWBITS_PFIT_AUTOSCALE_RATIOS)
808 arg->display.pfit_autoscale_ratios =
809 PSB_RVDC32(PFIT_AUTO_RATIOS);
810 if (arg->display_read_mask &
811 REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS)
812 arg->display.pfit_programmed_scale_ratios =
813 PSB_RVDC32(PFIT_PGM_RATIOS);
814 if (arg->display_read_mask & REGRWBITS_PIPEASRC)
815 arg->display.pipeasrc = PSB_RVDC32(PIPEASRC);
816 if (arg->display_read_mask & REGRWBITS_PIPEBSRC)
817 arg->display.pipebsrc = PSB_RVDC32(PIPEBSRC);
818 if (arg->display_read_mask & REGRWBITS_VTOTAL_A)
819 arg->display.vtotal_a = PSB_RVDC32(VTOTAL_A);
820 if (arg->display_read_mask & REGRWBITS_VTOTAL_B)
821 arg->display.vtotal_b = PSB_RVDC32(VTOTAL_B);
822 gma_power_end(dev);
823 } else {
824 if (arg->display_read_mask &
825 REGRWBITS_PFIT_CONTROLS)
826 arg->display.pfit_controls =
827 dev_priv->savePFIT_CONTROL;
828 if (arg->display_read_mask &
829 REGRWBITS_PFIT_AUTOSCALE_RATIOS)
830 arg->display.pfit_autoscale_ratios =
831 dev_priv->savePFIT_AUTO_RATIOS;
832 if (arg->display_read_mask &
833 REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS)
834 arg->display.pfit_programmed_scale_ratios =
835 dev_priv->savePFIT_PGM_RATIOS;
836 if (arg->display_read_mask & REGRWBITS_PIPEASRC)
837 arg->display.pipeasrc = dev_priv->savePIPEASRC;
838 if (arg->display_read_mask & REGRWBITS_PIPEBSRC)
839 arg->display.pipebsrc = dev_priv->savePIPEBSRC;
840 if (arg->display_read_mask & REGRWBITS_VTOTAL_A)
841 arg->display.vtotal_a = dev_priv->saveVTOTAL_A;
842 if (arg->display_read_mask & REGRWBITS_VTOTAL_B)
843 arg->display.vtotal_b = dev_priv->saveVTOTAL_B;
844 }
845 }
846
847 if (arg->overlay_write_mask != 0) {
848 if (gma_power_begin(dev, usage)) {
849 if (arg->overlay_write_mask & OV_REGRWBITS_OGAM_ALL) {
850 PSB_WVDC32(arg->overlay.OGAMC5, OV_OGAMC5);
851 PSB_WVDC32(arg->overlay.OGAMC4, OV_OGAMC4);
852 PSB_WVDC32(arg->overlay.OGAMC3, OV_OGAMC3);
853 PSB_WVDC32(arg->overlay.OGAMC2, OV_OGAMC2);
854 PSB_WVDC32(arg->overlay.OGAMC1, OV_OGAMC1);
855 PSB_WVDC32(arg->overlay.OGAMC0, OV_OGAMC0);
856 }
857 if (arg->overlay_write_mask & OVC_REGRWBITS_OGAM_ALL) {
858 PSB_WVDC32(arg->overlay.OGAMC5, OVC_OGAMC5);
859 PSB_WVDC32(arg->overlay.OGAMC4, OVC_OGAMC4);
860 PSB_WVDC32(arg->overlay.OGAMC3, OVC_OGAMC3);
861 PSB_WVDC32(arg->overlay.OGAMC2, OVC_OGAMC2);
862 PSB_WVDC32(arg->overlay.OGAMC1, OVC_OGAMC1);
863 PSB_WVDC32(arg->overlay.OGAMC0, OVC_OGAMC0);
864 }
865
866 if (arg->overlay_write_mask & OV_REGRWBITS_OVADD) {
867 PSB_WVDC32(arg->overlay.OVADD, OV_OVADD);
868
869 if (arg->overlay.b_wait_vblank) {
870 /* Wait for 20ms.*/
871 unsigned long vblank_timeout = jiffies
872 + HZ/50;
873 uint32_t temp;
874 while (time_before_eq(jiffies,
875 vblank_timeout)) {
876 temp = PSB_RVDC32(OV_DOVASTA);
877 if ((temp & (0x1 << 31)) != 0)
878 break;
879 cpu_relax();
880 }
881 }
882 }
883 if (arg->overlay_write_mask & OVC_REGRWBITS_OVADD) {
884 PSB_WVDC32(arg->overlay.OVADD, OVC_OVADD);
885 if (arg->overlay.b_wait_vblank) {
886 /* Wait for 20ms.*/
887 unsigned long vblank_timeout =
888 jiffies + HZ/50;
889 uint32_t temp;
890 while (time_before_eq(jiffies,
891 vblank_timeout)) {
892 temp = PSB_RVDC32(OVC_DOVCSTA);
893 if ((temp & (0x1 << 31)) != 0)
894 break;
895 cpu_relax();
896 }
897 }
898 }
899 gma_power_end(dev);
900 } else {
901 if (arg->overlay_write_mask & OV_REGRWBITS_OGAM_ALL) {
902 dev_priv->saveOV_OGAMC5 = arg->overlay.OGAMC5;
903 dev_priv->saveOV_OGAMC4 = arg->overlay.OGAMC4;
904 dev_priv->saveOV_OGAMC3 = arg->overlay.OGAMC3;
905 dev_priv->saveOV_OGAMC2 = arg->overlay.OGAMC2;
906 dev_priv->saveOV_OGAMC1 = arg->overlay.OGAMC1;
907 dev_priv->saveOV_OGAMC0 = arg->overlay.OGAMC0;
908 }
909 if (arg->overlay_write_mask & OVC_REGRWBITS_OGAM_ALL) {
910 dev_priv->saveOVC_OGAMC5 = arg->overlay.OGAMC5;
911 dev_priv->saveOVC_OGAMC4 = arg->overlay.OGAMC4;
912 dev_priv->saveOVC_OGAMC3 = arg->overlay.OGAMC3;
913 dev_priv->saveOVC_OGAMC2 = arg->overlay.OGAMC2;
914 dev_priv->saveOVC_OGAMC1 = arg->overlay.OGAMC1;
915 dev_priv->saveOVC_OGAMC0 = arg->overlay.OGAMC0;
916 }
917 if (arg->overlay_write_mask & OV_REGRWBITS_OVADD)
918 dev_priv->saveOV_OVADD = arg->overlay.OVADD;
919 if (arg->overlay_write_mask & OVC_REGRWBITS_OVADD)
920 dev_priv->saveOVC_OVADD = arg->overlay.OVADD;
921 }
922 }
923
924 if (arg->overlay_read_mask != 0) {
925 if (gma_power_begin(dev, usage)) {
926 if (arg->overlay_read_mask & OV_REGRWBITS_OGAM_ALL) {
927 arg->overlay.OGAMC5 = PSB_RVDC32(OV_OGAMC5);
928 arg->overlay.OGAMC4 = PSB_RVDC32(OV_OGAMC4);
929 arg->overlay.OGAMC3 = PSB_RVDC32(OV_OGAMC3);
930 arg->overlay.OGAMC2 = PSB_RVDC32(OV_OGAMC2);
931 arg->overlay.OGAMC1 = PSB_RVDC32(OV_OGAMC1);
932 arg->overlay.OGAMC0 = PSB_RVDC32(OV_OGAMC0);
933 }
934 if (arg->overlay_read_mask & OVC_REGRWBITS_OGAM_ALL) {
935 arg->overlay.OGAMC5 = PSB_RVDC32(OVC_OGAMC5);
936 arg->overlay.OGAMC4 = PSB_RVDC32(OVC_OGAMC4);
937 arg->overlay.OGAMC3 = PSB_RVDC32(OVC_OGAMC3);
938 arg->overlay.OGAMC2 = PSB_RVDC32(OVC_OGAMC2);
939 arg->overlay.OGAMC1 = PSB_RVDC32(OVC_OGAMC1);
940 arg->overlay.OGAMC0 = PSB_RVDC32(OVC_OGAMC0);
941 }
942 if (arg->overlay_read_mask & OV_REGRWBITS_OVADD)
943 arg->overlay.OVADD = PSB_RVDC32(OV_OVADD);
944 if (arg->overlay_read_mask & OVC_REGRWBITS_OVADD)
945 arg->overlay.OVADD = PSB_RVDC32(OVC_OVADD);
946 gma_power_end(dev);
947 } else {
948 if (arg->overlay_read_mask & OV_REGRWBITS_OGAM_ALL) {
949 arg->overlay.OGAMC5 = dev_priv->saveOV_OGAMC5;
950 arg->overlay.OGAMC4 = dev_priv->saveOV_OGAMC4;
951 arg->overlay.OGAMC3 = dev_priv->saveOV_OGAMC3;
952 arg->overlay.OGAMC2 = dev_priv->saveOV_OGAMC2;
953 arg->overlay.OGAMC1 = dev_priv->saveOV_OGAMC1;
954 arg->overlay.OGAMC0 = dev_priv->saveOV_OGAMC0;
955 }
956 if (arg->overlay_read_mask & OVC_REGRWBITS_OGAM_ALL) {
957 arg->overlay.OGAMC5 = dev_priv->saveOVC_OGAMC5;
958 arg->overlay.OGAMC4 = dev_priv->saveOVC_OGAMC4;
959 arg->overlay.OGAMC3 = dev_priv->saveOVC_OGAMC3;
960 arg->overlay.OGAMC2 = dev_priv->saveOVC_OGAMC2;
961 arg->overlay.OGAMC1 = dev_priv->saveOVC_OGAMC1;
962 arg->overlay.OGAMC0 = dev_priv->saveOVC_OGAMC0;
963 }
964 if (arg->overlay_read_mask & OV_REGRWBITS_OVADD)
965 arg->overlay.OVADD = dev_priv->saveOV_OVADD;
966 if (arg->overlay_read_mask & OVC_REGRWBITS_OVADD)
967 arg->overlay.OVADD = dev_priv->saveOVC_OVADD;
968 }
969 }
970
971 if (arg->sprite_enable_mask != 0) {
972 if (gma_power_begin(dev, usage)) {
973 PSB_WVDC32(0x1F3E, DSPARB);
974 PSB_WVDC32(arg->sprite.dspa_control
975 | PSB_RVDC32(DSPACNTR), DSPACNTR);
976 PSB_WVDC32(arg->sprite.dspa_key_value, DSPAKEYVAL);
977 PSB_WVDC32(arg->sprite.dspa_key_mask, DSPAKEYMASK);
978 PSB_WVDC32(PSB_RVDC32(DSPASURF), DSPASURF);
979 PSB_RVDC32(DSPASURF);
980 PSB_WVDC32(arg->sprite.dspc_control, DSPCCNTR);
981 PSB_WVDC32(arg->sprite.dspc_stride, DSPCSTRIDE);
982 PSB_WVDC32(arg->sprite.dspc_position, DSPCPOS);
983 PSB_WVDC32(arg->sprite.dspc_linear_offset, DSPCLINOFF);
984 PSB_WVDC32(arg->sprite.dspc_size, DSPCSIZE);
985 PSB_WVDC32(arg->sprite.dspc_surface, DSPCSURF);
986 PSB_RVDC32(DSPCSURF);
987 gma_power_end(dev);
988 }
989 }
990
991 if (arg->sprite_disable_mask != 0) {
992 if (gma_power_begin(dev, usage)) {
993 PSB_WVDC32(0x3F3E, DSPARB);
994 PSB_WVDC32(0x0, DSPCCNTR);
995 PSB_WVDC32(arg->sprite.dspc_surface, DSPCSURF);
996 PSB_RVDC32(DSPCSURF);
997 gma_power_end(dev);
998 }
999 }
1000
1001 if (arg->subpicture_enable_mask != 0) {
1002 if (gma_power_begin(dev, usage)) {
1003 uint32_t temp;
1004 if (arg->subpicture_enable_mask & REGRWBITS_DSPACNTR) {
1005 temp = PSB_RVDC32(DSPACNTR);
1006 temp &= ~DISPPLANE_PIXFORMAT_MASK;
1007 temp &= ~DISPPLANE_BOTTOM;
1008 temp |= DISPPLANE_32BPP;
1009 PSB_WVDC32(temp, DSPACNTR);
1010
1011 temp = PSB_RVDC32(DSPABASE);
1012 PSB_WVDC32(temp, DSPABASE);
1013 PSB_RVDC32(DSPABASE);
1014 temp = PSB_RVDC32(DSPASURF);
1015 PSB_WVDC32(temp, DSPASURF);
1016 PSB_RVDC32(DSPASURF);
1017 }
1018 if (arg->subpicture_enable_mask & REGRWBITS_DSPBCNTR) {
1019 temp = PSB_RVDC32(DSPBCNTR);
1020 temp &= ~DISPPLANE_PIXFORMAT_MASK;
1021 temp &= ~DISPPLANE_BOTTOM;
1022 temp |= DISPPLANE_32BPP;
1023 PSB_WVDC32(temp, DSPBCNTR);
1024
1025 temp = PSB_RVDC32(DSPBBASE);
1026 PSB_WVDC32(temp, DSPBBASE);
1027 PSB_RVDC32(DSPBBASE);
1028 temp = PSB_RVDC32(DSPBSURF);
1029 PSB_WVDC32(temp, DSPBSURF);
1030 PSB_RVDC32(DSPBSURF);
1031 }
1032 if (arg->subpicture_enable_mask & REGRWBITS_DSPCCNTR) {
1033 temp = PSB_RVDC32(DSPCCNTR);
1034 temp &= ~DISPPLANE_PIXFORMAT_MASK;
1035 temp &= ~DISPPLANE_BOTTOM;
1036 temp |= DISPPLANE_32BPP;
1037 PSB_WVDC32(temp, DSPCCNTR);
1038
1039 temp = PSB_RVDC32(DSPCBASE);
1040 PSB_WVDC32(temp, DSPCBASE);
1041 PSB_RVDC32(DSPCBASE);
1042 temp = PSB_RVDC32(DSPCSURF);
1043 PSB_WVDC32(temp, DSPCSURF);
1044 PSB_RVDC32(DSPCSURF);
1045 }
1046 gma_power_end(dev);
1047 }
1048 }
1049
1050 if (arg->subpicture_disable_mask != 0) {
1051 if (gma_power_begin(dev, usage)) {
1052 uint32_t temp;
1053 if (arg->subpicture_disable_mask & REGRWBITS_DSPACNTR) {
1054 temp = PSB_RVDC32(DSPACNTR);
1055 temp &= ~DISPPLANE_PIXFORMAT_MASK;
1056 temp |= DISPPLANE_32BPP_NO_ALPHA;
1057 PSB_WVDC32(temp, DSPACNTR);
1058
1059 temp = PSB_RVDC32(DSPABASE);
1060 PSB_WVDC32(temp, DSPABASE);
1061 PSB_RVDC32(DSPABASE);
1062 temp = PSB_RVDC32(DSPASURF);
1063 PSB_WVDC32(temp, DSPASURF);
1064 PSB_RVDC32(DSPASURF);
1065 }
1066 if (arg->subpicture_disable_mask & REGRWBITS_DSPBCNTR) {
1067 temp = PSB_RVDC32(DSPBCNTR);
1068 temp &= ~DISPPLANE_PIXFORMAT_MASK;
1069 temp |= DISPPLANE_32BPP_NO_ALPHA;
1070 PSB_WVDC32(temp, DSPBCNTR);
1071
1072 temp = PSB_RVDC32(DSPBBASE);
1073 PSB_WVDC32(temp, DSPBBASE);
1074 PSB_RVDC32(DSPBBASE);
1075 temp = PSB_RVDC32(DSPBSURF);
1076 PSB_WVDC32(temp, DSPBSURF);
1077 PSB_RVDC32(DSPBSURF);
1078 }
1079 if (arg->subpicture_disable_mask & REGRWBITS_DSPCCNTR) {
1080 temp = PSB_RVDC32(DSPCCNTR);
1081 temp &= ~DISPPLANE_PIXFORMAT_MASK;
1082 temp |= DISPPLANE_32BPP_NO_ALPHA;
1083 PSB_WVDC32(temp, DSPCCNTR);
1084
1085 temp = PSB_RVDC32(DSPCBASE);
1086 PSB_WVDC32(temp, DSPCBASE);
1087 PSB_RVDC32(DSPCBASE);
1088 temp = PSB_RVDC32(DSPCSURF);
1089 PSB_WVDC32(temp, DSPCSURF);
1090 PSB_RVDC32(DSPCSURF);
1091 }
1092 gma_power_end(dev);
1093 }
1094 }
1095
1096 return 0;
1097}
1098
1099static int psb_driver_open(struct drm_device *dev, struct drm_file *priv)
1100{
1101 return 0;
1102}
1103
1104static void psb_driver_close(struct drm_device *dev, struct drm_file *priv)
1105{
1106}
1107
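/*
 * All ioctls are funnelled through this wrapper: pm_runtime_forbid()
 * bumps the usage count and resumes the device if it was runtime
 * suspended, so the hardware stays powered for the duration of the
 * drm_ioctl() call, and pm_runtime_allow() lifts that restriction
 * again on the way out.
 */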
1108static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
1109 unsigned long arg)
1110{
1111 struct drm_file *file_priv = filp->private_data;
1112 struct drm_device *dev = file_priv->minor->dev;
1113 int ret;
1114
1115 pm_runtime_forbid(dev->dev);
1116 ret = drm_ioctl(filp, cmd, arg);
1117 pm_runtime_allow(dev->dev);
1118 return ret;
1119 /* FIXME: do we need to wrap the other side of this */
1120}
1121
1122
1123/* When a client dies:
1124 * - Check for and clean up flipped page state
1125 */
1126void psb_driver_preclose(struct drm_device *dev, struct drm_file *priv)
1127{
1128}
1129
1130static void psb_remove(struct pci_dev *pdev)
1131{
1132 struct drm_device *dev = pci_get_drvdata(pdev);
1133 drm_put_dev(dev);
1134}
1135
1136static const struct dev_pm_ops psb_pm_ops = {
1137 .suspend = gma_power_suspend,
1138 .resume = gma_power_resume,
1139 .freeze = gma_power_suspend,
1140 .thaw = gma_power_resume,
1141 .poweroff = gma_power_suspend,
1142 .restore = gma_power_resume,
1143 .runtime_suspend = psb_runtime_suspend,
1144 .runtime_resume = psb_runtime_resume,
1145 .runtime_idle = psb_runtime_idle,
1146};
1147
1148static struct vm_operations_struct psb_gem_vm_ops = {
1149 .fault = psb_gem_fault,
1150 .open = drm_gem_vm_open,
1151 .close = drm_gem_vm_close,
1152};
1153
1154static const struct file_operations gma500_driver_fops = {
1155 .owner = THIS_MODULE,
1156 .open = drm_open,
1157 .release = drm_release,
1158 .unlocked_ioctl = psb_unlocked_ioctl,
1159 .mmap = drm_gem_mmap,
1160 .poll = drm_poll,
1161 .fasync = drm_fasync,
1162 .read = drm_read,
1163};
1164
1165static struct drm_driver driver = {
1166	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
1167			   DRIVER_IRQ_VBL | DRIVER_MODESET | DRIVER_GEM,
1168 .load = psb_driver_load,
1169 .unload = psb_driver_unload,
1170
1171 .ioctls = psb_ioctls,
1172 .num_ioctls = DRM_ARRAY_SIZE(psb_ioctls),
1173 .device_is_agp = psb_driver_device_is_agp,
1174 .irq_preinstall = psb_irq_preinstall,
1175 .irq_postinstall = psb_irq_postinstall,
1176 .irq_uninstall = psb_irq_uninstall,
1177 .irq_handler = psb_irq_handler,
1178 .enable_vblank = psb_enable_vblank,
1179 .disable_vblank = psb_disable_vblank,
1180 .get_vblank_counter = psb_get_vblank_counter,
1181 .lastclose = psb_lastclose,
1182 .open = psb_driver_open,
1183 .preclose = psb_driver_preclose,
1184 .postclose = psb_driver_close,
1185 .reclaim_buffers = drm_core_reclaim_buffers,
1186
1187 .gem_init_object = psb_gem_init_object,
1188 .gem_free_object = psb_gem_free_object,
1189 .gem_vm_ops = &psb_gem_vm_ops,
1190 .dumb_create = psb_gem_dumb_create,
1191 .dumb_map_offset = psb_gem_dumb_map_gtt,
1192 .dumb_destroy = psb_gem_dumb_destroy,
1193 .fops = &gma500_driver_fops,
1194 .name = DRIVER_NAME,
1195 .desc = DRIVER_DESC,
1196 .date = PSB_DRM_DRIVER_DATE,
1197 .major = PSB_DRM_DRIVER_MAJOR,
1198 .minor = PSB_DRM_DRIVER_MINOR,
1199 .patchlevel = PSB_DRM_DRIVER_PATCHLEVEL
1200};
1201
1202static struct pci_driver psb_pci_driver = {
1203 .name = DRIVER_NAME,
1204 .id_table = pciidlist,
1205 .probe = psb_probe,
1206 .remove = psb_remove,
1207 .driver.pm = &psb_pm_ops,
1208};
1209
1210static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1211{
1212 return drm_get_pci_dev(pdev, ent, &driver);
1213}
1214
1215static int __init psb_init(void)
1216{
1217 return drm_pci_init(&driver, &psb_pci_driver);
1218}
1219
1220static void __exit psb_exit(void)
1221{
1222 drm_pci_exit(&driver, &psb_pci_driver);
1223}
1224
1225late_initcall(psb_init);
1226module_exit(psb_exit);
1227
1228MODULE_AUTHOR("Alan Cox <alan@linux.intel.com> and others");
1229MODULE_DESCRIPTION(DRIVER_DESC);
1230MODULE_LICENSE("GPL");
diff --git a/drivers/staging/gma500/psb_drv.h b/drivers/staging/gma500/psb_drv.h
deleted file mode 100644
index 11d963a055be..000000000000
--- a/drivers/staging/gma500/psb_drv.h
+++ /dev/null
@@ -1,952 +0,0 @@
1/**************************************************************************
2 * Copyright (c) 2007-2011, Intel Corporation.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 **************************************************************************/
19
20#ifndef _PSB_DRV_H_
21#define _PSB_DRV_H_
22
23#include <linux/kref.h>
24
25#include <drm/drmP.h>
26#include "drm_global.h"
27#include "gem_glue.h"
28#include "psb_drm.h"
29#include "psb_reg.h"
30#include "psb_intel_drv.h"
31#include "gtt.h"
32#include "power.h"
33#include "mrst.h"
34#include "medfield.h"
35
36/* Append new drm mode definition here, align with libdrm definition */
37#define DRM_MODE_SCALE_NO_SCALE 2
38
39enum {
40 CHIP_PSB_8108 = 0, /* Poulsbo */
41 CHIP_PSB_8109 = 1, /* Poulsbo */
42 CHIP_MRST_4100 = 2, /* Moorestown/Oaktrail */
43 CHIP_MFLD_0130 = 3, /* Medfield */
44};
45
46#define IS_PSB(dev) (((dev)->pci_device & 0xfffe) == 0x8108)
47#define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100)
48#define IS_MFLD(dev) (((dev)->pci_device & 0xfff8) == 0x0130)
49
50/*
51 * Driver definitions
52 */
53
54#define DRIVER_NAME "gma500"
55#define DRIVER_DESC "DRM driver for the Intel GMA500"
56
57#define PSB_DRM_DRIVER_DATE "2011-06-06"
58#define PSB_DRM_DRIVER_MAJOR 1
59#define PSB_DRM_DRIVER_MINOR 0
60#define PSB_DRM_DRIVER_PATCHLEVEL 0
61
62/*
63 * Hardware offsets
64 */
65#define PSB_VDC_OFFSET 0x00000000
66#define PSB_VDC_SIZE 0x000080000
67#define MRST_MMIO_SIZE 0x0000C0000
68#define MDFLD_MMIO_SIZE 0x000100000
69#define PSB_SGX_SIZE 0x8000
70#define PSB_SGX_OFFSET 0x00040000
71#define MRST_SGX_OFFSET 0x00080000
72/*
73 * PCI resource identifiers
74 */
75#define PSB_MMIO_RESOURCE 0
76#define PSB_GATT_RESOURCE 2
77#define PSB_GTT_RESOURCE 3
78/*
79 * PCI configuration
80 */
81#define PSB_GMCH_CTRL 0x52
82#define PSB_BSM 0x5C
83#define _PSB_GMCH_ENABLED 0x4
84#define PSB_PGETBL_CTL 0x2020
85#define _PSB_PGETBL_ENABLED 0x00000001
86#define PSB_SGX_2D_SLAVE_PORT 0x4000
87
88/* To get rid of */
89#define PSB_TT_PRIV0_LIMIT (256*1024*1024)
90#define PSB_TT_PRIV0_PLIMIT (PSB_TT_PRIV0_LIMIT >> PAGE_SHIFT)
91
92/*
93 * SGX side MMU definitions (these can probably go)
94 */
95
96/*
97 * Flags for external memory type field.
98 */
99#define PSB_MMU_CACHED_MEMORY 0x0001 /* Bind to MMU only */
100#define PSB_MMU_RO_MEMORY 0x0002 /* MMU RO memory */
101#define PSB_MMU_WO_MEMORY 0x0004 /* MMU WO memory */
102/*
103 * PTE's and PDE's
104 */
105#define PSB_PDE_MASK 0x003FFFFF
106#define PSB_PDE_SHIFT 22
107#define PSB_PTE_SHIFT 12
108/*
109 * Cache control
110 */
111#define PSB_PTE_VALID 0x0001 /* PTE / PDE valid */
112#define PSB_PTE_WO 0x0002 /* Write only */
113#define PSB_PTE_RO 0x0004 /* Read only */
114#define PSB_PTE_CACHED 0x0008 /* CPU cache coherent */
115
116/*
117 * VDC registers and bits
118 */
119#define PSB_MSVDX_CLOCKGATING 0x2064
120#define PSB_TOPAZ_CLOCKGATING 0x2068
121#define PSB_HWSTAM 0x2098
122#define PSB_INSTPM 0x20C0
123#define PSB_INT_IDENTITY_R 0x20A4
124#define _MDFLD_PIPEC_EVENT_FLAG (1<<2)
125#define _MDFLD_PIPEC_VBLANK_FLAG (1<<3)
126#define _PSB_DPST_PIPEB_FLAG (1<<4)
127#define _MDFLD_PIPEB_EVENT_FLAG (1<<4)
128#define _PSB_VSYNC_PIPEB_FLAG (1<<5)
129#define _PSB_DPST_PIPEA_FLAG (1<<6)
130#define _PSB_PIPEA_EVENT_FLAG (1<<6)
131#define _PSB_VSYNC_PIPEA_FLAG (1<<7)
132#define _MDFLD_MIPIA_FLAG (1<<16)
133#define _MDFLD_MIPIC_FLAG (1<<17)
134#define _PSB_IRQ_SGX_FLAG (1<<18)
135#define _PSB_IRQ_MSVDX_FLAG (1<<19)
136#define _LNC_IRQ_TOPAZ_FLAG (1<<20)
137
138#define _PSB_PIPE_EVENT_FLAG (_PSB_VSYNC_PIPEA_FLAG | \
139 _PSB_VSYNC_PIPEB_FLAG)
140
141/* This flag includes all the display IRQ bits except the vblank IRQs. */
142#define _MDFLD_DISP_ALL_IRQ_FLAG (_MDFLD_PIPEC_EVENT_FLAG | \
143 _MDFLD_PIPEB_EVENT_FLAG | \
144 _PSB_PIPEA_EVENT_FLAG | \
145 _PSB_VSYNC_PIPEA_FLAG | \
146 _MDFLD_MIPIA_FLAG | \
147 _MDFLD_MIPIC_FLAG)
148#define PSB_INT_IDENTITY_R 0x20A4
149#define PSB_INT_MASK_R 0x20A8
150#define PSB_INT_ENABLE_R 0x20A0
151
152#define _PSB_MMU_ER_MASK 0x0001FF00
153#define _PSB_MMU_ER_HOST (1 << 16)
154#define GPIOA 0x5010
155#define GPIOB 0x5014
156#define GPIOC 0x5018
157#define GPIOD 0x501c
158#define GPIOE 0x5020
159#define GPIOF 0x5024
160#define GPIOG 0x5028
161#define GPIOH 0x502c
162#define GPIO_CLOCK_DIR_MASK (1 << 0)
163#define GPIO_CLOCK_DIR_IN (0 << 1)
164#define GPIO_CLOCK_DIR_OUT (1 << 1)
165#define GPIO_CLOCK_VAL_MASK (1 << 2)
166#define GPIO_CLOCK_VAL_OUT (1 << 3)
167#define GPIO_CLOCK_VAL_IN (1 << 4)
168#define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
169#define GPIO_DATA_DIR_MASK (1 << 8)
170#define GPIO_DATA_DIR_IN (0 << 9)
171#define GPIO_DATA_DIR_OUT (1 << 9)
172#define GPIO_DATA_VAL_MASK (1 << 10)
173#define GPIO_DATA_VAL_OUT (1 << 11)
174#define GPIO_DATA_VAL_IN (1 << 12)
175#define GPIO_DATA_PULLUP_DISABLE (1 << 13)
176
177#define VCLK_DIVISOR_VGA0 0x6000
178#define VCLK_DIVISOR_VGA1 0x6004
179#define VCLK_POST_DIV 0x6010
180
181#define PSB_COMM_2D (PSB_ENGINE_2D << 4)
182#define PSB_COMM_3D (PSB_ENGINE_3D << 4)
183#define PSB_COMM_TA (PSB_ENGINE_TA << 4)
184#define PSB_COMM_HP (PSB_ENGINE_HP << 4)
185#define PSB_COMM_USER_IRQ (1024 >> 2)
186#define PSB_COMM_USER_IRQ_LOST (PSB_COMM_USER_IRQ + 1)
187#define PSB_COMM_FW (2048 >> 2)
188
189#define PSB_UIRQ_VISTEST 1
190#define PSB_UIRQ_OOM_REPLY 2
191#define PSB_UIRQ_FIRE_TA_REPLY 3
192#define PSB_UIRQ_FIRE_RASTER_REPLY 4
193
194#define PSB_2D_SIZE (256*1024*1024)
195#define PSB_MAX_RELOC_PAGES 1024
196
197#define PSB_LOW_REG_OFFS 0x0204
198#define PSB_HIGH_REG_OFFS 0x0600
199
200#define PSB_NUM_VBLANKS 2
201
202
203#define PSB_2D_SIZE (256*1024*1024)
204#define PSB_MAX_RELOC_PAGES 1024
205
206#define PSB_LOW_REG_OFFS 0x0204
207#define PSB_HIGH_REG_OFFS 0x0600
208
209#define PSB_NUM_VBLANKS 2
210#define PSB_WATCHDOG_DELAY (DRM_HZ * 2)
211#define PSB_LID_DELAY (DRM_HZ / 10)
212
213#define MDFLD_PNW_B0 0x04
214#define MDFLD_PNW_C0 0x08
215
216#define MDFLD_DSR_2D_3D_0 (1 << 0)
217#define MDFLD_DSR_2D_3D_2 (1 << 1)
218#define MDFLD_DSR_CURSOR_0 (1 << 2)
219#define MDFLD_DSR_CURSOR_2 (1 << 3)
220#define MDFLD_DSR_OVERLAY_0 (1 << 4)
221#define MDFLD_DSR_OVERLAY_2 (1 << 5)
222#define MDFLD_DSR_MIPI_CONTROL (1 << 6)
223#define MDFLD_DSR_DAMAGE_MASK_0 ((1 << 0) | (1 << 2) | (1 << 4))
224#define MDFLD_DSR_DAMAGE_MASK_2 ((1 << 1) | (1 << 3) | (1 << 5))
225#define MDFLD_DSR_2D_3D (MDFLD_DSR_2D_3D_0 | MDFLD_DSR_2D_3D_2)
226
227#define MDFLD_DSR_RR 45
228#define MDFLD_DPU_ENABLE (1 << 31)
229#define MDFLD_DSR_FULLSCREEN (1 << 30)
230#define MDFLD_DSR_DELAY (DRM_HZ / MDFLD_DSR_RR)
231
232#define PSB_PWR_STATE_ON 1
233#define PSB_PWR_STATE_OFF 2
234
235#define PSB_PMPOLICY_NOPM 0
236#define PSB_PMPOLICY_CLOCKGATING 1
237#define PSB_PMPOLICY_POWERDOWN 2
238
239#define PSB_PMSTATE_POWERUP 0
240#define PSB_PMSTATE_CLOCKGATED 1
241#define PSB_PMSTATE_POWERDOWN 2
242#define PSB_PCIx_MSI_ADDR_LOC 0x94
243#define PSB_PCIx_MSI_DATA_LOC 0x98
244
245/* Medfield crystal settings */
246#define KSEL_CRYSTAL_19 1
247#define KSEL_BYPASS_19 5
248#define KSEL_BYPASS_25 6
249#define KSEL_BYPASS_83_100 7
250
251struct opregion_header;
252struct opregion_acpi;
253struct opregion_swsci;
254struct opregion_asle;
255
256struct psb_intel_opregion {
257 struct opregion_header *header;
258 struct opregion_acpi *acpi;
259 struct opregion_swsci *swsci;
260 struct opregion_asle *asle;
261 int enabled;
262};
263
264struct psb_ops;
265
266struct drm_psb_private {
267 struct drm_device *dev;
268 const struct psb_ops *ops;
269
270 struct psb_gtt gtt;
271
272 /* GTT Memory manager */
273 struct psb_gtt_mm *gtt_mm;
274 struct page *scratch_page;
275 u32 *gtt_map;
276 uint32_t stolen_base;
277 void *vram_addr;
278 unsigned long vram_stolen_size;
279 int gtt_initialized;
280 u16 gmch_ctrl; /* Saved GTT setup */
281 u32 pge_ctl;
282
283 struct mutex gtt_mutex;
284 struct resource *gtt_mem; /* Our PCI resource */
285
286 struct psb_mmu_driver *mmu;
287 struct psb_mmu_pd *pf_pd;
288
289 /*
290 * Register base
291 */
292
293 uint8_t *sgx_reg;
294 uint8_t *vdc_reg;
295 uint32_t gatt_free_offset;
296
297 /*
298 * Fencing / irq.
299 */
300
301 uint32_t vdc_irq_mask;
302 uint32_t pipestat[PSB_NUM_PIPE];
303
304 spinlock_t irqmask_lock;
305
306 /*
307 * Power
308 */
309
310 bool suspended;
311 bool display_power;
312 int display_count;
313
314 /*
315 * Modesetting
316 */
317 struct psb_intel_mode_device mode_dev;
318
319 struct drm_crtc *plane_to_crtc_mapping[PSB_NUM_PIPE];
320 struct drm_crtc *pipe_to_crtc_mapping[PSB_NUM_PIPE];
321 uint32_t num_pipe;
322
323 /*
324 * OSPM info (Power management base) (can go ?)
325 */
326 uint32_t ospm_base;
327
328 /*
329 * Sizes info
330 */
331
332 struct drm_psb_sizes_arg sizes;
333
334 u32 fuse_reg_value;
335 u32 video_device_fuse;
336
337 /* PCI revision ID for B0:D2:F0 */
338 uint8_t platform_rev_id;
339
340 /*
341 * LVDS info
342 */
343 int backlight_duty_cycle; /* restore backlight to this value */
344 bool panel_wants_dither;
345 struct drm_display_mode *panel_fixed_mode;
346 struct drm_display_mode *lfp_lvds_vbt_mode;
347 struct drm_display_mode *sdvo_lvds_vbt_mode;
348
349 struct bdb_lvds_backlight *lvds_bl; /* LVDS backlight info from VBT */
350 struct psb_intel_i2c_chan *lvds_i2c_bus;
351
352 /* Feature bits from the VBIOS */
353 unsigned int int_tv_support:1;
354 unsigned int lvds_dither:1;
355 unsigned int lvds_vbt:1;
356 unsigned int int_crt_support:1;
357 unsigned int lvds_use_ssc:1;
358 int lvds_ssc_freq;
359 bool is_lvds_on;
360 bool is_mipi_on;
361 u32 mipi_ctrl_display;
362
363 unsigned int core_freq;
364 uint32_t iLVDS_enable;
365
366 /* Runtime PM state */
367 int rpm_enabled;
368
369 /* MID specific */
370 struct mrst_vbt vbt_data;
371 struct mrst_gct_data gct_data;
372
373 /* MIPI Panel type etc */
374 int panel_id;
375 bool dual_mipi; /* dual display - DPI & DBI */
376 bool dpi_panel_on; /* The DPI panel power is on */
377	bool dpi_panel_on2; /* The second DPI panel power is on */
378	bool dbi_panel_on; /* The DBI panel power is on */
379	bool dbi_panel_on2; /* The second DBI panel power is on */
380 u32 dsr_fb_update; /* DSR FB update counter */
381
382 /* Moorestown HDMI state */
383 struct mrst_hdmi_dev *hdmi_priv;
384
385 /* Moorestown pipe config register value cache */
386 uint32_t pipeconf;
387 uint32_t pipeconf1;
388 uint32_t pipeconf2;
389
390 /* Moorestown plane control register value cache */
391 uint32_t dspcntr;
392 uint32_t dspcntr1;
393 uint32_t dspcntr2;
394
395 /* Moorestown MM backlight cache */
396 uint8_t saveBKLTCNT;
397 uint8_t saveBKLTREQ;
398 uint8_t saveBKLTBRTL;
399
400 /*
401 * Register state
402 */
403 uint32_t saveDSPACNTR;
404 uint32_t saveDSPBCNTR;
405 uint32_t savePIPEACONF;
406 uint32_t savePIPEBCONF;
407 uint32_t savePIPEASRC;
408 uint32_t savePIPEBSRC;
409 uint32_t saveFPA0;
410 uint32_t saveFPA1;
411 uint32_t saveDPLL_A;
412 uint32_t saveDPLL_A_MD;
413 uint32_t saveHTOTAL_A;
414 uint32_t saveHBLANK_A;
415 uint32_t saveHSYNC_A;
416 uint32_t saveVTOTAL_A;
417 uint32_t saveVBLANK_A;
418 uint32_t saveVSYNC_A;
419 uint32_t saveDSPASTRIDE;
420 uint32_t saveDSPASIZE;
421 uint32_t saveDSPAPOS;
422 uint32_t saveDSPABASE;
423 uint32_t saveDSPASURF;
424 uint32_t saveDSPASTATUS;
425 uint32_t saveFPB0;
426 uint32_t saveFPB1;
427 uint32_t saveDPLL_B;
428 uint32_t saveDPLL_B_MD;
429 uint32_t saveHTOTAL_B;
430 uint32_t saveHBLANK_B;
431 uint32_t saveHSYNC_B;
432 uint32_t saveVTOTAL_B;
433 uint32_t saveVBLANK_B;
434 uint32_t saveVSYNC_B;
435 uint32_t saveDSPBSTRIDE;
436 uint32_t saveDSPBSIZE;
437 uint32_t saveDSPBPOS;
438 uint32_t saveDSPBBASE;
439 uint32_t saveDSPBSURF;
440 uint32_t saveDSPBSTATUS;
441 uint32_t saveVCLK_DIVISOR_VGA0;
442 uint32_t saveVCLK_DIVISOR_VGA1;
443 uint32_t saveVCLK_POST_DIV;
444 uint32_t saveVGACNTRL;
445 uint32_t saveADPA;
446 uint32_t saveLVDS;
447 uint32_t saveDVOA;
448 uint32_t saveDVOB;
449 uint32_t saveDVOC;
450 uint32_t savePP_ON;
451 uint32_t savePP_OFF;
452 uint32_t savePP_CONTROL;
453 uint32_t savePP_CYCLE;
454 uint32_t savePFIT_CONTROL;
455 uint32_t savePaletteA[256];
456 uint32_t savePaletteB[256];
457 uint32_t saveBLC_PWM_CTL2;
458 uint32_t saveBLC_PWM_CTL;
459 uint32_t saveCLOCKGATING;
460 uint32_t saveDSPARB;
461 uint32_t saveDSPATILEOFF;
462 uint32_t saveDSPBTILEOFF;
463 uint32_t saveDSPAADDR;
464 uint32_t saveDSPBADDR;
465 uint32_t savePFIT_AUTO_RATIOS;
466 uint32_t savePFIT_PGM_RATIOS;
467 uint32_t savePP_ON_DELAYS;
468 uint32_t savePP_OFF_DELAYS;
469 uint32_t savePP_DIVISOR;
470 uint32_t saveBSM;
471 uint32_t saveVBT;
472 uint32_t saveBCLRPAT_A;
473 uint32_t saveBCLRPAT_B;
474 uint32_t saveDSPALINOFF;
475 uint32_t saveDSPBLINOFF;
476 uint32_t savePERF_MODE;
477 uint32_t saveDSPFW1;
478 uint32_t saveDSPFW2;
479 uint32_t saveDSPFW3;
480 uint32_t saveDSPFW4;
481 uint32_t saveDSPFW5;
482 uint32_t saveDSPFW6;
483 uint32_t saveCHICKENBIT;
484 uint32_t saveDSPACURSOR_CTRL;
485 uint32_t saveDSPBCURSOR_CTRL;
486 uint32_t saveDSPACURSOR_BASE;
487 uint32_t saveDSPBCURSOR_BASE;
488 uint32_t saveDSPACURSOR_POS;
489 uint32_t saveDSPBCURSOR_POS;
490 uint32_t save_palette_a[256];
491 uint32_t save_palette_b[256];
492 uint32_t saveOV_OVADD;
493 uint32_t saveOV_OGAMC0;
494 uint32_t saveOV_OGAMC1;
495 uint32_t saveOV_OGAMC2;
496 uint32_t saveOV_OGAMC3;
497 uint32_t saveOV_OGAMC4;
498 uint32_t saveOV_OGAMC5;
499 uint32_t saveOVC_OVADD;
500 uint32_t saveOVC_OGAMC0;
501 uint32_t saveOVC_OGAMC1;
502 uint32_t saveOVC_OGAMC2;
503 uint32_t saveOVC_OGAMC3;
504 uint32_t saveOVC_OGAMC4;
505 uint32_t saveOVC_OGAMC5;
506
507 /* MSI reg save */
508 uint32_t msi_addr;
509 uint32_t msi_data;
510
511 /* Medfield specific register save state */
512 uint32_t saveHDMIPHYMISCCTL;
513 uint32_t saveHDMIB_CONTROL;
514 uint32_t saveDSPCCNTR;
515 uint32_t savePIPECCONF;
516 uint32_t savePIPECSRC;
517 uint32_t saveHTOTAL_C;
518 uint32_t saveHBLANK_C;
519 uint32_t saveHSYNC_C;
520 uint32_t saveVTOTAL_C;
521 uint32_t saveVBLANK_C;
522 uint32_t saveVSYNC_C;
523 uint32_t saveDSPCSTRIDE;
524 uint32_t saveDSPCSIZE;
525 uint32_t saveDSPCPOS;
526 uint32_t saveDSPCSURF;
527 uint32_t saveDSPCSTATUS;
528 uint32_t saveDSPCLINOFF;
529 uint32_t saveDSPCTILEOFF;
530 uint32_t saveDSPCCURSOR_CTRL;
531 uint32_t saveDSPCCURSOR_BASE;
532 uint32_t saveDSPCCURSOR_POS;
533 uint32_t save_palette_c[256];
534 uint32_t saveOV_OVADD_C;
535 uint32_t saveOV_OGAMC0_C;
536 uint32_t saveOV_OGAMC1_C;
537 uint32_t saveOV_OGAMC2_C;
538 uint32_t saveOV_OGAMC3_C;
539 uint32_t saveOV_OGAMC4_C;
540 uint32_t saveOV_OGAMC5_C;
541
542 /* DSI register save */
543 uint32_t saveDEVICE_READY_REG;
544 uint32_t saveINTR_EN_REG;
545 uint32_t saveDSI_FUNC_PRG_REG;
546 uint32_t saveHS_TX_TIMEOUT_REG;
547 uint32_t saveLP_RX_TIMEOUT_REG;
548 uint32_t saveTURN_AROUND_TIMEOUT_REG;
549 uint32_t saveDEVICE_RESET_REG;
550 uint32_t saveDPI_RESOLUTION_REG;
551 uint32_t saveHORIZ_SYNC_PAD_COUNT_REG;
552 uint32_t saveHORIZ_BACK_PORCH_COUNT_REG;
553 uint32_t saveHORIZ_FRONT_PORCH_COUNT_REG;
554 uint32_t saveHORIZ_ACTIVE_AREA_COUNT_REG;
555 uint32_t saveVERT_SYNC_PAD_COUNT_REG;
556 uint32_t saveVERT_BACK_PORCH_COUNT_REG;
557 uint32_t saveVERT_FRONT_PORCH_COUNT_REG;
558 uint32_t saveHIGH_LOW_SWITCH_COUNT_REG;
559 uint32_t saveINIT_COUNT_REG;
560 uint32_t saveMAX_RET_PAK_REG;
561 uint32_t saveVIDEO_FMT_REG;
562 uint32_t saveEOT_DISABLE_REG;
563 uint32_t saveLP_BYTECLK_REG;
564 uint32_t saveHS_LS_DBI_ENABLE_REG;
565 uint32_t saveTXCLKESC_REG;
566 uint32_t saveDPHY_PARAM_REG;
567 uint32_t saveMIPI_CONTROL_REG;
568 uint32_t saveMIPI;
569 uint32_t saveMIPI_C;
570
571 /* DPST register save */
572 uint32_t saveHISTOGRAM_INT_CONTROL_REG;
573 uint32_t saveHISTOGRAM_LOGIC_CONTROL_REG;
574 uint32_t savePWM_CONTROL_LOGIC;
575
576 /*
577 * DSI info.
578 */
579 void * dbi_dsr_info;
580 void * dbi_dpu_info;
581 void * dsi_configs[2];
582 /*
583 * LID-Switch
584 */
585 spinlock_t lid_lock;
586 struct timer_list lid_timer;
587 struct psb_intel_opregion opregion;
588 u32 *lid_state;
589 u32 lid_last_state;
590
591 /*
592 * Watchdog
593 */
594
595 uint32_t apm_reg;
596 uint16_t apm_base;
597
598 /*
599 * Used for modifying backlight from
600 * xrandr -- consider removing and using HAL instead
601 */
602 struct backlight_device *backlight_device;
603 struct drm_property *backlight_property;
604 uint32_t blc_adj1;
605 uint32_t blc_adj2;
606
607 void *fbdev;
608 /* DPST state */
609 uint32_t dsr_idle_count;
610 bool is_in_idle;
611 bool dsr_enable;
612 void (*exit_idle)(struct drm_device *dev, u32 update_src);
613
614 /* 2D acceleration */
615 spinlock_t lock_2d;
616
617 /* FIXME: Arrays anyone ? */
618 struct mdfld_dsi_encoder *encoder0;
619 struct mdfld_dsi_encoder *encoder2;
620 struct mdfld_dsi_dbi_output * dbi_output;
621 struct mdfld_dsi_dbi_output * dbi_output2;
622 u32 bpp;
623 u32 bpp2;
624
625 bool dispstatus;
626};
627
628
629/*
630 * Operations for each board type
631 */
632
633struct psb_ops {
634 const char *name;
635 unsigned int accel_2d:1;
636 int pipes; /* Number of output pipes */
637 int crtcs; /* Number of CRTCs */
638 int sgx_offset; /* Base offset of SGX device */
639
640 /* Sub functions */
641 struct drm_crtc_helper_funcs const *crtc_helper;
642 struct drm_crtc_funcs const *crtc_funcs;
643
644 /* Setup hooks */
645 int (*chip_setup)(struct drm_device *dev);
646 void (*chip_teardown)(struct drm_device *dev);
647
648 /* Display management hooks */
649 int (*output_init)(struct drm_device *dev);
650 /* Power management hooks */
651 void (*init_pm)(struct drm_device *dev);
652 int (*save_regs)(struct drm_device *dev);
653 int (*restore_regs)(struct drm_device *dev);
654 int (*power_up)(struct drm_device *dev);
655 int (*power_down)(struct drm_device *dev);
656
657 void (*lvds_bl_power)(struct drm_device *dev, bool on);
658#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
659 /* Backlight */
660 int (*backlight_init)(struct drm_device *dev);
661#endif
662 int i2c_bus; /* I2C bus identifier for Moorestown */
663};
664
665
666
667struct psb_mmu_driver;
668
669extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int);
670extern int drm_pick_crtcs(struct drm_device *dev);
671
672static inline struct drm_psb_private *psb_priv(struct drm_device *dev)
673{
674 return (struct drm_psb_private *) dev->dev_private;
675}
676
677/*
678 * MMU stuff.
679 */
680
681extern struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
682 int trap_pagefaults,
683 int invalid_type,
684 struct drm_psb_private *dev_priv);
685extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver);
686extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver
687 *driver);
688extern void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd, uint32_t mmu_offset,
689 uint32_t gtt_start, uint32_t gtt_pages);
690extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
691 int trap_pagefaults,
692 int invalid_type);
693extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
694extern void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot);
695extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
696 unsigned long address,
697 uint32_t num_pages);
698extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
699 uint32_t start_pfn,
700 unsigned long address,
701 uint32_t num_pages, int type);
702extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
703 unsigned long *pfn);
704
705/*
706 * Enable / disable MMU for different requestors.
707 */
708
709
710extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
711extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
712 unsigned long address, uint32_t num_pages,
713 uint32_t desired_tile_stride,
714 uint32_t hw_tile_stride, int type);
715extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd,
716 unsigned long address, uint32_t num_pages,
717 uint32_t desired_tile_stride,
718 uint32_t hw_tile_stride);
719/*
720 *psb_irq.c
721 */
722
723extern irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
724extern int psb_irq_enable_dpst(struct drm_device *dev);
725extern int psb_irq_disable_dpst(struct drm_device *dev);
726extern void psb_irq_preinstall(struct drm_device *dev);
727extern int psb_irq_postinstall(struct drm_device *dev);
728extern void psb_irq_uninstall(struct drm_device *dev);
729extern void psb_irq_turn_on_dpst(struct drm_device *dev);
730extern void psb_irq_turn_off_dpst(struct drm_device *dev);
731
732extern void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands);
733extern int psb_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
734extern int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence);
735extern int psb_enable_vblank(struct drm_device *dev, int crtc);
736extern void psb_disable_vblank(struct drm_device *dev, int crtc);
737void
738psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
739
740void
741psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
742
743extern u32 psb_get_vblank_counter(struct drm_device *dev, int crtc);
744
745extern int mdfld_enable_te(struct drm_device *dev, int pipe);
746extern void mdfld_disable_te(struct drm_device *dev, int pipe);
747
748/*
749 * intel_opregion.c
750 */
751extern int gma_intel_opregion_init(struct drm_device *dev);
752extern int gma_intel_opregion_exit(struct drm_device *dev);
753
754/*
755 * framebuffer.c
756 */
757extern int psbfb_probed(struct drm_device *dev);
758extern int psbfb_remove(struct drm_device *dev,
759 struct drm_framebuffer *fb);
760/*
761 * accel_2d.c
762 */
763extern void psbfb_copyarea(struct fb_info *info,
764 const struct fb_copyarea *region);
765extern int psbfb_sync(struct fb_info *info);
766extern void psb_spank(struct drm_psb_private *dev_priv);
767extern int psb_accel_ioctl(struct drm_device *dev, void *data,
768 struct drm_file *file);
769
770/*
771 * psb_reset.c
772 */
773
774extern void psb_lid_timer_init(struct drm_psb_private *dev_priv);
775extern void psb_lid_timer_takedown(struct drm_psb_private *dev_priv);
776extern void psb_print_pagefault(struct drm_psb_private *dev_priv);
777
778/* modesetting */
779extern void psb_modeset_init(struct drm_device *dev);
780extern void psb_modeset_cleanup(struct drm_device *dev);
781extern int psb_fbdev_init(struct drm_device *dev);
782
783/* backlight.c */
784int gma_backlight_init(struct drm_device *dev);
785void gma_backlight_exit(struct drm_device *dev);
786
787/* mrst_crtc.c */
788extern const struct drm_crtc_helper_funcs mrst_helper_funcs;
789
790/* mrst_lvds.c */
791extern void mrst_lvds_init(struct drm_device *dev,
792 struct psb_intel_mode_device *mode_dev);
793
794/* psb_intel_display.c */
795extern const struct drm_crtc_helper_funcs psb_intel_helper_funcs;
796extern const struct drm_crtc_funcs psb_intel_crtc_funcs;
797
798/* psb_intel_lvds.c */
799extern const struct drm_connector_helper_funcs
800 psb_intel_lvds_connector_helper_funcs;
801extern const struct drm_connector_funcs psb_intel_lvds_connector_funcs;
802
803/* gem.c */
804extern int psb_gem_init_object(struct drm_gem_object *obj);
805extern void psb_gem_free_object(struct drm_gem_object *obj);
806extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
807 struct drm_file *file);
808extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
809 struct drm_mode_create_dumb *args);
810extern int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
811 uint32_t handle);
812extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
813 uint32_t handle, uint64_t *offset);
814extern int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
815extern int psb_gem_create_ioctl(struct drm_device *dev, void *data,
816 struct drm_file *file);
817extern int psb_gem_mmap_ioctl(struct drm_device *dev, void *data,
818 struct drm_file *file);
819
820/* psb_device.c */
821extern const struct psb_ops psb_chip_ops;
822
823/* mrst_device.c */
824extern const struct psb_ops mrst_chip_ops;
825
826/* mdfld_device.c */
827extern const struct psb_ops mdfld_chip_ops;
828
829/* cdv_device.c */
830extern const struct psb_ops cdv_chip_ops;
831
832/*
833 * Debug print bits setting
834 */
835#define PSB_D_GENERAL (1 << 0)
836#define PSB_D_INIT (1 << 1)
837#define PSB_D_IRQ (1 << 2)
838#define PSB_D_ENTRY (1 << 3)
839/* debug the H/V back porch/front porch (BP/FP) count reads */
840#define PSB_D_HV (1 << 4)
841#define PSB_D_DBI_BF (1 << 5)
842#define PSB_D_PM (1 << 6)
843#define PSB_D_RENDER (1 << 7)
844#define PSB_D_REG (1 << 8)
845#define PSB_D_MSVDX (1 << 9)
846#define PSB_D_TOPAZ (1 << 10)
847
848extern int drm_psb_no_fb;
849extern int drm_idle_check_interval;
850
851/*
852 * Utilities
853 */
854
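/*
 * The MRST_/MDFLD_MSG_* helpers below tunnel sideband register accesses
 * through the PCI root complex (bus 0, slot 0): a command word built from
 * an opcode, port and offset is written to config offset 0xD0 and the
 * payload is transferred through config offset 0xD4.  Only the opcode
 * differs between the Moorestown and Medfield variants.
 */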
855static inline u32 MRST_MSG_READ32(uint port, uint offset)
856{
857 int mcr = (0xD0<<24) | (port << 16) | (offset << 8);
858 uint32_t ret_val = 0;
859 struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
860 pci_write_config_dword(pci_root, 0xD0, mcr);
861 pci_read_config_dword(pci_root, 0xD4, &ret_val);
862 pci_dev_put(pci_root);
863 return ret_val;
864}
865static inline void MRST_MSG_WRITE32(uint port, uint offset, u32 value)
866{
867 int mcr = (0xE0<<24) | (port << 16) | (offset << 8) | 0xF0;
868 struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
869 pci_write_config_dword(pci_root, 0xD4, value);
870 pci_write_config_dword(pci_root, 0xD0, mcr);
871 pci_dev_put(pci_root);
872}
873static inline u32 MDFLD_MSG_READ32(uint port, uint offset)
874{
875 int mcr = (0x10<<24) | (port << 16) | (offset << 8);
876 uint32_t ret_val = 0;
877 struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
878 pci_write_config_dword(pci_root, 0xD0, mcr);
879 pci_read_config_dword(pci_root, 0xD4, &ret_val);
880 pci_dev_put(pci_root);
881 return ret_val;
882}
883static inline void MDFLD_MSG_WRITE32(uint port, uint offset, u32 value)
884{
885 int mcr = (0x11<<24) | (port << 16) | (offset << 8) | 0xF0;
886 struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
887 pci_write_config_dword(pci_root, 0xD4, value);
888 pci_write_config_dword(pci_root, 0xD0, mcr);
889 pci_dev_put(pci_root);
890}
891
892static inline uint32_t REGISTER_READ(struct drm_device *dev, uint32_t reg)
893{
894 struct drm_psb_private *dev_priv = dev->dev_private;
895 return ioread32(dev_priv->vdc_reg + reg);
896}
897
898#define REG_READ(reg) REGISTER_READ(dev, (reg))
899
900static inline void REGISTER_WRITE(struct drm_device *dev, uint32_t reg,
901 uint32_t val)
902{
903 struct drm_psb_private *dev_priv = dev->dev_private;
904 iowrite32((val), dev_priv->vdc_reg + (reg));
905}
906
907#define REG_WRITE(reg, val) REGISTER_WRITE(dev, (reg), (val))
908
909static inline void REGISTER_WRITE16(struct drm_device *dev,
910 uint32_t reg, uint32_t val)
911{
912 struct drm_psb_private *dev_priv = dev->dev_private;
913 iowrite16((val), dev_priv->vdc_reg + (reg));
914}
915
916#define REG_WRITE16(reg, val) REGISTER_WRITE16(dev, (reg), (val))
917
918static inline void REGISTER_WRITE8(struct drm_device *dev,
919 uint32_t reg, uint32_t val)
920{
921 struct drm_psb_private *dev_priv = dev->dev_private;
922 iowrite8((val), dev_priv->vdc_reg + (reg));
923}
924
925#define REG_WRITE8(reg, val) REGISTER_WRITE8(dev, (reg), (val))
926
927#define PSB_WVDC32(_val, _offs) iowrite32(_val, dev_priv->vdc_reg + (_offs))
928#define PSB_RVDC32(_offs) ioread32(dev_priv->vdc_reg + (_offs))
929
930/* #define TRAP_SGX_PM_FAULT 1 */
931#ifdef TRAP_SGX_PM_FAULT
932#define PSB_RSGX32(_offs) \
933({ \
934 if (inl(dev_priv->apm_base + PSB_APM_STS) & 0x3) { \
935 printk(KERN_ERR \
936 "access sgx when it's off!! (READ) %s, %d\n", \
937 __FILE__, __LINE__); \
938 melay(1000); \
939 } \
940 ioread32(dev_priv->sgx_reg + (_offs)); \
941})
942#else
943#define PSB_RSGX32(_offs) ioread32(dev_priv->sgx_reg + (_offs))
944#endif
945#define PSB_WSGX32(_val, _offs) iowrite32(_val, dev_priv->sgx_reg + (_offs))
946
947#define MSVDX_REG_DUMP 0
948
949#define PSB_WMSVDX32(_val, _offs) iowrite32(_val, dev_priv->msvdx_reg + (_offs))
950#define PSB_RMSVDX32(_offs) ioread32(dev_priv->msvdx_reg + (_offs))
951
952#endif
diff --git a/drivers/staging/gma500/psb_intel_display.c b/drivers/staging/gma500/psb_intel_display.c
deleted file mode 100644
index 85659613ae62..000000000000
--- a/drivers/staging/gma500/psb_intel_display.c
+++ /dev/null
@@ -1,1429 +0,0 @@
1/*
2 * Copyright © 2006-2011 Intel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Authors:
18 * Eric Anholt <eric@anholt.net>
19 */
20
21#include <linux/i2c.h>
22#include <linux/pm_runtime.h>
23
24#include <drm/drmP.h>
25#include "framebuffer.h"
26#include "psb_drv.h"
27#include "psb_intel_drv.h"
28#include "psb_intel_reg.h"
29#include "psb_intel_display.h"
30#include "power.h"
31
32#include "mdfld_output.h"
33
34struct psb_intel_clock_t {
35 /* given values */
36 int n;
37 int m1, m2;
38 int p1, p2;
39 /* derived values */
40 int dot;
41 int vco;
42 int m;
43 int p;
44};
45
46struct psb_intel_range_t {
47 int min, max;
48};
49
50struct psb_intel_p2_t {
51 int dot_limit;
52 int p2_slow, p2_fast;
53};
54
55#define INTEL_P2_NUM 2
56
57struct psb_intel_limit_t {
58 struct psb_intel_range_t dot, vco, n, m, m1, m2, p, p1;
59 struct psb_intel_p2_t p2;
60};
61
62#define I8XX_DOT_MIN 25000
63#define I8XX_DOT_MAX 350000
64#define I8XX_VCO_MIN 930000
65#define I8XX_VCO_MAX 1400000
66#define I8XX_N_MIN 3
67#define I8XX_N_MAX 16
68#define I8XX_M_MIN 96
69#define I8XX_M_MAX 140
70#define I8XX_M1_MIN 18
71#define I8XX_M1_MAX 26
72#define I8XX_M2_MIN 6
73#define I8XX_M2_MAX 16
74#define I8XX_P_MIN 4
75#define I8XX_P_MAX 128
76#define I8XX_P1_MIN 2
77#define I8XX_P1_MAX 33
78#define I8XX_P1_LVDS_MIN 1
79#define I8XX_P1_LVDS_MAX 6
80#define I8XX_P2_SLOW 4
81#define I8XX_P2_FAST 2
82#define I8XX_P2_LVDS_SLOW 14
83#define I8XX_P2_LVDS_FAST 14 /* No fast option */
84#define I8XX_P2_SLOW_LIMIT 165000
85
86#define I9XX_DOT_MIN 20000
87#define I9XX_DOT_MAX 400000
88#define I9XX_VCO_MIN 1400000
89#define I9XX_VCO_MAX 2800000
90#define I9XX_N_MIN 3
91#define I9XX_N_MAX 8
92#define I9XX_M_MIN 70
93#define I9XX_M_MAX 120
94#define I9XX_M1_MIN 10
95#define I9XX_M1_MAX 20
96#define I9XX_M2_MIN 5
97#define I9XX_M2_MAX 9
98#define I9XX_P_SDVO_DAC_MIN 5
99#define I9XX_P_SDVO_DAC_MAX 80
100#define I9XX_P_LVDS_MIN 7
101#define I9XX_P_LVDS_MAX 98
102#define I9XX_P1_MIN 1
103#define I9XX_P1_MAX 8
104#define I9XX_P2_SDVO_DAC_SLOW 10
105#define I9XX_P2_SDVO_DAC_FAST 5
106#define I9XX_P2_SDVO_DAC_SLOW_LIMIT 200000
107#define I9XX_P2_LVDS_SLOW 14
108#define I9XX_P2_LVDS_FAST 7
109#define I9XX_P2_LVDS_SLOW_LIMIT 112000
110
111#define INTEL_LIMIT_I8XX_DVO_DAC 0
112#define INTEL_LIMIT_I8XX_LVDS 1
113#define INTEL_LIMIT_I9XX_SDVO_DAC 2
114#define INTEL_LIMIT_I9XX_LVDS 3
115
116static const struct psb_intel_limit_t psb_intel_limits[] = {
117 { /* INTEL_LIMIT_I8XX_DVO_DAC */
118 .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
119 .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
120 .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
121 .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
122 .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
123 .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
124 .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
125 .p1 = {.min = I8XX_P1_MIN, .max = I8XX_P1_MAX},
126 .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
127 .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST},
128 },
129 { /* INTEL_LIMIT_I8XX_LVDS */
130 .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
131 .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
132 .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
133 .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
134 .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
135 .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
136 .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
137 .p1 = {.min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX},
138 .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
139 .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST},
140 },
141 { /* INTEL_LIMIT_I9XX_SDVO_DAC */
142 .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
143 .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX},
144 .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX},
145 .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX},
146 .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX},
147 .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX},
148 .p = {.min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX},
149 .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX},
150 .p2 = {.dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
151 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast =
152 I9XX_P2_SDVO_DAC_FAST},
153 },
154 { /* INTEL_LIMIT_I9XX_LVDS */
155 .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
156 .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX},
157 .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX},
158 .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX},
159 .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX},
160 .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX},
161 .p = {.min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX},
162 .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX},
163	 /* The single-channel range is 25-112 MHz, and dual-channel
164	  * is 80-224 MHz. Prefer single channel as much as possible.
165 */
166 .p2 = {.dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
167 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST},
168 },
169};
170
171static const struct psb_intel_limit_t *psb_intel_limit(struct drm_crtc *crtc)
172{
173 const struct psb_intel_limit_t *limit;
174
175 if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
176 limit = &psb_intel_limits[INTEL_LIMIT_I9XX_LVDS];
177 else
178 limit = &psb_intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
179 return limit;
180}
181
182/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
183
184static void i8xx_clock(int refclk, struct psb_intel_clock_t *clock)
185{
186 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
187 clock->p = clock->p1 * clock->p2;
188 clock->vco = refclk * clock->m / (clock->n + 2);
189 clock->dot = clock->vco / clock->p;
190}
191
192/** Derive the pixel clock for the given refclk and divisors for 9xx chips. */
193
194static void i9xx_clock(int refclk, struct psb_intel_clock_t *clock)
195{
196 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
197 clock->p = clock->p1 * clock->p2;
198 clock->vco = refclk * clock->m / (clock->n + 2);
199 clock->dot = clock->vco / clock->p;
200}
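/*
 * Worked example (illustrative values only): with refclk = 96000 kHz and
 * divisors n = 3, m1 = 12, m2 = 9, p1 = 2, p2 = 5 the equations above give
 * m = 5 * (12 + 2) + (9 + 2) = 81, p = 2 * 5 = 10,
 * vco = 96000 * 81 / (3 + 2) = 1555200 kHz and dot = 1555200 / 10 =
 * 155520 kHz, i.e. a ~155.5 MHz pixel clock, all within the I9XX limits.
 */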
201
202static void psb_intel_clock(struct drm_device *dev, int refclk,
203 struct psb_intel_clock_t *clock)
204{
205 return i9xx_clock(refclk, clock);
206}
207
208/**
209 * Returns whether any output on the specified pipe is of the specified type
210 */
211bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type)
212{
213 struct drm_device *dev = crtc->dev;
214 struct drm_mode_config *mode_config = &dev->mode_config;
215 struct drm_connector *l_entry;
216
217 list_for_each_entry(l_entry, &mode_config->connector_list, head) {
218 if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
219 struct psb_intel_output *psb_intel_output =
220 to_psb_intel_output(l_entry);
221 if (psb_intel_output->type == type)
222 return true;
223 }
224 }
225 return false;
226}
227
228#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; }
229/**
230 * Returns whether the given set of divisors is valid for a given refclk with
231 * the given connectors.
232 */
233
234static bool psb_intel_PLL_is_valid(struct drm_crtc *crtc,
235 struct psb_intel_clock_t *clock)
236{
237 const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
238
239 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
240 INTELPllInvalid("p1 out of range\n");
241 if (clock->p < limit->p.min || limit->p.max < clock->p)
242 INTELPllInvalid("p out of range\n");
243 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
244 INTELPllInvalid("m2 out of range\n");
245 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
246 INTELPllInvalid("m1 out of range\n");
247 if (clock->m1 <= clock->m2)
248 INTELPllInvalid("m1 <= m2\n");
249 if (clock->m < limit->m.min || limit->m.max < clock->m)
250 INTELPllInvalid("m out of range\n");
251 if (clock->n < limit->n.min || limit->n.max < clock->n)
252 INTELPllInvalid("n out of range\n");
253 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
254 INTELPllInvalid("vco out of range\n");
255 /* XXX: We may need to be checking "Dot clock"
256 * depending on the multiplier, connector, etc.,
257 * rather than just a single range.
258 */
259 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
260 INTELPllInvalid("dot out of range\n");
261
262 return true;
263}
264
265/**
266 * Returns a set of divisors for the desired target clock with the given
267 * refclk, or FALSE. The returned values represent the clock equation:
268 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
269 */
270static bool psb_intel_find_best_PLL(struct drm_crtc *crtc, int target,
271 int refclk,
272 struct psb_intel_clock_t *best_clock)
273{
274 struct drm_device *dev = crtc->dev;
275 struct psb_intel_clock_t clock;
276 const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
277 int err = target;
278
279 if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
280 (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
281 /*
282 * For LVDS, if the panel is on, just rely on its current
283 * settings for dual-channel. We haven't figured out how to
284 * reliably set up different single/dual channel state, if we
285 * even can.
286 */
287 if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
288 LVDS_CLKB_POWER_UP)
289 clock.p2 = limit->p2.p2_fast;
290 else
291 clock.p2 = limit->p2.p2_slow;
292 } else {
293 if (target < limit->p2.dot_limit)
294 clock.p2 = limit->p2.p2_slow;
295 else
296 clock.p2 = limit->p2.p2_fast;
297 }
298
299 memset(best_clock, 0, sizeof(*best_clock));
300
301 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
302 clock.m1++) {
303 for (clock.m2 = limit->m2.min;
304 clock.m2 < clock.m1 && clock.m2 <= limit->m2.max;
305 clock.m2++) {
306 for (clock.n = limit->n.min;
307 clock.n <= limit->n.max; clock.n++) {
308 for (clock.p1 = limit->p1.min;
309 clock.p1 <= limit->p1.max;
310 clock.p1++) {
311 int this_err;
312
313 psb_intel_clock(dev, refclk, &clock);
314
315 if (!psb_intel_PLL_is_valid
316 (crtc, &clock))
317 continue;
318
319 this_err = abs(clock.dot - target);
320 if (this_err < err) {
321 *best_clock = clock;
322 err = this_err;
323 }
324 }
325 }
326 }
327 }
328
329 return err != target;
330}
331
332void psb_intel_wait_for_vblank(struct drm_device *dev)
333{
334	/* Wait for 20 ms, i.e. one cycle at 50 Hz. */
335 mdelay(20);
336}
337
338int psb_intel_pipe_set_base(struct drm_crtc *crtc,
339 int x, int y, struct drm_framebuffer *old_fb)
340{
341 struct drm_device *dev = crtc->dev;
342 /* struct drm_i915_master_private *master_priv; */
343 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
344 struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
345 int pipe = psb_intel_crtc->pipe;
346 unsigned long start, offset;
347 int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
348 int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
349 int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
350 int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
351 u32 dspcntr;
352 int ret = 0;
353
354 if (!gma_power_begin(dev, true))
355 return 0;
356
357 /* no fb bound */
358 if (!crtc->fb) {
359 dev_dbg(dev->dev, "No FB bound\n");
360 goto psb_intel_pipe_cleaner;
361 }
362
363 /* We are displaying this buffer, make sure it is actually loaded
364 into the GTT */
365 ret = psb_gtt_pin(psbfb->gtt);
366 if (ret < 0)
367 goto psb_intel_pipe_set_base_exit;
368 start = psbfb->gtt->offset;
369
370 offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
371
372 REG_WRITE(dspstride, crtc->fb->pitches[0]);
373
374 dspcntr = REG_READ(dspcntr_reg);
375 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
376
377 switch (crtc->fb->bits_per_pixel) {
378 case 8:
379 dspcntr |= DISPPLANE_8BPP;
380 break;
381 case 16:
382 if (crtc->fb->depth == 15)
383 dspcntr |= DISPPLANE_15_16BPP;
384 else
385 dspcntr |= DISPPLANE_16BPP;
386 break;
387 case 24:
388 case 32:
389 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
390 break;
391 default:
392 dev_err(dev->dev, "Unknown color depth\n");
393 ret = -EINVAL;
394 psb_gtt_unpin(psbfb->gtt);
395 goto psb_intel_pipe_set_base_exit;
396 }
397 REG_WRITE(dspcntr_reg, dspcntr);
398
399
400 if (0 /* FIXMEAC - check what PSB needs */) {
401 REG_WRITE(dspbase, offset);
402 REG_READ(dspbase);
403 REG_WRITE(dspsurf, start);
404 REG_READ(dspsurf);
405 } else {
406 REG_WRITE(dspbase, start + offset);
407 REG_READ(dspbase);
408 }
409
410psb_intel_pipe_cleaner:
411 /* If there was a previous display we can now unpin it */
412 if (old_fb)
413 psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
414
415psb_intel_pipe_set_base_exit:
416 gma_power_end(dev);
417 return ret;
418}
419
420/**
421 * Sets the power management mode of the pipe and plane.
422 *
423 * This code should probably grow support for turning the cursor off and back
424 * on appropriately at the same time as we're turning the pipe off/on.
425 */
426static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
427{
428 struct drm_device *dev = crtc->dev;
429 /* struct drm_i915_master_private *master_priv; */
430 /* struct drm_i915_private *dev_priv = dev->dev_private; */
431 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
432 int pipe = psb_intel_crtc->pipe;
433 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
434 int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
435 int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
436 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
437 u32 temp;
438 bool enabled;
439
440 /* XXX: When our outputs are all unaware of DPMS modes other than off
441 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
442 */
443 switch (mode) {
444 case DRM_MODE_DPMS_ON:
445 case DRM_MODE_DPMS_STANDBY:
446 case DRM_MODE_DPMS_SUSPEND:
447 /* Enable the DPLL */
448 temp = REG_READ(dpll_reg);
449 if ((temp & DPLL_VCO_ENABLE) == 0) {
450 REG_WRITE(dpll_reg, temp);
451 REG_READ(dpll_reg);
452 /* Wait for the clocks to stabilize. */
453 udelay(150);
454 REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
455 REG_READ(dpll_reg);
456 /* Wait for the clocks to stabilize. */
457 udelay(150);
458 REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
459 REG_READ(dpll_reg);
460 /* Wait for the clocks to stabilize. */
461 udelay(150);
462 }
463
464 /* Enable the pipe */
465 temp = REG_READ(pipeconf_reg);
466 if ((temp & PIPEACONF_ENABLE) == 0)
467 REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
468
469 /* Enable the plane */
470 temp = REG_READ(dspcntr_reg);
471 if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
472 REG_WRITE(dspcntr_reg,
473 temp | DISPLAY_PLANE_ENABLE);
474 /* Flush the plane changes */
475 REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
476 }
477
478 psb_intel_crtc_load_lut(crtc);
479
480 /* Give the overlay scaler a chance to enable
481 * if it's on this pipe */
482 /* psb_intel_crtc_dpms_video(crtc, true); TODO */
483 break;
484 case DRM_MODE_DPMS_OFF:
485 /* Give the overlay scaler a chance to disable
486 * if it's on this pipe */
487 /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
488
489 /* Disable the VGA plane that we never use */
490 REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
491
492 /* Disable display plane */
493 temp = REG_READ(dspcntr_reg);
494 if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
495 REG_WRITE(dspcntr_reg,
496 temp & ~DISPLAY_PLANE_ENABLE);
497 /* Flush the plane changes */
498 REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
499 REG_READ(dspbase_reg);
500 }
501
502 /* Next, disable display pipes */
503 temp = REG_READ(pipeconf_reg);
504 if ((temp & PIPEACONF_ENABLE) != 0) {
505 REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
506 REG_READ(pipeconf_reg);
507 }
508
509 /* Wait for vblank for the disable to take effect. */
510 psb_intel_wait_for_vblank(dev);
511
512 temp = REG_READ(dpll_reg);
513 if ((temp & DPLL_VCO_ENABLE) != 0) {
514 REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
515 REG_READ(dpll_reg);
516 }
517
518 /* Wait for the clocks to turn off. */
519 udelay(150);
520 break;
521 }
522
523 enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
524
525 /*Set FIFO Watermarks*/
526 REG_WRITE(DSPARB, 0x3F3E);
527}
528
529static void psb_intel_crtc_prepare(struct drm_crtc *crtc)
530{
531 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
532 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
533}
534
535static void psb_intel_crtc_commit(struct drm_crtc *crtc)
536{
537 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
538 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
539}
540
541void psb_intel_encoder_prepare(struct drm_encoder *encoder)
542{
543 struct drm_encoder_helper_funcs *encoder_funcs =
544 encoder->helper_private;
545 /* lvds has its own version of prepare see psb_intel_lvds_prepare */
546 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
547}
548
549void psb_intel_encoder_commit(struct drm_encoder *encoder)
550{
551 struct drm_encoder_helper_funcs *encoder_funcs =
552 encoder->helper_private;
553	/* lvds has its own version of commit; see psb_intel_lvds_commit */
554 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
555}
556
557static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
558 struct drm_display_mode *mode,
559 struct drm_display_mode *adjusted_mode)
560{
561 return true;
562}
563
564
565/**
566 * Return the pipe currently connected to the panel fitter,
567 * or -1 if the panel fitter is not present or not in use
568 */
569static int psb_intel_panel_fitter_pipe(struct drm_device *dev)
570{
571 u32 pfit_control;
572
573 pfit_control = REG_READ(PFIT_CONTROL);
574
575 /* See if the panel fitter is in use */
576 if ((pfit_control & PFIT_ENABLE) == 0)
577 return -1;
578 /* Must be on PIPE 1 for PSB */
579 return 1;
580}
581
582static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
583 struct drm_display_mode *mode,
584 struct drm_display_mode *adjusted_mode,
585 int x, int y,
586 struct drm_framebuffer *old_fb)
587{
588 struct drm_device *dev = crtc->dev;
589 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
590 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
591 int pipe = psb_intel_crtc->pipe;
592 int fp_reg = (pipe == 0) ? FPA0 : FPB0;
593 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
594 int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
595 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
596 int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
597 int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
598 int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
599 int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
600 int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
601 int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
602 int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
603 int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
604 int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
605 int refclk;
606 struct psb_intel_clock_t clock;
607 u32 dpll = 0, fp = 0, dspcntr, pipeconf;
608 bool ok, is_sdvo = false, is_dvo = false;
609 bool is_crt = false, is_lvds = false, is_tv = false;
610 struct drm_mode_config *mode_config = &dev->mode_config;
611 struct drm_connector *connector;
612
613 /* No scan out no play */
614 if (crtc->fb == NULL) {
615 crtc_funcs->mode_set_base(crtc, x, y, old_fb);
616 return 0;
617 }
618
619 list_for_each_entry(connector, &mode_config->connector_list, head) {
620 struct psb_intel_output *psb_intel_output =
621 to_psb_intel_output(connector);
622
623 if (!connector->encoder
624 || connector->encoder->crtc != crtc)
625 continue;
626
627 switch (psb_intel_output->type) {
628 case INTEL_OUTPUT_LVDS:
629 is_lvds = true;
630 break;
631 case INTEL_OUTPUT_SDVO:
632 is_sdvo = true;
633 break;
634 case INTEL_OUTPUT_DVO:
635 is_dvo = true;
636 break;
637 case INTEL_OUTPUT_TVOUT:
638 is_tv = true;
639 break;
640 case INTEL_OUTPUT_ANALOG:
641 is_crt = true;
642 break;
643 }
644 }
645
646 refclk = 96000;
647
648 ok = psb_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk,
649 &clock);
650 if (!ok) {
651 dev_err(dev->dev, "Couldn't find PLL settings for mode!\n");
652 return 0;
653 }
654
655 fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
656
657 dpll = DPLL_VGA_MODE_DIS;
658 if (is_lvds) {
659 dpll |= DPLLB_MODE_LVDS;
660 dpll |= DPLL_DVO_HIGH_SPEED;
661 } else
662 dpll |= DPLLB_MODE_DAC_SERIAL;
663 if (is_sdvo) {
664 int sdvo_pixel_multiply =
665 adjusted_mode->clock / mode->clock;
666 dpll |= DPLL_DVO_HIGH_SPEED;
667 dpll |=
668 (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
669 }
670
671 /* compute bitmask from p1 value */
672 dpll |= (1 << (clock.p1 - 1)) << 16;
673 switch (clock.p2) {
674 case 5:
675 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
676 break;
677 case 7:
678 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
679 break;
680 case 10:
681 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
682 break;
683 case 14:
684 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
685 break;
686 }
687
688 if (is_tv) {
689 /* XXX: just matching BIOS for now */
690/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
691 dpll |= 3;
692 }
693 dpll |= PLL_REF_INPUT_DREFCLK;
694
695 /* setup pipeconf */
696 pipeconf = REG_READ(pipeconf_reg);
697
698 /* Set up the display plane register */
699 dspcntr = DISPPLANE_GAMMA_ENABLE;
700
701 if (pipe == 0)
702 dspcntr |= DISPPLANE_SEL_PIPE_A;
703 else
704 dspcntr |= DISPPLANE_SEL_PIPE_B;
705
706 dspcntr |= DISPLAY_PLANE_ENABLE;
707 pipeconf |= PIPEACONF_ENABLE;
708 dpll |= DPLL_VCO_ENABLE;
709
710
711 /* Disable the panel fitter if it was on our pipe */
712 if (psb_intel_panel_fitter_pipe(dev) == pipe)
713 REG_WRITE(PFIT_CONTROL, 0);
714
715 drm_mode_debug_printmodeline(mode);
716
717 if (dpll & DPLL_VCO_ENABLE) {
718 REG_WRITE(fp_reg, fp);
719 REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
720 REG_READ(dpll_reg);
721 udelay(150);
722 }
723
724 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
725 * This is an exception to the general rule that mode_set doesn't turn
726 * things on.
727 */
728 if (is_lvds) {
729 u32 lvds = REG_READ(LVDS);
730
731 lvds &= ~LVDS_PIPEB_SELECT;
732 if (pipe == 1)
733 lvds |= LVDS_PIPEB_SELECT;
734
735 lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
736 /* Set the B0-B3 data pairs corresponding to
737 * whether we're going to
738 * set the DPLLs for dual-channel mode or not.
739 */
740 lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
741 if (clock.p2 == 7)
742 lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
743
744 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
745 * appropriately here, but we need to look more
746 * thoroughly into how panels behave in the two modes.
747 */
748
749 REG_WRITE(LVDS, lvds);
750 REG_READ(LVDS);
751 }
752
753 REG_WRITE(fp_reg, fp);
754 REG_WRITE(dpll_reg, dpll);
755 REG_READ(dpll_reg);
756 /* Wait for the clocks to stabilize. */
757 udelay(150);
758
759 /* write it again -- the BIOS does, after all */
760 REG_WRITE(dpll_reg, dpll);
761
762 REG_READ(dpll_reg);
763 /* Wait for the clocks to stabilize. */
764 udelay(150);
765
766 REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
767 ((adjusted_mode->crtc_htotal - 1) << 16));
768 REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
769 ((adjusted_mode->crtc_hblank_end - 1) << 16));
770 REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
771 ((adjusted_mode->crtc_hsync_end - 1) << 16));
772 REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
773 ((adjusted_mode->crtc_vtotal - 1) << 16));
774 REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
775 ((adjusted_mode->crtc_vblank_end - 1) << 16));
776 REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
777 ((adjusted_mode->crtc_vsync_end - 1) << 16));
778 /* pipesrc and dspsize control the size that is scaled from,
779 * which should always be the user's requested size.
780 */
781 REG_WRITE(dspsize_reg,
782 ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
783 REG_WRITE(dsppos_reg, 0);
784 REG_WRITE(pipesrc_reg,
785 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
786 REG_WRITE(pipeconf_reg, pipeconf);
787 REG_READ(pipeconf_reg);
788
789 psb_intel_wait_for_vblank(dev);
790
791 REG_WRITE(dspcntr_reg, dspcntr);
792
793 /* Flush the plane changes */
794 crtc_funcs->mode_set_base(crtc, x, y, old_fb);
795
796 psb_intel_wait_for_vblank(dev);
797
798 return 0;
799}
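The divisor words programmed above pack n/m1/m2 into the FP register and p1/p2 plus the mode bits into the DPLL register. A rough sketch of how those divisors relate to the resulting dot clock, assuming the conventional i8xx-style encoding used by this hardware family (the authoritative version is the driver's own clock helper, e.g. i8xx_clock(); the +2 offsets below are an assumption, not taken from this file):

struct psb_clock_sketch {
	int n, m1, m2, p1, p2;
};

/* Illustrative only: approximate dot clock in kHz for a given refclk in kHz. */
static int psb_dot_clock_khz(int refclk_khz, const struct psb_clock_sketch *c)
{
	int m = 5 * (c->m1 + 2) + (c->m2 + 2);	/* assumed i8xx encoding */
	int p = c->p1 * c->p2;
	int vco = refclk_khz * m / (c->n + 2);

	return vco / p;
}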
800
801/** Loads the palette/gamma unit for the CRTC with the prepared values */
802void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
803{
804 struct drm_device *dev = crtc->dev;
805 struct drm_psb_private *dev_priv =
806 (struct drm_psb_private *)dev->dev_private;
807 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
808 int palreg = PALETTE_A;
809 int i;
810
811 /* The clocks have to be on to load the palette. */
812 if (!crtc->enabled)
813 return;
814
815 switch (psb_intel_crtc->pipe) {
816 case 0:
817 break;
818 case 1:
819 palreg = PALETTE_B;
820 break;
821 case 2:
822 palreg = PALETTE_C;
823 break;
824 default:
825 dev_err(dev->dev, "Illegal Pipe Number.\n");
826 return;
827 }
828
829 if (gma_power_begin(dev, false)) {
830 for (i = 0; i < 256; i++) {
831 REG_WRITE(palreg + 4 * i,
832 ((psb_intel_crtc->lut_r[i] +
833 psb_intel_crtc->lut_adj[i]) << 16) |
834 ((psb_intel_crtc->lut_g[i] +
835 psb_intel_crtc->lut_adj[i]) << 8) |
836 (psb_intel_crtc->lut_b[i] +
837 psb_intel_crtc->lut_adj[i]));
838 }
839 gma_power_end(dev);
840 } else {
841 for (i = 0; i < 256; i++) {
842 dev_priv->save_palette_a[i] =
843 ((psb_intel_crtc->lut_r[i] +
844 psb_intel_crtc->lut_adj[i]) << 16) |
845 ((psb_intel_crtc->lut_g[i] +
846 psb_intel_crtc->lut_adj[i]) << 8) |
847 (psb_intel_crtc->lut_b[i] +
848 psb_intel_crtc->lut_adj[i]);
849 }
850
851 }
852}
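Each palette slot written above is a single 32-bit word with the adjusted red, green and blue components in bits 23:16, 15:8 and 7:0. An illustrative packing helper (not part of the driver) that mirrors the expression used in the loop:

static inline u32 psb_pack_lut_entry(u8 r, u8 g, u8 b, u8 adj)
{
	/* Same arithmetic as the loop above: adjust, then shift into place. */
	return ((u32)(r + adj) << 16) | ((u32)(g + adj) << 8) | (u32)(b + adj);
}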
853
854/**
855 * Save HW states of the given crtc
856 */
857static void psb_intel_crtc_save(struct drm_crtc *crtc)
858{
859 struct drm_device *dev = crtc->dev;
860 /* struct drm_psb_private *dev_priv =
861 (struct drm_psb_private *)dev->dev_private; */
862 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
863 struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
864 int pipeA = (psb_intel_crtc->pipe == 0);
865 uint32_t paletteReg;
866 int i;
867
868 if (!crtc_state) {
869 dev_err(dev->dev, "No CRTC state found\n");
870 return;
871 }
872
873 crtc_state->saveDSPCNTR = REG_READ(pipeA ? DSPACNTR : DSPBCNTR);
874 crtc_state->savePIPECONF = REG_READ(pipeA ? PIPEACONF : PIPEBCONF);
875 crtc_state->savePIPESRC = REG_READ(pipeA ? PIPEASRC : PIPEBSRC);
876 crtc_state->saveFP0 = REG_READ(pipeA ? FPA0 : FPB0);
877 crtc_state->saveFP1 = REG_READ(pipeA ? FPA1 : FPB1);
878 crtc_state->saveDPLL = REG_READ(pipeA ? DPLL_A : DPLL_B);
879 crtc_state->saveHTOTAL = REG_READ(pipeA ? HTOTAL_A : HTOTAL_B);
880 crtc_state->saveHBLANK = REG_READ(pipeA ? HBLANK_A : HBLANK_B);
881 crtc_state->saveHSYNC = REG_READ(pipeA ? HSYNC_A : HSYNC_B);
882 crtc_state->saveVTOTAL = REG_READ(pipeA ? VTOTAL_A : VTOTAL_B);
883 crtc_state->saveVBLANK = REG_READ(pipeA ? VBLANK_A : VBLANK_B);
884 crtc_state->saveVSYNC = REG_READ(pipeA ? VSYNC_A : VSYNC_B);
885 crtc_state->saveDSPSTRIDE = REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE);
886
887 /*NOTE: DSPSIZE DSPPOS only for psb*/
888 crtc_state->saveDSPSIZE = REG_READ(pipeA ? DSPASIZE : DSPBSIZE);
889 crtc_state->saveDSPPOS = REG_READ(pipeA ? DSPAPOS : DSPBPOS);
890
891 crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE);
892
893 paletteReg = pipeA ? PALETTE_A : PALETTE_B;
894 for (i = 0; i < 256; ++i)
895 crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
896}
897
898/**
899 * Restore HW states of the given crtc
900 */
901static void psb_intel_crtc_restore(struct drm_crtc *crtc)
902{
903 struct drm_device *dev = crtc->dev;
904 /* struct drm_psb_private * dev_priv =
905 (struct drm_psb_private *)dev->dev_private; */
906 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
907 struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
908 /* struct drm_crtc_helper_funcs * crtc_funcs = crtc->helper_private; */
909 int pipeA = (psb_intel_crtc->pipe == 0);
910 uint32_t paletteReg;
911 int i;
912
913 if (!crtc_state) {
914 dev_err(dev->dev, "No crtc state\n");
915 return;
916 }
917
918 if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
919 REG_WRITE(pipeA ? DPLL_A : DPLL_B,
920 crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
921 REG_READ(pipeA ? DPLL_A : DPLL_B);
922 udelay(150);
923 }
924
925 REG_WRITE(pipeA ? FPA0 : FPB0, crtc_state->saveFP0);
926 REG_READ(pipeA ? FPA0 : FPB0);
927
928 REG_WRITE(pipeA ? FPA1 : FPB1, crtc_state->saveFP1);
929 REG_READ(pipeA ? FPA1 : FPB1);
930
931 REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL);
932 REG_READ(pipeA ? DPLL_A : DPLL_B);
933 udelay(150);
934
935 REG_WRITE(pipeA ? HTOTAL_A : HTOTAL_B, crtc_state->saveHTOTAL);
936 REG_WRITE(pipeA ? HBLANK_A : HBLANK_B, crtc_state->saveHBLANK);
937 REG_WRITE(pipeA ? HSYNC_A : HSYNC_B, crtc_state->saveHSYNC);
938 REG_WRITE(pipeA ? VTOTAL_A : VTOTAL_B, crtc_state->saveVTOTAL);
939 REG_WRITE(pipeA ? VBLANK_A : VBLANK_B, crtc_state->saveVBLANK);
940 REG_WRITE(pipeA ? VSYNC_A : VSYNC_B, crtc_state->saveVSYNC);
941 REG_WRITE(pipeA ? DSPASTRIDE : DSPBSTRIDE, crtc_state->saveDSPSTRIDE);
942
943 REG_WRITE(pipeA ? DSPASIZE : DSPBSIZE, crtc_state->saveDSPSIZE);
944 REG_WRITE(pipeA ? DSPAPOS : DSPBPOS, crtc_state->saveDSPPOS);
945
946 REG_WRITE(pipeA ? PIPEASRC : PIPEBSRC, crtc_state->savePIPESRC);
947 REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
948 REG_WRITE(pipeA ? PIPEACONF : PIPEBCONF, crtc_state->savePIPECONF);
949
950 psb_intel_wait_for_vblank(dev);
951
952 REG_WRITE(pipeA ? DSPACNTR : DSPBCNTR, crtc_state->saveDSPCNTR);
953 REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
954
955 psb_intel_wait_for_vblank(dev);
956
957 paletteReg = pipeA ? PALETTE_A : PALETTE_B;
958 for (i = 0; i < 256; ++i)
959 REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
960}
961
962static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
963 struct drm_file *file_priv,
964 uint32_t handle,
965 uint32_t width, uint32_t height)
966{
967 struct drm_device *dev = crtc->dev;
968 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
969 int pipe = psb_intel_crtc->pipe;
970 uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
971 uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
972 uint32_t temp;
973 size_t addr = 0;
974 struct gtt_range *gt;
975 struct drm_gem_object *obj;
976 int ret;
977
978	/* If we want to turn off the cursor, ignore width and height */
979 if (!handle) {
980 /* turn off the cursor */
981 temp = CURSOR_MODE_DISABLE;
982
983 if (gma_power_begin(dev, false)) {
984 REG_WRITE(control, temp);
985 REG_WRITE(base, 0);
986 gma_power_end(dev);
987 }
988
989 /* Unpin the old GEM object */
990 if (psb_intel_crtc->cursor_obj) {
991 gt = container_of(psb_intel_crtc->cursor_obj,
992 struct gtt_range, gem);
993 psb_gtt_unpin(gt);
994 drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
995 psb_intel_crtc->cursor_obj = NULL;
996 }
997
998 return 0;
999 }
1000
1001 /* Currently we only support 64x64 cursors */
1002 if (width != 64 || height != 64) {
1003 dev_dbg(dev->dev, "we currently only support 64x64 cursors\n");
1004 return -EINVAL;
1005 }
1006
1007 obj = drm_gem_object_lookup(dev, file_priv, handle);
1008 if (!obj)
1009 return -ENOENT;
1010
1011 if (obj->size < width * height * 4) {
1012		dev_dbg(dev->dev, "buffer is too small\n");
1013 return -ENOMEM;
1014 }
1015
1016 gt = container_of(obj, struct gtt_range, gem);
1017
1018 /* Pin the memory into the GTT */
1019 ret = psb_gtt_pin(gt);
1020 if (ret) {
1021 dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
1022 return ret;
1023 }
1024
1025
1026 addr = gt->offset; /* Or resource.start ??? */
1027
1028 psb_intel_crtc->cursor_addr = addr;
1029
1030 temp = 0;
1031 /* set the pipe for the cursor */
1032 temp |= (pipe << 28);
1033 temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
1034
1035 if (gma_power_begin(dev, false)) {
1036 REG_WRITE(control, temp);
1037 REG_WRITE(base, addr);
1038 gma_power_end(dev);
1039 }
1040
1041 /* unpin the old bo */
1042 if (psb_intel_crtc->cursor_obj) {
1043 gt = container_of(psb_intel_crtc->cursor_obj,
1044 struct gtt_range, gem);
1045 psb_gtt_unpin(gt);
1046 drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
1047	}
1048	psb_intel_crtc->cursor_obj = obj;
1049 return 0;
1050}
1051
1052static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
1053{
1054 struct drm_device *dev = crtc->dev;
1055 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1056 int pipe = psb_intel_crtc->pipe;
1057 uint32_t temp = 0;
1058 uint32_t addr;
1059
1060
1061 if (x < 0) {
1062 temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
1063 x = -x;
1064 }
1065 if (y < 0) {
1066 temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
1067 y = -y;
1068 }
1069
1070 temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
1071 temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
1072
1073 addr = psb_intel_crtc->cursor_addr;
1074
1075 if (gma_power_begin(dev, false)) {
1076 REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
1077 REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, addr);
1078 gma_power_end(dev);
1079 }
1080 return 0;
1081}
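The cursor position register holds each coordinate as a sign-magnitude field: CURAPOS/CURBPOS take the X field in the low half and the Y field in the high half via the *_SHIFT values. A small sketch of the per-coordinate encoding used above (illustrative only, not part of the driver):

static inline u32 psb_cursor_pos_field(int v, int shift)
{
	u32 field = 0;

	if (v < 0) {
		field |= CURSOR_POS_SIGN;	/* sign bit, magnitude follows */
		v = -v;
	}
	field |= v & CURSOR_POS_MASK;
	return field << shift;			/* CURSOR_X_SHIFT or CURSOR_Y_SHIFT */
}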
1082
1083void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
1084 u16 *green, u16 *blue, uint32_t type, uint32_t size)
1085{
1086 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1087 int i;
1088
1089 if (size != 256)
1090 return;
1091
1092 for (i = 0; i < 256; i++) {
1093 psb_intel_crtc->lut_r[i] = red[i] >> 8;
1094 psb_intel_crtc->lut_g[i] = green[i] >> 8;
1095 psb_intel_crtc->lut_b[i] = blue[i] >> 8;
1096 }
1097
1098 psb_intel_crtc_load_lut(crtc);
1099}
1100
1101static int psb_crtc_set_config(struct drm_mode_set *set)
1102{
1103 int ret;
1104 struct drm_device *dev = set->crtc->dev;
1105
1106 pm_runtime_forbid(&dev->pdev->dev);
1107 ret = drm_crtc_helper_set_config(set);
1108 pm_runtime_allow(&dev->pdev->dev);
1109 return ret;
1110}
1111
1112/* Returns the clock of the currently programmed mode of the given pipe. */
1113static int psb_intel_crtc_clock_get(struct drm_device *dev,
1114 struct drm_crtc *crtc)
1115{
1116 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1117 int pipe = psb_intel_crtc->pipe;
1118 u32 dpll;
1119 u32 fp;
1120 struct psb_intel_clock_t clock;
1121 bool is_lvds;
1122 struct drm_psb_private *dev_priv = dev->dev_private;
1123
1124 if (gma_power_begin(dev, false)) {
1125 dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B);
1126 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
1127 fp = REG_READ((pipe == 0) ? FPA0 : FPB0);
1128 else
1129 fp = REG_READ((pipe == 0) ? FPA1 : FPB1);
1130 is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
1131 gma_power_end(dev);
1132 } else {
1133 dpll = (pipe == 0) ?
1134 dev_priv->saveDPLL_A : dev_priv->saveDPLL_B;
1135
1136 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
1137 fp = (pipe == 0) ?
1138 dev_priv->saveFPA0 :
1139 dev_priv->saveFPB0;
1140 else
1141 fp = (pipe == 0) ?
1142 dev_priv->saveFPA1 :
1143 dev_priv->saveFPB1;
1144
1145 is_lvds = (pipe == 1) && (dev_priv->saveLVDS & LVDS_PORT_EN);
1146 }
1147
1148 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
1149 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
1150 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
1151
1152 if (is_lvds) {
1153 clock.p1 =
1154 ffs((dpll &
1155 DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
1156 DPLL_FPA01_P1_POST_DIV_SHIFT);
1157 clock.p2 = 14;
1158
1159 if ((dpll & PLL_REF_INPUT_MASK) ==
1160 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
1161 /* XXX: might not be 66MHz */
1162 i8xx_clock(66000, &clock);
1163 } else
1164 i8xx_clock(48000, &clock);
1165 } else {
1166 if (dpll & PLL_P1_DIVIDE_BY_TWO)
1167 clock.p1 = 2;
1168 else {
1169 clock.p1 =
1170 ((dpll &
1171 DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
1172 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
1173 }
1174 if (dpll & PLL_P2_DIVIDE_BY_4)
1175 clock.p2 = 4;
1176 else
1177 clock.p2 = 2;
1178
1179 i8xx_clock(48000, &clock);
1180 }
1181
1182 /* XXX: It would be nice to validate the clocks, but we can't reuse
1183 * i830PllIsValid() because it relies on the xf86_config connector
1184 * configuration being accurate, which it isn't necessarily.
1185 */
1186
1187 return clock.dot;
1188}
1189
1190/** Returns the currently programmed mode of the given pipe. */
1191struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
1192 struct drm_crtc *crtc)
1193{
1194 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1195 int pipe = psb_intel_crtc->pipe;
1196 struct drm_display_mode *mode;
1197 int htot;
1198 int hsync;
1199 int vtot;
1200 int vsync;
1201 struct drm_psb_private *dev_priv = dev->dev_private;
1202
1203 if (gma_power_begin(dev, false)) {
1204 htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
1205 hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
1206 vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
1207 vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
1208 gma_power_end(dev);
1209 } else {
1210 htot = (pipe == 0) ?
1211 dev_priv->saveHTOTAL_A : dev_priv->saveHTOTAL_B;
1212 hsync = (pipe == 0) ?
1213 dev_priv->saveHSYNC_A : dev_priv->saveHSYNC_B;
1214 vtot = (pipe == 0) ?
1215 dev_priv->saveVTOTAL_A : dev_priv->saveVTOTAL_B;
1216 vsync = (pipe == 0) ?
1217 dev_priv->saveVSYNC_A : dev_priv->saveVSYNC_B;
1218 }
1219
1220 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
1221 if (!mode)
1222 return NULL;
1223
1224 mode->clock = psb_intel_crtc_clock_get(dev, crtc);
1225 mode->hdisplay = (htot & 0xffff) + 1;
1226 mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
1227 mode->hsync_start = (hsync & 0xffff) + 1;
1228 mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
1229 mode->vdisplay = (vtot & 0xffff) + 1;
1230 mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
1231 mode->vsync_start = (vsync & 0xffff) + 1;
1232 mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
1233
1234 drm_mode_set_name(mode);
1235 drm_mode_set_crtcinfo(mode, 0);
1236
1237 return mode;
1238}
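Each timing register read above stores two values minus one: the low 16 bits hold the active/start value and the high 16 bits the total/end value. An illustrative decode helper (not part of the driver):

static inline void psb_decode_timing(u32 reg, int *lo, int *hi)
{
	*lo = (reg & 0xffff) + 1;		/* e.g. hdisplay or hsync_start */
	*hi = ((reg >> 16) & 0xffff) + 1;	/* e.g. htotal or hsync_end */
}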
1239
1240void psb_intel_crtc_destroy(struct drm_crtc *crtc)
1241{
1242 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1243 struct gtt_range *gt;
1244
1245 /* Unpin the old GEM object */
1246 if (psb_intel_crtc->cursor_obj) {
1247 gt = container_of(psb_intel_crtc->cursor_obj,
1248 struct gtt_range, gem);
1249 psb_gtt_unpin(gt);
1250 drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
1251 psb_intel_crtc->cursor_obj = NULL;
1252 }
1253 kfree(psb_intel_crtc->crtc_state);
1254 drm_crtc_cleanup(crtc);
1255 kfree(psb_intel_crtc);
1256}
1257
1258const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
1259 .dpms = psb_intel_crtc_dpms,
1260 .mode_fixup = psb_intel_crtc_mode_fixup,
1261 .mode_set = psb_intel_crtc_mode_set,
1262 .mode_set_base = psb_intel_pipe_set_base,
1263 .prepare = psb_intel_crtc_prepare,
1264 .commit = psb_intel_crtc_commit,
1265};
1266
1267const struct drm_crtc_funcs psb_intel_crtc_funcs = {
1268 .save = psb_intel_crtc_save,
1269 .restore = psb_intel_crtc_restore,
1270 .cursor_set = psb_intel_crtc_cursor_set,
1271 .cursor_move = psb_intel_crtc_cursor_move,
1272 .gamma_set = psb_intel_crtc_gamma_set,
1273 .set_config = psb_crtc_set_config,
1274 .destroy = psb_intel_crtc_destroy,
1275};
1276
1277/*
1278 * Set the default value of cursor control and base register
1279 * to zero. This is a workaround for a h/w defect on Oaktrail
1280 */
1281static void psb_intel_cursor_init(struct drm_device *dev, int pipe)
1282{
1283 u32 control[3] = { CURACNTR, CURBCNTR, CURCCNTR };
1284 u32 base[3] = { CURABASE, CURBBASE, CURCBASE };
1285
1286 REG_WRITE(control[pipe], 0);
1287 REG_WRITE(base[pipe], 0);
1288}
1289
1290void psb_intel_crtc_init(struct drm_device *dev, int pipe,
1291 struct psb_intel_mode_device *mode_dev)
1292{
1293 struct drm_psb_private *dev_priv = dev->dev_private;
1294 struct psb_intel_crtc *psb_intel_crtc;
1295 int i;
1296 uint16_t *r_base, *g_base, *b_base;
1297
1298	/* We allocate an extra array of drm_connector pointers
1299 * for fbdev after the crtc */
1300 psb_intel_crtc =
1301 kzalloc(sizeof(struct psb_intel_crtc) +
1302 (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)),
1303 GFP_KERNEL);
1304 if (psb_intel_crtc == NULL)
1305 return;
1306
1307 psb_intel_crtc->crtc_state =
1308 kzalloc(sizeof(struct psb_intel_crtc_state), GFP_KERNEL);
1309 if (!psb_intel_crtc->crtc_state) {
1310 dev_err(dev->dev, "Crtc state error: No memory\n");
1311 kfree(psb_intel_crtc);
1312 return;
1313 }
1314
1315 /* Set the CRTC operations from the chip specific data */
1316 drm_crtc_init(dev, &psb_intel_crtc->base, dev_priv->ops->crtc_funcs);
1317
1318 drm_mode_crtc_set_gamma_size(&psb_intel_crtc->base, 256);
1319 psb_intel_crtc->pipe = pipe;
1320 psb_intel_crtc->plane = pipe;
1321
1322 r_base = psb_intel_crtc->base.gamma_store;
1323 g_base = r_base + 256;
1324 b_base = g_base + 256;
1325 for (i = 0; i < 256; i++) {
1326 psb_intel_crtc->lut_r[i] = i;
1327 psb_intel_crtc->lut_g[i] = i;
1328 psb_intel_crtc->lut_b[i] = i;
1329 r_base[i] = i << 8;
1330 g_base[i] = i << 8;
1331 b_base[i] = i << 8;
1332
1333 psb_intel_crtc->lut_adj[i] = 0;
1334 }
1335
1336 psb_intel_crtc->mode_dev = mode_dev;
1337 psb_intel_crtc->cursor_addr = 0;
1338
1339 drm_crtc_helper_add(&psb_intel_crtc->base,
1340 dev_priv->ops->crtc_helper);
1341
1342	/* Set up the array of drm_connector pointers */
1343 psb_intel_crtc->mode_set.crtc = &psb_intel_crtc->base;
1344 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
1345 dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] != NULL);
1346 dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] =
1347 &psb_intel_crtc->base;
1348 dev_priv->pipe_to_crtc_mapping[psb_intel_crtc->pipe] =
1349 &psb_intel_crtc->base;
1350 psb_intel_crtc->mode_set.connectors =
1351 (struct drm_connector **) (psb_intel_crtc + 1);
1352 psb_intel_crtc->mode_set.num_connectors = 0;
1353 psb_intel_cursor_init(dev, pipe);
1354}
1355
1356int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
1357 struct drm_file *file_priv)
1358{
1359 struct drm_psb_private *dev_priv = dev->dev_private;
1360 struct drm_psb_get_pipe_from_crtc_id_arg *pipe_from_crtc_id = data;
1361 struct drm_mode_object *drmmode_obj;
1362 struct psb_intel_crtc *crtc;
1363
1364 if (!dev_priv) {
1365 dev_err(dev->dev, "called with no initialization\n");
1366 return -EINVAL;
1367 }
1368
1369 drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
1370 DRM_MODE_OBJECT_CRTC);
1371
1372 if (!drmmode_obj) {
1373 dev_err(dev->dev, "no such CRTC id\n");
1374 return -EINVAL;
1375 }
1376
1377 crtc = to_psb_intel_crtc(obj_to_crtc(drmmode_obj));
1378 pipe_from_crtc_id->pipe = crtc->pipe;
1379
1380 return 0;
1381}
1382
1383struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
1384{
1385 struct drm_crtc *crtc = NULL;
1386
1387 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1388 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1389 if (psb_intel_crtc->pipe == pipe)
1390 break;
1391 }
1392 return crtc;
1393}
1394
1395int psb_intel_connector_clones(struct drm_device *dev, int type_mask)
1396{
1397 int index_mask = 0;
1398 struct drm_connector *connector;
1399 int entry = 0;
1400
1401 list_for_each_entry(connector, &dev->mode_config.connector_list,
1402 head) {
1403 struct psb_intel_output *psb_intel_output =
1404 to_psb_intel_output(connector);
1405 if (type_mask & (1 << psb_intel_output->type))
1406 index_mask |= (1 << entry);
1407 entry++;
1408 }
1409 return index_mask;
1410}
1411
1412
1413void psb_intel_modeset_cleanup(struct drm_device *dev)
1414{
1415 drm_mode_config_cleanup(dev);
1416}
1417
1418
1419/* The current Intel driver doesn't take advantage of encoders;
1420 * always give back the encoder for the connector.
1421 */
1422struct drm_encoder *psb_intel_best_encoder(struct drm_connector *connector)
1423{
1424 struct psb_intel_output *psb_intel_output =
1425 to_psb_intel_output(connector);
1426
1427 return &psb_intel_output->enc;
1428}
1429
diff --git a/drivers/staging/gma500/psb_intel_display.h b/drivers/staging/gma500/psb_intel_display.h
deleted file mode 100644
index 535b49a5e409..000000000000
--- a/drivers/staging/gma500/psb_intel_display.h
+++ /dev/null
@@ -1,28 +0,0 @@
1/* copyright (c) 2008, Intel Corporation
2 *
3 * This program is free software; you can redistribute it and/or modify it
4 * under the terms and conditions of the GNU General Public License,
5 * version 2, as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope it will be useful, but WITHOUT
8 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
9 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
10 * more details.
11 *
12 * You should have received a copy of the GNU General Public License along with
13 * this program; if not, write to the Free Software Foundation, Inc.,
14 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
15 *
16 * Authors:
17 * Eric Anholt <eric@anholt.net>
18 */
19
20#ifndef _INTEL_DISPLAY_H_
21#define _INTEL_DISPLAY_H_
22
23bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type);
24void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
25 u16 *green, u16 *blue, uint32_t type, uint32_t size);
26void psb_intel_crtc_destroy(struct drm_crtc *crtc);
27
28#endif
diff --git a/drivers/staging/gma500/psb_intel_drv.h b/drivers/staging/gma500/psb_intel_drv.h
deleted file mode 100644
index 36b554b5c335..000000000000
--- a/drivers/staging/gma500/psb_intel_drv.h
+++ /dev/null
@@ -1,230 +0,0 @@
1/*
2 * Copyright (c) 2009-2011, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 */
18
19#ifndef __INTEL_DRV_H__
20#define __INTEL_DRV_H__
21
22#include <linux/i2c.h>
23#include <linux/i2c-algo-bit.h>
24#include <drm/drm_crtc.h>
25#include <drm/drm_crtc_helper.h>
26#include <linux/gpio.h>
27
28/*
29 * Display related stuff
30 */
31
32/* store information about an Ixxx DVO */
33/* The i830->i865 use multiple DVOs with multiple i2cs */
34/* the i915, i945 have a single sDVO i2c bus - which is different */
35#define MAX_OUTPUTS 6
36/* maximum connectors per crtcs in the mode set */
37#define INTELFB_CONN_LIMIT 4
38
39#define INTEL_I2C_BUS_DVO 1
40#define INTEL_I2C_BUS_SDVO 2
41
42/* these are outputs from the chip - integrated only
43 * external chips are via DVO or SDVO output */
44#define INTEL_OUTPUT_UNUSED 0
45#define INTEL_OUTPUT_ANALOG 1
46#define INTEL_OUTPUT_DVO 2
47#define INTEL_OUTPUT_SDVO 3
48#define INTEL_OUTPUT_LVDS 4
49#define INTEL_OUTPUT_TVOUT 5
50#define INTEL_OUTPUT_HDMI 6
51#define INTEL_OUTPUT_MIPI 7
52#define INTEL_OUTPUT_MIPI2 8
53
54#define INTEL_DVO_CHIP_NONE 0
55#define INTEL_DVO_CHIP_LVDS 1
56#define INTEL_DVO_CHIP_TMDS 2
57#define INTEL_DVO_CHIP_TVOUT 4
58
59/*
60 * Hold information usually kept in the device driver's private data here,
61 * since it needs to be shared across the private data of multiple drivers.
62 */
63struct psb_intel_mode_device {
64
65 /*
66 * Abstracted memory manager operations
67 */
68 size_t(*bo_offset) (struct drm_device *dev, void *bo);
69
70 /*
71 * Cursor (Can go ?)
72 */
73 int cursor_needs_physical;
74
75 /*
76 * LVDS info
77 */
78 int backlight_duty_cycle; /* restore backlight to this value */
79 bool panel_wants_dither;
80 struct drm_display_mode *panel_fixed_mode;
81 struct drm_display_mode *panel_fixed_mode2;
82 struct drm_display_mode *vbt_mode; /* if any */
83
84 uint32_t saveBLC_PWM_CTL;
85};
86
87struct psb_intel_i2c_chan {
88 /* for getting at dev. private (mmio etc.) */
89 struct drm_device *drm_dev;
90 u32 reg; /* GPIO reg */
91 struct i2c_adapter adapter;
92 struct i2c_algo_bit_data algo;
93 u8 slave_addr;
94};
95
96struct psb_intel_output {
97 struct drm_connector base;
98
99 struct drm_encoder enc;
100 int type;
101
102 struct psb_intel_i2c_chan *i2c_bus; /* for control functions */
103 struct psb_intel_i2c_chan *ddc_bus; /* for DDC only stuff */
104 bool load_detect_temp;
105 void *dev_priv;
106
107 struct psb_intel_mode_device *mode_dev;
108 struct i2c_adapter *hdmi_i2c_adapter; /* for control functions */
109};
110
111struct psb_intel_crtc_state {
112 uint32_t saveDSPCNTR;
113 uint32_t savePIPECONF;
114 uint32_t savePIPESRC;
115 uint32_t saveDPLL;
116 uint32_t saveFP0;
117 uint32_t saveFP1;
118 uint32_t saveHTOTAL;
119 uint32_t saveHBLANK;
120 uint32_t saveHSYNC;
121 uint32_t saveVTOTAL;
122 uint32_t saveVBLANK;
123 uint32_t saveVSYNC;
124 uint32_t saveDSPSTRIDE;
125 uint32_t saveDSPSIZE;
126 uint32_t saveDSPPOS;
127 uint32_t saveDSPBASE;
128 uint32_t savePalette[256];
129};
130
131struct psb_intel_crtc {
132 struct drm_crtc base;
133 int pipe;
134 int plane;
135 uint32_t cursor_addr;
136 u8 lut_r[256], lut_g[256], lut_b[256];
137 u8 lut_adj[256];
138 struct psb_intel_framebuffer *fbdev_fb;
139 /* a mode_set for fbdev users on this crtc */
140 struct drm_mode_set mode_set;
141
142 /* GEM object that holds our cursor */
143 struct drm_gem_object *cursor_obj;
144
145 struct drm_display_mode saved_mode;
146 struct drm_display_mode saved_adjusted_mode;
147
148 struct psb_intel_mode_device *mode_dev;
149
150 /*crtc mode setting flags*/
151 u32 mode_flags;
152
153 /* Saved Crtc HW states */
154 struct psb_intel_crtc_state *crtc_state;
155};
156
157#define to_psb_intel_crtc(x) \
158 container_of(x, struct psb_intel_crtc, base)
159#define to_psb_intel_output(x) \
160 container_of(x, struct psb_intel_output, base)
161#define enc_to_psb_intel_output(x) \
162 container_of(x, struct psb_intel_output, enc)
163#define to_psb_intel_framebuffer(x) \
164 container_of(x, struct psb_intel_framebuffer, base)
165
166struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
167 const u32 reg, const char *name);
168void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan);
169int psb_intel_ddc_get_modes(struct psb_intel_output *psb_intel_output);
170extern bool psb_intel_ddc_probe(struct psb_intel_output *psb_intel_output);
171
172extern void psb_intel_crtc_init(struct drm_device *dev, int pipe,
173 struct psb_intel_mode_device *mode_dev);
174extern void psb_intel_crt_init(struct drm_device *dev);
175extern void psb_intel_sdvo_init(struct drm_device *dev, int output_device);
176extern void psb_intel_dvo_init(struct drm_device *dev);
177extern void psb_intel_tv_init(struct drm_device *dev);
178extern void psb_intel_lvds_init(struct drm_device *dev,
179 struct psb_intel_mode_device *mode_dev);
180extern void psb_intel_lvds_set_brightness(struct drm_device *dev, int level);
181extern void mrst_lvds_init(struct drm_device *dev,
182 struct psb_intel_mode_device *mode_dev);
183extern void mrst_wait_for_INTR_PKT_SENT(struct drm_device *dev);
184extern void mrst_dsi_init(struct drm_device *dev,
185 struct psb_intel_mode_device *mode_dev);
186extern void mid_dsi_init(struct drm_device *dev,
187 struct psb_intel_mode_device *mode_dev, int dsi_num);
188
189extern void psb_intel_crtc_load_lut(struct drm_crtc *crtc);
190extern void psb_intel_encoder_prepare(struct drm_encoder *encoder);
191extern void psb_intel_encoder_commit(struct drm_encoder *encoder);
192
193extern struct drm_encoder *psb_intel_best_encoder(struct drm_connector
194 *connector);
195
196extern struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
197 struct drm_crtc *crtc);
198extern void psb_intel_wait_for_vblank(struct drm_device *dev);
199extern int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
200 struct drm_file *file_priv);
201extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev,
202 int pipe);
203extern struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev,
204 int sdvoB);
205extern int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector);
206extern void psb_intel_sdvo_set_hotplug(struct drm_connector *connector,
207 int enable);
208extern int intelfb_probe(struct drm_device *dev);
209extern int intelfb_remove(struct drm_device *dev,
210 struct drm_framebuffer *fb);
211extern struct drm_framebuffer *psb_intel_framebuffer_create(struct drm_device
212 *dev, struct
213 drm_mode_fb_cmd
214 *mode_cmd,
215 void *mm_private);
216extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
217 struct drm_display_mode *mode,
218 struct drm_display_mode *adjusted_mode);
219extern int psb_intel_lvds_mode_valid(struct drm_connector *connector,
220 struct drm_display_mode *mode);
221extern int psb_intel_lvds_set_property(struct drm_connector *connector,
222 struct drm_property *property,
223 uint64_t value);
224extern void psb_intel_lvds_destroy(struct drm_connector *connector);
225extern const struct drm_encoder_funcs psb_intel_lvds_enc_funcs;
226
227extern void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe);
228extern void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe);
229
230#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/staging/gma500/psb_intel_lvds.c b/drivers/staging/gma500/psb_intel_lvds.c
deleted file mode 100644
index 21022e1a977a..000000000000
--- a/drivers/staging/gma500/psb_intel_lvds.c
+++ /dev/null
@@ -1,854 +0,0 @@
1/*
2 * Copyright © 2006-2007 Intel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Authors:
18 * Eric Anholt <eric@anholt.net>
19 * Dave Airlie <airlied@linux.ie>
20 * Jesse Barnes <jesse.barnes@intel.com>
21 */
22
23#include <linux/i2c.h>
24#include <drm/drmP.h>
25
26#include "intel_bios.h"
27#include "psb_drv.h"
28#include "psb_intel_drv.h"
29#include "psb_intel_reg.h"
30#include "power.h"
31#include <linux/pm_runtime.h>
32
33/*
34 * LVDS I2C backlight control macros
35 */
36#define BRIGHTNESS_MAX_LEVEL 100
37#define BRIGHTNESS_MASK 0xFF
38#define BLC_I2C_TYPE 0x01
39#define BLC_PWM_TYPT 0x02
40
41#define BLC_POLARITY_NORMAL 0
42#define BLC_POLARITY_INVERSE 1
43
44#define PSB_BLC_MAX_PWM_REG_FREQ (0xFFFE)
45#define PSB_BLC_MIN_PWM_REG_FREQ (0x2)
46#define PSB_BLC_PWM_PRECISION_FACTOR (10)
47#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
48#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
49
50struct psb_intel_lvds_priv {
51 /*
52	 * Saved LVDS output states
53 */
54 uint32_t savePP_ON;
55 uint32_t savePP_OFF;
56 uint32_t saveLVDS;
57 uint32_t savePP_CONTROL;
58 uint32_t savePP_CYCLE;
59 uint32_t savePFIT_CONTROL;
60 uint32_t savePFIT_PGM_RATIOS;
61 uint32_t saveBLC_PWM_CTL;
62};
63
64
65/*
66 * Returns the maximum level of the backlight duty cycle field.
67 */
68static u32 psb_intel_lvds_get_max_backlight(struct drm_device *dev)
69{
70 struct drm_psb_private *dev_priv = dev->dev_private;
71 u32 ret;
72
73 if (gma_power_begin(dev, false)) {
74 ret = REG_READ(BLC_PWM_CTL);
75 gma_power_end(dev);
76 } else /* Powered off, use the saved value */
77 ret = dev_priv->saveBLC_PWM_CTL;
78
79	/* The top 15 bits hold the frequency */
80 ret = (ret & BACKLIGHT_MODULATION_FREQ_MASK) >>
81 BACKLIGHT_MODULATION_FREQ_SHIFT;
82
83	ret *= 2;	/* Return a 16-bit range as needed for setting */
84 if (ret == 0)
85 dev_err(dev->dev, "BL bug: Reg %08x save %08X\n",
86 REG_READ(BLC_PWM_CTL), dev_priv->saveBLC_PWM_CTL);
87 return ret;
88}
89
90/*
91 * Set LVDS backlight level by I2C command
92 *
93 * FIXME: at some point we need to both track this for PM and also
94 * disable runtime pm on MRST if the brightness is nil (ie blanked)
95 */
96static int psb_lvds_i2c_set_brightness(struct drm_device *dev,
97 unsigned int level)
98{
99 struct drm_psb_private *dev_priv =
100 (struct drm_psb_private *)dev->dev_private;
101
102 struct psb_intel_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus;
103 u8 out_buf[2];
104 unsigned int blc_i2c_brightness;
105
106 struct i2c_msg msgs[] = {
107 {
108 .addr = lvds_i2c_bus->slave_addr,
109 .flags = 0,
110 .len = 2,
111 .buf = out_buf,
112 }
113 };
114
115 blc_i2c_brightness = BRIGHTNESS_MASK & ((unsigned int)level *
116 BRIGHTNESS_MASK /
117 BRIGHTNESS_MAX_LEVEL);
118
119 if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
120 blc_i2c_brightness = BRIGHTNESS_MASK - blc_i2c_brightness;
121
122 out_buf[0] = dev_priv->lvds_bl->brightnesscmd;
123 out_buf[1] = (u8)blc_i2c_brightness;
124
125 if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1) {
126 dev_dbg(dev->dev, "I2C set brightness.(command, value) (%d, %d)\n",
127 dev_priv->lvds_bl->brightnesscmd,
128 blc_i2c_brightness);
129 return 0;
130 }
131
132 dev_err(dev->dev, "I2C transfer error\n");
133 return -1;
134}
135
136
137static int psb_lvds_pwm_set_brightness(struct drm_device *dev, int level)
138{
139 struct drm_psb_private *dev_priv =
140 (struct drm_psb_private *)dev->dev_private;
141
142 u32 max_pwm_blc;
143 u32 blc_pwm_duty_cycle;
144
145 max_pwm_blc = psb_intel_lvds_get_max_backlight(dev);
146
147	/* BLC_PWM_CTL should be initialized during backlight device init */
148 BUG_ON(max_pwm_blc == 0);
149
150 blc_pwm_duty_cycle = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
151
152 if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
153 blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle;
154
155 blc_pwm_duty_cycle &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
156 REG_WRITE(BLC_PWM_CTL,
157 (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
158 (blc_pwm_duty_cycle));
159
160 dev_info(dev->dev, "Backlight lvds set brightness %08x\n",
161 (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
162 (blc_pwm_duty_cycle));
163
164 return 0;
165}
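The PWM path scales the 0..BRIGHTNESS_MAX_LEVEL level into a duty cycle relative to the maximum read back from BLC_PWM_CTL, inverts it for inverted-polarity panels, and clears the polarity bit before writing. A compact sketch of that calculation (illustrative only, not part of the driver):

static u32 psb_scale_brightness(u32 level, u32 max_pwm, bool inverse)
{
	u32 duty = level * max_pwm / BRIGHTNESS_MAX_LEVEL;

	if (inverse)
		duty = max_pwm - duty;
	return duty & PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
}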
166
167/*
168 * Set LVDS backlight level either by I2C or PWM
169 */
170void psb_intel_lvds_set_brightness(struct drm_device *dev, int level)
171{
172 struct drm_psb_private *dev_priv = dev->dev_private;
173
174 dev_dbg(dev->dev, "backlight level is %d\n", level);
175
176 if (!dev_priv->lvds_bl) {
177 dev_err(dev->dev, "NO LVDS backlight info\n");
178 return;
179 }
180
181 if (dev_priv->lvds_bl->type == BLC_I2C_TYPE)
182 psb_lvds_i2c_set_brightness(dev, level);
183 else
184 psb_lvds_pwm_set_brightness(dev, level);
185}
186
187/*
188 * Sets the backlight level.
189 *
190 * level: backlight level, from 0 to psb_intel_lvds_get_max_backlight().
191 */
192static void psb_intel_lvds_set_backlight(struct drm_device *dev, int level)
193{
194 struct drm_psb_private *dev_priv = dev->dev_private;
195 u32 blc_pwm_ctl;
196
197 if (gma_power_begin(dev, false)) {
198 blc_pwm_ctl = REG_READ(BLC_PWM_CTL);
199 blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
200 REG_WRITE(BLC_PWM_CTL,
201 (blc_pwm_ctl |
202 (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
203 dev_priv->saveBLC_PWM_CTL = (blc_pwm_ctl |
204 (level << BACKLIGHT_DUTY_CYCLE_SHIFT));
205 gma_power_end(dev);
206 } else {
207 blc_pwm_ctl = dev_priv->saveBLC_PWM_CTL &
208 ~BACKLIGHT_DUTY_CYCLE_MASK;
209 dev_priv->saveBLC_PWM_CTL = (blc_pwm_ctl |
210 (level << BACKLIGHT_DUTY_CYCLE_SHIFT));
211 }
212}
213
214/*
215 * Sets the power state for the panel.
216 */
217static void psb_intel_lvds_set_power(struct drm_device *dev,
218 struct psb_intel_output *output, bool on)
219{
220 u32 pp_status;
221
222 if (!gma_power_begin(dev, true)) {
223 dev_err(dev->dev, "set power, chip off!\n");
224 return;
225 }
226
227 if (on) {
228 REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
229 POWER_TARGET_ON);
230 do {
231 pp_status = REG_READ(PP_STATUS);
232 } while ((pp_status & PP_ON) == 0);
233
234 psb_intel_lvds_set_backlight(dev,
235 output->
236 mode_dev->backlight_duty_cycle);
237 } else {
238 psb_intel_lvds_set_backlight(dev, 0);
239
240 REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
241 ~POWER_TARGET_ON);
242 do {
243 pp_status = REG_READ(PP_STATUS);
244 } while (pp_status & PP_ON);
245 }
246
247 gma_power_end(dev);
248}
249
250static void psb_intel_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
251{
252 struct drm_device *dev = encoder->dev;
253 struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
254
255 if (mode == DRM_MODE_DPMS_ON)
256 psb_intel_lvds_set_power(dev, output, true);
257 else
258 psb_intel_lvds_set_power(dev, output, false);
259
260 /* XXX: We never power down the LVDS pairs. */
261}
262
263static void psb_intel_lvds_save(struct drm_connector *connector)
264{
265 struct drm_device *dev = connector->dev;
266 struct drm_psb_private *dev_priv =
267 (struct drm_psb_private *)dev->dev_private;
268 struct psb_intel_output *psb_intel_output =
269 to_psb_intel_output(connector);
270 struct psb_intel_lvds_priv *lvds_priv =
271 (struct psb_intel_lvds_priv *)psb_intel_output->dev_priv;
272
273 lvds_priv->savePP_ON = REG_READ(LVDSPP_ON);
274 lvds_priv->savePP_OFF = REG_READ(LVDSPP_OFF);
275 lvds_priv->saveLVDS = REG_READ(LVDS);
276 lvds_priv->savePP_CONTROL = REG_READ(PP_CONTROL);
277 lvds_priv->savePP_CYCLE = REG_READ(PP_CYCLE);
278 /*lvds_priv->savePP_DIVISOR = REG_READ(PP_DIVISOR);*/
279 lvds_priv->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
280 lvds_priv->savePFIT_CONTROL = REG_READ(PFIT_CONTROL);
281 lvds_priv->savePFIT_PGM_RATIOS = REG_READ(PFIT_PGM_RATIOS);
282
283 /*TODO: move backlight_duty_cycle to psb_intel_lvds_priv*/
284 dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
285 BACKLIGHT_DUTY_CYCLE_MASK);
286
287 /*
288 * If the light is off at server startup,
289 * just make it full brightness
290 */
291 if (dev_priv->backlight_duty_cycle == 0)
292 dev_priv->backlight_duty_cycle =
293 psb_intel_lvds_get_max_backlight(dev);
294
295 dev_dbg(dev->dev, "(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
296 lvds_priv->savePP_ON,
297 lvds_priv->savePP_OFF,
298 lvds_priv->saveLVDS,
299 lvds_priv->savePP_CONTROL,
300 lvds_priv->savePP_CYCLE,
301 lvds_priv->saveBLC_PWM_CTL);
302}
303
304static void psb_intel_lvds_restore(struct drm_connector *connector)
305{
306 struct drm_device *dev = connector->dev;
307 u32 pp_status;
308 struct psb_intel_output *psb_intel_output =
309 to_psb_intel_output(connector);
310 struct psb_intel_lvds_priv *lvds_priv =
311 (struct psb_intel_lvds_priv *)psb_intel_output->dev_priv;
312
313 dev_dbg(dev->dev, "(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
314 lvds_priv->savePP_ON,
315 lvds_priv->savePP_OFF,
316 lvds_priv->saveLVDS,
317 lvds_priv->savePP_CONTROL,
318 lvds_priv->savePP_CYCLE,
319 lvds_priv->saveBLC_PWM_CTL);
320
321 REG_WRITE(BLC_PWM_CTL, lvds_priv->saveBLC_PWM_CTL);
322 REG_WRITE(PFIT_CONTROL, lvds_priv->savePFIT_CONTROL);
323 REG_WRITE(PFIT_PGM_RATIOS, lvds_priv->savePFIT_PGM_RATIOS);
324 REG_WRITE(LVDSPP_ON, lvds_priv->savePP_ON);
325 REG_WRITE(LVDSPP_OFF, lvds_priv->savePP_OFF);
326 /*REG_WRITE(PP_DIVISOR, lvds_priv->savePP_DIVISOR);*/
327 REG_WRITE(PP_CYCLE, lvds_priv->savePP_CYCLE);
328 REG_WRITE(PP_CONTROL, lvds_priv->savePP_CONTROL);
329 REG_WRITE(LVDS, lvds_priv->saveLVDS);
330
331 if (lvds_priv->savePP_CONTROL & POWER_TARGET_ON) {
332 REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
333 POWER_TARGET_ON);
334 do {
335 pp_status = REG_READ(PP_STATUS);
336 } while ((pp_status & PP_ON) == 0);
337 } else {
338 REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
339 ~POWER_TARGET_ON);
340 do {
341 pp_status = REG_READ(PP_STATUS);
342 } while (pp_status & PP_ON);
343 }
344}
345
346int psb_intel_lvds_mode_valid(struct drm_connector *connector,
347 struct drm_display_mode *mode)
348{
349 struct psb_intel_output *psb_intel_output =
350 to_psb_intel_output(connector);
351 struct drm_display_mode *fixed_mode =
352 psb_intel_output->mode_dev->panel_fixed_mode;
353
354 if (psb_intel_output->type == INTEL_OUTPUT_MIPI2)
355 fixed_mode = psb_intel_output->mode_dev->panel_fixed_mode2;
356
357 /* just in case */
358 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
359 return MODE_NO_DBLESCAN;
360
361 /* just in case */
362 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
363 return MODE_NO_INTERLACE;
364
365 if (fixed_mode) {
366 if (mode->hdisplay > fixed_mode->hdisplay)
367 return MODE_PANEL;
368 if (mode->vdisplay > fixed_mode->vdisplay)
369 return MODE_PANEL;
370 }
371 return MODE_OK;
372}
373
374bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
375 struct drm_display_mode *mode,
376 struct drm_display_mode *adjusted_mode)
377{
378 struct psb_intel_mode_device *mode_dev =
379 enc_to_psb_intel_output(encoder)->mode_dev;
380 struct drm_device *dev = encoder->dev;
381 struct psb_intel_crtc *psb_intel_crtc =
382 to_psb_intel_crtc(encoder->crtc);
383 struct drm_encoder *tmp_encoder;
384 struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode;
385 struct psb_intel_output *psb_intel_output =
386 enc_to_psb_intel_output(encoder);
387
388 if (psb_intel_output->type == INTEL_OUTPUT_MIPI2)
389 panel_fixed_mode = mode_dev->panel_fixed_mode2;
390
391	/* PSB requires the LVDS to be on pipe B; MRST has only one pipe anyway */
392 if (!IS_MRST(dev) && psb_intel_crtc->pipe == 0) {
393 printk(KERN_ERR "Can't support LVDS on pipe A\n");
394 return false;
395 }
396 if (IS_MRST(dev) && psb_intel_crtc->pipe != 0) {
397 printk(KERN_ERR "Must use PIPE A\n");
398 return false;
399 }
400 /* Should never happen!! */
401 list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list,
402 head) {
403 if (tmp_encoder != encoder
404 && tmp_encoder->crtc == encoder->crtc) {
405 printk(KERN_ERR "Can't enable LVDS and another "
406 "encoder on the same pipe\n");
407 return false;
408 }
409 }
410
411 /*
412 * If we have timings from the BIOS for the panel, put them in
413 * to the adjusted mode. The CRTC will be set up for this mode,
414 * with the panel scaling set up to source from the H/VDisplay
415 * of the original mode.
416 */
417 if (panel_fixed_mode != NULL) {
418 adjusted_mode->hdisplay = panel_fixed_mode->hdisplay;
419 adjusted_mode->hsync_start = panel_fixed_mode->hsync_start;
420 adjusted_mode->hsync_end = panel_fixed_mode->hsync_end;
421 adjusted_mode->htotal = panel_fixed_mode->htotal;
422 adjusted_mode->vdisplay = panel_fixed_mode->vdisplay;
423 adjusted_mode->vsync_start = panel_fixed_mode->vsync_start;
424 adjusted_mode->vsync_end = panel_fixed_mode->vsync_end;
425 adjusted_mode->vtotal = panel_fixed_mode->vtotal;
426 adjusted_mode->clock = panel_fixed_mode->clock;
427 drm_mode_set_crtcinfo(adjusted_mode,
428 CRTC_INTERLACE_HALVE_V);
429 }
430
431 /*
432 * XXX: It would be nice to support lower refresh rates on the
433 * panels to reduce power consumption, and perhaps match the
434 * user's requested refresh rate.
435 */
436
437 return true;
438}
439
440static void psb_intel_lvds_prepare(struct drm_encoder *encoder)
441{
442 struct drm_device *dev = encoder->dev;
443 struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
444 struct psb_intel_mode_device *mode_dev = output->mode_dev;
445
446 if (!gma_power_begin(dev, true))
447 return;
448
449 mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
450 mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
451 BACKLIGHT_DUTY_CYCLE_MASK);
452
453 psb_intel_lvds_set_power(dev, output, false);
454
455 gma_power_end(dev);
456}
457
458static void psb_intel_lvds_commit(struct drm_encoder *encoder)
459{
460 struct drm_device *dev = encoder->dev;
461 struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
462 struct psb_intel_mode_device *mode_dev = output->mode_dev;
463
464 if (mode_dev->backlight_duty_cycle == 0)
465 mode_dev->backlight_duty_cycle =
466 psb_intel_lvds_get_max_backlight(dev);
467
468 psb_intel_lvds_set_power(dev, output, true);
469}
470
471static void psb_intel_lvds_mode_set(struct drm_encoder *encoder,
472 struct drm_display_mode *mode,
473 struct drm_display_mode *adjusted_mode)
474{
475 struct drm_device *dev = encoder->dev;
476 struct drm_psb_private *dev_priv = dev->dev_private;
477 u32 pfit_control;
478
479 /*
480 * The LVDS pin pair will already have been turned on in the
481 * psb_intel_crtc_mode_set since it has a large impact on the DPLL
482 * settings.
483 */
484
485 /*
486 * Enable automatic panel scaling so that non-native modes fill the
487 * screen. Should be enabled before the pipe is enabled, according to
488 * register description and PRM.
489 */
490 if (mode->hdisplay != adjusted_mode->hdisplay ||
491 mode->vdisplay != adjusted_mode->vdisplay)
492 pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
493 HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
494 HORIZ_INTERP_BILINEAR);
495 else
496 pfit_control = 0;
497
498 if (dev_priv->lvds_dither)
499 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
500
501 REG_WRITE(PFIT_CONTROL, pfit_control);
502}
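Automatic panel scaling is engaged only when the user's mode differs from the panel's native timings; otherwise the fitter is left off (the dither bit is OR'ed in separately above). The decision can be summarised as follows (illustrative helper, not part of the driver):

static u32 psb_lvds_pfit_bits(const struct drm_display_mode *mode,
			      const struct drm_display_mode *adjusted_mode)
{
	if (mode->hdisplay == adjusted_mode->hdisplay &&
	    mode->vdisplay == adjusted_mode->vdisplay)
		return 0;	/* native mode: no scaling needed */
	return PFIT_ENABLE | VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
	       VERT_INTERP_BILINEAR | HORIZ_INTERP_BILINEAR;
}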
503
504/*
505 * Detect the LVDS connection.
506 *
507 * This always returns CONNECTOR_STATUS_CONNECTED.
508 * This connector should only have
509 * been set up if the LVDS was actually connected anyway.
510 */
511static enum drm_connector_status psb_intel_lvds_detect(struct drm_connector
512 *connector, bool force)
513{
514 return connector_status_connected;
515}
516
517/*
518 * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
519 */
520static int psb_intel_lvds_get_modes(struct drm_connector *connector)
521{
522 struct drm_device *dev = connector->dev;
523 struct psb_intel_output *psb_intel_output =
524 to_psb_intel_output(connector);
525 struct psb_intel_mode_device *mode_dev =
526 psb_intel_output->mode_dev;
527 int ret = 0;
528
529 if (!IS_MRST(dev))
530 ret = psb_intel_ddc_get_modes(psb_intel_output);
531
532 if (ret)
533 return ret;
534
535 /* Didn't get an EDID, so
536	 * set wide sync ranges so we get all modes
537 * handed to valid_mode for checking
538 */
539 connector->display_info.min_vfreq = 0;
540 connector->display_info.max_vfreq = 200;
541 connector->display_info.min_hfreq = 0;
542 connector->display_info.max_hfreq = 200;
543
544 if (mode_dev->panel_fixed_mode != NULL) {
545 struct drm_display_mode *mode =
546 drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
547 drm_mode_probed_add(connector, mode);
548 return 1;
549 }
550
551 return 0;
552}
553
554/**
555 * psb_intel_lvds_destroy - unregister and free LVDS structures
556 * @connector: connector to free
557 *
558 * Unregister the DDC bus for this connector then free the driver private
559 * structure.
560 */
561void psb_intel_lvds_destroy(struct drm_connector *connector)
562{
563 struct psb_intel_output *psb_intel_output =
564 to_psb_intel_output(connector);
565
566 if (psb_intel_output->ddc_bus)
567 psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
568 drm_sysfs_connector_remove(connector);
569 drm_connector_cleanup(connector);
570 kfree(connector);
571}
572
573int psb_intel_lvds_set_property(struct drm_connector *connector,
574 struct drm_property *property,
575 uint64_t value)
576{
577 struct drm_encoder *encoder = connector->encoder;
578
579 if (!encoder)
580 return -1;
581
582 if (!strcmp(property->name, "scaling mode")) {
583 struct psb_intel_crtc *crtc =
584 to_psb_intel_crtc(encoder->crtc);
585 uint64_t curval;
586
587 if (!crtc)
588 goto set_prop_error;
589
590 switch (value) {
591 case DRM_MODE_SCALE_FULLSCREEN:
592 break;
593 case DRM_MODE_SCALE_NO_SCALE:
594 break;
595 case DRM_MODE_SCALE_ASPECT:
596 break;
597 default:
598 goto set_prop_error;
599 }
600
601 if (drm_connector_property_get_value(connector,
602 property,
603 &curval))
604 goto set_prop_error;
605
606 if (curval == value)
607 goto set_prop_done;
608
609 if (drm_connector_property_set_value(connector,
610 property,
611 value))
612 goto set_prop_error;
613
614 if (crtc->saved_mode.hdisplay != 0 &&
615 crtc->saved_mode.vdisplay != 0) {
616 if (!drm_crtc_helper_set_mode(encoder->crtc,
617 &crtc->saved_mode,
618 encoder->crtc->x,
619 encoder->crtc->y,
620 encoder->crtc->fb))
621 goto set_prop_error;
622 }
623 } else if (!strcmp(property->name, "backlight")) {
624 if (drm_connector_property_set_value(connector,
625 property,
626 value))
627 goto set_prop_error;
628 else {
629#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
630 struct drm_psb_private *devp =
631 encoder->dev->dev_private;
632 struct backlight_device *bd = devp->backlight_device;
633 if (bd) {
634 bd->props.brightness = value;
635 backlight_update_status(bd);
636 }
637#endif
638 }
639 } else if (!strcmp(property->name, "DPMS")) {
640 struct drm_encoder_helper_funcs *hfuncs
641 = encoder->helper_private;
642 hfuncs->dpms(encoder, value);
643 }
644
645set_prop_done:
646 return 0;
647set_prop_error:
648 return -1;
649}
650
651static const struct drm_encoder_helper_funcs psb_intel_lvds_helper_funcs = {
652 .dpms = psb_intel_lvds_encoder_dpms,
653 .mode_fixup = psb_intel_lvds_mode_fixup,
654 .prepare = psb_intel_lvds_prepare,
655 .mode_set = psb_intel_lvds_mode_set,
656 .commit = psb_intel_lvds_commit,
657};
658
659const struct drm_connector_helper_funcs
660 psb_intel_lvds_connector_helper_funcs = {
661 .get_modes = psb_intel_lvds_get_modes,
662 .mode_valid = psb_intel_lvds_mode_valid,
663 .best_encoder = psb_intel_best_encoder,
664};
665
666const struct drm_connector_funcs psb_intel_lvds_connector_funcs = {
667 .dpms = drm_helper_connector_dpms,
668 .save = psb_intel_lvds_save,
669 .restore = psb_intel_lvds_restore,
670 .detect = psb_intel_lvds_detect,
671 .fill_modes = drm_helper_probe_single_connector_modes,
672 .set_property = psb_intel_lvds_set_property,
673 .destroy = psb_intel_lvds_destroy,
674};
675
676
677static void psb_intel_lvds_enc_destroy(struct drm_encoder *encoder)
678{
679 drm_encoder_cleanup(encoder);
680}
681
682const struct drm_encoder_funcs psb_intel_lvds_enc_funcs = {
683 .destroy = psb_intel_lvds_enc_destroy,
684};
685
686
687
688/**
689 * psb_intel_lvds_init - setup LVDS connectors on this device
690 * @dev: drm device
691 *
692 * Create the connector, register the LVDS DDC bus, and try to figure out what
693 * modes we can display on the LVDS panel (if present).
694 */
695void psb_intel_lvds_init(struct drm_device *dev,
696 struct psb_intel_mode_device *mode_dev)
697{
698 struct psb_intel_output *psb_intel_output;
699 struct psb_intel_lvds_priv *lvds_priv;
700 struct drm_connector *connector;
701 struct drm_encoder *encoder;
702 struct drm_display_mode *scan; /* *modes, *bios_mode; */
703 struct drm_crtc *crtc;
704 struct drm_psb_private *dev_priv = dev->dev_private;
705 u32 lvds;
706 int pipe;
707
708 psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
709 if (!psb_intel_output)
710 return;
711
712 lvds_priv = kzalloc(sizeof(struct psb_intel_lvds_priv), GFP_KERNEL);
713 if (!lvds_priv) {
714 kfree(psb_intel_output);
715 dev_err(dev->dev, "LVDS private allocation error\n");
716 return;
717 }
718
719 psb_intel_output->dev_priv = lvds_priv;
720 psb_intel_output->mode_dev = mode_dev;
721
722 connector = &psb_intel_output->base;
723 encoder = &psb_intel_output->enc;
724 drm_connector_init(dev, &psb_intel_output->base,
725 &psb_intel_lvds_connector_funcs,
726 DRM_MODE_CONNECTOR_LVDS);
727
728 drm_encoder_init(dev, &psb_intel_output->enc,
729 &psb_intel_lvds_enc_funcs,
730 DRM_MODE_ENCODER_LVDS);
731
732 drm_mode_connector_attach_encoder(&psb_intel_output->base,
733 &psb_intel_output->enc);
734 psb_intel_output->type = INTEL_OUTPUT_LVDS;
735
736 drm_encoder_helper_add(encoder, &psb_intel_lvds_helper_funcs);
737 drm_connector_helper_add(connector,
738 &psb_intel_lvds_connector_helper_funcs);
739 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
740 connector->interlace_allowed = false;
741 connector->doublescan_allowed = false;
742
743 /*Attach connector properties*/
744 drm_connector_attach_property(connector,
745 dev->mode_config.scaling_mode_property,
746 DRM_MODE_SCALE_FULLSCREEN);
747 drm_connector_attach_property(connector,
748 dev_priv->backlight_property,
749 BRIGHTNESS_MAX_LEVEL);
750
751 /*
752 * Set up I2C bus
753	 * FIXME: destroy i2c_bus on exit
754 */
755 psb_intel_output->i2c_bus = psb_intel_i2c_create(dev,
756 GPIOB,
757 "LVDSBLC_B");
758 if (!psb_intel_output->i2c_bus) {
759 dev_printk(KERN_ERR,
760 &dev->pdev->dev, "I2C bus registration failed.\n");
761 goto failed_blc_i2c;
762 }
763 psb_intel_output->i2c_bus->slave_addr = 0x2C;
764 dev_priv->lvds_i2c_bus = psb_intel_output->i2c_bus;
765
766 /*
767 * LVDS discovery:
768 * 1) check for EDID on DDC
769 * 2) check for VBT data
770 * 3) check to see if LVDS is already on
771 * if none of the above, no panel
772 * 4) make sure lid is open
773 * if closed, act like it's not there for now
774 */
775
776 /* Set up the DDC bus. */
777 psb_intel_output->ddc_bus = psb_intel_i2c_create(dev,
778 GPIOC,
779 "LVDSDDC_C");
780 if (!psb_intel_output->ddc_bus) {
781 dev_printk(KERN_ERR, &dev->pdev->dev,
782 "DDC bus registration " "failed.\n");
783 goto failed_ddc;
784 }
785
786 /*
787 * Attempt to get the fixed panel mode from DDC. Assume that the
788 * preferred mode is the right one.
789 */
790 psb_intel_ddc_get_modes(psb_intel_output);
791 list_for_each_entry(scan, &connector->probed_modes, head) {
792 if (scan->type & DRM_MODE_TYPE_PREFERRED) {
793 mode_dev->panel_fixed_mode =
794 drm_mode_duplicate(dev, scan);
795 goto out; /* FIXME: check for quirks */
796 }
797 }
798
799 /* Failed to get EDID, what about VBT? do we need this? */
800 if (mode_dev->vbt_mode)
801 mode_dev->panel_fixed_mode =
802 drm_mode_duplicate(dev, mode_dev->vbt_mode);
803
804 if (!mode_dev->panel_fixed_mode)
805 if (dev_priv->lfp_lvds_vbt_mode)
806 mode_dev->panel_fixed_mode =
807 drm_mode_duplicate(dev,
808 dev_priv->lfp_lvds_vbt_mode);
809
810 /*
811 * If we didn't get EDID, try checking if the panel is already turned
812 * on. If so, assume that whatever is currently programmed is the
813 * correct mode.
814 */
815 lvds = REG_READ(LVDS);
816 pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
817 crtc = psb_intel_get_crtc_from_pipe(dev, pipe);
818
819 if (crtc && (lvds & LVDS_PORT_EN)) {
820 mode_dev->panel_fixed_mode =
821 psb_intel_crtc_mode_get(dev, crtc);
822 if (mode_dev->panel_fixed_mode) {
823 mode_dev->panel_fixed_mode->type |=
824 DRM_MODE_TYPE_PREFERRED;
825 goto out; /* FIXME: check for quirks */
826 }
827 }
828
829 /* If we still don't have a mode after all that, give up. */
830 if (!mode_dev->panel_fixed_mode) {
831 dev_err(dev->dev, "Found no modes on the lvds, ignoring the LVDS\n");
832 goto failed_find;
833 }
834
835 /*
836 * Blacklist machines with BIOSes that list an LVDS panel without
837 * actually having one.
838 */
839out:
840 drm_sysfs_connector_add(connector);
841 return;
842
843failed_find:
844 if (psb_intel_output->ddc_bus)
845 psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
846failed_ddc:
847 if (psb_intel_output->i2c_bus)
848 psb_intel_i2c_destroy(psb_intel_output->i2c_bus);
849failed_blc_i2c:
850 drm_encoder_cleanup(encoder);
851 drm_connector_cleanup(connector);
852 kfree(connector);
853}
854
diff --git a/drivers/staging/gma500/psb_intel_modes.c b/drivers/staging/gma500/psb_intel_modes.c
deleted file mode 100644
index bde1aff96190..000000000000
--- a/drivers/staging/gma500/psb_intel_modes.c
+++ /dev/null
@@ -1,77 +0,0 @@
1/*
2 * Copyright (c) 2007 Intel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Authors: Jesse Barnes <jesse.barnes@intel.com>
18 */
19
20#include <linux/i2c.h>
21#include <linux/fb.h>
22#include <drm/drmP.h>
23#include "psb_intel_drv.h"
24
25/**
26 * psb_intel_ddc_probe
27 *
28 */
29bool psb_intel_ddc_probe(struct psb_intel_output *psb_intel_output)
30{
31 u8 out_buf[] = { 0x0, 0x0 };
32 u8 buf[2];
33 int ret;
34 struct i2c_msg msgs[] = {
35 {
36 .addr = 0x50,
37 .flags = 0,
38 .len = 1,
39 .buf = out_buf,
40 },
41 {
42 .addr = 0x50,
43 .flags = I2C_M_RD,
44 .len = 1,
45 .buf = buf,
46 }
47 };
48
49 ret = i2c_transfer(&psb_intel_output->ddc_bus->adapter, msgs, 2);
50 if (ret == 2)
51 return true;
52
53 return false;
54}
55
56/**
57 * psb_intel_ddc_get_modes - get modelist from monitor
58 * @connector: DRM connector device to use
59 *
60 * Fetch the EDID information from @connector using the DDC bus.
61 */
62int psb_intel_ddc_get_modes(struct psb_intel_output *psb_intel_output)
63{
64 struct edid *edid;
65 int ret = 0;
66
67 edid =
68 drm_get_edid(&psb_intel_output->base,
69 &psb_intel_output->ddc_bus->adapter);
70 if (edid) {
71 drm_mode_connector_update_edid_property(&psb_intel_output->
72 base, edid);
73 ret = drm_add_edid_modes(&psb_intel_output->base, edid);
74 kfree(edid);
75 }
76 return ret;
77}
diff --git a/drivers/staging/gma500/psb_intel_reg.h b/drivers/staging/gma500/psb_intel_reg.h
deleted file mode 100644
index 1ac16aa791c9..000000000000
--- a/drivers/staging/gma500/psb_intel_reg.h
+++ /dev/null
@@ -1,1235 +0,0 @@
1/*
2 * Copyright (c) 2009, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17#ifndef __PSB_INTEL_REG_H__
18#define __PSB_INTEL_REG_H__
19
20#define BLC_PWM_CTL 0x61254
21#define BLC_PWM_CTL2 0x61250
22#define BLC_PWM_CTL_C 0x62254
23#define BLC_PWM_CTL2_C 0x62250
24#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
25/*
26 * This is the most significant 15 bits of the number of backlight cycles in a
27 * complete cycle of the modulated backlight control.
28 *
29 * The actual value is this field multiplied by two.
30 */
31#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
32#define BLM_LEGACY_MODE (1 << 16)
33/*
34 * This is the number of cycles out of the backlight modulation cycle for which
35 * the backlight is on.
36 *
37 * This field must be no greater than the number of cycles in the complete
38 * backlight modulation cycle.
39 */
40#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
41#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
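The two comments above fully determine the arithmetic: the frequency field holds half the number of cycles in one complete modulation period, and the duty-cycle field counts the on-cycles out of that period, so it must not exceed it. Below is a minimal sketch of the packing under those assumptions only; the helper name and the percentage interface are hypothetical and not part of this driver.

#include <stdint.h>

/* mirrors the definitions above */
#define BACKLIGHT_MODULATION_FREQ_SHIFT	17
#define BACKLIGHT_DUTY_CYCLE_SHIFT	0

/*
 * Illustrative only: pack a BLC_PWM_CTL-style value for a modulation period
 * given in cycles and a brightness given in percent. The frequency field
 * stores half the period, per the comment above.
 */
static uint32_t pack_blc_pwm(uint32_t period_cycles, uint32_t percent)
{
	uint32_t freq_field = period_cycles / 2;
	uint32_t duty = period_cycles * percent / 100;	/* must not exceed period_cycles */

	return (freq_field << BACKLIGHT_MODULATION_FREQ_SHIFT) |
	       (duty << BACKLIGHT_DUTY_CYCLE_SHIFT);
}

For example, an 8192-cycle period at 50% brightness gives a frequency field of 4096 and a duty-cycle field of 4096.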
42
43#define I915_GCFGC 0xf0
44#define I915_LOW_FREQUENCY_ENABLE (1 << 7)
45#define I915_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
46#define I915_DISPLAY_CLOCK_333_MHZ (4 << 4)
47#define I915_DISPLAY_CLOCK_MASK (7 << 4)
48
49#define I855_HPLLCC 0xc0
50#define I855_CLOCK_CONTROL_MASK (3 << 0)
51#define I855_CLOCK_133_200 (0 << 0)
52#define I855_CLOCK_100_200 (1 << 0)
53#define I855_CLOCK_100_133 (2 << 0)
54#define I855_CLOCK_166_250 (3 << 0)
55
56/* I830 CRTC registers */
57#define HTOTAL_A 0x60000
58#define HBLANK_A 0x60004
59#define HSYNC_A 0x60008
60#define VTOTAL_A 0x6000c
61#define VBLANK_A 0x60010
62#define VSYNC_A 0x60014
63#define PIPEASRC 0x6001c
64#define BCLRPAT_A 0x60020
65#define VSYNCSHIFT_A 0x60028
66
67#define HTOTAL_B 0x61000
68#define HBLANK_B 0x61004
69#define HSYNC_B 0x61008
70#define VTOTAL_B 0x6100c
71#define VBLANK_B 0x61010
72#define VSYNC_B 0x61014
73#define PIPEBSRC 0x6101c
74#define BCLRPAT_B 0x61020
75#define VSYNCSHIFT_B 0x61028
76
77#define HTOTAL_C 0x62000
78#define HBLANK_C 0x62004
79#define HSYNC_C 0x62008
80#define VTOTAL_C 0x6200c
81#define VBLANK_C 0x62010
82#define VSYNC_C 0x62014
83#define PIPECSRC 0x6201c
84#define BCLRPAT_C 0x62020
85#define VSYNCSHIFT_C 0x62028
86
87#define PP_STATUS 0x61200
88# define PP_ON (1 << 31)
89/*
90 * Indicates that all dependencies of the panel are on:
91 *
92 * - PLL enabled
93 * - pipe enabled
94 * - LVDS/DVOB/DVOC on
95 */
96#define PP_READY (1 << 30)
97#define PP_SEQUENCE_NONE (0 << 28)
98#define PP_SEQUENCE_ON (1 << 28)
99#define PP_SEQUENCE_OFF (2 << 28)
100#define PP_SEQUENCE_MASK 0x30000000
101#define PP_CONTROL 0x61204
102#define POWER_TARGET_ON (1 << 0)
103
104#define LVDSPP_ON 0x61208
105#define LVDSPP_OFF 0x6120c
106#define PP_CYCLE 0x61210
107
108#define PFIT_CONTROL 0x61230
109#define PFIT_ENABLE (1 << 31)
110#define PFIT_PIPE_MASK (3 << 29)
111#define PFIT_PIPE_SHIFT 29
112#define PFIT_SCALING_MODE_PILLARBOX (1 << 27)
113#define PFIT_SCALING_MODE_LETTERBOX (3 << 26)
114#define VERT_INTERP_DISABLE (0 << 10)
115#define VERT_INTERP_BILINEAR (1 << 10)
116#define VERT_INTERP_MASK (3 << 10)
117#define VERT_AUTO_SCALE (1 << 9)
118#define HORIZ_INTERP_DISABLE (0 << 6)
119#define HORIZ_INTERP_BILINEAR (1 << 6)
120#define HORIZ_INTERP_MASK (3 << 6)
121#define HORIZ_AUTO_SCALE (1 << 5)
122#define PANEL_8TO6_DITHER_ENABLE (1 << 3)
123
124#define PFIT_PGM_RATIOS 0x61234
125#define PFIT_VERT_SCALE_MASK 0xfff00000
126#define PFIT_HORIZ_SCALE_MASK 0x0000fff0
127
128#define PFIT_AUTO_RATIOS 0x61238
129
130#define DPLL_A 0x06014
131#define DPLL_B 0x06018
132#define DPLL_VCO_ENABLE (1 << 31)
133#define DPLL_DVO_HIGH_SPEED (1 << 30)
134#define DPLL_SYNCLOCK_ENABLE (1 << 29)
135#define DPLL_VGA_MODE_DIS (1 << 28)
136#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
137#define DPLLB_MODE_LVDS (2 << 26) /* i915 */
138#define DPLL_MODE_MASK (3 << 26)
139#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
140#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
141#define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
142#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
143#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
144#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
145#define DPLL_LOCK (1 << 15) /* CDV */
146
147/*
148 * The i830 generation, in DAC/serial mode, defines p1 as two plus this
149 * bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set.
150 */
151# define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
152/*
153 * The i830 generation, in LVDS mode, defines P1 as the bit number set within
154 * this field (only one bit may be set).
155 */
156#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
157#define DPLL_FPA01_P1_POST_DIV_SHIFT 16
158#define PLL_P2_DIVIDE_BY_4 (1 << 23) /* i830, required
159 * in DVO non-gang */
160# define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
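The two comments above describe two different encodings of the P1 post divider on i830-class hardware. The sketch below shows how a raw DPLL value would be decoded under each reading; it follows the comments literally (and ignores the PLL_P1_DIVIDE_BY_TWO special case), so treat it as an illustration rather than this driver's clock-readback code.

#include <stdint.h>
#include <strings.h>	/* ffs() */

#define DPLL_FPA01_P1_POST_DIV_MASK_I830	0x001f0000
#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS	0x003f0000
#define DPLL_FPA01_P1_POST_DIV_SHIFT		16

/* DAC/serial mode: P1 is two plus the field value */
static int i830_p1_dac_serial(uint32_t dpll)
{
	return ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830)
			>> DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
}

/* LVDS mode: P1 is the (1-based) bit number set within the field */
static int i830_p1_lvds(uint32_t dpll)
{
	return ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS)
			>> DPLL_FPA01_P1_POST_DIV_SHIFT);
}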
161#define PLL_REF_INPUT_DREFCLK (0 << 13)
162#define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */
163#define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO
164 * TVCLKIN */
165#define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
166#define PLL_REF_INPUT_MASK (3 << 13)
167#define PLL_LOAD_PULSE_PHASE_SHIFT 9
168/*
169 * Parallel to Serial Load Pulse phase selection.
170 * Selects the phase for the 10X DPLL clock for the PCIe
171 * digital display port. The range is 4 to 13; 10 or more
172 * is just a flip delay. The default is 6
173 */
174#define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
175#define DISPLAY_RATE_SELECT_FPA1 (1 << 8)
176
177/*
178 * SDVO multiplier for 945G/GM. Not used on 965.
179 *
180 * DPLL_MD_UDI_MULTIPLIER_MASK
181 */
182#define SDVO_MULTIPLIER_MASK 0x000000ff
183#define SDVO_MULTIPLIER_SHIFT_HIRES 4
184#define SDVO_MULTIPLIER_SHIFT_VGA 0
185
186/*
187 * PLL_MD
188 */
189/* Pipe A SDVO/UDI clock multiplier/divider register for G965. */
190#define DPLL_A_MD 0x0601c
191/* Pipe B SDVO/UDI clock multiplier/divider register for G965. */
192#define DPLL_B_MD 0x06020
193/*
194 * UDI pixel divider, controlling how many pixels are stuffed into a packet.
195 *
196 * Value is pixels minus 1. Must be set to 1 pixel for SDVO.
197 */
198#define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000
199#define DPLL_MD_UDI_DIVIDER_SHIFT 24
200/* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
201#define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000
202#define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16
203/*
204 * SDVO/UDI pixel multiplier.
205 *
206 * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus
207 * clock rate is 10 times the DPLL clock. At low resolution/refresh rate
208 * modes, the bus rate would be below the limits, so SDVO allows for stuffing
209 * dummy bytes in the datastream at an increased clock rate, with both sides of
210 * the link knowing how many of the bytes are filler.
211 *
212 * So, for a mode with a dotclock of 65Mhz, we would want to double the clock
213 * rate to 130Mhz to get a bus rate of 1.30Ghz. The DPLL clock rate would be
214 * set to 130Mhz, and the SDVO multiplier set to 2x in this register and
215 * through an SDVO command.
216 *
217 * This register field has values of multiplication factor minus 1, with
218 * a maximum multiplier of 5 for SDVO.
219 */
220#define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
221#define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
222/*
223 * SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
224 * This is best set to the default value (3) or the CRT won't work. No,
225 * I don't entirely understand what this does...
226 */
227#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
228#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
229
230#define DPLL_TEST 0x606c
231#define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
232#define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
233#define DPLLB_TEST_SDVO_DIV_4 (2 << 22)
234#define DPLLB_TEST_SDVO_DIV_MASK (3 << 22)
235#define DPLLB_TEST_N_BYPASS (1 << 19)
236#define DPLLB_TEST_M_BYPASS (1 << 18)
237#define DPLLB_INPUT_BUFFER_ENABLE (1 << 16)
238#define DPLLA_TEST_N_BYPASS (1 << 3)
239#define DPLLA_TEST_M_BYPASS (1 << 2)
240#define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
241
242#define ADPA 0x61100
243#define ADPA_DAC_ENABLE (1 << 31)
244#define ADPA_DAC_DISABLE 0
245#define ADPA_PIPE_SELECT_MASK (1 << 30)
246#define ADPA_PIPE_A_SELECT 0
247#define ADPA_PIPE_B_SELECT (1 << 30)
248#define ADPA_USE_VGA_HVPOLARITY (1 << 15)
249#define ADPA_SETS_HVPOLARITY 0
250#define ADPA_VSYNC_CNTL_DISABLE (1 << 11)
251#define ADPA_VSYNC_CNTL_ENABLE 0
252#define ADPA_HSYNC_CNTL_DISABLE (1 << 10)
253#define ADPA_HSYNC_CNTL_ENABLE 0
254#define ADPA_VSYNC_ACTIVE_HIGH (1 << 4)
255#define ADPA_VSYNC_ACTIVE_LOW 0
256#define ADPA_HSYNC_ACTIVE_HIGH (1 << 3)
257#define ADPA_HSYNC_ACTIVE_LOW 0
258
259#define FPA0 0x06040
260#define FPA1 0x06044
261#define FPB0 0x06048
262#define FPB1 0x0604c
263#define FP_N_DIV_MASK 0x003f0000
264#define FP_N_DIV_SHIFT 16
265#define FP_M1_DIV_MASK 0x00003f00
266#define FP_M1_DIV_SHIFT 8
267#define FP_M2_DIV_MASK 0x0000003f
268#define FP_M2_DIV_SHIFT 0
269
270#define PORT_HOTPLUG_EN 0x61110
271#define SDVOB_HOTPLUG_INT_EN (1 << 26)
272#define SDVOC_HOTPLUG_INT_EN (1 << 25)
273#define TV_HOTPLUG_INT_EN (1 << 18)
274#define CRT_HOTPLUG_INT_EN (1 << 9)
275#define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
276/* CDV.. */
277#define CRT_HOTPLUG_ACTIVATION_PERIOD_64 (1 << 8)
278#define CRT_HOTPLUG_DAC_ON_TIME_2M (0 << 7)
279#define CRT_HOTPLUG_DAC_ON_TIME_4M (1 << 7)
280#define CRT_HOTPLUG_VOLTAGE_COMPARE_40 (0 << 5)
281#define CRT_HOTPLUG_VOLTAGE_COMPARE_50 (1 << 5)
282#define CRT_HOTPLUG_VOLTAGE_COMPARE_60 (2 << 5)
283#define CRT_HOTPLUG_VOLTAGE_COMPARE_70 (3 << 5)
284#define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK (3 << 5)
285#define CRT_HOTPLUG_DETECT_DELAY_1G (0 << 4)
286#define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4)
287#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
288#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
289#define CRT_HOTPLUG_DETECT_MASK 0x000000F8
290
291#define PORT_HOTPLUG_STAT 0x61114
292#define CRT_HOTPLUG_INT_STATUS (1 << 11)
293#define TV_HOTPLUG_INT_STATUS (1 << 10)
294#define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
295#define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
296#define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
297#define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
298#define SDVOC_HOTPLUG_INT_STATUS (1 << 7)
299#define SDVOB_HOTPLUG_INT_STATUS (1 << 6)
300
301#define SDVOB 0x61140
302#define SDVOC 0x61160
303#define SDVO_ENABLE (1 << 31)
304#define SDVO_PIPE_B_SELECT (1 << 30)
305#define SDVO_STALL_SELECT (1 << 29)
306#define SDVO_INTERRUPT_ENABLE (1 << 26)
307
308/**
309 * 915G/GM SDVO pixel multiplier.
310 *
311 * Programmed value is multiplier - 1, up to 5x.
312 *
313 * DPLL_MD_UDI_MULTIPLIER_MASK
314 */
315#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
316#define SDVO_PORT_MULTIPLY_SHIFT 23
317#define SDVO_PHASE_SELECT_MASK (15 << 19)
318#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
319#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
320#define SDVOC_GANG_MODE (1 << 16)
321#define SDVO_BORDER_ENABLE (1 << 7)
322#define SDVOB_PCIE_CONCURRENCY (1 << 3)
323#define SDVO_DETECTED (1 << 2)
324/* Bits to be preserved when writing */
325#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14))
326#define SDVOC_PRESERVE_MASK (1 << 17)
327
328/*
329 * This register controls the LVDS output enable, pipe selection, and data
330 * format selection.
331 *
332 * All of the clock/data pairs are force powered down by power sequencing.
333 */
334#define LVDS 0x61180
335/*
336 * Enables the LVDS port. This bit must be set before DPLLs are enabled, as
337 * the DPLL semantics change when the LVDS is assigned to that pipe.
338 */
339#define LVDS_PORT_EN (1 << 31)
340/* Selects pipe B for LVDS data. Must be set on pre-965. */
341#define LVDS_PIPEB_SELECT (1 << 30)
342
343/* Turns on border drawing to allow centered display. */
344#define LVDS_BORDER_EN (1 << 15)
345
346/*
347 * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
348 * pixel.
349 */
350#define LVDS_A0A2_CLKA_POWER_MASK (3 << 8)
351#define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8)
352#define LVDS_A0A2_CLKA_POWER_UP (3 << 8)
353/*
354 * Controls the A3 data pair, which contains the additional LSBs for 24 bit
355 * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
356 * on.
357 */
358#define LVDS_A3_POWER_MASK (3 << 6)
359#define LVDS_A3_POWER_DOWN (0 << 6)
360#define LVDS_A3_POWER_UP (3 << 6)
361/*
362 * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP
363 * is set.
364 */
365#define LVDS_CLKB_POWER_MASK (3 << 4)
366#define LVDS_CLKB_POWER_DOWN (0 << 4)
367#define LVDS_CLKB_POWER_UP (3 << 4)
368/*
369 * Controls the B0-B3 data pairs. This must be set to match the DPLL p2
370 * setting for whether we are in dual-channel mode. The B3 pair will
371 * additionally only be powered up when LVDS_A3_POWER_UP is set.
372 */
373#define LVDS_B0B3_POWER_MASK (3 << 2)
374#define LVDS_B0B3_POWER_DOWN (0 << 2)
375#define LVDS_B0B3_POWER_UP (3 << 2)
376
377#define PIPEACONF 0x70008
378#define PIPEACONF_ENABLE (1 << 31)
379#define PIPEACONF_DISABLE 0
380#define PIPEACONF_DOUBLE_WIDE (1 << 30)
381#define PIPECONF_ACTIVE (1 << 30)
382#define I965_PIPECONF_ACTIVE (1 << 30)
383#define PIPECONF_DSIPLL_LOCK (1 << 29)
384#define PIPEACONF_SINGLE_WIDE 0
385#define PIPEACONF_PIPE_UNLOCKED 0
386#define PIPEACONF_DSR (1 << 26)
387#define PIPEACONF_PIPE_LOCKED (1 << 25)
388#define PIPEACONF_PALETTE 0
389#define PIPECONF_FORCE_BORDER (1 << 25)
390#define PIPEACONF_GAMMA (1 << 24)
391#define PIPECONF_PROGRESSIVE (0 << 21)
392#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
393#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
394#define PIPECONF_PLANE_OFF (1 << 19)
395#define PIPECONF_CURSOR_OFF (1 << 18)
396
397#define PIPEBCONF 0x71008
398#define PIPEBCONF_ENABLE (1 << 31)
399#define PIPEBCONF_DISABLE 0
400#define PIPEBCONF_DOUBLE_WIDE (1 << 30)
401#define PIPEBCONF_DISABLE 0
402#define PIPEBCONF_GAMMA (1 << 24)
403#define PIPEBCONF_PALETTE 0
404
405#define PIPECCONF 0x72008
406
407#define PIPEBGCMAXRED 0x71010
408#define PIPEBGCMAXGREEN 0x71014
409#define PIPEBGCMAXBLUE 0x71018
410
411#define PIPEASTAT 0x70024
412#define PIPEBSTAT 0x71024
413#define PIPECSTAT 0x72024
414#define PIPE_VBLANK_INTERRUPT_STATUS (1UL << 1)
415#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL << 2)
416#define PIPE_VBLANK_CLEAR (1 << 1)
417#define PIPE_VBLANK_STATUS (1 << 1)
418#define PIPE_TE_STATUS (1UL << 6)
419#define PIPE_DPST_EVENT_STATUS (1UL << 7)
420#define PIPE_VSYNC_CLEAR (1UL << 9)
421#define PIPE_VSYNC_STATUS (1UL << 9)
422#define PIPE_HDMI_AUDIO_UNDERRUN_STATUS (1UL << 10)
423#define PIPE_HDMI_AUDIO_BUFFER_DONE_STATUS (1UL << 11)
424#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL << 17)
425#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL << 18)
426#define PIPE_TE_ENABLE (1UL << 22)
427#define PIPE_DPST_EVENT_ENABLE (1UL << 23)
428#define PIPE_VSYNC_ENABL (1UL << 25)
429#define PIPE_HDMI_AUDIO_UNDERRUN (1UL << 26)
430#define PIPE_HDMI_AUDIO_BUFFER_DONE (1UL << 27)
431#define PIPE_HDMI_AUDIO_INT_MASK (PIPE_HDMI_AUDIO_UNDERRUN | \
432 PIPE_HDMI_AUDIO_BUFFER_DONE)
433#define PIPE_EVENT_MASK ((1 << 29)|(1 << 28)|(1 << 27)|(1 << 26)|(1 << 24)|(1 << 23)|(1 << 22)|(1 << 21)|(1 << 20)|(1 << 16))
434#define PIPE_VBLANK_MASK ((1 << 25)|(1 << 24)|(1 << 18)|(1 << 17))
435#define HISTOGRAM_INT_CONTROL 0x61268
436#define HISTOGRAM_BIN_DATA 0X61264
437#define HISTOGRAM_LOGIC_CONTROL 0x61260
438#define PWM_CONTROL_LOGIC 0x61250
439#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL << 10)
440#define HISTOGRAM_INTERRUPT_ENABLE (1UL << 31)
441#define HISTOGRAM_LOGIC_ENABLE (1UL << 31)
442#define PWM_LOGIC_ENABLE (1UL << 31)
443#define PWM_PHASEIN_ENABLE (1UL << 25)
444#define PWM_PHASEIN_INT_ENABLE (1UL << 24)
445#define PWM_PHASEIN_VB_COUNT 0x00001f00
446#define PWM_PHASEIN_INC 0x0000001f
447#define HISTOGRAM_INT_CTRL_CLEAR (1UL << 30)
448#define DPST_YUV_LUMA_MODE 0
449
450struct dpst_ie_histogram_control {
451 union {
452 uint32_t data;
453 struct {
454 uint32_t bin_reg_index:7;
455 uint32_t reserved:4;
456 uint32_t bin_reg_func_select:1;
457 uint32_t sync_to_phase_in:1;
458 uint32_t alt_enhancement_mode:2;
459 uint32_t reserved1:1;
460 uint32_t sync_to_phase_in_count:8;
461 uint32_t histogram_mode_select:1;
462 uint32_t reserved2:4;
463 uint32_t ie_pipe_assignment:1;
464 uint32_t ie_mode_table_enabled:1;
465 uint32_t ie_histogram_enable:1;
466 };
467 };
468};
469
470struct dpst_guardband {
471 union {
472 uint32_t data;
473 struct {
474 uint32_t guardband:22;
475 uint32_t guardband_interrupt_delay:8;
476 uint32_t interrupt_status:1;
477 uint32_t interrupt_enable:1;
478 };
479 };
480};
481
482#define PIPEAFRAMEHIGH 0x70040
483#define PIPEAFRAMEPIXEL 0x70044
484#define PIPEBFRAMEHIGH 0x71040
485#define PIPEBFRAMEPIXEL 0x71044
486#define PIPECFRAMEHIGH 0x72040
487#define PIPECFRAMEPIXEL 0x72044
488#define PIPE_FRAME_HIGH_MASK 0x0000ffff
489#define PIPE_FRAME_HIGH_SHIFT 0
490#define PIPE_FRAME_LOW_MASK 0xff000000
491#define PIPE_FRAME_LOW_SHIFT 24
492#define PIPE_PIXEL_MASK 0x00ffffff
493#define PIPE_PIXEL_SHIFT 0
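Taken together these masks describe a frame counter split across two registers: the upper bits sit in PIPEnFRAMEHIGH while the low byte shares PIPEnFRAMEPIXEL with the pixel counter. The sketch below shows how the pieces would recombine; the raw values are sample numbers standing in for REG_READ() results, not a claim about this driver's vblank code.

#include <stdint.h>
#include <stdio.h>

#define PIPE_FRAME_HIGH_MASK	0x0000ffff
#define PIPE_FRAME_HIGH_SHIFT	0
#define PIPE_FRAME_LOW_MASK	0xff000000
#define PIPE_FRAME_LOW_SHIFT	24
#define PIPE_PIXEL_MASK		0x00ffffff

int main(void)
{
	uint32_t frame_high = 0x00001234;	/* sample PIPEAFRAMEHIGH value */
	uint32_t frame_pixel = 0x56000abc;	/* sample PIPEAFRAMEPIXEL value */

	uint32_t high = (frame_high & PIPE_FRAME_HIGH_MASK) >> PIPE_FRAME_HIGH_SHIFT;
	uint32_t low = (frame_pixel & PIPE_FRAME_LOW_MASK) >> PIPE_FRAME_LOW_SHIFT;
	uint32_t pixel = frame_pixel & PIPE_PIXEL_MASK;

	printf("frame 0x%x, pixel within frame 0x%x\n", (high << 8) | low, pixel);
	return 0;
}

With the sample values this prints frame 0x123456, i.e. the 16 high bits concatenated with the 8 low bits.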
494
495#define DSPARB 0x70030
496#define DSPFW1 0x70034
497#define DSPFW2 0x70038
498#define DSPFW3 0x7003c
499#define DSPFW4 0x70050
500#define DSPFW5 0x70054
501#define DSPFW6 0x70058
502#define DSPCHICKENBIT 0x70400
503#define DSPACNTR 0x70180
504#define DSPBCNTR 0x71180
505#define DSPCCNTR 0x72180
506#define DISPLAY_PLANE_ENABLE (1 << 31)
507#define DISPLAY_PLANE_DISABLE 0
508#define DISPPLANE_GAMMA_ENABLE (1 << 30)
509#define DISPPLANE_GAMMA_DISABLE 0
510#define DISPPLANE_PIXFORMAT_MASK (0xf << 26)
511#define DISPPLANE_8BPP (0x2 << 26)
512#define DISPPLANE_15_16BPP (0x4 << 26)
513#define DISPPLANE_16BPP (0x5 << 26)
514#define DISPPLANE_32BPP_NO_ALPHA (0x6 << 26)
515#define DISPPLANE_32BPP (0x7 << 26)
516#define DISPPLANE_STEREO_ENABLE (1 << 25)
517#define DISPPLANE_STEREO_DISABLE 0
518#define DISPPLANE_SEL_PIPE_MASK (1 << 24)
519#define DISPPLANE_SEL_PIPE_POS 24
520#define DISPPLANE_SEL_PIPE_A 0
521#define DISPPLANE_SEL_PIPE_B (1 << 24)
522#define DISPPLANE_SRC_KEY_ENABLE (1 << 22)
523#define DISPPLANE_SRC_KEY_DISABLE 0
524#define DISPPLANE_LINE_DOUBLE (1 << 20)
525#define DISPPLANE_NO_LINE_DOUBLE 0
526#define DISPPLANE_STEREO_POLARITY_FIRST 0
527#define DISPPLANE_STEREO_POLARITY_SECOND (1 << 18)
528/* plane B only */
529#define DISPPLANE_ALPHA_TRANS_ENABLE (1 << 15)
530#define DISPPLANE_ALPHA_TRANS_DISABLE 0
531#define DISPPLANE_SPRITE_ABOVE_DISPLAYA 0
532#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
533#define DISPPLANE_BOTTOM (4)
534
535#define DSPABASE 0x70184
536#define DSPALINOFF 0x70184
537#define DSPASTRIDE 0x70188
538
539#define DSPBBASE 0x71184
540#define DSPBLINOFF 0X71184
541#define DSPBADDR DSPBBASE
542#define DSPBSTRIDE 0x71188
543
544#define DSPCBASE 0x72184
545#define DSPCLINOFF 0x72184
546#define DSPCSTRIDE 0x72188
547
548#define DSPAKEYVAL 0x70194
549#define DSPAKEYMASK 0x70198
550
551#define DSPAPOS 0x7018C /* reserved */
552#define DSPASIZE 0x70190
553#define DSPBPOS 0x7118C
554#define DSPBSIZE 0x71190
555#define DSPCPOS 0x7218C
556#define DSPCSIZE 0x72190
557
558#define DSPASURF 0x7019C
559#define DSPATILEOFF 0x701A4
560
561#define DSPBSURF 0x7119C
562#define DSPBTILEOFF 0x711A4
563
564#define DSPCSURF 0x7219C
565#define DSPCTILEOFF 0x721A4
566#define DSPCKEYMAXVAL 0x721A0
567#define DSPCKEYMINVAL 0x72194
568#define DSPCKEYMSK 0x72198
569
570#define VGACNTRL 0x71400
571#define VGA_DISP_DISABLE (1 << 31)
572#define VGA_2X_MODE (1 << 30)
573#define VGA_PIPE_B_SELECT (1 << 29)
574
575/*
576 * Overlay registers
577 */
578#define OV_C_OFFSET 0x08000
579#define OV_OVADD 0x30000
580#define OV_DOVASTA 0x30008
581# define OV_PIPE_SELECT ((1 << 6)|(1 << 7))
582# define OV_PIPE_SELECT_POS 6
583# define OV_PIPE_A 0
584# define OV_PIPE_C 1
585#define OV_OGAMC5 0x30010
586#define OV_OGAMC4 0x30014
587#define OV_OGAMC3 0x30018
588#define OV_OGAMC2 0x3001C
589#define OV_OGAMC1 0x30020
590#define OV_OGAMC0 0x30024
591#define OVC_OVADD 0x38000
592#define OVC_DOVCSTA 0x38008
593#define OVC_OGAMC5 0x38010
594#define OVC_OGAMC4 0x38014
595#define OVC_OGAMC3 0x38018
596#define OVC_OGAMC2 0x3801C
597#define OVC_OGAMC1 0x38020
598#define OVC_OGAMC0 0x38024
599
600/*
601 * Some BIOS scratch area registers. The 845 (and 830?) store the amount
602 * of video memory available to the BIOS in SWF1.
603 */
604#define SWF0 0x71410
605#define SWF1 0x71414
606#define SWF2 0x71418
607#define SWF3 0x7141c
608#define SWF4 0x71420
609#define SWF5 0x71424
610#define SWF6 0x71428
611
612/*
613 * 855 scratch registers.
614 */
615#define SWF00 0x70410
616#define SWF01 0x70414
617#define SWF02 0x70418
618#define SWF03 0x7041c
619#define SWF04 0x70420
620#define SWF05 0x70424
621#define SWF06 0x70428
622
623#define SWF10 SWF0
624#define SWF11 SWF1
625#define SWF12 SWF2
626#define SWF13 SWF3
627#define SWF14 SWF4
628#define SWF15 SWF5
629#define SWF16 SWF6
630
631#define SWF30 0x72414
632#define SWF31 0x72418
633#define SWF32 0x7241c
634
635
636/*
637 * Palette registers
638 */
639#define PALETTE_A 0x0a000
640#define PALETTE_B 0x0a800
641#define PALETTE_C 0x0ac00
642
643/* Cursor A & B regs */
644#define CURACNTR 0x70080
645#define CURSOR_MODE_DISABLE 0x00
646#define CURSOR_MODE_64_32B_AX 0x07
647#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
648#define MCURSOR_GAMMA_ENABLE (1 << 26)
649#define CURABASE 0x70084
650#define CURAPOS 0x70088
651#define CURSOR_POS_MASK 0x007FF
652#define CURSOR_POS_SIGN 0x8000
653#define CURSOR_X_SHIFT 0
654#define CURSOR_Y_SHIFT 16
655#define CURBCNTR 0x700c0
656#define CURBBASE 0x700c4
657#define CURBPOS 0x700c8
658#define CURCCNTR 0x700e0
659#define CURCBASE 0x700e4
660#define CURCPOS 0x700e8
661
662/*
663 * Interrupt Registers
664 */
665#define IER 0x020a0
666#define IIR 0x020a4
667#define IMR 0x020a8
668#define ISR 0x020ac
669
670/*
671 * MOORESTOWN delta registers
672 */
673#define MRST_DPLL_A 0x0f014
674#define MDFLD_DPLL_B 0x0f018
675#define MDFLD_INPUT_REF_SEL (1 << 14)
676#define MDFLD_VCO_SEL (1 << 16)
677#define DPLLA_MODE_LVDS (2 << 26) /* mrst */
678#define MDFLD_PLL_LATCHEN (1 << 28)
679#define MDFLD_PWR_GATE_EN (1 << 30)
680#define MDFLD_P1_MASK (0x1FF << 17)
681#define MRST_FPA0 0x0f040
682#define MRST_FPA1 0x0f044
683#define MDFLD_DPLL_DIV0 0x0f048
684#define MDFLD_DPLL_DIV1 0x0f04c
685#define MRST_PERF_MODE 0x020f4
686
687/*
688 * MEDFIELD HDMI registers
689 */
690#define HDMIPHYMISCCTL 0x61134
691#define HDMI_PHY_POWER_DOWN 0x7f
692#define HDMIB_CONTROL 0x61140
693#define HDMIB_PORT_EN (1 << 31)
694#define HDMIB_PIPE_B_SELECT (1 << 30)
695#define HDMIB_NULL_PACKET (1 << 9)
696#define HDMIB_HDCP_PORT (1 << 5)
697
698/* #define LVDS 0x61180 */
699#define MRST_PANEL_8TO6_DITHER_ENABLE (1 << 25)
700#define MRST_PANEL_24_DOT_1_FORMAT (1 << 24)
701#define LVDS_A3_POWER_UP_0_OUTPUT (1 << 6)
702
703#define MIPI 0x61190
704#define MIPI_C 0x62190
705#define MIPI_PORT_EN (1 << 31)
706/* Turns on border drawing to allow centered display. */
707#define SEL_FLOPPED_HSTX (1 << 23)
708#define PASS_FROM_SPHY_TO_AFE (1 << 16)
709#define MIPI_BORDER_EN (1 << 15)
710#define MIPIA_3LANE_MIPIC_1LANE 0x1
711#define MIPIA_2LANE_MIPIC_2LANE 0x2
712#define TE_TRIGGER_DSI_PROTOCOL (1 << 2)
713#define TE_TRIGGER_GPIO_PIN (1 << 3)
714#define MIPI_TE_COUNT 0x61194
715
716/* #define PP_CONTROL 0x61204 */
717#define POWER_DOWN_ON_RESET (1 << 1)
718
719/* #define PFIT_CONTROL 0x61230 */
720#define PFIT_PIPE_SELECT (3 << 29)
721#define PFIT_PIPE_SELECT_SHIFT (29)
722
723/* #define BLC_PWM_CTL 0x61254 */
724#define MRST_BACKLIGHT_MODULATION_FREQ_SHIFT (16)
725#define MRST_BACKLIGHT_MODULATION_FREQ_MASK (0xffff << 16)
726
727/* #define PIPEACONF 0x70008 */
728#define PIPEACONF_PIPE_STATE (1 << 30)
729/* #define DSPACNTR 0x70180 */
730
731#define MRST_DSPABASE 0x7019c
732#define MRST_DSPBBASE 0x7119c
733#define MDFLD_DSPCBASE 0x7219c
734
735/*
736 * Moorestown registers.
737 */
738
739/*
740 * MIPI IP registers
741 */
742#define MIPIC_REG_OFFSET 0x800
743
744#define DEVICE_READY_REG 0xb000
745#define LP_OUTPUT_HOLD (1 << 16)
746#define EXIT_ULPS_DEV_READY 0x3
747#define LP_OUTPUT_HOLD_RELEASE 0x810000
748# define ENTERING_ULPS (2 << 1)
749# define EXITING_ULPS (1 << 1)
750# define ULPS_MASK (3 << 1)
751# define BUS_POSSESSION (1 << 3)
752#define INTR_STAT_REG 0xb004
753#define RX_SOT_ERROR (1 << 0)
754#define RX_SOT_SYNC_ERROR (1 << 1)
755#define RX_ESCAPE_MODE_ENTRY_ERROR (1 << 3)
756#define RX_LP_TX_SYNC_ERROR (1 << 4)
757#define RX_HS_RECEIVE_TIMEOUT_ERROR (1 << 5)
758#define RX_FALSE_CONTROL_ERROR (1 << 6)
759#define RX_ECC_SINGLE_BIT_ERROR (1 << 7)
760#define RX_ECC_MULTI_BIT_ERROR (1 << 8)
761#define RX_CHECKSUM_ERROR (1 << 9)
762#define RX_DSI_DATA_TYPE_NOT_RECOGNIZED (1 << 10)
763#define RX_DSI_VC_ID_INVALID (1 << 11)
764#define TX_FALSE_CONTROL_ERROR (1 << 12)
765#define TX_ECC_SINGLE_BIT_ERROR (1 << 13)
766#define TX_ECC_MULTI_BIT_ERROR (1 << 14)
767#define TX_CHECKSUM_ERROR (1 << 15)
768#define TX_DSI_DATA_TYPE_NOT_RECOGNIZED (1 << 16)
769#define TX_DSI_VC_ID_INVALID (1 << 17)
770#define HIGH_CONTENTION (1 << 18)
771#define LOW_CONTENTION (1 << 19)
772#define DPI_FIFO_UNDER_RUN (1 << 20)
773#define HS_TX_TIMEOUT (1 << 21)
774#define LP_RX_TIMEOUT (1 << 22)
775#define TURN_AROUND_ACK_TIMEOUT (1 << 23)
776#define ACK_WITH_NO_ERROR (1 << 24)
777#define HS_GENERIC_WR_FIFO_FULL (1 << 27)
778#define LP_GENERIC_WR_FIFO_FULL (1 << 28)
779#define SPL_PKT_SENT (1 << 30)
780#define INTR_EN_REG 0xb008
781#define DSI_FUNC_PRG_REG 0xb00c
782#define DPI_CHANNEL_NUMBER_POS 0x03
783#define DBI_CHANNEL_NUMBER_POS 0x05
784#define FMT_DPI_POS 0x07
785#define FMT_DBI_POS 0x0A
786#define DBI_DATA_WIDTH_POS 0x0D
787
788/* DPI PIXEL FORMATS */
789#define RGB_565_FMT 0x01 /* RGB 565 FORMAT */
790#define RGB_666_FMT 0x02 /* RGB 666 FORMAT */
791#define LRGB_666_FMT 0x03 /* RGB LOOSELY PACKED
792 * 666 FORMAT
793 */
794#define RGB_888_FMT 0x04 /* RGB 888 FORMAT */
795#define VIRTUAL_CHANNEL_NUMBER_0 0x00 /* Virtual channel 0 */
796#define VIRTUAL_CHANNEL_NUMBER_1 0x01 /* Virtual channel 1 */
797#define VIRTUAL_CHANNEL_NUMBER_2 0x02 /* Virtual channel 2 */
798#define VIRTUAL_CHANNEL_NUMBER_3 0x03 /* Virtual channel 3 */
799
800#define DBI_NOT_SUPPORTED 0x00 /* command mode
801 * is not supported
802 */
803#define DBI_DATA_WIDTH_16BIT 0x01 /* 16 bit data */
804#define DBI_DATA_WIDTH_9BIT 0x02 /* 9 bit data */
805#define DBI_DATA_WIDTH_8BIT 0x03 /* 8 bit data */
806#define DBI_DATA_WIDTH_OPT1 0x04 /* option 1 */
807#define DBI_DATA_WIDTH_OPT2 0x05 /* option 2 */
808
809#define HS_TX_TIMEOUT_REG 0xb010
810#define LP_RX_TIMEOUT_REG 0xb014
811#define TURN_AROUND_TIMEOUT_REG 0xb018
812#define DEVICE_RESET_REG 0xb01C
813#define DPI_RESOLUTION_REG 0xb020
814#define RES_V_POS 0x10
815#define DBI_RESOLUTION_REG 0xb024 /* Reserved for MDFLD */
816#define HORIZ_SYNC_PAD_COUNT_REG 0xb028
817#define HORIZ_BACK_PORCH_COUNT_REG 0xb02C
818#define HORIZ_FRONT_PORCH_COUNT_REG 0xb030
819#define HORIZ_ACTIVE_AREA_COUNT_REG 0xb034
820#define VERT_SYNC_PAD_COUNT_REG 0xb038
821#define VERT_BACK_PORCH_COUNT_REG 0xb03c
822#define VERT_FRONT_PORCH_COUNT_REG 0xb040
823#define HIGH_LOW_SWITCH_COUNT_REG 0xb044
824#define DPI_CONTROL_REG 0xb048
825#define DPI_SHUT_DOWN (1 << 0)
826#define DPI_TURN_ON (1 << 1)
827#define DPI_COLOR_MODE_ON (1 << 2)
828#define DPI_COLOR_MODE_OFF (1 << 3)
829#define DPI_BACK_LIGHT_ON (1 << 4)
830#define DPI_BACK_LIGHT_OFF (1 << 5)
831#define DPI_LP (1 << 6)
832#define DPI_DATA_REG 0xb04c
833#define DPI_BACK_LIGHT_ON_DATA 0x07
834#define DPI_BACK_LIGHT_OFF_DATA 0x17
835#define INIT_COUNT_REG 0xb050
836#define MAX_RET_PAK_REG 0xb054
837#define VIDEO_FMT_REG 0xb058
838#define COMPLETE_LAST_PCKT (1 << 2)
839#define EOT_DISABLE_REG 0xb05c
840#define ENABLE_CLOCK_STOPPING (1 << 1)
841#define LP_BYTECLK_REG 0xb060
842#define LP_GEN_DATA_REG 0xb064
843#define HS_GEN_DATA_REG 0xb068
844#define LP_GEN_CTRL_REG 0xb06C
845#define HS_GEN_CTRL_REG 0xb070
846#define DCS_CHANNEL_NUMBER_POS 0x6
847#define MCS_COMMANDS_POS 0x8
848#define WORD_COUNTS_POS 0x8
849#define MCS_PARAMETER_POS 0x10
850#define GEN_FIFO_STAT_REG 0xb074
851#define HS_DATA_FIFO_FULL (1 << 0)
852#define HS_DATA_FIFO_HALF_EMPTY (1 << 1)
853#define HS_DATA_FIFO_EMPTY (1 << 2)
854#define LP_DATA_FIFO_FULL (1 << 8)
855#define LP_DATA_FIFO_HALF_EMPTY (1 << 9)
856#define LP_DATA_FIFO_EMPTY (1 << 10)
857#define HS_CTRL_FIFO_FULL (1 << 16)
858#define HS_CTRL_FIFO_HALF_EMPTY (1 << 17)
859#define HS_CTRL_FIFO_EMPTY (1 << 18)
860#define LP_CTRL_FIFO_FULL (1 << 24)
861#define LP_CTRL_FIFO_HALF_EMPTY (1 << 25)
862#define LP_CTRL_FIFO_EMPTY (1 << 26)
863#define DBI_FIFO_EMPTY (1 << 27)
864#define DPI_FIFO_EMPTY (1 << 28)
865#define HS_LS_DBI_ENABLE_REG 0xb078
866#define TXCLKESC_REG 0xb07c
867#define DPHY_PARAM_REG 0xb080
868#define DBI_BW_CTRL_REG 0xb084
869#define CLK_LANE_SWT_REG 0xb088
870
871/*
872 * MIPI Adapter registers
873 */
874#define MIPI_CONTROL_REG 0xb104
875#define MIPI_2X_CLOCK_BITS ((1 << 0) | (1 << 1))
876#define MIPI_DATA_ADDRESS_REG 0xb108
877#define MIPI_DATA_LENGTH_REG 0xb10C
878#define MIPI_COMMAND_ADDRESS_REG 0xb110
879#define MIPI_COMMAND_LENGTH_REG 0xb114
880#define MIPI_READ_DATA_RETURN_REG0 0xb118
881#define MIPI_READ_DATA_RETURN_REG1 0xb11C
882#define MIPI_READ_DATA_RETURN_REG2 0xb120
883#define MIPI_READ_DATA_RETURN_REG3 0xb124
884#define MIPI_READ_DATA_RETURN_REG4 0xb128
885#define MIPI_READ_DATA_RETURN_REG5 0xb12C
886#define MIPI_READ_DATA_RETURN_REG6 0xb130
887#define MIPI_READ_DATA_RETURN_REG7 0xb134
888#define MIPI_READ_DATA_VALID_REG 0xb138
889
890/* DBI COMMANDS */
891#define soft_reset 0x01
892/*
893 * The display module performs a software reset.
894 * Registers are written with their SW Reset default values.
895 */
896#define get_power_mode 0x0a
897/*
898 * The display module returns the current power mode
899 */
900#define get_address_mode 0x0b
901/*
902 * The display module returns the current address mode.
903 */
904#define get_pixel_format 0x0c
905/*
906 * This command gets the pixel format for the RGB image data
907 * used by the interface.
908 */
909#define get_display_mode 0x0d
910/*
911 * The display module returns the Display Image Mode status.
912 */
913#define get_signal_mode 0x0e
914/*
915 * The display module returns the Display Signal Mode.
916 */
917#define get_diagnostic_result 0x0f
918/*
919 * The display module returns the self-diagnostic results following
920 * a Sleep Out command.
921 */
922#define enter_sleep_mode 0x10
923/*
924 * This command causes the display module to enter the Sleep mode.
925 * In this mode, all unnecessary blocks inside the display module are
926 * disabled except interface communication. This is the lowest power
927 * mode the display module supports.
928 */
929#define exit_sleep_mode 0x11
930/*
931 * This command causes the display module to exit Sleep mode.
932 * All blocks inside the display module are enabled.
933 */
934#define enter_partial_mode 0x12
935/*
936 * This command causes the display module to enter the Partial Display
937 * Mode. The Partial Display Mode window is described by the
938 * set_partial_area command.
939 */
940#define enter_normal_mode 0x13
941/*
942 * This command causes the display module to enter the Normal mode.
943 * Normal Mode is defined as Partial Display mode and Scroll mode both being off
944 */
945#define exit_invert_mode 0x20
946/*
947 * This command causes the display module to stop inverting the image
948 * data on the display device. The frame memory contents remain unchanged.
949 * No status bits are changed.
950 */
951#define enter_invert_mode 0x21
952/*
953 * This command causes the display module to invert the image data only on
954 * the display device. The frame memory contents remain unchanged.
955 * No status bits are changed.
956 */
957#define set_gamma_curve 0x26
958/*
959 * This command selects the desired gamma curve for the display device.
960 * Four fixed gamma curves are defined in section DCS spec.
961 */
962#define set_display_off 0x28
963/*
964 * This command causes the display module to stop displaying the image data
965 * on the display device. The frame memory contents remain unchanged.
966 * No status bits are changed.
967 */
968#define set_display_on 0x29
969/*
970 * This command causes the display module to start displaying the image data
971 * on the display device. The frame memory contents remain unchanged.
972 * No status bits are changed.
973 */
974#define set_column_address 0x2a
975/*
976 * This command defines the column extent of the frame memory accessed by
977 * the host processor with the read_memory_continue and
978 * write_memory_continue commands.
979 * No status bits are changed.
980 */
981#define set_page_addr 0x2b
982/*
983 * This command defines the page extent of the frame memory accessed by
984 * the host processor with the write_memory_continue and
985 * read_memory_continue command.
986 * No status bits are changed.
987 */
988#define write_mem_start 0x2c
989/*
990 * This command transfers image data from the host processor to the
991 * display module's frame memory starting at the pixel location specified
992 * by preceding set_column_address and set_page_address commands.
993 */
994#define set_partial_area 0x30
995/*
996 * This command defines the Partial Display mode's display area.
997 * There are two parameters associated with this command, the first
998 * defines the Start Row (SR) and the second the End Row (ER). SR and ER
999 * refer to the Frame Memory Line Pointer.
1000 */
1001#define set_scroll_area 0x33
1002/*
1003 * This command defines the display module's Vertical Scrolling Area.
1004 */
1005#define set_tear_off 0x34
1006/*
1007 * This command turns off the display module's Tearing Effect output
1008 * signal on the TE signal line.
1009 */
1010#define set_tear_on 0x35
1011/*
1012 * This command turns on the display module's Tearing Effect output signal
1013 * on the TE signal line.
1014 */
1015#define set_address_mode 0x36
1016/*
1017 * This command sets the data order for transfers from the host processor
1018 * to the display module's frame memory, bits B[7:5] and B3, and from the
1019 * display module's frame memory to the display device, bits B[2:0] and B4.
1020 */
1021#define set_scroll_start 0x37
1022/*
1023 * This command sets the start of the vertical scrolling area in the frame
1024 * memory. The vertical scrolling area is fully defined when this command
1025 * is used with the set_scroll_area command. The set_scroll_start command
1026 * has one parameter, the Vertical Scroll Pointer. The VSP defines the
1027 * line in the frame memory that is written to the display device as the
1028 * first line of the vertical scroll area.
1029 */
1030#define exit_idle_mode 0x38
1031/*
1032 * This command causes the display module to exit Idle mode.
1033 */
1034#define enter_idle_mode 0x39
1035/*
1036 * This command causes the display module to enter Idle Mode.
1037 * In Idle Mode, color expression is reduced. Colors are shown on the
1038 * display device using the MSB of each of the R, G and B color
1039 * components in the frame memory
1040 */
1041#define set_pixel_format 0x3a
1042/*
1043 * This command sets the pixel format for the RGB image data used by the
1044 * interface.
1045 * Bits D[6:4] DPI Pixel Format Definition
1046 * Bits D[2:0] DBI Pixel Format Definition
1047 * Bits D7 and D3 are not used.
1048 */
1049#define DCS_PIXEL_FORMAT_3bpp 0x1
1050#define DCS_PIXEL_FORMAT_8bpp 0x2
1051#define DCS_PIXEL_FORMAT_12bpp 0x3
1052#define DCS_PIXEL_FORMAT_16bpp 0x5
1053#define DCS_PIXEL_FORMAT_18bpp 0x6
1054#define DCS_PIXEL_FORMAT_24bpp 0x7
1055
1056#define write_mem_cont 0x3c
1057
1058/*
1059 * This command transfers image data from the host processor to the
1060 * display module's frame memory continuing from the pixel location
1061 * following the previous write_memory_continue or write_memory_start
1062 * command.
1063 */
1064#define set_tear_scanline 0x44
1065/*
1066 * This command turns on the display module's Tearing Effect output signal
1067 * on the TE signal line when the display module reaches line N.
1068 */
1069#define get_scanline 0x45
1070/*
1071 * The display module returns the current scanline, N, used to update the
1072 * display device. The total number of scanlines on a display device is
1073 * defined as VSYNC + VBP + VACT + VFP. The first scanline is defined as
1074 * the first line of V Sync and is denoted as Line 0.
1075 * When in Sleep Mode, the value returned by get_scanline is undefined.
1076 */
1077
1078/* MCS or Generic COMMANDS */
1079/* MCS/generic data type */
1080#define GEN_SHORT_WRITE_0 0x03 /* generic short write, no parameters */
1081#define GEN_SHORT_WRITE_1	0x13	/* generic short write, 1 parameter */
1082#define GEN_SHORT_WRITE_2 0x23 /* generic short write, 2 parameters */
1083#define GEN_READ_0 0x04 /* generic read, no parameters */
1084#define GEN_READ_1		0x14	/* generic read, 1 parameter */
1085#define GEN_READ_2 0x24 /* generic read, 2 parameters */
1086#define GEN_LONG_WRITE 0x29 /* generic long write */
1087#define MCS_SHORT_WRITE_0 0x05 /* MCS short write, no parameters */
1088#define MCS_SHORT_WRITE_1	0x15	/* MCS short write, 1 parameter */
1089#define MCS_READ 0x06 /* MCS read, no parameters */
1090#define MCS_LONG_WRITE 0x39 /* MCS long write */
1091/* MCS/generic commands */
1092/* TPO MCS */
1093#define write_display_profile 0x50
1094#define write_display_brightness 0x51
1095#define write_ctrl_display 0x53
1096#define write_ctrl_cabc 0x55
1097 #define UI_IMAGE 0x01
1098 #define STILL_IMAGE 0x02
1099 #define MOVING_IMAGE 0x03
1100#define write_hysteresis 0x57
1101#define write_gamma_setting 0x58
1102#define write_cabc_min_bright 0x5e
1103#define write_kbbc_profile 0x60
1104/* TMD MCS */
1105#define tmd_write_display_brightness 0x8c
1106
1107/*
1108 * This command is used to control ambient light, panel backlight
1109 * brightness and gamma settings.
1110 */
1111#define BRIGHT_CNTL_BLOCK_ON (1 << 5)
1112#define AMBIENT_LIGHT_SENSE_ON (1 << 4)
1113#define DISPLAY_DIMMING_ON (1 << 3)
1114#define BACKLIGHT_ON (1 << 2)
1115#define DISPLAY_BRIGHTNESS_AUTO (1 << 1)
1116#define GAMMA_AUTO (1 << 0)
1117
1118/* DCS Interface Pixel Formats */
1119#define DCS_PIXEL_FORMAT_3BPP 0x1
1120#define DCS_PIXEL_FORMAT_8BPP 0x2
1121#define DCS_PIXEL_FORMAT_12BPP 0x3
1122#define DCS_PIXEL_FORMAT_16BPP 0x5
1123#define DCS_PIXEL_FORMAT_18BPP 0x6
1124#define DCS_PIXEL_FORMAT_24BPP 0x7
1125/* ONE PARAMETER READ DATA */
1126#define addr_mode_data 0xfc
1127#define diag_res_data 0x00
1128#define disp_mode_data 0x23
1129#define pxl_fmt_data 0x77
1130#define pwr_mode_data 0x74
1131#define sig_mode_data 0x00
1132/* TWO PARAMETERS READ DATA */
1133#define scanline_data1 0xff
1134#define scanline_data2 0xff
1135#define NON_BURST_MODE_SYNC_PULSE 0x01 /* Non Burst Mode
1136 * with Sync Pulse
1137 */
1138#define NON_BURST_MODE_SYNC_EVENTS 0x02 /* Non Burst Mode
1139 * with Sync events
1140 */
1141#define BURST_MODE 0x03 /* Burst Mode */
1142#define DBI_COMMAND_BUFFER_SIZE 0x240 /* 0x32 */ /* 0x120 */
1143 /* Allocate at least
1144 * 0x100 Byte with 32
1145 * byte alignment
1146 */
1147#define DBI_DATA_BUFFER_SIZE 0x120 /* Allocate at least
1148 * 0x100 Byte with 32
1149 * byte alignment
1150 */
1151#define DBI_CB_TIME_OUT 0xFFFF
1152
1153#define GEN_FB_TIME_OUT 2000
1154
1155#define SKU_83 0x01
1156#define SKU_100 0x02
1157#define SKU_100L 0x04
1158#define SKU_BYPASS 0x08
1159
1160/* Some handy macros for playing with bitfields. */
1161#define PSB_MASK(high, low) (((1<<((high)-(low)+1))-1)<<(low))
1162#define SET_FIELD(value, field) (((value) << field ## _SHIFT) & field ## _MASK)
1163#define GET_FIELD(word, field) (((word) & field ## _MASK) >> field ## _SHIFT)
1164
1165#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
1166
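A self-contained sketch of how these helpers compose with the *_MASK/*_SHIFT pairs used for the sideband registers below (SB_DEST, SB_M); the main() harness is purely illustrative and not part of this header.

#include <stdint.h>
#include <stdio.h>

#define PSB_MASK(high, low)	(((1 << ((high) - (low) + 1)) - 1) << (low))
#define SET_FIELD(value, field)	(((value) << field##_SHIFT) & field##_MASK)
#define GET_FIELD(word, field)	(((word) & field##_MASK) >> field##_SHIFT)
#define _PIPE(pipe, a, b)	((a) + (pipe) * ((b) - (a)))

/* field definitions mirroring the sideband registers below */
#define SB_DEST_MASK	PSB_MASK(15, 8)
#define SB_DEST_SHIFT	8
#define SB_DEST_DPLL	0x88

#define _SB_M_A		0x8008
#define _SB_M_B		0x8028
#define SB_M(pipe)	_PIPE(pipe, _SB_M_A, _SB_M_B)

int main(void)
{
	uint32_t pkt = 0;

	pkt |= SET_FIELD(SB_DEST_DPLL, SB_DEST);		/* 0x88 into bits 15:8 */
	printf("dest field = 0x%x\n", GET_FIELD(pkt, SB_DEST));	/* prints 0x88 */
	printf("SB_M for pipe 1 = 0x%x\n", SB_M(1));		/* prints 0x8028 */
	return 0;
}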
1167/* PCI config space */
1168
1169#define SB_PCKT 0x02100 /* cedarview */
1170# define SB_OPCODE_MASK PSB_MASK(31, 16)
1171# define SB_OPCODE_SHIFT 16
1172# define SB_OPCODE_READ 0
1173# define SB_OPCODE_WRITE 1
1174# define SB_DEST_MASK PSB_MASK(15, 8)
1175# define SB_DEST_SHIFT 8
1176# define SB_DEST_DPLL 0x88
1177# define SB_BYTE_ENABLE_MASK PSB_MASK(7, 4)
1178# define SB_BYTE_ENABLE_SHIFT 4
1179# define SB_BUSY (1 << 0)
1180
1181
1182/* 32-bit value read/written from the DPIO reg. */
1183#define SB_DATA 0x02104 /* cedarview */
1184/* 32-bit address of the DPIO reg to be read/written. */
1185#define SB_ADDR 0x02108 /* cedarview */
1186#define DPIO_CFG 0x02110 /* cedarview */
1187# define DPIO_MODE_SELECT_1 (1 << 3)
1188# define DPIO_MODE_SELECT_0 (1 << 2)
1189# define DPIO_SFR_BYPASS (1 << 1)
1190/* reset is active low */
1191# define DPIO_CMN_RESET_N (1 << 0)
1192
1193/* Cedarview sideband registers */
1194#define _SB_M_A 0x8008
1195#define _SB_M_B 0x8028
1196#define SB_M(pipe) _PIPE(pipe, _SB_M_A, _SB_M_B)
1197# define SB_M_DIVIDER_MASK (0xFF << 24)
1198# define SB_M_DIVIDER_SHIFT 24
1199
1200#define _SB_N_VCO_A 0x8014
1201#define _SB_N_VCO_B 0x8034
1202#define SB_N_VCO(pipe) _PIPE(pipe, _SB_N_VCO_A, _SB_N_VCO_B)
1203#define SB_N_VCO_SEL_MASK PSB_MASK(31, 30)
1204#define SB_N_VCO_SEL_SHIFT 30
1205#define SB_N_DIVIDER_MASK PSB_MASK(29, 26)
1206#define SB_N_DIVIDER_SHIFT 26
1207#define SB_N_CB_TUNE_MASK PSB_MASK(25, 24)
1208#define SB_N_CB_TUNE_SHIFT 24
1209
1210#define _SB_REF_A 0x8018
1211#define _SB_REF_B 0x8038
1212#define SB_REF_SFR(pipe) _PIPE(pipe, _SB_REF_A, _SB_REF_B)
1213
1214#define _SB_P_A 0x801c
1215#define _SB_P_B 0x803c
1216#define SB_P(pipe) _PIPE(pipe, _SB_P_A, _SB_P_B)
1217#define SB_P2_DIVIDER_MASK PSB_MASK(31, 30)
1218#define SB_P2_DIVIDER_SHIFT 30
1219#define SB_P2_10 0 /* HDMI, DP, DAC */
1220#define SB_P2_5 1 /* DAC */
1221#define SB_P2_14 2 /* LVDS single */
1222#define SB_P2_7 3 /* LVDS double */
1223#define SB_P1_DIVIDER_MASK PSB_MASK(15, 12)
1224#define SB_P1_DIVIDER_SHIFT 12
1225
1226#define PSB_LANE0 0x120
1227#define PSB_LANE1 0x220
1228#define PSB_LANE2 0x2320
1229#define PSB_LANE3 0x2420
1230
1231#define LANE_PLL_MASK (0x7 << 20)
1232#define LANE_PLL_ENABLE (0x3 << 20)
1233
1234
1235#endif
diff --git a/drivers/staging/gma500/psb_intel_sdvo.c b/drivers/staging/gma500/psb_intel_sdvo.c
deleted file mode 100644
index a4bad1af4b7c..000000000000
--- a/drivers/staging/gma500/psb_intel_sdvo.c
+++ /dev/null
@@ -1,1293 +0,0 @@
1/*
2 * Copyright (c) 2006-2007 Intel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Authors:
18 * Eric Anholt <eric@anholt.net>
19 */
20
21#include <linux/i2c.h>
22#include <linux/delay.h>
23/* #include <drm/drm_crtc.h> */
24#include <drm/drmP.h>
25#include "psb_drv.h"
26#include "psb_intel_drv.h"
27#include "psb_intel_reg.h"
28#include "psb_intel_sdvo_regs.h"
29
30struct psb_intel_sdvo_priv {
31 struct psb_intel_i2c_chan *i2c_bus;
32 int slaveaddr;
33 int output_device;
34
35 u16 active_outputs;
36
37 struct psb_intel_sdvo_caps caps;
38 int pixel_clock_min, pixel_clock_max;
39
40 int save_sdvo_mult;
41 u16 save_active_outputs;
42 struct psb_intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2;
43 struct psb_intel_sdvo_dtd save_output_dtd[16];
44 u32 save_SDVOX;
45 u8 in_out_map[4];
46
47 u8 by_input_wiring;
48 u32 active_device;
49};
50
51/**
52 * Writes the SDVOB or SDVOC with the given value, but always writes both
53 * SDVOB and SDVOC to work around apparent hardware issues (according to
54 * comments in the BIOS).
55 */
56void psb_intel_sdvo_write_sdvox(struct psb_intel_output *psb_intel_output,
57 u32 val)
58{
59 struct drm_device *dev = psb_intel_output->base.dev;
60 struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
61 u32 bval = val, cval = val;
62 int i;
63
64 if (sdvo_priv->output_device == SDVOB)
65 cval = REG_READ(SDVOC);
66 else
67 bval = REG_READ(SDVOB);
68 /*
69 * Write the registers twice for luck. Sometimes,
70 * writing them only once doesn't appear to 'stick'.
71 * The BIOS does this too. Yay, magic
72 */
73 for (i = 0; i < 2; i++) {
74 REG_WRITE(SDVOB, bval);
75 REG_READ(SDVOB);
76 REG_WRITE(SDVOC, cval);
77 REG_READ(SDVOC);
78 }
79}
80
81static bool psb_intel_sdvo_read_byte(
82 struct psb_intel_output *psb_intel_output,
83 u8 addr, u8 *ch)
84{
85 struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
86 u8 out_buf[2];
87 u8 buf[2];
88 int ret;
89
90 struct i2c_msg msgs[] = {
91 {
92 .addr = sdvo_priv->i2c_bus->slave_addr,
93 .flags = 0,
94 .len = 1,
95 .buf = out_buf,
96 },
97 {
98 .addr = sdvo_priv->i2c_bus->slave_addr,
99 .flags = I2C_M_RD,
100 .len = 1,
101 .buf = buf,
102 }
103 };
104
105 out_buf[0] = addr;
106 out_buf[1] = 0;
107
108 ret = i2c_transfer(&sdvo_priv->i2c_bus->adapter, msgs, 2);
109 if (ret == 2) {
110 *ch = buf[0];
111 return true;
112 }
113
114 return false;
115}
116
117static bool psb_intel_sdvo_write_byte(
118 struct psb_intel_output *psb_intel_output,
119 int addr, u8 ch)
120{
121 u8 out_buf[2];
122 struct i2c_msg msgs[] = {
123 {
124 .addr = psb_intel_output->i2c_bus->slave_addr,
125 .flags = 0,
126 .len = 2,
127 .buf = out_buf,
128 }
129 };
130
131 out_buf[0] = addr;
132 out_buf[1] = ch;
133
134 if (i2c_transfer(&psb_intel_output->i2c_bus->adapter, msgs, 1) == 1)
135 return true;
136 return false;
137}
138
139#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
140/** Mapping of command numbers to names, for debug output */
141static const struct _sdvo_cmd_name {
142 u8 cmd;
143 char *name;
144} sdvo_cmd_names[] = {
145SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
146 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
147 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
148 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
149 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
150 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
151 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
152 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
153 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
154 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
155 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
156 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
157 SDVO_CMD_NAME_ENTRY
158 (SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
159 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
160 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
161 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
162 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
163 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
164 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
165 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
166 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
167 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
168 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
169 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
170 SDVO_CMD_NAME_ENTRY
171 (SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
172 SDVO_CMD_NAME_ENTRY
173 (SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
174 SDVO_CMD_NAME_ENTRY
175 (SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
176 SDVO_CMD_NAME_ENTRY
177 (SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
178 SDVO_CMD_NAME_ENTRY
179 (SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
180 SDVO_CMD_NAME_ENTRY
181 (SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
182 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
183 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
184 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
185 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
186 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
187 SDVO_CMD_NAME_ENTRY
188 (SDVO_CMD_SET_TV_RESOLUTION_SUPPORT),
189 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),};
190
191#define SDVO_NAME(dev_priv) \
192 ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC")
193#define SDVO_PRIV(output) ((struct psb_intel_sdvo_priv *) (output)->dev_priv)
194
195static void psb_intel_sdvo_write_cmd(struct psb_intel_output *psb_intel_output,
196 u8 cmd,
197 void *args,
198 int args_len)
199{
200 struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
201 int i;
202
203 if (0) {
204 printk(KERN_DEBUG "%s: W: %02X ", SDVO_NAME(sdvo_priv), cmd);
205 for (i = 0; i < args_len; i++)
206 printk(KERN_CONT "%02X ", ((u8 *) args)[i]);
207 for (; i < 8; i++)
208 printk(KERN_CONT " ");
209 for (i = 0;
210 i <
211 sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]);
212 i++) {
213 if (cmd == sdvo_cmd_names[i].cmd) {
214 printk(KERN_CONT
215 "(%s)", sdvo_cmd_names[i].name);
216 break;
217 }
218 }
219 if (i ==
220 sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]))
221 printk(KERN_CONT "(%02X)", cmd);
222 printk(KERN_CONT "\n");
223 }
224
225 for (i = 0; i < args_len; i++) {
226 psb_intel_sdvo_write_byte(psb_intel_output,
227 SDVO_I2C_ARG_0 - i,
228 ((u8 *) args)[i]);
229 }
230
231 psb_intel_sdvo_write_byte(psb_intel_output, SDVO_I2C_OPCODE, cmd);
232}
233
234static const char *const cmd_status_names[] = {
235 "Power on",
236 "Success",
237 "Not supported",
238 "Invalid arg",
239 "Pending",
240 "Target not specified",
241 "Scaling not supported"
242};
243
244static u8 psb_intel_sdvo_read_response(
245 struct psb_intel_output *psb_intel_output,
246 void *response, int response_len)
247{
248 struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
249 int i;
250 u8 status;
251 u8 retry = 50;
252
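	/*
	 * The encoder may answer PENDING while it is still processing the
	 * command; retry up to 50 times at 50 ms intervals before handing
	 * back whatever status byte was read last.
	 */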
253 while (retry--) {
254 /* Read the command response */
255 for (i = 0; i < response_len; i++) {
256 psb_intel_sdvo_read_byte(psb_intel_output,
257 SDVO_I2C_RETURN_0 + i,
258 &((u8 *) response)[i]);
259 }
260
261 /* read the return status */
262 psb_intel_sdvo_read_byte(psb_intel_output,
263 SDVO_I2C_CMD_STATUS,
264 &status);
265
266 if (0) {
267 pr_debug("%s: R: ", SDVO_NAME(sdvo_priv));
268 for (i = 0; i < response_len; i++)
269 printk(KERN_CONT "%02X ", ((u8 *) response)[i]);
270 for (; i < 8; i++)
271 printk(" ");
272 if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
273 printk(KERN_CONT "(%s)",
274 cmd_status_names[status]);
275 else
276 printk(KERN_CONT "(??? %d)", status);
277 printk(KERN_CONT "\n");
278 }
279
280 if (status != SDVO_CMD_STATUS_PENDING)
281 return status;
282
283 mdelay(50);
284 }
285
286 return status;
287}
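/*
 * Every SDVO helper below follows the same two-step request/response
 * pattern: stage the argument bytes and opcode with
 * psb_intel_sdvo_write_cmd(), then poll the status and return registers
 * with psb_intel_sdvo_read_response().  A minimal sketch of that pattern
 * (psb_intel_sdvo_do_cmd is a hypothetical wrapper shown for illustration
 * only, it is not part of this driver):
 *
 *	static bool psb_intel_sdvo_do_cmd(struct psb_intel_output *output,
 *					  u8 cmd, void *args, int args_len,
 *					  void *reply, int reply_len)
 *	{
 *		psb_intel_sdvo_write_cmd(output, cmd, args, args_len);
 *		return psb_intel_sdvo_read_response(output, reply, reply_len)
 *			== SDVO_CMD_STATUS_SUCCESS;
 *	}
 */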
288
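/*
 * Modes with a pixel clock of 100 MHz or more are sent at 1x, 50-100 MHz
 * modes at 2x and slower modes at 4x; the multiplier presumably keeps the
 * effective SDVO link clock within the range the encoder can lock to.
 */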
289int psb_intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
290{
291 if (mode->clock >= 100000)
292 return 1;
293 else if (mode->clock >= 50000)
294 return 2;
295 else
296 return 4;
297}
298
299/**
300 * Don't check status code from this as it switches the bus back to the
301 * SDVO chips which defeats the purpose of doing a bus switch in the first
302 * place.
303 */
304void psb_intel_sdvo_set_control_bus_switch(
305 struct psb_intel_output *psb_intel_output,
306 u8 target)
307{
308 psb_intel_sdvo_write_cmd(psb_intel_output,
309 SDVO_CMD_SET_CONTROL_BUS_SWITCH,
310 &target,
311 1);
312}
313
314static bool psb_intel_sdvo_set_target_input(
315 struct psb_intel_output *psb_intel_output,
316 bool target_0, bool target_1)
317{
318 struct psb_intel_sdvo_set_target_input_args targets = { 0 };
319 u8 status;
320
321 if (target_0 && target_1)
322 return SDVO_CMD_STATUS_NOTSUPP;
323
324 if (target_1)
325 targets.target_1 = 1;
326
327 psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_TARGET_INPUT,
328 &targets, sizeof(targets));
329
330 status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
331
332 return status == SDVO_CMD_STATUS_SUCCESS;
333}
334
335/**
336 * Return whether each input is trained.
337 *
338 * This function is making an assumption about the layout of the response,
339 * which should be checked against the docs.
340 */
341static bool psb_intel_sdvo_get_trained_inputs(struct psb_intel_output
342 *psb_intel_output, bool *input_1,
343 bool *input_2)
344{
345 struct psb_intel_sdvo_get_trained_inputs_response response;
346 u8 status;
347
348 psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_TRAINED_INPUTS,
349 NULL, 0);
350 status =
351 psb_intel_sdvo_read_response(psb_intel_output, &response,
352 sizeof(response));
353 if (status != SDVO_CMD_STATUS_SUCCESS)
354 return false;
355
356 *input_1 = response.input0_trained;
357 *input_2 = response.input1_trained;
358 return true;
359}
360
361static bool psb_intel_sdvo_get_active_outputs(struct psb_intel_output
362 *psb_intel_output, u16 *outputs)
363{
364 u8 status;
365
366 psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ACTIVE_OUTPUTS,
367 NULL, 0);
368 status =
369 psb_intel_sdvo_read_response(psb_intel_output, outputs,
370 sizeof(*outputs));
371
372 return status == SDVO_CMD_STATUS_SUCCESS;
373}
374
375static bool psb_intel_sdvo_set_active_outputs(struct psb_intel_output
376 *psb_intel_output, u16 outputs)
377{
378 u8 status;
379
380 psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_ACTIVE_OUTPUTS,
381 &outputs, sizeof(outputs));
382 status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
383 return status == SDVO_CMD_STATUS_SUCCESS;
384}
385
386static bool psb_intel_sdvo_set_encoder_power_state(struct psb_intel_output
387 *psb_intel_output, int mode)
388{
389 u8 status, state = SDVO_ENCODER_STATE_ON;
390
391 switch (mode) {
392 case DRM_MODE_DPMS_ON:
393 state = SDVO_ENCODER_STATE_ON;
394 break;
395 case DRM_MODE_DPMS_STANDBY:
396 state = SDVO_ENCODER_STATE_STANDBY;
397 break;
398 case DRM_MODE_DPMS_SUSPEND:
399 state = SDVO_ENCODER_STATE_SUSPEND;
400 break;
401 case DRM_MODE_DPMS_OFF:
402 state = SDVO_ENCODER_STATE_OFF;
403 break;
404 }
405
406 psb_intel_sdvo_write_cmd(psb_intel_output,
407 SDVO_CMD_SET_ENCODER_POWER_STATE, &state,
408 sizeof(state));
409 status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
410
411 return status == SDVO_CMD_STATUS_SUCCESS;
412}
413
414static bool psb_intel_sdvo_get_input_pixel_clock_range(struct psb_intel_output
415 *psb_intel_output,
416 int *clock_min,
417 int *clock_max)
418{
419 struct psb_intel_sdvo_pixel_clock_range clocks;
420 u8 status;
421
422 psb_intel_sdvo_write_cmd(psb_intel_output,
423 SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, NULL,
424 0);
425
426 status =
427 psb_intel_sdvo_read_response(psb_intel_output, &clocks,
428 sizeof(clocks));
429
430 if (status != SDVO_CMD_STATUS_SUCCESS)
431 return false;
432
433 /* Convert the values from units of 10 kHz to kHz. */
434 *clock_min = clocks.min * 10;
435 *clock_max = clocks.max * 10;
436
437 return true;
438}
439
440static bool psb_intel_sdvo_set_target_output(
441 struct psb_intel_output *psb_intel_output,
442 u16 outputs)
443{
444 u8 status;
445
446 psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_TARGET_OUTPUT,
447 &outputs, sizeof(outputs));
448
449 status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
450 return status == SDVO_CMD_STATUS_SUCCESS;
451}
452
453static bool psb_intel_sdvo_get_timing(struct psb_intel_output *psb_intel_output,
454 u8 cmd, struct psb_intel_sdvo_dtd *dtd)
455{
456 u8 status;
457
458 psb_intel_sdvo_write_cmd(psb_intel_output, cmd, NULL, 0);
459 status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part1,
460 sizeof(dtd->part1));
461 if (status != SDVO_CMD_STATUS_SUCCESS)
462 return false;
463
464 psb_intel_sdvo_write_cmd(psb_intel_output, cmd + 1, NULL, 0);
465 status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part2,
466 sizeof(dtd->part2));
467 if (status != SDVO_CMD_STATUS_SUCCESS)
468 return false;
469
470 return true;
471}
472
473static bool psb_intel_sdvo_get_input_timing(
474 struct psb_intel_output *psb_intel_output,
475 struct psb_intel_sdvo_dtd *dtd)
476{
477 return psb_intel_sdvo_get_timing(psb_intel_output,
478 SDVO_CMD_GET_INPUT_TIMINGS_PART1,
479 dtd);
480}
481
482static bool psb_intel_sdvo_set_timing(
483 struct psb_intel_output *psb_intel_output,
484 u8 cmd,
485 struct psb_intel_sdvo_dtd *dtd)
486{
487 u8 status;
488
489 psb_intel_sdvo_write_cmd(psb_intel_output, cmd, &dtd->part1,
490 sizeof(dtd->part1));
491 status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
492 if (status != SDVO_CMD_STATUS_SUCCESS)
493 return false;
494
495 psb_intel_sdvo_write_cmd(psb_intel_output, cmd + 1, &dtd->part2,
496 sizeof(dtd->part2));
497 status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
498 if (status != SDVO_CMD_STATUS_SUCCESS)
499 return false;
500
501 return true;
502}
503
504static bool psb_intel_sdvo_set_input_timing(
505 struct psb_intel_output *psb_intel_output,
506 struct psb_intel_sdvo_dtd *dtd)
507{
508 return psb_intel_sdvo_set_timing(psb_intel_output,
509 SDVO_CMD_SET_INPUT_TIMINGS_PART1,
510 dtd);
511}
512
513static bool psb_intel_sdvo_set_output_timing(
514 struct psb_intel_output *psb_intel_output,
515 struct psb_intel_sdvo_dtd *dtd)
516{
517 return psb_intel_sdvo_set_timing(psb_intel_output,
518 SDVO_CMD_SET_OUTPUT_TIMINGS_PART1,
519 dtd);
520}
521
522static int psb_intel_sdvo_get_clock_rate_mult(struct psb_intel_output
523 *psb_intel_output)
524{
525 u8 response, status;
526
527 psb_intel_sdvo_write_cmd(psb_intel_output,
528 SDVO_CMD_GET_CLOCK_RATE_MULT,
529 NULL,
530 0);
531
532 status = psb_intel_sdvo_read_response(psb_intel_output, &response, 1);
533
534 if (status != SDVO_CMD_STATUS_SUCCESS) {
535 DRM_DEBUG("Couldn't get SDVO clock rate multiplier\n");
536 return SDVO_CLOCK_RATE_MULT_1X;
537 } else {
538 DRM_DEBUG("Current clock rate multiplier: %d\n", response);
539 }
540
541 return response;
542}
543
544static bool psb_intel_sdvo_set_clock_rate_mult(struct psb_intel_output
545 *psb_intel_output, u8 val)
546{
547 u8 status;
548
549 psb_intel_sdvo_write_cmd(psb_intel_output,
550 SDVO_CMD_SET_CLOCK_RATE_MULT,
551 &val,
552 1);
553
554 status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
555 if (status != SDVO_CMD_STATUS_SUCCESS)
556 return false;
557
558 return true;
559}
560
561static bool psb_sdvo_set_current_inoutmap(struct psb_intel_output *output,
562 u32 in0outputmask,
563 u32 in1outputmask)
564{
565 u8 byArgs[4];
566 u8 status;
567 int i;
568 struct psb_intel_sdvo_priv *sdvo_priv = output->dev_priv;
569
570 /* Make all fields of the args/ret to zero */
571 memset(byArgs, 0, sizeof(byArgs));
572
573 /* Fill up the argument values; */
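	/*
	 * Each mask is a 16-bit struct psb_intel_sdvo_output_flags value;
	 * SDVO_CMD_SET_IN_OUT_MAP takes the two of them back to back as
	 * little-endian words, input 0 first and input 1 second.
	 */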
574 byArgs[0] = (u8) (in0outputmask & 0xFF);
575 byArgs[1] = (u8) ((in0outputmask >> 8) & 0xFF);
576 byArgs[2] = (u8) (in1outputmask & 0xFF);
577 byArgs[3] = (u8) ((in1outputmask >> 8) & 0xFF);
578
579
580 /*save inoutmap arg here*/
581 for (i = 0; i < 4; i++)
582		sdvo_priv->in_out_map[i] = byArgs[i];
583
584 psb_intel_sdvo_write_cmd(output, SDVO_CMD_SET_IN_OUT_MAP, byArgs, 4);
585 status = psb_intel_sdvo_read_response(output, NULL, 0);
586
587 if (status != SDVO_CMD_STATUS_SUCCESS)
588 return false;
589 return true;
590}
591
592
593static void psb_intel_sdvo_set_iomap(struct psb_intel_output *output)
594{
595 u32 dwCurrentSDVOIn0 = 0;
596 u32 dwCurrentSDVOIn1 = 0;
597 u32 dwDevMask = 0;
598
599
600 struct psb_intel_sdvo_priv *sdvo_priv = output->dev_priv;
601
602 /* Please DO NOT change the following code. */
603 /* SDVOB_IN0 or SDVOB_IN1 ==> sdvo_in0 */
604 /* SDVOC_IN0 or SDVOC_IN1 ==> sdvo_in1 */
605 if (sdvo_priv->by_input_wiring & (SDVOB_IN0 | SDVOC_IN0)) {
606 switch (sdvo_priv->active_device) {
607 case SDVO_DEVICE_LVDS:
608 dwDevMask = SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1;
609 break;
610 case SDVO_DEVICE_TMDS:
611 dwDevMask = SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1;
612 break;
613 case SDVO_DEVICE_TV:
614 dwDevMask =
615 SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_SVID0 |
616 SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_YPRPB1 |
617 SDVO_OUTPUT_SVID1 | SDVO_OUTPUT_CVBS1 |
618 SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1;
619 break;
620 case SDVO_DEVICE_CRT:
621 dwDevMask = SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1;
622 break;
623 }
624 dwCurrentSDVOIn0 = (sdvo_priv->active_outputs & dwDevMask);
625 } else if (sdvo_priv->by_input_wiring & (SDVOB_IN1 | SDVOC_IN1)) {
626 switch (sdvo_priv->active_device) {
627 case SDVO_DEVICE_LVDS:
628 dwDevMask = SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1;
629 break;
630 case SDVO_DEVICE_TMDS:
631 dwDevMask = SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1;
632 break;
633 case SDVO_DEVICE_TV:
634 dwDevMask =
635 SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_SVID0 |
636 SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_YPRPB1 |
637 SDVO_OUTPUT_SVID1 | SDVO_OUTPUT_CVBS1 |
638 SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1;
639 break;
640 case SDVO_DEVICE_CRT:
641 dwDevMask = SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1;
642 break;
643 }
644 dwCurrentSDVOIn1 = (sdvo_priv->active_outputs & dwDevMask);
645 }
646
647 psb_sdvo_set_current_inoutmap(output, dwCurrentSDVOIn0,
648 dwCurrentSDVOIn1);
649}
650
651
652static bool psb_intel_sdvo_mode_fixup(struct drm_encoder *encoder,
653 struct drm_display_mode *mode,
654 struct drm_display_mode *adjusted_mode)
655{
656 /* Make the CRTC code factor in the SDVO pixel multiplier. The SDVO
657 * device will be told of the multiplier during mode_set.
658 */
659 adjusted_mode->clock *= psb_intel_sdvo_get_pixel_multiplier(mode);
660 return true;
661}
662
663static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
664 struct drm_display_mode *mode,
665 struct drm_display_mode *adjusted_mode)
666{
667 struct drm_device *dev = encoder->dev;
668 struct drm_crtc *crtc = encoder->crtc;
669 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
670 struct psb_intel_output *psb_intel_output =
671 enc_to_psb_intel_output(encoder);
672 struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
673 u16 width, height;
674 u16 h_blank_len, h_sync_len, v_blank_len, v_sync_len;
675 u16 h_sync_offset, v_sync_offset;
676 u32 sdvox;
677 struct psb_intel_sdvo_dtd output_dtd;
678 int sdvo_pixel_multiply;
679
680 if (!mode)
681 return;
682
683 psb_intel_sdvo_set_target_output(psb_intel_output, 0);
684
685 width = mode->crtc_hdisplay;
686 height = mode->crtc_vdisplay;
687
688 /* do some mode translations */
689 h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
690 h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
691
692 v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
693 v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
694
695 h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
696 v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
697
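	/*
	 * Pack the adjusted mode into the EDID-style DTD from
	 * psb_intel_sdvo_regs.h: part1 holds the low eight bits of the
	 * active and blanking counts with their top nibbles in h_high and
	 * v_high, part2 holds the sync offsets/widths and their overflow
	 * bits.
	 */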
698 output_dtd.part1.clock = mode->clock / 10;
699 output_dtd.part1.h_active = width & 0xff;
700 output_dtd.part1.h_blank = h_blank_len & 0xff;
701 output_dtd.part1.h_high = (((width >> 8) & 0xf) << 4) |
702 ((h_blank_len >> 8) & 0xf);
703 output_dtd.part1.v_active = height & 0xff;
704 output_dtd.part1.v_blank = v_blank_len & 0xff;
705 output_dtd.part1.v_high = (((height >> 8) & 0xf) << 4) |
706 ((v_blank_len >> 8) & 0xf);
707
708 output_dtd.part2.h_sync_off = h_sync_offset;
709 output_dtd.part2.h_sync_width = h_sync_len & 0xff;
710 output_dtd.part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
711 (v_sync_len & 0xf);
712 output_dtd.part2.sync_off_width_high =
713 ((h_sync_offset & 0x300) >> 2) | ((h_sync_len & 0x300) >> 4) |
714 ((v_sync_offset & 0x30) >> 2) | ((v_sync_len & 0x30) >> 4);
715
716 output_dtd.part2.dtd_flags = 0x18;
717 if (mode->flags & DRM_MODE_FLAG_PHSYNC)
718 output_dtd.part2.dtd_flags |= 0x2;
719 if (mode->flags & DRM_MODE_FLAG_PVSYNC)
720 output_dtd.part2.dtd_flags |= 0x4;
721
722 output_dtd.part2.sdvo_flags = 0;
723 output_dtd.part2.v_sync_off_high = v_sync_offset & 0xc0;
724 output_dtd.part2.reserved = 0;
725
726 /* Set the output timing to the screen */
727 psb_intel_sdvo_set_target_output(psb_intel_output,
728 sdvo_priv->active_outputs);
729
730 /* Set the input timing to the screen. Assume always input 0. */
731 psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
732
733 psb_intel_sdvo_set_output_timing(psb_intel_output, &output_dtd);
734
735 /* We would like to use i830_sdvo_create_preferred_input_timing() to
736 * provide the device with a timing it can support, if it supports that
737 * feature. However, presumably we would need to adjust the CRTC to
738 * output the preferred timing, and we don't support that currently.
739 */
740 psb_intel_sdvo_set_input_timing(psb_intel_output, &output_dtd);
741
742 switch (psb_intel_sdvo_get_pixel_multiplier(mode)) {
743 case 1:
744 psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
745 SDVO_CLOCK_RATE_MULT_1X);
746 break;
747 case 2:
748 psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
749 SDVO_CLOCK_RATE_MULT_2X);
750 break;
751 case 4:
752 psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
753 SDVO_CLOCK_RATE_MULT_4X);
754 break;
755 }
756
757 /* Set the SDVO control regs. */
758 sdvox = REG_READ(sdvo_priv->output_device);
759 switch (sdvo_priv->output_device) {
760 case SDVOB:
761 sdvox &= SDVOB_PRESERVE_MASK;
762 break;
763 case SDVOC:
764 sdvox &= SDVOC_PRESERVE_MASK;
765 break;
766 }
767 sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
768 if (psb_intel_crtc->pipe == 1)
769 sdvox |= SDVO_PIPE_B_SELECT;
770
771 sdvo_pixel_multiply = psb_intel_sdvo_get_pixel_multiplier(mode);
772
773 psb_intel_sdvo_write_sdvox(psb_intel_output, sdvox);
774
775 psb_intel_sdvo_set_iomap(psb_intel_output);
776}
777
778static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
779{
780 struct drm_device *dev = encoder->dev;
781 struct psb_intel_output *psb_intel_output =
782 enc_to_psb_intel_output(encoder);
783 struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
784 u32 temp;
785
786 if (mode != DRM_MODE_DPMS_ON) {
787 psb_intel_sdvo_set_active_outputs(psb_intel_output, 0);
788 if (0)
789 psb_intel_sdvo_set_encoder_power_state(
790 psb_intel_output,
791 mode);
792
793 if (mode == DRM_MODE_DPMS_OFF) {
794 temp = REG_READ(sdvo_priv->output_device);
795 if ((temp & SDVO_ENABLE) != 0) {
796 psb_intel_sdvo_write_sdvox(psb_intel_output,
797 temp &
798 ~SDVO_ENABLE);
799 }
800 }
801 } else {
802 bool input1, input2;
803 int i;
804 u8 status;
805
806 temp = REG_READ(sdvo_priv->output_device);
807 if ((temp & SDVO_ENABLE) == 0)
808 psb_intel_sdvo_write_sdvox(psb_intel_output,
809 temp | SDVO_ENABLE);
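		/*
		 * SDVO devices are expected to train within two vsyncs of a
		 * mode change (see SDVO_CMD_GET_TRAINED_INPUTS), so give the
		 * encoder two vblanks before checking its training status.
		 */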
810 for (i = 0; i < 2; i++)
811 psb_intel_wait_for_vblank(dev);
812
813 status =
814 psb_intel_sdvo_get_trained_inputs(psb_intel_output,
815 &input1,
816 &input2);
817
818
819 /* Warn if the device reported failure to sync.
820		 * A lot of SDVO devices fail to notify of sync, but if
821		 * the status is a success we can assume sync was achieved.
822 */
823 if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
824 DRM_DEBUG
825 ("First %s output reported failure to sync\n",
826 SDVO_NAME(sdvo_priv));
827 }
828
829 if (0)
830 psb_intel_sdvo_set_encoder_power_state(
831 psb_intel_output,
832 mode);
833 psb_intel_sdvo_set_active_outputs(psb_intel_output,
834 sdvo_priv->active_outputs);
835 }
836 return;
837}
838
839static void psb_intel_sdvo_save(struct drm_connector *connector)
840{
841 struct drm_device *dev = connector->dev;
842 struct psb_intel_output *psb_intel_output =
843 to_psb_intel_output(connector);
844 struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
845 /*int o;*/
846
847 sdvo_priv->save_sdvo_mult =
848 psb_intel_sdvo_get_clock_rate_mult(psb_intel_output);
849 psb_intel_sdvo_get_active_outputs(psb_intel_output,
850 &sdvo_priv->save_active_outputs);
851
852 if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
853 psb_intel_sdvo_set_target_input(psb_intel_output,
854 true,
855 false);
856 psb_intel_sdvo_get_input_timing(psb_intel_output,
857 &sdvo_priv->save_input_dtd_1);
858 }
859
860 if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
861 psb_intel_sdvo_set_target_input(psb_intel_output,
862 false,
863 true);
864 psb_intel_sdvo_get_input_timing(psb_intel_output,
865 &sdvo_priv->save_input_dtd_2);
866 }
867 sdvo_priv->save_SDVOX = REG_READ(sdvo_priv->output_device);
868
869 /*TODO: save the in_out_map state*/
870}
871
872static void psb_intel_sdvo_restore(struct drm_connector *connector)
873{
874 struct drm_device *dev = connector->dev;
875 struct psb_intel_output *psb_intel_output =
876 to_psb_intel_output(connector);
877 struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
878 /*int o;*/
879 int i;
880 bool input1, input2;
881 u8 status;
882
883 psb_intel_sdvo_set_active_outputs(psb_intel_output, 0);
884
885 if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
886 psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
887 psb_intel_sdvo_set_input_timing(psb_intel_output,
888 &sdvo_priv->save_input_dtd_1);
889 }
890
891 if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
892 psb_intel_sdvo_set_target_input(psb_intel_output, false, true);
893 psb_intel_sdvo_set_input_timing(psb_intel_output,
894 &sdvo_priv->save_input_dtd_2);
895 }
896
897 psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
898 sdvo_priv->save_sdvo_mult);
899
900 REG_WRITE(sdvo_priv->output_device, sdvo_priv->save_SDVOX);
901
902 if (sdvo_priv->save_SDVOX & SDVO_ENABLE) {
903 for (i = 0; i < 2; i++)
904 psb_intel_wait_for_vblank(dev);
905 status =
906 psb_intel_sdvo_get_trained_inputs(psb_intel_output,
907 &input1,
908 &input2);
909 if (status == SDVO_CMD_STATUS_SUCCESS && !input1)
910 DRM_DEBUG
911 ("First %s output reported failure to sync\n",
912 SDVO_NAME(sdvo_priv));
913 }
914
915 psb_intel_sdvo_set_active_outputs(psb_intel_output,
916 sdvo_priv->save_active_outputs);
917
918 /*TODO: restore in_out_map*/
919 psb_intel_sdvo_write_cmd(psb_intel_output,
920 SDVO_CMD_SET_IN_OUT_MAP,
921 sdvo_priv->in_out_map,
922 4);
923
924 psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
925}
926
927static int psb_intel_sdvo_mode_valid(struct drm_connector *connector,
928 struct drm_display_mode *mode)
929{
930 struct psb_intel_output *psb_intel_output =
931 to_psb_intel_output(connector);
932 struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
933
934 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
935 return MODE_NO_DBLESCAN;
936
937 if (sdvo_priv->pixel_clock_min > mode->clock)
938 return MODE_CLOCK_LOW;
939
940 if (sdvo_priv->pixel_clock_max < mode->clock)
941 return MODE_CLOCK_HIGH;
942
943 return MODE_OK;
944}
945
946static bool psb_intel_sdvo_get_capabilities(
947 struct psb_intel_output *psb_intel_output,
948 struct psb_intel_sdvo_caps *caps)
949{
950 u8 status;
951
952 psb_intel_sdvo_write_cmd(psb_intel_output,
953 SDVO_CMD_GET_DEVICE_CAPS,
954 NULL,
955 0);
956 status = psb_intel_sdvo_read_response(psb_intel_output,
957 caps,
958 sizeof(*caps));
959 if (status != SDVO_CMD_STATUS_SUCCESS)
960 return false;
961
962 return true;
963}
964
965struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev, int sdvoB)
966{
967 struct drm_connector *connector = NULL;
968 struct psb_intel_output *iout = NULL;
969 struct psb_intel_sdvo_priv *sdvo;
970
971 /* find the sdvo connector */
972 list_for_each_entry(connector, &dev->mode_config.connector_list,
973 head) {
974 iout = to_psb_intel_output(connector);
975
976 if (iout->type != INTEL_OUTPUT_SDVO)
977 continue;
978
979 sdvo = iout->dev_priv;
980
981 if (sdvo->output_device == SDVOB && sdvoB)
982 return connector;
983
984 if (sdvo->output_device == SDVOC && !sdvoB)
985 return connector;
986
987 }
988
989 return NULL;
990}
991
992int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector)
993{
994 u8 response[2];
995 u8 status;
996 struct psb_intel_output *psb_intel_output;
997
998 if (!connector)
999 return 0;
1000
1001 psb_intel_output = to_psb_intel_output(connector);
1002
1003 psb_intel_sdvo_write_cmd(psb_intel_output,
1004 SDVO_CMD_GET_HOT_PLUG_SUPPORT,
1005 NULL,
1006 0);
1007 status = psb_intel_sdvo_read_response(psb_intel_output,
1008 &response,
1009 2);
1010
1011 if (response[0] != 0)
1012 return 1;
1013
1014 return 0;
1015}
1016
1017void psb_intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
1018{
1019 u8 response[2];
1020 u8 status;
1021 struct psb_intel_output *psb_intel_output =
1022 to_psb_intel_output(connector);
1023
1024 psb_intel_sdvo_write_cmd(psb_intel_output,
1025 SDVO_CMD_GET_ACTIVE_HOT_PLUG,
1026 NULL,
1027 0);
1028 psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
1029
1030 if (on) {
1031 psb_intel_sdvo_write_cmd(psb_intel_output,
1032 SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL,
1033 0);
1034 status = psb_intel_sdvo_read_response(psb_intel_output,
1035 &response,
1036 2);
1037
1038 psb_intel_sdvo_write_cmd(psb_intel_output,
1039 SDVO_CMD_SET_ACTIVE_HOT_PLUG,
1040 &response, 2);
1041 } else {
1042 response[0] = 0;
1043 response[1] = 0;
1044 psb_intel_sdvo_write_cmd(psb_intel_output,
1045 SDVO_CMD_SET_ACTIVE_HOT_PLUG,
1046 &response, 2);
1047 }
1048
1049 psb_intel_sdvo_write_cmd(psb_intel_output,
1050 SDVO_CMD_GET_ACTIVE_HOT_PLUG,
1051 NULL,
1052 0);
1053 psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
1054}
1055
1056static enum drm_connector_status psb_intel_sdvo_detect(struct drm_connector
1057 *connector, bool force)
1058{
1059 u8 response[2];
1060 u8 status;
1061 struct psb_intel_output *psb_intel_output =
1062 to_psb_intel_output(connector);
1063
1064 psb_intel_sdvo_write_cmd(psb_intel_output,
1065 SDVO_CMD_GET_ATTACHED_DISPLAYS,
1066 NULL,
1067 0);
1068 status = psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
1069
1070 DRM_DEBUG("SDVO response %d %d\n", response[0], response[1]);
1071 if ((response[0] != 0) || (response[1] != 0))
1072 return connector_status_connected;
1073 else
1074 return connector_status_disconnected;
1075}
1076
1077static int psb_intel_sdvo_get_modes(struct drm_connector *connector)
1078{
1079 struct psb_intel_output *psb_intel_output =
1080 to_psb_intel_output(connector);
1081
1082 /* set the bus switch and get the modes */
1083 psb_intel_sdvo_set_control_bus_switch(psb_intel_output,
1084 SDVO_CONTROL_BUS_DDC2);
1085 psb_intel_ddc_get_modes(psb_intel_output);
1086
1087 if (list_empty(&connector->probed_modes))
1088 return 0;
1089 return 1;
1090}
1091
1092static void psb_intel_sdvo_destroy(struct drm_connector *connector)
1093{
1094 struct psb_intel_output *psb_intel_output =
1095 to_psb_intel_output(connector);
1096
1097 if (psb_intel_output->i2c_bus)
1098 psb_intel_i2c_destroy(psb_intel_output->i2c_bus);
1099 drm_sysfs_connector_remove(connector);
1100 drm_connector_cleanup(connector);
1101 kfree(psb_intel_output);
1102}
1103
1104static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = {
1105 .dpms = psb_intel_sdvo_dpms,
1106 .mode_fixup = psb_intel_sdvo_mode_fixup,
1107 .prepare = psb_intel_encoder_prepare,
1108 .mode_set = psb_intel_sdvo_mode_set,
1109 .commit = psb_intel_encoder_commit,
1110};
1111
1112static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
1113 .dpms = drm_helper_connector_dpms,
1114 .save = psb_intel_sdvo_save,
1115 .restore = psb_intel_sdvo_restore,
1116 .detect = psb_intel_sdvo_detect,
1117 .fill_modes = drm_helper_probe_single_connector_modes,
1118 .destroy = psb_intel_sdvo_destroy,
1119};
1120
1121static const struct drm_connector_helper_funcs
1122 psb_intel_sdvo_connector_helper_funcs = {
1123 .get_modes = psb_intel_sdvo_get_modes,
1124 .mode_valid = psb_intel_sdvo_mode_valid,
1125 .best_encoder = psb_intel_best_encoder,
1126};
1127
1128void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder)
1129{
1130 drm_encoder_cleanup(encoder);
1131}
1132
1133static const struct drm_encoder_funcs psb_intel_sdvo_enc_funcs = {
1134 .destroy = psb_intel_sdvo_enc_destroy,
1135};
1136
1137
1138void psb_intel_sdvo_init(struct drm_device *dev, int output_device)
1139{
1140 struct drm_connector *connector;
1141 struct psb_intel_output *psb_intel_output;
1142 struct psb_intel_sdvo_priv *sdvo_priv;
1143 struct psb_intel_i2c_chan *i2cbus = NULL;
1144 int connector_type;
1145 u8 ch[0x40];
1146 int i;
1147 int encoder_type, output_id;
1148
1149 psb_intel_output =
1150 kcalloc(sizeof(struct psb_intel_output) +
1151 sizeof(struct psb_intel_sdvo_priv), 1, GFP_KERNEL);
1152 if (!psb_intel_output)
1153 return;
1154
1155 connector = &psb_intel_output->base;
1156
1157 drm_connector_init(dev, connector, &psb_intel_sdvo_connector_funcs,
1158 DRM_MODE_CONNECTOR_Unknown);
1159 drm_connector_helper_add(connector,
1160 &psb_intel_sdvo_connector_helper_funcs);
1161 sdvo_priv = (struct psb_intel_sdvo_priv *) (psb_intel_output + 1);
1162 psb_intel_output->type = INTEL_OUTPUT_SDVO;
1163
1164 connector->interlace_allowed = 0;
1165 connector->doublescan_allowed = 0;
1166
1167 /* setup the DDC bus. */
1168 if (output_device == SDVOB)
1169 i2cbus =
1170 psb_intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
1171 else
1172 i2cbus =
1173 psb_intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
1174
1175 if (!i2cbus)
1176 goto err_connector;
1177
1178 sdvo_priv->i2c_bus = i2cbus;
1179
1180 if (output_device == SDVOB) {
1181 output_id = 1;
1182 sdvo_priv->by_input_wiring = SDVOB_IN0;
1183 sdvo_priv->i2c_bus->slave_addr = 0x38;
1184 } else {
1185 output_id = 2;
1186 sdvo_priv->i2c_bus->slave_addr = 0x39;
1187 }
1188
1189 sdvo_priv->output_device = output_device;
1190 psb_intel_output->i2c_bus = i2cbus;
1191 psb_intel_output->dev_priv = sdvo_priv;
1192
1193
1194 /* Read the regs to test if we can talk to the device */
1195 for (i = 0; i < 0x40; i++) {
1196 if (!psb_intel_sdvo_read_byte(psb_intel_output, i, &ch[i])) {
1197 dev_dbg(dev->dev, "No SDVO device found on SDVO%c\n",
1198 output_device == SDVOB ? 'B' : 'C');
1199 goto err_i2c;
1200 }
1201 }
1202
1203 psb_intel_sdvo_get_capabilities(psb_intel_output, &sdvo_priv->caps);
1204
1205 memset(&sdvo_priv->active_outputs, 0,
1206 sizeof(sdvo_priv->active_outputs));
1207
1208 /* TODO, CVBS, SVID, YPRPB & SCART outputs. */
1209 if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0) {
1210 sdvo_priv->active_outputs = SDVO_OUTPUT_RGB0;
1211 sdvo_priv->active_device = SDVO_DEVICE_CRT;
1212 connector->display_info.subpixel_order =
1213 SubPixelHorizontalRGB;
1214 encoder_type = DRM_MODE_ENCODER_DAC;
1215 connector_type = DRM_MODE_CONNECTOR_VGA;
1216 } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1) {
1217 sdvo_priv->active_outputs = SDVO_OUTPUT_RGB1;
1218		sdvo_priv->active_device = SDVO_DEVICE_CRT;
1219 connector->display_info.subpixel_order =
1220 SubPixelHorizontalRGB;
1221 encoder_type = DRM_MODE_ENCODER_DAC;
1222 connector_type = DRM_MODE_CONNECTOR_VGA;
1223 } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) {
1224 sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS0;
1225 sdvo_priv->active_device = SDVO_DEVICE_TMDS;
1226 connector->display_info.subpixel_order =
1227 SubPixelHorizontalRGB;
1228 encoder_type = DRM_MODE_ENCODER_TMDS;
1229 connector_type = DRM_MODE_CONNECTOR_DVID;
1230 } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS1) {
1231 sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS1;
1232 sdvo_priv->active_device = SDVO_DEVICE_TMDS;
1233 connector->display_info.subpixel_order =
1234 SubPixelHorizontalRGB;
1235 encoder_type = DRM_MODE_ENCODER_TMDS;
1236 connector_type = DRM_MODE_CONNECTOR_DVID;
1237 } else {
1238 unsigned char bytes[2];
1239
1240 memcpy(bytes, &sdvo_priv->caps.output_flags, 2);
1241 dev_dbg(dev->dev, "%s: No active RGB or TMDS outputs (0x%02x%02x)\n",
1242 SDVO_NAME(sdvo_priv), bytes[0], bytes[1]);
1243 goto err_i2c;
1244 }
1245
1246 drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_sdvo_enc_funcs,
1247 encoder_type);
1248 drm_encoder_helper_add(&psb_intel_output->enc,
1249 &psb_intel_sdvo_helper_funcs);
1250 connector->connector_type = connector_type;
1251
1252 drm_mode_connector_attach_encoder(&psb_intel_output->base,
1253 &psb_intel_output->enc);
1254 drm_sysfs_connector_add(connector);
1255
1256 /* Set the input timing to the screen. Assume always input 0. */
1257 psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
1258
1259 psb_intel_sdvo_get_input_pixel_clock_range(psb_intel_output,
1260 &sdvo_priv->pixel_clock_min,
1261					    &sdvo_priv->pixel_clock_max);
1263
1264
1265 dev_dbg(dev->dev, "%s device VID/DID: %02X:%02X.%02X, "
1266 "clock range %dMHz - %dMHz, "
1267 "input 1: %c, input 2: %c, "
1268 "output 1: %c, output 2: %c\n",
1269 SDVO_NAME(sdvo_priv),
1270 sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id,
1271 sdvo_priv->caps.device_rev_id,
1272 sdvo_priv->pixel_clock_min / 1000,
1273 sdvo_priv->pixel_clock_max / 1000,
1274 (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
1275 (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
1276 /* check currently supported outputs */
1277 sdvo_priv->caps.output_flags &
1278 (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
1279 sdvo_priv->caps.output_flags &
1280 (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
1281
1282 psb_intel_output->ddc_bus = i2cbus;
1283
1284 return;
1285
1286err_i2c:
1287 psb_intel_i2c_destroy(psb_intel_output->i2c_bus);
1288err_connector:
1289 drm_connector_cleanup(connector);
1290 kfree(psb_intel_output);
1291
1292 return;
1293}
diff --git a/drivers/staging/gma500/psb_intel_sdvo_regs.h b/drivers/staging/gma500/psb_intel_sdvo_regs.h
deleted file mode 100644
index 96862ea65aba..000000000000
--- a/drivers/staging/gma500/psb_intel_sdvo_regs.h
+++ /dev/null
@@ -1,338 +0,0 @@
1/*
2 * SDVO command definitions and structures.
3 *
4 * Copyright (c) 2008, Intel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Authors:
20 * Eric Anholt <eric@anholt.net>
21 */
22
23#define SDVO_OUTPUT_FIRST (0)
24#define SDVO_OUTPUT_TMDS0 (1 << 0)
25#define SDVO_OUTPUT_RGB0 (1 << 1)
26#define SDVO_OUTPUT_CVBS0 (1 << 2)
27#define SDVO_OUTPUT_SVID0 (1 << 3)
28#define SDVO_OUTPUT_YPRPB0 (1 << 4)
29#define SDVO_OUTPUT_SCART0 (1 << 5)
30#define SDVO_OUTPUT_LVDS0 (1 << 6)
31#define SDVO_OUTPUT_TMDS1 (1 << 8)
32#define SDVO_OUTPUT_RGB1 (1 << 9)
33#define SDVO_OUTPUT_CVBS1 (1 << 10)
34#define SDVO_OUTPUT_SVID1 (1 << 11)
35#define SDVO_OUTPUT_YPRPB1 (1 << 12)
36#define SDVO_OUTPUT_SCART1 (1 << 13)
37#define SDVO_OUTPUT_LVDS1 (1 << 14)
38#define SDVO_OUTPUT_LAST (14)
39
40struct psb_intel_sdvo_caps {
41 u8 vendor_id;
42 u8 device_id;
43 u8 device_rev_id;
44 u8 sdvo_version_major;
45 u8 sdvo_version_minor;
46 unsigned int sdvo_inputs_mask:2;
47 unsigned int smooth_scaling:1;
48 unsigned int sharp_scaling:1;
49 unsigned int up_scaling:1;
50 unsigned int down_scaling:1;
51 unsigned int stall_support:1;
52 unsigned int pad:1;
53 u16 output_flags;
54} __packed;
55
56/** This matches the EDID DTD structure, more or less */
57struct psb_intel_sdvo_dtd {
58 struct {
59 u16 clock; /**< pixel clock, in 10kHz units */
60 u8 h_active; /**< lower 8 bits (pixels) */
61 u8 h_blank; /**< lower 8 bits (pixels) */
62 u8 h_high; /**< upper 4 bits each h_active, h_blank */
63 u8 v_active; /**< lower 8 bits (lines) */
64 u8 v_blank; /**< lower 8 bits (lines) */
65 u8 v_high; /**< upper 4 bits each v_active, v_blank */
66 } part1;
67
68 struct {
69 u8 h_sync_off;
70 /**< lower 8 bits, from hblank start */
71 u8 h_sync_width;/**< lower 8 bits (pixels) */
72 /** lower 4 bits each vsync offset, vsync width */
73 u8 v_sync_off_width;
74 /**
75 * 2 high bits of hsync offset, 2 high bits of hsync width,
76 * bits 4-5 of vsync offset, and 2 high bits of vsync width.
77 */
78 u8 sync_off_width_high;
79 u8 dtd_flags;
80 u8 sdvo_flags;
81	/** bits 6-7 of vsync offset, stored in bits 6-7 of this byte */
82 u8 v_sync_off_high;
83 u8 reserved;
84 } part2;
85} __packed;
86
87struct psb_intel_sdvo_pixel_clock_range {
88 u16 min; /**< pixel clock, in 10kHz units */
89 u16 max; /**< pixel clock, in 10kHz units */
90} __packed;
91
92struct psb_intel_sdvo_preferred_input_timing_args {
93 u16 clock;
94 u16 width;
95 u16 height;
96} __packed;
97
98/* I2C registers for SDVO */
99#define SDVO_I2C_ARG_0 0x07
100#define SDVO_I2C_ARG_1 0x06
101#define SDVO_I2C_ARG_2 0x05
102#define SDVO_I2C_ARG_3 0x04
103#define SDVO_I2C_ARG_4 0x03
104#define SDVO_I2C_ARG_5 0x02
105#define SDVO_I2C_ARG_6 0x01
106#define SDVO_I2C_ARG_7 0x00
107#define SDVO_I2C_OPCODE 0x08
108#define SDVO_I2C_CMD_STATUS 0x09
109#define SDVO_I2C_RETURN_0 0x0a
110#define SDVO_I2C_RETURN_1 0x0b
111#define SDVO_I2C_RETURN_2 0x0c
112#define SDVO_I2C_RETURN_3 0x0d
113#define SDVO_I2C_RETURN_4 0x0e
114#define SDVO_I2C_RETURN_5 0x0f
115#define SDVO_I2C_RETURN_6 0x10
116#define SDVO_I2C_RETURN_7 0x11
117#define SDVO_I2C_VENDOR_BEGIN 0x20
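/*
 * A command is issued by writing its argument bytes to the SDVO_I2C_ARG_*
 * registers and its opcode to SDVO_I2C_OPCODE; the device then reports a
 * status code in SDVO_I2C_CMD_STATUS and returns any data through the
 * SDVO_I2C_RETURN_* registers.  See psb_intel_sdvo_write_cmd() and
 * psb_intel_sdvo_read_response() in psb_intel_sdvo.c.
 */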
118
119/* Status results */
120#define SDVO_CMD_STATUS_POWER_ON 0x0
121#define SDVO_CMD_STATUS_SUCCESS 0x1
122#define SDVO_CMD_STATUS_NOTSUPP 0x2
123#define SDVO_CMD_STATUS_INVALID_ARG 0x3
124#define SDVO_CMD_STATUS_PENDING 0x4
125#define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED 0x5
126#define SDVO_CMD_STATUS_SCALING_NOT_SUPP 0x6
127
128/* SDVO commands, argument/result registers */
129
130#define SDVO_CMD_RESET 0x01
131
132/** Returns a struct psb_intel_sdvo_caps */
133#define SDVO_CMD_GET_DEVICE_CAPS 0x02
134
135#define SDVO_CMD_GET_FIRMWARE_REV 0x86
136# define SDVO_DEVICE_FIRMWARE_MINOR SDVO_I2C_RETURN_0
137# define SDVO_DEVICE_FIRMWARE_MAJOR SDVO_I2C_RETURN_1
138# define SDVO_DEVICE_FIRMWARE_PATCH SDVO_I2C_RETURN_2
139
140/**
141 * Reports which inputs are trained (managed to sync).
142 *
143 * Devices must have trained within 2 vsyncs of a mode change.
144 */
145#define SDVO_CMD_GET_TRAINED_INPUTS 0x03
146struct psb_intel_sdvo_get_trained_inputs_response {
147 unsigned int input0_trained:1;
148 unsigned int input1_trained:1;
149 unsigned int pad:6;
150} __packed;
151
152/** Returns a struct psb_intel_sdvo_output_flags of active outputs. */
153#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04
154
155/**
156 * Sets the current set of active outputs.
157 *
158 * Takes a struct psb_intel_sdvo_output_flags.
159 * Must be preceded by a SET_IN_OUT_MAP
160 * on multi-output devices.
161 */
162#define SDVO_CMD_SET_ACTIVE_OUTPUTS 0x05
163
164/**
165 * Returns the current mapping of SDVO inputs to outputs on the device.
166 *
167 * Returns two struct psb_intel_sdvo_output_flags structures.
168 */
169#define SDVO_CMD_GET_IN_OUT_MAP 0x06
170
171/**
172 * Sets the current mapping of SDVO inputs to outputs on the device.
173 *
174 * Takes two struct psb_intel_sdvo_output_flags structures.
175 */
176#define SDVO_CMD_SET_IN_OUT_MAP 0x07
177
178/**
179 * Returns a struct psb_intel_sdvo_output_flags of attached displays.
180 */
181#define SDVO_CMD_GET_ATTACHED_DISPLAYS 0x0b
182
183/**
184 * Returns a struct psb_intel_sdvo_output_flags of displays supporting hot plugging.
185 */
186#define SDVO_CMD_GET_HOT_PLUG_SUPPORT 0x0c
187
188/**
189 * Takes a struct psb_intel_sdvo_output_flags.
190 */
191#define SDVO_CMD_SET_ACTIVE_HOT_PLUG 0x0d
192
193/**
194 * Returns a struct psb_intel_sdvo_output_flags of displays with hot plug
195 * interrupts enabled.
196 */
197#define SDVO_CMD_GET_ACTIVE_HOT_PLUG 0x0e
198
199#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE 0x0f
200struct psb_intel_sdvo_get_interrupt_event_source_response {
201 u16 interrupt_status;
202 unsigned int ambient_light_interrupt:1;
203 unsigned int pad:7;
204} __packed;
205
206/**
207 * Selects which input is affected by future input commands.
208 *
209 * Commands affected include SET_INPUT_TIMINGS_PART[12],
210 * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12],
211 * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS.
212 */
213#define SDVO_CMD_SET_TARGET_INPUT 0x10
214struct psb_intel_sdvo_set_target_input_args {
215 unsigned int target_1:1;
216 unsigned int pad:7;
217} __packed;
218
219/**
220 * Takes a struct psb_intel_sdvo_output_flags of which outputs are targeted by
221 * future output commands.
222 *
223 * Affected commands include SET_OUTPUT_TIMINGS_PART[12],
224 * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE.
225 */
226#define SDVO_CMD_SET_TARGET_OUTPUT 0x11
227
228#define SDVO_CMD_GET_INPUT_TIMINGS_PART1 0x12
229#define SDVO_CMD_GET_INPUT_TIMINGS_PART2 0x13
230#define SDVO_CMD_SET_INPUT_TIMINGS_PART1 0x14
231#define SDVO_CMD_SET_INPUT_TIMINGS_PART2 0x15
232#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1 0x16
233#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2 0x17
234#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1 0x18
235#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2 0x19
236/* Part 1 */
237# define SDVO_DTD_CLOCK_LOW SDVO_I2C_ARG_0
238# define SDVO_DTD_CLOCK_HIGH SDVO_I2C_ARG_1
239# define SDVO_DTD_H_ACTIVE SDVO_I2C_ARG_2
240# define SDVO_DTD_H_BLANK SDVO_I2C_ARG_3
241# define SDVO_DTD_H_HIGH SDVO_I2C_ARG_4
242# define SDVO_DTD_V_ACTIVE SDVO_I2C_ARG_5
243# define SDVO_DTD_V_BLANK SDVO_I2C_ARG_6
244# define SDVO_DTD_V_HIGH SDVO_I2C_ARG_7
245/* Part 2 */
246# define SDVO_DTD_HSYNC_OFF SDVO_I2C_ARG_0
247# define SDVO_DTD_HSYNC_WIDTH SDVO_I2C_ARG_1
248# define SDVO_DTD_VSYNC_OFF_WIDTH SDVO_I2C_ARG_2
249# define SDVO_DTD_SYNC_OFF_WIDTH_HIGH SDVO_I2C_ARG_3
250# define SDVO_DTD_DTD_FLAGS SDVO_I2C_ARG_4
251# define SDVO_DTD_DTD_FLAG_INTERLACED (1 << 7)
252# define SDVO_DTD_DTD_FLAG_STEREO_MASK (3 << 5)
253# define SDVO_DTD_DTD_FLAG_INPUT_MASK (3 << 3)
254# define SDVO_DTD_DTD_FLAG_SYNC_MASK (3 << 1)
255# define SDVO_DTD_SDVO_FLAGS			SDVO_I2C_ARG_5
256# define SDVO_DTD_SDVO_FLAG_STALL (1 << 7)
257# define SDVO_DTD_SDVO_FLAG_CENTERED (0 << 6)
258# define SDVO_DTD_SDVO_FLAG_UPPER_LEFT (1 << 6)
259# define SDVO_DTD_SDVO_FLAG_SCALING_MASK (3 << 4)
260# define SDVO_DTD_SDVO_FLAG_SCALING_NONE (0 << 4)
261# define SDVO_DTD_SDVO_FLAG_SCALING_SHARP (1 << 4)
262# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH (2 << 4)
263# define SDVO_DTD_VSYNC_OFF_HIGH SDVO_I2C_ARG_6
264
265/**
266 * Generates a DTD based on the given width, height, and flags.
267 *
268 * This will be supported by any device supporting scaling or interlaced
269 * modes.
270 */
271#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING 0x1a
272# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW SDVO_I2C_ARG_0
273# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH SDVO_I2C_ARG_1
274# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW SDVO_I2C_ARG_2
275# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH SDVO_I2C_ARG_3
276# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW SDVO_I2C_ARG_4
277# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH SDVO_I2C_ARG_5
278# define SDVO_PREFERRED_INPUT_TIMING_FLAGS SDVO_I2C_ARG_6
279# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED (1 << 0)
280# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED (1 << 1)
281
282#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1 0x1b
283#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2 0x1c
284
285/** Returns a struct psb_intel_sdvo_pixel_clock_range */
286#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE 0x1d
287/** Returns a struct psb_intel_sdvo_pixel_clock_range */
288#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE 0x1e
289
290/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
291#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS 0x1f
292
293/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
294#define SDVO_CMD_GET_CLOCK_RATE_MULT 0x20
295/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
296#define SDVO_CMD_SET_CLOCK_RATE_MULT 0x21
297# define SDVO_CLOCK_RATE_MULT_1X (1 << 0)
298# define SDVO_CLOCK_RATE_MULT_2X (1 << 1)
299# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
300
301#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
302
303#define SDVO_CMD_GET_TV_FORMAT 0x28
304
305#define SDVO_CMD_SET_TV_FORMAT 0x29
306
307#define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a
308#define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b
309#define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c
310# define SDVO_ENCODER_STATE_ON (1 << 0)
311# define SDVO_ENCODER_STATE_STANDBY (1 << 1)
312# define SDVO_ENCODER_STATE_SUSPEND (1 << 2)
313# define SDVO_ENCODER_STATE_OFF (1 << 3)
314
315#define SDVO_CMD_SET_TV_RESOLUTION_SUPPORT 0x93
316
317#define SDVO_CMD_SET_CONTROL_BUS_SWITCH 0x7a
318# define SDVO_CONTROL_BUS_PROM 0x0
319# define SDVO_CONTROL_BUS_DDC1 0x1
320# define SDVO_CONTROL_BUS_DDC2 0x2
321# define SDVO_CONTROL_BUS_DDC3 0x3
322
323/* SDVO Bus & SDVO Inputs wiring details*/
324/* Bit 0: Is SDVOB connected to In0 (1 = yes, 0 = no) */
325/* Bit 1: Is SDVOB connected to In1 (1 = yes, 0 = no) */
326/* Bit 2: Is SDVOC connected to In0 (1 = yes, 0 = no) */
327/* Bit 3: Is SDVOC connected to In1 (1 = yes, 0 = no) */
328#define SDVOB_IN0 0x01
329#define SDVOB_IN1 0x02
330#define SDVOC_IN0 0x04
331#define SDVOC_IN1 0x08
332
333#define SDVO_DEVICE_NONE 0x00
334#define SDVO_DEVICE_CRT 0x01
335#define SDVO_DEVICE_TV 0x02
336#define SDVO_DEVICE_LVDS 0x04
337#define SDVO_DEVICE_TMDS 0x08
338
diff --git a/drivers/staging/gma500/psb_irq.c b/drivers/staging/gma500/psb_irq.c
deleted file mode 100644
index 36dd63044b06..000000000000
--- a/drivers/staging/gma500/psb_irq.c
+++ /dev/null
@@ -1,627 +0,0 @@
1/**************************************************************************
2 * Copyright (c) 2007, Intel Corporation.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
19 * develop this driver.
20 *
21 **************************************************************************/
22/*
23 */
24
25#include <drm/drmP.h>
26#include "psb_drv.h"
27#include "psb_reg.h"
28#include "psb_intel_reg.h"
29#include "power.h"
30#include "mdfld_output.h"
31
32/*
33 * inline functions
34 */
35
36static inline u32
37psb_pipestat(int pipe)
38{
39 if (pipe == 0)
40 return PIPEASTAT;
41 if (pipe == 1)
42 return PIPEBSTAT;
43 if (pipe == 2)
44 return PIPECSTAT;
45 BUG();
46}
47
48static inline u32
49mid_pipe_event(int pipe)
50{
51 if (pipe == 0)
52 return _PSB_PIPEA_EVENT_FLAG;
53 if (pipe == 1)
54 return _MDFLD_PIPEB_EVENT_FLAG;
55 if (pipe == 2)
56 return _MDFLD_PIPEC_EVENT_FLAG;
57 BUG();
58}
59
60static inline u32
61mid_pipe_vsync(int pipe)
62{
63 if (pipe == 0)
64 return _PSB_VSYNC_PIPEA_FLAG;
65 if (pipe == 1)
66 return _PSB_VSYNC_PIPEB_FLAG;
67 if (pipe == 2)
68 return _MDFLD_PIPEC_VBLANK_FLAG;
69 BUG();
70}
71
72static inline u32
73mid_pipeconf(int pipe)
74{
75 if (pipe == 0)
76 return PIPEACONF;
77 if (pipe == 1)
78 return PIPEBCONF;
79 if (pipe == 2)
80 return PIPECCONF;
81 BUG();
82}
83
84void
85psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
86{
87 if ((dev_priv->pipestat[pipe] & mask) != mask) {
88 u32 reg = psb_pipestat(pipe);
89 dev_priv->pipestat[pipe] |= mask;
90 /* Enable the interrupt, clear any pending status */
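		/*
		 * The enable bits live in the high half of PIPExSTAT and each
		 * has a matching status bit sixteen positions lower, so
		 * OR-ing in (mask | mask >> 16) appears to both arm the
		 * interrupt and acknowledge anything already pending.
		 */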
91 if (gma_power_begin(dev_priv->dev, false)) {
92 u32 writeVal = PSB_RVDC32(reg);
93 writeVal |= (mask | (mask >> 16));
94 PSB_WVDC32(writeVal, reg);
95 (void) PSB_RVDC32(reg);
96 gma_power_end(dev_priv->dev);
97 }
98 }
99}
100
101void
102psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
103{
104 if ((dev_priv->pipestat[pipe] & mask) != 0) {
105 u32 reg = psb_pipestat(pipe);
106 dev_priv->pipestat[pipe] &= ~mask;
107 if (gma_power_begin(dev_priv->dev, false)) {
108 u32 writeVal = PSB_RVDC32(reg);
109 writeVal &= ~mask;
110 PSB_WVDC32(writeVal, reg);
111 (void) PSB_RVDC32(reg);
112 gma_power_end(dev_priv->dev);
113 }
114 }
115}
116
117void mid_enable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
118{
119 if (gma_power_begin(dev_priv->dev, false)) {
120 u32 pipe_event = mid_pipe_event(pipe);
121 dev_priv->vdc_irq_mask |= pipe_event;
122 PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
123 PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
124 gma_power_end(dev_priv->dev);
125 }
126}
127
128void mid_disable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
129{
130 if (dev_priv->pipestat[pipe] == 0) {
131 if (gma_power_begin(dev_priv->dev, false)) {
132 u32 pipe_event = mid_pipe_event(pipe);
133 dev_priv->vdc_irq_mask &= ~pipe_event;
134 PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
135 PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
136 gma_power_end(dev_priv->dev);
137 }
138 }
139}
140
141/**
142 * Display controller interrupt handler for pipe event.
143 *
144 */
145static void mid_pipe_event_handler(struct drm_device *dev, int pipe)
146{
147 struct drm_psb_private *dev_priv =
148 (struct drm_psb_private *) dev->dev_private;
149
150 uint32_t pipe_stat_val = 0;
151 uint32_t pipe_stat_reg = psb_pipestat(pipe);
152 uint32_t pipe_enable = dev_priv->pipestat[pipe];
153 uint32_t pipe_status = dev_priv->pipestat[pipe] >> 16;
154 uint32_t pipe_clear;
155 uint32_t i = 0;
156
157 spin_lock(&dev_priv->irqmask_lock);
158
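	/*
	 * Keep only events we enabled: mask the raw register against the
	 * cached enable bits (high half) and their status positions (low
	 * half), then drop any status bit whose enable bit is clear by
	 * AND-ing the value with itself shifted down sixteen bits.
	 */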
159 pipe_stat_val = PSB_RVDC32(pipe_stat_reg);
160 pipe_stat_val &= pipe_enable | pipe_status;
161 pipe_stat_val &= pipe_stat_val >> 16;
162
163 spin_unlock(&dev_priv->irqmask_lock);
164
165 /* Clear the 2nd level interrupt status bits
166 * Sometimes the bits are very sticky so we repeat until they unstick */
167 for (i = 0; i < 0xffff; i++) {
168 PSB_WVDC32(PSB_RVDC32(pipe_stat_reg), pipe_stat_reg);
169 pipe_clear = PSB_RVDC32(pipe_stat_reg) & pipe_status;
170
171 if (pipe_clear == 0)
172 break;
173 }
174
175 if (pipe_clear)
176 dev_err(dev->dev,
177 "%s, can't clear status bits for pipe %d, its value = 0x%x.\n",
178 __func__, pipe, PSB_RVDC32(pipe_stat_reg));
179
180 if (pipe_stat_val & PIPE_VBLANK_STATUS)
181 drm_handle_vblank(dev, pipe);
182
183 if (pipe_stat_val & PIPE_TE_STATUS)
184 drm_handle_vblank(dev, pipe);
185}
186
187/*
188 * Display controller interrupt handler.
189 */
190static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
191{
192 if (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)
193 mid_pipe_event_handler(dev, 0);
194
195 if (vdc_stat & _PSB_VSYNC_PIPEB_FLAG)
196 mid_pipe_event_handler(dev, 1);
197}
198
199irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
200{
201 struct drm_device *dev = (struct drm_device *) arg;
202 struct drm_psb_private *dev_priv =
203 (struct drm_psb_private *) dev->dev_private;
204
205 uint32_t vdc_stat, dsp_int = 0, sgx_int = 0;
206 int handled = 0;
207
208 spin_lock(&dev_priv->irqmask_lock);
209
210 vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
211
212 if (vdc_stat & _PSB_PIPE_EVENT_FLAG)
213 dsp_int = 1;
214
215 /* FIXME: Handle Medfield
216 if (vdc_stat & _MDFLD_DISP_ALL_IRQ_FLAG)
217 dsp_int = 1;
218 */
219
220 if (vdc_stat & _PSB_IRQ_SGX_FLAG)
221 sgx_int = 1;
222
223 vdc_stat &= dev_priv->vdc_irq_mask;
224 spin_unlock(&dev_priv->irqmask_lock);
225
226 if (dsp_int && gma_power_is_on(dev)) {
227 psb_vdc_interrupt(dev, vdc_stat);
228 handled = 1;
229 }
230
231 if (sgx_int) {
232 /* Not expected - we have it masked, shut it up */
233 u32 s, s2;
234 s = PSB_RSGX32(PSB_CR_EVENT_STATUS);
235 s2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
236 PSB_WSGX32(s, PSB_CR_EVENT_HOST_CLEAR);
237 PSB_WSGX32(s2, PSB_CR_EVENT_HOST_CLEAR2);
238		/* if s & _PSB_CE_TWOD_COMPLETE we have 2D done, but
239		   we may as well keep polling even if we add that! */
240 handled = 1;
241 }
242
243 PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
244 (void) PSB_RVDC32(PSB_INT_IDENTITY_R);
245 DRM_READMEMORYBARRIER();
246
247 if (!handled)
248 return IRQ_NONE;
249
250 return IRQ_HANDLED;
251}
252
253void psb_irq_preinstall(struct drm_device *dev)
254{
255 struct drm_psb_private *dev_priv =
256 (struct drm_psb_private *) dev->dev_private;
257 unsigned long irqflags;
258
259 spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
260
261 if (gma_power_is_on(dev))
262 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
263 if (dev->vblank_enabled[0])
264 dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
265 if (dev->vblank_enabled[1])
266 dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
267
268 /* FIXME: Handle Medfield irq mask
269 if (dev->vblank_enabled[1])
270 dev_priv->vdc_irq_mask |= _MDFLD_PIPEB_EVENT_FLAG;
271 if (dev->vblank_enabled[2])
272 dev_priv->vdc_irq_mask |= _MDFLD_PIPEC_EVENT_FLAG;
273 */
274
275 /* This register is safe even if display island is off */
276 PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
277 spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
278}
279
280int psb_irq_postinstall(struct drm_device *dev)
281{
282 struct drm_psb_private *dev_priv =
283 (struct drm_psb_private *) dev->dev_private;
284 unsigned long irqflags;
285
286 spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
287
288 /* This register is safe even if display island is off */
289 PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
290 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
291
292 if (dev->vblank_enabled[0])
293 psb_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
294 else
295 psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
296
297 if (dev->vblank_enabled[1])
298 psb_enable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
299 else
300 psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
301
302 if (dev->vblank_enabled[2])
303 psb_enable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
304 else
305 psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
306
307 spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
308 return 0;
309}
310
311void psb_irq_uninstall(struct drm_device *dev)
312{
313 struct drm_psb_private *dev_priv =
314 (struct drm_psb_private *) dev->dev_private;
315 unsigned long irqflags;
316
317 spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
318
319 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
320
321 if (dev->vblank_enabled[0])
322 psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
323
324 if (dev->vblank_enabled[1])
325 psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
326
327 if (dev->vblank_enabled[2])
328 psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
329
330 dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
331 _PSB_IRQ_MSVDX_FLAG |
332 _LNC_IRQ_TOPAZ_FLAG;
333
334 /* These two registers are safe even if display island is off */
335 PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
336 PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
337
338 wmb();
339
340 /* This register is safe even if display island is off */
341 PSB_WVDC32(PSB_RVDC32(PSB_INT_IDENTITY_R), PSB_INT_IDENTITY_R);
342 spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
343}
344
345void psb_irq_turn_on_dpst(struct drm_device *dev)
346{
347 struct drm_psb_private *dev_priv =
348 (struct drm_psb_private *) dev->dev_private;
349 u32 hist_reg;
350 u32 pwm_reg;
351
352 if (gma_power_begin(dev, false)) {
353 PSB_WVDC32(1 << 31, HISTOGRAM_LOGIC_CONTROL);
354 hist_reg = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
355 PSB_WVDC32(1 << 31, HISTOGRAM_INT_CONTROL);
356 hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
357
358 PSB_WVDC32(0x80010100, PWM_CONTROL_LOGIC);
359 pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
360 PSB_WVDC32(pwm_reg | PWM_PHASEIN_ENABLE
361 | PWM_PHASEIN_INT_ENABLE,
362 PWM_CONTROL_LOGIC);
363 pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
364
365 psb_enable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);
366
367 hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
368 PSB_WVDC32(hist_reg | HISTOGRAM_INT_CTRL_CLEAR,
369 HISTOGRAM_INT_CONTROL);
370 pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
371 PSB_WVDC32(pwm_reg | 0x80010100 | PWM_PHASEIN_ENABLE,
372 PWM_CONTROL_LOGIC);
373
374 gma_power_end(dev);
375 }
376}
377
378int psb_irq_enable_dpst(struct drm_device *dev)
379{
380 struct drm_psb_private *dev_priv =
381 (struct drm_psb_private *) dev->dev_private;
382 unsigned long irqflags;
383
384 spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
385
386 /* enable DPST */
387 mid_enable_pipe_event(dev_priv, 0);
388 psb_irq_turn_on_dpst(dev);
389
390 spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
391 return 0;
392}
393
394void psb_irq_turn_off_dpst(struct drm_device *dev)
395{
396 struct drm_psb_private *dev_priv =
397 (struct drm_psb_private *) dev->dev_private;
398 u32 hist_reg;
399 u32 pwm_reg;
400
401 if (gma_power_begin(dev, false)) {
402 PSB_WVDC32(0x00000000, HISTOGRAM_INT_CONTROL);
403 hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
404
405 psb_disable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);
406
407 pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
408		PSB_WVDC32(pwm_reg & ~PWM_PHASEIN_INT_ENABLE,
409 PWM_CONTROL_LOGIC);
410 pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
411
412 gma_power_end(dev);
413 }
414}
415
416int psb_irq_disable_dpst(struct drm_device *dev)
417{
418 struct drm_psb_private *dev_priv =
419 (struct drm_psb_private *) dev->dev_private;
420 unsigned long irqflags;
421
422 spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
423
424 mid_disable_pipe_event(dev_priv, 0);
425 psb_irq_turn_off_dpst(dev);
426
427 spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
428
429 return 0;
430}
431
432#ifdef PSB_FIXME
433static int psb_vblank_do_wait(struct drm_device *dev,
434 unsigned int *sequence, atomic_t *counter)
435{
436 unsigned int cur_vblank;
437 int ret = 0;
438 DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
439 (((cur_vblank = atomic_read(counter))
440 - *sequence) <= (1 << 23)));
441 *sequence = cur_vblank;
442
443 return ret;
444}
445#endif
446
447/*
448 * Enable the VBLANK interrupt on the given display pipe
449 */
450int psb_enable_vblank(struct drm_device *dev, int pipe)
451{
452 struct drm_psb_private *dev_priv = dev->dev_private;
453 unsigned long irqflags;
454 uint32_t reg_val = 0;
455 uint32_t pipeconf_reg = mid_pipeconf(pipe);
456
457#if defined(CONFIG_DRM_PSB_MFLD)
458 /* Medfield is different - we should perhaps extract out vblank
459	   and backlight etc ops */
460 if (IS_MFLD(dev) && !mdfld_panel_dpi(dev))
461 return mdfld_enable_te(dev, pipe);
462#endif
463 if (gma_power_begin(dev, false)) {
464 reg_val = REG_READ(pipeconf_reg);
465 gma_power_end(dev);
466 }
467
468 if (!(reg_val & PIPEACONF_ENABLE))
469 return -EINVAL;
470
471 spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
472
473 if (pipe == 0)
474 dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
475 else if (pipe == 1)
476 dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
477
478 PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
479 PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
480 psb_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
481
482 spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
483
484 return 0;
485}
486
487/*
488 * Disable the VBLANK interrupt on the given display pipe
489 */
490void psb_disable_vblank(struct drm_device *dev, int pipe)
491{
492 struct drm_psb_private *dev_priv = dev->dev_private;
493 unsigned long irqflags;
494
495#if defined(CONFIG_DRM_PSB_MFLD)
496 if (IS_MFLD(dev) && !mdfld_panel_dpi(dev))
497 mdfld_disable_te(dev, pipe);
498#endif
499 spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
500
501 if (pipe == 0)
502 dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEA_FLAG;
503 else if (pipe == 1)
504 dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEB_FLAG;
505
506 PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
507 PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
508 psb_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
509
510 spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
511}
512
513/**
514 * mdfld_enable_te - enable TE events
515 * @dev: our DRM device
516 * @pipe: which pipe to work on
517 *
518 * Enable TE events on a Medfield display pipe. Medfield specific.
519 */
520int mdfld_enable_te(struct drm_device *dev, int pipe)
521{
522 struct drm_psb_private *dev_priv = dev->dev_private;
523 unsigned long flags;
524 uint32_t reg_val = 0;
525 uint32_t pipeconf_reg = mid_pipeconf(pipe);
526
527 if (gma_power_begin(dev, false)) {
528 reg_val = REG_READ(pipeconf_reg);
529 gma_power_end(dev);
530 }
531
532 if (!(reg_val & PIPEACONF_ENABLE))
533 return -EINVAL;
534
535 spin_lock_irqsave(&dev_priv->irqmask_lock, flags);
536
537 mid_enable_pipe_event(dev_priv, pipe);
538 psb_enable_pipestat(dev_priv, pipe, PIPE_TE_ENABLE);
539
540 spin_unlock_irqrestore(&dev_priv->irqmask_lock, flags);
541
542 return 0;
543}
544
545/**
546 * mdfld_disable_te - disable TE events
547 * @dev: our DRM device
548 * @pipe: which pipe to work on
549 *
550 * Disable TE events on a Medfield display pipe. Medfield specific.
551 */
552void mdfld_disable_te(struct drm_device *dev, int pipe)
553{
554 struct drm_psb_private *dev_priv = dev->dev_private;
555 unsigned long flags;
556
557 spin_lock_irqsave(&dev_priv->irqmask_lock, flags);
558
559 mid_disable_pipe_event(dev_priv, pipe);
560 psb_disable_pipestat(dev_priv, pipe, PIPE_TE_ENABLE);
561
562 spin_unlock_irqrestore(&dev_priv->irqmask_lock, flags);
563}
564
565/* Called from drm generic code, passed a 'crtc', which
566 * we use as a pipe index
567 */
568u32 psb_get_vblank_counter(struct drm_device *dev, int pipe)
569{
570 uint32_t high_frame = PIPEAFRAMEHIGH;
571 uint32_t low_frame = PIPEAFRAMEPIXEL;
572 uint32_t pipeconf_reg = PIPEACONF;
573 uint32_t reg_val = 0;
574 uint32_t high1 = 0, high2 = 0, low = 0, count = 0;
575
576 switch (pipe) {
577 case 0:
578 break;
579 case 1:
580 high_frame = PIPEBFRAMEHIGH;
581 low_frame = PIPEBFRAMEPIXEL;
582 pipeconf_reg = PIPEBCONF;
583 break;
584 case 2:
585 high_frame = PIPECFRAMEHIGH;
586 low_frame = PIPECFRAMEPIXEL;
587 pipeconf_reg = PIPECCONF;
588 break;
589 default:
590 dev_err(dev->dev, "%s, invalid pipe.\n", __func__);
591 return 0;
592 }
593
594 if (!gma_power_begin(dev, false))
595 return 0;
596
597 reg_val = REG_READ(pipeconf_reg);
598
599 if (!(reg_val & PIPEACONF_ENABLE)) {
600 dev_err(dev->dev, "trying to get vblank count for disabled pipe %d\n",
601 pipe);
602 goto psb_get_vblank_counter_exit;
603 }
604
605 /*
606 * High & low register fields aren't synchronized, so make sure
607 * we get a low value that's stable across two reads of the high
608 * register.
609 */
610 do {
611 high1 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
612 PIPE_FRAME_HIGH_SHIFT);
613 low = ((REG_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
614 PIPE_FRAME_LOW_SHIFT);
615 high2 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
616 PIPE_FRAME_HIGH_SHIFT);
617 } while (high1 != high2);
618
619 count = (high1 << 8) | low;
620
621psb_get_vblank_counter_exit:
622
623 gma_power_end(dev);
624
625 return count;
626}
627
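The do/while loop at the end of psb_get_vblank_counter above is the usual way to read a counter whose high and low halves live in separate, unlatched registers: read high, read low, read high again, and retry until the two high reads agree. A minimal self-contained sketch of the technique follows (the register layout and the read_reg() helper are hypothetical stand-ins, not the driver's MMIO accessors):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical split counter: the low 8 bits and the high 16 bits are
 * exposed through two "registers" that cannot be read atomically. */
static volatile uint32_t frame_high;	/* bits 23:8 of the count */
static volatile uint32_t frame_low;	/* bits  7:0 of the count */

static uint32_t read_reg(volatile uint32_t *reg)
{
	return *reg;			/* stands in for a real MMIO read */
}

static uint32_t read_frame_count(void)
{
	uint32_t high1, high2, low;

	/* Retry until the high half is stable across the low read. */
	do {
		high1 = read_reg(&frame_high);
		low   = read_reg(&frame_low);
		high2 = read_reg(&frame_high);
	} while (high1 != high2);

	return (high1 << 8) | low;
}

int main(void)
{
	frame_high = 0x1234;
	frame_low  = 0x56;
	printf("frame count: 0x%06x\n", read_frame_count());	/* 0x123456 */
	return 0;
}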
diff --git a/drivers/staging/gma500/psb_irq.h b/drivers/staging/gma500/psb_irq.h
deleted file mode 100644
index 216fda38b57d..000000000000
--- a/drivers/staging/gma500/psb_irq.h
+++ /dev/null
@@ -1,45 +0,0 @@
1/**************************************************************************
2 * Copyright (c) 2009-2011, Intel Corporation.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * Authors:
19 * Benjamin Defnet <benjamin.r.defnet@intel.com>
20 * Rajesh Poornachandran <rajesh.poornachandran@intel.com>
21 *
22 **************************************************************************/
23
24#ifndef _SYSIRQ_H_
25#define _SYSIRQ_H_
26
27#include <drm/drmP.h>
28
29bool sysirq_init(struct drm_device *dev);
30void sysirq_uninit(struct drm_device *dev);
31
32void psb_irq_preinstall(struct drm_device *dev);
33int psb_irq_postinstall(struct drm_device *dev);
34void psb_irq_uninstall(struct drm_device *dev);
35irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
36
37int psb_irq_enable_dpst(struct drm_device *dev);
38int psb_irq_disable_dpst(struct drm_device *dev);
39void psb_irq_turn_on_dpst(struct drm_device *dev);
40void psb_irq_turn_off_dpst(struct drm_device *dev);
41int psb_enable_vblank(struct drm_device *dev, int pipe);
42void psb_disable_vblank(struct drm_device *dev, int pipe);
43u32 psb_get_vblank_counter(struct drm_device *dev, int pipe);
44
45#endif /* _SYSIRQ_H_ */
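The psb_irq_* entry points declared above are the hooks a DRIVER_HAVE_IRQ driver of this kernel generation hands to the DRM core through struct drm_driver. A rough sketch of the wiring (abridged and paraphrased; the authoritative assignments live in psb_drv.c):

/* Sketch only: how the hooks declared above are typically wired up. */
static struct drm_driver psb_example_driver = {
	.driver_features	= DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
				  DRIVER_MODESET | DRIVER_GEM,
	.irq_preinstall		= psb_irq_preinstall,	/* mask everything   */
	.irq_postinstall	= psb_irq_postinstall,	/* unmask + pipestat */
	.irq_uninstall		= psb_irq_uninstall,	/* mask + disable    */
	.irq_handler		= psb_irq_handler,	/* top-half handler  */
	.enable_vblank		= psb_enable_vblank,
	.disable_vblank		= psb_disable_vblank,
	.get_vblank_counter	= psb_get_vblank_counter,
	/* name, fops, ioctls, etc. omitted */
};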
diff --git a/drivers/staging/gma500/psb_lid.c b/drivers/staging/gma500/psb_lid.c
deleted file mode 100644
index b867aabe6bf3..000000000000
--- a/drivers/staging/gma500/psb_lid.c
+++ /dev/null
@@ -1,88 +0,0 @@
1/**************************************************************************
2 * Copyright (c) 2007, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
18 **************************************************************************/
19
20#include <drm/drmP.h>
21#include "psb_drv.h"
22#include "psb_reg.h"
23#include "psb_intel_reg.h"
24#include <linux/spinlock.h>
25
26static void psb_lid_timer_func(unsigned long data)
27{
28 struct drm_psb_private * dev_priv = (struct drm_psb_private *)data;
29 struct drm_device *dev = (struct drm_device *)dev_priv->dev;
30 struct timer_list *lid_timer = &dev_priv->lid_timer;
31 unsigned long irq_flags;
32 u32 *lid_state = dev_priv->lid_state;
33 u32 pp_status;
34
35 if (readl(lid_state) == dev_priv->lid_last_state)
36 goto lid_timer_schedule;
37
38 if ((readl(lid_state)) & 0x01) {
39 /*lid state is open*/
40 REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON);
41 do {
42 pp_status = REG_READ(PP_STATUS);
43 } while ((pp_status & PP_ON) == 0);
44
45		/* FIXME: should restore the previous backlight level */
46 psb_intel_lvds_set_brightness(dev, 100);
47 } else {
48 psb_intel_lvds_set_brightness(dev, 0);
49
50 REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) & ~POWER_TARGET_ON);
51 do {
52 pp_status = REG_READ(PP_STATUS);
53 } while ((pp_status & PP_ON) == 0);
54 }
55 dev_priv->lid_last_state = readl(lid_state);
56
57lid_timer_schedule:
58 spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
59 if (!timer_pending(lid_timer)) {
60 lid_timer->expires = jiffies + PSB_LID_DELAY;
61 add_timer(lid_timer);
62 }
63 spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
64}
65
66void psb_lid_timer_init(struct drm_psb_private *dev_priv)
67{
68 struct timer_list *lid_timer = &dev_priv->lid_timer;
69 unsigned long irq_flags;
70
71 spin_lock_init(&dev_priv->lid_lock);
72 spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
73
74 init_timer(lid_timer);
75
76 lid_timer->data = (unsigned long)dev_priv;
77 lid_timer->function = psb_lid_timer_func;
78 lid_timer->expires = jiffies + PSB_LID_DELAY;
79
80 add_timer(lid_timer);
81 spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
82}
83
84void psb_lid_timer_takedown(struct drm_psb_private *dev_priv)
85{
86 del_timer_sync(&dev_priv->lid_timer);
87}
88
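psb_lid_timer_func above re-arms its own timer under dev_priv->lid_lock, guarding against double-arming with timer_pending(). With the timer API of this kernel generation the same poll-and-rearm shape can also be expressed with mod_timer(), which is safe to call whether or not the timer is pending. A hedged sketch, with poll_lid_state() and LID_POLL_DELAY as hypothetical placeholders:

/* Sketch only: self-rearming poll using the 3.3-era timer API. */
#include <linux/timer.h>
#include <linux/jiffies.h>

#define LID_POLL_DELAY	(HZ / 2)	/* hypothetical poll period */

static struct timer_list lid_poll_timer;
static u32 lid_last_state;

static u32 poll_lid_state(void)		/* placeholder for the real read */
{
	return 0;
}

static void lid_poll_func(unsigned long data)
{
	u32 state = poll_lid_state();

	if (state != lid_last_state) {
		/* react to the open/close transition here */
		lid_last_state = state;
	}

	/* mod_timer() re-arms regardless of whether the timer is pending */
	mod_timer(&lid_poll_timer, jiffies + LID_POLL_DELAY);
}

static void lid_poll_init(void)
{
	setup_timer(&lid_poll_timer, lid_poll_func, 0);
	mod_timer(&lid_poll_timer, jiffies + LID_POLL_DELAY);
}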
diff --git a/drivers/staging/gma500/psb_reg.h b/drivers/staging/gma500/psb_reg.h
deleted file mode 100644
index b81c7c1e9c2d..000000000000
--- a/drivers/staging/gma500/psb_reg.h
+++ /dev/null
@@ -1,582 +0,0 @@
1/**************************************************************************
2 *
3 * Copyright (c) (2005-2007) Imagination Technologies Limited.
4 * Copyright (c) 2007, Intel Corporation.
5 * All Rights Reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA..
19 *
20 **************************************************************************/
21
22#ifndef _PSB_REG_H_
23#define _PSB_REG_H_
24
25#define PSB_CR_CLKGATECTL 0x0000
26#define _PSB_C_CLKGATECTL_AUTO_MAN_REG (1 << 24)
27#define _PSB_C_CLKGATECTL_USE_CLKG_SHIFT (20)
28#define _PSB_C_CLKGATECTL_USE_CLKG_MASK (0x3 << 20)
29#define _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT (16)
30#define _PSB_C_CLKGATECTL_DPM_CLKG_MASK (0x3 << 16)
31#define _PSB_C_CLKGATECTL_TA_CLKG_SHIFT (12)
32#define _PSB_C_CLKGATECTL_TA_CLKG_MASK (0x3 << 12)
33#define _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT (8)
34#define _PSB_C_CLKGATECTL_TSP_CLKG_MASK (0x3 << 8)
35#define _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT (4)
36#define _PSB_C_CLKGATECTL_ISP_CLKG_MASK (0x3 << 4)
37#define _PSB_C_CLKGATECTL_2D_CLKG_SHIFT (0)
38#define _PSB_C_CLKGATECTL_2D_CLKG_MASK (0x3 << 0)
39#define _PSB_C_CLKGATECTL_CLKG_ENABLED (0)
40#define _PSB_C_CLKGATECTL_CLKG_DISABLED (1)
41#define _PSB_C_CLKGATECTL_CLKG_AUTO (2)
42
43#define PSB_CR_CORE_ID 0x0010
44#define _PSB_CC_ID_ID_SHIFT (16)
45#define _PSB_CC_ID_ID_MASK (0xFFFF << 16)
46#define _PSB_CC_ID_CONFIG_SHIFT (0)
47#define _PSB_CC_ID_CONFIG_MASK (0xFFFF << 0)
48
49#define PSB_CR_CORE_REVISION 0x0014
50#define _PSB_CC_REVISION_DESIGNER_SHIFT (24)
51#define _PSB_CC_REVISION_DESIGNER_MASK (0xFF << 24)
52#define _PSB_CC_REVISION_MAJOR_SHIFT (16)
53#define _PSB_CC_REVISION_MAJOR_MASK (0xFF << 16)
54#define _PSB_CC_REVISION_MINOR_SHIFT (8)
55#define _PSB_CC_REVISION_MINOR_MASK (0xFF << 8)
56#define _PSB_CC_REVISION_MAINTENANCE_SHIFT (0)
57#define _PSB_CC_REVISION_MAINTENANCE_MASK (0xFF << 0)
58
59#define PSB_CR_DESIGNER_REV_FIELD1 0x0018
60
61#define PSB_CR_SOFT_RESET 0x0080
62#define _PSB_CS_RESET_TSP_RESET (1 << 6)
63#define _PSB_CS_RESET_ISP_RESET (1 << 5)
64#define _PSB_CS_RESET_USE_RESET (1 << 4)
65#define _PSB_CS_RESET_TA_RESET (1 << 3)
66#define _PSB_CS_RESET_DPM_RESET (1 << 2)
67#define _PSB_CS_RESET_TWOD_RESET (1 << 1)
68#define _PSB_CS_RESET_BIF_RESET (1 << 0)
69
70#define PSB_CR_DESIGNER_REV_FIELD2 0x001C
71
72#define PSB_CR_EVENT_HOST_ENABLE2 0x0110
73
74#define PSB_CR_EVENT_STATUS2 0x0118
75
76#define PSB_CR_EVENT_HOST_CLEAR2 0x0114
77#define _PSB_CE2_BIF_REQUESTER_FAULT (1 << 4)
78
79#define PSB_CR_EVENT_STATUS 0x012C
80
81#define PSB_CR_EVENT_HOST_ENABLE 0x0130
82
83#define PSB_CR_EVENT_HOST_CLEAR 0x0134
84#define _PSB_CE_MASTER_INTERRUPT (1 << 31)
85#define _PSB_CE_TA_DPM_FAULT (1 << 28)
86#define _PSB_CE_TWOD_COMPLETE (1 << 27)
87#define _PSB_CE_DPM_OUT_OF_MEMORY_ZLS (1 << 25)
88#define _PSB_CE_DPM_TA_MEM_FREE (1 << 24)
89#define _PSB_CE_PIXELBE_END_RENDER (1 << 18)
90#define _PSB_CE_SW_EVENT (1 << 14)
91#define _PSB_CE_TA_FINISHED (1 << 13)
92#define _PSB_CE_TA_TERMINATE (1 << 12)
93#define _PSB_CE_DPM_REACHED_MEM_THRESH (1 << 3)
94#define _PSB_CE_DPM_OUT_OF_MEMORY_GBL (1 << 2)
95#define _PSB_CE_DPM_OUT_OF_MEMORY_MT (1 << 1)
96#define _PSB_CE_DPM_3D_MEM_FREE (1 << 0)
97
98
99#define PSB_USE_OFFSET_MASK 0x0007FFFF
100#define PSB_USE_OFFSET_SIZE (PSB_USE_OFFSET_MASK + 1)
101#define PSB_CR_USE_CODE_BASE0 0x0A0C
102#define PSB_CR_USE_CODE_BASE1 0x0A10
103#define PSB_CR_USE_CODE_BASE2 0x0A14
104#define PSB_CR_USE_CODE_BASE3 0x0A18
105#define PSB_CR_USE_CODE_BASE4 0x0A1C
106#define PSB_CR_USE_CODE_BASE5 0x0A20
107#define PSB_CR_USE_CODE_BASE6 0x0A24
108#define PSB_CR_USE_CODE_BASE7 0x0A28
109#define PSB_CR_USE_CODE_BASE8 0x0A2C
110#define PSB_CR_USE_CODE_BASE9 0x0A30
111#define PSB_CR_USE_CODE_BASE10 0x0A34
112#define PSB_CR_USE_CODE_BASE11 0x0A38
113#define PSB_CR_USE_CODE_BASE12 0x0A3C
114#define PSB_CR_USE_CODE_BASE13 0x0A40
115#define PSB_CR_USE_CODE_BASE14 0x0A44
116#define PSB_CR_USE_CODE_BASE15 0x0A48
117#define PSB_CR_USE_CODE_BASE(_i) (0x0A0C + ((_i) << 2))
118#define _PSB_CUC_BASE_DM_SHIFT (25)
119#define _PSB_CUC_BASE_DM_MASK (0x3 << 25)
120#define _PSB_CUC_BASE_ADDR_SHIFT (0) /* 1024-bit aligned address? */
121#define _PSB_CUC_BASE_ADDR_ALIGNSHIFT (7)
122#define _PSB_CUC_BASE_ADDR_MASK (0x1FFFFFF << 0)
123#define _PSB_CUC_DM_VERTEX (0)
124#define _PSB_CUC_DM_PIXEL (1)
125#define _PSB_CUC_DM_RESERVED (2)
126#define _PSB_CUC_DM_EDM (3)
127
128#define PSB_CR_PDS_EXEC_BASE 0x0AB8
129#define _PSB_CR_PDS_EXEC_BASE_ADDR_SHIFT (20) /* 1MB aligned address */
130#define _PSB_CR_PDS_EXEC_BASE_ADDR_ALIGNSHIFT (20)
131
132#define PSB_CR_EVENT_KICKER 0x0AC4
133#define _PSB_CE_KICKER_ADDRESS_SHIFT (4) /* 128-bit aligned address */
134
135#define PSB_CR_EVENT_KICK 0x0AC8
136#define _PSB_CE_KICK_NOW (1 << 0)
137
138#define PSB_CR_BIF_DIR_LIST_BASE1 0x0C38
139
140#define PSB_CR_BIF_CTRL 0x0C00
141#define _PSB_CB_CTRL_CLEAR_FAULT (1 << 4)
142#define _PSB_CB_CTRL_INVALDC (1 << 3)
143#define _PSB_CB_CTRL_FLUSH (1 << 2)
144
145#define PSB_CR_BIF_INT_STAT 0x0C04
146
147#define PSB_CR_BIF_FAULT 0x0C08
148#define _PSB_CBI_STAT_PF_N_RW (1 << 14)
149#define _PSB_CBI_STAT_FAULT_SHIFT (0)
150#define _PSB_CBI_STAT_FAULT_MASK (0x3FFF << 0)
151#define _PSB_CBI_STAT_FAULT_CACHE (1 << 1)
152#define _PSB_CBI_STAT_FAULT_TA (1 << 2)
153#define _PSB_CBI_STAT_FAULT_VDM (1 << 3)
154#define _PSB_CBI_STAT_FAULT_2D (1 << 4)
155#define _PSB_CBI_STAT_FAULT_PBE (1 << 5)
156#define _PSB_CBI_STAT_FAULT_TSP (1 << 6)
157#define _PSB_CBI_STAT_FAULT_ISP (1 << 7)
158#define _PSB_CBI_STAT_FAULT_USSEPDS (1 << 8)
159#define _PSB_CBI_STAT_FAULT_HOST (1 << 9)
160
161#define PSB_CR_BIF_BANK0 0x0C78
162#define PSB_CR_BIF_BANK1 0x0C7C
163#define PSB_CR_BIF_DIR_LIST_BASE0 0x0C84
164#define PSB_CR_BIF_TWOD_REQ_BASE 0x0C88
165#define PSB_CR_BIF_3D_REQ_BASE 0x0CAC
166
167#define PSB_CR_2D_SOCIF 0x0E18
168#define _PSB_C2_SOCIF_FREESPACE_SHIFT (0)
169#define _PSB_C2_SOCIF_FREESPACE_MASK (0xFF << 0)
170#define _PSB_C2_SOCIF_EMPTY (0x80 << 0)
171
172#define PSB_CR_2D_BLIT_STATUS 0x0E04
173#define _PSB_C2B_STATUS_BUSY (1 << 24)
174#define _PSB_C2B_STATUS_COMPLETE_SHIFT (0)
175#define _PSB_C2B_STATUS_COMPLETE_MASK (0xFFFFFF << 0)
176
177/*
178 * 2D defs.
179 */
180
181/*
182 * 2D Slave Port Data : Block Header's Object Type
183 */
184
185#define PSB_2D_CLIP_BH (0x00000000)
186#define PSB_2D_PAT_BH (0x10000000)
187#define PSB_2D_CTRL_BH (0x20000000)
188#define PSB_2D_SRC_OFF_BH (0x30000000)
189#define PSB_2D_MASK_OFF_BH (0x40000000)
190#define PSB_2D_RESERVED1_BH (0x50000000)
191#define PSB_2D_RESERVED2_BH (0x60000000)
192#define PSB_2D_FENCE_BH (0x70000000)
193#define PSB_2D_BLIT_BH (0x80000000)
194#define PSB_2D_SRC_SURF_BH (0x90000000)
195#define PSB_2D_DST_SURF_BH (0xA0000000)
196#define PSB_2D_PAT_SURF_BH (0xB0000000)
197#define PSB_2D_SRC_PAL_BH (0xC0000000)
198#define PSB_2D_PAT_PAL_BH (0xD0000000)
199#define PSB_2D_MASK_SURF_BH (0xE0000000)
200#define PSB_2D_FLUSH_BH (0xF0000000)
201
202/*
203 * Clip Definition block (PSB_2D_CLIP_BH)
204 */
205#define PSB_2D_CLIPCOUNT_MAX (1)
206#define PSB_2D_CLIPCOUNT_MASK (0x00000000)
207#define PSB_2D_CLIPCOUNT_CLRMASK (0xFFFFFFFF)
208#define PSB_2D_CLIPCOUNT_SHIFT (0)
209/* clip rectangle min & max */
210#define PSB_2D_CLIP_XMAX_MASK (0x00FFF000)
211#define PSB_2D_CLIP_XMAX_CLRMASK (0xFF000FFF)
212#define PSB_2D_CLIP_XMAX_SHIFT (12)
213#define PSB_2D_CLIP_XMIN_MASK (0x00000FFF)
214#define PSB_2D_CLIP_XMIN_CLRMASK (0x00FFF000)
215#define PSB_2D_CLIP_XMIN_SHIFT (0)
216/* clip rectangle offset */
217#define PSB_2D_CLIP_YMAX_MASK (0x00FFF000)
218#define PSB_2D_CLIP_YMAX_CLRMASK (0xFF000FFF)
219#define PSB_2D_CLIP_YMAX_SHIFT (12)
220#define PSB_2D_CLIP_YMIN_MASK (0x00000FFF)
221#define PSB_2D_CLIP_YMIN_CLRMASK (0x00FFF000)
222#define PSB_2D_CLIP_YMIN_SHIFT (0)
223
224/*
225 * Pattern Control (PSB_2D_PAT_BH)
226 */
227#define PSB_2D_PAT_HEIGHT_MASK (0x0000001F)
228#define PSB_2D_PAT_HEIGHT_SHIFT (0)
229#define PSB_2D_PAT_WIDTH_MASK (0x000003E0)
230#define PSB_2D_PAT_WIDTH_SHIFT (5)
231#define PSB_2D_PAT_YSTART_MASK (0x00007C00)
232#define PSB_2D_PAT_YSTART_SHIFT (10)
233#define PSB_2D_PAT_XSTART_MASK (0x000F8000)
234#define PSB_2D_PAT_XSTART_SHIFT (15)
235
236/*
237 * 2D Control block (PSB_2D_CTRL_BH)
238 */
239/* Present Flags */
240#define PSB_2D_SRCCK_CTRL (0x00000001)
241#define PSB_2D_DSTCK_CTRL (0x00000002)
242#define PSB_2D_ALPHA_CTRL (0x00000004)
243/* Colour Key Colour (SRC/DST)*/
244#define PSB_2D_CK_COL_MASK (0xFFFFFFFF)
245#define PSB_2D_CK_COL_CLRMASK (0x00000000)
246#define PSB_2D_CK_COL_SHIFT (0)
247/* Colour Key Mask (SRC/DST)*/
248#define PSB_2D_CK_MASK_MASK (0xFFFFFFFF)
249#define PSB_2D_CK_MASK_CLRMASK (0x00000000)
250#define PSB_2D_CK_MASK_SHIFT (0)
251/* Alpha Control (Alpha/RGB)*/
252#define PSB_2D_GBLALPHA_MASK (0x000FF000)
253#define PSB_2D_GBLALPHA_CLRMASK (0xFFF00FFF)
254#define PSB_2D_GBLALPHA_SHIFT (12)
255#define PSB_2D_SRCALPHA_OP_MASK (0x00700000)
256#define PSB_2D_SRCALPHA_OP_CLRMASK (0xFF8FFFFF)
257#define PSB_2D_SRCALPHA_OP_SHIFT (20)
258#define PSB_2D_SRCALPHA_OP_ONE (0x00000000)
259#define PSB_2D_SRCALPHA_OP_SRC (0x00100000)
260#define PSB_2D_SRCALPHA_OP_DST (0x00200000)
261#define PSB_2D_SRCALPHA_OP_SG (0x00300000)
262#define PSB_2D_SRCALPHA_OP_DG (0x00400000)
263#define PSB_2D_SRCALPHA_OP_GBL (0x00500000)
264#define PSB_2D_SRCALPHA_OP_ZERO (0x00600000)
265#define PSB_2D_SRCALPHA_INVERT (0x00800000)
266#define PSB_2D_SRCALPHA_INVERT_CLR (0xFF7FFFFF)
267#define PSB_2D_DSTALPHA_OP_MASK (0x07000000)
268#define PSB_2D_DSTALPHA_OP_CLRMASK (0xF8FFFFFF)
269#define PSB_2D_DSTALPHA_OP_SHIFT (24)
270#define PSB_2D_DSTALPHA_OP_ONE (0x00000000)
271#define PSB_2D_DSTALPHA_OP_SRC (0x01000000)
272#define PSB_2D_DSTALPHA_OP_DST (0x02000000)
273#define PSB_2D_DSTALPHA_OP_SG (0x03000000)
274#define PSB_2D_DSTALPHA_OP_DG (0x04000000)
275#define PSB_2D_DSTALPHA_OP_GBL (0x05000000)
276#define PSB_2D_DSTALPHA_OP_ZERO (0x06000000)
277#define PSB_2D_DSTALPHA_INVERT (0x08000000)
278#define PSB_2D_DSTALPHA_INVERT_CLR (0xF7FFFFFF)
279
280#define PSB_2D_PRE_MULTIPLICATION_ENABLE (0x10000000)
281#define PSB_2D_PRE_MULTIPLICATION_CLRMASK (0xEFFFFFFF)
282#define PSB_2D_ZERO_SOURCE_ALPHA_ENABLE (0x20000000)
283#define PSB_2D_ZERO_SOURCE_ALPHA_CLRMASK (0xDFFFFFFF)
284
285/*
286 *Source Offset (PSB_2D_SRC_OFF_BH)
287 */
288#define PSB_2D_SRCOFF_XSTART_MASK ((0x00000FFF) << 12)
289#define PSB_2D_SRCOFF_XSTART_SHIFT (12)
290#define PSB_2D_SRCOFF_YSTART_MASK (0x00000FFF)
291#define PSB_2D_SRCOFF_YSTART_SHIFT (0)
292
293/*
294 * Mask Offset (PSB_2D_MASK_OFF_BH)
295 */
296#define PSB_2D_MASKOFF_XSTART_MASK ((0x00000FFF) << 12)
297#define PSB_2D_MASKOFF_XSTART_SHIFT (12)
298#define PSB_2D_MASKOFF_YSTART_MASK (0x00000FFF)
299#define PSB_2D_MASKOFF_YSTART_SHIFT (0)
300
301/*
302 * 2D Fence (see PSB_2D_FENCE_BH): bits 0:27 are ignored
303 */
304
305/*
306 *Blit Rectangle (PSB_2D_BLIT_BH)
307 */
308
309#define PSB_2D_ROT_MASK (3 << 25)
310#define PSB_2D_ROT_CLRMASK (~PSB_2D_ROT_MASK)
311#define PSB_2D_ROT_NONE (0 << 25)
312#define PSB_2D_ROT_90DEGS (1 << 25)
313#define PSB_2D_ROT_180DEGS (2 << 25)
314#define PSB_2D_ROT_270DEGS (3 << 25)
315
316#define PSB_2D_COPYORDER_MASK (3 << 23)
317#define PSB_2D_COPYORDER_CLRMASK (~PSB_2D_COPYORDER_MASK)
318#define PSB_2D_COPYORDER_TL2BR (0 << 23)
319#define PSB_2D_COPYORDER_BR2TL (1 << 23)
320#define PSB_2D_COPYORDER_TR2BL (2 << 23)
321#define PSB_2D_COPYORDER_BL2TR (3 << 23)
322
323#define PSB_2D_DSTCK_CLRMASK (0xFF9FFFFF)
324#define PSB_2D_DSTCK_DISABLE (0x00000000)
325#define PSB_2D_DSTCK_PASS (0x00200000)
326#define PSB_2D_DSTCK_REJECT (0x00400000)
327
328#define PSB_2D_SRCCK_CLRMASK (0xFFE7FFFF)
329#define PSB_2D_SRCCK_DISABLE (0x00000000)
330#define PSB_2D_SRCCK_PASS (0x00080000)
331#define PSB_2D_SRCCK_REJECT (0x00100000)
332
333#define PSB_2D_CLIP_ENABLE (0x00040000)
334
335#define PSB_2D_ALPHA_ENABLE (0x00020000)
336
337#define PSB_2D_PAT_CLRMASK (0xFFFEFFFF)
338#define PSB_2D_PAT_MASK (0x00010000)
339#define PSB_2D_USE_PAT (0x00010000)
340#define PSB_2D_USE_FILL (0x00000000)
341/*
342 * Tungsten Graphics note on rop codes: If rop A and rop B are
343 * identical, the mask surface will not be read and need not be
344 * set up.
345 */
346
347#define PSB_2D_ROP3B_MASK (0x0000FF00)
348#define PSB_2D_ROP3B_CLRMASK (0xFFFF00FF)
349#define PSB_2D_ROP3B_SHIFT (8)
350/* rop code A */
351#define PSB_2D_ROP3A_MASK (0x000000FF)
352#define PSB_2D_ROP3A_CLRMASK (0xFFFFFF00)
353#define PSB_2D_ROP3A_SHIFT (0)
354
355#define PSB_2D_ROP4_MASK (0x0000FFFF)
356/*
357 * DWORD0: (Only pass if Pattern control == Use Fill Colour)
358 * Fill Colour RGBA8888
359 */
360#define PSB_2D_FILLCOLOUR_MASK (0xFFFFFFFF)
361#define PSB_2D_FILLCOLOUR_SHIFT (0)
362/*
363 * DWORD1: (Always Present)
364 * X Start (Dest)
365 * Y Start (Dest)
366 */
367#define PSB_2D_DST_XSTART_MASK (0x00FFF000)
368#define PSB_2D_DST_XSTART_CLRMASK (0xFF000FFF)
369#define PSB_2D_DST_XSTART_SHIFT (12)
370#define PSB_2D_DST_YSTART_MASK (0x00000FFF)
371#define PSB_2D_DST_YSTART_CLRMASK (0xFFFFF000)
372#define PSB_2D_DST_YSTART_SHIFT (0)
373/*
374 * DWORD2: (Always Present)
375 * X Size (Dest)
376 * Y Size (Dest)
377 */
378#define PSB_2D_DST_XSIZE_MASK (0x00FFF000)
379#define PSB_2D_DST_XSIZE_CLRMASK (0xFF000FFF)
380#define PSB_2D_DST_XSIZE_SHIFT (12)
381#define PSB_2D_DST_YSIZE_MASK (0x00000FFF)
382#define PSB_2D_DST_YSIZE_CLRMASK (0xFFFFF000)
383#define PSB_2D_DST_YSIZE_SHIFT (0)
384
385/*
386 * Source Surface (PSB_2D_SRC_SURF_BH)
387 */
388/*
389 * WORD 0
390 */
391
392#define PSB_2D_SRC_FORMAT_MASK (0x00078000)
393#define PSB_2D_SRC_1_PAL (0x00000000)
394#define PSB_2D_SRC_2_PAL (0x00008000)
395#define PSB_2D_SRC_4_PAL (0x00010000)
396#define PSB_2D_SRC_8_PAL (0x00018000)
397#define PSB_2D_SRC_8_ALPHA (0x00020000)
398#define PSB_2D_SRC_4_ALPHA (0x00028000)
399#define PSB_2D_SRC_332RGB (0x00030000)
400#define PSB_2D_SRC_4444ARGB (0x00038000)
401#define PSB_2D_SRC_555RGB (0x00040000)
402#define PSB_2D_SRC_1555ARGB (0x00048000)
403#define PSB_2D_SRC_565RGB (0x00050000)
404#define PSB_2D_SRC_0888ARGB (0x00058000)
405#define PSB_2D_SRC_8888ARGB (0x00060000)
406#define PSB_2D_SRC_8888UYVY (0x00068000)
407#define PSB_2D_SRC_RESERVED (0x00070000)
408#define PSB_2D_SRC_1555ARGB_LOOKUP (0x00078000)
409
410
411#define PSB_2D_SRC_STRIDE_MASK (0x00007FFF)
412#define PSB_2D_SRC_STRIDE_CLRMASK (0xFFFF8000)
413#define PSB_2D_SRC_STRIDE_SHIFT (0)
414/*
415 * WORD 1 - Base Address
416 */
417#define PSB_2D_SRC_ADDR_MASK (0x0FFFFFFC)
418#define PSB_2D_SRC_ADDR_CLRMASK (0x00000003)
419#define PSB_2D_SRC_ADDR_SHIFT (2)
420#define PSB_2D_SRC_ADDR_ALIGNSHIFT (2)
421
422/*
423 * Pattern Surface (PSB_2D_PAT_SURF_BH)
424 */
425/*
426 * WORD 0
427 */
428
429#define PSB_2D_PAT_FORMAT_MASK (0x00078000)
430#define PSB_2D_PAT_1_PAL (0x00000000)
431#define PSB_2D_PAT_2_PAL (0x00008000)
432#define PSB_2D_PAT_4_PAL (0x00010000)
433#define PSB_2D_PAT_8_PAL (0x00018000)
434#define PSB_2D_PAT_8_ALPHA (0x00020000)
435#define PSB_2D_PAT_4_ALPHA (0x00028000)
436#define PSB_2D_PAT_332RGB (0x00030000)
437#define PSB_2D_PAT_4444ARGB (0x00038000)
438#define PSB_2D_PAT_555RGB (0x00040000)
439#define PSB_2D_PAT_1555ARGB (0x00048000)
440#define PSB_2D_PAT_565RGB (0x00050000)
441#define PSB_2D_PAT_0888ARGB (0x00058000)
442#define PSB_2D_PAT_8888ARGB (0x00060000)
443
444#define PSB_2D_PAT_STRIDE_MASK (0x00007FFF)
445#define PSB_2D_PAT_STRIDE_CLRMASK (0xFFFF8000)
446#define PSB_2D_PAT_STRIDE_SHIFT (0)
447/*
448 * WORD 1 - Base Address
449 */
450#define PSB_2D_PAT_ADDR_MASK (0x0FFFFFFC)
451#define PSB_2D_PAT_ADDR_CLRMASK (0x00000003)
452#define PSB_2D_PAT_ADDR_SHIFT (2)
453#define PSB_2D_PAT_ADDR_ALIGNSHIFT (2)
454
455/*
456 * Destination Surface (PSB_2D_DST_SURF_BH)
457 */
458/*
459 * WORD 0
460 */
461
462#define PSB_2D_DST_FORMAT_MASK (0x00078000)
463#define PSB_2D_DST_332RGB (0x00030000)
464#define PSB_2D_DST_4444ARGB (0x00038000)
465#define PSB_2D_DST_555RGB (0x00040000)
466#define PSB_2D_DST_1555ARGB (0x00048000)
467#define PSB_2D_DST_565RGB (0x00050000)
468#define PSB_2D_DST_0888ARGB (0x00058000)
469#define PSB_2D_DST_8888ARGB (0x00060000)
470#define PSB_2D_DST_8888AYUV (0x00070000)
471
472#define PSB_2D_DST_STRIDE_MASK (0x00007FFF)
473#define PSB_2D_DST_STRIDE_CLRMASK (0xFFFF8000)
474#define PSB_2D_DST_STRIDE_SHIFT (0)
475/*
476 * WORD 1 - Base Address
477 */
478#define PSB_2D_DST_ADDR_MASK (0x0FFFFFFC)
479#define PSB_2D_DST_ADDR_CLRMASK (0x00000003)
480#define PSB_2D_DST_ADDR_SHIFT (2)
481#define PSB_2D_DST_ADDR_ALIGNSHIFT (2)
482
483/*
484 * Mask Surface (PSB_2D_MASK_SURF_BH)
485 */
486/*
487 * WORD 0
488 */
489#define PSB_2D_MASK_STRIDE_MASK (0x00007FFF)
490#define PSB_2D_MASK_STRIDE_CLRMASK (0xFFFF8000)
491#define PSB_2D_MASK_STRIDE_SHIFT (0)
492/*
493 * WORD 1 - Base Address
494 */
495#define PSB_2D_MASK_ADDR_MASK (0x0FFFFFFC)
496#define PSB_2D_MASK_ADDR_CLRMASK (0x00000003)
497#define PSB_2D_MASK_ADDR_SHIFT (2)
498#define PSB_2D_MASK_ADDR_ALIGNSHIFT (2)
499
500/*
501 * Source Palette (PSB_2D_SRC_PAL_BH)
502 */
503
504#define PSB_2D_SRCPAL_ADDR_SHIFT (0)
505#define PSB_2D_SRCPAL_ADDR_CLRMASK (0xF0000007)
506#define PSB_2D_SRCPAL_ADDR_MASK (0x0FFFFFF8)
507#define PSB_2D_SRCPAL_BYTEALIGN (1024)
508
509/*
510 * Pattern Palette (PSB_2D_PAT_PAL_BH)
511 */
512
513#define PSB_2D_PATPAL_ADDR_SHIFT (0)
514#define PSB_2D_PATPAL_ADDR_CLRMASK (0xF0000007)
515#define PSB_2D_PATPAL_ADDR_MASK (0x0FFFFFF8)
516#define PSB_2D_PATPAL_BYTEALIGN (1024)
517
518/*
519 * Rop3 Codes (2 LS bytes)
520 */
521
522#define PSB_2D_ROP3_SRCCOPY (0xCCCC)
523#define PSB_2D_ROP3_PATCOPY (0xF0F0)
524#define PSB_2D_ROP3_WHITENESS (0xFFFF)
525#define PSB_2D_ROP3_BLACKNESS (0x0000)
526#define PSB_2D_ROP3_SRC (0xCC)
527#define PSB_2D_ROP3_PAT (0xF0)
528#define PSB_2D_ROP3_DST (0xAA)
529
530/*
531 * Sizes.
532 */
533
534#define PSB_SCENE_HW_COOKIE_SIZE 16
535#define PSB_TA_MEM_HW_COOKIE_SIZE 16
536
537/*
538 * Scene stuff.
539 */
540
541#define PSB_NUM_HW_SCENES 2
542
543/*
544 * Scheduler completion actions.
545 */
546
547#define PSB_RASTER_BLOCK 0
548#define PSB_RASTER 1
549#define PSB_RETURN 2
550#define PSB_TA 3
551
552/* Power management */
553#define PSB_PUNIT_PORT 0x04
554#define PSB_OSPMBA 0x78
555#define PSB_APMBA 0x7a
556#define PSB_APM_CMD 0x0
557#define PSB_APM_STS 0x04
558#define PSB_PWRGT_VID_ENC_MASK 0x30
559#define PSB_PWRGT_VID_DEC_MASK 0xc
560#define PSB_PWRGT_GL3_MASK 0xc0
561
562#define PSB_PM_SSC 0x20
563#define PSB_PM_SSS 0x30
564#define PSB_PWRGT_DISPLAY_MASK 0xc /*on a different BA than video/gfx*/
565#define MDFLD_PWRGT_DISPLAY_A_CNTR 0x0000000c
566#define MDFLD_PWRGT_DISPLAY_B_CNTR 0x0000c000
567#define MDFLD_PWRGT_DISPLAY_C_CNTR 0x00030000
568#define MDFLD_PWRGT_DISP_MIPI_CNTR 0x000c0000
569#define MDFLD_PWRGT_DISPLAY_CNTR (MDFLD_PWRGT_DISPLAY_A_CNTR | MDFLD_PWRGT_DISPLAY_B_CNTR | MDFLD_PWRGT_DISPLAY_C_CNTR | MDFLD_PWRGT_DISP_MIPI_CNTR) /* 0x000fc00c */
570/* Display SSS register bits are different in A0 vs. B0 */
571#define PSB_PWRGT_GFX_MASK 0x3
572#define MDFLD_PWRGT_DISPLAY_A_STS 0x000000c0
573#define MDFLD_PWRGT_DISPLAY_B_STS 0x00000300
574#define MDFLD_PWRGT_DISPLAY_C_STS 0x00000c00
575#define PSB_PWRGT_GFX_MASK_B0 0xc3
576#define MDFLD_PWRGT_DISPLAY_A_STS_B0 0x0000000c
577#define MDFLD_PWRGT_DISPLAY_B_STS_B0 0x0000c000
578#define MDFLD_PWRGT_DISPLAY_C_STS_B0 0x00030000
579#define MDFLD_PWRGT_DISP_MIPI_STS 0x000c0000
580#define MDFLD_PWRGT_DISPLAY_STS_A0 (MDFLD_PWRGT_DISPLAY_A_STS | MDFLD_PWRGT_DISPLAY_B_STS | MDFLD_PWRGT_DISPLAY_C_STS | MDFLD_PWRGT_DISP_MIPI_STS) /* 0x000fc00c */
581#define MDFLD_PWRGT_DISPLAY_STS_B0 (MDFLD_PWRGT_DISPLAY_A_STS_B0 | MDFLD_PWRGT_DISPLAY_B_STS_B0 | MDFLD_PWRGT_DISPLAY_C_STS_B0 | MDFLD_PWRGT_DISP_MIPI_STS) /* 0x000fc00c */
582#endif
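Most fields in the register header above come as MASK/SHIFT (and often CLRMASK) triples; the intended usage is to clear the old field with the clear-mask, then shift the new value into place and trim it with the mask. An illustrative user-space helper (hypothetical; the constants are copied from the header above):

#include <stdint.h>
#include <stdio.h>

#define PSB_2D_DST_XSTART_MASK		(0x00FFF000)
#define PSB_2D_DST_XSTART_CLRMASK	(0xFF000FFF)
#define PSB_2D_DST_XSTART_SHIFT		(12)
#define PSB_2D_DST_YSTART_MASK		(0x00000FFF)
#define PSB_2D_DST_YSTART_CLRMASK	(0xFFFFF000)
#define PSB_2D_DST_YSTART_SHIFT		(0)

/* Pack a destination start position into the blit DWORD1 layout. */
static uint32_t pack_dst_start(uint32_t word, uint32_t x, uint32_t y)
{
	word &= PSB_2D_DST_XSTART_CLRMASK & PSB_2D_DST_YSTART_CLRMASK;
	word |= (x << PSB_2D_DST_XSTART_SHIFT) & PSB_2D_DST_XSTART_MASK;
	word |= (y << PSB_2D_DST_YSTART_SHIFT) & PSB_2D_DST_YSTART_MASK;
	return word;
}

int main(void)
{
	printf("0x%08x\n", pack_dst_start(0, 100, 50));	/* 0x00064032 */
	return 0;
}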
diff --git a/drivers/staging/media/go7007/go7007-usb.c b/drivers/staging/media/go7007/go7007-usb.c
index 70e006b50f29..5443e25086e9 100644
--- a/drivers/staging/media/go7007/go7007-usb.c
+++ b/drivers/staging/media/go7007/go7007-usb.c
@@ -1279,3 +1279,4 @@ static struct usb_driver go7007_usb_driver = {
1279}; 1279};
1280 1280
1281module_usb_driver(go7007_usb_driver); 1281module_usb_driver(go7007_usb_driver);
1282MODULE_LICENSE("GPL v2");
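The one-line go7007 change above adds the license tag that module_usb_driver() based modules still need: without MODULE_LICENSE the module taints the kernel and cannot use GPL-only symbols. A minimal skeleton of the pattern, with all "example_" names hypothetical:

#include <linux/module.h>
#include <linux/usb.h>

static const struct usb_device_id example_id_table[] = {
	{ USB_DEVICE(0x1234, 0x5678) },	/* hypothetical IDs */
	{ }
};
MODULE_DEVICE_TABLE(usb, example_id_table);

static int example_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	return 0;
}

static void example_disconnect(struct usb_interface *intf)
{
}

static struct usb_driver example_usb_driver = {
	.name		= "example",
	.probe		= example_probe,
	.disconnect	= example_disconnect,
	.id_table	= example_id_table,
};

/* Expands to the module_init()/module_exit() registration boilerplate. */
module_usb_driver(example_usb_driver);

MODULE_LICENSE("GPL v2");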
diff --git a/drivers/staging/omapdrm/Makefile b/drivers/staging/omapdrm/Makefile
index 592cf69020cd..d9cdc120d122 100644
--- a/drivers/staging/omapdrm/Makefile
+++ b/drivers/staging/omapdrm/Makefile
@@ -7,6 +7,7 @@ ccflags-y := -Iinclude/drm -Werror
7omapdrm-y := omap_drv.o \ 7omapdrm-y := omap_drv.o \
8 omap_debugfs.o \ 8 omap_debugfs.o \
9 omap_crtc.o \ 9 omap_crtc.o \
10 omap_plane.o \
10 omap_encoder.o \ 11 omap_encoder.o \
11 omap_connector.o \ 12 omap_connector.o \
12 omap_fb.o \ 13 omap_fb.o \
diff --git a/drivers/staging/omapdrm/omap_crtc.c b/drivers/staging/omapdrm/omap_crtc.c
index cffdf5e12394..17ca163e5896 100644
--- a/drivers/staging/omapdrm/omap_crtc.c
+++ b/drivers/staging/omapdrm/omap_crtc.c
@@ -27,196 +27,95 @@
27 27
28struct omap_crtc { 28struct omap_crtc {
29 struct drm_crtc base; 29 struct drm_crtc base;
30 struct omap_overlay *ovl; 30 struct drm_plane *plane;
31 struct omap_overlay_info info; 31 const char *name;
32 int id; 32 int id;
33 33
34 /* if there is a pending flip, this will be non-null: */ 34 /* if there is a pending flip, these will be non-null: */
35 struct drm_pending_vblank_event *event; 35 struct drm_pending_vblank_event *event;
36 struct drm_framebuffer *old_fb;
36}; 37};
37 38
38/* push changes down to dss2 */
39static int commit(struct drm_crtc *crtc)
40{
41 struct drm_device *dev = crtc->dev;
42 struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
43 struct omap_overlay *ovl = omap_crtc->ovl;
44 struct omap_overlay_info *info = &omap_crtc->info;
45 int ret;
46
47 DBG("%s", omap_crtc->ovl->name);
48 DBG("%dx%d -> %dx%d (%d)", info->width, info->height, info->out_width,
49 info->out_height, info->screen_width);
50 DBG("%d,%d %08x", info->pos_x, info->pos_y, info->paddr);
51
52 /* NOTE: do we want to do this at all here, or just wait
53 * for dpms(ON) since other CRTC's may not have their mode
54 * set yet, so fb dimensions may still change..
55 */
56 ret = ovl->set_overlay_info(ovl, info);
57 if (ret) {
58 dev_err(dev->dev, "could not set overlay info\n");
59 return ret;
60 }
61
62 /* our encoder doesn't necessarily get a commit() after this, in
63 * particular in the dpms() and mode_set_base() cases, so force the
64 * manager to update:
65 *
66 * could this be in the encoder somehow?
67 */
68 if (ovl->manager) {
69 ret = ovl->manager->apply(ovl->manager);
70 if (ret) {
71 dev_err(dev->dev, "could not apply settings\n");
72 return ret;
73 }
74 }
75
76 if (info->enabled) {
77 omap_framebuffer_flush(crtc->fb, crtc->x, crtc->y,
78 crtc->fb->width, crtc->fb->height);
79 }
80
81 return 0;
82}
83
84/* update parameters that are dependent on the framebuffer dimensions and
85 * position within the fb that this crtc scans out from. This is called
86 * when framebuffer dimensions or x,y base may have changed, either due
87 * to our mode, or a change in another crtc that is scanning out of the
88 * same fb.
89 */
90static void update_scanout(struct drm_crtc *crtc)
91{
92 struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
93 dma_addr_t paddr;
94 unsigned int screen_width;
95
96 omap_framebuffer_get_buffer(crtc->fb, crtc->x, crtc->y,
97 NULL, &paddr, &screen_width);
98
99 DBG("%s: %d,%d: %08x (%d)", omap_crtc->ovl->name,
100 crtc->x, crtc->y, (u32)paddr, screen_width);
101
102 omap_crtc->info.paddr = paddr;
103 omap_crtc->info.screen_width = screen_width;
104}
105
106static void omap_crtc_gamma_set(struct drm_crtc *crtc, 39static void omap_crtc_gamma_set(struct drm_crtc *crtc,
107 u16 *red, u16 *green, u16 *blue, uint32_t start, uint32_t size) 40 u16 *red, u16 *green, u16 *blue, uint32_t start, uint32_t size)
108{ 41{
109 struct omap_crtc *omap_crtc = to_omap_crtc(crtc); 42 /* not supported.. at least not yet */
110 DBG("%s", omap_crtc->ovl->name);
111} 43}
112 44
113static void omap_crtc_destroy(struct drm_crtc *crtc) 45static void omap_crtc_destroy(struct drm_crtc *crtc)
114{ 46{
115 struct omap_crtc *omap_crtc = to_omap_crtc(crtc); 47 struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
116 DBG("%s", omap_crtc->ovl->name); 48 omap_crtc->plane->funcs->destroy(omap_crtc->plane);
117 drm_crtc_cleanup(crtc); 49 drm_crtc_cleanup(crtc);
118 kfree(omap_crtc); 50 kfree(omap_crtc);
119} 51}
120 52
121static void omap_crtc_dpms(struct drm_crtc *crtc, int mode) 53static void omap_crtc_dpms(struct drm_crtc *crtc, int mode)
122{ 54{
55 struct omap_drm_private *priv = crtc->dev->dev_private;
123 struct omap_crtc *omap_crtc = to_omap_crtc(crtc); 56 struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
57 int i;
124 58
125 DBG("%s: %d", omap_crtc->ovl->name, mode); 59 WARN_ON(omap_plane_dpms(omap_crtc->plane, mode));
126 60
127 if (mode == DRM_MODE_DPMS_ON) { 61 for (i = 0; i < priv->num_planes; i++) {
128 update_scanout(crtc); 62 struct drm_plane *plane = priv->planes[i];
129 omap_crtc->info.enabled = true; 63 if (plane->crtc == crtc)
130 } else { 64 WARN_ON(omap_plane_dpms(plane, mode));
131 omap_crtc->info.enabled = false;
132 } 65 }
133
134 WARN_ON(commit(crtc));
135} 66}
136 67
137static bool omap_crtc_mode_fixup(struct drm_crtc *crtc, 68static bool omap_crtc_mode_fixup(struct drm_crtc *crtc,
138 struct drm_display_mode *mode, 69 struct drm_display_mode *mode,
139 struct drm_display_mode *adjusted_mode) 70 struct drm_display_mode *adjusted_mode)
140{ 71{
141 struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
142 DBG("%s", omap_crtc->ovl->name);
143 return true; 72 return true;
144} 73}
145 74
146static int omap_crtc_mode_set(struct drm_crtc *crtc, 75static int omap_crtc_mode_set(struct drm_crtc *crtc,
147 struct drm_display_mode *mode, 76 struct drm_display_mode *mode,
148 struct drm_display_mode *adjusted_mode, 77 struct drm_display_mode *adjusted_mode,
149 int x, int y, 78 int x, int y,
150 struct drm_framebuffer *old_fb) 79 struct drm_framebuffer *old_fb)
151{ 80{
152 struct omap_crtc *omap_crtc = to_omap_crtc(crtc); 81 struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
82 struct drm_plane *plane = omap_crtc->plane;
153 83
154 DBG("%s: %d,%d: %dx%d", omap_crtc->ovl->name, x, y, 84 return omap_plane_mode_set(plane, crtc, crtc->fb,
155 mode->hdisplay, mode->vdisplay); 85 0, 0, mode->hdisplay, mode->vdisplay,
156 86 x << 16, y << 16,
157 /* just use adjusted mode */ 87 mode->hdisplay << 16, mode->vdisplay << 16);
158 mode = adjusted_mode;
159
160 omap_crtc->info.width = mode->hdisplay;
161 omap_crtc->info.height = mode->vdisplay;
162 omap_crtc->info.out_width = mode->hdisplay;
163 omap_crtc->info.out_height = mode->vdisplay;
164 omap_crtc->info.color_mode = OMAP_DSS_COLOR_RGB24U;
165 omap_crtc->info.rotation_type = OMAP_DSS_ROT_DMA;
166 omap_crtc->info.rotation = OMAP_DSS_ROT_0;
167 omap_crtc->info.global_alpha = 0xff;
168 omap_crtc->info.mirror = 0;
169 omap_crtc->info.mirror = 0;
170 omap_crtc->info.pos_x = 0;
171 omap_crtc->info.pos_y = 0;
172#if 0 /* re-enable when these are available in DSS2 driver */
173 omap_crtc->info.zorder = 3; /* GUI in the front, video behind */
174 omap_crtc->info.min_x_decim = 1;
175 omap_crtc->info.max_x_decim = 1;
176 omap_crtc->info.min_y_decim = 1;
177 omap_crtc->info.max_y_decim = 1;
178#endif
179
180 update_scanout(crtc);
181
182 return 0;
183} 88}
184 89
185static void omap_crtc_prepare(struct drm_crtc *crtc) 90static void omap_crtc_prepare(struct drm_crtc *crtc)
186{ 91{
187 struct omap_crtc *omap_crtc = to_omap_crtc(crtc); 92 struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
188 struct omap_overlay *ovl = omap_crtc->ovl; 93 DBG("%s", omap_crtc->name);
189
190 DBG("%s", omap_crtc->ovl->name);
191
192 ovl->get_overlay_info(ovl, &omap_crtc->info);
193
194 omap_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 94 omap_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
195} 95}
196 96
197static void omap_crtc_commit(struct drm_crtc *crtc) 97static void omap_crtc_commit(struct drm_crtc *crtc)
198{ 98{
199 struct omap_crtc *omap_crtc = to_omap_crtc(crtc); 99 struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
200 DBG("%s", omap_crtc->ovl->name); 100 DBG("%s", omap_crtc->name);
201 omap_crtc_dpms(crtc, DRM_MODE_DPMS_ON); 101 omap_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
202} 102}
203 103
204static int omap_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, 104static int omap_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
205 struct drm_framebuffer *old_fb) 105 struct drm_framebuffer *old_fb)
206{ 106{
207 struct omap_crtc *omap_crtc = to_omap_crtc(crtc); 107 struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
108 struct drm_plane *plane = omap_crtc->plane;
109 struct drm_display_mode *mode = &crtc->mode;
208 110
209 DBG("%s %d,%d: fb=%p", omap_crtc->ovl->name, x, y, old_fb); 111 return plane->funcs->update_plane(plane, crtc, crtc->fb,
210 112 0, 0, mode->hdisplay, mode->vdisplay,
211 update_scanout(crtc); 113 x << 16, y << 16,
212 114 mode->hdisplay << 16, mode->vdisplay << 16);
213 return commit(crtc);
214} 115}
215 116
216static void omap_crtc_load_lut(struct drm_crtc *crtc) 117static void omap_crtc_load_lut(struct drm_crtc *crtc)
217{ 118{
218 struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
219 DBG("%s", omap_crtc->ovl->name);
220} 119}
221 120
222static void page_flip_cb(void *arg) 121static void page_flip_cb(void *arg)
@@ -225,15 +124,16 @@ static void page_flip_cb(void *arg)
225 struct drm_device *dev = crtc->dev; 124 struct drm_device *dev = crtc->dev;
226 struct omap_crtc *omap_crtc = to_omap_crtc(crtc); 125 struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
227 struct drm_pending_vblank_event *event = omap_crtc->event; 126 struct drm_pending_vblank_event *event = omap_crtc->event;
127 struct drm_framebuffer *old_fb = omap_crtc->old_fb;
228 struct timeval now; 128 struct timeval now;
229 unsigned long flags; 129 unsigned long flags;
230 130
231 WARN_ON(!event); 131 WARN_ON(!event);
232 132
233 omap_crtc->event = NULL; 133 omap_crtc->event = NULL;
134 omap_crtc->old_fb = NULL;
234 135
235 update_scanout(crtc); 136 omap_crtc_mode_set_base(crtc, crtc->x, crtc->y, old_fb);
236 WARN_ON(commit(crtc));
237 137
238 /* wakeup userspace */ 138 /* wakeup userspace */
239 /* TODO: this should happen *after* flip in vsync IRQ handler */ 139 /* TODO: this should happen *after* flip in vsync IRQ handler */
@@ -264,10 +164,11 @@ static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
264 return -EINVAL; 164 return -EINVAL;
265 } 165 }
266 166
267 crtc->fb = fb; 167 omap_crtc->old_fb = crtc->fb;
268 omap_crtc->event = event; 168 omap_crtc->event = event;
169 crtc->fb = fb;
269 170
270 omap_gem_op_async(omap_framebuffer_bo(fb), OMAP_GEM_READ, 171 omap_gem_op_async(omap_framebuffer_bo(fb, 0), OMAP_GEM_READ,
271 page_flip_cb, crtc); 172 page_flip_cb, crtc);
272 173
273 return 0; 174 return 0;
@@ -290,12 +191,6 @@ static const struct drm_crtc_helper_funcs omap_crtc_helper_funcs = {
290 .load_lut = omap_crtc_load_lut, 191 .load_lut = omap_crtc_load_lut,
291}; 192};
292 193
293struct omap_overlay *omap_crtc_get_overlay(struct drm_crtc *crtc)
294{
295 struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
296 return omap_crtc->ovl;
297}
298
299/* initialize crtc */ 194/* initialize crtc */
300struct drm_crtc *omap_crtc_init(struct drm_device *dev, 195struct drm_crtc *omap_crtc_init(struct drm_device *dev,
301 struct omap_overlay *ovl, int id) 196 struct omap_overlay *ovl, int id)
@@ -310,9 +205,13 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
310 goto fail; 205 goto fail;
311 } 206 }
312 207
313 omap_crtc->ovl = ovl;
314 omap_crtc->id = id;
315 crtc = &omap_crtc->base; 208 crtc = &omap_crtc->base;
209
210 omap_crtc->plane = omap_plane_init(dev, ovl, (1 << id), true);
211 omap_crtc->plane->crtc = crtc;
212 omap_crtc->name = ovl->name;
213 omap_crtc->id = id;
214
316 drm_crtc_init(dev, crtc, &omap_crtc_funcs); 215 drm_crtc_init(dev, crtc, &omap_crtc_funcs);
317 drm_crtc_helper_add(crtc, &omap_crtc_helper_funcs); 216 drm_crtc_helper_add(crtc, &omap_crtc_helper_funcs);
318 217
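In the converted omap_crtc.c above the CRTC now scans out through a drm_plane, so omap_crtc_mode_set and omap_crtc_mode_set_base pass destination coordinates in whole pixels but source coordinates in the 16.16 fixed-point format the plane update path expects, hence the << 16 shifts. A small self-contained sketch of that convention (struct and helper names are illustrative, not the DRM API):

#include <stdint.h>
#include <stdio.h>

/* Plane updates: dst in integer pixels, src in 16.16 fixed point. */
struct plane_update {
	int crtc_x, crtc_y;
	unsigned int crtc_w, crtc_h;	/* destination, pixels */
	uint32_t src_x, src_y;		/* source, 16.16 fixed point */
	uint32_t src_w, src_h;
};

static struct plane_update full_crtc(int x, int y, int hdisplay, int vdisplay)
{
	struct plane_update u = {
		.crtc_w = hdisplay, .crtc_h = vdisplay,
		.src_x = (uint32_t)x << 16, .src_y = (uint32_t)y << 16,
		.src_w = (uint32_t)hdisplay << 16,
		.src_h = (uint32_t)vdisplay << 16,
	};
	return u;
}

int main(void)
{
	struct plane_update u = full_crtc(0, 0, 1280, 720);
	printf("src_w = %u.%u\n", u.src_w >> 16, u.src_w & 0xffff);
	return 0;
}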
diff --git a/drivers/staging/omapdrm/omap_drv.c b/drivers/staging/omapdrm/omap_drv.c
index 602aa2dd49c8..3bbea9aac404 100644
--- a/drivers/staging/omapdrm/omap_drv.c
+++ b/drivers/staging/omapdrm/omap_drv.c
@@ -204,12 +204,6 @@ static int create_crtc(struct drm_device *dev, struct omap_overlay *ovl,
204 struct omap_overlay_manager *mgr = NULL; 204 struct omap_overlay_manager *mgr = NULL;
205 struct drm_crtc *crtc; 205 struct drm_crtc *crtc;
206 206
207 if (ovl->manager) {
208 DBG("disconnecting %s from %s", ovl->name,
209 ovl->manager->name);
210 ovl->unset_manager(ovl);
211 }
212
213 /* find next best connector, ones with detected connection first 207 /* find next best connector, ones with detected connection first
214 */ 208 */
215 while (*j < priv->num_connectors && !mgr) { 209 while (*j < priv->num_connectors && !mgr) {
@@ -245,11 +239,6 @@ static int create_crtc(struct drm_device *dev, struct omap_overlay *ovl,
245 (*j)++; 239 (*j)++;
246 } 240 }
247 241
248 if (mgr) {
249 DBG("connecting %s to %s", ovl->name, mgr->name);
250 ovl->set_manager(ovl, mgr);
251 }
252
253 crtc = omap_crtc_init(dev, ovl, priv->num_crtcs); 242 crtc = omap_crtc_init(dev, ovl, priv->num_crtcs);
254 243
255 if (!crtc) { 244 if (!crtc) {
@@ -265,6 +254,26 @@ static int create_crtc(struct drm_device *dev, struct omap_overlay *ovl,
265 return 0; 254 return 0;
266} 255}
267 256
257static int create_plane(struct drm_device *dev, struct omap_overlay *ovl,
258 unsigned int possible_crtcs)
259{
260 struct omap_drm_private *priv = dev->dev_private;
261 struct drm_plane *plane =
262 omap_plane_init(dev, ovl, possible_crtcs, false);
263
264 if (!plane) {
265 dev_err(dev->dev, "could not create plane: %s\n",
266 ovl->name);
267 return -ENOMEM;
268 }
269
270 BUG_ON(priv->num_planes >= ARRAY_SIZE(priv->planes));
271
272 priv->planes[priv->num_planes++] = plane;
273
274 return 0;
275}
276
268static int match_dev_name(struct omap_dss_device *dssdev, void *data) 277static int match_dev_name(struct omap_dss_device *dssdev, void *data)
269{ 278{
270 return !strcmp(dssdev->name, data); 279 return !strcmp(dssdev->name, data);
@@ -332,6 +341,12 @@ static int omap_modeset_init(struct drm_device *dev)
332 omap_dss_get_overlay(kms_pdata->ovl_ids[i]); 341 omap_dss_get_overlay(kms_pdata->ovl_ids[i]);
333 create_crtc(dev, ovl, &j, connected_connectors); 342 create_crtc(dev, ovl, &j, connected_connectors);
334 } 343 }
344
345 for (i = 0; i < kms_pdata->pln_cnt; i++) {
346 struct omap_overlay *ovl =
347 omap_dss_get_overlay(kms_pdata->pln_ids[i]);
348 create_plane(dev, ovl, (1 << priv->num_crtcs) - 1);
349 }
335 } else { 350 } else {
336 /* otherwise just grab up to CONFIG_DRM_OMAP_NUM_CRTCS and try 351 /* otherwise just grab up to CONFIG_DRM_OMAP_NUM_CRTCS and try
337 * to make educated guesses about everything else 352 * to make educated guesses about everything else
@@ -353,6 +368,12 @@ static int omap_modeset_init(struct drm_device *dev)
353 create_crtc(dev, omap_dss_get_overlay(i), 368 create_crtc(dev, omap_dss_get_overlay(i),
354 &j, connected_connectors); 369 &j, connected_connectors);
355 } 370 }
371
372 /* use any remaining overlays as drm planes */
373 for (; i < omap_dss_get_num_overlays(); i++) {
374 struct omap_overlay *ovl = omap_dss_get_overlay(i);
375 create_plane(dev, ovl, (1 << priv->num_crtcs) - 1);
376 }
356 } 377 }
357 378
358 /* for now keep the mapping of CRTCs and encoders static.. */ 379 /* for now keep the mapping of CRTCs and encoders static.. */
@@ -361,15 +382,7 @@ static int omap_modeset_init(struct drm_device *dev)
361 struct omap_overlay_manager *mgr = 382 struct omap_overlay_manager *mgr =
362 omap_encoder_get_manager(encoder); 383 omap_encoder_get_manager(encoder);
363 384
364 encoder->possible_crtcs = 0; 385 encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
365
366 for (j = 0; j < priv->num_crtcs; j++) {
367 struct omap_overlay *ovl =
368 omap_crtc_get_overlay(priv->crtcs[j]);
369 if (ovl->manager == mgr) {
370 encoder->possible_crtcs |= (1 << j);
371 }
372 }
373 386
374 DBG("%s: possible_crtcs=%08x", mgr->name, 387 DBG("%s: possible_crtcs=%08x", mgr->name,
375 encoder->possible_crtcs); 388 encoder->possible_crtcs);
@@ -377,8 +390,8 @@ static int omap_modeset_init(struct drm_device *dev)
377 390
378 dump_video_chains(); 391 dump_video_chains();
379 392
380 dev->mode_config.min_width = 256; 393 dev->mode_config.min_width = 32;
381 dev->mode_config.min_height = 256; 394 dev->mode_config.min_height = 32;
382 395
383 /* note: eventually will need some cpu_is_omapXYZ() type stuff here 396 /* note: eventually will need some cpu_is_omapXYZ() type stuff here
384 * to fill in these limits properly on different OMAP generations.. 397 * to fill in these limits properly on different OMAP generations..
@@ -708,6 +721,18 @@ static struct vm_operations_struct omap_gem_vm_ops = {
708 .close = drm_gem_vm_close, 721 .close = drm_gem_vm_close,
709}; 722};
710 723
724static const struct file_operations omapdriver_fops = {
725 .owner = THIS_MODULE,
726 .open = drm_open,
727 .unlocked_ioctl = drm_ioctl,
728 .release = drm_release,
729 .mmap = omap_gem_mmap,
730 .poll = drm_poll,
731 .fasync = drm_fasync,
732 .read = drm_read,
733 .llseek = noop_llseek,
734};
735
711static struct drm_driver omap_drm_driver = { 736static struct drm_driver omap_drm_driver = {
712 .driver_features = 737 .driver_features =
713 DRIVER_HAVE_IRQ | DRIVER_MODESET | DRIVER_GEM, 738 DRIVER_HAVE_IRQ | DRIVER_MODESET | DRIVER_GEM,
@@ -738,17 +763,7 @@ static struct drm_driver omap_drm_driver = {
738 .dumb_destroy = omap_gem_dumb_destroy, 763 .dumb_destroy = omap_gem_dumb_destroy,
739 .ioctls = ioctls, 764 .ioctls = ioctls,
740 .num_ioctls = DRM_OMAP_NUM_IOCTLS, 765 .num_ioctls = DRM_OMAP_NUM_IOCTLS,
741 .fops = { 766 .fops = &omapdriver_fops,
742 .owner = THIS_MODULE,
743 .open = drm_open,
744 .unlocked_ioctl = drm_ioctl,
745 .release = drm_release,
746 .mmap = omap_gem_mmap,
747 .poll = drm_poll,
748 .fasync = drm_fasync,
749 .read = drm_read,
750 .llseek = noop_llseek,
751 },
752 .name = DRIVER_NAME, 767 .name = DRIVER_NAME,
753 .desc = DRIVER_DESC, 768 .desc = DRIVER_DESC,
754 .date = DRIVER_DATE, 769 .date = DRIVER_DATE,
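For reference, the simplified possible_crtcs assignment above is just an all-ones bitmask over the CRTCs that exist: with priv->num_crtcs == 3, (1 << 3) - 1 == 0x7, so the encoder (and, via create_plane(), every overlay-backed plane) may be routed to CRTC 0, 1 or 2.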
diff --git a/drivers/staging/omapdrm/omap_drv.h b/drivers/staging/omapdrm/omap_drv.h
index 76c42515ecc5..61fe022dda5b 100644
--- a/drivers/staging/omapdrm/omap_drv.h
+++ b/drivers/staging/omapdrm/omap_drv.h
@@ -24,6 +24,7 @@
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/types.h> 25#include <linux/types.h>
26#include <drm/drmP.h> 26#include <drm/drmP.h>
27#include <drm/drm_crtc_helper.h>
27#include "omap_drm.h" 28#include "omap_drm.h"
28#include "omap_priv.h" 29#include "omap_priv.h"
29 30
@@ -41,6 +42,8 @@
41struct omap_drm_private { 42struct omap_drm_private {
42 unsigned int num_crtcs; 43 unsigned int num_crtcs;
43 struct drm_crtc *crtcs[8]; 44 struct drm_crtc *crtcs[8];
45 unsigned int num_planes;
46 struct drm_plane *planes[8];
44 unsigned int num_encoders; 47 unsigned int num_encoders;
45 struct drm_encoder *encoders[8]; 48 struct drm_encoder *encoders[8];
46 unsigned int num_connectors; 49 unsigned int num_connectors;
@@ -61,7 +64,17 @@ void omap_fbdev_free(struct drm_device *dev);
61 64
62struct drm_crtc *omap_crtc_init(struct drm_device *dev, 65struct drm_crtc *omap_crtc_init(struct drm_device *dev,
63 struct omap_overlay *ovl, int id); 66 struct omap_overlay *ovl, int id);
64struct omap_overlay *omap_crtc_get_overlay(struct drm_crtc *crtc); 67
68struct drm_plane *omap_plane_init(struct drm_device *dev,
69 struct omap_overlay *ovl, unsigned int possible_crtcs,
70 bool priv);
71int omap_plane_dpms(struct drm_plane *plane, int mode);
72int omap_plane_mode_set(struct drm_plane *plane,
73 struct drm_crtc *crtc, struct drm_framebuffer *fb,
74 int crtc_x, int crtc_y,
75 unsigned int crtc_w, unsigned int crtc_h,
76 uint32_t src_x, uint32_t src_y,
77 uint32_t src_w, uint32_t src_h);
65 78
66struct drm_encoder *omap_encoder_init(struct drm_device *dev, 79struct drm_encoder *omap_encoder_init(struct drm_device *dev,
67 struct omap_overlay_manager *mgr); 80 struct omap_overlay_manager *mgr);
@@ -80,12 +93,14 @@ void omap_connector_flush(struct drm_connector *connector,
80 int x, int y, int w, int h); 93 int x, int y, int w, int h);
81 94
82struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev, 95struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
83 struct drm_file *file, struct drm_mode_fb_cmd *mode_cmd); 96 struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd);
84struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, 97struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
85 struct drm_mode_fb_cmd *mode_cmd, struct drm_gem_object *bo); 98 struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
86struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb); 99struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p);
87int omap_framebuffer_get_buffer(struct drm_framebuffer *fb, int x, int y, 100int omap_framebuffer_pin(struct drm_framebuffer *fb);
88 void **vaddr, dma_addr_t *paddr, unsigned int *screen_width); 101void omap_framebuffer_unpin(struct drm_framebuffer *fb);
102void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, int x, int y,
103 struct omap_overlay_info *info);
89struct drm_connector *omap_framebuffer_get_next_connector( 104struct drm_connector *omap_framebuffer_get_next_connector(
90 struct drm_framebuffer *fb, struct drm_connector *from); 105 struct drm_framebuffer *fb, struct drm_connector *from);
91void omap_framebuffer_flush(struct drm_framebuffer *fb, 106void omap_framebuffer_flush(struct drm_framebuffer *fb,
@@ -132,4 +147,29 @@ static inline int align_pitch(int pitch, int width, int bpp)
132 return ALIGN(pitch, 8 * bytespp); 147 return ALIGN(pitch, 8 * bytespp);
133} 148}
134 149
150/* should these be made into common util helpers?
151 */
152
153static inline int objects_lookup(struct drm_device *dev,
154 struct drm_file *filp, uint32_t pixel_format,
155 struct drm_gem_object **bos, uint32_t *handles)
156{
157 int i, n = drm_format_num_planes(pixel_format);
158
159 for (i = 0; i < n; i++) {
160 bos[i] = drm_gem_object_lookup(dev, filp, handles[i]);
161 if (!bos[i]) {
162 goto fail;
163 }
164 }
165
166 return 0;
167
168fail:
169	while (--i >= 0) {
170 drm_gem_object_unreference_unlocked(bos[i]);
171 }
172 return -ENOENT;
173}
174
135#endif /* __OMAP_DRV_H__ */ 175#endif /* __OMAP_DRV_H__ */
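objects_lookup() above resolves the per-plane GEM handles of a framebuffer-create request into object pointers and drops the references it already took if any lookup fails. A hedged sketch of a caller (everything except objects_lookup itself is a hypothetical placeholder):

/* Sketch only: hypothetical fb_create-style caller. */
static struct drm_framebuffer *example_fb_create(struct drm_device *dev,
		struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_gem_object *bos[4];
	int ret;

	ret = objects_lookup(dev, file, mode_cmd->pixel_format,
			bos, mode_cmd->handles);
	if (ret)
		return ERR_PTR(ret);

	/* build the framebuffer around bos[]; on error the bos would
	 * need to be unreferenced again */
	return example_framebuffer_init(dev, mode_cmd, bos);
}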
diff --git a/drivers/staging/omapdrm/omap_fb.c b/drivers/staging/omapdrm/omap_fb.c
index 0b50c5b3b564..d021a7ec58df 100644
--- a/drivers/staging/omapdrm/omap_fb.c
+++ b/drivers/staging/omapdrm/omap_fb.c
@@ -22,18 +22,57 @@
22#include "drm_crtc.h" 22#include "drm_crtc.h"
23#include "drm_crtc_helper.h" 23#include "drm_crtc_helper.h"
24 24
25
26/* 25/*
27 * framebuffer funcs 26 * framebuffer funcs
28 */ 27 */
29 28
29/* per-format info: */
30struct format {
31 enum omap_color_mode dss_format;
32 uint32_t pixel_format;
33 struct {
34 int stride_bpp; /* this times width is stride */
35 int sub_y; /* sub-sample in y dimension */
36 } planes[4];
37 bool yuv;
38};
39
40static const struct format formats[] = {
41 /* 16bpp [A]RGB: */
42 { OMAP_DSS_COLOR_RGB16, DRM_FORMAT_RGB565, {{2, 1}}, false }, /* RGB16-565 */
43 { OMAP_DSS_COLOR_RGB12U, DRM_FORMAT_RGBX4444, {{2, 1}}, false }, /* RGB12x-4444 */
44 { OMAP_DSS_COLOR_RGBX16, DRM_FORMAT_XRGB4444, {{2, 1}}, false }, /* xRGB12-4444 */
45 { OMAP_DSS_COLOR_RGBA16, DRM_FORMAT_RGBA4444, {{2, 1}}, false }, /* RGBA12-4444 */
46 { OMAP_DSS_COLOR_ARGB16, DRM_FORMAT_ARGB4444, {{2, 1}}, false }, /* ARGB16-4444 */
47 { OMAP_DSS_COLOR_XRGB16_1555, DRM_FORMAT_XRGB1555, {{2, 1}}, false }, /* xRGB15-1555 */
48 { OMAP_DSS_COLOR_ARGB16_1555, DRM_FORMAT_ARGB1555, {{2, 1}}, false }, /* ARGB16-1555 */
49 /* 24bpp RGB: */
50 { OMAP_DSS_COLOR_RGB24P, DRM_FORMAT_RGB888, {{3, 1}}, false }, /* RGB24-888 */
51 /* 32bpp [A]RGB: */
52 { OMAP_DSS_COLOR_RGBX32, DRM_FORMAT_RGBX8888, {{4, 1}}, false }, /* RGBx24-8888 */
53 { OMAP_DSS_COLOR_RGB24U, DRM_FORMAT_XRGB8888, {{4, 1}}, false }, /* xRGB24-8888 */
54 { OMAP_DSS_COLOR_RGBA32, DRM_FORMAT_RGBA8888, {{4, 1}}, false }, /* RGBA32-8888 */
55 { OMAP_DSS_COLOR_ARGB32, DRM_FORMAT_ARGB8888, {{4, 1}}, false }, /* ARGB32-8888 */
56 /* YUV: */
57 { OMAP_DSS_COLOR_NV12, DRM_FORMAT_NV12, {{1, 1}, {1, 2}}, true },
58 { OMAP_DSS_COLOR_YUV2, DRM_FORMAT_YUYV, {{2, 1}}, true },
59 { OMAP_DSS_COLOR_UYVY, DRM_FORMAT_UYVY, {{2, 1}}, true },
60};
61
62/* per-plane info for the fb: */
63struct plane {
64 struct drm_gem_object *bo;
65 uint32_t pitch;
66 uint32_t offset;
67 dma_addr_t paddr;
68};
69
30#define to_omap_framebuffer(x) container_of(x, struct omap_framebuffer, base) 70#define to_omap_framebuffer(x) container_of(x, struct omap_framebuffer, base)
31 71
32struct omap_framebuffer { 72struct omap_framebuffer {
33 struct drm_framebuffer base; 73 struct drm_framebuffer base;
34 struct drm_gem_object *bo; 74 const struct format *format;
35 int size; 75 struct plane planes[4];
36 dma_addr_t paddr;
37}; 76};
38 77
39static int omap_framebuffer_create_handle(struct drm_framebuffer *fb, 78static int omap_framebuffer_create_handle(struct drm_framebuffer *fb,
@@ -41,22 +80,23 @@ static int omap_framebuffer_create_handle(struct drm_framebuffer *fb,
41 unsigned int *handle) 80 unsigned int *handle)
42{ 81{
43 struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb); 82 struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
44 return drm_gem_handle_create(file_priv, omap_fb->bo, handle); 83 return drm_gem_handle_create(file_priv,
84 omap_fb->planes[0].bo, handle);
45} 85}
46 86
47static void omap_framebuffer_destroy(struct drm_framebuffer *fb) 87static void omap_framebuffer_destroy(struct drm_framebuffer *fb)
48{ 88{
49 struct drm_device *dev = fb->dev;
50 struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb); 89 struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
90 int i, n = drm_format_num_planes(omap_fb->format->pixel_format);
51 91
52 DBG("destroy: FB ID: %d (%p)", fb->base.id, fb); 92 DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
53 93
54 drm_framebuffer_cleanup(fb); 94 drm_framebuffer_cleanup(fb);
55 95
56 if (omap_fb->bo) { 96 for (i = 0; i < n; i++) {
57 if (omap_fb->paddr && omap_gem_put_paddr(omap_fb->bo)) 97 struct plane *plane = &omap_fb->planes[i];
58 dev_err(dev->dev, "could not unmap!\n"); 98 if (plane->bo)
59 drm_gem_object_unreference_unlocked(omap_fb->bo); 99 drm_gem_object_unreference_unlocked(plane->bo);
60 } 100 }
61 101
62 kfree(omap_fb); 102 kfree(omap_fb);
@@ -83,37 +123,76 @@ static const struct drm_framebuffer_funcs omap_framebuffer_funcs = {
83 .dirty = omap_framebuffer_dirty, 123 .dirty = omap_framebuffer_dirty,
84}; 124};
85 125
86/* returns the buffer size */ 126/* pins buffer in preparation for scanout */
87int omap_framebuffer_get_buffer(struct drm_framebuffer *fb, int x, int y, 127int omap_framebuffer_pin(struct drm_framebuffer *fb)
88 void **vaddr, dma_addr_t *paddr, unsigned int *screen_width)
89{ 128{
90 struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb); 129 struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
91 int bpp = fb->bits_per_pixel / 8; 130 int ret, i, n = drm_format_num_planes(omap_fb->format->pixel_format);
92 unsigned long offset;
93 131
94 offset = (x * bpp) + (y * fb->pitch); 132 for (i = 0; i < n; i++) {
133 struct plane *plane = &omap_fb->planes[i];
134 ret = omap_gem_get_paddr(plane->bo, &plane->paddr, true);
135 if (ret)
136 goto fail;
137 }
95 138
96 if (vaddr) { 139 return 0;
97 void *bo_vaddr = omap_gem_vaddr(omap_fb->bo); 140
98 /* note: we can only count on having a vaddr for buffers that 141fail:
99		 * are allocated physically contiguously to begin with (ie.	142		while (--i >= 0) {
100 * dma_alloc_coherent()). But this should be ok because it 143 struct plane *plane = &omap_fb->planes[i];
101 * is only used by legacy fbdev 144 omap_gem_put_paddr(plane->bo);
102 */
103 BUG_ON(IS_ERR_OR_NULL(bo_vaddr));
104 *vaddr = bo_vaddr + offset;
105 } 145 }
146 return ret;
147}
106 148
107 *paddr = omap_fb->paddr + offset; 149/* releases buffer when done with scanout */
108 *screen_width = fb->pitch / bpp; 150void omap_framebuffer_unpin(struct drm_framebuffer *fb)
151{
152 struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
153 int i, n = drm_format_num_planes(omap_fb->format->pixel_format);
109 154
110 return omap_fb->size - offset; 155 for (i = 0; i < n; i++) {
156 struct plane *plane = &omap_fb->planes[i];
157 omap_gem_put_paddr(plane->bo);
158 }
111} 159}
112 160
113struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb) 161/* update ovl info for scanout, handles cases of multi-planar fb's, etc.
162 */
163void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, int x, int y,
164 struct omap_overlay_info *info)
114{ 165{
115 struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb); 166 struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
116 return omap_fb->bo; 167 const struct format *format = omap_fb->format;
168 struct plane *plane = &omap_fb->planes[0];
169 unsigned int offset;
170
171 offset = plane->offset +
172 (x * format->planes[0].stride_bpp) +
173 (y * plane->pitch / format->planes[0].sub_y);
174
175 info->color_mode = format->dss_format;
176 info->paddr = plane->paddr + offset;
177 info->screen_width = plane->pitch / format->planes[0].stride_bpp;
178
179 if (format->dss_format == OMAP_DSS_COLOR_NV12) {
180 plane = &omap_fb->planes[1];
181 offset = plane->offset +
182 (x * format->planes[1].stride_bpp) +
183 (y * plane->pitch / format->planes[1].sub_y);
184 info->p_uv_addr = plane->paddr + offset;
185 } else {
186 info->p_uv_addr = 0;
187 }
188}
189
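As a worked example of the arithmetic above (all values assumed, not from the patch): for DRM_FORMAT_XRGB8888 the first plane has stride_bpp = 4 and sub_y = 1, so with plane->offset = 0, plane->pitch = 4096 and a scanout origin of x = 16, y = 10:

	offset       = 0 + 16 * 4 + 10 * 4096 / 1 = 41024 bytes
	screen_width = 4096 / 4 = 1024 pixels

info->paddr then points 41024 bytes past the pinned base address; for NV12 the same computation is repeated on the second plane with sub_y = 2 to derive p_uv_addr.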
190struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p)
191{
192 struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
193 if (p >= drm_format_num_planes(omap_fb->format->pixel_format))
194 return NULL;
195 return omap_fb->planes[p].bo;
117} 196}
118 197
119/* iterate thru all the connectors, returning ones that are attached 198/* iterate thru all the connectors, returning ones that are attached
@@ -171,39 +250,57 @@ void omap_framebuffer_flush(struct drm_framebuffer *fb,
171} 250}
172 251
173struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev, 252struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
174 struct drm_file *file, struct drm_mode_fb_cmd *mode_cmd) 253 struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd)
175{ 254{
176 struct drm_gem_object *bo; 255 struct drm_gem_object *bos[4];
177 struct drm_framebuffer *fb; 256 struct drm_framebuffer *fb;
178 bo = drm_gem_object_lookup(dev, file, mode_cmd->handle); 257 int ret;
179 if (!bo) { 258
180 return ERR_PTR(-ENOENT); 259 ret = objects_lookup(dev, file, mode_cmd->pixel_format,
181 } 260 bos, mode_cmd->handles);
182 fb = omap_framebuffer_init(dev, mode_cmd, bo); 261 if (ret)
183 if (!fb) { 262 return ERR_PTR(ret);
184 return ERR_PTR(-ENOMEM); 263
264 fb = omap_framebuffer_init(dev, mode_cmd, bos);
265 if (IS_ERR(fb)) {
266 int i, n = drm_format_num_planes(mode_cmd->pixel_format);
267 for (i = 0; i < n; i++)
268 drm_gem_object_unreference_unlocked(bos[i]);
269 return fb;
185 } 270 }
186 return fb; 271 return fb;
187} 272}
188 273
189struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, 274struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
190 struct drm_mode_fb_cmd *mode_cmd, struct drm_gem_object *bo) 275 struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
191{ 276{
192 struct omap_framebuffer *omap_fb; 277 struct omap_framebuffer *omap_fb;
193 struct drm_framebuffer *fb = NULL; 278 struct drm_framebuffer *fb = NULL;
194 int size, ret; 279 const struct format *format = NULL;
280 int ret, i, n = drm_format_num_planes(mode_cmd->pixel_format);
195 281
196 DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%d)", 282 DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%4.4s)",
197 dev, mode_cmd, mode_cmd->width, mode_cmd->height, 283 dev, mode_cmd, mode_cmd->width, mode_cmd->height,
198 mode_cmd->bpp); 284 (char *)&mode_cmd->pixel_format);
285
286 for (i = 0; i < ARRAY_SIZE(formats); i++) {
287 if (formats[i].pixel_format == mode_cmd->pixel_format) {
288 format = &formats[i];
289 break;
290 }
291 }
199 292
200 /* in case someone tries to feed us a completely bogus stride: */ 293 if (!format) {
201 mode_cmd->pitch = align_pitch(mode_cmd->pitch, 294 dev_err(dev->dev, "unsupported pixel format: %4.4s\n",
202 mode_cmd->width, mode_cmd->bpp); 295 (char *)&mode_cmd->pixel_format);
296 ret = -EINVAL;
297 goto fail;
298 }
203 299
204 omap_fb = kzalloc(sizeof(*omap_fb), GFP_KERNEL); 300 omap_fb = kzalloc(sizeof(*omap_fb), GFP_KERNEL);
205 if (!omap_fb) { 301 if (!omap_fb) {
206 dev_err(dev->dev, "could not allocate fb\n"); 302 dev_err(dev->dev, "could not allocate fb\n");
303 ret = -ENOMEM;
207 goto fail; 304 goto fail;
208 } 305 }
209 306
@@ -216,19 +313,32 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
216 313
217 DBG("create: FB ID: %d (%p)", fb->base.id, fb); 314 DBG("create: FB ID: %d (%p)", fb->base.id, fb);
218 315
219 size = PAGE_ALIGN(mode_cmd->pitch * mode_cmd->height); 316 omap_fb->format = format;
220 317
221 if (size > bo->size) { 318 for (i = 0; i < n; i++) {
222 dev_err(dev->dev, "provided buffer object is too small!\n"); 319 struct plane *plane = &omap_fb->planes[i];
223 goto fail; 320 int size, pitch = mode_cmd->pitches[i];
224 } 321
322 if (pitch < (mode_cmd->width * format->planes[i].stride_bpp)) {
323 dev_err(dev->dev, "provided buffer pitch is too small! %d < %d\n",
324 pitch, mode_cmd->width * format->planes[i].stride_bpp);
325 ret = -EINVAL;
326 goto fail;
327 }
225 328
226 omap_fb->bo = bo; 329 size = pitch * mode_cmd->height / format->planes[i].sub_y;
227 omap_fb->size = size;
228 330
229 if (omap_gem_get_paddr(bo, &omap_fb->paddr, true)) { 331 if (size > (bos[i]->size - mode_cmd->offsets[i])) {
230 dev_err(dev->dev, "could not map (paddr)!\n"); 332 dev_err(dev->dev, "provided buffer object is too small! %d < %d\n",
231 goto fail; 333 bos[i]->size - mode_cmd->offsets[i], size);
334 ret = -EINVAL;
335 goto fail;
336 }
337
338 plane->bo = bos[i];
339 plane->offset = mode_cmd->offsets[i];
340 plane->pitch = mode_cmd->pitches[i];
341		plane->paddr = 0;
232 } 342 }
233 343
234 drm_helper_mode_fill_fb_struct(fb, mode_cmd); 344 drm_helper_mode_fill_fb_struct(fb, mode_cmd);
@@ -239,5 +349,5 @@ fail:
239 if (fb) { 349 if (fb) {
240 omap_framebuffer_destroy(fb); 350 omap_framebuffer_destroy(fb);
241 } 351 }
242 return NULL; 352 return ERR_PTR(ret);
243} 353}
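The pitch and size checks above reduce to simple arithmetic; as a hedged example with assumed numbers, a 1920x1080 DRM_FORMAT_NV12 framebuffer must provide pitches[0] >= 1920 and pitches[1] >= 1920 (stride_bpp is 1 for both planes), and each buffer object must hold at least pitches[0] * 1080 bytes of luma and pitches[1] * 1080 / 2 bytes of chroma beyond its offsets[] entry, since the chroma plane is vertically sub-sampled (sub_y = 2).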
diff --git a/drivers/staging/omapdrm/omap_fbdev.c b/drivers/staging/omapdrm/omap_fbdev.c
index 093ae2f87b20..96940bbfc6f4 100644
--- a/drivers/staging/omapdrm/omap_fbdev.c
+++ b/drivers/staging/omapdrm/omap_fbdev.c
@@ -129,10 +129,8 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
129 struct drm_framebuffer *fb = NULL; 129 struct drm_framebuffer *fb = NULL;
130 union omap_gem_size gsize; 130 union omap_gem_size gsize;
131 struct fb_info *fbi = NULL; 131 struct fb_info *fbi = NULL;
132 struct drm_mode_fb_cmd mode_cmd = {0}; 132 struct drm_mode_fb_cmd2 mode_cmd = {0};
133 dma_addr_t paddr; 133 dma_addr_t paddr;
134 void __iomem *vaddr;
135 int size, screen_width;
136 int ret; 134 int ret;
137 135
138 /* only doing ARGB32 since this is what is needed to alpha-blend 136 /* only doing ARGB32 since this is what is needed to alpha-blend
@@ -145,36 +143,56 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
145 sizes->surface_height, sizes->surface_bpp, 143 sizes->surface_height, sizes->surface_bpp,
146 sizes->fb_width, sizes->fb_height); 144 sizes->fb_width, sizes->fb_height);
147 145
146 mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
147 sizes->surface_depth);
148
148 mode_cmd.width = sizes->surface_width; 149 mode_cmd.width = sizes->surface_width;
149 mode_cmd.height = sizes->surface_height; 150 mode_cmd.height = sizes->surface_height;
150 151
151 mode_cmd.bpp = sizes->surface_bpp; 152 mode_cmd.pitches[0] = align_pitch(
152 mode_cmd.depth = sizes->surface_depth; 153 mode_cmd.width * ((sizes->surface_bpp + 7) / 8),
153 154 mode_cmd.width, sizes->surface_bpp);
154 mode_cmd.pitch = align_pitch(
155 mode_cmd.width * ((mode_cmd.bpp + 7) / 8),
156 mode_cmd.width, mode_cmd.bpp);
157 155
158 fbdev->ywrap_enabled = priv->has_dmm && ywrap_enabled; 156 fbdev->ywrap_enabled = priv->has_dmm && ywrap_enabled;
159 if (fbdev->ywrap_enabled) { 157 if (fbdev->ywrap_enabled) {
160 /* need to align pitch to page size if using DMM scrolling */ 158 /* need to align pitch to page size if using DMM scrolling */
161 mode_cmd.pitch = ALIGN(mode_cmd.pitch, PAGE_SIZE); 159 mode_cmd.pitches[0] = ALIGN(mode_cmd.pitches[0], PAGE_SIZE);
162 } 160 }
163 161
164 /* allocate backing bo */ 162 /* allocate backing bo */
165 gsize = (union omap_gem_size){ 163 gsize = (union omap_gem_size){
166 .bytes = PAGE_ALIGN(mode_cmd.pitch * mode_cmd.height), 164 .bytes = PAGE_ALIGN(mode_cmd.pitches[0] * mode_cmd.height),
167 }; 165 };
168 DBG("allocating %d bytes for fb %d", gsize.bytes, dev->primary->index); 166 DBG("allocating %d bytes for fb %d", gsize.bytes, dev->primary->index);
169 fbdev->bo = omap_gem_new(dev, gsize, OMAP_BO_SCANOUT | OMAP_BO_WC); 167 fbdev->bo = omap_gem_new(dev, gsize, OMAP_BO_SCANOUT | OMAP_BO_WC);
170 if (!fbdev->bo) { 168 if (!fbdev->bo) {
171 dev_err(dev->dev, "failed to allocate buffer object\n"); 169 dev_err(dev->dev, "failed to allocate buffer object\n");
170 ret = -ENOMEM;
172 goto fail; 171 goto fail;
173 } 172 }
174 173
175 fb = omap_framebuffer_init(dev, &mode_cmd, fbdev->bo); 174 fb = omap_framebuffer_init(dev, &mode_cmd, &fbdev->bo);
176 if (!fb) { 175 if (IS_ERR(fb)) {
177 dev_err(dev->dev, "failed to allocate fb\n"); 176 dev_err(dev->dev, "failed to allocate fb\n");
177 /* note: if fb creation failed, we can't rely on fb destroy
178 * to unref the bo:
179 */
180 drm_gem_object_unreference(fbdev->bo);
181 ret = PTR_ERR(fb);
182 goto fail;
183 }
184
185 /* note: this keeps the bo pinned.. which is perhaps not ideal,
186 * but is needed as long as we use fb_mmap() to mmap to userspace
187 * (since this happens using fix.smem_start). Possibly we could
188 * implement our own mmap using GEM mmap support to avoid this
189 * (non-tiled buffer doesn't need to be pinned for fbcon to write
190 * to it). Then we just need to be sure that we are able to re-
191	 * pin it in case of an oops.
192 */
193 ret = omap_gem_get_paddr(fbdev->bo, &paddr, true);
194 if (ret) {
195 dev_err(dev->dev, "could not map (paddr)!\n");
178 ret = -ENOMEM; 196 ret = -ENOMEM;
179 goto fail; 197 goto fail;
180 } 198 }
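Since mode_cmd2 carries a fourcc instead of bpp/depth, the fbdev path derives it via drm_mode_legacy_fb_format(); for the 32bpp, depth-32 surface this fbdev asks for that yields DRM_FORMAT_ARGB8888, while 32/24 would map to DRM_FORMAT_XRGB8888 and 16/16 to DRM_FORMAT_RGB565 (mapping recalled from the DRM core and stated here for orientation, not taken from this patch).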
@@ -206,18 +224,15 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
206 goto fail_unlock; 224 goto fail_unlock;
207 } 225 }
208 226
209 drm_fb_helper_fill_fix(fbi, fb->pitch, fb->depth); 227 drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
210 drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height); 228 drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
211 229
212 size = omap_framebuffer_get_buffer(fb, 0, 0,
213 &vaddr, &paddr, &screen_width);
214
215 dev->mode_config.fb_base = paddr; 230 dev->mode_config.fb_base = paddr;
216 231
217 fbi->screen_base = vaddr; 232 fbi->screen_base = omap_gem_vaddr(fbdev->bo);
218 fbi->screen_size = size; 233 fbi->screen_size = fbdev->bo->size;
219 fbi->fix.smem_start = paddr; 234 fbi->fix.smem_start = paddr;
220 fbi->fix.smem_len = size; 235 fbi->fix.smem_len = fbdev->bo->size;
221 236
222 /* if we have DMM, then we can use it for scrolling by just 237 /* if we have DMM, then we can use it for scrolling by just
223 * shuffling pages around in DMM rather than doing sw blit. 238 * shuffling pages around in DMM rather than doing sw blit.
@@ -362,11 +377,11 @@ void omap_fbdev_free(struct drm_device *dev)
362 377
363 fbdev = to_omap_fbdev(priv->fbdev); 378 fbdev = to_omap_fbdev(priv->fbdev);
364 379
365 kfree(fbdev);
366
367 /* this will free the backing object */ 380 /* this will free the backing object */
368 if (fbdev->fb) 381 if (fbdev->fb)
369 fbdev->fb->funcs->destroy(fbdev->fb); 382 fbdev->fb->funcs->destroy(fbdev->fb);
370 383
384 kfree(fbdev);
385
371 priv->fbdev = NULL; 386 priv->fbdev = NULL;
372} 387}
diff --git a/drivers/staging/omapdrm/omap_gem.c b/drivers/staging/omapdrm/omap_gem.c
index e0ebd1d139f6..b7d6f886c5cf 100644
--- a/drivers/staging/omapdrm/omap_gem.c
+++ b/drivers/staging/omapdrm/omap_gem.c
@@ -116,6 +116,9 @@ struct omap_gem_object {
116 } *sync; 116 } *sync;
117}; 117};
118 118
119static int get_pages(struct drm_gem_object *obj, struct page ***pages);
120static uint64_t mmap_offset(struct drm_gem_object *obj);
121
119/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are 122/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
120 * not necessarily pinned in TILER all the time, and (b) when they are 123 * not necessarily pinned in TILER all the time, and (b) when they are
121 * they are not necessarily page aligned, we reserve one or more small 124 * they are not necessarily page aligned, we reserve one or more small
@@ -149,7 +152,7 @@ static void evict_entry(struct drm_gem_object *obj,
149{ 152{
150 if (obj->dev->dev_mapping) { 153 if (obj->dev->dev_mapping) {
151 size_t size = PAGE_SIZE * usergart[fmt].height; 154 size_t size = PAGE_SIZE * usergart[fmt].height;
152 loff_t off = omap_gem_mmap_offset(obj) + 155 loff_t off = mmap_offset(obj) +
153 (entry->obj_pgoff << PAGE_SHIFT); 156 (entry->obj_pgoff << PAGE_SHIFT);
154 unmap_mapping_range(obj->dev->dev_mapping, off, size, 1); 157 unmap_mapping_range(obj->dev->dev_mapping, off, size, 1);
155 } 158 }
@@ -189,8 +192,6 @@ static inline bool is_shmem(struct drm_gem_object *obj)
189 return obj->filp != NULL; 192 return obj->filp != NULL;
190} 193}
191 194
192static int get_pages(struct drm_gem_object *obj, struct page ***pages);
193
194static DEFINE_SPINLOCK(sync_lock); 195static DEFINE_SPINLOCK(sync_lock);
195 196
196/** ensure backing pages are allocated */ 197/** ensure backing pages are allocated */
@@ -251,7 +252,7 @@ static void omap_gem_detach_pages(struct drm_gem_object *obj)
251} 252}
252 253
253/** get mmap offset */ 254/** get mmap offset */
254uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj) 255static uint64_t mmap_offset(struct drm_gem_object *obj)
255{ 256{
256 if (!obj->map_list.map) { 257 if (!obj->map_list.map) {
257 /* Make it mmapable */ 258 /* Make it mmapable */
@@ -267,6 +268,15 @@ uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
267 return (uint64_t)obj->map_list.hash.key << PAGE_SHIFT; 268 return (uint64_t)obj->map_list.hash.key << PAGE_SHIFT;
268} 269}
269 270
271uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
272{
273 uint64_t offset;
274 mutex_lock(&obj->dev->struct_mutex);
275 offset = mmap_offset(obj);
276 mutex_unlock(&obj->dev->struct_mutex);
277 return offset;
278}
279
270/** get mmap size */ 280/** get mmap size */
271size_t omap_gem_mmap_size(struct drm_gem_object *obj) 281size_t omap_gem_mmap_size(struct drm_gem_object *obj)
272{ 282{
@@ -1034,6 +1044,11 @@ void omap_gem_free_object(struct drm_gem_object *obj)
1034 drm_gem_free_mmap_offset(obj); 1044 drm_gem_free_mmap_offset(obj);
1035 } 1045 }
1036 1046
1047 /* this means the object is still pinned.. which really should
1048 * not happen. I think..
1049 */
1050 WARN_ON(omap_obj->paddr_cnt > 0);
1051
1037 /* don't free externally allocated backing memory */ 1052 /* don't free externally allocated backing memory */
1038 if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) { 1053 if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) {
1039 if (omap_obj->pages) { 1054 if (omap_obj->pages) {
diff --git a/drivers/staging/omapdrm/omap_plane.c b/drivers/staging/omapdrm/omap_plane.c
new file mode 100644
index 000000000000..97909124a1fe
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_plane.c
@@ -0,0 +1,344 @@
1/*
2 * drivers/staging/omapdrm/omap_plane.c
3 *
4 * Copyright (C) 2011 Texas Instruments
5 * Author: Rob Clark <rob.clark@linaro.org>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published by
9 * the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include "omap_drv.h"
21
22/* some hackery because omapdss has an 'enum omap_plane' (which would be
23 * better named omap_plane_id).. and compiler seems unhappy about having
24 * both a 'struct omap_plane' and 'enum omap_plane'
25 */
26#define omap_plane _omap_plane
27
28/*
29 * plane funcs
30 */
31
32#define to_omap_plane(x) container_of(x, struct omap_plane, base)
33
34struct omap_plane {
35 struct drm_plane base;
36 struct omap_overlay *ovl;
37 struct omap_overlay_info info;
38
39 /* Source values, converted to integers because we don't support
40 * fractional positions:
41 */
42 unsigned int src_x, src_y;
43
44 /* last fb that we pinned: */
45 struct drm_framebuffer *pinned_fb;
46};
47
48
49/* push changes down to dss2 */
50static int commit(struct drm_plane *plane)
51{
52 struct drm_device *dev = plane->dev;
53 struct omap_plane *omap_plane = to_omap_plane(plane);
54 struct omap_overlay *ovl = omap_plane->ovl;
55 struct omap_overlay_info *info = &omap_plane->info;
56 int ret;
57
58 DBG("%s", ovl->name);
59 DBG("%dx%d -> %dx%d (%d)", info->width, info->height, info->out_width,
60 info->out_height, info->screen_width);
61 DBG("%d,%d %08x %08x", info->pos_x, info->pos_y,
62 info->paddr, info->p_uv_addr);
63
64 /* NOTE: do we want to do this at all here, or just wait
65	 * for dpms(ON) since other CRTCs may not have their mode
66 * set yet, so fb dimensions may still change..
67 */
68 ret = ovl->set_overlay_info(ovl, info);
69 if (ret) {
70 dev_err(dev->dev, "could not set overlay info\n");
71 return ret;
72 }
73
74 /* our encoder doesn't necessarily get a commit() after this, in
75 * particular in the dpms() and mode_set_base() cases, so force the
76 * manager to update:
77 *
78 * could this be in the encoder somehow?
79 */
80 if (ovl->manager) {
81 ret = ovl->manager->apply(ovl->manager);
82 if (ret) {
83 dev_err(dev->dev, "could not apply settings\n");
84 return ret;
85 }
86 }
87
88 if (ovl->is_enabled(ovl)) {
89 omap_framebuffer_flush(plane->fb, info->pos_x, info->pos_y,
90 info->out_width, info->out_height);
91 }
92
93 return 0;
94}
95
 96/* when the CRTC that we are attached to has potentially changed, this checks
 97 * whether we are attached to the proper manager, and updates it if necessary.
98 */
99static void update_manager(struct drm_plane *plane)
100{
101 struct omap_drm_private *priv = plane->dev->dev_private;
102 struct omap_plane *omap_plane = to_omap_plane(plane);
103 struct omap_overlay *ovl = omap_plane->ovl;
104 struct omap_overlay_manager *mgr = NULL;
105 int i;
106
107 if (plane->crtc) {
108 for (i = 0; i < priv->num_encoders; i++) {
109 struct drm_encoder *encoder = priv->encoders[i];
110 if (encoder->crtc == plane->crtc) {
111 mgr = omap_encoder_get_manager(encoder);
112 break;
113 }
114 }
115 }
116
117 if (ovl->manager != mgr) {
118 bool enabled = ovl->is_enabled(ovl);
119
120 /* don't switch things around with enabled overlays: */
121 if (enabled)
122 omap_plane_dpms(plane, DRM_MODE_DPMS_OFF);
123
124 if (ovl->manager) {
125 DBG("disconnecting %s from %s", ovl->name,
126 ovl->manager->name);
127 ovl->unset_manager(ovl);
128 }
129
130 if (mgr) {
131 DBG("connecting %s to %s", ovl->name, mgr->name);
132 ovl->set_manager(ovl, mgr);
133 }
134
135 if (enabled && mgr)
136 omap_plane_dpms(plane, DRM_MODE_DPMS_ON);
137 }
138}
139
140/* update which fb (if any) is pinned for scanout */
141static int update_pin(struct drm_plane *plane, struct drm_framebuffer *fb)
142{
143 struct omap_plane *omap_plane = to_omap_plane(plane);
144 int ret = 0;
145
146 if (omap_plane->pinned_fb != fb) {
147 if (omap_plane->pinned_fb)
148 omap_framebuffer_unpin(omap_plane->pinned_fb);
149 omap_plane->pinned_fb = fb;
150 if (fb)
151 ret = omap_framebuffer_pin(fb);
152 }
153
154 return ret;
155}
156
157/* update parameters that are dependent on the framebuffer dimensions and
158 * position within the fb that this plane scans out from. This is called
159 * when framebuffer or x,y base may have changed.
160 */
161static void update_scanout(struct drm_plane *plane)
162{
163 struct omap_plane *omap_plane = to_omap_plane(plane);
164 struct omap_overlay_info *info = &omap_plane->info;
165 int ret;
166
167 ret = update_pin(plane, plane->fb);
168 if (ret) {
169 dev_err(plane->dev->dev,
170 "could not pin fb: %d\n", ret);
171 omap_plane_dpms(plane, DRM_MODE_DPMS_OFF);
172 return;
173 }
174
175 omap_framebuffer_update_scanout(plane->fb,
176 omap_plane->src_x, omap_plane->src_y, info);
177
178 DBG("%s: %d,%d: %08x %08x (%d)", omap_plane->ovl->name,
179 omap_plane->src_x, omap_plane->src_y,
180 (u32)info->paddr, (u32)info->p_uv_addr,
181 info->screen_width);
182}
183
184int omap_plane_mode_set(struct drm_plane *plane,
185 struct drm_crtc *crtc, struct drm_framebuffer *fb,
186 int crtc_x, int crtc_y,
187 unsigned int crtc_w, unsigned int crtc_h,
188 uint32_t src_x, uint32_t src_y,
189 uint32_t src_w, uint32_t src_h)
190{
191 struct omap_plane *omap_plane = to_omap_plane(plane);
192
193 /* src values are in Q16 fixed point, convert to integer: */
194 src_x = src_x >> 16;
195 src_y = src_y >> 16;
196 src_w = src_w >> 16;
197 src_h = src_h >> 16;
198
199 omap_plane->info.pos_x = crtc_x;
200 omap_plane->info.pos_y = crtc_y;
201 omap_plane->info.out_width = crtc_w;
202 omap_plane->info.out_height = crtc_h;
203 omap_plane->info.width = src_w;
204 omap_plane->info.height = src_h;
205 omap_plane->src_x = src_x;
206 omap_plane->src_y = src_y;
207
208	/* note: the core normally sets these after this function returns,
209	 * but since we do a commit/update_scanout before returning we
210	 * need the current values now.
211 */
212 plane->fb = fb;
213 plane->crtc = crtc;
214
215 update_scanout(plane);
216 update_manager(plane);
217
218 return 0;
219}
220
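A quick worked example of the Q16.16 conversion (values assumed): a userspace update with src_w = 1280 << 16 (0x05000000) and src_x = (100 << 16) + 0x8000 arrives in fixed point; after the shifts above the plane scans out an integer 1280-pixel-wide region starting at x = 100, the fractional half pixel being discarded since fractional source positions are not supported.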
221static int omap_plane_update(struct drm_plane *plane,
222 struct drm_crtc *crtc, struct drm_framebuffer *fb,
223 int crtc_x, int crtc_y,
224 unsigned int crtc_w, unsigned int crtc_h,
225 uint32_t src_x, uint32_t src_y,
226 uint32_t src_w, uint32_t src_h)
227{
228 omap_plane_mode_set(plane, crtc, fb, crtc_x, crtc_y, crtc_w, crtc_h,
229 src_x, src_y, src_w, src_h);
230 return omap_plane_dpms(plane, DRM_MODE_DPMS_ON);
231}
232
233static int omap_plane_disable(struct drm_plane *plane)
234{
235 return omap_plane_dpms(plane, DRM_MODE_DPMS_OFF);
236}
237
238static void omap_plane_destroy(struct drm_plane *plane)
239{
240 struct omap_plane *omap_plane = to_omap_plane(plane);
241 DBG("%s", omap_plane->ovl->name);
242 omap_plane_disable(plane);
243 drm_plane_cleanup(plane);
244 kfree(omap_plane);
245}
246
247int omap_plane_dpms(struct drm_plane *plane, int mode)
248{
249 struct omap_plane *omap_plane = to_omap_plane(plane);
250 struct omap_overlay *ovl = omap_plane->ovl;
251 int r;
252
253 DBG("%s: %d", omap_plane->ovl->name, mode);
254
255 if (mode == DRM_MODE_DPMS_ON) {
256 update_scanout(plane);
257 r = commit(plane);
258 if (!r)
259 r = ovl->enable(ovl);
260 } else {
261 r = ovl->disable(ovl);
262 update_pin(plane, NULL);
263 }
264
265 return r;
266}
267
268static const struct drm_plane_funcs omap_plane_funcs = {
269 .update_plane = omap_plane_update,
270 .disable_plane = omap_plane_disable,
271 .destroy = omap_plane_destroy,
272};
273
274static const uint32_t formats[] = {
275 DRM_FORMAT_RGB565,
276 DRM_FORMAT_RGBX4444,
277 DRM_FORMAT_XRGB4444,
278 DRM_FORMAT_RGBA4444,
279 DRM_FORMAT_ABGR4444,
280 DRM_FORMAT_XRGB1555,
281 DRM_FORMAT_ARGB1555,
282 DRM_FORMAT_RGB888,
283 DRM_FORMAT_RGBX8888,
284 DRM_FORMAT_XRGB8888,
285 DRM_FORMAT_RGBA8888,
286 DRM_FORMAT_ARGB8888,
287 DRM_FORMAT_NV12,
288 DRM_FORMAT_YUYV,
289 DRM_FORMAT_UYVY,
290};
291
292/* initialize plane */
293struct drm_plane *omap_plane_init(struct drm_device *dev,
294 struct omap_overlay *ovl, unsigned int possible_crtcs,
295 bool priv)
296{
297 struct drm_plane *plane = NULL;
298 struct omap_plane *omap_plane;
299
300 DBG("%s: possible_crtcs=%08x, priv=%d", ovl->name,
301 possible_crtcs, priv);
302
303 omap_plane = kzalloc(sizeof(*omap_plane), GFP_KERNEL);
304 if (!omap_plane) {
305 dev_err(dev->dev, "could not allocate plane\n");
306 goto fail;
307 }
308
309 omap_plane->ovl = ovl;
310 plane = &omap_plane->base;
311
312 drm_plane_init(dev, plane, possible_crtcs, &omap_plane_funcs,
313 formats, ARRAY_SIZE(formats), priv);
314
315 /* get our starting configuration, set defaults for parameters
316 * we don't currently use, etc:
317 */
318 ovl->get_overlay_info(ovl, &omap_plane->info);
319 omap_plane->info.rotation_type = OMAP_DSS_ROT_DMA;
320 omap_plane->info.rotation = OMAP_DSS_ROT_0;
321 omap_plane->info.global_alpha = 0xff;
322 omap_plane->info.mirror = 0;
324
325 /* Set defaults depending on whether we are a CRTC or overlay
326 * layer.
327 * TODO add ioctl to give userspace an API to change this.. this
328 * will come in a subsequent patch.
329 */
330 if (priv)
331 omap_plane->info.zorder = 0;
332 else
333 omap_plane->info.zorder = 1;
334
335 update_manager(plane);
336
337 return plane;
338
339fail:
340 if (plane) {
341 omap_plane_destroy(plane);
342 }
343 return NULL;
344}
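For orientation, a hypothetical caller (the real wiring lives in the driver's KMS init code outside this hunk; names such as priv->num_crtcs are assumed) might create a video plane for a spare DSS overlay roughly like this:

	struct omap_overlay *ovl = omap_dss_get_overlay(i);
	struct drm_plane *plane;

	/* sketch only: allow the plane on every CRTC, as a public (non-CRTC) plane */
	plane = omap_plane_init(dev, ovl, (1 << priv->num_crtcs) - 1, false);
	if (!plane)
		dev_err(dev->dev, "could not create plane for %s\n", ovl->name);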
diff --git a/drivers/staging/omapdrm/omap_priv.h b/drivers/staging/omapdrm/omap_priv.h
index c324709aa9a1..ef6441447147 100644
--- a/drivers/staging/omapdrm/omap_priv.h
+++ b/drivers/staging/omapdrm/omap_priv.h
@@ -27,14 +27,22 @@
27 * pipes/overlays/CRTCs are used.. if this is not provided, then instead the 27 * pipes/overlays/CRTCs are used.. if this is not provided, then instead the
28 * first CONFIG_DRM_OMAP_NUM_CRTCS are used, and they are each connected to 28 * first CONFIG_DRM_OMAP_NUM_CRTCS are used, and they are each connected to
29 * one manager, with priority given to managers that are connected to 29 * one manager, with priority given to managers that are connected to
30 * detected devices. This should be a good default behavior for most cases, 30 * detected devices. Remaining overlays are used as video planes. This
31 * but yet there still might be times when you wish to do something different. 31 * should be a good default behavior for most cases, but yet there still
32 * might be times when you wish to do something different.
32 */ 33 */
33struct omap_kms_platform_data { 34struct omap_kms_platform_data {
35 /* overlays to use as CRTCs: */
34 int ovl_cnt; 36 int ovl_cnt;
35 const int *ovl_ids; 37 const int *ovl_ids;
38
39 /* overlays to use as video planes: */
40 int pln_cnt;
41 const int *pln_ids;
42
36 int mgr_cnt; 43 int mgr_cnt;
37 const int *mgr_ids; 44 const int *mgr_ids;
45
38 int dev_cnt; 46 int dev_cnt;
39 const char **dev_names; 47 const char **dev_names;
40}; 48};
diff --git a/drivers/staging/pohmelfs/Kconfig b/drivers/staging/pohmelfs/Kconfig
deleted file mode 100644
index 8d53b1a1e715..000000000000
--- a/drivers/staging/pohmelfs/Kconfig
+++ /dev/null
@@ -1,20 +0,0 @@
1config POHMELFS
2 tristate "POHMELFS filesystem support"
3 depends on NET
4 select CONNECTOR
5 select CRYPTO
6 select CRYPTO_BLKCIPHER
7 select CRYPTO_HMAC
8 help
9 POHMELFS stands for Parallel Optimized Host Message Exchange Layered
10 File System. This is a network filesystem which supports coherent
11 caching of data and metadata on clients.
12
13config POHMELFS_DEBUG
14 bool "POHMELFS debugging"
15 depends on POHMELFS
16 default n
17 help
18 Turns on excessive POHMELFS debugging facilities.
19	  You usually do not want to slow things down noticeably and get a
20	  really large number of kernel messages in syslog.
diff --git a/drivers/staging/pohmelfs/Makefile b/drivers/staging/pohmelfs/Makefile
deleted file mode 100644
index 196561ca26bc..000000000000
--- a/drivers/staging/pohmelfs/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
1obj-$(CONFIG_POHMELFS) += pohmelfs.o
2
3pohmelfs-y := inode.o config.o dir.o net.o path_entry.o trans.o crypto.o lock.o mcache.o
diff --git a/drivers/staging/pohmelfs/config.c b/drivers/staging/pohmelfs/config.c
deleted file mode 100644
index b6c42cb0d1c6..000000000000
--- a/drivers/staging/pohmelfs/config.c
+++ /dev/null
@@ -1,611 +0,0 @@
1/*
2 * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include <linux/kernel.h>
17#include <linux/connector.h>
18#include <linux/crypto.h>
19#include <linux/list.h>
20#include <linux/mutex.h>
21#include <linux/string.h>
22#include <linux/in.h>
23#include <linux/slab.h>
24
25#include "netfs.h"
26
27/*
28 * Global configuration list.
29 * Each client can be asked to get one of them.
30 *
31 * Allows providing the remote server address (ipv4/v6/whatever), port
32 * and so on via kernel connector.
33 */
34
35static struct cb_id pohmelfs_cn_id = {.idx = POHMELFS_CN_IDX, .val = POHMELFS_CN_VAL};
36static LIST_HEAD(pohmelfs_config_list);
37static DEFINE_MUTEX(pohmelfs_config_lock);
38
39static inline int pohmelfs_config_eql(struct pohmelfs_ctl *sc, struct pohmelfs_ctl *ctl)
40{
41 if (sc->idx == ctl->idx && sc->type == ctl->type &&
42 sc->proto == ctl->proto &&
43 sc->addrlen == ctl->addrlen &&
44 !memcmp(&sc->addr, &ctl->addr, ctl->addrlen))
45 return 1;
46
47 return 0;
48}
49
50static struct pohmelfs_config_group *pohmelfs_find_config_group(unsigned int idx)
51{
52 struct pohmelfs_config_group *g, *group = NULL;
53
54 list_for_each_entry(g, &pohmelfs_config_list, group_entry) {
55 if (g->idx == idx) {
56 group = g;
57 break;
58 }
59 }
60
61 return group;
62}
63
64static struct pohmelfs_config_group *pohmelfs_find_create_config_group(unsigned int idx)
65{
66 struct pohmelfs_config_group *g;
67
68 g = pohmelfs_find_config_group(idx);
69 if (g)
70 return g;
71
72 g = kzalloc(sizeof(struct pohmelfs_config_group), GFP_KERNEL);
73 if (!g)
74 return NULL;
75
76 INIT_LIST_HEAD(&g->config_list);
77 g->idx = idx;
78 g->num_entry = 0;
79
80 list_add_tail(&g->group_entry, &pohmelfs_config_list);
81
82 return g;
83}
84
85static inline void pohmelfs_insert_config_entry(struct pohmelfs_sb *psb, struct pohmelfs_config *dst)
86{
87 struct pohmelfs_config *tmp;
88
89 INIT_LIST_HEAD(&dst->config_entry);
90
91 list_for_each_entry(tmp, &psb->state_list, config_entry) {
92 if (dst->state.ctl.prio > tmp->state.ctl.prio)
93 list_add_tail(&dst->config_entry, &tmp->config_entry);
94 }
95 if (list_empty(&dst->config_entry))
96 list_add_tail(&dst->config_entry, &psb->state_list);
97}
98
99static int pohmelfs_move_config_entry(struct pohmelfs_sb *psb,
100 struct pohmelfs_config *dst, struct pohmelfs_config *new)
101{
102 if ((dst->state.ctl.prio == new->state.ctl.prio) &&
103 (dst->state.ctl.perm == new->state.ctl.perm))
104 return 0;
105
106 dprintk("%s: dst: prio: %d, perm: %x, new: prio: %d, perm: %d.\n",
107 __func__, dst->state.ctl.prio, dst->state.ctl.perm,
108 new->state.ctl.prio, new->state.ctl.perm);
109 dst->state.ctl.prio = new->state.ctl.prio;
110 dst->state.ctl.perm = new->state.ctl.perm;
111
112 list_del_init(&dst->config_entry);
113 pohmelfs_insert_config_entry(psb, dst);
114 return 0;
115}
116
117/*
118 * pohmelfs_copy_config() is used to copy new state configs from the
119 * config group (controlled by the netlink messages) into the superblock.
120 * This happens either at startup time where no transactions can access
121 * the list of the configs (and thus list of the network states), or at
122 * run-time, where it is protected by the psb->state_lock.
123 */
124int pohmelfs_copy_config(struct pohmelfs_sb *psb)
125{
126 struct pohmelfs_config_group *g;
127 struct pohmelfs_config *c, *dst;
128 int err = -ENODEV;
129
130 mutex_lock(&pohmelfs_config_lock);
131
132 g = pohmelfs_find_config_group(psb->idx);
133 if (!g)
134 goto out_unlock;
135
136 /*
137 * Run over all entries in given config group and try to create and
138 * initialize those, which do not exist in superblock list.
139 * Skip all existing entries.
140 */
141
142 list_for_each_entry(c, &g->config_list, config_entry) {
143 err = 0;
144 list_for_each_entry(dst, &psb->state_list, config_entry) {
145 if (pohmelfs_config_eql(&dst->state.ctl, &c->state.ctl)) {
146 err = pohmelfs_move_config_entry(psb, dst, c);
147 if (!err)
148 err = -EEXIST;
149 break;
150 }
151 }
152
153 if (err)
154 continue;
155
156 dst = kzalloc(sizeof(struct pohmelfs_config), GFP_KERNEL);
157 if (!dst) {
158 err = -ENOMEM;
159 break;
160 }
161
162 memcpy(&dst->state.ctl, &c->state.ctl, sizeof(struct pohmelfs_ctl));
163
164 pohmelfs_insert_config_entry(psb, dst);
165
166 err = pohmelfs_state_init_one(psb, dst);
167 if (err) {
168 list_del(&dst->config_entry);
169 kfree(dst);
170 }
171
172 err = 0;
173 }
174
175out_unlock:
176 mutex_unlock(&pohmelfs_config_lock);
177
178 return err;
179}
180
181int pohmelfs_copy_crypto(struct pohmelfs_sb *psb)
182{
183 struct pohmelfs_config_group *g;
184 int err = -ENOENT;
185
186 mutex_lock(&pohmelfs_config_lock);
187 g = pohmelfs_find_config_group(psb->idx);
188 if (!g)
189 goto err_out_exit;
190
191 if (g->hash_string) {
192 err = -ENOMEM;
193 psb->hash_string = kstrdup(g->hash_string, GFP_KERNEL);
194 if (!psb->hash_string)
195 goto err_out_exit;
196 psb->hash_strlen = g->hash_strlen;
197 }
198
199 if (g->cipher_string) {
200 psb->cipher_string = kstrdup(g->cipher_string, GFP_KERNEL);
201 if (!psb->cipher_string)
202 goto err_out_free_hash_string;
203 psb->cipher_strlen = g->cipher_strlen;
204 }
205
206 if (g->hash_keysize) {
207 psb->hash_key = kmemdup(g->hash_key, g->hash_keysize,
208 GFP_KERNEL);
209 if (!psb->hash_key)
210 goto err_out_free_cipher_string;
211 psb->hash_keysize = g->hash_keysize;
212 }
213
214 if (g->cipher_keysize) {
215 psb->cipher_key = kmemdup(g->cipher_key, g->cipher_keysize,
216 GFP_KERNEL);
217 if (!psb->cipher_key)
218 goto err_out_free_hash;
219 psb->cipher_keysize = g->cipher_keysize;
220 }
221
222 mutex_unlock(&pohmelfs_config_lock);
223
224 return 0;
225
226err_out_free_hash:
227 kfree(psb->hash_key);
228err_out_free_cipher_string:
229 kfree(psb->cipher_string);
230err_out_free_hash_string:
231 kfree(psb->hash_string);
232err_out_exit:
233 mutex_unlock(&pohmelfs_config_lock);
234 return err;
235}
236
237static int pohmelfs_send_reply(int err, int msg_num, int action, struct cn_msg *msg, struct pohmelfs_ctl *ctl)
238{
239 struct pohmelfs_cn_ack *ack;
240
241 ack = kzalloc(sizeof(struct pohmelfs_cn_ack), GFP_KERNEL);
242 if (!ack)
243 return -ENOMEM;
244
245 memcpy(&ack->msg, msg, sizeof(struct cn_msg));
246
247 if (action == POHMELFS_CTLINFO_ACK)
248 memcpy(&ack->ctl, ctl, sizeof(struct pohmelfs_ctl));
249
250 ack->msg.len = sizeof(struct pohmelfs_cn_ack) - sizeof(struct cn_msg);
251 ack->msg.ack = msg->ack + 1;
252 ack->error = err;
253 ack->msg_num = msg_num;
254
255 cn_netlink_send(&ack->msg, 0, GFP_KERNEL);
256 kfree(ack);
257 return 0;
258}
259
260static int pohmelfs_cn_disp(struct cn_msg *msg)
261{
262 struct pohmelfs_config_group *g;
263 struct pohmelfs_ctl *ctl = (struct pohmelfs_ctl *)msg->data;
264 struct pohmelfs_config *c, *tmp;
265 int err = 0, i = 1;
266
267 if (msg->len != sizeof(struct pohmelfs_ctl))
268 return -EBADMSG;
269
270 mutex_lock(&pohmelfs_config_lock);
271
272 g = pohmelfs_find_config_group(ctl->idx);
273 if (!g) {
274 pohmelfs_send_reply(err, 0, POHMELFS_NOINFO_ACK, msg, NULL);
275 goto out_unlock;
276 }
277
278 list_for_each_entry_safe(c, tmp, &g->config_list, config_entry) {
279 struct pohmelfs_ctl *sc = &c->state.ctl;
280 if (pohmelfs_send_reply(err, g->num_entry - i, POHMELFS_CTLINFO_ACK, msg, sc)) {
281 err = -ENOMEM;
282 goto out_unlock;
283 }
284 i += 1;
285 }
286
287 out_unlock:
288 mutex_unlock(&pohmelfs_config_lock);
289 return err;
290}
291
292static int pohmelfs_cn_dump(struct cn_msg *msg)
293{
294 struct pohmelfs_config_group *g;
295 struct pohmelfs_config *c, *tmp;
296 int err = 0, i = 1;
297 int total_msg = 0;
298
299 if (msg->len != sizeof(struct pohmelfs_ctl))
300 return -EBADMSG;
301
302 mutex_lock(&pohmelfs_config_lock);
303
304 list_for_each_entry(g, &pohmelfs_config_list, group_entry)
305 total_msg += g->num_entry;
306 if (total_msg == 0) {
307 if (pohmelfs_send_reply(err, 0, POHMELFS_NOINFO_ACK, msg, NULL))
308 err = -ENOMEM;
309 goto out_unlock;
310 }
311
312 list_for_each_entry(g, &pohmelfs_config_list, group_entry) {
313 list_for_each_entry_safe(c, tmp, &g->config_list,
314 config_entry) {
315 struct pohmelfs_ctl *sc = &c->state.ctl;
316 if (pohmelfs_send_reply(err, total_msg - i,
317 POHMELFS_CTLINFO_ACK, msg,
318 sc)) {
319 err = -ENOMEM;
320 goto out_unlock;
321 }
322 i += 1;
323 }
324 }
325
326out_unlock:
327 mutex_unlock(&pohmelfs_config_lock);
328 return err;
329}
330
331static int pohmelfs_cn_flush(struct cn_msg *msg)
332{
333 struct pohmelfs_config_group *g;
334 struct pohmelfs_ctl *ctl = (struct pohmelfs_ctl *)msg->data;
335 struct pohmelfs_config *c, *tmp;
336 int err = 0;
337
338 if (msg->len != sizeof(struct pohmelfs_ctl))
339 return -EBADMSG;
340
341 mutex_lock(&pohmelfs_config_lock);
342
343 if (ctl->idx != POHMELFS_NULL_IDX) {
344 g = pohmelfs_find_config_group(ctl->idx);
345
346 if (!g)
347 goto out_unlock;
348
349 list_for_each_entry_safe(c, tmp, &g->config_list, config_entry) {
350 list_del(&c->config_entry);
351 g->num_entry--;
352 kfree(c);
353 }
354 } else {
355 list_for_each_entry(g, &pohmelfs_config_list, group_entry) {
356 list_for_each_entry_safe(c, tmp, &g->config_list,
357 config_entry) {
358 list_del(&c->config_entry);
359 g->num_entry--;
360 kfree(c);
361 }
362 }
363 }
364
365out_unlock:
366 mutex_unlock(&pohmelfs_config_lock);
367 pohmelfs_cn_dump(msg);
368
369 return err;
370}
371
372static int pohmelfs_modify_config(struct pohmelfs_ctl *old, struct pohmelfs_ctl *new)
373{
374 old->perm = new->perm;
375 old->prio = new->prio;
376 return 0;
377}
378
379static int pohmelfs_cn_ctl(struct cn_msg *msg, int action)
380{
381 struct pohmelfs_config_group *g;
382 struct pohmelfs_ctl *ctl = (struct pohmelfs_ctl *)msg->data;
383 struct pohmelfs_config *c, *tmp;
384 int err = 0;
385
386 if (msg->len != sizeof(struct pohmelfs_ctl))
387 return -EBADMSG;
388
389 mutex_lock(&pohmelfs_config_lock);
390
391 g = pohmelfs_find_create_config_group(ctl->idx);
392 if (!g) {
393 err = -ENOMEM;
394 goto out_unlock;
395 }
396
397 list_for_each_entry_safe(c, tmp, &g->config_list, config_entry) {
398 struct pohmelfs_ctl *sc = &c->state.ctl;
399
400 if (pohmelfs_config_eql(sc, ctl)) {
401 if (action == POHMELFS_FLAGS_ADD) {
402 err = -EEXIST;
403 goto out_unlock;
404 } else if (action == POHMELFS_FLAGS_DEL) {
405 list_del(&c->config_entry);
406 g->num_entry--;
407 kfree(c);
408 goto out_unlock;
409 } else if (action == POHMELFS_FLAGS_MODIFY) {
410 err = pohmelfs_modify_config(sc, ctl);
411 goto out_unlock;
412 } else {
413 err = -EEXIST;
414 goto out_unlock;
415 }
416 }
417 }
418 if (action == POHMELFS_FLAGS_DEL) {
419 err = -EBADMSG;
420 goto out_unlock;
421 }
422
423 c = kzalloc(sizeof(struct pohmelfs_config), GFP_KERNEL);
424 if (!c) {
425 err = -ENOMEM;
426 goto out_unlock;
427 }
428 memcpy(&c->state.ctl, ctl, sizeof(struct pohmelfs_ctl));
429 g->num_entry++;
430
431 list_add_tail(&c->config_entry, &g->config_list);
432
433 out_unlock:
434 mutex_unlock(&pohmelfs_config_lock);
435 if (pohmelfs_send_reply(err, 0, POHMELFS_NOINFO_ACK, msg, NULL))
436 err = -ENOMEM;
437
438 return err;
439}
440
441static int pohmelfs_crypto_hash_init(struct pohmelfs_config_group *g, struct pohmelfs_crypto *c)
442{
443 char *algo = (char *)c->data;
444 u8 *key = (u8 *)(algo + c->strlen);
445
446 if (g->hash_string)
447 return -EEXIST;
448
449 g->hash_string = kstrdup(algo, GFP_KERNEL);
450 if (!g->hash_string)
451 return -ENOMEM;
452 g->hash_strlen = c->strlen;
453 g->hash_keysize = c->keysize;
454
455 g->hash_key = kmemdup(key, c->keysize, GFP_KERNEL);
456 if (!g->hash_key) {
457 kfree(g->hash_string);
458 return -ENOMEM;
459 }
460
461 return 0;
462}
463
464static int pohmelfs_crypto_cipher_init(struct pohmelfs_config_group *g, struct pohmelfs_crypto *c)
465{
466 char *algo = (char *)c->data;
467 u8 *key = (u8 *)(algo + c->strlen);
468
469 if (g->cipher_string)
470 return -EEXIST;
471
472 g->cipher_string = kstrdup(algo, GFP_KERNEL);
473 if (!g->cipher_string)
474 return -ENOMEM;
475 g->cipher_strlen = c->strlen;
476 g->cipher_keysize = c->keysize;
477
478 g->cipher_key = kmemdup(key, c->keysize, GFP_KERNEL);
479 if (!g->cipher_key) {
480 kfree(g->cipher_string);
481 return -ENOMEM;
482 }
483
484 return 0;
485}
486
487static int pohmelfs_cn_crypto(struct cn_msg *msg)
488{
489 struct pohmelfs_crypto *crypto = (struct pohmelfs_crypto *)msg->data;
490 struct pohmelfs_config_group *g;
491 int err = 0;
492
493 dprintk("%s: idx: %u, strlen: %u, type: %u, keysize: %u, algo: %s.\n",
494 __func__, crypto->idx, crypto->strlen, crypto->type,
495 crypto->keysize, (char *)crypto->data);
496
497 mutex_lock(&pohmelfs_config_lock);
498 g = pohmelfs_find_create_config_group(crypto->idx);
499 if (!g) {
500 err = -ENOMEM;
501 goto out_unlock;
502 }
503
504 switch (crypto->type) {
505 case POHMELFS_CRYPTO_HASH:
506 err = pohmelfs_crypto_hash_init(g, crypto);
507 break;
508 case POHMELFS_CRYPTO_CIPHER:
509 err = pohmelfs_crypto_cipher_init(g, crypto);
510 break;
511 default:
512 err = -ENOTSUPP;
513 break;
514 }
515
516out_unlock:
517 mutex_unlock(&pohmelfs_config_lock);
518 if (pohmelfs_send_reply(err, 0, POHMELFS_NOINFO_ACK, msg, NULL))
519 err = -ENOMEM;
520
521 return err;
522}
523
524static void pohmelfs_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
525{
526 int err;
527
528 if (!cap_raised(current_cap(), CAP_SYS_ADMIN))
529 return;
530
531 switch (msg->flags) {
532 case POHMELFS_FLAGS_ADD:
533 case POHMELFS_FLAGS_DEL:
534 case POHMELFS_FLAGS_MODIFY:
535 err = pohmelfs_cn_ctl(msg, msg->flags);
536 break;
537 case POHMELFS_FLAGS_FLUSH:
538 err = pohmelfs_cn_flush(msg);
539 break;
540 case POHMELFS_FLAGS_SHOW:
541 err = pohmelfs_cn_disp(msg);
542 break;
543 case POHMELFS_FLAGS_DUMP:
544 err = pohmelfs_cn_dump(msg);
545 break;
546 case POHMELFS_FLAGS_CRYPTO:
547 err = pohmelfs_cn_crypto(msg);
548 break;
549 default:
550 err = -ENOSYS;
551 break;
552 }
553}
554
555int pohmelfs_config_check(struct pohmelfs_config *config, int idx)
556{
557 struct pohmelfs_ctl *ctl = &config->state.ctl;
558 struct pohmelfs_config *tmp;
559 int err = -ENOENT;
560 struct pohmelfs_ctl *sc;
561 struct pohmelfs_config_group *g;
562
563 mutex_lock(&pohmelfs_config_lock);
564
565 g = pohmelfs_find_config_group(ctl->idx);
566 if (g) {
567 list_for_each_entry(tmp, &g->config_list, config_entry) {
568 sc = &tmp->state.ctl;
569
570 if (pohmelfs_config_eql(sc, ctl)) {
571 err = 0;
572 break;
573 }
574 }
575 }
576
577 mutex_unlock(&pohmelfs_config_lock);
578
579 return err;
580}
581
582int __init pohmelfs_config_init(void)
583{
584 /* XXX remove (void *) cast when vanilla connector got synced */
585 return cn_add_callback(&pohmelfs_cn_id, "pohmelfs", (void *)pohmelfs_cn_callback);
586}
587
588void pohmelfs_config_exit(void)
589{
590 struct pohmelfs_config *c, *tmp;
591 struct pohmelfs_config_group *g, *gtmp;
592
593 cn_del_callback(&pohmelfs_cn_id);
594
595 mutex_lock(&pohmelfs_config_lock);
596 list_for_each_entry_safe(g, gtmp, &pohmelfs_config_list, group_entry) {
597 list_for_each_entry_safe(c, tmp, &g->config_list, config_entry) {
598 list_del(&c->config_entry);
599 kfree(c);
600 }
601
602 list_del(&g->group_entry);
603
604 kfree(g->hash_string);
605
606 kfree(g->cipher_string);
607
608 kfree(g);
609 }
610 mutex_unlock(&pohmelfs_config_lock);
611}
diff --git a/drivers/staging/pohmelfs/crypto.c b/drivers/staging/pohmelfs/crypto.c
deleted file mode 100644
index ad92771dce57..000000000000
--- a/drivers/staging/pohmelfs/crypto.c
+++ /dev/null
@@ -1,878 +0,0 @@
1/*
2 * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include <linux/crypto.h>
17#include <linux/highmem.h>
18#include <linux/kthread.h>
19#include <linux/pagemap.h>
20#include <linux/scatterlist.h>
21#include <linux/slab.h>
22
23#include "netfs.h"
24
25static struct crypto_hash *pohmelfs_init_hash(struct pohmelfs_sb *psb)
26{
27 int err;
28 struct crypto_hash *hash;
29
30 hash = crypto_alloc_hash(psb->hash_string, 0, CRYPTO_ALG_ASYNC);
31 if (IS_ERR(hash)) {
32 err = PTR_ERR(hash);
33 dprintk("%s: idx: %u: failed to allocate hash '%s', err: %d.\n",
34 __func__, psb->idx, psb->hash_string, err);
35 goto err_out_exit;
36 }
37
38 psb->crypto_attached_size = crypto_hash_digestsize(hash);
39
40 if (!psb->hash_keysize)
41 return hash;
42
43 err = crypto_hash_setkey(hash, psb->hash_key, psb->hash_keysize);
44 if (err) {
45 dprintk("%s: idx: %u: failed to set key for hash '%s', err: %d.\n",
46 __func__, psb->idx, psb->hash_string, err);
47 goto err_out_free;
48 }
49
50 return hash;
51
52err_out_free:
53 crypto_free_hash(hash);
54err_out_exit:
55 return ERR_PTR(err);
56}
57
58static struct crypto_ablkcipher *pohmelfs_init_cipher(struct pohmelfs_sb *psb)
59{
60 int err = -EINVAL;
61 struct crypto_ablkcipher *cipher;
62
63 if (!psb->cipher_keysize)
64 goto err_out_exit;
65
66 cipher = crypto_alloc_ablkcipher(psb->cipher_string, 0, 0);
67 if (IS_ERR(cipher)) {
68 err = PTR_ERR(cipher);
69 dprintk("%s: idx: %u: failed to allocate cipher '%s', err: %d.\n",
70 __func__, psb->idx, psb->cipher_string, err);
71 goto err_out_exit;
72 }
73
74 crypto_ablkcipher_clear_flags(cipher, ~0);
75
76 err = crypto_ablkcipher_setkey(cipher, psb->cipher_key, psb->cipher_keysize);
77 if (err) {
78 dprintk("%s: idx: %u: failed to set key for cipher '%s', err: %d.\n",
79 __func__, psb->idx, psb->cipher_string, err);
80 goto err_out_free;
81 }
82
83 return cipher;
84
85err_out_free:
86 crypto_free_ablkcipher(cipher);
87err_out_exit:
88 return ERR_PTR(err);
89}
90
91int pohmelfs_crypto_engine_init(struct pohmelfs_crypto_engine *e, struct pohmelfs_sb *psb)
92{
93 int err;
94
95 e->page_num = 0;
96
97 e->size = PAGE_SIZE;
98 e->data = kmalloc(e->size, GFP_KERNEL);
99 if (!e->data) {
100 err = -ENOMEM;
101 goto err_out_exit;
102 }
103
104 if (psb->hash_string) {
105 e->hash = pohmelfs_init_hash(psb);
106 if (IS_ERR(e->hash)) {
107 err = PTR_ERR(e->hash);
108 e->hash = NULL;
109 goto err_out_free;
110 }
111 }
112
113 if (psb->cipher_string) {
114 e->cipher = pohmelfs_init_cipher(psb);
115 if (IS_ERR(e->cipher)) {
116 err = PTR_ERR(e->cipher);
117 e->cipher = NULL;
118 goto err_out_free_hash;
119 }
120 }
121
122 return 0;
123
124err_out_free_hash:
125 crypto_free_hash(e->hash);
126err_out_free:
127 kfree(e->data);
128err_out_exit:
129 return err;
130}
131
132void pohmelfs_crypto_engine_exit(struct pohmelfs_crypto_engine *e)
133{
134 crypto_free_hash(e->hash);
135 crypto_free_ablkcipher(e->cipher);
136 kfree(e->data);
137}
138
139static void pohmelfs_crypto_complete(struct crypto_async_request *req, int err)
140{
141 struct pohmelfs_crypto_completion *c = req->data;
142
143 if (err == -EINPROGRESS)
144 return;
145
146 dprintk("%s: req: %p, err: %d.\n", __func__, req, err);
147 c->error = err;
148 complete(&c->complete);
149}
150
151static int pohmelfs_crypto_process(struct ablkcipher_request *req,
152 struct scatterlist *sg_dst, struct scatterlist *sg_src,
153 void *iv, int enc, unsigned long timeout)
154{
155 struct pohmelfs_crypto_completion complete;
156 int err;
157
158 init_completion(&complete.complete);
159 complete.error = -EINPROGRESS;
160
161 ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
162 pohmelfs_crypto_complete, &complete);
163
164 ablkcipher_request_set_crypt(req, sg_src, sg_dst, sg_src->length, iv);
165
166 if (enc)
167 err = crypto_ablkcipher_encrypt(req);
168 else
169 err = crypto_ablkcipher_decrypt(req);
170
171 switch (err) {
172 case -EINPROGRESS:
173 case -EBUSY:
174 err = wait_for_completion_interruptible_timeout(&complete.complete,
175 timeout);
176 if (!err)
177 err = -ETIMEDOUT;
178 else if (err > 0)
179 err = complete.error;
180 break;
181 default:
182 break;
183 }
184
185 return err;
186}
187
188int pohmelfs_crypto_process_input_data(struct pohmelfs_crypto_engine *e, u64 cmd_iv,
189 void *data, struct page *page, unsigned int size)
190{
191 int err;
192 struct scatterlist sg;
193
194 if (!e->cipher && !e->hash)
195 return 0;
196
197 dprintk("%s: eng: %p, iv: %llx, data: %p, page: %p/%lu, size: %u.\n",
198 __func__, e, cmd_iv, data, page, (page) ? page->index : 0, size);
199
200 if (data) {
201 sg_init_one(&sg, data, size);
202 } else {
203 sg_init_table(&sg, 1);
204 sg_set_page(&sg, page, size, 0);
205 }
206
207 if (e->cipher) {
208 struct ablkcipher_request *req = e->data + crypto_hash_digestsize(e->hash);
209 u8 iv[32];
210
211 memset(iv, 0, sizeof(iv));
212 memcpy(iv, &cmd_iv, sizeof(cmd_iv));
213
214 ablkcipher_request_set_tfm(req, e->cipher);
215
216 err = pohmelfs_crypto_process(req, &sg, &sg, iv, 0, e->timeout);
217 if (err)
218 goto err_out_exit;
219 }
220
221 if (e->hash) {
222 struct hash_desc desc;
223 void *dst = e->data + e->size/2;
224
225 desc.tfm = e->hash;
226 desc.flags = 0;
227
228 err = crypto_hash_init(&desc);
229 if (err)
230 goto err_out_exit;
231
232 err = crypto_hash_update(&desc, &sg, size);
233 if (err)
234 goto err_out_exit;
235
236 err = crypto_hash_final(&desc, dst);
237 if (err)
238 goto err_out_exit;
239
240 err = !!memcmp(dst, e->data, crypto_hash_digestsize(e->hash));
241
242 if (err) {
243#ifdef CONFIG_POHMELFS_DEBUG
244 unsigned int i;
245 unsigned char *recv = e->data, *calc = dst;
246
247 dprintk("%s: eng: %p, hash: %p, cipher: %p: iv : %llx, hash mismatch (recv/calc): ",
248 __func__, e, e->hash, e->cipher, cmd_iv);
249 for (i = 0; i < crypto_hash_digestsize(e->hash); ++i) {
250#if 0
251 dprintka("%02x ", recv[i]);
252 if (recv[i] != calc[i]) {
253 dprintka("| calc byte: %02x.\n", calc[i]);
254 break;
255 }
256#else
257 dprintka("%02x/%02x ", recv[i], calc[i]);
258#endif
259 }
260 dprintk("\n");
261#endif
262 goto err_out_exit;
263 } else {
264 dprintk("%s: eng: %p, hash: %p, cipher: %p: hashes matched.\n",
265 __func__, e, e->hash, e->cipher);
266 }
267 }
268
269 dprintk("%s: eng: %p, size: %u, hash: %p, cipher: %p: completed.\n",
270 __func__, e, e->size, e->hash, e->cipher);
271
272 return 0;
273
274err_out_exit:
275 dprintk("%s: eng: %p, hash: %p, cipher: %p: err: %d.\n",
276 __func__, e, e->hash, e->cipher, err);
277 return err;
278}
279
280static int pohmelfs_trans_iter(struct netfs_trans *t, struct pohmelfs_crypto_engine *e,
281 int (*iterator) (struct pohmelfs_crypto_engine *e,
282 struct scatterlist *dst,
283 struct scatterlist *src))
284{
285 void *data = t->iovec.iov_base + sizeof(struct netfs_cmd) + t->psb->crypto_attached_size;
286 unsigned int size = t->iovec.iov_len - sizeof(struct netfs_cmd) - t->psb->crypto_attached_size;
287 struct netfs_cmd *cmd = data;
288 unsigned int sz, pages = t->attached_pages, i, csize, cmd_cmd, dpage_idx;
289 struct scatterlist sg_src, sg_dst;
290 int err;
291
292 while (size) {
293 cmd = data;
294 cmd_cmd = __be16_to_cpu(cmd->cmd);
295 csize = __be32_to_cpu(cmd->size);
296 cmd->iv = __cpu_to_be64(e->iv);
297
298 if (cmd_cmd == NETFS_READ_PAGES || cmd_cmd == NETFS_READ_PAGE)
299 csize = __be16_to_cpu(cmd->ext);
300
301 sz = csize + __be16_to_cpu(cmd->cpad) + sizeof(struct netfs_cmd);
302
303 dprintk("%s: size: %u, sz: %u, cmd_size: %u, cmd_cpad: %u.\n",
304 __func__, size, sz, __be32_to_cpu(cmd->size), __be16_to_cpu(cmd->cpad));
305
306 data += sz;
307 size -= sz;
308
309 sg_init_one(&sg_src, cmd->data, sz - sizeof(struct netfs_cmd));
310 sg_init_one(&sg_dst, cmd->data, sz - sizeof(struct netfs_cmd));
311
312 err = iterator(e, &sg_dst, &sg_src);
313 if (err)
314 return err;
315 }
316
317 if (!pages)
318 return 0;
319
320 dpage_idx = 0;
321 for (i = 0; i < t->page_num; ++i) {
322 struct page *page = t->pages[i];
323 struct page *dpage = e->pages[dpage_idx];
324
325 if (!page)
326 continue;
327
328 sg_init_table(&sg_src, 1);
329 sg_init_table(&sg_dst, 1);
330 sg_set_page(&sg_src, page, page_private(page), 0);
331 sg_set_page(&sg_dst, dpage, page_private(page), 0);
332
333 err = iterator(e, &sg_dst, &sg_src);
334 if (err)
335 return err;
336
337 pages--;
338 if (!pages)
339 break;
340 dpage_idx++;
341 }
342
343 return 0;
344}
345
346static int pohmelfs_encrypt_iterator(struct pohmelfs_crypto_engine *e,
347 struct scatterlist *sg_dst, struct scatterlist *sg_src)
348{
349 struct ablkcipher_request *req = e->data;
350 u8 iv[32];
351
352 memset(iv, 0, sizeof(iv));
353
354 memcpy(iv, &e->iv, sizeof(e->iv));
355
356 return pohmelfs_crypto_process(req, sg_dst, sg_src, iv, 1, e->timeout);
357}
358
359static int pohmelfs_encrypt(struct pohmelfs_crypto_thread *tc)
360{
361 struct netfs_trans *t = tc->trans;
362 struct pohmelfs_crypto_engine *e = &tc->eng;
363 struct ablkcipher_request *req = e->data;
364
365 memset(req, 0, sizeof(struct ablkcipher_request));
366 ablkcipher_request_set_tfm(req, e->cipher);
367
368 e->iv = pohmelfs_gen_iv(t);
369
370 return pohmelfs_trans_iter(t, e, pohmelfs_encrypt_iterator);
371}
372
373static int pohmelfs_hash_iterator(struct pohmelfs_crypto_engine *e,
374 struct scatterlist *sg_dst, struct scatterlist *sg_src)
375{
376 return crypto_hash_update(e->data, sg_src, sg_src->length);
377}
378
379static int pohmelfs_hash(struct pohmelfs_crypto_thread *tc)
380{
381 struct pohmelfs_crypto_engine *e = &tc->eng;
382 struct hash_desc *desc = e->data;
383 unsigned char *dst = tc->trans->iovec.iov_base + sizeof(struct netfs_cmd);
384 int err;
385
386 desc->tfm = e->hash;
387 desc->flags = 0;
388
389 err = crypto_hash_init(desc);
390 if (err)
391 return err;
392
393 err = pohmelfs_trans_iter(tc->trans, e, pohmelfs_hash_iterator);
394 if (err)
395 return err;
396
397 err = crypto_hash_final(desc, dst);
398 if (err)
399 return err;
400
401 {
402 unsigned int i;
403 dprintk("%s: ", __func__);
404 for (i = 0; i < tc->psb->crypto_attached_size; ++i)
405 dprintka("%02x ", dst[i]);
406 dprintka("\n");
407 }
408
409 return 0;
410}
411
412static void pohmelfs_crypto_pages_free(struct pohmelfs_crypto_engine *e)
413{
414 unsigned int i;
415
416 for (i = 0; i < e->page_num; ++i)
417 __free_page(e->pages[i]);
418 kfree(e->pages);
419}
420
421static int pohmelfs_crypto_pages_alloc(struct pohmelfs_crypto_engine *e, struct pohmelfs_sb *psb)
422{
423 unsigned int i;
424
425 e->pages = kmalloc(psb->trans_max_pages * sizeof(struct page *), GFP_KERNEL);
426 if (!e->pages)
427 return -ENOMEM;
428
429 for (i = 0; i < psb->trans_max_pages; ++i) {
430 e->pages[i] = alloc_page(GFP_KERNEL);
431 if (!e->pages[i])
432 break;
433 }
434
435 e->page_num = i;
436 if (!e->page_num)
437 goto err_out_free;
438
439 return 0;
440
441err_out_free:
442 kfree(e->pages);
443 return -ENOMEM;
444}
445
446static void pohmelfs_sys_crypto_exit_one(struct pohmelfs_crypto_thread *t)
447{
448 struct pohmelfs_sb *psb = t->psb;
449
450 if (t->thread)
451 kthread_stop(t->thread);
452
453 mutex_lock(&psb->crypto_thread_lock);
454 list_del(&t->thread_entry);
455 psb->crypto_thread_num--;
456 mutex_unlock(&psb->crypto_thread_lock);
457
458 pohmelfs_crypto_engine_exit(&t->eng);
459 pohmelfs_crypto_pages_free(&t->eng);
460 kfree(t);
461}
462
463static int pohmelfs_crypto_finish(struct netfs_trans *t, struct pohmelfs_sb *psb, int err)
464{
465 struct netfs_cmd *cmd = t->iovec.iov_base;
466 netfs_convert_cmd(cmd);
467
468 if (likely(!err))
469 err = netfs_trans_finish_send(t, psb);
470
471 t->result = err;
472 netfs_trans_put(t);
473
474 return err;
475}
476
477void pohmelfs_crypto_thread_make_ready(struct pohmelfs_crypto_thread *th)
478{
479 struct pohmelfs_sb *psb = th->psb;
480
481 th->page = NULL;
482 th->trans = NULL;
483
484 mutex_lock(&psb->crypto_thread_lock);
485 list_move_tail(&th->thread_entry, &psb->crypto_ready_list);
486 mutex_unlock(&psb->crypto_thread_lock);
487 wake_up(&psb->wait);
488}
489
490static int pohmelfs_crypto_thread_trans(struct pohmelfs_crypto_thread *t)
491{
492 struct netfs_trans *trans;
493 int err = 0;
494
495 trans = t->trans;
496 trans->eng = NULL;
497
498 if (t->eng.hash) {
499 err = pohmelfs_hash(t);
500 if (err)
501 goto out_complete;
502 }
503
504 if (t->eng.cipher) {
505 err = pohmelfs_encrypt(t);
506 if (err)
507 goto out_complete;
508 trans->eng = &t->eng;
509 }
510
511out_complete:
512 t->page = NULL;
513 t->trans = NULL;
514
515 if (!trans->eng)
516 pohmelfs_crypto_thread_make_ready(t);
517
518 pohmelfs_crypto_finish(trans, t->psb, err);
519 return err;
520}
521
522static int pohmelfs_crypto_thread_page(struct pohmelfs_crypto_thread *t)
523{
524 struct pohmelfs_crypto_engine *e = &t->eng;
525 struct page *page = t->page;
526 int err;
527
528 WARN_ON(!PageChecked(page));
529
530 err = pohmelfs_crypto_process_input_data(e, e->iv, NULL, page, t->size);
531 if (!err)
532 SetPageUptodate(page);
533 else
534 SetPageError(page);
535 unlock_page(page);
536 page_cache_release(page);
537
538 pohmelfs_crypto_thread_make_ready(t);
539
540 return err;
541}
542
543static int pohmelfs_crypto_thread_func(void *data)
544{
545 struct pohmelfs_crypto_thread *t = data;
546
547 while (!kthread_should_stop()) {
548 wait_event_interruptible(t->wait, kthread_should_stop() ||
549 t->trans || t->page);
550
551 if (kthread_should_stop())
552 break;
553
554 if (!t->trans && !t->page)
555 continue;
556
557 dprintk("%s: thread: %p, trans: %p, page: %p.\n",
558 __func__, t, t->trans, t->page);
559
560 if (t->trans)
561 pohmelfs_crypto_thread_trans(t);
562 else if (t->page)
563 pohmelfs_crypto_thread_page(t);
564 }
565
566 return 0;
567}
568
569static void pohmelfs_crypto_flush(struct pohmelfs_sb *psb, struct list_head *head)
570{
571 while (!list_empty(head)) {
572 struct pohmelfs_crypto_thread *t = NULL;
573
574 mutex_lock(&psb->crypto_thread_lock);
575 if (!list_empty(head)) {
576 t = list_first_entry(head, struct pohmelfs_crypto_thread, thread_entry);
577 list_del_init(&t->thread_entry);
578 }
579 mutex_unlock(&psb->crypto_thread_lock);
580
581 if (t)
582 pohmelfs_sys_crypto_exit_one(t);
583 }
584}
585
586static void pohmelfs_sys_crypto_exit(struct pohmelfs_sb *psb)
587{
588 while (!list_empty(&psb->crypto_active_list) || !list_empty(&psb->crypto_ready_list)) {
589 dprintk("%s: crypto_thread_num: %u.\n", __func__, psb->crypto_thread_num);
590 pohmelfs_crypto_flush(psb, &psb->crypto_active_list);
591 pohmelfs_crypto_flush(psb, &psb->crypto_ready_list);
592 }
593}
594
595static int pohmelfs_sys_crypto_init(struct pohmelfs_sb *psb)
596{
597 unsigned int i;
598 struct pohmelfs_crypto_thread *t;
599 struct pohmelfs_config *c;
600 struct netfs_state *st;
601 int err;
602
603 list_for_each_entry(c, &psb->state_list, config_entry) {
604 st = &c->state;
605
606 err = pohmelfs_crypto_engine_init(&st->eng, psb);
607 if (err)
608 goto err_out_exit;
609
610 dprintk("%s: st: %p, eng: %p, hash: %p, cipher: %p.\n",
611 __func__, st, &st->eng, &st->eng.hash, &st->eng.cipher);
612 }
613
614 for (i = 0; i < psb->crypto_thread_num; ++i) {
615 err = -ENOMEM;
616 t = kzalloc(sizeof(struct pohmelfs_crypto_thread), GFP_KERNEL);
617 if (!t)
618 goto err_out_free_state_engines;
619
620 init_waitqueue_head(&t->wait);
621
622 t->psb = psb;
623 t->trans = NULL;
624 t->eng.thread = t;
625
626 err = pohmelfs_crypto_engine_init(&t->eng, psb);
627 if (err)
628 goto err_out_free_state_engines;
629
630 err = pohmelfs_crypto_pages_alloc(&t->eng, psb);
631 if (err)
632 goto err_out_free;
633
634 t->thread = kthread_run(pohmelfs_crypto_thread_func, t,
635 "pohmelfs-crypto-%d-%d", psb->idx, i);
636 if (IS_ERR(t->thread)) {
637 err = PTR_ERR(t->thread);
638 t->thread = NULL;
639 goto err_out_free;
640 }
641
642 if (t->eng.cipher)
643 psb->crypto_align_size = crypto_ablkcipher_blocksize(t->eng.cipher);
644
645 mutex_lock(&psb->crypto_thread_lock);
646 list_add_tail(&t->thread_entry, &psb->crypto_ready_list);
647 mutex_unlock(&psb->crypto_thread_lock);
648 }
649
650 psb->crypto_thread_num = i;
651 return 0;
652
653err_out_free:
654 pohmelfs_sys_crypto_exit_one(t);
655err_out_free_state_engines:
656 list_for_each_entry(c, &psb->state_list, config_entry) {
657 st = &c->state;
658 pohmelfs_crypto_engine_exit(&st->eng);
659 }
660err_out_exit:
661 pohmelfs_sys_crypto_exit(psb);
662 return err;
663}
664
665void pohmelfs_crypto_exit(struct pohmelfs_sb *psb)
666{
667 pohmelfs_sys_crypto_exit(psb);
668
669 kfree(psb->hash_string);
670 kfree(psb->cipher_string);
671}
672
673static int pohmelfs_crypt_init_complete(struct page **pages, unsigned int page_num,
674 void *private, int err)
675{
676 struct pohmelfs_sb *psb = private;
677
678 psb->flags = -err;
679 dprintk("%s: err: %d.\n", __func__, err);
680
681 wake_up(&psb->wait);
682
683 return err;
684}
685
686static int pohmelfs_crypto_init_handshake(struct pohmelfs_sb *psb)
687{
688 struct netfs_trans *t;
689 struct netfs_crypto_capabilities *cap;
690 struct netfs_cmd *cmd;
691 char *str;
692 int err = -ENOMEM, size;
693
694 size = sizeof(struct netfs_crypto_capabilities) +
695 psb->cipher_strlen + psb->hash_strlen + 2; /* two terminating null bytes */
696
697 t = netfs_trans_alloc(psb, size, 0, 0);
698 if (!t)
699 goto err_out_exit;
700
701 t->complete = pohmelfs_crypt_init_complete;
702 t->private = psb;
703
704 cmd = netfs_trans_current(t);
705 cap = (struct netfs_crypto_capabilities *)(cmd + 1);
706 str = (char *)(cap + 1);
707
708 cmd->cmd = NETFS_CAPABILITIES;
709 cmd->id = POHMELFS_CRYPTO_CAPABILITIES;
710 cmd->size = size;
711 cmd->start = 0;
712 cmd->ext = 0;
713 cmd->csize = 0;
714
715 netfs_convert_cmd(cmd);
716 netfs_trans_update(cmd, t, size);
717
718 cap->hash_strlen = psb->hash_strlen;
719 if (cap->hash_strlen) {
720 sprintf(str, "%s", psb->hash_string);
721 str += cap->hash_strlen;
722 }
723
724 cap->cipher_strlen = psb->cipher_strlen;
725 cap->cipher_keysize = psb->cipher_keysize;
726 if (cap->cipher_strlen)
727 sprintf(str, "%s", psb->cipher_string);
728
729 netfs_convert_crypto_capabilities(cap);
730
731 psb->flags = ~0;
732 err = netfs_trans_finish(t, psb);
733 if (err)
734 goto err_out_exit;
735
736 err = wait_event_interruptible_timeout(psb->wait, (psb->flags != ~0),
737 psb->wait_on_page_timeout);
738 if (!err)
739 err = -ETIMEDOUT;
740 else if (err > 0)
741 err = -psb->flags;
742
743 if (!err)
744 psb->perform_crypto = 1;
745 psb->flags = 0;
746
747 /*
748 * At this point the NETFS_CAPABILITIES response command
749 * should set up the superblock in a way acceptable to both
750 * client and server, so if the server refuses the connection,
751 * it will send an error in the transaction response.
752 */
753
754 if (err)
755 goto err_out_exit;
756
757 return 0;
758
759err_out_exit:
760 return err;
761}
762
763int pohmelfs_crypto_init(struct pohmelfs_sb *psb)
764{
765 int err;
766
767 if (!psb->cipher_string && !psb->hash_string)
768 return 0;
769
770 err = pohmelfs_crypto_init_handshake(psb);
771 if (err)
772 return err;
773
774 err = pohmelfs_sys_crypto_init(psb);
775 if (err)
776 return err;
777
778 return 0;
779}
780
781static int pohmelfs_crypto_thread_get(struct pohmelfs_sb *psb,
782 int (*action)(struct pohmelfs_crypto_thread *t, void *data), void *data)
783{
784 struct pohmelfs_crypto_thread *t = NULL;
785 int err;
786
787 while (!t) {
788 err = wait_event_interruptible_timeout(psb->wait,
789 !list_empty(&psb->crypto_ready_list),
790 psb->wait_on_page_timeout);
791
792 t = NULL;
793 err = 0;
794 mutex_lock(&psb->crypto_thread_lock);
795 if (!list_empty(&psb->crypto_ready_list)) {
796 t = list_entry(psb->crypto_ready_list.prev,
797 struct pohmelfs_crypto_thread,
798 thread_entry);
799
800 list_move_tail(&t->thread_entry,
801 &psb->crypto_active_list);
802
803 action(t, data);
804 wake_up(&t->wait);
805
806 }
807 mutex_unlock(&psb->crypto_thread_lock);
808 }
809
810 return err;
811}
812
813static int pohmelfs_trans_crypt_action(struct pohmelfs_crypto_thread *t, void *data)
814{
815 struct netfs_trans *trans = data;
816
817 netfs_trans_get(trans);
818 t->trans = trans;
819
820 dprintk("%s: t: %p, gen: %u, thread: %p.\n", __func__, trans, trans->gen, t);
821 return 0;
822}
823
824int pohmelfs_trans_crypt(struct netfs_trans *trans, struct pohmelfs_sb *psb)
825{
826 if ((!psb->hash_string && !psb->cipher_string) || !psb->perform_crypto) {
827 netfs_trans_get(trans);
828 return pohmelfs_crypto_finish(trans, psb, 0);
829 }
830
831 return pohmelfs_crypto_thread_get(psb, pohmelfs_trans_crypt_action, trans);
832}
833
834struct pohmelfs_crypto_input_action_data {
835 struct page *page;
836 struct pohmelfs_crypto_engine *e;
837 u64 iv;
838 unsigned int size;
839};
840
841static int pohmelfs_crypt_input_page_action(struct pohmelfs_crypto_thread *t, void *data)
842{
843 struct pohmelfs_crypto_input_action_data *act = data;
844
845 memcpy(t->eng.data, act->e->data, t->psb->crypto_attached_size);
846
847 t->size = act->size;
848 t->eng.iv = act->iv;
849
850 t->page = act->page;
851 return 0;
852}
853
854int pohmelfs_crypto_process_input_page(struct pohmelfs_crypto_engine *e,
855 struct page *page, unsigned int size, u64 iv)
856{
857 struct inode *inode = page->mapping->host;
858 struct pohmelfs_crypto_input_action_data act;
859 int err = -ENOENT;
860
861 act.page = page;
862 act.e = e;
863 act.size = size;
864 act.iv = iv;
865
866 err = pohmelfs_crypto_thread_get(POHMELFS_SB(inode->i_sb),
867 pohmelfs_crypt_input_page_action, &act);
868 if (err)
869 goto err_out_exit;
870
871 return 0;
872
873err_out_exit:
874 SetPageUptodate(page);
875 page_cache_release(page);
876
877 return err;
878}
diff --git a/drivers/staging/pohmelfs/dir.c b/drivers/staging/pohmelfs/dir.c
deleted file mode 100644
index 2ee4491b7136..000000000000
--- a/drivers/staging/pohmelfs/dir.c
+++ /dev/null
@@ -1,1102 +0,0 @@
1/*
2 * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include <linux/kernel.h>
17#include <linux/fs.h>
18#include <linux/jhash.h>
19#include <linux/namei.h>
20#include <linux/slab.h>
21#include <linux/pagemap.h>
22
23#include "netfs.h"
24
25static int pohmelfs_cmp_hash(struct pohmelfs_name *n, u32 hash)
26{
27 if (n->hash > hash)
28 return -1;
29 if (n->hash < hash)
30 return 1;
31
32 return 0;
33}
34
35static struct pohmelfs_name *pohmelfs_search_hash_unprecise(struct pohmelfs_inode *pi, u32 hash)
36{
37 struct rb_node *n = pi->hash_root.rb_node;
38 struct pohmelfs_name *tmp = NULL;
39 int cmp;
40
41 while (n) {
42 tmp = rb_entry(n, struct pohmelfs_name, hash_node);
43
44 cmp = pohmelfs_cmp_hash(tmp, hash);
45 if (cmp < 0)
46 n = n->rb_left;
47 else if (cmp > 0)
48 n = n->rb_right;
49 else
50 break;
51
52 }
53
54 return tmp;
55}
56
57struct pohmelfs_name *pohmelfs_search_hash(struct pohmelfs_inode *pi, u32 hash)
58{
59 struct pohmelfs_name *tmp;
60
61 tmp = pohmelfs_search_hash_unprecise(pi, hash);
62 if (tmp && (tmp->hash == hash))
63 return tmp;
64
65 return NULL;
66}
67
68static void __pohmelfs_name_del(struct pohmelfs_inode *parent, struct pohmelfs_name *node)
69{
70 rb_erase(&node->hash_node, &parent->hash_root);
71}
72
73/*
74 * Remove name cache entry from its caches and free it.
75 */
76static void pohmelfs_name_free(struct pohmelfs_inode *parent, struct pohmelfs_name *node)
77{
78 __pohmelfs_name_del(parent, node);
79 list_del(&node->sync_create_entry);
80 kfree(node);
81}
82
83static struct pohmelfs_name *pohmelfs_insert_hash(struct pohmelfs_inode *pi,
84 struct pohmelfs_name *new)
85{
86 struct rb_node **n = &pi->hash_root.rb_node, *parent = NULL;
87 struct pohmelfs_name *ret = NULL, *tmp;
88 int cmp;
89
90 while (*n) {
91 parent = *n;
92
93 tmp = rb_entry(parent, struct pohmelfs_name, hash_node);
94
95 cmp = pohmelfs_cmp_hash(tmp, new->hash);
96 if (cmp < 0)
97 n = &parent->rb_left;
98 else if (cmp > 0)
99 n = &parent->rb_right;
100 else {
101 ret = tmp;
102 break;
103 }
104 }
105
106 if (ret) {
107 printk("%s: exist: parent: %llu, ino: %llu, hash: %x, len: %u, data: '%s', "
108 "new: ino: %llu, hash: %x, len: %u, data: '%s'.\n",
109 __func__, pi->ino,
110 ret->ino, ret->hash, ret->len, ret->data,
111 new->ino, new->hash, new->len, new->data);
112 ret->ino = new->ino;
113 return ret;
114 }
115
116 rb_link_node(&new->hash_node, parent, n);
117 rb_insert_color(&new->hash_node, &pi->hash_root);
118
119 return NULL;
120}
121
122/*
123 * Free name cache for given inode.
124 */
125void pohmelfs_free_names(struct pohmelfs_inode *parent)
126{
127 struct rb_node *rb_node;
128 struct pohmelfs_name *n;
129
130 for (rb_node = rb_first(&parent->hash_root); rb_node;) {
131 n = rb_entry(rb_node, struct pohmelfs_name, hash_node);
132 rb_node = rb_next(rb_node);
133
134 pohmelfs_name_free(parent, n);
135 }
136}
137
138static void pohmelfs_fix_offset(struct pohmelfs_inode *parent, struct pohmelfs_name *node)
139{
140 parent->total_len -= node->len;
141}
142
143/*
144 * Free name cache entry helper.
145 */
146void pohmelfs_name_del(struct pohmelfs_inode *parent, struct pohmelfs_name *node)
147{
148 pohmelfs_fix_offset(parent, node);
149 pohmelfs_name_free(parent, node);
150}
151
152/*
153 * Insert a new name cache entry into the hash cache.
154 */
155static int pohmelfs_insert_name(struct pohmelfs_inode *parent, struct pohmelfs_name *n)
156{
157 struct pohmelfs_name *name;
158
159 name = pohmelfs_insert_hash(parent, n);
160 if (name)
161 return -EEXIST;
162
163 parent->total_len += n->len;
164 list_add_tail(&n->sync_create_entry, &parent->sync_create_list);
165
166 return 0;
167}
168
169/*
170 * Allocate new name cache entry.
171 */
172static struct pohmelfs_name *pohmelfs_name_alloc(unsigned int len)
173{
174 struct pohmelfs_name *n;
175
176 n = kzalloc(sizeof(struct pohmelfs_name) + len, GFP_KERNEL);
177 if (!n)
178 return NULL;
179
180 INIT_LIST_HEAD(&n->sync_create_entry);
181
182 n->data = (char *)(n+1);
183
184 return n;
185}
186
187/*
188 * Add new name entry into directory's cache.
189 */
190static int pohmelfs_add_dir(struct pohmelfs_sb *psb, struct pohmelfs_inode *parent,
191 struct pohmelfs_inode *npi, struct qstr *str, unsigned int mode, int link)
192{
193 int err = -ENOMEM;
194 struct pohmelfs_name *n;
195
196 n = pohmelfs_name_alloc(str->len + 1);
197 if (!n)
198 goto err_out_exit;
199
200 n->ino = npi->ino;
201 n->mode = mode;
202 n->len = str->len;
203 n->hash = str->hash;
204 sprintf(n->data, "%s", str->name);
205
206 mutex_lock(&parent->offset_lock);
207 err = pohmelfs_insert_name(parent, n);
208 mutex_unlock(&parent->offset_lock);
209
210 if (err) {
211 if (err != -EEXIST)
212 goto err_out_free;
213 kfree(n);
214 }
215
216 return 0;
217
218err_out_free:
219 kfree(n);
220err_out_exit:
221 return err;
222}
223
224/*
225 * Create new inode for given parameters (name, inode info, parent).
226 * This does not create the object on the server; it will be synced there during writeback.
227 */
228struct pohmelfs_inode *pohmelfs_new_inode(struct pohmelfs_sb *psb,
229 struct pohmelfs_inode *parent, struct qstr *str,
230 struct netfs_inode_info *info, int link)
231{
232 struct inode *new = NULL;
233 struct pohmelfs_inode *npi;
234 int err = -EEXIST;
235
236 dprintk("%s: creating inode: parent: %llu, ino: %llu, str: %p.\n",
237 __func__, (parent) ? parent->ino : 0, info->ino, str);
238
239 err = -ENOMEM;
240 new = iget_locked(psb->sb, info->ino);
241 if (!new)
242 goto err_out_exit;
243
244 npi = POHMELFS_I(new);
245 npi->ino = info->ino;
246 err = 0;
247
248 if (new->i_state & I_NEW) {
249 dprintk("%s: filling VFS inode: %lu/%llu.\n",
250 __func__, new->i_ino, info->ino);
251 pohmelfs_fill_inode(new, info);
252
253 if (S_ISDIR(info->mode)) {
254 struct qstr s;
255
256 s.name = ".";
257 s.len = 1;
258 s.hash = jhash(s.name, s.len, 0);
259
260 err = pohmelfs_add_dir(psb, npi, npi, &s, info->mode, 0);
261 if (err)
262 goto err_out_put;
263
264 s.name = "..";
265 s.len = 2;
266 s.hash = jhash(s.name, s.len, 0);
267
268 err = pohmelfs_add_dir(psb, npi, (parent) ? parent : npi, &s,
269 (parent) ? parent->vfs_inode.i_mode : npi->vfs_inode.i_mode, 0);
270 if (err)
271 goto err_out_put;
272 }
273 }
274
275 if (str) {
276 if (parent) {
277 err = pohmelfs_add_dir(psb, parent, npi, str, info->mode, link);
278
279 dprintk("%s: %s inserted name: '%s', new_offset: %llu, ino: %llu, parent: %llu.\n",
280 __func__, (err) ? "unsuccessfully" : "successfully",
281 str->name, parent->total_len, info->ino, parent->ino);
282
283 if (err && err != -EEXIST)
284 goto err_out_put;
285 }
286 }
287
288 if (new->i_state & I_NEW) {
289 if (parent)
290 mark_inode_dirty(&parent->vfs_inode);
291 mark_inode_dirty(new);
292 }
293
294 set_bit(NETFS_INODE_OWNED, &npi->state);
295 npi->lock_type = POHMELFS_WRITE_LOCK;
296 unlock_new_inode(new);
297
298 return npi;
299
300err_out_put:
301 printk("%s: putting inode: %p, npi: %p, error: %d.\n", __func__, new, npi, err);
302 iput(new);
303err_out_exit:
304 return ERR_PTR(err);
305}
306
307static int pohmelfs_remote_sync_complete(struct page **pages, unsigned int page_num,
308 void *private, int err)
309{
310 struct pohmelfs_inode *pi = private;
311 struct pohmelfs_sb *psb = POHMELFS_SB(pi->vfs_inode.i_sb);
312
313 dprintk("%s: ino: %llu, err: %d.\n", __func__, pi->ino, err);
314
315 if (err)
316 pi->error = err;
317 wake_up(&psb->wait);
318 pohmelfs_put_inode(pi);
319
320 return err;
321}
322
323/*
324 * Receive directory content from the server.
325 * This should only be done for objects that were not created locally
326 * and have not been synced previously.
327 */
328static int pohmelfs_sync_remote_dir(struct pohmelfs_inode *pi)
329{
330 struct inode *inode = &pi->vfs_inode;
331 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
332 long ret = psb->wait_on_page_timeout;
333 int err;
334
335 dprintk("%s: dir: %llu, state: %lx: remote_synced: %d.\n",
336 __func__, pi->ino, pi->state, test_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state));
337
338 if (test_bit(NETFS_INODE_REMOTE_DIR_SYNCED, &pi->state))
339 return 0;
340
341 if (!igrab(inode)) {
342 err = -ENOENT;
343 goto err_out_exit;
344 }
345
346 err = pohmelfs_meta_command(pi, NETFS_READDIR, NETFS_TRANS_SINGLE_DST,
347 pohmelfs_remote_sync_complete, pi, 0);
348 if (err)
349 goto err_out_exit;
350
351 pi->error = 0;
352 ret = wait_event_interruptible_timeout(psb->wait,
353 test_bit(NETFS_INODE_REMOTE_DIR_SYNCED, &pi->state) || pi->error, ret);
354 dprintk("%s: awake dir: %llu, ret: %ld, err: %d.\n", __func__, pi->ino, ret, pi->error);
355 if (ret <= 0) {
356 err = ret;
357 if (!err)
358 err = -ETIMEDOUT;
359 goto err_out_exit;
360 }
361
362 if (pi->error)
363 return pi->error;
364
365 return 0;
366
367err_out_exit:
368 clear_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state);
369
370 return err;
371}
372
373static int pohmelfs_dir_open(struct inode *inode, struct file *file)
374{
375 file->private_data = NULL;
376 return 0;
377}
378
379/*
380 * VFS readdir callback. Syncs directory content from server if needed,
381 * and provides direntry info to userspace.
382 */
383static int pohmelfs_readdir(struct file *file, void *dirent, filldir_t filldir)
384{
385 struct inode *inode = file->f_path.dentry->d_inode;
386 struct pohmelfs_inode *pi = POHMELFS_I(inode);
387 struct pohmelfs_name *n;
388 struct rb_node *rb_node;
389 int err = 0, mode;
390 u64 len;
391
392 dprintk("%s: parent: %llu, fpos: %llu, hash: %08lx.\n",
393 __func__, pi->ino, (u64)file->f_pos,
394 (unsigned long)file->private_data);
395#if 0
396 err = pohmelfs_data_lock(pi, 0, ~0, POHMELFS_READ_LOCK);
397 if (err)
398 return err;
399#endif
400 err = pohmelfs_sync_remote_dir(pi);
401 if (err)
402 return err;
403
404 if (file->private_data && (file->private_data == (void *)(unsigned long)file->f_pos))
405 return 0;
406
407 mutex_lock(&pi->offset_lock);
408 n = pohmelfs_search_hash_unprecise(pi, (unsigned long)file->private_data);
409
410 while (n) {
411 mode = (n->mode >> 12) & 15;
412
413 dprintk("%s: offset: %llu, parent ino: %llu, name: '%s', len: %u, ino: %llu, "
414 "mode: %o/%o, fpos: %llu, hash: %08x.\n",
415 __func__, file->f_pos, pi->ino, n->data, n->len,
416 n->ino, n->mode, mode, file->f_pos, n->hash);
417
418 file->private_data = (void *)(unsigned long)n->hash;
419
420 len = n->len;
421 err = filldir(dirent, n->data, n->len, file->f_pos, n->ino, mode);
422
423 if (err < 0) {
424 dprintk("%s: err: %d.\n", __func__, err);
425 err = 0;
426 break;
427 }
428
429 file->f_pos += len;
430
431 rb_node = rb_next(&n->hash_node);
432
433 if (!rb_node || (rb_node == &n->hash_node)) {
434 file->private_data = (void *)(unsigned long)file->f_pos;
435 break;
436 }
437
438 n = rb_entry(rb_node, struct pohmelfs_name, hash_node);
439 }
440 mutex_unlock(&pi->offset_lock);
441
442 return err;
443}
444
445static loff_t pohmelfs_dir_lseek(struct file *file, loff_t offset, int origin)
446{
447 file->f_pos = offset;
448 file->private_data = NULL;
449 return offset;
450}
451
452const struct file_operations pohmelfs_dir_fops = {
453 .open = pohmelfs_dir_open,
454 .read = generic_read_dir,
455 .llseek = pohmelfs_dir_lseek,
456 .readdir = pohmelfs_readdir,
457};
458
459/*
460 * Lookup single object on server.
461 */
462static int pohmelfs_lookup_single(struct pohmelfs_inode *parent,
463 struct qstr *str, u64 ino)
464{
465 struct pohmelfs_sb *psb = POHMELFS_SB(parent->vfs_inode.i_sb);
466 long ret = msecs_to_jiffies(5000);
467 int err;
468
469 set_bit(NETFS_COMMAND_PENDING, &parent->state);
470 err = pohmelfs_meta_command_data(parent, parent->ino, NETFS_LOOKUP,
471 (char *)str->name, NETFS_TRANS_SINGLE_DST, NULL, NULL, ino);
472 if (err)
473 goto err_out_exit;
474
475 err = 0;
476 ret = wait_event_interruptible_timeout(psb->wait,
477 !test_bit(NETFS_COMMAND_PENDING, &parent->state), ret);
478 if (ret <= 0) {
479 err = ret;
480 if (!err)
481 err = -ETIMEDOUT;
482 }
483
484 if (err)
485 goto err_out_exit;
486
487 return 0;
488
489err_out_exit:
490 clear_bit(NETFS_COMMAND_PENDING, &parent->state);
491
492 printk("%s: failed: parent: %llu, ino: %llu, name: '%s', err: %d.\n",
493 __func__, parent->ino, ino, str->name, err);
494
495 return err;
496}
497
498/*
499 * VFS lookup callback.
500 * We first try to get the inode number from the local name cache; if we have one,
501 * the inode can be found in the inode cache. If there is no inode or no object in the
502 * local cache, try to look it up on the server. This should only be done for directories
503 * that were not created locally; otherwise the remote server does not know about the
504 * directory at all, so there is no point in asking for it.
505 */
506struct dentry *pohmelfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
507{
508 struct pohmelfs_inode *parent = POHMELFS_I(dir);
509 struct pohmelfs_name *n;
510 struct inode *inode = NULL;
511 unsigned long ino = 0;
512 int err, lock_type = POHMELFS_READ_LOCK, need_lock = 1;
513 struct qstr str = dentry->d_name;
514
515 if ((nd->intent.open.flags & O_ACCMODE) != O_RDONLY)
516 lock_type = POHMELFS_WRITE_LOCK;
517
518 if (test_bit(NETFS_INODE_OWNED, &parent->state)) {
519 if (lock_type == parent->lock_type)
520 need_lock = 0;
521 if ((lock_type == POHMELFS_READ_LOCK) && (parent->lock_type == POHMELFS_WRITE_LOCK))
522 need_lock = 0;
523 }
524
525 if ((lock_type == POHMELFS_READ_LOCK) && !test_bit(NETFS_INODE_REMOTE_DIR_SYNCED, &parent->state))
526 need_lock = 1;
527
528 str.hash = jhash(dentry->d_name.name, dentry->d_name.len, 0);
529
530 mutex_lock(&parent->offset_lock);
531 n = pohmelfs_search_hash(parent, str.hash);
532 if (n)
533 ino = n->ino;
534 mutex_unlock(&parent->offset_lock);
535
536 dprintk("%s: start ino: %lu, inode: %p, name: '%s', hash: %x, parent_state: %lx, need_lock: %d.\n",
537 __func__, ino, inode, str.name, str.hash, parent->state, need_lock);
538
539 if (ino) {
540 inode = ilookup(dir->i_sb, ino);
541 if (inode)
542 goto out;
543 }
544
545 dprintk("%s: no inode dir: %p, dir_ino: %llu, name: '%s', len: %u, dir_state: %lx, ino: %lu.\n",
546 __func__, dir, parent->ino,
547 str.name, str.len, parent->state, ino);
548
549 if (!ino) {
550 if (!need_lock)
551 goto out;
552 }
553
554 err = pohmelfs_data_lock(parent, 0, ~0, lock_type);
555 if (err)
556 goto out;
557
558 err = pohmelfs_lookup_single(parent, &str, ino);
559 if (err)
560 goto out;
561
562 if (!ino) {
563 mutex_lock(&parent->offset_lock);
564 n = pohmelfs_search_hash(parent, str.hash);
565 if (n)
566 ino = n->ino;
567 mutex_unlock(&parent->offset_lock);
568 }
569
570 if (ino) {
571 inode = ilookup(dir->i_sb, ino);
572 dprintk("%s: second lookup ino: %lu, inode: %p, name: '%s', hash: %x.\n",
573 __func__, ino, inode, str.name, str.hash);
574 if (!inode) {
575 dprintk("%s: No inode for ino: %lu, name: '%s', hash: %x.\n",
576 __func__, ino, str.name, str.hash);
577 /* return NULL; */
578 return ERR_PTR(-EACCES);
579 }
580 } else {
581 printk("%s: No inode number : name: '%s', hash: %x.\n",
582 __func__, str.name, str.hash);
583 }
584out:
585 return d_splice_alias(inode, dentry);
586}
587
588/*
589 * Create a new object in the local cache. The object will be synced to the server
590 * during writeback of the given inode.
591 */
592struct pohmelfs_inode *pohmelfs_create_entry_local(struct pohmelfs_sb *psb,
593 struct pohmelfs_inode *parent, struct qstr *str, u64 start, umode_t mode)
594{
595 struct pohmelfs_inode *npi;
596 int err = -ENOMEM;
597 struct netfs_inode_info info;
598
599 dprintk("%s: name: '%s', mode: %ho, start: %llu.\n",
600 __func__, str->name, mode, start);
601
602 info.mode = mode;
603 info.ino = start;
604
605 if (!start)
606 info.ino = pohmelfs_new_ino(psb);
607
608 info.nlink = S_ISDIR(mode) ? 2 : 1;
609 info.uid = current_fsuid();
610 info.gid = current_fsgid();
611 info.size = 0;
612 info.blocksize = 512;
613 info.blocks = 0;
614 info.rdev = 0;
615 info.version = 0;
616
617 npi = pohmelfs_new_inode(psb, parent, str, &info, !!start);
618 if (IS_ERR(npi)) {
619 err = PTR_ERR(npi);
620 goto err_out_unlock;
621 }
622
623 return npi;
624
625err_out_unlock:
626 dprintk("%s: err: %d.\n", __func__, err);
627 return ERR_PTR(err);
628}
629
630/*
631 * Create local object and bind it to dentry.
632 */
633static int pohmelfs_create_entry(struct inode *dir, struct dentry *dentry,
634 u64 start, umode_t mode)
635{
636 struct pohmelfs_sb *psb = POHMELFS_SB(dir->i_sb);
637 struct pohmelfs_inode *npi, *parent;
638 struct qstr str = dentry->d_name;
639 int err;
640
641 parent = POHMELFS_I(dir);
642
643 err = pohmelfs_data_lock(parent, 0, ~0, POHMELFS_WRITE_LOCK);
644 if (err)
645 return err;
646
647 str.hash = jhash(dentry->d_name.name, dentry->d_name.len, 0);
648
649 npi = pohmelfs_create_entry_local(psb, parent, &str, start, mode);
650 if (IS_ERR(npi))
651 return PTR_ERR(npi);
652
653 d_instantiate(dentry, &npi->vfs_inode);
654
655 dprintk("%s: parent: %llu, inode: %llu, name: '%s', parent_nlink: %d, nlink: %d.\n",
656 __func__, parent->ino, npi->ino, dentry->d_name.name,
657 (signed)dir->i_nlink, (signed)npi->vfs_inode.i_nlink);
658
659 return 0;
660}
661
662/*
663 * VFS create and mkdir callbacks.
664 */
665static int pohmelfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
666 struct nameidata *nd)
667{
668 return pohmelfs_create_entry(dir, dentry, 0, mode);
669}
670
671static int pohmelfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
672{
673 int err;
674
675 inode_inc_link_count(dir);
676 err = pohmelfs_create_entry(dir, dentry, 0, mode | S_IFDIR);
677 if (err)
678 inode_dec_link_count(dir);
679
680 return err;
681}
682
683static int pohmelfs_remove_entry(struct inode *dir, struct dentry *dentry)
684{
685 struct pohmelfs_sb *psb = POHMELFS_SB(dir->i_sb);
686 struct inode *inode = dentry->d_inode;
687 struct pohmelfs_inode *parent = POHMELFS_I(dir), *pi = POHMELFS_I(inode);
688 struct pohmelfs_name *n;
689 int err = -ENOENT;
690 struct qstr str = dentry->d_name;
691
692 err = pohmelfs_data_lock(parent, 0, ~0, POHMELFS_WRITE_LOCK);
693 if (err)
694 return err;
695
696 str.hash = jhash(dentry->d_name.name, dentry->d_name.len, 0);
697
698 dprintk("%s: dir_ino: %llu, inode: %llu, name: '%s', nlink: %d.\n",
699 __func__, parent->ino, pi->ino,
700 str.name, (signed)inode->i_nlink);
701
702 BUG_ON(!inode);
703
704 mutex_lock(&parent->offset_lock);
705 n = pohmelfs_search_hash(parent, str.hash);
706 if (n) {
707 pohmelfs_fix_offset(parent, n);
708 if (test_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state))
709 pohmelfs_remove_child(pi, n);
710
711 pohmelfs_name_free(parent, n);
712 err = 0;
713 }
714 mutex_unlock(&parent->offset_lock);
715
716 if (!err) {
717 psb->avail_size += inode->i_size;
718
719 pohmelfs_inode_del_inode(psb, pi);
720
721 mark_inode_dirty(dir);
722
723 inode->i_ctime = dir->i_ctime;
724 if (inode->i_nlink)
725 inode_dec_link_count(inode);
726 }
727
728 return err;
729}
730
731/*
732 * Unlink and rmdir VFS callbacks.
733 */
734static int pohmelfs_unlink(struct inode *dir, struct dentry *dentry)
735{
736 return pohmelfs_remove_entry(dir, dentry);
737}
738
739static int pohmelfs_rmdir(struct inode *dir, struct dentry *dentry)
740{
741 int err;
742 struct inode *inode = dentry->d_inode;
743
744 dprintk("%s: parent: %llu, inode: %llu, name: '%s', parent_nlink: %d, nlink: %d.\n",
745 __func__, POHMELFS_I(dir)->ino, POHMELFS_I(inode)->ino,
746 dentry->d_name.name, (signed)dir->i_nlink, (signed)inode->i_nlink);
747
748 err = pohmelfs_remove_entry(dir, dentry);
749 if (!err) {
750 inode_dec_link_count(dir);
751 inode_dec_link_count(inode);
752 }
753
754 return err;
755}
756
757/*
758 * Link creation is synchronous.
759 * I'm lazy.
760 * Earth is somewhat round.
761 */
762static int pohmelfs_create_link(struct pohmelfs_inode *parent, struct qstr *obj,
763 struct pohmelfs_inode *target, struct qstr *tstr)
764{
765 struct super_block *sb = parent->vfs_inode.i_sb;
766 struct pohmelfs_sb *psb = POHMELFS_SB(sb);
767 struct netfs_cmd *cmd;
768 struct netfs_trans *t;
769 void *data;
770 int err, parent_len, target_len = 0, cur_len, path_size = 0;
771
772 err = pohmelfs_data_lock(parent, 0, ~0, POHMELFS_WRITE_LOCK);
773 if (err)
774 return err;
775
776 err = sb->s_op->write_inode(&parent->vfs_inode, 0);
777 if (err)
778 goto err_out_exit;
779
780 if (tstr)
781 target_len = tstr->len;
782
783 parent_len = pohmelfs_path_length(parent);
784 if (target)
785 target_len += pohmelfs_path_length(target);
786
787 if (parent_len < 0) {
788 err = parent_len;
789 goto err_out_exit;
790 }
791
792 if (target_len < 0) {
793 err = target_len;
794 goto err_out_exit;
795 }
796
797 t = netfs_trans_alloc(psb, parent_len + target_len + obj->len + 2, 0, 0);
798 if (!t) {
799 err = -ENOMEM;
800 goto err_out_exit;
801 }
802 cur_len = netfs_trans_cur_len(t);
803
804 cmd = netfs_trans_current(t);
805 if (IS_ERR(cmd)) {
806 err = PTR_ERR(cmd);
807 goto err_out_free;
808 }
809
810 data = (void *)(cmd + 1);
811 cur_len -= sizeof(struct netfs_cmd);
812
813 err = pohmelfs_construct_path_string(parent, data, parent_len);
814 if (err > 0) {
815 /* Do not place null-byte before the slash */
816 path_size = err - 1;
817 cur_len -= path_size;
818
819 err = snprintf(data + path_size, cur_len, "/%s|", obj->name);
820
821 path_size += err;
822 cur_len -= err;
823
824 cmd->ext = path_size - 1; /* No | symbol */
825
826 if (target) {
827 err = pohmelfs_construct_path_string(target, data + path_size, target_len);
828 if (err > 0) {
829 path_size += err;
830 cur_len -= err;
831 }
832 }
833 }
834
835 if (err < 0)
836 goto err_out_free;
837
838 cmd->start = 0;
839
840 if (!target && tstr) {
841 if (tstr->len > cur_len - 1) {
842 err = -ENAMETOOLONG;
843 goto err_out_free;
844 }
845
846 err = snprintf(data + path_size, cur_len, "%s", tstr->name) + 1; /* 0-byte */
847 path_size += err;
848 cur_len -= err;
849 cmd->start = 1;
850 }
851
852 dprintk("%s: parent: %llu, obj: '%s', target_inode: %llu, target_str: '%s', full: '%s'.\n",
853 __func__, parent->ino, obj->name, (target) ? target->ino : 0, (tstr) ? tstr->name : NULL,
854 (char *)data);
855
856 cmd->cmd = NETFS_LINK;
857 cmd->size = path_size;
858 cmd->id = parent->ino;
859
860 netfs_convert_cmd(cmd);
861
862 netfs_trans_update(cmd, t, path_size);
863
864 err = netfs_trans_finish(t, psb);
865 if (err)
866 goto err_out_exit;
867
868 return 0;
869
870err_out_free:
871 t->result = err;
872 netfs_trans_put(t);
873err_out_exit:
874 return err;
875}
876
877/*
878 * VFS hard and soft link callbacks.
879 */
880static int pohmelfs_link(struct dentry *old_dentry, struct inode *dir,
881 struct dentry *dentry)
882{
883 struct inode *inode = old_dentry->d_inode;
884 struct pohmelfs_inode *pi = POHMELFS_I(inode);
885 int err;
886 struct qstr str = dentry->d_name;
887
888 str.hash = jhash(dentry->d_name.name, dentry->d_name.len, 0);
889
890 err = inode->i_sb->s_op->write_inode(inode, 0);
891 if (err)
892 return err;
893
894 err = pohmelfs_create_link(POHMELFS_I(dir), &str, pi, NULL);
895 if (err)
896 return err;
897
898 return pohmelfs_create_entry(dir, dentry, pi->ino, inode->i_mode);
899}
900
901static int pohmelfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
902{
903 struct qstr sym_str;
904 struct qstr str = dentry->d_name;
905 struct inode *inode;
906 int err;
907
908 str.hash = jhash(dentry->d_name.name, dentry->d_name.len, 0);
909
910 sym_str.name = symname;
911 sym_str.len = strlen(symname);
912
913 err = pohmelfs_create_link(POHMELFS_I(dir), &str, NULL, &sym_str);
914 if (err)
915 goto err_out_exit;
916
917 err = pohmelfs_create_entry(dir, dentry, 0, S_IFLNK | S_IRWXU | S_IRWXG | S_IRWXO);
918 if (err)
919 goto err_out_exit;
920
921 inode = dentry->d_inode;
922
923 err = page_symlink(inode, symname, sym_str.len + 1);
924 if (err)
925 goto err_out_put;
926
927 return 0;
928
929err_out_put:
930 iput(inode);
931err_out_exit:
932 return err;
933}
934
935static int pohmelfs_send_rename(struct pohmelfs_inode *pi, struct pohmelfs_inode *parent,
936 struct qstr *str)
937{
938 int path_len, err, total_len = 0, inode_len, parent_len;
939 char *path;
940 struct netfs_trans *t;
941 struct netfs_cmd *cmd;
942 struct pohmelfs_sb *psb = POHMELFS_SB(pi->vfs_inode.i_sb);
943
944 parent_len = pohmelfs_path_length(parent);
945 inode_len = pohmelfs_path_length(pi);
946
947 if (parent_len < 0 || inode_len < 0)
948 return -EINVAL;
949
950 path_len = parent_len + inode_len + str->len + 3;
951
952 t = netfs_trans_alloc(psb, path_len, 0, 0);
953 if (!t)
954 return -ENOMEM;
955
956 cmd = netfs_trans_current(t);
957 path = (char *)(cmd + 1);
958
959 err = pohmelfs_construct_path_string(pi, path, inode_len);
960 if (err < 0)
961 goto err_out_unlock;
962
963 cmd->ext = err;
964
965 path += err;
966 total_len += err;
967 path_len -= err;
968
969 *path = '|';
970 path++;
971 total_len++;
972 path_len--;
973
974 err = pohmelfs_construct_path_string(parent, path, parent_len);
975 if (err < 0)
976 goto err_out_unlock;
977
978 /*
979 * Do not place a null-byte before the final slash and the name.
980 */
981 err--;
982 path += err;
983 total_len += err;
984 path_len -= err;
985
986 err = snprintf(path, path_len - 1, "/%s", str->name);
987
988 total_len += err + 1; /* terminating null byte */
989 path_len -= err + 1;
990
991 cmd->cmd = NETFS_RENAME;
992 cmd->id = pi->ino;
993 cmd->start = parent->ino;
994 cmd->size = total_len;
995
996 netfs_convert_cmd(cmd);
997
998 netfs_trans_update(cmd, t, total_len);
999
1000 return netfs_trans_finish(t, psb);
1001
1002err_out_unlock:
1003 netfs_trans_free(t);
1004 return err;
1005}
1006
1007static int pohmelfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1008 struct inode *new_dir, struct dentry *new_dentry)
1009{
1010 struct inode *inode = old_dentry->d_inode;
1011 struct pohmelfs_inode *old_parent, *pi, *new_parent;
1012 struct qstr str = new_dentry->d_name;
1013 struct pohmelfs_name *n;
1014 unsigned int old_hash;
1015 int err = -ENOENT;
1016
1017 pi = POHMELFS_I(inode);
1018 old_parent = POHMELFS_I(old_dir);
1019
1020 if (new_dir)
1021 new_dir->i_sb->s_op->write_inode(new_dir, 0);
1022
1023 old_hash = jhash(old_dentry->d_name.name, old_dentry->d_name.len, 0);
1024 str.hash = jhash(new_dentry->d_name.name, new_dentry->d_name.len, 0);
1025
1026 str.len = new_dentry->d_name.len;
1027 str.name = new_dentry->d_name.name;
1028 str.hash = jhash(new_dentry->d_name.name, new_dentry->d_name.len, 0);
1029
1030 if (new_dir) {
1031 new_parent = POHMELFS_I(new_dir);
1032 err = -ENOTEMPTY;
1033
1034 if (S_ISDIR(inode->i_mode) &&
1035 new_parent->total_len <= 3)
1036 goto err_out_exit;
1037 } else {
1038 new_parent = old_parent;
1039 }
1040
1041 dprintk("%s: ino: %llu, parent: %llu, name: '%s' -> parent: %llu, name: '%s', i_size: %llu.\n",
1042 __func__, pi->ino, old_parent->ino, old_dentry->d_name.name,
1043 new_parent->ino, new_dentry->d_name.name, inode->i_size);
1044
1045 if (test_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state) &&
1046 test_bit(NETFS_INODE_OWNED, &pi->state)) {
1047 err = pohmelfs_send_rename(pi, new_parent, &str);
1048 if (err)
1049 goto err_out_exit;
1050 }
1051
1052 n = pohmelfs_name_alloc(str.len + 1);
1053 if (!n)
1054 goto err_out_exit;
1055
1056 mutex_lock(&new_parent->offset_lock);
1057 n->ino = pi->ino;
1058 n->mode = inode->i_mode;
1059 n->len = str.len;
1060 n->hash = str.hash;
1061 sprintf(n->data, "%s", str.name);
1062
1063 err = pohmelfs_insert_name(new_parent, n);
1064 mutex_unlock(&new_parent->offset_lock);
1065
1066 if (err)
1067 goto err_out_exit;
1068
1069 mutex_lock(&old_parent->offset_lock);
1070 n = pohmelfs_search_hash(old_parent, old_hash);
1071 if (n)
1072 pohmelfs_name_del(old_parent, n);
1073 mutex_unlock(&old_parent->offset_lock);
1074
1075 mark_inode_dirty(inode);
1076 mark_inode_dirty(&new_parent->vfs_inode);
1077
1078 WARN_ON_ONCE(list_empty(&inode->i_dentry));
1079
1080 return 0;
1081
1082err_out_exit:
1083
1084 clear_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state);
1085
1086 return err;
1087}
1088
1089/*
1090 * POHMELFS directory inode operations.
1091 */
1092const struct inode_operations pohmelfs_dir_inode_ops = {
1093 .link = pohmelfs_link,
1094 .symlink = pohmelfs_symlink,
1095 .unlink = pohmelfs_unlink,
1096 .mkdir = pohmelfs_mkdir,
1097 .rmdir = pohmelfs_rmdir,
1098 .create = pohmelfs_create,
1099 .lookup = pohmelfs_lookup,
1100 .setattr = pohmelfs_setattr,
1101 .rename = pohmelfs_rename,
1102};
diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
deleted file mode 100644
index 807e3f324113..000000000000
--- a/drivers/staging/pohmelfs/inode.c
+++ /dev/null
@@ -1,2055 +0,0 @@
1/*
2 * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include <linux/module.h>
17#include <linux/backing-dev.h>
18#include <linux/crypto.h>
19#include <linux/fs.h>
20#include <linux/jhash.h>
21#include <linux/hash.h>
22#include <linux/ktime.h>
23#include <linux/mm.h>
24#include <linux/mount.h>
25#include <linux/pagemap.h>
26#include <linux/pagevec.h>
27#include <linux/parser.h>
28#include <linux/swap.h>
29#include <linux/slab.h>
30#include <linux/statfs.h>
31#include <linux/writeback.h>
32#include <linux/prefetch.h>
33
34#include "netfs.h"
35
36#define POHMELFS_MAGIC_NUM 0x504f482e
37
38static struct kmem_cache *pohmelfs_inode_cache;
39static atomic_t psb_bdi_num = ATOMIC_INIT(0);
40
41/*
42 * Removes inode from all trees, drops local name cache and removes all queued
43 * requests for object removal.
44 */
45void pohmelfs_inode_del_inode(struct pohmelfs_sb *psb, struct pohmelfs_inode *pi)
46{
47 mutex_lock(&pi->offset_lock);
48 pohmelfs_free_names(pi);
49 mutex_unlock(&pi->offset_lock);
50
51 dprintk("%s: deleted stuff in ino: %llu.\n", __func__, pi->ino);
52}
53
54/*
55 * Sync inode to server.
56 * Returns zero on success and a negative error value otherwise.
57 * It gathers the path to the root directory into structures containing
58 * creation mode, permissions and names, so that the whole path
59 * to the given inode can be created with a single network command.
60 */
61int pohmelfs_write_inode_create(struct inode *inode, struct netfs_trans *trans)
62{
63 struct pohmelfs_inode *pi = POHMELFS_I(inode);
64 int err = -ENOMEM, size;
65 struct netfs_cmd *cmd;
66 void *data;
67 int cur_len = netfs_trans_cur_len(trans);
68
69 if (unlikely(cur_len < 0))
70 return -ETOOSMALL;
71
72 cmd = netfs_trans_current(trans);
73 cur_len -= sizeof(struct netfs_cmd);
74
75 data = (void *)(cmd + 1);
76
77 err = pohmelfs_construct_path_string(pi, data, cur_len);
78 if (err < 0)
79 goto err_out_exit;
80
81 size = err;
82
83 cmd->start = i_size_read(inode);
84 cmd->cmd = NETFS_CREATE;
85 cmd->size = size;
86 cmd->id = pi->ino;
87 cmd->ext = inode->i_mode;
88
89 netfs_convert_cmd(cmd);
90
91 netfs_trans_update(cmd, trans, size);
92
93 return 0;
94
95err_out_exit:
96 printk("%s: completed ino: %llu, err: %d.\n", __func__, pi->ino, err);
97 return err;
98}
99
100static int pohmelfs_write_trans_complete(struct page **pages, unsigned int page_num,
101 void *private, int err)
102{
103 unsigned i;
104
105 dprintk("%s: pages: %lu-%lu, page_num: %u, err: %d.\n",
106 __func__, pages[0]->index, pages[page_num-1]->index,
107 page_num, err);
108
109 for (i = 0; i < page_num; i++) {
110 struct page *page = pages[i];
111
112 if (!page)
113 continue;
114
115 end_page_writeback(page);
116
117 if (err < 0) {
118 SetPageError(page);
119 set_page_dirty(page);
120 }
121
122 unlock_page(page);
123 page_cache_release(page);
124
125 /* dprintk("%s: %3u/%u: page: %p.\n", __func__, i, page_num, page); */
126 }
127 return err;
128}
129
130static int pohmelfs_inode_has_dirty_pages(struct address_space *mapping, pgoff_t index)
131{
132 int ret;
133 struct page *page;
134
135 rcu_read_lock();
136 ret = radix_tree_gang_lookup_tag(&mapping->page_tree,
137 (void **)&page, index, 1, PAGECACHE_TAG_DIRTY);
138 rcu_read_unlock();
139 return ret;
140}
141
142static int pohmelfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
143{
144 struct inode *inode = mapping->host;
145 struct pohmelfs_inode *pi = POHMELFS_I(inode);
146 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
147 int err = 0;
148 int done = 0;
149 int nr_pages;
150 pgoff_t index;
151 pgoff_t end; /* Inclusive */
152 int scanned = 0;
153 int range_whole = 0;
154
155 if (wbc->range_cyclic) {
156 index = mapping->writeback_index; /* Start from prev offset */
157 end = -1;
158 } else {
159 index = wbc->range_start >> PAGE_CACHE_SHIFT;
160 end = wbc->range_end >> PAGE_CACHE_SHIFT;
161 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
162 range_whole = 1;
163 scanned = 1;
164 }
165retry:
166 while (!done && (index <= end)) {
167 unsigned int i = min(end - index, (pgoff_t)psb->trans_max_pages);
168 int path_len;
169 struct netfs_trans *trans;
170
171 err = pohmelfs_inode_has_dirty_pages(mapping, index);
172 if (!err)
173 break;
174
175 err = pohmelfs_path_length(pi);
176 if (err < 0)
177 break;
178
179 path_len = err;
180
181 if (path_len <= 2) {
182 err = -ENOENT;
183 break;
184 }
185
186 trans = netfs_trans_alloc(psb, path_len, 0, i);
187 if (!trans) {
188 err = -ENOMEM;
189 break;
190 }
191 trans->complete = &pohmelfs_write_trans_complete;
192
193 trans->page_num = nr_pages = find_get_pages_tag(mapping, &index,
194 PAGECACHE_TAG_DIRTY, trans->page_num,
195 trans->pages);
196
197 dprintk("%s: t: %p, nr_pages: %u, end: %lu, index: %lu, max: %u.\n",
198 __func__, trans, nr_pages, end, index, trans->page_num);
199
200 if (!nr_pages)
201 goto err_out_reset;
202
203 err = pohmelfs_write_inode_create(inode, trans);
204 if (err)
205 goto err_out_reset;
206
207 err = 0;
208 scanned = 1;
209
210 for (i = 0; i < trans->page_num; i++) {
211 struct page *page = trans->pages[i];
212
213 lock_page(page);
214
215 if (unlikely(page->mapping != mapping))
216 goto out_continue;
217
218 if (!wbc->range_cyclic && page->index > end) {
219 done = 1;
220 goto out_continue;
221 }
222
223 if (wbc->sync_mode != WB_SYNC_NONE)
224 wait_on_page_writeback(page);
225
226 if (PageWriteback(page) ||
227 !clear_page_dirty_for_io(page)) {
228 dprintk("%s: not clear for io page: %p, writeback: %d.\n",
229 __func__, page, PageWriteback(page));
230 goto out_continue;
231 }
232
233 set_page_writeback(page);
234
235 trans->attached_size += page_private(page);
236 trans->attached_pages++;
237#if 0
238 dprintk("%s: %u/%u added trans: %p, gen: %u, page: %p, [High: %d], size: %lu, idx: %lu.\n",
239 __func__, i, trans->page_num, trans, trans->gen, page,
240 !!PageHighMem(page), page_private(page), page->index);
241#endif
242 wbc->nr_to_write--;
243
244 if (wbc->nr_to_write <= 0)
245 done = 1;
246
247 continue;
248out_continue:
249 unlock_page(page);
250 trans->pages[i] = NULL;
251 }
252
253 err = netfs_trans_finish(trans, psb);
254 if (err)
255 break;
256
257 continue;
258
259err_out_reset:
260 trans->result = err;
261 netfs_trans_reset(trans);
262 netfs_trans_put(trans);
263 break;
264 }
265
266 if (!scanned && !done) {
267 /*
268 * We hit the last page and there is more work to be done: wrap
269 * back to the start of the file
270 */
271 scanned = 1;
272 index = 0;
273 goto retry;
274 }
275
276 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
277 mapping->writeback_index = index;
278
279 return err;
280}
281
282/*
283 * Inode writeback creation completion callback.
284 * Only invoked for just-created inodes that do not have pages attached,
285 * such as directories and empty files.
286 */
287static int pohmelfs_write_inode_complete(struct page **pages, unsigned int page_num,
288 void *private, int err)
289{
290 struct inode *inode = private;
291 struct pohmelfs_inode *pi = POHMELFS_I(inode);
292
293 if (inode) {
294 if (err) {
295 mark_inode_dirty(inode);
296 clear_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state);
297 } else {
298 set_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state);
299 }
300
301 pohmelfs_put_inode(pi);
302 }
303
304 return err;
305}
306
307int pohmelfs_write_create_inode(struct pohmelfs_inode *pi)
308{
309 struct netfs_trans *t;
310 struct inode *inode = &pi->vfs_inode;
311 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
312 int err;
313
314 if (test_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state))
315 return 0;
316
317 dprintk("%s: started ino: %llu.\n", __func__, pi->ino);
318
319 err = pohmelfs_path_length(pi);
320 if (err < 0)
321 goto err_out_exit;
322
323 t = netfs_trans_alloc(psb, err + 1, 0, 0);
324 if (!t) {
325 err = -ENOMEM;
326 goto err_out_exit;
327 }
328 t->complete = pohmelfs_write_inode_complete;
329 t->private = igrab(inode);
330 if (!t->private) {
331 err = -ENOENT;
332 goto err_out_put;
333 }
334
335 err = pohmelfs_write_inode_create(inode, t);
336 if (err)
337 goto err_out_put;
338
339 netfs_trans_finish(t, POHMELFS_SB(inode->i_sb));
340
341 return 0;
342
343err_out_put:
344 t->result = err;
345 netfs_trans_put(t);
346err_out_exit:
347 return err;
348}
349
350/*
351 * Sync all not-yet-created children in given directory to the server.
352 */
353static int pohmelfs_write_inode_create_children(struct inode *inode)
354{
355 struct pohmelfs_inode *parent = POHMELFS_I(inode);
356 struct super_block *sb = inode->i_sb;
357 struct pohmelfs_name *n;
358
359 while (!list_empty(&parent->sync_create_list)) {
360 n = NULL;
361 mutex_lock(&parent->offset_lock);
362 if (!list_empty(&parent->sync_create_list)) {
363 n = list_first_entry(&parent->sync_create_list,
364 struct pohmelfs_name, sync_create_entry);
365 list_del_init(&n->sync_create_entry);
366 }
367 mutex_unlock(&parent->offset_lock);
368
369 if (!n)
370 break;
371
372 inode = ilookup(sb, n->ino);
373
374 dprintk("%s: parent: %llu, ino: %llu, inode: %p.\n",
375 __func__, parent->ino, n->ino, inode);
376
377 if (inode && (inode->i_state & I_DIRTY)) {
378 struct pohmelfs_inode *pi = POHMELFS_I(inode);
379 pohmelfs_write_create_inode(pi);
380 /* pohmelfs_meta_command(pi, NETFS_INODE_INFO, 0, NULL, NULL, 0); */
381 iput(inode);
382 }
383 }
384
385 return 0;
386}
387
388/*
389 * Removes given child from given inode on server.
390 */
391int pohmelfs_remove_child(struct pohmelfs_inode *pi, struct pohmelfs_name *n)
392{
393 return pohmelfs_meta_command_data(pi, pi->ino, NETFS_REMOVE, NULL, 0, NULL, NULL, 0);
394}
395
396/*
397 * Writeback for given inode.
398 */
399static int pohmelfs_write_inode(struct inode *inode,
400 struct writeback_control *wbc)
401{
402 struct pohmelfs_inode *pi = POHMELFS_I(inode);
403
404 pohmelfs_write_create_inode(pi);
405 pohmelfs_write_inode_create_children(inode);
406
407 return 0;
408}
409
410/*
411 * It is not exported, sorry...
412 */
413static inline wait_queue_head_t *page_waitqueue(struct page *page)
414{
415 const struct zone *zone = page_zone(page);
416
417 return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
418}
419
420static int pohmelfs_wait_on_page_locked(struct page *page)
421{
422 struct pohmelfs_sb *psb = POHMELFS_SB(page->mapping->host->i_sb);
423 long ret = psb->wait_on_page_timeout;
424 DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
425 int err = 0;
426
427 if (!PageLocked(page))
428 return 0;
429
430 for (;;) {
431 prepare_to_wait(page_waitqueue(page),
432 &wait.wait, TASK_INTERRUPTIBLE);
433
434 dprintk("%s: page: %p, locked: %d, uptodate: %d, error: %d, flags: %lx.\n",
435 __func__, page, PageLocked(page), PageUptodate(page),
436 PageError(page), page->flags);
437
438 if (!PageLocked(page))
439 break;
440
441 if (!signal_pending(current)) {
442 ret = schedule_timeout(ret);
443 if (!ret)
444 break;
445 continue;
446 }
447 ret = -ERESTARTSYS;
448 break;
449 }
450 finish_wait(page_waitqueue(page), &wait.wait);
451
452 if (!ret)
453 err = -ETIMEDOUT;
454
455
456 if (!err)
457 SetPageUptodate(page);
458
459 if (err)
460 printk("%s: page: %p, uptodate: %d, locked: %d, err: %d.\n",
461 __func__, page, PageUptodate(page), PageLocked(page), err);
462
463 return err;
464}
465
466static int pohmelfs_read_page_complete(struct page **pages, unsigned int page_num,
467 void *private, int err)
468{
469 struct page *page = private;
470
471 if (PageChecked(page))
472 return err;
473
474 if (err < 0) {
475 dprintk("%s: page: %p, err: %d.\n", __func__, page, err);
476 SetPageError(page);
477 }
478
479 unlock_page(page);
480
481 return err;
482}
483
484/*
485 * Read a page from remote server.
486 * The function waits until the page is unlocked.
487 */
488static int pohmelfs_readpage(struct file *file, struct page *page)
489{
490 struct inode *inode = page->mapping->host;
491 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
492 struct pohmelfs_inode *pi = POHMELFS_I(inode);
493 struct netfs_trans *t;
494 struct netfs_cmd *cmd;
495 int err, path_len;
496 void *data;
497 u64 isize;
498
499 err = pohmelfs_data_lock(pi, page->index << PAGE_CACHE_SHIFT,
500 PAGE_SIZE, POHMELFS_READ_LOCK);
501 if (err)
502 goto err_out_exit;
503
504 isize = i_size_read(inode);
505 if (isize <= page->index << PAGE_CACHE_SHIFT) {
506 SetPageUptodate(page);
507 unlock_page(page);
508 return 0;
509 }
510
511 path_len = pohmelfs_path_length(pi);
512 if (path_len < 0) {
513 err = path_len;
514 goto err_out_exit;
515 }
516
517 t = netfs_trans_alloc(psb, path_len, NETFS_TRANS_SINGLE_DST, 0);
518 if (!t) {
519 err = -ENOMEM;
520 goto err_out_exit;
521 }
522
523 t->complete = pohmelfs_read_page_complete;
524 t->private = page;
525
526 cmd = netfs_trans_current(t);
527 data = (void *)(cmd + 1);
528
529 err = pohmelfs_construct_path_string(pi, data, path_len);
530 if (err < 0)
531 goto err_out_free;
532
533 path_len = err;
534
535 cmd->id = pi->ino;
536 cmd->start = page->index;
537 cmd->start <<= PAGE_CACHE_SHIFT;
538 cmd->size = PAGE_CACHE_SIZE + path_len;
539 cmd->cmd = NETFS_READ_PAGE;
540 cmd->ext = path_len;
541
542 dprintk("%s: path: '%s', page: %p, ino: %llu, start: %llu, size: %lu.\n",
543 __func__, (char *)data, page, pi->ino, cmd->start, PAGE_CACHE_SIZE);
544
545 netfs_convert_cmd(cmd);
546 netfs_trans_update(cmd, t, path_len);
547
548 err = netfs_trans_finish(t, psb);
549 if (err)
550 goto err_out_return;
551
552 return pohmelfs_wait_on_page_locked(page);
553
554err_out_free:
555 t->result = err;
556 netfs_trans_put(t);
557err_out_exit:
558 SetPageError(page);
559 if (PageLocked(page))
560 unlock_page(page);
561err_out_return:
562 printk("%s: page: %p, start: %lu, size: %lu, err: %d.\n",
563 __func__, page, page->index << PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE, err);
564
565 return err;
566}
567
568/*
569 * Write begin/end magic.
570 * Allocates a page and writes the inode if it has not been synced to the server before.
571 */
572static int pohmelfs_write_begin(struct file *file, struct address_space *mapping,
573 loff_t pos, unsigned len, unsigned flags,
574 struct page **pagep, void **fsdata)
575{
576 struct inode *inode = mapping->host;
577 struct page *page;
578 pgoff_t index;
579 unsigned start, end;
580 int err;
581
582 *pagep = NULL;
583
584 index = pos >> PAGE_CACHE_SHIFT;
585 start = pos & (PAGE_CACHE_SIZE - 1);
586 end = start + len;
587
588 page = grab_cache_page(mapping, index);
589#if 0
590 dprintk("%s: page: %p pos: %llu, len: %u, index: %lu, start: %u, end: %u, uptodate: %d.\n",
591 __func__, page, pos, len, index, start, end, PageUptodate(page));
592#endif
593 if (!page) {
594 err = -ENOMEM;
595 goto err_out_exit;
596 }
597
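	/*
	 * Bring the page up to date before a partial write: if the write does not
	 * start at the page boundary and the inode already exists on the server,
	 * read the current data first; otherwise just zero the page from the write
	 * offset onwards and mark it uptodate.
	 */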
598 while (!PageUptodate(page)) {
599 if (start && test_bit(NETFS_INODE_REMOTE_SYNCED, &POHMELFS_I(inode)->state)) {
600 err = pohmelfs_readpage(file, page);
601 if (err)
602 goto err_out_exit;
603
604 lock_page(page);
605 continue;
606 }
607
608 if (len != PAGE_CACHE_SIZE) {
609 void *kaddr = kmap_atomic(page, KM_USER0);
610
611 memset(kaddr + start, 0, PAGE_CACHE_SIZE - start);
612 flush_dcache_page(page);
613 kunmap_atomic(kaddr, KM_USER0);
614 }
615 SetPageUptodate(page);
616 }
617
618 set_page_private(page, end);
619
620 *pagep = page;
621
622 return 0;
623
624err_out_exit:
625 page_cache_release(page);
626 *pagep = NULL;
627
628 return err;
629}
630
631static int pohmelfs_write_end(struct file *file, struct address_space *mapping,
632 loff_t pos, unsigned len, unsigned copied,
633 struct page *page, void *fsdata)
634{
635 struct inode *inode = mapping->host;
636
637 if (copied != len) {
638 unsigned from = pos & (PAGE_CACHE_SIZE - 1);
639 void *kaddr = kmap_atomic(page, KM_USER0);
640
641 memset(kaddr + from + copied, 0, len - copied);
642 flush_dcache_page(page);
643 kunmap_atomic(kaddr, KM_USER0);
644 }
645
646 SetPageUptodate(page);
647 set_page_dirty(page);
648#if 0
649 dprintk("%s: page: %p [U: %d, D: %d, L: %d], pos: %llu, len: %u, copied: %u.\n",
650 __func__, page,
651 PageUptodate(page), PageDirty(page), PageLocked(page),
652 pos, len, copied);
653#endif
654 flush_dcache_page(page);
655
656 unlock_page(page);
657 page_cache_release(page);
658
659 if (pos + copied > inode->i_size) {
660 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
661
662 psb->avail_size -= pos + copied - inode->i_size;
663
664 i_size_write(inode, pos + copied);
665 }
666
667 return copied;
668}
669
670static int pohmelfs_readpages_trans_complete(struct page **__pages, unsigned int page_num,
671 void *private, int err)
672{
673 struct pohmelfs_inode *pi = private;
674 unsigned int i, num;
675 struct page **pages, *page = (struct page *)__pages;
676 loff_t index = page->index;
677
678 pages = kzalloc(sizeof(void *) * page_num, GFP_NOIO);
679 if (!pages)
680 return -ENOMEM;
681
682 num = find_get_pages_contig(pi->vfs_inode.i_mapping, index, page_num, pages);
683 if (num <= 0) {
684 err = num;
685 goto err_out_free;
686 }
687
688 for (i = 0; i < num; ++i) {
689 page = pages[i];
690
691 if (err)
692 printk("%s: %u/%u: page: %p, index: %lu, uptodate: %d, locked: %d, err: %d.\n",
693 __func__, i, num, page, page->index,
694 PageUptodate(page), PageLocked(page), err);
695
696 if (!PageChecked(page)) {
697 if (err < 0)
698 SetPageError(page);
699 unlock_page(page);
700 }
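		/*
		 * Two references are dropped here: the one taken by find_get_pages_contig()
		 * above and the extra one held while the read request was in flight.
		 */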
701 page_cache_release(page);
702 page_cache_release(page);
703 }
704
705err_out_free:
706 kfree(pages);
707 return err;
708}
709
710static int pohmelfs_send_readpages(struct pohmelfs_inode *pi, struct page *first, unsigned int num)
711{
712 struct netfs_trans *t;
713 struct netfs_cmd *cmd;
714 struct pohmelfs_sb *psb = POHMELFS_SB(pi->vfs_inode.i_sb);
715 int err, path_len;
716 void *data;
717
718 err = pohmelfs_data_lock(pi, first->index << PAGE_CACHE_SHIFT,
719 num * PAGE_SIZE, POHMELFS_READ_LOCK);
720 if (err)
721 goto err_out_exit;
722
723 path_len = pohmelfs_path_length(pi);
724 if (path_len < 0) {
725 err = path_len;
726 goto err_out_exit;
727 }
728
729 t = netfs_trans_alloc(psb, path_len, NETFS_TRANS_SINGLE_DST, 0);
730 if (!t) {
731 err = -ENOMEM;
732 goto err_out_exit;
733 }
734
735 cmd = netfs_trans_current(t);
736 data = (void *)(cmd + 1);
737
738 t->complete = pohmelfs_readpages_trans_complete;
739 t->private = pi;
740 t->page_num = num;
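	/*
	 * Only the pointer to the first page is passed along; the completion
	 * callback casts it back and looks the remaining pages up via
	 * find_get_pages_contig().
	 */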
741 t->pages = (struct page **)first;
742
743 err = pohmelfs_construct_path_string(pi, data, path_len);
744 if (err < 0)
745 goto err_out_put;
746
747 path_len = err;
748
749 cmd->cmd = NETFS_READ_PAGES;
750 cmd->start = first->index;
751 cmd->start <<= PAGE_CACHE_SHIFT;
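	/* Encode the page count in the upper bits and the per-page shift in the low byte. */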
752 cmd->size = (num << 8 | PAGE_CACHE_SHIFT);
753 cmd->id = pi->ino;
754 cmd->ext = path_len;
755
756 dprintk("%s: t: %p, gen: %u, path: '%s', path_len: %u, "
757 "start: %lu, num: %u.\n",
758 __func__, t, t->gen, (char *)data, path_len,
759 first->index, num);
760
761 netfs_convert_cmd(cmd);
762 netfs_trans_update(cmd, t, path_len);
763
764 return netfs_trans_finish(t, psb);
765
766err_out_put:
767 netfs_trans_free(t);
768err_out_exit:
769 pohmelfs_readpages_trans_complete((struct page **)first, num, pi, err);
770 return err;
771}
772
773#define list_to_page(head) (list_entry((head)->prev, struct page, lru))
774
775static int pohmelfs_readpages(struct file *file, struct address_space *mapping,
776 struct list_head *pages, unsigned nr_pages)
777{
778 unsigned int page_idx, num = 0;
779 struct page *page = NULL, *first = NULL;
780
781 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
782 page = list_to_page(pages);
783
784 prefetchw(&page->flags);
785 list_del(&page->lru);
786
787 if (!add_to_page_cache_lru(page, mapping,
788 page->index, GFP_KERNEL)) {
789
790 if (!num) {
791 num = 1;
792 first = page;
793 continue;
794 }
795
796 dprintk("%s: added to lru page: %p, page_index: %lu, first_index: %lu.\n",
797 __func__, page, page->index, first->index);
798
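			/* Flush the accumulated run when a gap in the index sequence shows up or the batch grows past 500 pages. */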
799 if (unlikely(first->index + num != page->index) || (num > 500)) {
800 pohmelfs_send_readpages(POHMELFS_I(mapping->host),
801 first, num);
802 first = page;
803 num = 0;
804 }
805
806 num++;
807 }
808 }
809 pohmelfs_send_readpages(POHMELFS_I(mapping->host), first, num);
810
811 /*
812 	 * This will be a synchronous read, so by the time the last page is processed,
813 	 * all previous ones are already unlocked and ready to be used.
814 */
815 return 0;
816}
817
818/*
819 * Small address space operations for POHMELFS.
820 */
821const struct address_space_operations pohmelfs_aops = {
822 .readpage = pohmelfs_readpage,
823 .readpages = pohmelfs_readpages,
824 .writepages = pohmelfs_writepages,
825 .write_begin = pohmelfs_write_begin,
826 .write_end = pohmelfs_write_end,
827 .set_page_dirty = __set_page_dirty_nobuffers,
828};
829
830static void pohmelfs_i_callback(struct rcu_head *head)
831{
832 struct inode *inode = container_of(head, struct inode, i_rcu);
833 kmem_cache_free(pohmelfs_inode_cache, POHMELFS_I(inode));
834}
835
836/*
837 * ->destroy_inode() callback. Deletes inode from the caches
838 * and frees private data.
839 */
840static void pohmelfs_destroy_inode(struct inode *inode)
841{
842 struct super_block *sb = inode->i_sb;
843 struct pohmelfs_sb *psb = POHMELFS_SB(sb);
844 struct pohmelfs_inode *pi = POHMELFS_I(inode);
845
846 /* pohmelfs_data_unlock(pi, 0, inode->i_size, POHMELFS_READ_LOCK); */
847
848 pohmelfs_inode_del_inode(psb, pi);
849
850 dprintk("%s: pi: %p, inode: %p, ino: %llu.\n",
851 __func__, pi, &pi->vfs_inode, pi->ino);
852 atomic_long_dec(&psb->total_inodes);
853 call_rcu(&inode->i_rcu, pohmelfs_i_callback);
854}
855
856/*
857 * ->alloc_inode() callback. Allocates inode and initializes private data.
858 */
859static struct inode *pohmelfs_alloc_inode(struct super_block *sb)
860{
861 struct pohmelfs_inode *pi;
862
863 pi = kmem_cache_alloc(pohmelfs_inode_cache, GFP_NOIO);
864 if (!pi)
865 return NULL;
866
867 pi->hash_root = RB_ROOT;
868 mutex_init(&pi->offset_lock);
869
870 INIT_LIST_HEAD(&pi->sync_create_list);
871
872 INIT_LIST_HEAD(&pi->inode_entry);
873
874 pi->lock_type = 0;
875 pi->state = 0;
876 pi->total_len = 0;
877 pi->drop_count = 0;
878
879 dprintk("%s: pi: %p, inode: %p.\n", __func__, pi, &pi->vfs_inode);
880
881 atomic_long_inc(&POHMELFS_SB(sb)->total_inodes);
882
883 return &pi->vfs_inode;
884}
885
886/*
887 * We want fsync() to work on POHMELFS.
888 */
889static int pohmelfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
890{
891 struct inode *inode = file->f_mapping->host;
892 int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
893 if (!err) {
894 mutex_lock(&inode->i_mutex);
895 err = sync_inode_metadata(inode, 1);
896 mutex_unlock(&inode->i_mutex);
897 }
898 return err;
899}
900
901ssize_t pohmelfs_write(struct file *file, const char __user *buf,
902 size_t len, loff_t *ppos)
903{
904 struct address_space *mapping = file->f_mapping;
905 struct inode *inode = mapping->host;
906 struct pohmelfs_inode *pi = POHMELFS_I(inode);
907 struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
908 struct kiocb kiocb;
909 ssize_t ret;
910 loff_t pos = *ppos;
911
912 init_sync_kiocb(&kiocb, file);
913 kiocb.ki_pos = pos;
914 kiocb.ki_left = len;
915
916 dprintk("%s: len: %zu, pos: %llu.\n", __func__, len, pos);
917
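	/* Take i_mutex and the byte-range write lock on the server before dropping into the generic buffered write path. */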
918 mutex_lock(&inode->i_mutex);
919 ret = pohmelfs_data_lock(pi, pos, len, POHMELFS_WRITE_LOCK);
920 if (ret)
921 goto err_out_unlock;
922
923 ret = __generic_file_aio_write(&kiocb, &iov, 1, &kiocb.ki_pos);
924 *ppos = kiocb.ki_pos;
925
926 mutex_unlock(&inode->i_mutex);
927 WARN_ON(ret < 0);
928
929 if (ret > 0) {
930 ssize_t err;
931
932 err = generic_write_sync(file, pos, ret);
933 if (err < 0)
934 ret = err;
935 WARN_ON(ret < 0);
936 }
937
938 return ret;
939
940err_out_unlock:
941 mutex_unlock(&inode->i_mutex);
942 return ret;
943}
944
945static const struct file_operations pohmelfs_file_ops = {
946 .open = generic_file_open,
947 .fsync = pohmelfs_fsync,
948
949 .llseek = generic_file_llseek,
950
951 .read = do_sync_read,
952 .aio_read = generic_file_aio_read,
953
954 .mmap = generic_file_mmap,
955
956 .splice_read = generic_file_splice_read,
957 .splice_write = generic_file_splice_write,
958
959 .write = pohmelfs_write,
960 .aio_write = generic_file_aio_write,
961};
962
963const struct inode_operations pohmelfs_symlink_inode_operations = {
964 .readlink = generic_readlink,
965 .follow_link = page_follow_link_light,
966 .put_link = page_put_link,
967};
968
969int pohmelfs_setattr_raw(struct inode *inode, struct iattr *attr)
970{
971 int err;
972
973 err = inode_change_ok(inode, attr);
974 if (err) {
975 dprintk("%s: ino: %llu, inode changes are not allowed.\n", __func__, POHMELFS_I(inode)->ino);
976 goto err_out_exit;
977 }
978
979 if ((attr->ia_valid & ATTR_SIZE) &&
980 attr->ia_size != i_size_read(inode)) {
981 err = vmtruncate(inode, attr->ia_size);
982 if (err) {
983 dprintk("%s: ino: %llu, failed to set the attributes.\n", __func__, POHMELFS_I(inode)->ino);
984 goto err_out_exit;
985 }
986 }
987
988 setattr_copy(inode, attr);
989 mark_inode_dirty(inode);
990
991 dprintk("%s: ino: %llu, mode: %o -> %o, uid: %u -> %u, gid: %u -> %u, size: %llu -> %llu.\n",
992 __func__, POHMELFS_I(inode)->ino, inode->i_mode, attr->ia_mode,
993 inode->i_uid, attr->ia_uid, inode->i_gid, attr->ia_gid, inode->i_size, attr->ia_size);
994
995 return 0;
996
997err_out_exit:
998 return err;
999}
1000
1001int pohmelfs_setattr(struct dentry *dentry, struct iattr *attr)
1002{
1003 struct inode *inode = dentry->d_inode;
1004 struct pohmelfs_inode *pi = POHMELFS_I(inode);
1005 int err;
1006
1007 err = pohmelfs_data_lock(pi, 0, ~0, POHMELFS_WRITE_LOCK);
1008 if (err)
1009 goto err_out_exit;
1010
1011 err = security_inode_setattr(dentry, attr);
1012 if (err)
1013 goto err_out_exit;
1014
1015 err = pohmelfs_setattr_raw(inode, attr);
1016 if (err)
1017 goto err_out_exit;
1018
1019 return 0;
1020
1021err_out_exit:
1022 return err;
1023}
1024
1025static int pohmelfs_send_xattr_req(struct pohmelfs_inode *pi, u64 id, u64 start,
1026 const char *name, const void *value, size_t attrsize, int command)
1027{
1028 struct pohmelfs_sb *psb = POHMELFS_SB(pi->vfs_inode.i_sb);
1029 int err, path_len, namelen = strlen(name) + 1; /* 0-byte */
1030 struct netfs_trans *t;
1031 struct netfs_cmd *cmd;
1032 void *data;
1033
1034 dprintk("%s: id: %llu, start: %llu, name: '%s', attrsize: %zu, cmd: %d.\n",
1035 __func__, id, start, name, attrsize, command);
1036
1037 path_len = pohmelfs_path_length(pi);
1038 if (path_len < 0) {
1039 err = path_len;
1040 goto err_out_exit;
1041 }
1042
1043 t = netfs_trans_alloc(psb, namelen + path_len + attrsize, 0, 0);
1044 if (!t) {
1045 err = -ENOMEM;
1046 goto err_out_exit;
1047 }
1048
1049 cmd = netfs_trans_current(t);
1050 data = cmd + 1;
1051
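	/* Payload layout: path string, NUL-terminated attribute name, raw attribute value. */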
1052 path_len = pohmelfs_construct_path_string(pi, data, path_len);
1053 if (path_len < 0) {
1054 err = path_len;
1055 goto err_out_put;
1056 }
1057 data += path_len;
1058
1059 /*
1060 * 'name' is a NUL-terminated string already and
1061 	 * 'namelen' includes the trailing 0-byte.
1062 */
1063 memcpy(data, name, namelen);
1064 data += namelen;
1065
1066 memcpy(data, value, attrsize);
1067
1068 cmd->cmd = command;
1069 cmd->id = id;
1070 cmd->start = start;
1071 cmd->size = attrsize + namelen + path_len;
1072 cmd->ext = path_len;
1073 cmd->csize = 0;
1074 cmd->cpad = 0;
1075
1076 netfs_convert_cmd(cmd);
1077 netfs_trans_update(cmd, t, namelen + path_len + attrsize);
1078
1079 return netfs_trans_finish(t, psb);
1080
1081err_out_put:
1082 t->result = err;
1083 netfs_trans_put(t);
1084err_out_exit:
1085 return err;
1086}
1087
1088static int pohmelfs_setxattr(struct dentry *dentry, const char *name,
1089 const void *value, size_t attrsize, int flags)
1090{
1091 struct inode *inode = dentry->d_inode;
1092 struct pohmelfs_inode *pi = POHMELFS_I(inode);
1093 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
1094
1095 if (!(psb->state_flags & POHMELFS_FLAGS_XATTR))
1096 return -EOPNOTSUPP;
1097
1098 return pohmelfs_send_xattr_req(pi, flags, attrsize, name,
1099 value, attrsize, NETFS_XATTR_SET);
1100}
1101
1102static ssize_t pohmelfs_getxattr(struct dentry *dentry, const char *name,
1103 void *value, size_t attrsize)
1104{
1105 struct inode *inode = dentry->d_inode;
1106 struct pohmelfs_inode *pi = POHMELFS_I(inode);
1107 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
1108 struct pohmelfs_mcache *m;
1109 int err;
1110 long timeout = psb->mcache_timeout;
1111
1112 if (!(psb->state_flags & POHMELFS_FLAGS_XATTR))
1113 return -EOPNOTSUPP;
1114
1115 m = pohmelfs_mcache_alloc(psb, 0, attrsize, value);
1116 if (IS_ERR(m))
1117 return PTR_ERR(m);
1118
1119 dprintk("%s: ino: %llu, name: '%s', size: %zu.\n",
1120 __func__, pi->ino, name, attrsize);
1121
1122 err = pohmelfs_send_xattr_req(pi, m->gen, attrsize, name, value, 0, NETFS_XATTR_GET);
1123 if (err)
1124 goto err_out_put;
1125
1126 do {
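		/* wait_for_completion_timeout() returns the remaining jiffies on completion and zero on timeout. */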
1127 err = wait_for_completion_timeout(&m->complete, timeout);
1128 if (err) {
1129 err = m->err;
1130 break;
1131 }
1132
1133 /*
1134 	 * This loop is a bit ugly, since it waits until the reference counter
1135 	 * hits 1 and only then puts the object. The main goal is to prevent a race with
1136 	 * the network thread, which may have started processing the request, i.e.
1137 	 * increased its reference counter without completing it, while
1138 	 * we exit from ->getxattr() with a timeout. Although the request itself
1139 	 * will not be freed (its reference counter was increased by the network
1140 	 * thread), the data pointer provided by the user may be released, so the
1141 	 * network thread would overwrite an already freed area.
1142 	 *
1143 	 * So after the timeout we remove the request from the cache, so that it can not be
1144 	 * found by the network thread, and wait for its reference counter to hit 1:
1145 	 * if the network thread has already started to process this request, we wait
1146 	 * for it to finish and then free the object locally. If the reference counter is
1147 	 * already 1, i.e. the request is not used by anyone else, we can free it without
1148 	 * problems.
1149 */
1150 err = -ETIMEDOUT;
1151 timeout = HZ;
1152
1153 pohmelfs_mcache_remove_locked(psb, m);
1154 } while (atomic_read(&m->refcnt) != 1);
1155
1156 pohmelfs_mcache_put(psb, m);
1157
1158 dprintk("%s: ino: %llu, err: %d.\n", __func__, pi->ino, err);
1159
1160 return err;
1161
1162err_out_put:
1163 pohmelfs_mcache_put(psb, m);
1164 return err;
1165}
1166
1167static int pohmelfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
1168{
1169 struct inode *inode = dentry->d_inode;
1170#if 0
1171 struct pohmelfs_inode *pi = POHMELFS_I(inode);
1172 int err;
1173
1174 err = pohmelfs_data_lock(pi, 0, ~0, POHMELFS_READ_LOCK);
1175 if (err)
1176 return err;
1177 dprintk("%s: ino: %llu, mode: %o, uid: %u, gid: %u, size: %llu.\n",
1178 __func__, pi->ino, inode->i_mode, inode->i_uid,
1179 inode->i_gid, inode->i_size);
1180#endif
1181
1182 generic_fillattr(inode, stat);
1183 return 0;
1184}
1185
1186const struct inode_operations pohmelfs_file_inode_operations = {
1187 .setattr = pohmelfs_setattr,
1188 .getattr = pohmelfs_getattr,
1189 .setxattr = pohmelfs_setxattr,
1190 .getxattr = pohmelfs_getxattr,
1191};
1192
1193/*
1194 * Fill inode data: mode, size, operation callbacks and so on...
1195 */
1196void pohmelfs_fill_inode(struct inode *inode, struct netfs_inode_info *info)
1197{
1198 inode->i_mode = info->mode;
1199 set_nlink(inode, info->nlink);
1200 inode->i_uid = info->uid;
1201 inode->i_gid = info->gid;
1202 inode->i_blocks = info->blocks;
1203 inode->i_rdev = info->rdev;
1204 inode->i_size = info->size;
1205 inode->i_version = info->version;
1206 inode->i_blkbits = ffs(info->blocksize);
1207
1208 dprintk("%s: inode: %p, num: %lu/%llu inode is regular: %d, dir: %d, link: %d, mode: %o, size: %llu.\n",
1209 __func__, inode, inode->i_ino, info->ino,
1210 S_ISREG(inode->i_mode), S_ISDIR(inode->i_mode),
1211 S_ISLNK(inode->i_mode), inode->i_mode, inode->i_size);
1212
1213 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
1214
1215 /*
1216 * i_mapping is a pointer to i_data during inode initialization.
1217 */
1218 inode->i_data.a_ops = &pohmelfs_aops;
1219
1220 if (S_ISREG(inode->i_mode)) {
1221 inode->i_fop = &pohmelfs_file_ops;
1222 inode->i_op = &pohmelfs_file_inode_operations;
1223 } else if (S_ISDIR(inode->i_mode)) {
1224 inode->i_fop = &pohmelfs_dir_fops;
1225 inode->i_op = &pohmelfs_dir_inode_ops;
1226 } else if (S_ISLNK(inode->i_mode)) {
1227 inode->i_op = &pohmelfs_symlink_inode_operations;
1228 inode->i_fop = &pohmelfs_file_ops;
1229 } else {
1230 inode->i_fop = &generic_ro_fops;
1231 }
1232}
1233
1234static int pohmelfs_drop_inode(struct inode *inode)
1235{
1236 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
1237 struct pohmelfs_inode *pi = POHMELFS_I(inode);
1238
1239 spin_lock(&psb->ino_lock);
1240 list_del_init(&pi->inode_entry);
1241 spin_unlock(&psb->ino_lock);
1242
1243 return generic_drop_inode(inode);
1244}
1245
1246static struct pohmelfs_inode *pohmelfs_get_inode_from_list(struct pohmelfs_sb *psb,
1247 struct list_head *head, unsigned int *count)
1248{
1249 struct pohmelfs_inode *pi = NULL;
1250
1251 spin_lock(&psb->ino_lock);
1252 if (!list_empty(head)) {
1253 pi = list_entry(head->next, struct pohmelfs_inode,
1254 inode_entry);
1255 list_del_init(&pi->inode_entry);
1256 *count = pi->drop_count;
1257 pi->drop_count = 0;
1258 }
1259 spin_unlock(&psb->ino_lock);
1260
1261 return pi;
1262}
1263
1264static void pohmelfs_flush_transactions(struct pohmelfs_sb *psb)
1265{
1266 struct pohmelfs_config *c;
1267
1268 mutex_lock(&psb->state_lock);
1269 list_for_each_entry(c, &psb->state_list, config_entry) {
1270 pohmelfs_state_flush_transactions(&c->state);
1271 }
1272 mutex_unlock(&psb->state_lock);
1273}
1274
1275/*
1276 * ->put_super() callback. Invoked before superblock is destroyed,
1277  * so it has to clean up all private data.
1278 */
1279static void pohmelfs_put_super(struct super_block *sb)
1280{
1281 struct pohmelfs_sb *psb = POHMELFS_SB(sb);
1282 struct pohmelfs_inode *pi;
1283 unsigned int count = 0;
1284 unsigned int in_drop_list = 0;
1285 struct inode *inode, *tmp;
1286
1287 dprintk("%s.\n", __func__);
1288
1289 /*
1290 * Kill pending transactions, which could affect inodes in-flight.
1291 */
1292 pohmelfs_flush_transactions(psb);
1293
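	/* Drain the drop list: release every deferred reference that the drop worker has not processed yet. */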
1294 while ((pi = pohmelfs_get_inode_from_list(psb, &psb->drop_list, &count))) {
1295 inode = &pi->vfs_inode;
1296
1297 dprintk("%s: ino: %llu, pi: %p, inode: %p, count: %u.\n",
1298 __func__, pi->ino, pi, inode, count);
1299
1300 if (atomic_read(&inode->i_count) != count) {
1301 printk("%s: ino: %llu, pi: %p, inode: %p, count: %u, i_count: %d.\n",
1302 __func__, pi->ino, pi, inode, count,
1303 atomic_read(&inode->i_count));
1304 count = atomic_read(&inode->i_count);
1305 in_drop_list++;
1306 }
1307
1308 while (count--)
1309 iput(&pi->vfs_inode);
1310 }
1311
1312 list_for_each_entry_safe(inode, tmp, &sb->s_inodes, i_sb_list) {
1313 pi = POHMELFS_I(inode);
1314
1315 dprintk("%s: ino: %llu, pi: %p, inode: %p, i_count: %u.\n",
1316 __func__, pi->ino, pi, inode, atomic_read(&inode->i_count));
1317
1318 /*
1319 		 * These are special inodes: they were created during
1320 		 * directory reading or lookup and were never bound to a dentry,
1321 		 * so they live here with a reference count of 1 and prevent
1322 		 * umount from succeeding, since it believes that they are busy.
1323 */
1324 count = atomic_read(&inode->i_count);
1325 if (count) {
1326 list_del_init(&inode->i_sb_list);
1327 while (count--)
1328 iput(&pi->vfs_inode);
1329 }
1330 }
1331
1332 psb->trans_scan_timeout = psb->drop_scan_timeout = 0;
1333 cancel_delayed_work_sync(&psb->dwork);
1334 cancel_delayed_work_sync(&psb->drop_dwork);
1335 flush_scheduled_work();
1336
1337 dprintk("%s: stopped workqueues.\n", __func__);
1338
1339 pohmelfs_crypto_exit(psb);
1340 pohmelfs_state_exit(psb);
1341
1342 bdi_destroy(&psb->bdi);
1343
1344 kfree(psb);
1345 sb->s_fs_info = NULL;
1346}
1347
1348static int pohmelfs_statfs(struct dentry *dentry, struct kstatfs *buf)
1349{
1350 struct super_block *sb = dentry->d_sb;
1351 struct pohmelfs_sb *psb = POHMELFS_SB(sb);
1352
1353 /*
1354 * There are no filesystem size limits yet.
1355 */
1356 memset(buf, 0, sizeof(struct kstatfs));
1357
1358 buf->f_type = POHMELFS_MAGIC_NUM; /* 'POH.' */
1359 buf->f_bsize = sb->s_blocksize;
1360 buf->f_files = psb->ino;
1361 buf->f_namelen = 255;
1362 buf->f_files = atomic_long_read(&psb->total_inodes);
1363 buf->f_bfree = buf->f_bavail = psb->avail_size >> PAGE_SHIFT;
1364 buf->f_blocks = psb->total_size >> PAGE_SHIFT;
1365
1366 dprintk("%s: total: %llu, avail: %llu, inodes: %llu, bsize: %lu.\n",
1367 __func__, psb->total_size, psb->avail_size, buf->f_files, sb->s_blocksize);
1368
1369 return 0;
1370}
1371
1372static int pohmelfs_show_options(struct seq_file *seq, struct dentry *root)
1373{
1374 struct pohmelfs_sb *psb = POHMELFS_SB(root->d_sb);
1375
1376 seq_printf(seq, ",idx=%u", psb->idx);
1377 seq_printf(seq, ",trans_scan_timeout=%u", jiffies_to_msecs(psb->trans_scan_timeout));
1378 seq_printf(seq, ",drop_scan_timeout=%u", jiffies_to_msecs(psb->drop_scan_timeout));
1379 seq_printf(seq, ",wait_on_page_timeout=%u", jiffies_to_msecs(psb->wait_on_page_timeout));
1380 seq_printf(seq, ",trans_retries=%u", psb->trans_retries);
1381 seq_printf(seq, ",crypto_thread_num=%u", psb->crypto_thread_num);
1382 seq_printf(seq, ",trans_max_pages=%u", psb->trans_max_pages);
1383 seq_printf(seq, ",mcache_timeout=%u", jiffies_to_msecs(psb->mcache_timeout));
1384 if (psb->crypto_fail_unsupported)
1385 seq_printf(seq, ",crypto_fail_unsupported");
1386
1387 return 0;
1388}
1389
1390enum {
1391 pohmelfs_opt_idx,
1392 pohmelfs_opt_crypto_thread_num,
1393 pohmelfs_opt_trans_max_pages,
1394 pohmelfs_opt_crypto_fail_unsupported,
1395
1396 /* Remountable options */
1397 pohmelfs_opt_trans_scan_timeout,
1398 pohmelfs_opt_drop_scan_timeout,
1399 pohmelfs_opt_wait_on_page_timeout,
1400 pohmelfs_opt_trans_retries,
1401 pohmelfs_opt_mcache_timeout,
1402};
1403
1404static struct match_token pohmelfs_tokens[] = {
1405 {pohmelfs_opt_idx, "idx=%u"},
1406 {pohmelfs_opt_crypto_thread_num, "crypto_thread_num=%u"},
1407 {pohmelfs_opt_trans_max_pages, "trans_max_pages=%u"},
1408 {pohmelfs_opt_crypto_fail_unsupported, "crypto_fail_unsupported"},
1409 {pohmelfs_opt_trans_scan_timeout, "trans_scan_timeout=%u"},
1410 {pohmelfs_opt_drop_scan_timeout, "drop_scan_timeout=%u"},
1411 {pohmelfs_opt_wait_on_page_timeout, "wait_on_page_timeout=%u"},
1412 {pohmelfs_opt_trans_retries, "trans_retries=%u"},
1413 {pohmelfs_opt_mcache_timeout, "mcache_timeout=%u"},
1414};
1415
1416static int pohmelfs_parse_options(char *options, struct pohmelfs_sb *psb, int remount)
1417{
1418 char *p;
1419 substring_t args[MAX_OPT_ARGS];
1420 int option, err;
1421
1422 if (!options)
1423 return 0;
1424
1425 while ((p = strsep(&options, ",")) != NULL) {
1426 int token;
1427 if (!*p)
1428 continue;
1429
1430 token = match_token(p, pohmelfs_tokens, args);
1431
1432 err = match_int(&args[0], &option);
1433 if (err)
1434 return err;
1435
1436 if (remount && token <= pohmelfs_opt_crypto_fail_unsupported)
1437 continue;
1438
1439 switch (token) {
1440 case pohmelfs_opt_idx:
1441 psb->idx = option;
1442 break;
1443 case pohmelfs_opt_trans_scan_timeout:
1444 psb->trans_scan_timeout = msecs_to_jiffies(option);
1445 break;
1446 case pohmelfs_opt_drop_scan_timeout:
1447 psb->drop_scan_timeout = msecs_to_jiffies(option);
1448 break;
1449 case pohmelfs_opt_wait_on_page_timeout:
1450 psb->wait_on_page_timeout = msecs_to_jiffies(option);
1451 break;
1452 case pohmelfs_opt_mcache_timeout:
1453 psb->mcache_timeout = msecs_to_jiffies(option);
1454 break;
1455 case pohmelfs_opt_trans_retries:
1456 psb->trans_retries = option;
1457 break;
1458 case pohmelfs_opt_crypto_thread_num:
1459 psb->crypto_thread_num = option;
1460 break;
1461 case pohmelfs_opt_trans_max_pages:
1462 psb->trans_max_pages = option;
1463 break;
1464 case pohmelfs_opt_crypto_fail_unsupported:
1465 psb->crypto_fail_unsupported = 1;
1466 break;
1467 default:
1468 return -EINVAL;
1469 }
1470 }
1471
1472 return 0;
1473}
1474
1475static int pohmelfs_remount(struct super_block *sb, int *flags, char *data)
1476{
1477 int err;
1478 struct pohmelfs_sb *psb = POHMELFS_SB(sb);
1479 unsigned long old_sb_flags = sb->s_flags;
1480
1481 err = pohmelfs_parse_options(data, psb, 1);
1482 if (err)
1483 goto err_out_restore;
1484
1485 if (!(*flags & MS_RDONLY))
1486 sb->s_flags &= ~MS_RDONLY;
1487 return 0;
1488
1489err_out_restore:
1490 sb->s_flags = old_sb_flags;
1491 return err;
1492}
1493
1494static void pohmelfs_flush_inode(struct pohmelfs_inode *pi, unsigned int count)
1495{
1496 struct inode *inode = &pi->vfs_inode;
1497
1498 dprintk("%s: %p: ino: %llu, owned: %d.\n",
1499 __func__, inode, pi->ino, test_bit(NETFS_INODE_OWNED, &pi->state));
1500
1501 mutex_lock(&inode->i_mutex);
1502 if (test_and_clear_bit(NETFS_INODE_OWNED, &pi->state)) {
1503 filemap_fdatawrite(inode->i_mapping);
1504 inode->i_sb->s_op->write_inode(inode, 0);
1505 }
1506
1507#ifdef POHMELFS_TRUNCATE_ON_INODE_FLUSH
1508 truncate_inode_pages(inode->i_mapping, 0);
1509#endif
1510
1511 pohmelfs_data_unlock(pi, 0, ~0, POHMELFS_WRITE_LOCK);
1512 mutex_unlock(&inode->i_mutex);
1513}
1514
1515static void pohmelfs_put_inode_count(struct pohmelfs_inode *pi, unsigned int count)
1516{
1517 dprintk("%s: ino: %llu, pi: %p, inode: %p, count: %u.\n",
1518 __func__, pi->ino, pi, &pi->vfs_inode, count);
1519
1520 if (test_and_clear_bit(NETFS_INODE_NEED_FLUSH, &pi->state))
1521 pohmelfs_flush_inode(pi, count);
1522
1523 while (count--)
1524 iput(&pi->vfs_inode);
1525}
1526
1527static void pohmelfs_drop_scan(struct work_struct *work)
1528{
1529 struct pohmelfs_sb *psb =
1530 container_of(work, struct pohmelfs_sb, drop_dwork.work);
1531 struct pohmelfs_inode *pi;
1532 unsigned int count = 0;
1533
1534 while ((pi = pohmelfs_get_inode_from_list(psb, &psb->drop_list, &count)))
1535 pohmelfs_put_inode_count(pi, count);
1536
1537 pohmelfs_check_states(psb);
1538
1539 if (psb->drop_scan_timeout)
1540 schedule_delayed_work(&psb->drop_dwork, psb->drop_scan_timeout);
1541}
1542
1543/*
1544 * Run through all transactions starting from the oldest,
1545  * drop each transaction from the current state and try to send it
1546  * to all currently installed remote nodes.
1547 */
1548static void pohmelfs_trans_scan_state(struct netfs_state *st)
1549{
1550 struct rb_node *rb_node;
1551 struct netfs_trans_dst *dst;
1552 struct pohmelfs_sb *psb = st->psb;
1553 unsigned int timeout = psb->trans_scan_timeout;
1554 struct netfs_trans *t;
1555 int err;
1556
1557 mutex_lock(&st->trans_lock);
1558 for (rb_node = rb_first(&st->trans_root); rb_node; ) {
1559 dst = rb_entry(rb_node, struct netfs_trans_dst, state_entry);
1560 t = dst->trans;
1561
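		/* Transactions are walked oldest first, so once one is still fresh and has never been retried, the rest can be skipped. */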
1562 if (timeout && time_after(dst->send_time + timeout, jiffies)
1563 && dst->retries == 0)
1564 break;
1565
1566 dprintk("%s: t: %p, gen: %u, st: %p, retries: %u, max: %u.\n",
1567 __func__, t, t->gen, st, dst->retries, psb->trans_retries);
1568 netfs_trans_get(t);
1569
1570 rb_node = rb_next(rb_node);
1571
1572 err = -ETIMEDOUT;
1573 if (timeout && (++dst->retries < psb->trans_retries))
1574 err = netfs_trans_resend(t, psb);
1575
1576 if (err || (t->flags & NETFS_TRANS_SINGLE_DST)) {
1577 if (netfs_trans_remove_nolock(dst, st))
1578 netfs_trans_drop_dst_nostate(dst);
1579 }
1580
1581 t->result = err;
1582 netfs_trans_put(t);
1583 }
1584 mutex_unlock(&st->trans_lock);
1585}
1586
1587/*
1588 * Walk through all installed network states and resend all
1589  * transactions that are old enough.
1590 */
1591static void pohmelfs_trans_scan(struct work_struct *work)
1592{
1593 struct pohmelfs_sb *psb =
1594 container_of(work, struct pohmelfs_sb, dwork.work);
1595 struct netfs_state *st;
1596 struct pohmelfs_config *c;
1597
1598 mutex_lock(&psb->state_lock);
1599 list_for_each_entry(c, &psb->state_list, config_entry) {
1600 st = &c->state;
1601
1602 pohmelfs_trans_scan_state(st);
1603 }
1604 mutex_unlock(&psb->state_lock);
1605
1606 /*
1607 	 * If no timeout is specified, the system is in the middle of the umount process,
1608 	 * so there is no need to reschedule the scan.
1609 */
1610 if (psb->trans_scan_timeout)
1611 schedule_delayed_work(&psb->dwork, psb->trans_scan_timeout);
1612}
1613
1614int pohmelfs_meta_command_data(struct pohmelfs_inode *pi, u64 id, unsigned int cmd_op, char *addon,
1615 unsigned int flags, netfs_trans_complete_t complete, void *priv, u64 start)
1616{
1617 struct inode *inode = &pi->vfs_inode;
1618 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
1619 int err = 0, sz;
1620 struct netfs_trans *t;
1621 int path_len, addon_len = 0;
1622 void *data;
1623 struct netfs_inode_info *info;
1624 struct netfs_cmd *cmd;
1625
1626 dprintk("%s: ino: %llu, cmd: %u, addon: %p.\n", __func__, pi->ino, cmd_op, addon);
1627
1628 path_len = pohmelfs_path_length(pi);
1629 if (path_len < 0) {
1630 err = path_len;
1631 goto err_out_exit;
1632 }
1633
1634 if (addon)
1635 addon_len = strlen(addon) + 1; /* 0-byte */
1636 sz = addon_len;
1637
1638 if (cmd_op == NETFS_INODE_INFO)
1639 sz += sizeof(struct netfs_inode_info);
1640
1641 t = netfs_trans_alloc(psb, sz + path_len, flags, 0);
1642 if (!t) {
1643 err = -ENOMEM;
1644 goto err_out_exit;
1645 }
1646 t->complete = complete;
1647 t->private = priv;
1648
1649 cmd = netfs_trans_current(t);
1650 data = (void *)(cmd + 1);
1651
1652 if (cmd_op == NETFS_INODE_INFO) {
1653 info = (struct netfs_inode_info *)(cmd + 1);
1654 data = (void *)(info + 1);
1655
1656 /*
1657 		 * We are under i_mutex and can read and change whatever we want...
1658 */
1659 info->mode = inode->i_mode;
1660 info->nlink = inode->i_nlink;
1661 info->uid = inode->i_uid;
1662 info->gid = inode->i_gid;
1663 info->blocks = inode->i_blocks;
1664 info->rdev = inode->i_rdev;
1665 info->size = inode->i_size;
1666 info->version = inode->i_version;
1667
1668 netfs_convert_inode_info(info);
1669 }
1670
1671 path_len = pohmelfs_construct_path_string(pi, data, path_len);
1672 if (path_len < 0)
1673 goto err_out_free;
1674
1675 dprintk("%s: path_len: %d.\n", __func__, path_len);
1676
1677 if (addon) {
1678 path_len--; /* Do not place null-byte before the addon */
1679 path_len += sprintf(data + path_len, "/%s", addon) + 1; /* 0 - byte */
1680 }
1681
1682 sz += path_len;
1683
1684 cmd->cmd = cmd_op;
1685 cmd->ext = path_len;
1686 cmd->size = sz;
1687 cmd->id = id;
1688 cmd->start = start;
1689
1690 netfs_convert_cmd(cmd);
1691 netfs_trans_update(cmd, t, sz);
1692
1693 /*
1694 	 * Note that it is possible to leak an error here: the transaction callback will not
1695 	 * be invoked on an allocation failure path.
1696 */
1697 return netfs_trans_finish(t, psb);
1698
1699err_out_free:
1700 netfs_trans_free(t);
1701err_out_exit:
1702 if (complete)
1703 complete(NULL, 0, priv, err);
1704 return err;
1705}
1706
1707int pohmelfs_meta_command(struct pohmelfs_inode *pi, unsigned int cmd_op, unsigned int flags,
1708 netfs_trans_complete_t complete, void *priv, u64 start)
1709{
1710 return pohmelfs_meta_command_data(pi, pi->ino, cmd_op, NULL, flags, complete, priv, start);
1711}
1712
1713/*
1714 * Send a request and wait for the POHMELFS root capabilities response,
1715 * which will update our information about the export: its size,
1716 * permissions, number of objects, available space and so on.
1717 */
1718static int pohmelfs_root_handshake(struct pohmelfs_sb *psb)
1719{
1720 struct netfs_trans *t;
1721 struct netfs_cmd *cmd;
1722 int err = -ENOMEM;
1723
1724 t = netfs_trans_alloc(psb, 0, 0, 0);
1725 if (!t)
1726 goto err_out_exit;
1727
1728 cmd = netfs_trans_current(t);
1729
1730 cmd->cmd = NETFS_CAPABILITIES;
1731 cmd->id = POHMELFS_ROOT_CAPABILITIES;
1732 cmd->size = 0;
1733 cmd->start = 0;
1734 cmd->ext = 0;
1735 cmd->csize = 0;
1736
1737 netfs_convert_cmd(cmd);
1738 netfs_trans_update(cmd, t, 0);
1739
1740 err = netfs_trans_finish(t, psb);
1741 if (err)
1742 goto err_out_exit;
1743
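	/* ~0 acts as a sentinel: the reply handler overwrites psb->flags with the server status, whose negated value becomes the error code below. */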
1744 psb->flags = ~0;
1745 err = wait_event_interruptible_timeout(psb->wait,
1746 (psb->flags != ~0),
1747 psb->wait_on_page_timeout);
1748 if (!err)
1749 err = -ETIMEDOUT;
1750 else if (err > 0)
1751 err = -psb->flags;
1752
1753 if (err)
1754 goto err_out_exit;
1755
1756 return 0;
1757
1758err_out_exit:
1759 return err;
1760}
1761
1762static int pohmelfs_show_stats(struct seq_file *m, struct dentry *root)
1763{
1764 struct netfs_state *st;
1765 struct pohmelfs_ctl *ctl;
1766 struct pohmelfs_sb *psb = POHMELFS_SB(root->d_sb);
1767 struct pohmelfs_config *c;
1768
1769 mutex_lock(&psb->state_lock);
1770
1771 seq_printf(m, "\nidx addr(:port) socket_type protocol active priority permissions\n");
1772
1773 list_for_each_entry(c, &psb->state_list, config_entry) {
1774 st = &c->state;
1775 ctl = &st->ctl;
1776
1777 seq_printf(m, "%u ", ctl->idx);
1778 if (ctl->addr.sa_family == AF_INET) {
1779 struct sockaddr_in *sin = (struct sockaddr_in *)&st->ctl.addr;
1780 seq_printf(m, "%pI4:%u", &sin->sin_addr.s_addr, ntohs(sin->sin_port));
1781 } else if (ctl->addr.sa_family == AF_INET6) {
1782 struct sockaddr_in6 *sin = (struct sockaddr_in6 *)&st->ctl.addr;
1783 seq_printf(m, "%pi6:%u", &sin->sin6_addr, ntohs(sin->sin6_port));
1784 } else {
1785 unsigned int i;
1786 for (i = 0; i < ctl->addrlen; ++i)
1787 seq_printf(m, "%02x.", ctl->addr.addr[i]);
1788 }
1789
1790 seq_printf(m, " %u %u %d %u %x\n",
1791 ctl->type, ctl->proto,
1792 st->socket != NULL,
1793 ctl->prio, ctl->perm);
1794 }
1795 mutex_unlock(&psb->state_lock);
1796
1797 return 0;
1798}
1799
1800static const struct super_operations pohmelfs_sb_ops = {
1801 .alloc_inode = pohmelfs_alloc_inode,
1802 .destroy_inode = pohmelfs_destroy_inode,
1803 .drop_inode = pohmelfs_drop_inode,
1804 .write_inode = pohmelfs_write_inode,
1805 .put_super = pohmelfs_put_super,
1806 .remount_fs = pohmelfs_remount,
1807 .statfs = pohmelfs_statfs,
1808 .show_options = pohmelfs_show_options,
1809 .show_stats = pohmelfs_show_stats,
1810};
1811
1812/*
1813 * Allocate private superblock and create root dir.
1814 */
1815static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
1816{
1817 struct pohmelfs_sb *psb;
1818 int err = -ENOMEM;
1819 struct inode *root;
1820 struct pohmelfs_inode *npi;
1821 struct qstr str;
1822
1823 psb = kzalloc(sizeof(struct pohmelfs_sb), GFP_KERNEL);
1824 if (!psb)
1825 goto err_out_exit;
1826
1827 err = bdi_init(&psb->bdi);
1828 if (err)
1829 goto err_out_free_sb;
1830
1831 err = bdi_register(&psb->bdi, NULL, "pfs-%d", atomic_inc_return(&psb_bdi_num));
1832 if (err) {
1833 bdi_destroy(&psb->bdi);
1834 goto err_out_free_sb;
1835 }
1836
1837 sb->s_fs_info = psb;
1838 sb->s_op = &pohmelfs_sb_ops;
1839 sb->s_magic = POHMELFS_MAGIC_NUM;
1840 sb->s_maxbytes = MAX_LFS_FILESIZE;
1841 sb->s_blocksize = PAGE_SIZE;
1842 sb->s_bdi = &psb->bdi;
1843
1844 psb->sb = sb;
1845
1846 psb->ino = 2;
1847 psb->idx = 0;
1848 psb->active_state = NULL;
1849 psb->trans_retries = 5;
1850 psb->trans_data_size = PAGE_SIZE;
1851 psb->drop_scan_timeout = msecs_to_jiffies(1000);
1852 psb->trans_scan_timeout = msecs_to_jiffies(5000);
1853 psb->wait_on_page_timeout = msecs_to_jiffies(5000);
1854 init_waitqueue_head(&psb->wait);
1855
1856 spin_lock_init(&psb->ino_lock);
1857
1858 INIT_LIST_HEAD(&psb->drop_list);
1859
1860 mutex_init(&psb->mcache_lock);
1861 psb->mcache_root = RB_ROOT;
1862 psb->mcache_timeout = msecs_to_jiffies(5000);
1863 atomic_long_set(&psb->mcache_gen, 0);
1864
1865 psb->trans_max_pages = 100;
1866
1867 psb->crypto_align_size = 16;
1868 psb->crypto_attached_size = 0;
1869 psb->hash_strlen = 0;
1870 psb->cipher_strlen = 0;
1871 psb->perform_crypto = 0;
1872 psb->crypto_thread_num = 2;
1873 psb->crypto_fail_unsupported = 0;
1874 mutex_init(&psb->crypto_thread_lock);
1875 INIT_LIST_HEAD(&psb->crypto_ready_list);
1876 INIT_LIST_HEAD(&psb->crypto_active_list);
1877
1878 atomic_set(&psb->trans_gen, 1);
1879 atomic_long_set(&psb->total_inodes, 0);
1880
1881 mutex_init(&psb->state_lock);
1882 INIT_LIST_HEAD(&psb->state_list);
1883
1884 err = pohmelfs_parse_options((char *) data, psb, 0);
1885 if (err)
1886 goto err_out_free_bdi;
1887
1888 err = pohmelfs_copy_crypto(psb);
1889 if (err)
1890 goto err_out_free_bdi;
1891
1892 err = pohmelfs_state_init(psb);
1893 if (err)
1894 goto err_out_free_strings;
1895
1896 err = pohmelfs_crypto_init(psb);
1897 if (err)
1898 goto err_out_state_exit;
1899
1900 err = pohmelfs_root_handshake(psb);
1901 if (err)
1902 goto err_out_crypto_exit;
1903
1904 str.name = "/";
1905 str.hash = jhash("/", 1, 0);
1906 str.len = 1;
1907
1908 npi = pohmelfs_create_entry_local(psb, NULL, &str, 0, 0755|S_IFDIR);
1909 if (IS_ERR(npi)) {
1910 err = PTR_ERR(npi);
1911 goto err_out_crypto_exit;
1912 }
1913 set_bit(NETFS_INODE_REMOTE_SYNCED, &npi->state);
1914 clear_bit(NETFS_INODE_OWNED, &npi->state);
1915
1916 root = &npi->vfs_inode;
1917
1918 sb->s_root = d_alloc_root(root);
1919 if (!sb->s_root)
1920 goto err_out_put_root;
1921
1922 INIT_DELAYED_WORK(&psb->drop_dwork, pohmelfs_drop_scan);
1923 schedule_delayed_work(&psb->drop_dwork, psb->drop_scan_timeout);
1924
1925 INIT_DELAYED_WORK(&psb->dwork, pohmelfs_trans_scan);
1926 schedule_delayed_work(&psb->dwork, psb->trans_scan_timeout);
1927
1928 return 0;
1929
1930err_out_put_root:
1931 iput(root);
1932err_out_crypto_exit:
1933 pohmelfs_crypto_exit(psb);
1934err_out_state_exit:
1935 pohmelfs_state_exit(psb);
1936err_out_free_strings:
1937 kfree(psb->cipher_string);
1938 kfree(psb->hash_string);
1939err_out_free_bdi:
1940 bdi_destroy(&psb->bdi);
1941err_out_free_sb:
1942 kfree(psb);
1943err_out_exit:
1944
1945 dprintk("%s: err: %d.\n", __func__, err);
1946 return err;
1947}
1948
1949/*
1950 * Some VFS magic here...
1951 */
1952static struct dentry *pohmelfs_mount(struct file_system_type *fs_type,
1953 int flags, const char *dev_name, void *data)
1954{
1955 return mount_nodev(fs_type, flags, data, pohmelfs_fill_super);
1956}
1957
1958/*
1959 * We need this to sync all inodes earlier, since when writeback
1960 * is invoked from the umount/mntput path the dcache has already been shrunk
1961 * (see generic_shutdown_super()) and no inode can access its path.
1962 */
1963static void pohmelfs_kill_super(struct super_block *sb)
1964{
1965 sync_inodes_sb(sb);
1966 kill_anon_super(sb);
1967}
1968
1969static struct file_system_type pohmel_fs_type = {
1970 .owner = THIS_MODULE,
1971 .name = "pohmel",
1972 .mount = pohmelfs_mount,
1973 .kill_sb = pohmelfs_kill_super,
1974};
1975
1976/*
1977 * Cache and module initialization and cleanup routines.
1978 */
1979static void pohmelfs_init_once(void *data)
1980{
1981 struct pohmelfs_inode *pi = data;
1982
1983 inode_init_once(&pi->vfs_inode);
1984}
1985
1986static int __init pohmelfs_init_inodecache(void)
1987{
1988 pohmelfs_inode_cache = kmem_cache_create("pohmelfs_inode_cache",
1989 sizeof(struct pohmelfs_inode),
1990 0, (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD),
1991 pohmelfs_init_once);
1992 if (!pohmelfs_inode_cache)
1993 return -ENOMEM;
1994
1995 return 0;
1996}
1997
1998static void pohmelfs_destroy_inodecache(void)
1999{
2000 kmem_cache_destroy(pohmelfs_inode_cache);
2001}
2002
2003static int __init init_pohmel_fs(void)
2004{
2005 int err;
2006
2007 err = pohmelfs_config_init();
2008 if (err)
2009 goto err_out_exit;
2010
2011 err = pohmelfs_init_inodecache();
2012 if (err)
2013 goto err_out_config_exit;
2014
2015 err = pohmelfs_mcache_init();
2016 if (err)
2017 goto err_out_destroy;
2018
2019 err = netfs_trans_init();
2020 if (err)
2021 goto err_out_mcache_exit;
2022
2023 err = register_filesystem(&pohmel_fs_type);
2024 if (err)
2025 goto err_out_trans;
2026
2027 return 0;
2028
2029err_out_trans:
2030 netfs_trans_exit();
2031err_out_mcache_exit:
2032 pohmelfs_mcache_exit();
2033err_out_destroy:
2034 pohmelfs_destroy_inodecache();
2035err_out_config_exit:
2036 pohmelfs_config_exit();
2037err_out_exit:
2038 return err;
2039}
2040
2041static void __exit exit_pohmel_fs(void)
2042{
2043 unregister_filesystem(&pohmel_fs_type);
2044 pohmelfs_destroy_inodecache();
2045 pohmelfs_mcache_exit();
2046 pohmelfs_config_exit();
2047 netfs_trans_exit();
2048}
2049
2050module_init(init_pohmel_fs);
2051module_exit(exit_pohmel_fs);
2052
2053MODULE_LICENSE("GPL");
2054MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
2055MODULE_DESCRIPTION("Pohmel filesystem");
diff --git a/drivers/staging/pohmelfs/lock.c b/drivers/staging/pohmelfs/lock.c
deleted file mode 100644
index 6710114cd425..000000000000
--- a/drivers/staging/pohmelfs/lock.c
+++ /dev/null
@@ -1,182 +0,0 @@
1/*
2 * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include <linux/module.h>
17#include <linux/backing-dev.h>
18#include <linux/fs.h>
19#include <linux/fsnotify.h>
20#include <linux/mempool.h>
21
22#include "netfs.h"
23
24static int pohmelfs_send_lock_trans(struct pohmelfs_inode *pi,
25 u64 id, u64 start, u32 size, int type)
26{
27 struct inode *inode = &pi->vfs_inode;
28 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
29 struct netfs_trans *t;
30 struct netfs_cmd *cmd;
31 int path_len, err;
32 void *data;
33 struct netfs_lock *l;
34 int isize = (type & POHMELFS_LOCK_GRAB) ? 0 : sizeof(struct netfs_inode_info);
35
36 err = pohmelfs_path_length(pi);
37 if (err < 0)
38 goto err_out_exit;
39
40 path_len = err;
41
42 err = -ENOMEM;
43 t = netfs_trans_alloc(psb, path_len + sizeof(struct netfs_lock) + isize,
44 NETFS_TRANS_SINGLE_DST, 0);
45 if (!t)
46 goto err_out_exit;
47
48 cmd = netfs_trans_current(t);
49 data = cmd + 1;
50
51 err = pohmelfs_construct_path_string(pi, data, path_len);
52 if (err < 0)
53 goto err_out_free;
54 path_len = err;
55
56 l = data + path_len;
57
58 l->start = start;
59 l->size = size;
60 l->type = type;
61 l->ino = pi->ino;
62
63 cmd->cmd = NETFS_LOCK;
64 cmd->start = 0;
65 cmd->id = id;
66 cmd->size = sizeof(struct netfs_lock) + path_len + isize;
67 cmd->ext = path_len;
68 cmd->csize = 0;
69
70 netfs_convert_cmd(cmd);
71 netfs_convert_lock(l);
72
73 if (isize) {
74 struct netfs_inode_info *info = (struct netfs_inode_info *)(l + 1);
75
76 info->mode = inode->i_mode;
77 info->nlink = inode->i_nlink;
78 info->uid = inode->i_uid;
79 info->gid = inode->i_gid;
80 info->blocks = inode->i_blocks;
81 info->rdev = inode->i_rdev;
82 info->size = inode->i_size;
83 info->version = inode->i_version;
84
85 netfs_convert_inode_info(info);
86 }
87
88 netfs_trans_update(cmd, t, path_len + sizeof(struct netfs_lock) + isize);
89
90 return netfs_trans_finish(t, psb);
91
92err_out_free:
93 netfs_trans_free(t);
94err_out_exit:
95 printk("%s: err: %d.\n", __func__, err);
96 return err;
97}
98
99int pohmelfs_data_lock(struct pohmelfs_inode *pi, u64 start, u32 size, int type)
100{
101 struct pohmelfs_sb *psb = POHMELFS_SB(pi->vfs_inode.i_sb);
102 struct pohmelfs_mcache *m;
103 int err = -ENOMEM;
104 struct iattr iattr;
105 struct inode *inode = &pi->vfs_inode;
106
107 dprintk("%s: %p: ino: %llu, start: %llu, size: %u, "
108 "type: %d, locked as: %d, owned: %d.\n",
109 __func__, &pi->vfs_inode, pi->ino,
110 start, size, type, pi->lock_type,
111 !!test_bit(NETFS_INODE_OWNED, &pi->state));
112
113 if (!pohmelfs_need_lock(pi, type))
114 return 0;
115
116 m = pohmelfs_mcache_alloc(psb, start, size, NULL);
117 if (IS_ERR(m))
118 return PTR_ERR(m);
119
120 err = pohmelfs_send_lock_trans(pi, m->gen, start, size,
121 type | POHMELFS_LOCK_GRAB);
122 if (err)
123 goto err_out_put;
124
125 err = wait_for_completion_timeout(&m->complete, psb->mcache_timeout);
126 if (err)
127 err = m->err;
128 else
129 err = -ETIMEDOUT;
130
131 if (err) {
132 printk("%s: %p: ino: %llu, mgen: %llu, start: %llu, size: %u, err: %d.\n",
133 __func__, &pi->vfs_inode, pi->ino, m->gen, start, size, err);
134 }
135
136 if (err && (err != -ENOENT))
137 goto err_out_put;
138
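	/* -ENOENT means the object does not exist on the server yet; the lock is still treated as granted, just without inode info to apply. */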
139 if (!err) {
140 netfs_convert_inode_info(&m->info);
141
142 iattr.ia_valid = ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_SIZE | ATTR_ATIME;
143 iattr.ia_mode = m->info.mode;
144 iattr.ia_uid = m->info.uid;
145 iattr.ia_gid = m->info.gid;
146 iattr.ia_size = m->info.size;
147 iattr.ia_atime = CURRENT_TIME;
148
149 dprintk("%s: %p: ino: %llu, mgen: %llu, start: %llu, isize: %llu -> %llu.\n",
150 __func__, &pi->vfs_inode, pi->ino, m->gen, start, inode->i_size, m->info.size);
151
152 err = pohmelfs_setattr_raw(inode, &iattr);
153 if (!err) {
154 struct dentry *dentry = d_find_alias(inode);
155 if (dentry) {
156 fsnotify_change(dentry, iattr.ia_valid);
157 dput(dentry);
158 }
159 }
160 }
161
162 pi->lock_type = type;
163 set_bit(NETFS_INODE_OWNED, &pi->state);
164
165 pohmelfs_mcache_put(psb, m);
166
167 return 0;
168
169err_out_put:
170 pohmelfs_mcache_put(psb, m);
171 return err;
172}
173
174int pohmelfs_data_unlock(struct pohmelfs_inode *pi, u64 start, u32 size, int type)
175{
176 dprintk("%s: %p: ino: %llu, start: %llu, size: %u, type: %d.\n",
177 __func__, &pi->vfs_inode, pi->ino, start, size, type);
178 pi->lock_type = 0;
179 clear_bit(NETFS_INODE_REMOTE_DIR_SYNCED, &pi->state);
180 clear_bit(NETFS_INODE_OWNED, &pi->state);
181 return pohmelfs_send_lock_trans(pi, pi->ino, start, size, type);
182}
diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
deleted file mode 100644
index e22665cdd16c..000000000000
--- a/drivers/staging/pohmelfs/mcache.c
+++ /dev/null
@@ -1,171 +0,0 @@
1/*
2 * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include <linux/module.h>
17#include <linux/slab.h>
18#include <linux/mempool.h>
19
20#include "netfs.h"
21
22static struct kmem_cache *pohmelfs_mcache_cache;
23static mempool_t *pohmelfs_mcache_pool;
24
25static inline int pohmelfs_mcache_cmp(u64 gen, u64 new)
26{
27 if (gen < new)
28 return 1;
29 if (gen > new)
30 return -1;
31 return 0;
32}
33
34struct pohmelfs_mcache *pohmelfs_mcache_search(struct pohmelfs_sb *psb, u64 gen)
35{
36 struct rb_root *root = &psb->mcache_root;
37 struct rb_node *n = root->rb_node;
38 struct pohmelfs_mcache *tmp, *ret = NULL;
39 int cmp;
40
41 while (n) {
42 tmp = rb_entry(n, struct pohmelfs_mcache, mcache_entry);
43
44 cmp = pohmelfs_mcache_cmp(tmp->gen, gen);
45 if (cmp < 0)
46 n = n->rb_left;
47 else if (cmp > 0)
48 n = n->rb_right;
49 else {
50 ret = tmp;
51 pohmelfs_mcache_get(ret);
52 break;
53 }
54 }
55
56 return ret;
57}
58
59static int pohmelfs_mcache_insert(struct pohmelfs_sb *psb, struct pohmelfs_mcache *m)
60{
61 struct rb_root *root = &psb->mcache_root;
62 struct rb_node **n = &root->rb_node, *parent = NULL;
63 struct pohmelfs_mcache *ret = NULL, *tmp;
64 int cmp;
65
66 while (*n) {
67 parent = *n;
68
69 tmp = rb_entry(parent, struct pohmelfs_mcache, mcache_entry);
70
71 cmp = pohmelfs_mcache_cmp(tmp->gen, m->gen);
72 if (cmp < 0)
73 n = &parent->rb_left;
74 else if (cmp > 0)
75 n = &parent->rb_right;
76 else {
77 ret = tmp;
78 break;
79 }
80 }
81
82 if (ret)
83 return -EEXIST;
84
85 rb_link_node(&m->mcache_entry, parent, n);
86 rb_insert_color(&m->mcache_entry, root);
87
88 return 0;
89}
90
91static int pohmelfs_mcache_remove(struct pohmelfs_sb *psb, struct pohmelfs_mcache *m)
92{
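	/* rb_parent_color is zeroed right after rb_erase() below, so a non-zero value doubles as an 'is still linked' marker. */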
93 if (m && m->mcache_entry.rb_parent_color) {
94 rb_erase(&m->mcache_entry, &psb->mcache_root);
95 m->mcache_entry.rb_parent_color = 0;
96 return 1;
97 }
98 return 0;
99}
100
101void pohmelfs_mcache_remove_locked(struct pohmelfs_sb *psb, struct pohmelfs_mcache *m)
102{
103 mutex_lock(&psb->mcache_lock);
104 pohmelfs_mcache_remove(psb, m);
105 mutex_unlock(&psb->mcache_lock);
106}
107
108struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start,
109 unsigned int size, void *data)
110{
111 struct pohmelfs_mcache *m;
112 int err = -ENOMEM;
113
114 m = mempool_alloc(pohmelfs_mcache_pool, GFP_KERNEL);
115 if (!m)
116 goto err_out_exit;
117
118 init_completion(&m->complete);
119 m->err = 0;
120 atomic_set(&m->refcnt, 1);
121 m->data = data;
122 m->start = start;
123 m->size = size;
124 m->gen = atomic_long_inc_return(&psb->mcache_gen);
125
126 mutex_lock(&psb->mcache_lock);
127 err = pohmelfs_mcache_insert(psb, m);
128 mutex_unlock(&psb->mcache_lock);
129 if (err)
130 goto err_out_free;
131
132 return m;
133
134err_out_free:
135 mempool_free(m, pohmelfs_mcache_pool);
136err_out_exit:
137 return ERR_PTR(err);
138}
139
140void pohmelfs_mcache_free(struct pohmelfs_sb *psb, struct pohmelfs_mcache *m)
141{
142 pohmelfs_mcache_remove_locked(psb, m);
143
144 mempool_free(m, pohmelfs_mcache_pool);
145}
146
147int __init pohmelfs_mcache_init(void)
148{
149 pohmelfs_mcache_cache = kmem_cache_create("pohmelfs_mcache_cache",
150 sizeof(struct pohmelfs_mcache),
151 0, (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD), NULL);
152 if (!pohmelfs_mcache_cache)
153 goto err_out_exit;
154
155 pohmelfs_mcache_pool = mempool_create_slab_pool(256, pohmelfs_mcache_cache);
156 if (!pohmelfs_mcache_pool)
157 goto err_out_free;
158
159 return 0;
160
161err_out_free:
162 kmem_cache_destroy(pohmelfs_mcache_cache);
163err_out_exit:
164 return -ENOMEM;
165}
166
167void pohmelfs_mcache_exit(void)
168{
169 mempool_destroy(pohmelfs_mcache_pool);
170 kmem_cache_destroy(pohmelfs_mcache_cache);
171}
diff --git a/drivers/staging/pohmelfs/net.c b/drivers/staging/pohmelfs/net.c
deleted file mode 100644
index b2e918622088..000000000000
--- a/drivers/staging/pohmelfs/net.c
+++ /dev/null
@@ -1,1209 +0,0 @@
1/*
2 * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include <linux/fsnotify.h>
17#include <linux/jhash.h>
18#include <linux/in.h>
19#include <linux/in6.h>
20#include <linux/kthread.h>
21#include <linux/pagemap.h>
22#include <linux/poll.h>
23#include <linux/slab.h>
24#include <linux/swap.h>
25#include <linux/syscalls.h>
26#include <linux/vmalloc.h>
27
28#include "netfs.h"
29
30/*
31 * Async machinery lives here.
32 * All commands sent to the server do _not_ require a synchronous reply;
33 * instead, when one is really needed, as for readdir or readpage, the caller
34 * sleeps waiting for the data, which will be placed into the provided buffer
35 * before the caller is awakened.
36 *
37 * A command response can also arrive without any listener. For example, a
38 * readdir response will add new objects into the cache without a corresponding
39 * request from userspace. This is used for cache coherency.
40 *
41 * If no object is found for the given data, the data is discarded.
42 *
43 * All requests are received by a dedicated kernel thread.
44 */
45
46/*
47 * Basic network sending/receiving functions.
48 * Blocking mode is used.
49 */
50static int netfs_data_recv(struct netfs_state *st, void *buf, u64 size)
51{
52 struct msghdr msg;
53 struct kvec iov;
54 int err;
55
56 BUG_ON(!size);
57
58 iov.iov_base = buf;
59 iov.iov_len = size;
60
61 msg.msg_iov = (struct iovec *)&iov;
62 msg.msg_iovlen = 1;
63 msg.msg_name = NULL;
64 msg.msg_namelen = 0;
65 msg.msg_control = NULL;
66 msg.msg_controllen = 0;
67 msg.msg_flags = MSG_DONTWAIT;
68
69 err = kernel_recvmsg(st->socket, &msg, &iov, 1, iov.iov_len,
70 msg.msg_flags);
71 if (err <= 0) {
72 printk("%s: failed to recv data: size: %llu, err: %d.\n", __func__, size, err);
73 if (err == 0)
74 err = -ECONNRESET;
75 }
76
77 return err;
78}
79
80static int pohmelfs_data_recv(struct netfs_state *st, void *data, unsigned int size)
81{
82 unsigned int revents = 0;
83 unsigned int err_mask = POLLERR | POLLHUP | POLLRDHUP;
84 unsigned int mask = err_mask | POLLIN;
85 int err = 0;
86
87 while (size && !err) {
88 revents = netfs_state_poll(st);
89
90 if (!(revents & mask)) {
91 DEFINE_WAIT(wait);
92
93 for (;;) {
94 prepare_to_wait(&st->thread_wait, &wait, TASK_INTERRUPTIBLE);
95 if (kthread_should_stop())
96 break;
97
98 revents = netfs_state_poll(st);
99
100 if (revents & mask)
101 break;
102
103 if (signal_pending(current))
104 break;
105
106 schedule();
107 continue;
108 }
109 finish_wait(&st->thread_wait, &wait);
110 }
111
112 err = 0;
113 netfs_state_lock(st);
114 if (st->socket && (st->read_socket == st->socket) && (revents & POLLIN)) {
115 err = netfs_data_recv(st, data, size);
116 if (err > 0) {
117 data += err;
118 size -= err;
119 err = 0;
120 } else if (err == 0)
121 err = -ECONNRESET;
122 }
123
124 if (revents & err_mask) {
125 printk("%s: revents: %x, socket: %p, size: %u, err: %d.\n",
126 __func__, revents, st->socket, size, err);
127 err = -ECONNRESET;
128 }
129 netfs_state_unlock(st);
130
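		/* On a fatal socket error, reinitialise the network state right away; if the send lock is busy, just mark the state for reset. */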
131 if (err < 0) {
132 if (netfs_state_trylock_send(st)) {
133 netfs_state_exit(st);
134 err = netfs_state_init(st);
135 if (!err)
136 err = -EAGAIN;
137 netfs_state_unlock_send(st);
138 } else {
139 st->need_reset = 1;
140 }
141 }
142
143 if (kthread_should_stop())
144 err = -ENODEV;
145
146 if (err)
147 printk("%s: socket: %p, read_socket: %p, revents: %x, rev_error: %d, "
148 "should_stop: %d, size: %u, err: %d.\n",
149 __func__, st->socket, st->read_socket,
150 revents, revents & err_mask, kthread_should_stop(), size, err);
151 }
152
153 return err;
154}
155
156int pohmelfs_data_recv_and_check(struct netfs_state *st, void *data, unsigned int size)
157{
158 struct netfs_cmd *cmd = &st->cmd;
159 int err;
160
161 err = pohmelfs_data_recv(st, data, size);
162 if (err)
163 return err;
164
165 return pohmelfs_crypto_process_input_data(&st->eng, cmd->iv, data, NULL, size);
166}
167
168/*
169 * Polling machinery.
170 */
171
172struct netfs_poll_helper {
173 poll_table pt;
174 struct netfs_state *st;
175};
176
177static int netfs_queue_wake(wait_queue_t *wait, unsigned mode, int sync, void *key)
178{
179 struct netfs_state *st = container_of(wait, struct netfs_state, wait);
180
181 wake_up(&st->thread_wait);
182 return 1;
183}
184
185static void netfs_queue_func(struct file *file, wait_queue_head_t *whead,
186 poll_table *pt)
187{
188 struct netfs_state *st = container_of(pt, struct netfs_poll_helper, pt)->st;
189
190 st->whead = whead;
191 init_waitqueue_func_entry(&st->wait, netfs_queue_wake);
192 add_wait_queue(whead, &st->wait);
193}
194
195static void netfs_poll_exit(struct netfs_state *st)
196{
197 if (st->whead) {
198 remove_wait_queue(st->whead, &st->wait);
199 st->whead = NULL;
200 }
201}
202
203static int netfs_poll_init(struct netfs_state *st)
204{
205 struct netfs_poll_helper ph;
206
207 ph.st = st;
208 init_poll_funcptr(&ph.pt, &netfs_queue_func);
209
210 st->socket->ops->poll(NULL, st->socket, &ph.pt);
211 return 0;
212}
213
214/*
215 * Get response for the readpage command. We search for the inode and page in its
216 * mapping and copy the data into it. If it was an async request, then we queue the
217 * page into shared data and wake up the listener, which will copy it to userspace.
218 *
219 * There is work in progress to allow calling copy_to_user() directly from the
220 * async receiving kernel thread.
221 */
222static int pohmelfs_read_page_response(struct netfs_state *st)
223{
224 struct pohmelfs_sb *psb = st->psb;
225 struct netfs_cmd *cmd = &st->cmd;
226 struct inode *inode;
227 struct page *page;
228 int err = 0;
229
230 if (cmd->size > PAGE_CACHE_SIZE) {
231 err = -EINVAL;
232 goto err_out_exit;
233 }
234
235 inode = ilookup(st->psb->sb, cmd->id);
236 if (!inode) {
237 printk("%s: failed to find inode: id: %llu.\n", __func__, cmd->id);
238 err = -ENOENT;
239 goto err_out_exit;
240 }
241
242 page = find_get_page(inode->i_mapping, cmd->start >> PAGE_CACHE_SHIFT);
243 if (!page || !PageLocked(page)) {
244 printk("%s: failed to find/lock page: page: %p, id: %llu, start: %llu, index: %llu.\n",
245 __func__, page, cmd->id, cmd->start, cmd->start >> PAGE_CACHE_SHIFT);
246
247 while (cmd->size) {
248 unsigned int sz = min(cmd->size, st->size);
249
250 err = pohmelfs_data_recv(st, st->data, sz);
251 if (err)
252 break;
253
254 cmd->size -= sz;
255 }
256
257 err = -ENODEV;
258 if (page)
259 goto err_out_page_put;
260 goto err_out_put;
261 }
262
263 if (cmd->size) {
264 void *addr;
265
266 addr = kmap(page);
267 err = pohmelfs_data_recv(st, addr, cmd->size);
268 kunmap(page);
269
270 if (err)
271 goto err_out_page_unlock;
272 }
273
274 dprintk("%s: page: %p, start: %llu, size: %u, locked: %d.\n",
275 __func__, page, cmd->start, cmd->size, PageLocked(page));
276
277 SetPageChecked(page);
278 if ((psb->hash_string || psb->cipher_string) && psb->perform_crypto && cmd->size) {
279 err = pohmelfs_crypto_process_input_page(&st->eng, page, cmd->size, cmd->iv);
280 if (err < 0)
281 goto err_out_page_unlock;
282 } else {
283 SetPageUptodate(page);
284 unlock_page(page);
285 page_cache_release(page);
286 }
287
288 pohmelfs_put_inode(POHMELFS_I(inode));
289 wake_up(&st->psb->wait);
290
291 return 0;
292
293err_out_page_unlock:
294 SetPageError(page);
295 unlock_page(page);
296err_out_page_put:
297 page_cache_release(page);
298err_out_put:
299 pohmelfs_put_inode(POHMELFS_I(inode));
300err_out_exit:
301 wake_up(&st->psb->wait);
302 return err;
303}
304
305static int pohmelfs_check_name(struct pohmelfs_inode *parent, struct qstr *str,
306 struct netfs_inode_info *info)
307{
308 struct inode *inode;
309 struct pohmelfs_name *n;
310 int err = 0;
311 u64 ino = 0;
312
313 mutex_lock(&parent->offset_lock);
314 n = pohmelfs_search_hash(parent, str->hash);
315 if (n)
316 ino = n->ino;
317 mutex_unlock(&parent->offset_lock);
318
319 if (!ino)
320 goto out;
321
322 inode = ilookup(parent->vfs_inode.i_sb, ino);
323 if (!inode)
324 goto out;
325
326 dprintk("%s: parent: %llu, inode: %llu.\n", __func__, parent->ino, ino);
327
328 pohmelfs_fill_inode(inode, info);
329 pohmelfs_put_inode(POHMELFS_I(inode));
330 err = -EEXIST;
331out:
332 return err;
333}
334
335/*
336 * Readdir response from the server. If the special field is set, we wake up
337 * the listener (the readdir() call), which will copy the data to userspace.
338 */
339static int pohmelfs_readdir_response(struct netfs_state *st)
340{
341 struct inode *inode;
342 struct netfs_cmd *cmd = &st->cmd;
343 struct netfs_inode_info *info;
344 struct pohmelfs_inode *parent = NULL, *npi;
345 int err = 0, last = cmd->ext;
346 struct qstr str;
347
348 if (cmd->size > st->size)
349 return -EINVAL;
350
351 inode = ilookup(st->psb->sb, cmd->id);
352 if (!inode) {
353 printk("%s: failed to find inode: id: %llu.\n", __func__, cmd->id);
354 return -ENOENT;
355 }
356 parent = POHMELFS_I(inode);
357
358 if (!cmd->size && cmd->start) {
359 err = -cmd->start;
360 goto out;
361 }
362
363 if (cmd->size) {
364 char *name;
365
366 err = pohmelfs_data_recv_and_check(st, st->data, cmd->size);
367 if (err)
368 goto err_out_put;
369
370 info = (struct netfs_inode_info *)(st->data);
371
372 name = (char *)(info + 1);
373 str.len = cmd->size - sizeof(struct netfs_inode_info) - 1 - cmd->cpad;
374 name[str.len] = 0;
375 str.name = name;
376 str.hash = jhash(str.name, str.len, 0);
377
378 netfs_convert_inode_info(info);
379
380 if (parent) {
381 err = pohmelfs_check_name(parent, &str, info);
382 if (err) {
383 if (err == -EEXIST)
384 err = 0;
385 goto out;
386 }
387 }
388
389 info->ino = cmd->start;
390 if (!info->ino)
391 info->ino = pohmelfs_new_ino(st->psb);
392
393 dprintk("%s: parent: %llu, ino: %llu, name: '%s', hash: %x, len: %u, mode: %o.\n",
394 __func__, parent->ino, info->ino, str.name, str.hash, str.len,
395 info->mode);
396
397 npi = pohmelfs_new_inode(st->psb, parent, &str, info, 0);
398 if (IS_ERR(npi)) {
399 err = PTR_ERR(npi);
400
401 if (err != -EEXIST)
402 goto err_out_put;
403 } else {
404 struct dentry *dentry, *alias, *pd;
405
406 set_bit(NETFS_INODE_REMOTE_SYNCED, &npi->state);
407 clear_bit(NETFS_INODE_OWNED, &npi->state);
408
409 pd = d_find_alias(&parent->vfs_inode);
410 if (pd) {
411 str.hash = full_name_hash(str.name, str.len);
412 dentry = d_alloc(pd, &str);
413 if (dentry) {
414 alias = d_materialise_unique(dentry, &npi->vfs_inode);
415 if (alias)
416 dput(alias);
417 }
418
419 dput(dentry);
420 dput(pd);
421 }
422 }
423 }
424out:
425 if (last) {
426 set_bit(NETFS_INODE_REMOTE_DIR_SYNCED, &parent->state);
427 set_bit(NETFS_INODE_REMOTE_SYNCED, &parent->state);
428 wake_up(&st->psb->wait);
429 }
430 pohmelfs_put_inode(parent);
431
432 return err;
433
434err_out_put:
435 clear_bit(NETFS_INODE_REMOTE_DIR_SYNCED, &parent->state);
436 printk("%s: parent: %llu, ino: %llu, cmd_id: %llu.\n", __func__, parent->ino, cmd->start, cmd->id);
437 pohmelfs_put_inode(parent);
438 wake_up(&st->psb->wait);
439 return err;
440}
441
442/*
443 * Lookup command response.
444 * It searches for the inode being looked up (if it exists) and substitutes
445 * its inode information (size, permissions, mode and so on); if the inode
446 * does not exist, a new one will be created and inserted into the caches.
447 */
448static int pohmelfs_lookup_response(struct netfs_state *st)
449{
450 struct inode *inode = NULL;
451 struct netfs_cmd *cmd = &st->cmd;
452 struct netfs_inode_info *info;
453 struct pohmelfs_inode *parent = NULL, *npi;
454 int err = -EINVAL;
455 char *name;
456
457 inode = ilookup(st->psb->sb, cmd->id);
458 if (!inode) {
459 printk("%s: lookup response: id: %llu, start: %llu, size: %u.\n",
460 __func__, cmd->id, cmd->start, cmd->size);
461 err = -ENOENT;
462 goto err_out_exit;
463 }
464 parent = POHMELFS_I(inode);
465
466 if (!cmd->size) {
467 err = -cmd->start;
468 goto err_out_put;
469 }
470
471 if (cmd->size < sizeof(struct netfs_inode_info)) {
472 printk("%s: broken lookup response: id: %llu, start: %llu, size: %u.\n",
473 __func__, cmd->id, cmd->start, cmd->size);
474 err = -EINVAL;
475 goto err_out_put;
476 }
477
478 err = pohmelfs_data_recv_and_check(st, st->data, cmd->size);
479 if (err)
480 goto err_out_put;
481
482 info = (struct netfs_inode_info *)(st->data);
483 name = (char *)(info + 1);
484
485 netfs_convert_inode_info(info);
486
487 info->ino = cmd->start;
488 if (!info->ino)
489 info->ino = pohmelfs_new_ino(st->psb);
490
491 dprintk("%s: parent: %llu, ino: %llu, name: '%s', start: %llu.\n",
492 __func__, parent->ino, info->ino, name, cmd->start);
493
494 if (cmd->start)
495 npi = pohmelfs_new_inode(st->psb, parent, NULL, info, 0);
496 else {
497 struct qstr str;
498
499 str.name = name;
500 str.len = cmd->size - sizeof(struct netfs_inode_info) - 1 - cmd->cpad;
501 str.hash = jhash(name, str.len, 0);
502
503 npi = pohmelfs_new_inode(st->psb, parent, &str, info, 0);
504 }
505 if (IS_ERR(npi)) {
506 err = PTR_ERR(npi);
507
508 if (err != -EEXIST)
509 goto err_out_put;
510 } else {
511 set_bit(NETFS_INODE_REMOTE_SYNCED, &npi->state);
512 clear_bit(NETFS_INODE_OWNED, &npi->state);
513 }
514
515 clear_bit(NETFS_COMMAND_PENDING, &parent->state);
516 pohmelfs_put_inode(parent);
517
518 wake_up(&st->psb->wait);
519
520 return 0;
521
522err_out_put:
523 pohmelfs_put_inode(parent);
524err_out_exit:
525 clear_bit(NETFS_COMMAND_PENDING, &parent->state);
526 wake_up(&st->psb->wait);
527 printk("%s: inode: %p, id: %llu, start: %llu, size: %u, err: %d.\n",
528 __func__, inode, cmd->id, cmd->start, cmd->size, err);
529 return err;
530}
531
532/*
533 * Create response: just marks the local inode as 'created', so that writeback
534 * for any of its children (or the inode itself) will not try to sync it again.
535 */
536static int pohmelfs_create_response(struct netfs_state *st)
537{
538 struct inode *inode;
539 struct netfs_cmd *cmd = &st->cmd;
540 struct pohmelfs_inode *pi;
541
542 inode = ilookup(st->psb->sb, cmd->id);
543 if (!inode) {
544 printk("%s: failed to find inode: id: %llu, start: %llu.\n",
545 __func__, cmd->id, cmd->start);
546 goto err_out_exit;
547 }
548
549 pi = POHMELFS_I(inode);
550
551 /*
552 * To lock or not to lock?
553 * We actually do not care if it races...
554 */
555 if (cmd->start)
556 make_bad_inode(inode);
557 set_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state);
558
559 pohmelfs_put_inode(pi);
560
561 wake_up(&st->psb->wait);
562 return 0;
563
564err_out_exit:
565 wake_up(&st->psb->wait);
566 return -ENOENT;
567}
568
569/*
570 * Object remove response. Just says that the remove request has been received.
571 * Used in the cache coherency protocol.
572 */
573static int pohmelfs_remove_response(struct netfs_state *st)
574{
575 struct netfs_cmd *cmd = &st->cmd;
576 int err;
577
578 err = pohmelfs_data_recv_and_check(st, st->data, cmd->size);
579 if (err)
580 return err;
581
582 dprintk("%s: parent: %llu, path: '%s'.\n", __func__, cmd->id, (char *)st->data);
583
584 return 0;
585}
586
587/*
588 * Transaction reply processing.
589 *
590 * Find the transaction based on its generation number, bump its reference
591 * counter so that no one can free it under us, drop it from the trees and
592 * lists and drop the reference counter. When it hits zero (when all destinations
593 * have replied and all timeouts have been handled by the async scanning code),
594 * the completion will be called and the transaction will be freed.
595 */
596static int pohmelfs_transaction_response(struct netfs_state *st)
597{
598 struct netfs_trans_dst *dst;
599 struct netfs_trans *t = NULL;
600 struct netfs_cmd *cmd = &st->cmd;
601 short err = (signed)cmd->ext;
602
603 mutex_lock(&st->trans_lock);
604 dst = netfs_trans_search(st, cmd->start);
605 if (dst) {
606 netfs_trans_remove_nolock(dst, st);
607 t = dst->trans;
608 }
609 mutex_unlock(&st->trans_lock);
610
611 if (!t) {
612 printk("%s: failed to find transaction: start: %llu: id: %llu, size: %u, ext: %u.\n",
613 __func__, cmd->start, cmd->id, cmd->size, cmd->ext);
614 err = -EINVAL;
615 goto out;
616 }
617
618 t->result = err;
619 netfs_trans_drop_dst_nostate(dst);
620
621out:
622 wake_up(&st->psb->wait);
623 return err;
624}
625
626/*
627 * Inode metadata cache coherency message.
628 */
629static int pohmelfs_page_cache_response(struct netfs_state *st)
630{
631 struct netfs_cmd *cmd = &st->cmd;
632 struct inode *inode;
633
634 dprintk("%s: st: %p, id: %llu, start: %llu, size: %u.\n", __func__, st, cmd->id, cmd->start, cmd->size);
635
636 inode = ilookup(st->psb->sb, cmd->id);
637 if (!inode) {
638 printk("%s: failed to find inode: id: %llu.\n", __func__, cmd->id);
639 return -ENOENT;
640 }
641
642 set_bit(NETFS_INODE_NEED_FLUSH, &POHMELFS_I(inode)->state);
643 pohmelfs_put_inode(POHMELFS_I(inode));
644
645 return 0;
646}
647
648/*
649 * Root capabilities response: exported statistics
650 * such as used and available size, number of files and directories,
651 * and permissions.
652 */
653static int pohmelfs_root_cap_response(struct netfs_state *st)
654{
655 struct netfs_cmd *cmd = &st->cmd;
656 struct netfs_root_capabilities *cap;
657 struct pohmelfs_sb *psb = st->psb;
658
659 if (cmd->size != sizeof(struct netfs_root_capabilities)) {
660 psb->flags = EPROTO;
661 wake_up(&psb->wait);
662 return -EPROTO;
663 }
664
665 cap = st->data;
666
667 netfs_convert_root_capabilities(cap);
668
669 if (psb->total_size < cap->used + cap->avail)
670 psb->total_size = cap->used + cap->avail;
671 if (cap->avail)
672 psb->avail_size = cap->avail;
673 psb->state_flags = cap->flags;
674
675 if (psb->state_flags & POHMELFS_FLAGS_RO) {
676 psb->sb->s_flags |= MS_RDONLY;
677 printk(KERN_INFO "Mounting POHMELFS (%d) read-only.\n", psb->idx);
678 }
679
680 if (psb->state_flags & POHMELFS_FLAGS_XATTR)
681 printk(KERN_INFO "Mounting POHMELFS (%d) "
682 "with extended attributes support.\n", psb->idx);
683
684 if (atomic_long_read(&psb->total_inodes) <= 1)
685 atomic_long_set(&psb->total_inodes, cap->nr_files);
686
687 dprintk("%s: total: %llu, avail: %llu, flags: %llx, inodes: %llu.\n",
688 __func__, psb->total_size, psb->avail_size, psb->state_flags, cap->nr_files);
689
690 psb->flags = 0;
691 wake_up(&psb->wait);
692 return 0;
693}
694
695/*
696 * Crypto capabilities of the server, where it says whether
697 * or not it supports the requested hash/cipher algorithms.
698 */
699static int pohmelfs_crypto_cap_response(struct netfs_state *st)
700{
701 struct netfs_cmd *cmd = &st->cmd;
702 struct netfs_crypto_capabilities *cap;
703 struct pohmelfs_sb *psb = st->psb;
704 int err = 0;
705
706 if (cmd->size != sizeof(struct netfs_crypto_capabilities)) {
707 psb->flags = EPROTO;
708 wake_up(&psb->wait);
709 return -EPROTO;
710 }
711
712 cap = st->data;
713
714 dprintk("%s: cipher '%s': %s, hash: '%s': %s.\n",
715 __func__,
716 psb->cipher_string, (cap->cipher_strlen) ? "SUPPORTED" : "NOT SUPPORTED",
717 psb->hash_string, (cap->hash_strlen) ? "SUPPORTED" : "NOT SUPPORTED");
718
719 if (!cap->hash_strlen) {
720 if (psb->hash_strlen && psb->crypto_fail_unsupported)
721 err = -ENOTSUPP;
722 psb->hash_strlen = 0;
723 kfree(psb->hash_string);
724 psb->hash_string = NULL;
725 }
726
727 if (!cap->cipher_strlen) {
728 if (psb->cipher_strlen && psb->crypto_fail_unsupported)
729 err = -ENOTSUPP;
730 psb->cipher_strlen = 0;
731 kfree(psb->cipher_string);
732 psb->cipher_string = NULL;
733 }
734
735 return err;
736}
737
738/*
739 * Capabilities handshake response.
740 */
741static int pohmelfs_capabilities_response(struct netfs_state *st)
742{
743 struct netfs_cmd *cmd = &st->cmd;
744 int err = 0;
745
746 err = pohmelfs_data_recv(st, st->data, cmd->size);
747 if (err)
748 return err;
749
750 switch (cmd->id) {
751 case POHMELFS_CRYPTO_CAPABILITIES:
752 return pohmelfs_crypto_cap_response(st);
753 case POHMELFS_ROOT_CAPABILITIES:
754 return pohmelfs_root_cap_response(st);
755 default:
756 break;
757 }
758 return -EINVAL;
759}
760
761/*
762 * Receiving an extended attribute.
763 * Does not work properly if the received size is larger than the requested
764 * one; that should not happen with the current request/reply model though.
765 */
766static int pohmelfs_getxattr_response(struct netfs_state *st)
767{
768 struct pohmelfs_sb *psb = st->psb;
769 struct netfs_cmd *cmd = &st->cmd;
770 struct pohmelfs_mcache *m;
771 short error = (signed short)cmd->ext, err;
772 unsigned int sz, total_size;
773
774 m = pohmelfs_mcache_search(psb, cmd->id);
775
776 dprintk("%s: id: %llu, gen: %llu, err: %d.\n",
777 __func__, cmd->id, (m) ? m->gen : 0, error);
778
779 if (!m) {
780 printk("%s: failed to find getxattr cache entry: id: %llu.\n", __func__, cmd->id);
781 return -ENOENT;
782 }
783
784 if (cmd->size) {
785 sz = min_t(unsigned int, cmd->size, m->size);
786 err = pohmelfs_data_recv_and_check(st, m->data, sz);
787 if (err) {
788 error = err;
789 goto out;
790 }
791
792 m->size = sz;
793 total_size = cmd->size - sz;
794
795 while (total_size) {
796 sz = min(total_size, st->size);
797
798 err = pohmelfs_data_recv_and_check(st, st->data, sz);
799 if (err) {
800 error = err;
801 break;
802 }
803
804 total_size -= sz;
805 }
806 }
807
808out:
809 m->err = error;
810 complete(&m->complete);
811 pohmelfs_mcache_put(psb, m);
812
813 return error;
814}
815
816int pohmelfs_data_lock_response(struct netfs_state *st)
817{
818 struct pohmelfs_sb *psb = st->psb;
819 struct netfs_cmd *cmd = &st->cmd;
820 struct pohmelfs_mcache *m;
821 short err = (signed short)cmd->ext;
822 u64 id = cmd->id;
823
824 m = pohmelfs_mcache_search(psb, id);
825
826 dprintk("%s: id: %llu, gen: %llu, err: %d.\n",
827 __func__, cmd->id, (m) ? m->gen : 0, err);
828
829 if (!m) {
830 pohmelfs_data_recv(st, st->data, cmd->size);
831 printk("%s: failed to find data lock response: id: %llu.\n", __func__, cmd->id);
832 return -ENOENT;
833 }
834
835 if (cmd->size)
836 err = pohmelfs_data_recv_and_check(st, &m->info, cmd->size);
837
838 m->err = err;
839 complete(&m->complete);
840 pohmelfs_mcache_put(psb, m);
841
842 return err;
843}
844
845static void __inline__ netfs_state_reset(struct netfs_state *st)
846{
847 netfs_state_lock_send(st);
848 netfs_state_exit(st);
849 netfs_state_init(st);
850 netfs_state_unlock_send(st);
851}
852
853/*
854 * Main receiving function, called from dedicated kernel thread.
855 */
856static int pohmelfs_recv(void *data)
857{
858 int err = -EINTR;
859 struct netfs_state *st = data;
860 struct netfs_cmd *cmd = &st->cmd;
861
862 while (!kthread_should_stop()) {
863 /*
864		 * If the socket is reset after this statement, then
865		 * pohmelfs_data_recv() will just fail and the loop will
866		 * start again, so this can be done without any locks.
867		 *
868		 * st->read_socket is needed to prevent the state machine from
869		 * breaking between this data read and a subsequent one
870		 * in protocol-specific functions during a connection reset.
871		 * In case of a reset we have to read the next command and not
872		 * expect data for the old command to magically appear on the
873		 * new connection.
874 */
875 st->read_socket = st->socket;
876 err = pohmelfs_data_recv(st, cmd, sizeof(struct netfs_cmd));
877 if (err) {
878 msleep(1000);
879 continue;
880 }
881
882 netfs_convert_cmd(cmd);
883
884 dprintk("%s: cmd: %u, id: %llu, start: %llu, size: %u, "
885 "ext: %u, csize: %u, cpad: %u.\n",
886 __func__, cmd->cmd, cmd->id, cmd->start,
887 cmd->size, cmd->ext, cmd->csize, cmd->cpad);
888
889 if (cmd->csize) {
890 struct pohmelfs_crypto_engine *e = &st->eng;
891
892 if (unlikely(cmd->csize > e->size/2)) {
893 netfs_state_reset(st);
894 continue;
895 }
896
897 if (e->hash && unlikely(cmd->csize != st->psb->crypto_attached_size)) {
898 dprintk("%s: cmd: cmd: %u, id: %llu, start: %llu, size: %u, "
899 "csize: %u != digest size %u.\n",
900 __func__, cmd->cmd, cmd->id, cmd->start, cmd->size,
901 cmd->csize, st->psb->crypto_attached_size);
902 netfs_state_reset(st);
903 continue;
904 }
905
906 err = pohmelfs_data_recv(st, e->data, cmd->csize);
907 if (err) {
908 netfs_state_reset(st);
909 continue;
910 }
911
912#ifdef CONFIG_POHMELFS_DEBUG
913 {
914 unsigned int i;
915 unsigned char *hash = e->data;
916
917 dprintk("%s: received hash: ", __func__);
918 for (i = 0; i < cmd->csize; ++i)
919 printk("%02x ", hash[i]);
920
921 printk("\n");
922 }
923#endif
924 cmd->size -= cmd->csize;
925 }
926
927 /*
928 * This should catch protocol breakage and random garbage instead of commands.
929 */
930 if (unlikely((cmd->size > st->size) && (cmd->cmd != NETFS_XATTR_GET))) {
931 netfs_state_reset(st);
932 continue;
933 }
934
935 switch (cmd->cmd) {
936 case NETFS_READ_PAGE:
937 err = pohmelfs_read_page_response(st);
938 break;
939 case NETFS_READDIR:
940 err = pohmelfs_readdir_response(st);
941 break;
942 case NETFS_LOOKUP:
943 err = pohmelfs_lookup_response(st);
944 break;
945 case NETFS_CREATE:
946 err = pohmelfs_create_response(st);
947 break;
948 case NETFS_REMOVE:
949 err = pohmelfs_remove_response(st);
950 break;
951 case NETFS_TRANS:
952 err = pohmelfs_transaction_response(st);
953 break;
954 case NETFS_PAGE_CACHE:
955 err = pohmelfs_page_cache_response(st);
956 break;
957 case NETFS_CAPABILITIES:
958 err = pohmelfs_capabilities_response(st);
959 break;
960 case NETFS_LOCK:
961 err = pohmelfs_data_lock_response(st);
962 break;
963 case NETFS_XATTR_GET:
964 err = pohmelfs_getxattr_response(st);
965 break;
966 default:
967 printk("%s: wrong cmd: %u, id: %llu, start: %llu, size: %u, ext: %u.\n",
968 __func__, cmd->cmd, cmd->id, cmd->start, cmd->size, cmd->ext);
969 netfs_state_reset(st);
970 break;
971 }
972 }
973
974 while (!kthread_should_stop())
975 schedule_timeout_uninterruptible(msecs_to_jiffies(10));
976
977 return err;
978}
979
980int netfs_state_init(struct netfs_state *st)
981{
982 int err;
983 struct pohmelfs_ctl *ctl = &st->ctl;
984
985 err = sock_create(ctl->addr.sa_family, ctl->type, ctl->proto, &st->socket);
986 if (err) {
987 printk("%s: failed to create a socket: family: %d, type: %d, proto: %d, err: %d.\n",
988 __func__, ctl->addr.sa_family, ctl->type, ctl->proto, err);
989 goto err_out_exit;
990 }
991
992 st->socket->sk->sk_allocation = GFP_NOIO;
993 st->socket->sk->sk_sndtimeo = st->socket->sk->sk_rcvtimeo = msecs_to_jiffies(60000);
994
995 err = kernel_connect(st->socket, (struct sockaddr *)&ctl->addr, ctl->addrlen, 0);
996 if (err) {
997 printk("%s: failed to connect to server: idx: %u, err: %d.\n",
998 __func__, st->psb->idx, err);
999 goto err_out_release;
1000 }
1001 st->socket->sk->sk_sndtimeo = st->socket->sk->sk_rcvtimeo = msecs_to_jiffies(60000);
1002
1003 err = netfs_poll_init(st);
1004 if (err)
1005 goto err_out_release;
1006
1007 if (st->socket->ops->family == AF_INET) {
1008 struct sockaddr_in *sin = (struct sockaddr_in *)&ctl->addr;
1009 printk(KERN_INFO "%s: (re)connected to peer %pi4:%d.\n", __func__,
1010 &sin->sin_addr.s_addr, ntohs(sin->sin_port));
1011 } else if (st->socket->ops->family == AF_INET6) {
1012 struct sockaddr_in6 *sin = (struct sockaddr_in6 *)&ctl->addr;
1013 printk(KERN_INFO "%s: (re)connected to peer %pi6:%d", __func__,
1014 &sin->sin6_addr, ntohs(sin->sin6_port));
1015 }
1016
1017 return 0;
1018
1019err_out_release:
1020 sock_release(st->socket);
1021err_out_exit:
1022 st->socket = NULL;
1023 return err;
1024}
1025
1026void netfs_state_exit(struct netfs_state *st)
1027{
1028 if (st->socket) {
1029 netfs_poll_exit(st);
1030 st->socket->ops->shutdown(st->socket, 2);
1031
1032 if (st->socket->ops->family == AF_INET) {
1033 struct sockaddr_in *sin = (struct sockaddr_in *)&st->ctl.addr;
1034 printk(KERN_INFO "%s: disconnected from peer %pi4:%d.\n", __func__,
1035 &sin->sin_addr.s_addr, ntohs(sin->sin_port));
1036 } else if (st->socket->ops->family == AF_INET6) {
1037 struct sockaddr_in6 *sin = (struct sockaddr_in6 *)&st->ctl.addr;
1038 printk(KERN_INFO "%s: disconnected from peer %pi6:%d", __func__,
1039 &sin->sin6_addr, ntohs(sin->sin6_port));
1040 }
1041
1042 sock_release(st->socket);
1043 st->socket = NULL;
1044 st->read_socket = NULL;
1045 st->need_reset = 0;
1046 }
1047}
1048
1049int pohmelfs_state_init_one(struct pohmelfs_sb *psb, struct pohmelfs_config *conf)
1050{
1051 struct netfs_state *st = &conf->state;
1052 int err = -ENOMEM;
1053
1054 mutex_init(&st->__state_lock);
1055 mutex_init(&st->__state_send_lock);
1056 init_waitqueue_head(&st->thread_wait);
1057
1058 st->psb = psb;
1059 st->trans_root = RB_ROOT;
1060 mutex_init(&st->trans_lock);
1061
1062 st->size = psb->trans_data_size;
1063 st->data = kmalloc(st->size, GFP_KERNEL);
1064 if (!st->data)
1065 goto err_out_exit;
1066
1067 if (psb->perform_crypto) {
1068 err = pohmelfs_crypto_engine_init(&st->eng, psb);
1069 if (err)
1070 goto err_out_free_data;
1071 }
1072
1073 err = netfs_state_init(st);
1074 if (err)
1075 goto err_out_free_engine;
1076
1077 st->thread = kthread_run(pohmelfs_recv, st, "pohmelfs/%u", psb->idx);
1078 if (IS_ERR(st->thread)) {
1079 err = PTR_ERR(st->thread);
1080 goto err_out_netfs_exit;
1081 }
1082
1083 if (!psb->active_state)
1084 psb->active_state = conf;
1085
1086 dprintk("%s: conf: %p, st: %p, socket: %p.\n",
1087 __func__, conf, st, st->socket);
1088 return 0;
1089
1090err_out_netfs_exit:
1091 netfs_state_exit(st);
1092err_out_free_engine:
1093 pohmelfs_crypto_engine_exit(&st->eng);
1094err_out_free_data:
1095 kfree(st->data);
1096err_out_exit:
1097 return err;
1098
1099}
1100
1101void pohmelfs_state_flush_transactions(struct netfs_state *st)
1102{
1103 struct rb_node *rb_node;
1104 struct netfs_trans_dst *dst;
1105
1106 mutex_lock(&st->trans_lock);
1107 for (rb_node = rb_first(&st->trans_root); rb_node; ) {
1108 dst = rb_entry(rb_node, struct netfs_trans_dst, state_entry);
1109 rb_node = rb_next(rb_node);
1110
1111 dst->trans->result = -EINVAL;
1112 netfs_trans_remove_nolock(dst, st);
1113 netfs_trans_drop_dst_nostate(dst);
1114 }
1115 mutex_unlock(&st->trans_lock);
1116}
1117
1118static void pohmelfs_state_exit_one(struct pohmelfs_config *c)
1119{
1120 struct netfs_state *st = &c->state;
1121
1122 dprintk("%s: exiting, st: %p.\n", __func__, st);
1123 if (st->thread) {
1124 kthread_stop(st->thread);
1125 st->thread = NULL;
1126 }
1127
1128 netfs_state_lock_send(st);
1129 netfs_state_exit(st);
1130 netfs_state_unlock_send(st);
1131
1132 pohmelfs_state_flush_transactions(st);
1133
1134 pohmelfs_crypto_engine_exit(&st->eng);
1135 kfree(st->data);
1136
1137 kfree(c);
1138}
1139
1140/*
1141 * Initialize the network stack. It searches for the given ID in the global
1142 * configuration table, which contains information about the remote server
1143 * (address (any supported by the socket interface), port, protocol and so on).
1144 */
1145int pohmelfs_state_init(struct pohmelfs_sb *psb)
1146{
1147 int err = -ENOMEM;
1148
1149 err = pohmelfs_copy_config(psb);
1150 if (err) {
1151 pohmelfs_state_exit(psb);
1152 return err;
1153 }
1154
1155 return 0;
1156}
1157
1158void pohmelfs_state_exit(struct pohmelfs_sb *psb)
1159{
1160 struct pohmelfs_config *c, *tmp;
1161
1162 list_for_each_entry_safe(c, tmp, &psb->state_list, config_entry) {
1163 list_del(&c->config_entry);
1164 pohmelfs_state_exit_one(c);
1165 }
1166}
1167
1168void pohmelfs_switch_active(struct pohmelfs_sb *psb)
1169{
1170 struct pohmelfs_config *c = psb->active_state;
1171
1172 if (!list_empty(&psb->state_list)) {
1173 if (c->config_entry.next != &psb->state_list) {
1174 psb->active_state = list_entry(c->config_entry.next,
1175 struct pohmelfs_config, config_entry);
1176 } else {
1177 psb->active_state = list_entry(psb->state_list.next,
1178 struct pohmelfs_config, config_entry);
1179 }
1180
1181 dprintk("%s: empty: %d, active %p -> %p.\n",
1182 __func__, list_empty(&psb->state_list), c,
1183 psb->active_state);
1184 } else
1185 psb->active_state = NULL;
1186}
1187
1188void pohmelfs_check_states(struct pohmelfs_sb *psb)
1189{
1190 struct pohmelfs_config *c, *tmp;
1191 LIST_HEAD(delete_list);
1192
1193 mutex_lock(&psb->state_lock);
1194 list_for_each_entry_safe(c, tmp, &psb->state_list, config_entry) {
1195 if (pohmelfs_config_check(c, psb->idx)) {
1196
1197 if (psb->active_state == c)
1198 pohmelfs_switch_active(psb);
1199 list_move(&c->config_entry, &delete_list);
1200 }
1201 }
1202 pohmelfs_copy_config(psb);
1203 mutex_unlock(&psb->state_lock);
1204
1205 list_for_each_entry_safe(c, tmp, &delete_list, config_entry) {
1206 list_del(&c->config_entry);
1207 pohmelfs_state_exit_one(c);
1208 }
1209}
diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
deleted file mode 100644
index f26894f2a57f..000000000000
--- a/drivers/staging/pohmelfs/netfs.h
+++ /dev/null
@@ -1,919 +0,0 @@
1/*
2 * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#ifndef __NETFS_H
17#define __NETFS_H
18
19#include <linux/types.h>
20#include <linux/connector.h>
21#include <linux/backing-dev.h>
22
23#define POHMELFS_CN_IDX 5
24#define POHMELFS_CN_VAL 0
25
26#define POHMELFS_CTLINFO_ACK 1
27#define POHMELFS_NOINFO_ACK 2
28
29#define POHMELFS_NULL_IDX 65535
30
31/*
32 * Network command structure.
33 * Will be extended.
34 */
35struct netfs_cmd {
36 __u16 cmd; /* Command number */
37 __u16 csize; /* Attached crypto information size */
38 __u16 cpad; /* Attached padding size */
39 __u16 ext; /* External flags */
40 __u32 size; /* Size of the attached data */
41 __u32 trans; /* Transaction id */
42 __u64 id; /* Object ID to operate on. Used for feedback.*/
43 __u64 start; /* Start of the object. */
44 __u64 iv; /* IV sequence */
45 __u8 data[0];
46};
47
48static inline void netfs_convert_cmd(struct netfs_cmd *cmd)
49{
50 cmd->id = __be64_to_cpu(cmd->id);
51 cmd->start = __be64_to_cpu(cmd->start);
52 cmd->iv = __be64_to_cpu(cmd->iv);
53 cmd->cmd = __be16_to_cpu(cmd->cmd);
54 cmd->ext = __be16_to_cpu(cmd->ext);
55 cmd->csize = __be16_to_cpu(cmd->csize);
56 cmd->cpad = __be16_to_cpu(cmd->cpad);
57 cmd->size = __be32_to_cpu(cmd->size);
58}
59
60#define NETFS_TRANS_SINGLE_DST (1<<0)
61
62enum {
63 NETFS_READDIR = 1, /* Read directory for given inode number */
64 NETFS_READ_PAGE, /* Read data page from the server */
65 NETFS_WRITE_PAGE, /* Write data page to the server */
66 NETFS_CREATE, /* Create directory entry */
67 NETFS_REMOVE, /* Remove directory entry */
68
69 NETFS_LOOKUP, /* Lookup single object */
70 NETFS_LINK, /* Create a link */
71 NETFS_TRANS, /* Transaction */
72 NETFS_OPEN, /* Open intent */
73 NETFS_INODE_INFO, /* Metadata cache coherency synchronization message */
74
75 NETFS_PAGE_CACHE, /* Page cache invalidation message */
76 NETFS_READ_PAGES, /* Read multiple contiguous pages in one go */
77 NETFS_RENAME, /* Rename object */
78 NETFS_CAPABILITIES, /* Capabilities of the client, for example supported crypto */
79 NETFS_LOCK, /* Distributed lock message */
80
81 NETFS_XATTR_SET, /* Set extended attribute */
82 NETFS_XATTR_GET, /* Get extended attribute */
83 NETFS_CMD_MAX
84};
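
/*
 * Illustrative sketch, not part of the original header: filling a command
 * header for a read-page request and converting it to network byte order
 * with netfs_convert_cmd() before it is handed to the sending code. The
 * helper name and the chosen field values are examples only.
 */
static inline void netfs_example_fill_cmd(struct netfs_cmd *cmd,
		__u64 ino, __u64 start, __u32 size)
{
	cmd->cmd = NETFS_READ_PAGE;	/* command number */
	cmd->id = ino;			/* object to operate on */
	cmd->start = start;		/* offset within the object */
	cmd->size = size;		/* size of the attached data */
	cmd->ext = 0;
	cmd->csize = 0;			/* no crypto data attached */
	cmd->cpad = 0;
	cmd->trans = 0;
	cmd->iv = 0;

	netfs_convert_cmd(cmd);		/* switch the header to big endian */
}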
85
86enum {
87 POHMELFS_FLAGS_ADD = 0, /* Network state control message for ADD */
88 POHMELFS_FLAGS_DEL, /* Network state control message for DEL */
89 POHMELFS_FLAGS_SHOW, /* Network state control message for SHOW */
90 POHMELFS_FLAGS_CRYPTO, /* Crypto data control message */
91 POHMELFS_FLAGS_MODIFY, /* Network state modification message */
92 POHMELFS_FLAGS_DUMP, /* Network state control message for SHOW ALL */
93 POHMELFS_FLAGS_FLUSH, /* Network state control message for FLUSH */
94};
95
96/*
97 * Always wanted to copy it from the socket headers into a public one,
98 * since they are __KERNEL__ protected there.
99 */
100#define _K_SS_MAXSIZE 128
101
102struct saddr {
103 unsigned short sa_family;
104 char addr[_K_SS_MAXSIZE];
105};
106
107enum {
108 POHMELFS_CRYPTO_HASH = 0,
109 POHMELFS_CRYPTO_CIPHER,
110};
111
112struct pohmelfs_crypto {
113 unsigned int idx; /* Config index */
114 unsigned short strlen; /* Size of the attached crypto string including 0-byte
115 * "cbc(aes)" for example */
116 unsigned short type; /* HMAC, cipher, both */
117 unsigned int keysize; /* Key size */
118 unsigned char data[0]; /* Algorithm string, key and IV */
119};
120
121#define POHMELFS_IO_PERM_READ (1<<0)
122#define POHMELFS_IO_PERM_WRITE (1<<1)
123
124/*
125 * Configuration command used to create table of different remote servers.
126 */
127struct pohmelfs_ctl {
128 __u32 idx; /* Config index */
129 __u32 type; /* Socket type */
130 __u32 proto; /* Socket protocol */
131 __u16 addrlen; /* Size of the address */
132 __u16 perm; /* IO permission */
133 __u16 prio; /* IO priority */
134 struct saddr addr; /* Remote server address */
135};
136
137/*
138 * Ack for userspace about requested command.
139 */
140struct pohmelfs_cn_ack {
141 struct cn_msg msg;
142 int error;
143 int msg_num;
144 int unused[3];
145 struct pohmelfs_ctl ctl;
146};
147
148/*
149 * Inode info structure used to sync with server.
150 * Check what stat() returns.
151 */
152struct netfs_inode_info {
153 unsigned int mode;
154 unsigned int nlink;
155 unsigned int uid;
156 unsigned int gid;
157 unsigned int blocksize;
158 unsigned int padding;
159 __u64 ino;
160 __u64 blocks;
161 __u64 rdev;
162 __u64 size;
163 __u64 version;
164};
165
166static inline void netfs_convert_inode_info(struct netfs_inode_info *info)
167{
168 info->mode = __cpu_to_be32(info->mode);
169 info->nlink = __cpu_to_be32(info->nlink);
170 info->uid = __cpu_to_be32(info->uid);
171 info->gid = __cpu_to_be32(info->gid);
172 info->blocksize = __cpu_to_be32(info->blocksize);
173 info->blocks = __cpu_to_be64(info->blocks);
174 info->rdev = __cpu_to_be64(info->rdev);
175 info->size = __cpu_to_be64(info->size);
176 info->version = __cpu_to_be64(info->version);
177 info->ino = __cpu_to_be64(info->ino);
178}
179
180/*
181 * Cache state machine.
182 */
183enum {
184 NETFS_COMMAND_PENDING = 0, /* Command is being executed */
185 NETFS_INODE_REMOTE_SYNCED, /* Inode was synced to server */
186 NETFS_INODE_REMOTE_DIR_SYNCED, /* Inode (directory) was synced from the server */
187 NETFS_INODE_OWNED, /* Inode is owned by given host */
188 NETFS_INODE_NEED_FLUSH, /* Inode has to be flushed to the server */
189};
190
191/*
192 * POHMELFS capabilities: information about supported
193 * crypto operations (hash/cipher, modes, key sizes and so on),
194 * root information (used/available size, number of objects, permissions)
195 */
196enum pohmelfs_capabilities {
197 POHMELFS_CRYPTO_CAPABILITIES = 0,
198 POHMELFS_ROOT_CAPABILITIES,
199};
200
201/* Read-only mount */
202#define POHMELFS_FLAGS_RO (1<<0)
203/* Extended attributes support on/off */
204#define POHMELFS_FLAGS_XATTR (1<<1)
205
206struct netfs_root_capabilities {
207 __u64 nr_files;
208 __u64 used, avail;
209 __u64 flags;
210};
211
212static inline void netfs_convert_root_capabilities(struct netfs_root_capabilities *cap)
213{
214 cap->nr_files = __cpu_to_be64(cap->nr_files);
215 cap->used = __cpu_to_be64(cap->used);
216 cap->avail = __cpu_to_be64(cap->avail);
217 cap->flags = __cpu_to_be64(cap->flags);
218}
219
220struct netfs_crypto_capabilities {
221	unsigned short		hash_strlen;	/* Hash string length, like "hmac(sha1)", including 0 byte */
222 unsigned short cipher_strlen; /* Cipher string length with the same format */
223 unsigned int cipher_keysize; /* Cipher key size */
224};
225
226static inline void netfs_convert_crypto_capabilities(struct netfs_crypto_capabilities *cap)
227{
228 cap->hash_strlen = __cpu_to_be16(cap->hash_strlen);
229 cap->cipher_strlen = __cpu_to_be16(cap->cipher_strlen);
230 cap->cipher_keysize = __cpu_to_be32(cap->cipher_keysize);
231}
232
233enum pohmelfs_lock_type {
234 POHMELFS_LOCK_GRAB = (1<<15),
235
236 POHMELFS_READ_LOCK = 0,
237 POHMELFS_WRITE_LOCK,
238};
239
240struct netfs_lock {
241 __u64 start;
242 __u64 ino;
243 __u32 size;
244 __u32 type;
245};
246
247static inline void netfs_convert_lock(struct netfs_lock *lock)
248{
249 lock->start = __cpu_to_be64(lock->start);
250 lock->ino = __cpu_to_be64(lock->ino);
251 lock->size = __cpu_to_be32(lock->size);
252 lock->type = __cpu_to_be32(lock->type);
253}
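
/*
 * Illustrative sketch, not part of the original header: building the payload
 * of a NETFS_LOCK request for a byte range of a given object and converting
 * it to network byte order. The helper name is an example only.
 */
static inline void netfs_example_fill_lock(struct netfs_lock *lock,
		__u64 ino, __u64 start, __u32 size, __u32 type)
{
	lock->ino = ino;	/* object the lock applies to */
	lock->start = start;	/* start of the locked byte range */
	lock->size = size;	/* length of the locked byte range */
	lock->type = type;	/* e.g. POHMELFS_READ_LOCK or POHMELFS_WRITE_LOCK */

	netfs_convert_lock(lock);
}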
254
255#ifdef __KERNEL__
256
257#include <linux/kernel.h>
258#include <linux/completion.h>
259#include <linux/rbtree.h>
260#include <linux/net.h>
261#include <linux/poll.h>
262
263/*
264 * Private POHMELFS cache of objects in directory.
265 */
266struct pohmelfs_name {
267 struct rb_node hash_node;
268
269 struct list_head sync_create_entry;
270
271 u64 ino;
272
273 u32 hash;
274 u32 mode;
275 u32 len;
276
277 char *data;
278};
279
280/*
281 * POHMELFS inode. Main object.
282 */
283struct pohmelfs_inode {
284 struct list_head inode_entry; /* Entry in superblock list.
285				 * Objects which are not bound to a dentry need to be dropped
286				 * in ->put_super()
287 */
288	struct rb_root	hash_root;		/* Cached child names, indexed by name hash and len */
289	struct mutex	offset_lock;		/* Protects the above tree */
290
291 struct list_head sync_create_list; /* List of created but not yet synced to the server children */
292
293 unsigned int drop_count;
294
295 int lock_type; /* How this inode is locked: read or write */
296
297 int error; /* Transaction error for given inode */
298
299 long state; /* State machine above */
300
301 u64 ino; /* Inode number */
302 u64 total_len; /* Total length of all children names, used to create offsets */
303
304 struct inode vfs_inode;
305};
306
307struct netfs_trans;
308typedef int (*netfs_trans_complete_t)(struct page **pages, unsigned int page_num,
309 void *private, int err);
310
311struct netfs_state;
312struct pohmelfs_sb;
313
314struct netfs_trans {
315 /*
316 * Transaction header and attached contiguous data live here.
317 */
318 struct iovec iovec;
319
320 /*
321 * Pages attached to transaction.
322 */
323 struct page **pages;
324
325 /*
326 * List and protecting lock for transaction destination
327 * network states.
328 */
329 spinlock_t dst_lock;
330 struct list_head dst_list;
331
332 /*
333 * Number of users for given transaction.
334 * For example each network state attached to transaction
335 * via dst_list increases it.
336 */
337 atomic_t refcnt;
338
339 /*
340 * Number of pages attached to given transaction.
341	 * Some slots in the above page array can be NULL, since
342	 * a page can already be under writeback, in which case
343	 * we skip it in this transaction.
344 */
345 unsigned int page_num;
346
347 /*
348 * Transaction flags: single dst or broadcast and so on.
349 */
350 unsigned int flags;
351
352 /*
353 * Size of the data, which can be placed into
354 * iovec.iov_base area.
355 */
356 unsigned int total_size;
357
358 /*
359	 * Number of pages to be sent to the remote server.
360	 * Usually equal to the above page_num, but in case of partial
361	 * writeback it can include only those pages which have already
362	 * completed the previous writeback.
363 */
364 unsigned int attached_pages;
365
366 /*
367 * Attached number of bytes in all above pages.
368 */
369 unsigned int attached_size;
370
371 /*
372	 * Unique transaction generation number.
373 * Used as identity in the network state tree of transactions.
374 */
375 unsigned int gen;
376
377 /*
378 * Transaction completion status.
379 */
380 int result;
381
382 /*
383 * Superblock this transaction belongs to
384 */
385 struct pohmelfs_sb *psb;
386
387 /*
388	 * Crypto engine which processed this transaction.
389	 * Can be non-NULL only if the crypto engine holds encrypted pages.
390 */
391 struct pohmelfs_crypto_engine *eng;
392
393 /* Private data */
394 void *private;
395
396 /* Completion callback, invoked just before transaction is destroyed */
397 netfs_trans_complete_t complete;
398};
399
400static inline int netfs_trans_cur_len(struct netfs_trans *t)
401{
402 return (signed)(t->total_size - t->iovec.iov_len);
403}
404
405static inline void *netfs_trans_current(struct netfs_trans *t)
406{
407 return t->iovec.iov_base + t->iovec.iov_len;
408}
409
410struct netfs_trans *netfs_trans_alloc(struct pohmelfs_sb *psb, unsigned int size,
411 unsigned int flags, unsigned int nr);
412void netfs_trans_free(struct netfs_trans *t);
413int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb);
414int netfs_trans_finish_send(struct netfs_trans *t, struct pohmelfs_sb *psb);
415
416static inline void netfs_trans_reset(struct netfs_trans *t)
417{
418 t->complete = NULL;
419}
420
421struct netfs_trans_dst {
422 struct list_head trans_entry;
423 struct rb_node state_entry;
424
425 unsigned long send_time;
426
427 /*
428	 * Number of times this transaction was resent to its old or new
429	 * (depending on flags) destinations. When it reaches the maximum
430	 * allowed number, specified in superblock->trans_retries, the
431	 * transaction will be freed with an ETIMEDOUT error.
432 */
433 unsigned int retries;
434
435 struct netfs_trans *trans;
436 struct netfs_state *state;
437};
438
439struct netfs_trans_dst *netfs_trans_search(struct netfs_state *st, unsigned int gen);
440void netfs_trans_drop_dst(struct netfs_trans_dst *dst);
441void netfs_trans_drop_dst_nostate(struct netfs_trans_dst *dst);
442void netfs_trans_drop_trans(struct netfs_trans *t, struct netfs_state *st);
443void netfs_trans_drop_last(struct netfs_trans *t, struct netfs_state *st);
444int netfs_trans_resend(struct netfs_trans *t, struct pohmelfs_sb *psb);
445int netfs_trans_remove_nolock(struct netfs_trans_dst *dst, struct netfs_state *st);
446
447int netfs_trans_init(void);
448void netfs_trans_exit(void);
449
450struct pohmelfs_crypto_engine {
451 u64 iv; /* Crypto IV for current operation */
452 unsigned long timeout; /* Crypto waiting timeout */
453 unsigned int size; /* Size of crypto scratchpad */
454	void			*data;		/* Temporary crypto scratchpad */
455 /*
456 * Crypto operations performed on objects.
457 */
458 struct crypto_hash *hash;
459 struct crypto_ablkcipher *cipher;
460
461 struct pohmelfs_crypto_thread *thread; /* Crypto thread which hosts this engine */
462
463 struct page **pages;
464 unsigned int page_num;
465};
466
467struct pohmelfs_crypto_thread {
468 struct list_head thread_entry;
469
470 struct task_struct *thread;
471 struct pohmelfs_sb *psb;
472
473 struct pohmelfs_crypto_engine eng;
474
475 struct netfs_trans *trans;
476
477 wait_queue_head_t wait;
478 int error;
479
480 unsigned int size;
481 struct page *page;
482};
483
484void pohmelfs_crypto_thread_make_ready(struct pohmelfs_crypto_thread *th);
485
486/*
487 * Network state, attached to one server.
488 */
489struct netfs_state {
490	struct mutex		__state_lock;	/* Do not allow the same socket to be used simultaneously */
491 struct mutex __state_send_lock;
492 struct netfs_cmd cmd; /* Cached command */
493 struct netfs_inode_info info; /* Cached inode info */
494
495	void			*data;		/* Cached data buffer */
496 unsigned int size; /* Size of that data */
497
498 struct pohmelfs_sb *psb; /* Superblock */
499
500 struct task_struct *thread; /* Async receiving thread */
501
502 /* Waiting/polling machinery */
503 wait_queue_t wait;
504 wait_queue_head_t *whead;
505 wait_queue_head_t thread_wait;
506
507 struct mutex trans_lock;
508 struct rb_root trans_root;
509
510 struct pohmelfs_ctl ctl; /* Remote peer */
511
512 struct socket *socket; /* Socket object */
513 struct socket *read_socket; /* Cached pointer to socket object.
514					 * Used to determine whether the socket was changed between lock drops.
515					 * Never used to read data or for any other kind of access.
516 */
517 /*
518 * Crypto engines to process incoming data.
519 */
520 struct pohmelfs_crypto_engine eng;
521
522 int need_reset;
523};
524
525int netfs_state_init(struct netfs_state *st);
526void netfs_state_exit(struct netfs_state *st);
527
528static inline void netfs_state_lock_send(struct netfs_state *st)
529{
530 mutex_lock(&st->__state_send_lock);
531}
532
533static inline int netfs_state_trylock_send(struct netfs_state *st)
534{
535 return mutex_trylock(&st->__state_send_lock);
536}
537
538static inline void netfs_state_unlock_send(struct netfs_state *st)
539{
540 BUG_ON(!mutex_is_locked(&st->__state_send_lock));
541
542 mutex_unlock(&st->__state_send_lock);
543}
544
545static inline void netfs_state_lock(struct netfs_state *st)
546{
547 mutex_lock(&st->__state_lock);
548}
549
550static inline void netfs_state_unlock(struct netfs_state *st)
551{
552 BUG_ON(!mutex_is_locked(&st->__state_lock));
553
554 mutex_unlock(&st->__state_lock);
555}
556
557static inline unsigned int netfs_state_poll(struct netfs_state *st)
558{
559 unsigned int revents = POLLHUP | POLLERR;
560
561 netfs_state_lock(st);
562 if (st->socket)
563 revents = st->socket->ops->poll(NULL, st->socket, NULL);
564 netfs_state_unlock(st);
565
566 return revents;
567}
568
569struct pohmelfs_config;
570
571struct pohmelfs_sb {
572 struct rb_root mcache_root;
573 struct mutex mcache_lock;
574 atomic_long_t mcache_gen;
575 unsigned long mcache_timeout;
576
577 unsigned int idx;
578
579 unsigned int trans_retries;
580
581 atomic_t trans_gen;
582
583 unsigned int crypto_attached_size;
584 unsigned int crypto_align_size;
585
586 unsigned int crypto_fail_unsupported;
587
588 unsigned int crypto_thread_num;
589 struct list_head crypto_active_list, crypto_ready_list;
590 struct mutex crypto_thread_lock;
591
592 unsigned int trans_max_pages;
593 unsigned long trans_data_size;
594 unsigned long trans_timeout;
595
596 unsigned long drop_scan_timeout;
597 unsigned long trans_scan_timeout;
598
599 unsigned long wait_on_page_timeout;
600
601 struct list_head flush_list;
602 struct list_head drop_list;
603 spinlock_t ino_lock;
604 u64 ino;
605
606 /*
607	 * Remote nodes POHMELFS is connected to.
608 */
609 struct list_head state_list;
610 struct mutex state_lock;
611
612 /*
613 * Currently active state to request data from.
614 */
615 struct pohmelfs_config *active_state;
616
617
618 wait_queue_head_t wait;
619
620 /*
621 * Timed checks: stale transactions, inodes to be freed and so on.
622 */
623 struct delayed_work dwork;
624 struct delayed_work drop_dwork;
625
626 struct super_block *sb;
627
628 struct backing_dev_info bdi;
629
630 /*
631 * Algorithm strings.
632 */
633 char *hash_string;
634 char *cipher_string;
635
636 u8 *hash_key;
637 u8 *cipher_key;
638
639 /*
640 * Algorithm string lengths.
641 */
642 unsigned int hash_strlen;
643 unsigned int cipher_strlen;
644 unsigned int hash_keysize;
645 unsigned int cipher_keysize;
646
647 /*
648	 * Controls whether to perform crypto processing or not.
649 */
650 int perform_crypto;
651
652 /*
653 * POHMELFS statistics.
654 */
655 u64 total_size;
656 u64 avail_size;
657 atomic_long_t total_inodes;
658
659 /*
660 * Xattr support, read-only and so on.
661 */
662 u64 state_flags;
663
664 /*
665 * Temporary storage to detect changes in the wait queue.
666 */
667 long flags;
668};
669
670static inline void netfs_trans_update(struct netfs_cmd *cmd,
671 struct netfs_trans *t, unsigned int size)
672{
673 unsigned int sz = ALIGN(size, t->psb->crypto_align_size);
674
675 t->iovec.iov_len += sizeof(struct netfs_cmd) + sz;
676 cmd->cpad = __cpu_to_be16(sz - size);
677}
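
/*
 * For illustration, not part of the original header: with size == 100 and
 * crypto_align_size == 16, sz becomes ALIGN(100, 16) == 112, iov_len grows
 * by sizeof(struct netfs_cmd) + 112, and cpad is set to 12 so that the
 * receiving side can strip the alignment padding again.
 */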
678
679static inline struct pohmelfs_sb *POHMELFS_SB(struct super_block *sb)
680{
681 return sb->s_fs_info;
682}
683
684static inline struct pohmelfs_inode *POHMELFS_I(struct inode *inode)
685{
686 return container_of(inode, struct pohmelfs_inode, vfs_inode);
687}
688
689static inline u64 pohmelfs_new_ino(struct pohmelfs_sb *psb)
690{
691 u64 ino;
692
693 spin_lock(&psb->ino_lock);
694 ino = psb->ino++;
695 spin_unlock(&psb->ino_lock);
696
697 return ino;
698}
699
700static inline void pohmelfs_put_inode(struct pohmelfs_inode *pi)
701{
702 struct pohmelfs_sb *psb = POHMELFS_SB(pi->vfs_inode.i_sb);
703
704 spin_lock(&psb->ino_lock);
705 list_move_tail(&pi->inode_entry, &psb->drop_list);
706 pi->drop_count++;
707 spin_unlock(&psb->ino_lock);
708}
709
710struct pohmelfs_config {
711 struct list_head config_entry;
712
713 struct netfs_state state;
714};
715
716struct pohmelfs_config_group {
717 /*
718 * Entry in the global config group list.
719 */
720 struct list_head group_entry;
721
722 /*
723 * Index of the current group.
724 */
725 unsigned int idx;
726 /*
727 * Number of config_list entries in this group entry.
728 */
729 unsigned int num_entry;
730 /*
731 * Algorithm strings.
732 */
733 char *hash_string;
734 char *cipher_string;
735
736 /*
737 * Algorithm string lengths.
738 */
739 unsigned int hash_strlen;
740 unsigned int cipher_strlen;
741
742 /*
743 * Key and its size.
744 */
745 unsigned int hash_keysize;
746 unsigned int cipher_keysize;
747 u8 *hash_key;
748 u8 *cipher_key;
749
750 /*
751 * List of config entries (network state info) for given idx.
752 */
753 struct list_head config_list;
754};
755
756int __init pohmelfs_config_init(void);
757void pohmelfs_config_exit(void);
758int pohmelfs_copy_config(struct pohmelfs_sb *psb);
759int pohmelfs_copy_crypto(struct pohmelfs_sb *psb);
760int pohmelfs_config_check(struct pohmelfs_config *config, int idx);
761int pohmelfs_state_init_one(struct pohmelfs_sb *psb, struct pohmelfs_config *conf);
762
763extern const struct file_operations pohmelfs_dir_fops;
764extern const struct inode_operations pohmelfs_dir_inode_ops;
765
766int pohmelfs_state_init(struct pohmelfs_sb *psb);
767void pohmelfs_state_exit(struct pohmelfs_sb *psb);
768void pohmelfs_state_flush_transactions(struct netfs_state *st);
769
770void pohmelfs_fill_inode(struct inode *inode, struct netfs_inode_info *info);
771
772void pohmelfs_name_del(struct pohmelfs_inode *parent, struct pohmelfs_name *n);
773void pohmelfs_free_names(struct pohmelfs_inode *parent);
774struct pohmelfs_name *pohmelfs_search_hash(struct pohmelfs_inode *pi, u32 hash);
775
776void pohmelfs_inode_del_inode(struct pohmelfs_sb *psb, struct pohmelfs_inode *pi);
777
778struct pohmelfs_inode *pohmelfs_create_entry_local(struct pohmelfs_sb *psb,
779 struct pohmelfs_inode *parent, struct qstr *str, u64 start, umode_t mode);
780
781int pohmelfs_write_create_inode(struct pohmelfs_inode *pi);
782
783int pohmelfs_write_inode_create(struct inode *inode, struct netfs_trans *trans);
784int pohmelfs_remove_child(struct pohmelfs_inode *parent, struct pohmelfs_name *n);
785
786struct pohmelfs_inode *pohmelfs_new_inode(struct pohmelfs_sb *psb,
787 struct pohmelfs_inode *parent, struct qstr *str,
788 struct netfs_inode_info *info, int link);
789
790int pohmelfs_setattr(struct dentry *dentry, struct iattr *attr);
791int pohmelfs_setattr_raw(struct inode *inode, struct iattr *attr);
792
793int pohmelfs_meta_command(struct pohmelfs_inode *pi, unsigned int cmd_op, unsigned int flags,
794 netfs_trans_complete_t complete, void *priv, u64 start);
795int pohmelfs_meta_command_data(struct pohmelfs_inode *pi, u64 id, unsigned int cmd_op, char *addon,
796 unsigned int flags, netfs_trans_complete_t complete, void *priv, u64 start);
797
798void pohmelfs_check_states(struct pohmelfs_sb *psb);
799void pohmelfs_switch_active(struct pohmelfs_sb *psb);
800
801int pohmelfs_construct_path_string(struct pohmelfs_inode *pi, void *data, int len);
802int pohmelfs_path_length(struct pohmelfs_inode *pi);
803
804struct pohmelfs_crypto_completion {
805 struct completion complete;
806 int error;
807};
808
809int pohmelfs_trans_crypt(struct netfs_trans *t, struct pohmelfs_sb *psb);
810void pohmelfs_crypto_exit(struct pohmelfs_sb *psb);
811int pohmelfs_crypto_init(struct pohmelfs_sb *psb);
812
813int pohmelfs_crypto_engine_init(struct pohmelfs_crypto_engine *e, struct pohmelfs_sb *psb);
814void pohmelfs_crypto_engine_exit(struct pohmelfs_crypto_engine *e);
815
816int pohmelfs_crypto_process_input_data(struct pohmelfs_crypto_engine *e, u64 iv,
817 void *data, struct page *page, unsigned int size);
818int pohmelfs_crypto_process_input_page(struct pohmelfs_crypto_engine *e,
819 struct page *page, unsigned int size, u64 iv);
820
821static inline u64 pohmelfs_gen_iv(struct netfs_trans *t)
822{
823 u64 iv = t->gen;
824
825 iv <<= 32;
826 iv |= ((unsigned long)t) & 0xffffffff;
827
828 return iv;
829}
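
/*
 * For illustration, not part of the original header: with transaction
 * generation 0x12 and a transaction structure at the (made up) address
 * 0xffff880012345678, the IV becomes 0x0000001212345678: the generation in
 * the upper 32 bits and the low 32 bits of the transaction address below it.
 */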
830
831int pohmelfs_data_lock(struct pohmelfs_inode *pi, u64 start, u32 size, int type);
832int pohmelfs_data_unlock(struct pohmelfs_inode *pi, u64 start, u32 size, int type);
833int pohmelfs_data_lock_response(struct netfs_state *st);
834
835static inline int pohmelfs_need_lock(struct pohmelfs_inode *pi, int type)
836{
837 if (test_bit(NETFS_INODE_OWNED, &pi->state)) {
838 if (type == pi->lock_type)
839 return 0;
840 if ((type == POHMELFS_READ_LOCK) && (pi->lock_type == POHMELFS_WRITE_LOCK))
841 return 0;
842 }
843
844 if (!test_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state))
845 return 0;
846
847 return 1;
848}
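
/*
 * For illustration, not part of the original header: pohmelfs_need_lock()
 * returns 0 when the inode is already owned with a compatible lock (the same
 * type, or a held write lock when only a read lock is requested) or when the
 * inode has not yet been synced with the server; otherwise a distributed
 * lock request is required and 1 is returned.
 */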
849
850int __init pohmelfs_mcache_init(void);
851void pohmelfs_mcache_exit(void);
852
853/* #define CONFIG_POHMELFS_DEBUG */
854
855#ifdef CONFIG_POHMELFS_DEBUG
856#define dprintka(f, a...) printk(f, ##a)
857#define dprintk(f, a...) printk("%d: " f, task_pid_vnr(current), ##a)
858#else
859#define dprintka(f, a...) do {} while (0)
860#define dprintk(f, a...) do {} while (0)
861#endif
862
863static inline void netfs_trans_get(struct netfs_trans *t)
864{
865 atomic_inc(&t->refcnt);
866}
867
868static inline void netfs_trans_put(struct netfs_trans *t)
869{
870 if (atomic_dec_and_test(&t->refcnt)) {
871 dprintk("%s: t: %p, gen: %u, err: %d.\n",
872 __func__, t, t->gen, t->result);
873 if (t->complete)
874 t->complete(t->pages, t->page_num,
875 t->private, t->result);
876 netfs_trans_free(t);
877 }
878}
879
880struct pohmelfs_mcache {
881 struct rb_node mcache_entry;
882 struct completion complete;
883
884 atomic_t refcnt;
885
886 u64 gen;
887
888 void *data;
889 u64 start;
890 u32 size;
891 int err;
892
893 struct netfs_inode_info info;
894};
895
896struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start,
897 unsigned int size, void *data);
898void pohmelfs_mcache_free(struct pohmelfs_sb *psb, struct pohmelfs_mcache *m);
899struct pohmelfs_mcache *pohmelfs_mcache_search(struct pohmelfs_sb *psb, u64 gen);
900void pohmelfs_mcache_remove_locked(struct pohmelfs_sb *psb, struct pohmelfs_mcache *m);
901
902static inline void pohmelfs_mcache_get(struct pohmelfs_mcache *m)
903{
904 atomic_inc(&m->refcnt);
905}
906
907static inline void pohmelfs_mcache_put(struct pohmelfs_sb *psb,
908 struct pohmelfs_mcache *m)
909{
910 if (atomic_dec_and_test(&m->refcnt))
911 pohmelfs_mcache_free(psb, m);
912}
913
914/*#define POHMELFS_TRUNCATE_ON_INODE_FLUSH
915 */
916
917#endif /* __KERNEL__*/
918
919#endif /* __NETFS_H */
diff --git a/drivers/staging/pohmelfs/path_entry.c b/drivers/staging/pohmelfs/path_entry.c
deleted file mode 100644
index 400a9fc386ad..000000000000
--- a/drivers/staging/pohmelfs/path_entry.c
+++ /dev/null
@@ -1,120 +0,0 @@
1/*
2 * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include <linux/module.h>
17#include <linux/fs.h>
18#include <linux/ktime.h>
19#include <linux/fs_struct.h>
20#include <linux/pagemap.h>
21#include <linux/writeback.h>
22#include <linux/mount.h>
23#include <linux/mm.h>
24
25#include "netfs.h"
26
27#define UNHASHED_OBSCURE_STRING_SIZE sizeof(" (deleted)")
28
29/*
30 * Create path from root for given inode.
31 * Path is formed as set of stuctures, containing name of the object
32 * and its inode data (mode, permissions and so on).
33 */
34int pohmelfs_construct_path_string(struct pohmelfs_inode *pi, void *data, int len)
35{
36 struct path path;
37 struct dentry *d;
38 char *ptr;
39 int err = 0, strlen, reduce = 0;
40
41 d = d_find_alias(&pi->vfs_inode);
42 if (!d) {
43 printk("%s: no alias, list_empty: %d.\n", __func__, list_empty(&pi->vfs_inode.i_dentry));
44 return -ENOENT;
45 }
46
47 spin_lock(&current->fs->lock);
48 path.mnt = mntget(current->fs->root.mnt);
49 spin_unlock(&current->fs->lock);
50
51 path.dentry = d;
52
53 if (!IS_ROOT(d) && d_unhashed(d))
54 reduce = 1;
55
56 ptr = d_path(&path, data, len);
57 if (IS_ERR(ptr)) {
58 err = PTR_ERR(ptr);
59 goto out;
60 }
61
62 if (reduce && len >= UNHASHED_OBSCURE_STRING_SIZE) {
63 char *end = data + len - UNHASHED_OBSCURE_STRING_SIZE;
64 *end = '\0';
65 }
66
67 strlen = len - (ptr - (char *)data);
68 memmove(data, ptr, strlen);
69 ptr = data;
70
71 err = strlen;
72
73 dprintk("%s: dname: '%s', len: %u, maxlen: %u, name: '%s', strlen: %d.\n",
74 __func__, d->d_name.name, d->d_name.len, len, ptr, strlen);
75
76out:
77 dput(d);
78 mntput(path.mnt);
79
80 return err;
81}
82
83int pohmelfs_path_length(struct pohmelfs_inode *pi)
84{
85 struct dentry *d, *root, *first;
86 int len;
87 unsigned seq;
88
89 first = d_find_alias(&pi->vfs_inode);
90 if (!first) {
91 dprintk("%s: ino: %llu, mode: %o.\n", __func__, pi->ino, pi->vfs_inode.i_mode);
92 return -ENOENT;
93 }
94
95 spin_lock(&current->fs->lock);
96 root = dget(current->fs->root.dentry);
97 spin_unlock(&current->fs->lock);
98
99rename_retry:
100 len = 1; /* Root slash */
101 d = first;
102 seq = read_seqbegin(&rename_lock);
103 rcu_read_lock();
104
105 if (!IS_ROOT(d) && d_unhashed(d))
106 len += UNHASHED_OBSCURE_STRING_SIZE; /* Obscure " (deleted)" string */
107
108 while (d && d != root && !IS_ROOT(d)) {
109 len += d->d_name.len + 1; /* Plus slash */
110 d = d->d_parent;
111 }
112 rcu_read_unlock();
113 if (read_seqretry(&rename_lock, seq))
114 goto rename_retry;
115
116 dput(root);
117 dput(first);
118
119 return len + 1; /* Including zero-byte */
120}
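pohmelfs_path_length() above walks d_parent under RCU and the rename_lock sequence counter, redoing the whole walk if a rename raced with it. A rough userspace analogue of that read-retry loop, built on a hand-rolled sequence counter with C11 seq_cst atomics (nothing here is kernel API, and the struct contents are made up for illustration):

#include <stdatomic.h>

/* Sequence counter: odd while an update is in flight. */
struct seqcount {
	atomic_uint seq;
};

struct path_stats {
	struct seqcount sc;
	atomic_int depth;
	atomic_int name_bytes;
};

static void path_stats_update(struct path_stats *s, int depth, int name_bytes)
{
	atomic_fetch_add(&s->sc.seq, 1);	/* begin: counter becomes odd */
	atomic_store(&s->depth, depth);
	atomic_store(&s->name_bytes, name_bytes);
	atomic_fetch_add(&s->sc.seq, 1);	/* end: counter even again */
}

static void path_stats_read(struct path_stats *s, int *depth, int *name_bytes)
{
	unsigned int start;

	do {
		start = atomic_load(&s->sc.seq);
		*depth = atomic_load(&s->depth);
		*name_bytes = atomic_load(&s->name_bytes);
		/* redo the read if an update was running or completed meanwhile */
	} while ((start & 1) || atomic_load(&s->sc.seq) != start);
}

The kernel's read_seqbegin()/read_seqretry() pair does the same job with more carefully tuned memory barriers.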
diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
deleted file mode 100644
index 06c1a7451b1b..000000000000
--- a/drivers/staging/pohmelfs/trans.c
+++ /dev/null
@@ -1,706 +0,0 @@
1/*
2 * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include <linux/module.h>
17#include <linux/crypto.h>
18#include <linux/fs.h>
19#include <linux/jhash.h>
20#include <linux/hash.h>
21#include <linux/ktime.h>
22#include <linux/mempool.h>
23#include <linux/mm.h>
24#include <linux/mount.h>
25#include <linux/pagemap.h>
26#include <linux/parser.h>
27#include <linux/poll.h>
28#include <linux/swap.h>
29#include <linux/slab.h>
30#include <linux/statfs.h>
31#include <linux/writeback.h>
32
33#include "netfs.h"
34
35static struct kmem_cache *netfs_trans_dst;
36static mempool_t *netfs_trans_dst_pool;
37
38static void netfs_trans_init_static(struct netfs_trans *t, int num, int size)
39{
40 t->page_num = num;
41 t->total_size = size;
42 atomic_set(&t->refcnt, 1);
43
44 spin_lock_init(&t->dst_lock);
45 INIT_LIST_HEAD(&t->dst_list);
46}
47
48static int netfs_trans_send_pages(struct netfs_trans *t, struct netfs_state *st)
49{
50 int err = 0;
51 unsigned int i, attached_pages = t->attached_pages, ci;
52 struct msghdr msg;
53 struct page **pages = (t->eng) ? t->eng->pages : t->pages;
54 struct page *p;
55 unsigned int size;
56
57 msg.msg_name = NULL;
58 msg.msg_namelen = 0;
59 msg.msg_control = NULL;
60 msg.msg_controllen = 0;
61 msg.msg_flags = MSG_WAITALL | MSG_MORE;
62
63 ci = 0;
64 for (i = 0; i < t->page_num; ++i) {
65 struct page *page = pages[ci];
66 struct netfs_cmd cmd;
67 struct iovec io;
68
69 p = t->pages[i];
70
71 if (!p)
72 continue;
73
74 size = page_private(p);
75
76 io.iov_base = &cmd;
77 io.iov_len = sizeof(struct netfs_cmd);
78
79 cmd.cmd = NETFS_WRITE_PAGE;
80 cmd.ext = 0;
81 cmd.id = 0;
82 cmd.size = size;
83 cmd.start = p->index;
84 cmd.start <<= PAGE_CACHE_SHIFT;
85 cmd.csize = 0;
86 cmd.cpad = 0;
87 cmd.iv = pohmelfs_gen_iv(t);
88
89 netfs_convert_cmd(&cmd);
90
91 msg.msg_iov = &io;
92 msg.msg_iovlen = 1;
93 msg.msg_flags = MSG_WAITALL | MSG_MORE;
94
95 err = kernel_sendmsg(st->socket, &msg, (struct kvec *)msg.msg_iov, 1, sizeof(struct netfs_cmd));
96 if (err <= 0) {
97 printk("%s: %d/%d failed to send transaction header: t: %p, gen: %u, err: %d.\n",
98 __func__, i, t->page_num, t, t->gen, err);
99 if (err == 0)
100 err = -ECONNRESET;
101 goto err_out;
102 }
103
104 msg.msg_flags = MSG_WAITALL | (attached_pages == 1 ? 0 :
105 MSG_MORE);
106
107 err = kernel_sendpage(st->socket, page, 0, size, msg.msg_flags);
108 if (err <= 0) {
109 printk("%s: %d/%d failed to send transaction page: t: %p, gen: %u, size: %u, err: %d.\n",
110 __func__, i, t->page_num, t, t->gen, size, err);
111 if (err == 0)
112 err = -ECONNRESET;
113 goto err_out;
114 }
115
116 dprintk("%s: %d/%d sent t: %p, gen: %u, page: %p/%p, size: %u.\n",
117 __func__, i, t->page_num, t, t->gen, page, p, size);
118
119 err = 0;
120 attached_pages--;
121 if (!attached_pages)
122 break;
123 ci++;
124
125 continue;
126
127err_out:
128 printk("%s: t: %p, gen: %u, err: %d.\n", __func__, t, t->gen, err);
129 netfs_state_exit(st);
130 break;
131 }
132
133 return err;
134}
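netfs_trans_send_pages() above streams a small command header followed by the page payload on the same socket, using MSG_MORE so the stack may coalesce the pieces into fewer segments. A userspace sketch of the same header-then-payload pattern over a stream socket; the wire_hdr layout and function name are made up for illustration, and a production sender would also loop on short writes:

#include <stdbool.h>
#include <stdint.h>
#include <sys/socket.h>
#include <sys/uio.h>

struct wire_hdr {
	uint16_t cmd;
	uint16_t flags;
	uint32_t size;		/* payload length that follows */
	uint64_t start;
};

/* Send one header + payload pair; 'more' tells the stack another pair follows. */
static int send_chunk(int fd, const struct wire_hdr *hdr,
		      const void *payload, size_t len, bool more)
{
	struct msghdr msg = { 0 };
	struct iovec io = { .iov_base = (void *)hdr, .iov_len = sizeof(*hdr) };
	ssize_t n;

	msg.msg_iov = &io;
	msg.msg_iovlen = 1;

	n = sendmsg(fd, &msg, MSG_MORE);	/* header first, keep the stream open */
	if (n != (ssize_t)sizeof(*hdr))
		return -1;

	n = send(fd, payload, len, more ? MSG_MORE : 0);
	return n == (ssize_t)len ? 0 : -1;
}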
135
136int netfs_trans_send(struct netfs_trans *t, struct netfs_state *st)
137{
138 int err;
139 struct msghdr msg;
140
141 BUG_ON(!t->iovec.iov_len);
142 BUG_ON(t->iovec.iov_len > 1024*1024*1024);
143
144 netfs_state_lock_send(st);
145 if (!st->socket) {
146 err = netfs_state_init(st);
147 if (err)
148 goto err_out_unlock_return;
149 }
150
151 msg.msg_iov = &t->iovec;
152 msg.msg_iovlen = 1;
153 msg.msg_name = NULL;
154 msg.msg_namelen = 0;
155 msg.msg_control = NULL;
156 msg.msg_controllen = 0;
157 msg.msg_flags = MSG_WAITALL;
158
159 if (t->attached_pages)
160 msg.msg_flags |= MSG_MORE;
161
162 err = kernel_sendmsg(st->socket, &msg, (struct kvec *)msg.msg_iov, 1, t->iovec.iov_len);
163 if (err <= 0) {
164 printk("%s: failed to send contig transaction: t: %p, gen: %u, size: %zu, err: %d.\n",
165 __func__, t, t->gen, t->iovec.iov_len, err);
166 if (err == 0)
167 err = -ECONNRESET;
168 goto err_out_unlock_return;
169 }
170
171 dprintk("%s: sent %s transaction: t: %p, gen: %u, size: %zu, page_num: %u.\n",
172 __func__, (t->page_num) ? "partial" : "full",
173 t, t->gen, t->iovec.iov_len, t->page_num);
174
175 err = 0;
176 if (t->attached_pages)
177 err = netfs_trans_send_pages(t, st);
178
179err_out_unlock_return:
180
181 if (st->need_reset)
182 netfs_state_exit(st);
183
184 netfs_state_unlock_send(st);
185
186 dprintk("%s: t: %p, gen: %u, err: %d.\n",
187 __func__, t, t->gen, err);
188
189 t->result = err;
190 return err;
191}
192
193static inline int netfs_trans_cmp(unsigned int gen, unsigned int new)
194{
195 if (gen < new)
196 return 1;
197 if (gen > new)
198 return -1;
199 return 0;
200}
201
202struct netfs_trans_dst *netfs_trans_search(struct netfs_state *st, unsigned int gen)
203{
204 struct rb_root *root = &st->trans_root;
205 struct rb_node *n = root->rb_node;
206 struct netfs_trans_dst *tmp, *ret = NULL;
207 struct netfs_trans *t;
208 int cmp;
209
210 while (n) {
211 tmp = rb_entry(n, struct netfs_trans_dst, state_entry);
212 t = tmp->trans;
213
214 cmp = netfs_trans_cmp(t->gen, gen);
215 if (cmp < 0)
216 n = n->rb_left;
217 else if (cmp > 0)
218 n = n->rb_right;
219 else {
220 ret = tmp;
221 break;
222 }
223 }
224
225 return ret;
226}
227
228static int netfs_trans_insert(struct netfs_trans_dst *ndst, struct netfs_state *st)
229{
230 struct rb_root *root = &st->trans_root;
231 struct rb_node **n = &root->rb_node, *parent = NULL;
232 struct netfs_trans_dst *ret = NULL, *tmp;
233 struct netfs_trans *t = NULL, *new = ndst->trans;
234 int cmp;
235
236 while (*n) {
237 parent = *n;
238
239 tmp = rb_entry(parent, struct netfs_trans_dst, state_entry);
240 t = tmp->trans;
241
242 cmp = netfs_trans_cmp(t->gen, new->gen);
243 if (cmp < 0)
244 n = &parent->rb_left;
245 else if (cmp > 0)
246 n = &parent->rb_right;
247 else {
248 ret = tmp;
249 break;
250 }
251 }
252
253 if (ret) {
254 printk("%s: exist: old: gen: %u, flags: %x, send_time: %lu, "
255 "new: gen: %u, flags: %x, send_time: %lu.\n",
256 __func__, t->gen, t->flags, ret->send_time,
257 new->gen, new->flags, ndst->send_time);
258 return -EEXIST;
259 }
260
261 rb_link_node(&ndst->state_entry, parent, n);
262 rb_insert_color(&ndst->state_entry, root);
263 ndst->send_time = jiffies;
264
265 return 0;
266}
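netfs_trans_insert() above walks the rbtree keyed by transaction generation and refuses to link a node whose key is already present. The same insert-or-fail shape can be sketched in portable C with POSIX tsearch(), which hands back the existing node on a duplicate key; the struct and comparator here are illustrative only:

#include <search.h>
#include <stdio.h>

struct trans {
	unsigned int gen;	/* search key */
};

static int trans_cmp(const void *a, const void *b)
{
	const struct trans *ta = a, *tb = b;

	if (ta->gen < tb->gen)
		return -1;
	if (ta->gen > tb->gen)
		return 1;
	return 0;
}

/* Returns 0 on insert, -1 if a node with the same generation already exists. */
static int trans_insert(void **root, struct trans *t)
{
	struct trans **slot = (struct trans **)tsearch(t, root, trans_cmp);

	if (!slot)
		return -1;	/* out of memory */
	if (*slot != t) {
		fprintf(stderr, "gen %u already queued\n", t->gen);
		return -1;	/* duplicate: tsearch returned the old node */
	}
	return 0;
}

Usage is a root initialised to NULL (void *root = NULL;) with trans_insert(&root, t) called per transaction.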
267
268int netfs_trans_remove_nolock(struct netfs_trans_dst *dst, struct netfs_state *st)
269{
270 if (dst && dst->state_entry.rb_parent_color) {
271 rb_erase(&dst->state_entry, &st->trans_root);
272 dst->state_entry.rb_parent_color = 0;
273 return 1;
274 }
275 return 0;
276}
277
278static int netfs_trans_remove_state(struct netfs_trans_dst *dst)
279{
280 int ret;
281 struct netfs_state *st = dst->state;
282
283 mutex_lock(&st->trans_lock);
284 ret = netfs_trans_remove_nolock(dst, st);
285 mutex_unlock(&st->trans_lock);
286
287 return ret;
288}
289
290/*
291 * Create new destination for given transaction associated with given network state.
292 * The transaction's reference counter is bumped and will be dropped either when
293 * a reply is received or when the async timeout detection task fails to resend
294 * and drops the transaction.
295 */
296static int netfs_trans_push_dst(struct netfs_trans *t, struct netfs_state *st)
297{
298 struct netfs_trans_dst *dst;
299 int err;
300
301 dst = mempool_alloc(netfs_trans_dst_pool, GFP_KERNEL);
302 if (!dst)
303 return -ENOMEM;
304
305 dst->retries = 0;
306 dst->send_time = 0;
307 dst->state = st;
308 dst->trans = t;
309 netfs_trans_get(t);
310
311 mutex_lock(&st->trans_lock);
312 err = netfs_trans_insert(dst, st);
313 mutex_unlock(&st->trans_lock);
314
315 if (err)
316 goto err_out_free;
317
318 spin_lock(&t->dst_lock);
319 list_add_tail(&dst->trans_entry, &t->dst_list);
320 spin_unlock(&t->dst_lock);
321
322 return 0;
323
324err_out_free:
325 t->result = err;
326 netfs_trans_put(t);
327 mempool_free(dst, netfs_trans_dst_pool);
328 return err;
329}
330
331static void netfs_trans_free_dst(struct netfs_trans_dst *dst)
332{
333 netfs_trans_put(dst->trans);
334 mempool_free(dst, netfs_trans_dst_pool);
335}
336
337static void netfs_trans_remove_dst(struct netfs_trans_dst *dst)
338{
339 if (netfs_trans_remove_state(dst))
340 netfs_trans_free_dst(dst);
341}
342
343/*
344 * Drop destination transaction entry when we know it.
345 */
346void netfs_trans_drop_dst(struct netfs_trans_dst *dst)
347{
348 struct netfs_trans *t = dst->trans;
349
350 spin_lock(&t->dst_lock);
351 list_del_init(&dst->trans_entry);
352 spin_unlock(&t->dst_lock);
353
354 netfs_trans_remove_dst(dst);
355}
356
357/*
358 * Drop destination transaction entry when we know it and when we
359 * already removed dst from state tree.
360 */
361void netfs_trans_drop_dst_nostate(struct netfs_trans_dst *dst)
362{
363 struct netfs_trans *t = dst->trans;
364
365 spin_lock(&t->dst_lock);
366 list_del_init(&dst->trans_entry);
367 spin_unlock(&t->dst_lock);
368
369 netfs_trans_free_dst(dst);
370}
371
372/*
373 * This drops destination transaction entry from appropriate network state
374 * tree and drops related reference counter. It is possible that transaction
375 * will be freed here if its reference counter hits zero.
376 * Destination transaction entry will be freed.
377 */
378void netfs_trans_drop_trans(struct netfs_trans *t, struct netfs_state *st)
379{
380 struct netfs_trans_dst *dst, *tmp, *ret = NULL;
381
382 spin_lock(&t->dst_lock);
383 list_for_each_entry_safe(dst, tmp, &t->dst_list, trans_entry) {
384 if (dst->state == st) {
385 ret = dst;
386 list_del(&dst->trans_entry);
387 break;
388 }
389 }
390 spin_unlock(&t->dst_lock);
391
392 if (ret)
393 netfs_trans_remove_dst(ret);
394}
395
396/*
397 * Drops the last destination entry pushed for the given network state (falling
398 * back to a list search if the tail entry belongs to another state), removes it
399 * from the state tree and drops the related reference counter. It is possible
400 * that the transaction will be freed here if its counter hits zero.
401 */
402void netfs_trans_drop_last(struct netfs_trans *t, struct netfs_state *st)
403{
404 struct netfs_trans_dst *dst, *tmp, *ret;
405
406 spin_lock(&t->dst_lock);
407 ret = list_entry(t->dst_list.prev, struct netfs_trans_dst, trans_entry);
408 if (ret->state != st) {
409 ret = NULL;
410 list_for_each_entry_safe(dst, tmp, &t->dst_list, trans_entry) {
411 if (dst->state == st) {
412 ret = dst;
413 list_del_init(&dst->trans_entry);
414 break;
415 }
416 }
417 } else {
418 list_del(&ret->trans_entry);
419 }
420 spin_unlock(&t->dst_lock);
421
422 if (ret)
423 netfs_trans_remove_dst(ret);
424}
425
426static int netfs_trans_push(struct netfs_trans *t, struct netfs_state *st)
427{
428 int err;
429
430 err = netfs_trans_push_dst(t, st);
431 if (err)
432 return err;
433
434 err = netfs_trans_send(t, st);
435 if (err)
436 goto err_out_free;
437
438 if (t->flags & NETFS_TRANS_SINGLE_DST)
439 pohmelfs_switch_active(st->psb);
440
441 return 0;
442
443err_out_free:
444 t->result = err;
445 netfs_trans_drop_last(t, st);
446
447 return err;
448}
449
450int netfs_trans_finish_send(struct netfs_trans *t, struct pohmelfs_sb *psb)
451{
452 struct pohmelfs_config *c;
453 int err = -ENODEV;
454 struct netfs_state *st;
455#if 0
456 dprintk("%s: t: %p, gen: %u, size: %u, page_num: %u, active: %p.\n",
457 __func__, t, t->gen, t->iovec.iov_len, t->page_num, psb->active_state);
458#endif
459 mutex_lock(&psb->state_lock);
460 list_for_each_entry(c, &psb->state_list, config_entry) {
461 st = &c->state;
462
463 if (t->flags & NETFS_TRANS_SINGLE_DST) {
464 if (!(st->ctl.perm & POHMELFS_IO_PERM_READ))
465 continue;
466 } else {
467 if (!(st->ctl.perm & POHMELFS_IO_PERM_WRITE))
468 continue;
469 }
470
471 if (psb->active_state && (psb->active_state->state.ctl.prio >= st->ctl.prio) &&
472 (t->flags & NETFS_TRANS_SINGLE_DST))
473 st = &psb->active_state->state;
474
475 err = netfs_trans_push(t, st);
476 if (!err && (t->flags & NETFS_TRANS_SINGLE_DST))
477 break;
478 }
479
480 mutex_unlock(&psb->state_lock);
481#if 0
482 dprintk("%s: fully sent t: %p, gen: %u, size: %u, page_num: %u, err: %d.\n",
483 __func__, t, t->gen, t->iovec.iov_len, t->page_num, err);
484#endif
485 if (err)
486 t->result = err;
487 return err;
488}
489
490int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
491{
492 int err;
493 struct netfs_cmd *cmd = t->iovec.iov_base;
494
495 t->gen = atomic_inc_return(&psb->trans_gen);
496
497 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
498 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
499 cmd->cmd = NETFS_TRANS;
500 cmd->start = t->gen;
501 cmd->id = 0;
502
503 if (psb->perform_crypto) {
504 cmd->ext = psb->crypto_attached_size;
505 cmd->csize = psb->crypto_attached_size;
506 }
507
508 dprintk("%s: t: %u, size: %u, iov_len: %zu, attached_size: %u, attached_pages: %u.\n",
509 __func__, t->gen, cmd->size, t->iovec.iov_len, t->attached_size, t->attached_pages);
510 err = pohmelfs_trans_crypt(t, psb);
511 if (err) {
512 t->result = err;
513 netfs_convert_cmd(cmd);
514 dprintk("%s: trans: %llu, crypto_attached_size: %u, attached_size: %u, attached_pages: %d, trans_size: %u, err: %d.\n",
515 __func__, cmd->start, psb->crypto_attached_size, t->attached_size, t->attached_pages, cmd->size, err);
516 }
517 netfs_trans_put(t);
518 return err;
519}
520
521/*
522 * Resend transaction to remote server(s).
523 * If new servers were added into superblock, we can try to send data
524 * to them too.
525 *
526 * It is called under superblock's state_lock, so we can safely
527 * dereference psb->state_list. Also, transaction's reference counter is
528 * bumped, so it can not go away under us, thus we can safely access all
529 * its members. State is locked.
530 *
531 * This function returns 0 if transaction was successfully sent to at
532 * least one destination target.
533 */
534int netfs_trans_resend(struct netfs_trans *t, struct pohmelfs_sb *psb)
535{
536 struct netfs_trans_dst *dst;
537 struct netfs_state *st;
538 struct pohmelfs_config *c;
539 int err, exist, error = -ENODEV;
540
541 list_for_each_entry(c, &psb->state_list, config_entry) {
542 st = &c->state;
543
544 exist = 0;
545 spin_lock(&t->dst_lock);
546 list_for_each_entry(dst, &t->dst_list, trans_entry) {
547 if (st == dst->state) {
548 exist = 1;
549 break;
550 }
551 }
552 spin_unlock(&t->dst_lock);
553
554 if (exist) {
555 if (!(t->flags & NETFS_TRANS_SINGLE_DST) ||
556 (c->config_entry.next == &psb->state_list)) {
557 dprintk("%s: resending st: %p, t: %p, gen: %u.\n",
558 __func__, st, t, t->gen);
559 err = netfs_trans_send(t, st);
560 if (!err)
561 error = 0;
562 }
563 continue;
564 }
565
566 dprintk("%s: pushing/resending st: %p, t: %p, gen: %u.\n",
567 __func__, st, t, t->gen);
568 err = netfs_trans_push(t, st);
569 if (err)
570 continue;
571 error = 0;
572 if (t->flags & NETFS_TRANS_SINGLE_DST)
573 break;
574 }
575
576 t->result = error;
577 return error;
578}
579
580void *netfs_trans_add(struct netfs_trans *t, unsigned int size)
581{
582 struct iovec *io = &t->iovec;
583 void *ptr;
584
585 if (size > t->total_size) {
586 ptr = ERR_PTR(-EINVAL);
587 goto out;
588 }
589
590 if (io->iov_len + size > t->total_size) {
591 dprintk("%s: too big size t: %p, gen: %u, iov_len: %zu, size: %u, total: %u.\n",
592 __func__, t, t->gen, io->iov_len, size, t->total_size);
593 ptr = ERR_PTR(-E2BIG);
594 goto out;
595 }
596
597 ptr = io->iov_base + io->iov_len;
598 io->iov_len += size;
599
600out:
601 dprintk("%s: t: %p, gen: %u, size: %u, total: %zu.\n",
602 __func__, t, t->gen, size, io->iov_len);
603 return ptr;
604}
605
606void netfs_trans_free(struct netfs_trans *t)
607{
608 if (t->eng)
609 pohmelfs_crypto_thread_make_ready(t->eng->thread);
610 kfree(t);
611}
612
613struct netfs_trans *netfs_trans_alloc(struct pohmelfs_sb *psb, unsigned int size,
614 unsigned int flags, unsigned int nr)
615{
616 struct netfs_trans *t;
617 unsigned int num, cont, pad, size_no_trans;
618 unsigned int crypto_added = 0;
619 struct netfs_cmd *cmd;
620
621 if (psb->perform_crypto)
622 crypto_added = psb->crypto_attached_size;
623
624 /*
625 * |sizeof(struct netfs_trans)|
626 * |sizeof(struct netfs_cmd)| - transaction header
627 * |size| - buffer with requested size
628 * |padding| - crypto padding, zero bytes
629 * |nr * sizeof(struct page *)| - array of page pointers
630 *
631 * Overall size should be less than PAGE_SIZE for guaranteed allocation.
632 */
633
634 cont = size;
635 size = ALIGN(size, psb->crypto_align_size);
636 pad = size - cont;
637
638 size_no_trans = size + sizeof(struct netfs_cmd) * 2 + crypto_added;
639
640 cont = sizeof(struct netfs_trans) + size_no_trans;
641
642 num = (PAGE_SIZE - cont)/sizeof(struct page *);
643
644 if (nr > num)
645 nr = num;
646
647 t = kzalloc(cont + nr*sizeof(struct page *), GFP_NOIO);
648 if (!t)
649 goto err_out_exit;
650
651 t->iovec.iov_base = (void *)(t + 1);
652 t->pages = (struct page **)(t->iovec.iov_base + size_no_trans);
653
654 /*
655 * Reserving space for transaction header.
656 */
657 t->iovec.iov_len = sizeof(struct netfs_cmd) + crypto_added;
658
659 netfs_trans_init_static(t, nr, size_no_trans);
660
661 t->flags = flags;
662 t->psb = psb;
663
664 cmd = (struct netfs_cmd *)t->iovec.iov_base;
665
666 cmd->size = size;
667 cmd->cpad = pad;
668 cmd->csize = crypto_added;
669
670 dprintk("%s: t: %p, gen: %u, size: %u, padding: %u, align_size: %u, flags: %x, "
671 "page_num: %u, base: %p, pages: %p.\n",
672 __func__, t, t->gen, size, pad, psb->crypto_align_size, flags, nr,
673 t->iovec.iov_base, t->pages);
674
675 return t;
676
677err_out_exit:
678 return NULL;
679}
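netfs_trans_alloc() above makes a single kzalloc() cover the transaction struct, the command/data buffer and the trailing array of page pointers, sizing the array so the whole allocation stays within a page. A userspace sketch of that "one allocation, carve it up with pointer arithmetic" layout (field names are illustrative):

#include <stdlib.h>

struct trans {
	size_t buf_size;
	size_t nr_pages;
	void *buf;		/* points just past the struct */
	void **pages;		/* points just past the buffer */
};

static struct trans *trans_alloc(size_t buf_size, size_t nr_pages)
{
	struct trans *t;
	size_t total;

	/* round the buffer up so the pointer array that follows stays aligned */
	buf_size = (buf_size + sizeof(void *) - 1) & ~(sizeof(void *) - 1);
	total = sizeof(struct trans) + buf_size + nr_pages * sizeof(void *);

	t = calloc(1, total);
	if (!t)
		return NULL;

	t->buf_size = buf_size;
	t->nr_pages = nr_pages;
	t->buf = (void *)(t + 1);				/* buffer follows the header */
	t->pages = (void **)((char *)t->buf + buf_size);	/* page array follows the buffer */
	return t;
}

One free() then releases everything, which is the point of the layout.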
680
681int netfs_trans_init(void)
682{
683 int err = -ENOMEM;
684
685 netfs_trans_dst = kmem_cache_create("netfs_trans_dst", sizeof(struct netfs_trans_dst),
686 0, 0, NULL);
687 if (!netfs_trans_dst)
688 goto err_out_exit;
689
690 netfs_trans_dst_pool = mempool_create_slab_pool(256, netfs_trans_dst);
691 if (!netfs_trans_dst_pool)
692 goto err_out_free;
693
694 return 0;
695
696err_out_free:
697 kmem_cache_destroy(netfs_trans_dst);
698err_out_exit:
699 return err;
700}
701
702void netfs_trans_exit(void)
703{
704 mempool_destroy(netfs_trans_dst_pool);
705 kmem_cache_destroy(netfs_trans_dst);
706}
diff --git a/drivers/staging/rtl8712/drv_types.h b/drivers/staging/rtl8712/drv_types.h
index 9b5d771e650c..ed85b4415207 100644
--- a/drivers/staging/rtl8712/drv_types.h
+++ b/drivers/staging/rtl8712/drv_types.h
@@ -37,6 +37,8 @@ struct _adapter;
37#include "wlan_bssdef.h" 37#include "wlan_bssdef.h"
38#include "rtl8712_spec.h" 38#include "rtl8712_spec.h"
39#include "rtl8712_hal.h" 39#include "rtl8712_hal.h"
40#include <linux/mutex.h>
41#include <linux/completion.h>
40 42
41enum _NIC_VERSION { 43enum _NIC_VERSION {
42 RTL8711_NIC, 44 RTL8711_NIC,
@@ -168,6 +170,7 @@ struct _adapter {
168 s32 bSurpriseRemoved; 170 s32 bSurpriseRemoved;
169 u32 IsrContent; 171 u32 IsrContent;
170 u32 ImrContent; 172 u32 ImrContent;
173 bool fw_found;
171 u8 EepromAddressSize; 174 u8 EepromAddressSize;
172 u8 hw_init_completed; 175 u8 hw_init_completed;
173 struct task_struct *cmdThread; 176 struct task_struct *cmdThread;
@@ -184,6 +187,10 @@ struct _adapter {
184 _workitem wkFilterRxFF0; 187 _workitem wkFilterRxFF0;
185 u8 blnEnableRxFF0Filter; 188 u8 blnEnableRxFF0Filter;
186 spinlock_t lockRxFF0Filter; 189 spinlock_t lockRxFF0Filter;
190 const struct firmware *fw;
191 struct usb_interface *pusb_intf;
192 struct mutex mutex_start;
193 struct completion rtl8712_fw_ready;
187}; 194};
188 195
189static inline u8 *myid(struct eeprom_priv *peepriv) 196static inline u8 *myid(struct eeprom_priv *peepriv)
diff --git a/drivers/staging/rtl8712/hal_init.c b/drivers/staging/rtl8712/hal_init.c
index d0029aa4cd3c..cc893c0f5ad3 100644
--- a/drivers/staging/rtl8712/hal_init.c
+++ b/drivers/staging/rtl8712/hal_init.c
@@ -42,29 +42,56 @@
42#define FWBUFF_ALIGN_SZ 512 42#define FWBUFF_ALIGN_SZ 512
43#define MAX_DUMP_FWSZ 49152 /*default = 49152 (48k)*/ 43#define MAX_DUMP_FWSZ 49152 /*default = 49152 (48k)*/
44 44
45static u32 rtl871x_open_fw(struct _adapter *padapter, void **pphfwfile_hdl, 45static void rtl871x_load_fw_cb(const struct firmware *firmware, void *context)
46 const u8 **ppmappedfw)
47{ 46{
47 struct _adapter *padapter = context;
48
49 complete(&padapter->rtl8712_fw_ready);
50 if (!firmware) {
51 struct usb_device *udev = padapter->dvobjpriv.pusbdev;
52 struct usb_interface *pusb_intf = padapter->pusb_intf;
53 printk(KERN_ERR "r8712u: Firmware request failed\n");
54 padapter->fw_found = false;
55 usb_put_dev(udev);
56 usb_set_intfdata(pusb_intf, NULL);
57 return;
58 }
59 padapter->fw = firmware;
60 padapter->fw_found = true;
61 /* firmware available - start netdev */
62 register_netdev(padapter->pnetdev);
63}
64
65static const char firmware_file[] = "rtlwifi/rtl8712u.bin";
66
67int rtl871x_load_fw(struct _adapter *padapter)
68{
69 struct device *dev = &padapter->dvobjpriv.pusbdev->dev;
48 int rc; 70 int rc;
49 const char firmware_file[] = "rtlwifi/rtl8712u.bin";
50 const struct firmware **praw = (const struct firmware **)
51 (pphfwfile_hdl);
52 struct dvobj_priv *pdvobjpriv = (struct dvobj_priv *)
53 (&padapter->dvobjpriv);
54 struct usb_device *pusbdev = pdvobjpriv->pusbdev;
55 71
72 init_completion(&padapter->rtl8712_fw_ready);
56 printk(KERN_INFO "r8712u: Loading firmware from \"%s\"\n", 73 printk(KERN_INFO "r8712u: Loading firmware from \"%s\"\n",
57 firmware_file); 74 firmware_file);
58 rc = request_firmware(praw, firmware_file, &pusbdev->dev); 75 rc = request_firmware_nowait(THIS_MODULE, 1, firmware_file, dev,
59 if (rc < 0) { 76 GFP_KERNEL, padapter, rtl871x_load_fw_cb);
60 printk(KERN_ERR "r8712u: Unable to load firmware\n"); 77 if (rc)
61 printk(KERN_ERR "r8712u: Install latest linux-firmware\n"); 78 printk(KERN_ERR "r8712u: Firmware request error %d\n", rc);
79 return rc;
80}
81MODULE_FIRMWARE("rtlwifi/rtl8712u.bin");
82
83static u32 rtl871x_open_fw(struct _adapter *padapter, const u8 **ppmappedfw)
84{
85 const struct firmware **praw = &padapter->fw;
86
87 if (padapter->fw->size > 200000) {
88 printk(KERN_ERR "r8712u: Bad fw->size of %d\n",
89 (int)padapter->fw->size);
62 return 0; 90 return 0;
63 } 91 }
64 *ppmappedfw = (u8 *)((*praw)->data); 92 *ppmappedfw = (u8 *)((*praw)->data);
65 return (*praw)->size; 93 return (*praw)->size;
66} 94}
67MODULE_FIRMWARE("rtlwifi/rtl8712u.bin");
68 95
69static void fill_fwpriv(struct _adapter *padapter, struct fw_priv *pfwpriv) 96static void fill_fwpriv(struct _adapter *padapter, struct fw_priv *pfwpriv)
70{ 97{
@@ -142,18 +169,17 @@ static u8 rtl8712_dl_fw(struct _adapter *padapter)
142 uint dump_imem_sz, imem_sz, dump_emem_sz, emem_sz; /* max = 49152; */ 169 uint dump_imem_sz, imem_sz, dump_emem_sz, emem_sz; /* max = 49152; */
143 struct fw_hdr fwhdr; 170 struct fw_hdr fwhdr;
144 u32 ulfilelength; /* FW file size */ 171 u32 ulfilelength; /* FW file size */
145 void *phfwfile_hdl = NULL;
146 const u8 *pmappedfw = NULL; 172 const u8 *pmappedfw = NULL;
147 u8 *ptmpchar = NULL, *ppayload, *ptr; 173 u8 *ptmpchar = NULL, *ppayload, *ptr;
148 struct tx_desc *ptx_desc; 174 struct tx_desc *ptx_desc;
149 u32 txdscp_sz = sizeof(struct tx_desc); 175 u32 txdscp_sz = sizeof(struct tx_desc);
150 u8 ret = _FAIL; 176 u8 ret = _FAIL;
151 177
152 ulfilelength = rtl871x_open_fw(padapter, &phfwfile_hdl, &pmappedfw); 178 ulfilelength = rtl871x_open_fw(padapter, &pmappedfw);
153 if (pmappedfw && (ulfilelength > 0)) { 179 if (pmappedfw && (ulfilelength > 0)) {
154 update_fwhdr(&fwhdr, pmappedfw); 180 update_fwhdr(&fwhdr, pmappedfw);
155 if (chk_fwhdr(&fwhdr, ulfilelength) == _FAIL) 181 if (chk_fwhdr(&fwhdr, ulfilelength) == _FAIL)
156 goto firmware_rel; 182 return ret;
157 fill_fwpriv(padapter, &fwhdr.fwpriv); 183 fill_fwpriv(padapter, &fwhdr.fwpriv);
158 /* firmware check ok */ 184 /* firmware check ok */
159 maxlen = (fwhdr.img_IMEM_size > fwhdr.img_SRAM_size) ? 185 maxlen = (fwhdr.img_IMEM_size > fwhdr.img_SRAM_size) ?
@@ -161,7 +187,7 @@ static u8 rtl8712_dl_fw(struct _adapter *padapter)
161 maxlen += txdscp_sz; 187 maxlen += txdscp_sz;
162 ptmpchar = _malloc(maxlen + FWBUFF_ALIGN_SZ); 188 ptmpchar = _malloc(maxlen + FWBUFF_ALIGN_SZ);
163 if (ptmpchar == NULL) 189 if (ptmpchar == NULL)
164 goto firmware_rel; 190 return ret;
165 191
166 ptx_desc = (struct tx_desc *)(ptmpchar + FWBUFF_ALIGN_SZ - 192 ptx_desc = (struct tx_desc *)(ptmpchar + FWBUFF_ALIGN_SZ -
167 ((addr_t)(ptmpchar) & (FWBUFF_ALIGN_SZ - 1))); 193 ((addr_t)(ptmpchar) & (FWBUFF_ALIGN_SZ - 1)));
@@ -297,8 +323,6 @@ static u8 rtl8712_dl_fw(struct _adapter *padapter)
297 323
298exit_fail: 324exit_fail:
299 kfree(ptmpchar); 325 kfree(ptmpchar);
300firmware_rel:
301 release_firmware((struct firmware *)phfwfile_hdl);
302 return ret; 326 return ret;
303} 327}
304 328
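The hal_init.c change above converts the driver to request_firmware_nowait(): probe kicks off an asynchronous request, the callback either registers the netdev or tears the device down, and a completion guarantees the driver never exits with the callback still pending. A minimal userspace sketch of that "start async work, signal a completion from the callback, wait before teardown" shape using pthreads; none of this is the in-kernel firmware API, and all names are made up:

#include <pthread.h>
#include <stdbool.h>

struct fw_request {
	pthread_t worker;
	pthread_mutex_t lock;
	pthread_cond_t done_cv;
	bool done;
	bool found;		/* set by the "callback" */
};

static void fw_loaded_cb(struct fw_request *req, bool ok)
{
	pthread_mutex_lock(&req->lock);
	req->found = ok;
	req->done = true;		/* the completion: waiters may proceed */
	pthread_cond_broadcast(&req->done_cv);
	pthread_mutex_unlock(&req->lock);
}

static void *fw_worker(void *arg)
{
	struct fw_request *req = arg;

	/* ...load the blob from disk here, then report the result... */
	fw_loaded_cb(req, true);
	return NULL;
}

static int fw_request_nowait(struct fw_request *req)
{
	pthread_mutex_init(&req->lock, NULL);
	pthread_cond_init(&req->done_cv, NULL);
	req->done = false;
	return pthread_create(&req->worker, NULL, fw_worker, req);
}

static void fw_wait_for_completion(struct fw_request *req)
{
	pthread_mutex_lock(&req->lock);
	while (!req->done)
		pthread_cond_wait(&req->done_cv, &req->lock);
	pthread_mutex_unlock(&req->lock);
	pthread_join(req->worker, NULL);
}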
diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
index 9a75c6dbe505..98a3d684f9b2 100644
--- a/drivers/staging/rtl8712/os_intfs.c
+++ b/drivers/staging/rtl8712/os_intfs.c
@@ -31,6 +31,7 @@
31#include <linux/module.h> 31#include <linux/module.h>
32#include <linux/init.h> 32#include <linux/init.h>
33#include <linux/kthread.h> 33#include <linux/kthread.h>
34#include <linux/firmware.h>
34#include "osdep_service.h" 35#include "osdep_service.h"
35#include "drv_types.h" 36#include "drv_types.h"
36#include "xmit_osdep.h" 37#include "xmit_osdep.h"
@@ -264,12 +265,12 @@ static void start_drv_timers(struct _adapter *padapter)
264void r8712_stop_drv_timers(struct _adapter *padapter) 265void r8712_stop_drv_timers(struct _adapter *padapter)
265{ 266{
266 _cancel_timer_ex(&padapter->mlmepriv.assoc_timer); 267 _cancel_timer_ex(&padapter->mlmepriv.assoc_timer);
267 _cancel_timer_ex(&padapter->mlmepriv.sitesurveyctrl.
268 sitesurvey_ctrl_timer);
269 _cancel_timer_ex(&padapter->securitypriv.tkip_timer); 268 _cancel_timer_ex(&padapter->securitypriv.tkip_timer);
270 _cancel_timer_ex(&padapter->mlmepriv.scan_to_timer); 269 _cancel_timer_ex(&padapter->mlmepriv.scan_to_timer);
271 _cancel_timer_ex(&padapter->mlmepriv.dhcp_timer); 270 _cancel_timer_ex(&padapter->mlmepriv.dhcp_timer);
272 _cancel_timer_ex(&padapter->mlmepriv.wdg_timer); 271 _cancel_timer_ex(&padapter->mlmepriv.wdg_timer);
272 _cancel_timer_ex(&padapter->mlmepriv.sitesurveyctrl.
273 sitesurvey_ctrl_timer);
273} 274}
274 275
275static u8 init_default_value(struct _adapter *padapter) 276static u8 init_default_value(struct _adapter *padapter)
@@ -347,7 +348,8 @@ u8 r8712_free_drv_sw(struct _adapter *padapter)
347 r8712_free_mlme_priv(&padapter->mlmepriv); 348 r8712_free_mlme_priv(&padapter->mlmepriv);
348 r8712_free_io_queue(padapter); 349 r8712_free_io_queue(padapter);
349 _free_xmit_priv(&padapter->xmitpriv); 350 _free_xmit_priv(&padapter->xmitpriv);
350 _r8712_free_sta_priv(&padapter->stapriv); 351 if (padapter->fw_found)
352 _r8712_free_sta_priv(&padapter->stapriv);
351 _r8712_free_recv_priv(&padapter->recvpriv); 353 _r8712_free_recv_priv(&padapter->recvpriv);
352 mp871xdeinit(padapter); 354 mp871xdeinit(padapter);
353 if (pnetdev) 355 if (pnetdev)
@@ -388,6 +390,7 @@ static int netdev_open(struct net_device *pnetdev)
388{ 390{
389 struct _adapter *padapter = (struct _adapter *)netdev_priv(pnetdev); 391 struct _adapter *padapter = (struct _adapter *)netdev_priv(pnetdev);
390 392
393 mutex_lock(&padapter->mutex_start);
391 if (padapter->bup == false) { 394 if (padapter->bup == false) {
392 padapter->bDriverStopped = false; 395 padapter->bDriverStopped = false;
393 padapter->bSurpriseRemoved = false; 396 padapter->bSurpriseRemoved = false;
@@ -435,11 +438,13 @@ static int netdev_open(struct net_device *pnetdev)
435 /* start driver mlme relation timer */ 438 /* start driver mlme relation timer */
436 start_drv_timers(padapter); 439 start_drv_timers(padapter);
437 padapter->ledpriv.LedControlHandler(padapter, LED_CTL_NO_LINK); 440 padapter->ledpriv.LedControlHandler(padapter, LED_CTL_NO_LINK);
441 mutex_unlock(&padapter->mutex_start);
438 return 0; 442 return 0;
439netdev_open_error: 443netdev_open_error:
440 padapter->bup = false; 444 padapter->bup = false;
441 netif_carrier_off(pnetdev); 445 netif_carrier_off(pnetdev);
442 netif_stop_queue(pnetdev); 446 netif_stop_queue(pnetdev);
447 mutex_unlock(&padapter->mutex_start);
443 return -1; 448 return -1;
444} 449}
445 450
@@ -473,6 +478,9 @@ static int netdev_close(struct net_device *pnetdev)
473 r8712_free_network_queue(padapter); 478 r8712_free_network_queue(padapter);
474 /* The interface is no longer Up: */ 479 /* The interface is no longer Up: */
475 padapter->bup = false; 480 padapter->bup = false;
481 release_firmware(padapter->fw);
482 /* never exit with a firmware callback pending */
483 wait_for_completion(&padapter->rtl8712_fw_ready);
476 return 0; 484 return 0;
477} 485}
478 486
diff --git a/drivers/staging/rtl8712/rtl8712_hal.h b/drivers/staging/rtl8712/rtl8712_hal.h
index 665e71838172..d19865a5a50c 100644
--- a/drivers/staging/rtl8712/rtl8712_hal.h
+++ b/drivers/staging/rtl8712/rtl8712_hal.h
@@ -145,5 +145,6 @@ struct hal_priv {
145}; 145};
146 146
147uint rtl8712_hal_init(struct _adapter *padapter); 147uint rtl8712_hal_init(struct _adapter *padapter);
148int rtl871x_load_fw(struct _adapter *padapter);
148 149
149#endif 150#endif
diff --git a/drivers/staging/rtl8712/rtl871x_sta_mgt.c b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
index 64f569618839..81bde803c59f 100644
--- a/drivers/staging/rtl8712/rtl871x_sta_mgt.c
+++ b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
@@ -43,6 +43,7 @@ static void _init_stainfo(struct sta_info *psta)
43 _r8712_init_sta_xmit_priv(&psta->sta_xmitpriv); 43 _r8712_init_sta_xmit_priv(&psta->sta_xmitpriv);
44 _r8712_init_sta_recv_priv(&psta->sta_recvpriv); 44 _r8712_init_sta_recv_priv(&psta->sta_recvpriv);
45#ifdef CONFIG_R8712_AP 45#ifdef CONFIG_R8712_AP
46 _init_listhead(&psta->asoc_list);
46 _init_listhead(&psta->auth_list); 47 _init_listhead(&psta->auth_list);
47#endif 48#endif
48} 49}
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index 5385da2e9cdb..9bade184883b 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -89,6 +89,7 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
89 {USB_DEVICE(0x0DF6, 0x0045)}, 89 {USB_DEVICE(0x0DF6, 0x0045)},
90 {USB_DEVICE(0x0DF6, 0x0059)}, /* 11n mode disable */ 90 {USB_DEVICE(0x0DF6, 0x0059)}, /* 11n mode disable */
91 {USB_DEVICE(0x0DF6, 0x004B)}, 91 {USB_DEVICE(0x0DF6, 0x004B)},
92 {USB_DEVICE(0x0DF6, 0x005B)},
92 {USB_DEVICE(0x0DF6, 0x005D)}, 93 {USB_DEVICE(0x0DF6, 0x005D)},
93 {USB_DEVICE(0x0DF6, 0x0063)}, 94 {USB_DEVICE(0x0DF6, 0x0063)},
94 /* Sweex */ 95 /* Sweex */
@@ -389,6 +390,7 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
389 pdvobjpriv = &padapter->dvobjpriv; 390 pdvobjpriv = &padapter->dvobjpriv;
390 pdvobjpriv->padapter = padapter; 391 pdvobjpriv->padapter = padapter;
391 padapter->dvobjpriv.pusbdev = udev; 392 padapter->dvobjpriv.pusbdev = udev;
393 padapter->pusb_intf = pusb_intf;
392 usb_set_intfdata(pusb_intf, pnetdev); 394 usb_set_intfdata(pusb_intf, pnetdev);
393 SET_NETDEV_DEV(pnetdev, &pusb_intf->dev); 395 SET_NETDEV_DEV(pnetdev, &pusb_intf->dev);
394 /* step 2. */ 396 /* step 2. */
@@ -595,10 +597,11 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
595 "%pM\n", mac); 597 "%pM\n", mac);
596 memcpy(pnetdev->dev_addr, mac, ETH_ALEN); 598 memcpy(pnetdev->dev_addr, mac, ETH_ALEN);
597 } 599 }
598 /* step 6. Tell the network stack we exist */ 600 /* step 6. Load the firmware asynchronously */
599 if (register_netdev(pnetdev) != 0) 601 if (rtl871x_load_fw(padapter))
600 goto error; 602 goto error;
601 spin_lock_init(&padapter->lockRxFF0Filter); 603 spin_lock_init(&padapter->lockRxFF0Filter);
604 mutex_init(&padapter->mutex_start);
602 return 0; 605 return 0;
603error: 606error:
604 usb_put_dev(udev); 607 usb_put_dev(udev);
@@ -629,7 +632,8 @@ static void r871xu_dev_remove(struct usb_interface *pusb_intf)
629 flush_scheduled_work(); 632 flush_scheduled_work();
630 udelay(1); 633 udelay(1);
631 /*Stop driver mlme relation timer */ 634 /*Stop driver mlme relation timer */
632 r8712_stop_drv_timers(padapter); 635 if (padapter->fw_found)
636 r8712_stop_drv_timers(padapter);
633 r871x_dev_unload(padapter); 637 r871x_dev_unload(padapter);
634 r8712_free_drv_sw(padapter); 638 r8712_free_drv_sw(padapter);
635 } 639 }
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
index e1c4492a7105..dde559d06c43 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -1046,8 +1046,6 @@ static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
1046 1046
1047 /* Free the driver's device context: */ 1047 /* Free the driver's device context: */
1048 kfree(drv_datap->base_img); 1048 kfree(drv_datap->base_img);
1049 kfree(drv_datap);
1050 dev_set_drvdata(bridge, NULL);
1051 kfree((void *)dev_ctxt); 1049 kfree((void *)dev_ctxt);
1052 return status; 1050 return status;
1053} 1051}
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c
index 76cfc6edecd9..385740bad0de 100644
--- a/drivers/staging/tidspbridge/rmgr/drv_interface.c
+++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c
@@ -410,6 +410,9 @@ static int __devexit omap34_xx_bridge_remove(struct platform_device *pdev)
410 DBC_ASSERT(ret == true); 410 DBC_ASSERT(ret == true);
411 } 411 }
412 412
413 kfree(drv_datap);
414 dev_set_drvdata(bridge, NULL);
415
413func_cont: 416func_cont:
414 mem_ext_phys_pool_release(); 417 mem_ext_phys_pool_release();
415 418
@@ -500,35 +503,42 @@ static int bridge_open(struct inode *ip, struct file *filp)
500 } 503 }
501#endif 504#endif
502 pr_ctxt = kzalloc(sizeof(struct process_context), GFP_KERNEL); 505 pr_ctxt = kzalloc(sizeof(struct process_context), GFP_KERNEL);
503 if (pr_ctxt) { 506 if (!pr_ctxt)
504 pr_ctxt->res_state = PROC_RES_ALLOCATED; 507 return -ENOMEM;
505 spin_lock_init(&pr_ctxt->dmm_map_lock); 508
506 INIT_LIST_HEAD(&pr_ctxt->dmm_map_list); 509 pr_ctxt->res_state = PROC_RES_ALLOCATED;
507 spin_lock_init(&pr_ctxt->dmm_rsv_lock); 510 spin_lock_init(&pr_ctxt->dmm_map_lock);
508 INIT_LIST_HEAD(&pr_ctxt->dmm_rsv_list); 511 INIT_LIST_HEAD(&pr_ctxt->dmm_map_list);
509 512 spin_lock_init(&pr_ctxt->dmm_rsv_lock);
510 pr_ctxt->node_id = kzalloc(sizeof(struct idr), GFP_KERNEL); 513 INIT_LIST_HEAD(&pr_ctxt->dmm_rsv_list);
511 if (pr_ctxt->node_id) {
512 idr_init(pr_ctxt->node_id);
513 } else {
514 status = -ENOMEM;
515 goto err;
516 }
517 514
518 pr_ctxt->stream_id = kzalloc(sizeof(struct idr), GFP_KERNEL); 515 pr_ctxt->node_id = kzalloc(sizeof(struct idr), GFP_KERNEL);
519 if (pr_ctxt->stream_id) 516 if (!pr_ctxt->node_id) {
520 idr_init(pr_ctxt->stream_id);
521 else
522 status = -ENOMEM;
523 } else {
524 status = -ENOMEM; 517 status = -ENOMEM;
518 goto err1;
525 } 519 }
526err: 520
521 idr_init(pr_ctxt->node_id);
522
523 pr_ctxt->stream_id = kzalloc(sizeof(struct idr), GFP_KERNEL);
524 if (!pr_ctxt->stream_id) {
525 status = -ENOMEM;
526 goto err2;
527 }
528
529 idr_init(pr_ctxt->stream_id);
530
527 filp->private_data = pr_ctxt; 531 filp->private_data = pr_ctxt;
532
528#ifdef CONFIG_TIDSPBRIDGE_RECOVERY 533#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
529 if (!status) 534 atomic_inc(&bridge_cref);
530 atomic_inc(&bridge_cref);
531#endif 535#endif
536 return 0;
537
538err2:
539 kfree(pr_ctxt->node_id);
540err1:
541 kfree(pr_ctxt);
532 return status; 542 return status;
533} 543}
534 544
@@ -550,6 +560,8 @@ static int bridge_release(struct inode *ip, struct file *filp)
550 flush_signals(current); 560 flush_signals(current);
551 drv_remove_all_resources(pr_ctxt); 561 drv_remove_all_resources(pr_ctxt);
552 proc_detach(pr_ctxt); 562 proc_detach(pr_ctxt);
563 kfree(pr_ctxt->node_id);
564 kfree(pr_ctxt->stream_id);
553 kfree(pr_ctxt); 565 kfree(pr_ctxt);
554 566
555 filp->private_data = NULL; 567 filp->private_data = NULL;
diff --git a/drivers/staging/usbip/stub_main.c b/drivers/staging/usbip/stub_main.c
index 2d6317850064..705a9e530a19 100644
--- a/drivers/staging/usbip/stub_main.c
+++ b/drivers/staging/usbip/stub_main.c
@@ -246,8 +246,9 @@ static int __init usbip_host_init(void)
246{ 246{
247 int ret; 247 int ret;
248 248
249 stub_priv_cache = KMEM_CACHE(stub_priv, SLAB_HWCACHE_ALIGN); 249 init_busid_table();
250 250
251 stub_priv_cache = KMEM_CACHE(stub_priv, SLAB_HWCACHE_ALIGN);
251 if (!stub_priv_cache) { 252 if (!stub_priv_cache) {
252 pr_err("kmem_cache_create failed\n"); 253 pr_err("kmem_cache_create failed\n");
253 return -ENOMEM; 254 return -ENOMEM;
@@ -266,7 +267,6 @@ static int __init usbip_host_init(void)
266 goto err_create_file; 267 goto err_create_file;
267 } 268 }
268 269
269 init_busid_table();
270 pr_info(DRIVER_DESC " v" USBIP_VERSION "\n"); 270 pr_info(DRIVER_DESC " v" USBIP_VERSION "\n");
271 return ret; 271 return ret;
272 272
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index 642840c612ac..ef7c52bb1df9 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -358,8 +358,8 @@ static struct zbud_hdr *zbud_create(uint16_t client_id, uint16_t pool_id,
358 if (unlikely(zbpg == NULL)) 358 if (unlikely(zbpg == NULL))
359 goto out; 359 goto out;
360 /* ok, have a page, now compress the data before taking locks */ 360 /* ok, have a page, now compress the data before taking locks */
361 spin_lock(&zbpg->lock);
362 spin_lock(&zbud_budlists_spinlock); 361 spin_lock(&zbud_budlists_spinlock);
362 spin_lock(&zbpg->lock);
363 list_add_tail(&zbpg->bud_list, &zbud_unbuddied[nchunks].list); 363 list_add_tail(&zbpg->bud_list, &zbud_unbuddied[nchunks].list);
364 zbud_unbuddied[nchunks].count++; 364 zbud_unbuddied[nchunks].count++;
365 zh = &zbpg->buddy[0]; 365 zh = &zbpg->buddy[0];
@@ -389,12 +389,11 @@ init_zh:
389 zh->oid = *oid; 389 zh->oid = *oid;
390 zh->pool_id = pool_id; 390 zh->pool_id = pool_id;
391 zh->client_id = client_id; 391 zh->client_id = client_id;
392 /* can wait to copy the data until the list locks are dropped */
393 spin_unlock(&zbud_budlists_spinlock);
394
395 to = zbud_data(zh, size); 392 to = zbud_data(zh, size);
396 memcpy(to, cdata, size); 393 memcpy(to, cdata, size);
397 spin_unlock(&zbpg->lock); 394 spin_unlock(&zbpg->lock);
395 spin_unlock(&zbud_budlists_spinlock);
396
398 zbud_cumul_chunk_counts[nchunks]++; 397 zbud_cumul_chunk_counts[nchunks]++;
399 atomic_inc(&zcache_zbud_curr_zpages); 398 atomic_inc(&zcache_zbud_curr_zpages);
400 zcache_zbud_cumul_zpages++; 399 zcache_zbud_cumul_zpages++;
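The zbud_create() hunk above swaps the order in which the per-page lock and the global budlists lock are taken, so every path acquires the pair in the same order and they cannot deadlock against each other. The rule, sketched with pthread mutexes (names are illustrative):

#include <pthread.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;	/* "outer" lock */

struct page_desc {
	pthread_mutex_t lock;					/* "inner" lock */
	int on_list;
};

/* Every caller takes list_lock before the per-page lock, never the reverse. */
static void page_publish(struct page_desc *p)
{
	pthread_mutex_lock(&list_lock);
	pthread_mutex_lock(&p->lock);
	p->on_list = 1;
	pthread_mutex_unlock(&p->lock);
	pthread_mutex_unlock(&list_lock);
}

If one path took the locks in the opposite order, two threads could each hold one lock while waiting for the other.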
@@ -655,8 +654,8 @@ static unsigned int zv_max_zsize = (PAGE_SIZE / 8) * 7;
655 */ 654 */
656static unsigned int zv_max_mean_zsize = (PAGE_SIZE / 8) * 5; 655static unsigned int zv_max_mean_zsize = (PAGE_SIZE / 8) * 5;
657 656
658static unsigned long zv_curr_dist_counts[NCHUNKS]; 657static atomic_t zv_curr_dist_counts[NCHUNKS];
659static unsigned long zv_cumul_dist_counts[NCHUNKS]; 658static atomic_t zv_cumul_dist_counts[NCHUNKS];
660 659
661static struct zv_hdr *zv_create(struct xv_pool *xvpool, uint32_t pool_id, 660static struct zv_hdr *zv_create(struct xv_pool *xvpool, uint32_t pool_id,
662 struct tmem_oid *oid, uint32_t index, 661 struct tmem_oid *oid, uint32_t index,
@@ -675,8 +674,8 @@ static struct zv_hdr *zv_create(struct xv_pool *xvpool, uint32_t pool_id,
675 &page, &offset, ZCACHE_GFP_MASK); 674 &page, &offset, ZCACHE_GFP_MASK);
676 if (unlikely(ret)) 675 if (unlikely(ret))
677 goto out; 676 goto out;
678 zv_curr_dist_counts[chunks]++; 677 atomic_inc(&zv_curr_dist_counts[chunks]);
679 zv_cumul_dist_counts[chunks]++; 678 atomic_inc(&zv_cumul_dist_counts[chunks]);
680 zv = kmap_atomic(page, KM_USER0) + offset; 679 zv = kmap_atomic(page, KM_USER0) + offset;
681 zv->index = index; 680 zv->index = index;
682 zv->oid = *oid; 681 zv->oid = *oid;
@@ -698,7 +697,7 @@ static void zv_free(struct xv_pool *xvpool, struct zv_hdr *zv)
698 697
699 ASSERT_SENTINEL(zv, ZVH); 698 ASSERT_SENTINEL(zv, ZVH);
700 BUG_ON(chunks >= NCHUNKS); 699 BUG_ON(chunks >= NCHUNKS);
701 zv_curr_dist_counts[chunks]--; 700 atomic_dec(&zv_curr_dist_counts[chunks]);
702 size -= sizeof(*zv); 701 size -= sizeof(*zv);
703 BUG_ON(size == 0); 702 BUG_ON(size == 0);
704 INVERT_SENTINEL(zv, ZVH); 703 INVERT_SENTINEL(zv, ZVH);
@@ -738,7 +737,7 @@ static int zv_curr_dist_counts_show(char *buf)
738 char *p = buf; 737 char *p = buf;
739 738
740 for (i = 0; i < NCHUNKS; i++) { 739 for (i = 0; i < NCHUNKS; i++) {
741 n = zv_curr_dist_counts[i]; 740 n = atomic_read(&zv_curr_dist_counts[i]);
742 p += sprintf(p, "%lu ", n); 741 p += sprintf(p, "%lu ", n);
743 chunks += n; 742 chunks += n;
744 sum_total_chunks += i * n; 743 sum_total_chunks += i * n;
@@ -754,7 +753,7 @@ static int zv_cumul_dist_counts_show(char *buf)
754 char *p = buf; 753 char *p = buf;
755 754
756 for (i = 0; i < NCHUNKS; i++) { 755 for (i = 0; i < NCHUNKS; i++) {
757 n = zv_cumul_dist_counts[i]; 756 n = atomic_read(&zv_cumul_dist_counts[i]);
758 p += sprintf(p, "%lu ", n); 757 p += sprintf(p, "%lu ", n);
759 chunks += n; 758 chunks += n;
760 sum_total_chunks += i * n; 759 sum_total_chunks += i * n;
@@ -1782,9 +1781,9 @@ static int zcache_frontswap_poolid = -1;
1782 * Swizzling increases objects per swaptype, increasing tmem concurrency 1781 * Swizzling increases objects per swaptype, increasing tmem concurrency
1783 * for heavy swaploads. Later, larger nr_cpus -> larger SWIZ_BITS 1782 * for heavy swaploads. Later, larger nr_cpus -> larger SWIZ_BITS
1784 * Setting SWIZ_BITS to 27 basically reconstructs the swap entry from 1783 * Setting SWIZ_BITS to 27 basically reconstructs the swap entry from
1785 * frontswap_get_page() 1784 * frontswap_get_page(), but has side-effects. Hence using 8.
1786 */ 1785 */
1787#define SWIZ_BITS 27 1786#define SWIZ_BITS 8
1788#define SWIZ_MASK ((1 << SWIZ_BITS) - 1) 1787#define SWIZ_MASK ((1 << SWIZ_BITS) - 1)
1789#define _oswiz(_type, _ind) ((_type << SWIZ_BITS) | (_ind & SWIZ_MASK)) 1788#define _oswiz(_type, _ind) ((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
1790#define iswiz(_ind) (_ind >> SWIZ_BITS) 1789#define iswiz(_ind) (_ind >> SWIZ_BITS)
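The zv_*_dist_counts hunks above turn plain unsigned long histogram counters into atomic_t, since they are updated from concurrent put/free paths without a shared lock, and switch the sysfs readers to atomic_read(). The equivalent idiom in C11 stdatomic (the histogram itself is illustrative):

#include <stdatomic.h>
#include <stdio.h>

#define NCHUNKS 64

static atomic_long curr_dist_counts[NCHUNKS];
static atomic_long cumul_dist_counts[NCHUNKS];

static void chunk_created(int chunks)
{
	atomic_fetch_add(&curr_dist_counts[chunks], 1);
	atomic_fetch_add(&cumul_dist_counts[chunks], 1);
}

static void chunk_freed(int chunks)
{
	atomic_fetch_sub(&curr_dist_counts[chunks], 1);
}

static void dump_counts(void)
{
	for (int i = 0; i < NCHUNKS; i++)
		printf("%ld ", atomic_load(&curr_dist_counts[i]));	/* no torn reads */
	printf("\n");
}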
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index ac44af165b27..44262908def5 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1061,7 +1061,7 @@ attach_cmd:
1061 if (ret < 0) 1061 if (ret < 0)
1062 return iscsit_add_reject_from_cmd( 1062 return iscsit_add_reject_from_cmd(
1063 ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1063 ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1064 1, 1, buf, cmd); 1064 1, 0, buf, cmd);
1065 /* 1065 /*
1066 * Check the CmdSN against ExpCmdSN/MaxCmdSN here if 1066 * Check the CmdSN against ExpCmdSN/MaxCmdSN here if
1067 * the Immediate Bit is not set, and no Immediate 1067 * the Immediate Bit is not set, and no Immediate
@@ -3164,6 +3164,30 @@ static int iscsit_send_task_mgt_rsp(
3164 return 0; 3164 return 0;
3165} 3165}
3166 3166
3167static bool iscsit_check_inaddr_any(struct iscsi_np *np)
3168{
3169 bool ret = false;
3170
3171 if (np->np_sockaddr.ss_family == AF_INET6) {
3172 const struct sockaddr_in6 sin6 = {
3173 .sin6_addr = IN6ADDR_ANY_INIT };
3174 struct sockaddr_in6 *sock_in6 =
3175 (struct sockaddr_in6 *)&np->np_sockaddr;
3176
3177 if (!memcmp(sock_in6->sin6_addr.s6_addr,
3178 sin6.sin6_addr.s6_addr, 16))
3179 ret = true;
3180 } else {
3181 struct sockaddr_in * sock_in =
3182 (struct sockaddr_in *)&np->np_sockaddr;
3183
3184 if (sock_in->sin_addr.s_addr == INADDR_ANY)
3185 ret = true;
3186 }
3187
3188 return ret;
3189}
3190
3167static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd) 3191static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
3168{ 3192{
3169 char *payload = NULL; 3193 char *payload = NULL;
@@ -3213,12 +3237,17 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
3213 spin_lock(&tpg->tpg_np_lock); 3237 spin_lock(&tpg->tpg_np_lock);
3214 list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, 3238 list_for_each_entry(tpg_np, &tpg->tpg_gnp_list,
3215 tpg_np_list) { 3239 tpg_np_list) {
3240 struct iscsi_np *np = tpg_np->tpg_np;
3241 bool inaddr_any = iscsit_check_inaddr_any(np);
3242
3216 len = sprintf(buf, "TargetAddress=" 3243 len = sprintf(buf, "TargetAddress="
3217 "%s%s%s:%hu,%hu", 3244 "%s%s%s:%hu,%hu",
3218 (tpg_np->tpg_np->np_sockaddr.ss_family == AF_INET6) ? 3245 (np->np_sockaddr.ss_family == AF_INET6) ?
3219 "[" : "", tpg_np->tpg_np->np_ip, 3246 "[" : "", (inaddr_any == false) ?
3220 (tpg_np->tpg_np->np_sockaddr.ss_family == AF_INET6) ? 3247 np->np_ip : conn->local_ip,
3221 "]" : "", tpg_np->tpg_np->np_port, 3248 (np->np_sockaddr.ss_family == AF_INET6) ?
3249 "]" : "", (inaddr_any == false) ?
3250 np->np_port : conn->local_port,
3222 tpg->tpgt); 3251 tpg->tpgt);
3223 len += 1; 3252 len += 1;
3224 3253
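iscsit_check_inaddr_any() above decides whether a portal was bound to the wildcard address; in that case the SendTargets response substitutes the address the initiator actually connected to instead of printing 0.0.0.0 or ::. A userspace sketch of the same check over a sockaddr_storage (the helper name is made up):

#include <netinet/in.h>
#include <stdbool.h>
#include <string.h>
#include <sys/socket.h>

static bool sockaddr_is_any(const struct sockaddr_storage *ss)
{
	if (ss->ss_family == AF_INET6) {
		const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)ss;

		/* in6addr_any is the all-zero IPv6 address (::) */
		return memcmp(&sin6->sin6_addr, &in6addr_any,
			      sizeof(struct in6_addr)) == 0;
	}
	if (ss->ss_family == AF_INET) {
		const struct sockaddr_in *sin = (const struct sockaddr_in *)ss;

		return sin->sin_addr.s_addr == htonl(INADDR_ANY);
	}
	return false;
}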
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 3468caab47a2..6b35b37988ed 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -21,6 +21,7 @@
21 21
22#include <linux/configfs.h> 22#include <linux/configfs.h>
23#include <linux/export.h> 23#include <linux/export.h>
24#include <linux/inet.h>
24#include <target/target_core_base.h> 25#include <target/target_core_base.h>
25#include <target/target_core_fabric.h> 26#include <target/target_core_fabric.h>
26#include <target/target_core_fabric_configfs.h> 27#include <target/target_core_fabric_configfs.h>
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index f1a02dad05a0..0ec3b77a0c27 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -508,6 +508,7 @@ struct iscsi_conn {
508 u16 cid; 508 u16 cid;
509 /* Remote TCP Port */ 509 /* Remote TCP Port */
510 u16 login_port; 510 u16 login_port;
511 u16 local_port;
511 int net_size; 512 int net_size;
512 u32 auth_id; 513 u32 auth_id;
513#define CONNFLAG_SCTP_STRUCT_FILE 0x01 514#define CONNFLAG_SCTP_STRUCT_FILE 0x01
@@ -527,6 +528,7 @@ struct iscsi_conn {
527 unsigned char bad_hdr[ISCSI_HDR_LEN]; 528 unsigned char bad_hdr[ISCSI_HDR_LEN];
528#define IPV6_ADDRESS_SPACE 48 529#define IPV6_ADDRESS_SPACE 48
529 unsigned char login_ip[IPV6_ADDRESS_SPACE]; 530 unsigned char login_ip[IPV6_ADDRESS_SPACE];
531 unsigned char local_ip[IPV6_ADDRESS_SPACE];
530 int conn_usage_count; 532 int conn_usage_count;
531 int conn_waiting_on_uc; 533 int conn_waiting_on_uc;
532 atomic_t check_immediate_queue; 534 atomic_t check_immediate_queue;
@@ -561,8 +563,8 @@ struct iscsi_conn {
561 struct hash_desc conn_tx_hash; 563 struct hash_desc conn_tx_hash;
562 /* Used for scheduling TX and RX connection kthreads */ 564 /* Used for scheduling TX and RX connection kthreads */
563 cpumask_var_t conn_cpumask; 565 cpumask_var_t conn_cpumask;
564 int conn_rx_reset_cpumask:1; 566 unsigned int conn_rx_reset_cpumask:1;
565 int conn_tx_reset_cpumask:1; 567 unsigned int conn_tx_reset_cpumask:1;
566 /* list_head of struct iscsi_cmd for this connection */ 568 /* list_head of struct iscsi_cmd for this connection */
567 struct list_head conn_cmd_list; 569 struct list_head conn_cmd_list;
568 struct list_head immed_queue_list; 570 struct list_head immed_queue_list;
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 255c0d67e898..27901e37c125 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -1238,7 +1238,7 @@ void iscsit_mod_dataout_timer(struct iscsi_cmd *cmd)
1238{ 1238{
1239 struct iscsi_conn *conn = cmd->conn; 1239 struct iscsi_conn *conn = cmd->conn;
1240 struct iscsi_session *sess = conn->sess; 1240 struct iscsi_session *sess = conn->sess;
1241 struct iscsi_node_attrib *na = na = iscsit_tpg_get_node_attrib(sess); 1241 struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
1242 1242
1243 spin_lock_bh(&cmd->dataout_timeout_lock); 1243 spin_lock_bh(&cmd->dataout_timeout_lock);
1244 if (!(cmd->dataout_timer_flags & ISCSI_TF_RUNNING)) { 1244 if (!(cmd->dataout_timer_flags & ISCSI_TF_RUNNING)) {
@@ -1261,7 +1261,7 @@ void iscsit_start_dataout_timer(
1261 struct iscsi_conn *conn) 1261 struct iscsi_conn *conn)
1262{ 1262{
1263 struct iscsi_session *sess = conn->sess; 1263 struct iscsi_session *sess = conn->sess;
1264 struct iscsi_node_attrib *na = na = iscsit_tpg_get_node_attrib(sess); 1264 struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
1265 1265
1266 if (cmd->dataout_timer_flags & ISCSI_TF_RUNNING) 1266 if (cmd->dataout_timer_flags & ISCSI_TF_RUNNING)
1267 return; 1267 return;
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 373b0cc6abd8..38cb7ce8469e 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -615,8 +615,8 @@ static int iscsi_post_login_handler(
615 } 615 }
616 616
617 pr_debug("iSCSI Login successful on CID: %hu from %s to" 617 pr_debug("iSCSI Login successful on CID: %hu from %s to"
618 " %s:%hu,%hu\n", conn->cid, conn->login_ip, np->np_ip, 618 " %s:%hu,%hu\n", conn->cid, conn->login_ip,
619 np->np_port, tpg->tpgt); 619 conn->local_ip, conn->local_port, tpg->tpgt);
620 620
621 list_add_tail(&conn->conn_list, &sess->sess_conn_list); 621 list_add_tail(&conn->conn_list, &sess->sess_conn_list);
622 atomic_inc(&sess->nconn); 622 atomic_inc(&sess->nconn);
@@ -658,7 +658,8 @@ static int iscsi_post_login_handler(
658 sess->session_state = TARG_SESS_STATE_LOGGED_IN; 658 sess->session_state = TARG_SESS_STATE_LOGGED_IN;
659 659
660 pr_debug("iSCSI Login successful on CID: %hu from %s to %s:%hu,%hu\n", 660 pr_debug("iSCSI Login successful on CID: %hu from %s to %s:%hu,%hu\n",
661 conn->cid, conn->login_ip, np->np_ip, np->np_port, tpg->tpgt); 661 conn->cid, conn->login_ip, conn->local_ip, conn->local_port,
662 tpg->tpgt);
662 663
663 spin_lock_bh(&sess->conn_lock); 664 spin_lock_bh(&sess->conn_lock);
664 list_add_tail(&conn->conn_list, &sess->sess_conn_list); 665 list_add_tail(&conn->conn_list, &sess->sess_conn_list);
@@ -841,6 +842,14 @@ int iscsi_target_setup_login_socket(
841 goto fail; 842 goto fail;
842 } 843 }
843 844
845 ret = kernel_setsockopt(sock, IPPROTO_IP, IP_FREEBIND,
846 (char *)&opt, sizeof(opt));
847 if (ret < 0) {
848 pr_err("kernel_setsockopt() for IP_FREEBIND"
849 " failed\n");
850 goto fail;
851 }
852
844 ret = kernel_bind(sock, (struct sockaddr *)&np->np_sockaddr, len); 853 ret = kernel_bind(sock, (struct sockaddr *)&np->np_sockaddr, len);
845 if (ret < 0) { 854 if (ret < 0) {
846 pr_err("kernel_bind() failed: %d\n", ret); 855 pr_err("kernel_bind() failed: %d\n", ret);
@@ -1020,6 +1029,18 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
1020 snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c", 1029 snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
1021 &sock_in6.sin6_addr.in6_u); 1030 &sock_in6.sin6_addr.in6_u);
1022 conn->login_port = ntohs(sock_in6.sin6_port); 1031 conn->login_port = ntohs(sock_in6.sin6_port);
1032
1033 if (conn->sock->ops->getname(conn->sock,
1034 (struct sockaddr *)&sock_in6, &err, 0) < 0) {
1035 pr_err("sock_ops->getname() failed.\n");
1036 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
1037 ISCSI_LOGIN_STATUS_TARGET_ERROR);
1038 goto new_sess_out;
1039 }
1040 snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
1041 &sock_in6.sin6_addr.in6_u);
1042 conn->local_port = ntohs(sock_in6.sin6_port);
1043
1023 } else { 1044 } else {
1024 memset(&sock_in, 0, sizeof(struct sockaddr_in)); 1045 memset(&sock_in, 0, sizeof(struct sockaddr_in));
1025 1046
@@ -1032,6 +1053,16 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
1032 } 1053 }
1033 sprintf(conn->login_ip, "%pI4", &sock_in.sin_addr.s_addr); 1054 sprintf(conn->login_ip, "%pI4", &sock_in.sin_addr.s_addr);
1034 conn->login_port = ntohs(sock_in.sin_port); 1055 conn->login_port = ntohs(sock_in.sin_port);
1056
1057 if (conn->sock->ops->getname(conn->sock,
1058 (struct sockaddr *)&sock_in, &err, 0) < 0) {
1059 pr_err("sock_ops->getname() failed.\n");
1060 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
1061 ISCSI_LOGIN_STATUS_TARGET_ERROR);
1062 goto new_sess_out;
1063 }
1064 sprintf(conn->local_ip, "%pI4", &sock_in.sin_addr.s_addr);
1065 conn->local_port = ntohs(sock_in.sin_port);
1035 } 1066 }
1036 1067
1037 conn->network_transport = np->np_network_transport; 1068 conn->network_transport = np->np_network_transport;
@@ -1039,7 +1070,7 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
1039 pr_debug("Received iSCSI login request from %s on %s Network" 1070 pr_debug("Received iSCSI login request from %s on %s Network"
1040 " Portal %s:%hu\n", conn->login_ip, 1071 " Portal %s:%hu\n", conn->login_ip,
1041 (conn->network_transport == ISCSI_TCP) ? "TCP" : "SCTP", 1072 (conn->network_transport == ISCSI_TCP) ? "TCP" : "SCTP",
1042 np->np_ip, np->np_port); 1073 conn->local_ip, conn->local_port);
1043 1074
1044 pr_debug("Moving to TARG_CONN_STATE_IN_LOGIN.\n"); 1075 pr_debug("Moving to TARG_CONN_STATE_IN_LOGIN.\n");
1045 conn->conn_state = TARG_CONN_STATE_IN_LOGIN; 1076 conn->conn_state = TARG_CONN_STATE_IN_LOGIN;
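The login-thread hunks above add a second getname() call so the connection records the local address and port the session was actually accepted on, which matters once portals may bind to the wildcard address. In userspace the same information comes from getsockname() on the accepted socket; a small IPv4-only sketch (the helper name is made up):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

/* Fill ip/port with the local endpoint of an accepted IPv4 connection. */
static int conn_local_endpoint(int fd, char *ip, size_t iplen, unsigned short *port)
{
	struct sockaddr_in sin;
	socklen_t len = sizeof(sin);

	memset(&sin, 0, sizeof(sin));
	if (getsockname(fd, (struct sockaddr *)&sin, &len) < 0) {
		perror("getsockname");
		return -1;
	}
	if (!inet_ntop(AF_INET, &sin.sin_addr, ip, iplen))
		return -1;
	*port = ntohs(sin.sin_port);
	return 0;
}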
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index a05ca1c4f01c..11287e1ece13 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -849,6 +849,17 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd)
849 case ISCSI_OP_SCSI_TMFUNC: 849 case ISCSI_OP_SCSI_TMFUNC:
850 transport_generic_free_cmd(&cmd->se_cmd, 1); 850 transport_generic_free_cmd(&cmd->se_cmd, 1);
851 break; 851 break;
852 case ISCSI_OP_REJECT:
853 /*
854 * Handle special case for REJECT when iscsi_add_reject*() has
855 * overwritten the original iscsi_opcode assignment, and the
856 * associated cmd->se_cmd needs to be released.
857 */
858 if (cmd->se_cmd.se_tfo != NULL) {
859 transport_generic_free_cmd(&cmd->se_cmd, 1);
860 break;
861 }
862 /* Fall-through */
852 default: 863 default:
853 iscsit_release_cmd(cmd); 864 iscsit_release_cmd(cmd);
854 break; 865 break;
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 1b1edd14f4bf..01a2691dfb47 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -78,7 +78,7 @@ int target_emulate_report_target_port_groups(struct se_task *task)
78 return -EINVAL; 78 return -EINVAL;
79 } 79 }
80 80
81 buf = transport_kmap_first_data_page(cmd); 81 buf = transport_kmap_data_sg(cmd);
82 82
83 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); 83 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
84 list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list, 84 list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
@@ -163,7 +163,7 @@ int target_emulate_report_target_port_groups(struct se_task *task)
163 buf[2] = ((rd_len >> 8) & 0xff); 163 buf[2] = ((rd_len >> 8) & 0xff);
164 buf[3] = (rd_len & 0xff); 164 buf[3] = (rd_len & 0xff);
165 165
166 transport_kunmap_first_data_page(cmd); 166 transport_kunmap_data_sg(cmd);
167 167
168 task->task_scsi_status = GOOD; 168 task->task_scsi_status = GOOD;
169 transport_complete_task(task, 1); 169 transport_complete_task(task, 1);
@@ -194,7 +194,7 @@ int target_emulate_set_target_port_groups(struct se_task *task)
194 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 194 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
195 return -EINVAL; 195 return -EINVAL;
196 } 196 }
197 buf = transport_kmap_first_data_page(cmd); 197 buf = transport_kmap_data_sg(cmd);
198 198
199 /* 199 /*
 200 * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed 200 * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
@@ -351,7 +351,7 @@ int target_emulate_set_target_port_groups(struct se_task *task)
351 } 351 }
352 352
353out: 353out:
354 transport_kunmap_first_data_page(cmd); 354 transport_kunmap_data_sg(cmd);
355 task->task_scsi_status = GOOD; 355 task->task_scsi_status = GOOD;
356 transport_complete_task(task, 1); 356 transport_complete_task(task, 1);
357 return 0; 357 return 0;
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 2f2235edefff..f3d71fa88a28 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -83,7 +83,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
83 return -EINVAL; 83 return -EINVAL;
84 } 84 }
85 85
86 buf = transport_kmap_first_data_page(cmd); 86 buf = transport_kmap_data_sg(cmd);
87 87
88 if (dev == tpg->tpg_virt_lun0.lun_se_dev) { 88 if (dev == tpg->tpg_virt_lun0.lun_se_dev) {
89 buf[0] = 0x3f; /* Not connected */ 89 buf[0] = 0x3f; /* Not connected */
@@ -134,7 +134,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
134 buf[4] = 31; /* Set additional length to 31 */ 134 buf[4] = 31; /* Set additional length to 31 */
135 135
136out: 136out:
137 transport_kunmap_first_data_page(cmd); 137 transport_kunmap_data_sg(cmd);
138 return 0; 138 return 0;
139} 139}
140 140
@@ -698,6 +698,13 @@ int target_emulate_inquiry(struct se_task *task)
698 int p, ret; 698 int p, ret;
699 699
700 if (!(cdb[1] & 0x1)) { 700 if (!(cdb[1] & 0x1)) {
701 if (cdb[2]) {
702 pr_err("INQUIRY with EVPD==0 but PAGE CODE=%02x\n",
703 cdb[2]);
704 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
705 return -EINVAL;
706 }
707
701 ret = target_emulate_inquiry_std(cmd); 708 ret = target_emulate_inquiry_std(cmd);
702 goto out; 709 goto out;
703 } 710 }
@@ -716,7 +723,7 @@ int target_emulate_inquiry(struct se_task *task)
716 return -EINVAL; 723 return -EINVAL;
717 } 724 }
718 725
719 buf = transport_kmap_first_data_page(cmd); 726 buf = transport_kmap_data_sg(cmd);
720 727
721 buf[0] = dev->transport->get_device_type(dev); 728 buf[0] = dev->transport->get_device_type(dev);
722 729
@@ -729,11 +736,11 @@ int target_emulate_inquiry(struct se_task *task)
729 } 736 }
730 737
731 pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]); 738 pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
732 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 739 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
733 ret = -EINVAL; 740 ret = -EINVAL;
734 741
735out_unmap: 742out_unmap:
736 transport_kunmap_first_data_page(cmd); 743 transport_kunmap_data_sg(cmd);
737out: 744out:
738 if (!ret) { 745 if (!ret) {
739 task->task_scsi_status = GOOD; 746 task->task_scsi_status = GOOD;
@@ -755,7 +762,7 @@ int target_emulate_readcapacity(struct se_task *task)
755 else 762 else
756 blocks = (u32)blocks_long; 763 blocks = (u32)blocks_long;
757 764
758 buf = transport_kmap_first_data_page(cmd); 765 buf = transport_kmap_data_sg(cmd);
759 766
760 buf[0] = (blocks >> 24) & 0xff; 767 buf[0] = (blocks >> 24) & 0xff;
761 buf[1] = (blocks >> 16) & 0xff; 768 buf[1] = (blocks >> 16) & 0xff;
@@ -771,7 +778,7 @@ int target_emulate_readcapacity(struct se_task *task)
771 if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws) 778 if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
772 put_unaligned_be32(0xFFFFFFFF, &buf[0]); 779 put_unaligned_be32(0xFFFFFFFF, &buf[0]);
773 780
774 transport_kunmap_first_data_page(cmd); 781 transport_kunmap_data_sg(cmd);
775 782
776 task->task_scsi_status = GOOD; 783 task->task_scsi_status = GOOD;
777 transport_complete_task(task, 1); 784 transport_complete_task(task, 1);
@@ -785,7 +792,7 @@ int target_emulate_readcapacity_16(struct se_task *task)
785 unsigned char *buf; 792 unsigned char *buf;
786 unsigned long long blocks = dev->transport->get_blocks(dev); 793 unsigned long long blocks = dev->transport->get_blocks(dev);
787 794
788 buf = transport_kmap_first_data_page(cmd); 795 buf = transport_kmap_data_sg(cmd);
789 796
790 buf[0] = (blocks >> 56) & 0xff; 797 buf[0] = (blocks >> 56) & 0xff;
791 buf[1] = (blocks >> 48) & 0xff; 798 buf[1] = (blocks >> 48) & 0xff;
@@ -806,7 +813,7 @@ int target_emulate_readcapacity_16(struct se_task *task)
806 if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws) 813 if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
807 buf[14] = 0x80; 814 buf[14] = 0x80;
808 815
809 transport_kunmap_first_data_page(cmd); 816 transport_kunmap_data_sg(cmd);
810 817
811 task->task_scsi_status = GOOD; 818 task->task_scsi_status = GOOD;
812 transport_complete_task(task, 1); 819 transport_complete_task(task, 1);
@@ -1019,9 +1026,9 @@ int target_emulate_modesense(struct se_task *task)
1019 offset = cmd->data_length; 1026 offset = cmd->data_length;
1020 } 1027 }
1021 1028
1022 rbuf = transport_kmap_first_data_page(cmd); 1029 rbuf = transport_kmap_data_sg(cmd);
1023 memcpy(rbuf, buf, offset); 1030 memcpy(rbuf, buf, offset);
1024 transport_kunmap_first_data_page(cmd); 1031 transport_kunmap_data_sg(cmd);
1025 1032
1026 task->task_scsi_status = GOOD; 1033 task->task_scsi_status = GOOD;
1027 transport_complete_task(task, 1); 1034 transport_complete_task(task, 1);
@@ -1043,7 +1050,7 @@ int target_emulate_request_sense(struct se_task *task)
1043 return -ENOSYS; 1050 return -ENOSYS;
1044 } 1051 }
1045 1052
1046 buf = transport_kmap_first_data_page(cmd); 1053 buf = transport_kmap_data_sg(cmd);
1047 1054
1048 if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) { 1055 if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
1049 /* 1056 /*
@@ -1051,11 +1058,8 @@ int target_emulate_request_sense(struct se_task *task)
1051 */ 1058 */
1052 buf[0] = 0x70; 1059 buf[0] = 0x70;
1053 buf[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION; 1060 buf[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
1054 /* 1061
1055 * Make sure request data length is enough for additional 1062 if (cmd->data_length < 18) {
1056 * sense data.
1057 */
1058 if (cmd->data_length <= 18) {
1059 buf[7] = 0x00; 1063 buf[7] = 0x00;
1060 err = -EINVAL; 1064 err = -EINVAL;
1061 goto end; 1065 goto end;
@@ -1072,11 +1076,8 @@ int target_emulate_request_sense(struct se_task *task)
1072 */ 1076 */
1073 buf[0] = 0x70; 1077 buf[0] = 0x70;
1074 buf[SPC_SENSE_KEY_OFFSET] = NO_SENSE; 1078 buf[SPC_SENSE_KEY_OFFSET] = NO_SENSE;
1075 /* 1079
1076 * Make sure request data length is enough for additional 1080 if (cmd->data_length < 18) {
1077 * sense data.
1078 */
1079 if (cmd->data_length <= 18) {
1080 buf[7] = 0x00; 1081 buf[7] = 0x00;
1081 err = -EINVAL; 1082 err = -EINVAL;
1082 goto end; 1083 goto end;
@@ -1089,7 +1090,7 @@ int target_emulate_request_sense(struct se_task *task)
1089 } 1090 }
1090 1091
1091end: 1092end:
1092 transport_kunmap_first_data_page(cmd); 1093 transport_kunmap_data_sg(cmd);
1093 task->task_scsi_status = GOOD; 1094 task->task_scsi_status = GOOD;
1094 transport_complete_task(task, 1); 1095 transport_complete_task(task, 1);
1095 return 0; 1096 return 0;
@@ -1123,7 +1124,7 @@ int target_emulate_unmap(struct se_task *task)
1123 dl = get_unaligned_be16(&cdb[0]); 1124 dl = get_unaligned_be16(&cdb[0]);
1124 bd_dl = get_unaligned_be16(&cdb[2]); 1125 bd_dl = get_unaligned_be16(&cdb[2]);
1125 1126
1126 buf = transport_kmap_first_data_page(cmd); 1127 buf = transport_kmap_data_sg(cmd);
1127 1128
1128 ptr = &buf[offset]; 1129 ptr = &buf[offset];
1129 pr_debug("UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu" 1130 pr_debug("UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu"
@@ -1147,7 +1148,7 @@ int target_emulate_unmap(struct se_task *task)
1147 } 1148 }
1148 1149
1149err: 1150err:
1150 transport_kunmap_first_data_page(cmd); 1151 transport_kunmap_data_sg(cmd);
1151 if (!ret) { 1152 if (!ret) {
1152 task->task_scsi_status = GOOD; 1153 task->task_scsi_status = GOOD;
1153 transport_complete_task(task, 1); 1154 transport_complete_task(task, 1);
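Editor's note: the INQUIRY hunk above rejects the combination EVPD == 0 with a non-zero PAGE CODE, which SPC treats as an invalid field in the CDB. A small user-space sketch of that validation, not the kernel function, follows:

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

/*
 * Minimal validation of a 6-byte INQUIRY CDB: with EVPD == 0 a non-zero
 * PAGE CODE is rejected, mirroring the check added in the patch.
 */
static int check_inquiry_cdb(const uint8_t *cdb)
{
	uint8_t evpd = cdb[1] & 0x1;
	uint8_t page_code = cdb[2];

	if (!evpd && page_code) {
		fprintf(stderr, "INQUIRY with EVPD==0 but PAGE CODE=%02x\n",
			page_code);
		return -EINVAL;
	}
	return 0;
}

int main(void)
{
	uint8_t standard[6] = { 0x12, 0x00, 0x00, 0x00, 0x24, 0x00 };
	uint8_t bad[6]      = { 0x12, 0x00, 0x80, 0x00, 0x24, 0x00 };
	uint8_t vpd80[6]    = { 0x12, 0x01, 0x80, 0x00, 0x24, 0x00 };

	printf("standard INQUIRY:   %d\n", check_inquiry_cdb(standard)); /* 0 */
	printf("EVPD=0, page 0x80:  %d\n", check_inquiry_cdb(bad));      /* -EINVAL */
	printf("EVPD=1, page 0x80:  %d\n", check_inquiry_cdb(vpd80));    /* 0 */
	return 0;
}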
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 0955bb8979fb..6e043eeb1db9 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -1704,13 +1704,15 @@ static ssize_t target_core_store_dev_alias(
1704 return -EINVAL; 1704 return -EINVAL;
1705 } 1705 }
1706 1706
1707 se_dev->su_dev_flags |= SDF_USING_ALIAS;
1708 read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN, 1707 read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN,
1709 "%s", page); 1708 "%s", page);
1710 1709 if (!read_bytes)
1710 return -EINVAL;
1711 if (se_dev->se_dev_alias[read_bytes - 1] == '\n') 1711 if (se_dev->se_dev_alias[read_bytes - 1] == '\n')
1712 se_dev->se_dev_alias[read_bytes - 1] = '\0'; 1712 se_dev->se_dev_alias[read_bytes - 1] = '\0';
1713 1713
1714 se_dev->su_dev_flags |= SDF_USING_ALIAS;
1715
1714 pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n", 1716 pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n",
1715 config_item_name(&hba->hba_group.cg_item), 1717 config_item_name(&hba->hba_group.cg_item),
1716 config_item_name(&se_dev->se_dev_group.cg_item), 1718 config_item_name(&se_dev->se_dev_group.cg_item),
@@ -1753,13 +1755,15 @@ static ssize_t target_core_store_dev_udev_path(
1753 return -EINVAL; 1755 return -EINVAL;
1754 } 1756 }
1755 1757
1756 se_dev->su_dev_flags |= SDF_USING_UDEV_PATH;
1757 read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN, 1758 read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN,
1758 "%s", page); 1759 "%s", page);
1759 1760 if (!read_bytes)
1761 return -EINVAL;
1760 if (se_dev->se_dev_udev_path[read_bytes - 1] == '\n') 1762 if (se_dev->se_dev_udev_path[read_bytes - 1] == '\n')
1761 se_dev->se_dev_udev_path[read_bytes - 1] = '\0'; 1763 se_dev->se_dev_udev_path[read_bytes - 1] = '\0';
1762 1764
1765 se_dev->su_dev_flags |= SDF_USING_UDEV_PATH;
1766
1763 pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n", 1767 pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
1764 config_item_name(&hba->hba_group.cg_item), 1768 config_item_name(&hba->hba_group.cg_item),
1765 config_item_name(&se_dev->se_dev_group.cg_item), 1769 config_item_name(&se_dev->se_dev_group.cg_item),
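Editor's note: both configfs hunks above reorder the store logic so the snprintf() return value is checked before indexing read_bytes - 1, and the SDF_USING_* flag is set only after a string was actually copied. A stand-alone sketch of that ordering, with illustrative names:

#include <stdio.h>
#include <errno.h>

#define ALIAS_LEN 64

/*
 * Copy the user string first, bail out if nothing was written, strip a
 * trailing newline, and only then mark the alias as in use.
 */
static int store_alias(char *alias, int *flags, const char *page)
{
	int read_bytes;

	read_bytes = snprintf(alias, ALIAS_LEN, "%s", page);
	if (!read_bytes)
		return -EINVAL;             /* nothing copied, flags untouched */
	if (read_bytes >= ALIAS_LEN)
		read_bytes = ALIAS_LEN - 1; /* snprintf reports the untruncated length */

	if (alias[read_bytes - 1] == '\n')
		alias[read_bytes - 1] = '\0';

	*flags |= 0x1;                      /* e.g. SDF_USING_ALIAS */
	return read_bytes;
}

int main(void)
{
	char alias[ALIAS_LEN];
	int flags = 0;

	store_alias(alias, &flags, "my_device_alias\n");
	printf("alias='%s' flags=%#x\n", alias, flags);

	printf("empty input -> %d (flags unchanged at %#x)\n",
	       store_alias(alias, &flags, ""), flags);
	return 0;
}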
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 0c5992f0d946..edbcabbf85f7 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -320,11 +320,12 @@ int core_free_device_list_for_node(
320void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd) 320void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
321{ 321{
322 struct se_dev_entry *deve; 322 struct se_dev_entry *deve;
323 unsigned long flags;
323 324
324 spin_lock_irq(&se_nacl->device_list_lock); 325 spin_lock_irqsave(&se_nacl->device_list_lock, flags);
325 deve = &se_nacl->device_list[se_cmd->orig_fe_lun]; 326 deve = &se_nacl->device_list[se_cmd->orig_fe_lun];
326 deve->deve_cmds--; 327 deve->deve_cmds--;
327 spin_unlock_irq(&se_nacl->device_list_lock); 328 spin_unlock_irqrestore(&se_nacl->device_list_lock, flags);
328} 329}
329 330
330void core_update_device_list_access( 331void core_update_device_list_access(
@@ -656,7 +657,7 @@ int target_report_luns(struct se_task *se_task)
656 unsigned char *buf; 657 unsigned char *buf;
657 u32 cdb_offset = 0, lun_count = 0, offset = 8, i; 658 u32 cdb_offset = 0, lun_count = 0, offset = 8, i;
658 659
659 buf = transport_kmap_first_data_page(se_cmd); 660 buf = (unsigned char *) transport_kmap_data_sg(se_cmd);
660 661
661 /* 662 /*
662 * If no struct se_session pointer is present, this struct se_cmd is 663 * If no struct se_session pointer is present, this struct se_cmd is
@@ -694,7 +695,7 @@ int target_report_luns(struct se_task *se_task)
694 * See SPC3 r07, page 159. 695 * See SPC3 r07, page 159.
695 */ 696 */
696done: 697done:
697 transport_kunmap_first_data_page(se_cmd); 698 transport_kunmap_data_sg(se_cmd);
698 lun_count *= 8; 699 lun_count *= 8;
699 buf[0] = ((lun_count >> 24) & 0xff); 700 buf[0] = ((lun_count >> 24) & 0xff);
700 buf[1] = ((lun_count >> 16) & 0xff); 701 buf[1] = ((lun_count >> 16) & 0xff);
@@ -1294,24 +1295,26 @@ struct se_lun *core_dev_add_lun(
1294{ 1295{
1295 struct se_lun *lun_p; 1296 struct se_lun *lun_p;
1296 u32 lun_access = 0; 1297 u32 lun_access = 0;
1298 int rc;
1297 1299
1298 if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) { 1300 if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
1299 pr_err("Unable to export struct se_device while dev_access_obj: %d\n", 1301 pr_err("Unable to export struct se_device while dev_access_obj: %d\n",
1300 atomic_read(&dev->dev_access_obj.obj_access_count)); 1302 atomic_read(&dev->dev_access_obj.obj_access_count));
1301 return NULL; 1303 return ERR_PTR(-EACCES);
1302 } 1304 }
1303 1305
1304 lun_p = core_tpg_pre_addlun(tpg, lun); 1306 lun_p = core_tpg_pre_addlun(tpg, lun);
1305 if ((IS_ERR(lun_p)) || !lun_p) 1307 if (IS_ERR(lun_p))
1306 return NULL; 1308 return lun_p;
1307 1309
1308 if (dev->dev_flags & DF_READ_ONLY) 1310 if (dev->dev_flags & DF_READ_ONLY)
1309 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; 1311 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
1310 else 1312 else
1311 lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; 1313 lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
1312 1314
1313 if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0) 1315 rc = core_tpg_post_addlun(tpg, lun_p, lun_access, dev);
1314 return NULL; 1316 if (rc < 0)
1317 return ERR_PTR(rc);
1315 1318
1316 pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from" 1319 pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
1317 " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(), 1320 " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
@@ -1348,11 +1351,10 @@ int core_dev_del_lun(
1348 u32 unpacked_lun) 1351 u32 unpacked_lun)
1349{ 1352{
1350 struct se_lun *lun; 1353 struct se_lun *lun;
1351 int ret = 0;
1352 1354
1353 lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret); 1355 lun = core_tpg_pre_dellun(tpg, unpacked_lun);
1354 if (!lun) 1356 if (IS_ERR(lun))
1355 return ret; 1357 return PTR_ERR(lun);
1356 1358
1357 core_tpg_post_dellun(tpg, lun); 1359 core_tpg_post_dellun(tpg, lun);
1358 1360
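Editor's note: core_dev_add_lun() and core_tpg_pre_dellun() now return ERR_PTR()-encoded errors instead of NULL plus an out-parameter, so callers can propagate the real errno. The following user-space approximation of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers shows why that is convenient; it is a sketch, not the kernel headers.

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

/* Encode a small negative errno in the pointer value itself. */
static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct lun { int id; };

static struct lun *add_lun(int busy)
{
	static struct lun the_lun = { .id = 0 };

	if (busy)
		return ERR_PTR(-EACCES);    /* device already claimed */
	return &the_lun;
}

int main(void)
{
	struct lun *lun = add_lun(1);

	if (IS_ERR(lun))
		printf("add_lun failed: %ld\n", PTR_ERR(lun)); /* -13 (EACCES) */

	lun = add_lun(0);
	if (!IS_ERR(lun))
		printf("added LUN %d\n", lun->id);
	return 0;
}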
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 4f77cce22646..9a2ce11e1a6e 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -766,9 +766,9 @@ static int target_fabric_port_link(
766 766
767 lun_p = core_dev_add_lun(se_tpg, dev->se_hba, dev, 767 lun_p = core_dev_add_lun(se_tpg, dev->se_hba, dev,
768 lun->unpacked_lun); 768 lun->unpacked_lun);
769 if (IS_ERR(lun_p) || !lun_p) { 769 if (IS_ERR(lun_p)) {
770 pr_err("core_dev_add_lun() failed\n"); 770 pr_err("core_dev_add_lun() failed\n");
771 ret = -EINVAL; 771 ret = PTR_ERR(lun_p);
772 goto out; 772 goto out;
773 } 773 }
774 774
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index cc8e6b58ef20..8572eae62da7 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -129,7 +129,7 @@ static struct se_device *iblock_create_virtdevice(
129 /* 129 /*
130 * These settings need to be made tunable.. 130 * These settings need to be made tunable..
131 */ 131 */
132 ib_dev->ibd_bio_set = bioset_create(32, 64); 132 ib_dev->ibd_bio_set = bioset_create(32, 0);
133 if (!ib_dev->ibd_bio_set) { 133 if (!ib_dev->ibd_bio_set) {
134 pr_err("IBLOCK: Unable to create bioset()\n"); 134 pr_err("IBLOCK: Unable to create bioset()\n");
135 return ERR_PTR(-ENOMEM); 135 return ERR_PTR(-ENOMEM);
@@ -181,7 +181,7 @@ static struct se_device *iblock_create_virtdevice(
181 */ 181 */
182 dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1; 182 dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1;
183 dev->se_sub_dev->se_dev_attrib.unmap_granularity = 183 dev->se_sub_dev->se_dev_attrib.unmap_granularity =
184 q->limits.discard_granularity; 184 q->limits.discard_granularity >> 9;
185 dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = 185 dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
186 q->limits.discard_alignment; 186 q->limits.discard_alignment;
187 187
@@ -488,6 +488,13 @@ iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num)
488 struct iblock_req *ib_req = IBLOCK_REQ(task); 488 struct iblock_req *ib_req = IBLOCK_REQ(task);
489 struct bio *bio; 489 struct bio *bio;
490 490
491 /*
492 * Only allocate as many vector entries as the bio code allows us to,
493 * we'll loop later on until we have handled the whole request.
494 */
495 if (sg_num > BIO_MAX_PAGES)
496 sg_num = BIO_MAX_PAGES;
497
491 bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set); 498 bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
492 if (!bio) { 499 if (!bio) {
493 pr_err("Unable to allocate memory for bio\n"); 500 pr_err("Unable to allocate memory for bio\n");
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 26f135e94f6e..45001364788a 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -90,7 +90,7 @@ void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
90struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32); 90struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32);
91int core_tpg_post_addlun(struct se_portal_group *, struct se_lun *, 91int core_tpg_post_addlun(struct se_portal_group *, struct se_lun *,
92 u32, void *); 92 u32, void *);
93struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32, int *); 93struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32 unpacked_lun);
94int core_tpg_post_dellun(struct se_portal_group *, struct se_lun *); 94int core_tpg_post_dellun(struct se_portal_group *, struct se_lun *);
95 95
96/* target_core_transport.c */ 96/* target_core_transport.c */
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 429ad7291664..b7c779389eea 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -478,6 +478,7 @@ static int core_scsi3_pr_seq_non_holder(
478 case READ_MEDIA_SERIAL_NUMBER: 478 case READ_MEDIA_SERIAL_NUMBER:
479 case REPORT_LUNS: 479 case REPORT_LUNS:
480 case REQUEST_SENSE: 480 case REQUEST_SENSE:
481 case PERSISTENT_RESERVE_IN:
481 ret = 0; /*/ Allowed CDBs */ 482 ret = 0; /*/ Allowed CDBs */
482 break; 483 break;
483 default: 484 default:
@@ -1534,7 +1535,7 @@ static int core_scsi3_decode_spec_i_port(
1534 tidh_new->dest_local_nexus = 1; 1535 tidh_new->dest_local_nexus = 1;
1535 list_add_tail(&tidh_new->dest_list, &tid_dest_list); 1536 list_add_tail(&tidh_new->dest_list, &tid_dest_list);
1536 1537
1537 buf = transport_kmap_first_data_page(cmd); 1538 buf = transport_kmap_data_sg(cmd);
1538 /* 1539 /*
1539 * For a PERSISTENT RESERVE OUT specify initiator ports payload, 1540 * For a PERSISTENT RESERVE OUT specify initiator ports payload,
1540 * first extract TransportID Parameter Data Length, and make sure 1541 * first extract TransportID Parameter Data Length, and make sure
@@ -1785,7 +1786,7 @@ static int core_scsi3_decode_spec_i_port(
1785 1786
1786 } 1787 }
1787 1788
1788 transport_kunmap_first_data_page(cmd); 1789 transport_kunmap_data_sg(cmd);
1789 1790
1790 /* 1791 /*
1791 * Go ahead and create a registrations from tid_dest_list for the 1792 * Go ahead and create a registrations from tid_dest_list for the
@@ -1833,7 +1834,7 @@ static int core_scsi3_decode_spec_i_port(
1833 1834
1834 return 0; 1835 return 0;
1835out: 1836out:
1836 transport_kunmap_first_data_page(cmd); 1837 transport_kunmap_data_sg(cmd);
1837 /* 1838 /*
1838 * For the failure case, release everything from tid_dest_list 1839 * For the failure case, release everything from tid_dest_list
1839 * including *dest_pr_reg and the configfs dependances.. 1840 * including *dest_pr_reg and the configfs dependances..
@@ -3120,7 +3121,7 @@ static int core_scsi3_pro_preempt(
3120 if (!calling_it_nexus) 3121 if (!calling_it_nexus)
3121 core_scsi3_ua_allocate(pr_reg_nacl, 3122 core_scsi3_ua_allocate(pr_reg_nacl,
3122 pr_res_mapped_lun, 0x2A, 3123 pr_res_mapped_lun, 0x2A,
3123 ASCQ_2AH_RESERVATIONS_PREEMPTED); 3124 ASCQ_2AH_REGISTRATIONS_PREEMPTED);
3124 } 3125 }
3125 spin_unlock(&pr_tmpl->registration_lock); 3126 spin_unlock(&pr_tmpl->registration_lock);
3126 /* 3127 /*
@@ -3233,7 +3234,7 @@ static int core_scsi3_pro_preempt(
3233 * additional sense code set to REGISTRATIONS PREEMPTED; 3234 * additional sense code set to REGISTRATIONS PREEMPTED;
3234 */ 3235 */
3235 core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, 0x2A, 3236 core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, 0x2A,
3236 ASCQ_2AH_RESERVATIONS_PREEMPTED); 3237 ASCQ_2AH_REGISTRATIONS_PREEMPTED);
3237 } 3238 }
3238 spin_unlock(&pr_tmpl->registration_lock); 3239 spin_unlock(&pr_tmpl->registration_lock);
3239 /* 3240 /*
@@ -3410,14 +3411,14 @@ static int core_scsi3_emulate_pro_register_and_move(
3410 * will be moved to for the TransportID containing SCSI initiator WWN 3411 * will be moved to for the TransportID containing SCSI initiator WWN
3411 * information. 3412 * information.
3412 */ 3413 */
3413 buf = transport_kmap_first_data_page(cmd); 3414 buf = transport_kmap_data_sg(cmd);
3414 rtpi = (buf[18] & 0xff) << 8; 3415 rtpi = (buf[18] & 0xff) << 8;
3415 rtpi |= buf[19] & 0xff; 3416 rtpi |= buf[19] & 0xff;
3416 tid_len = (buf[20] & 0xff) << 24; 3417 tid_len = (buf[20] & 0xff) << 24;
3417 tid_len |= (buf[21] & 0xff) << 16; 3418 tid_len |= (buf[21] & 0xff) << 16;
3418 tid_len |= (buf[22] & 0xff) << 8; 3419 tid_len |= (buf[22] & 0xff) << 8;
3419 tid_len |= buf[23] & 0xff; 3420 tid_len |= buf[23] & 0xff;
3420 transport_kunmap_first_data_page(cmd); 3421 transport_kunmap_data_sg(cmd);
3421 buf = NULL; 3422 buf = NULL;
3422 3423
3423 if ((tid_len + 24) != cmd->data_length) { 3424 if ((tid_len + 24) != cmd->data_length) {
@@ -3469,7 +3470,7 @@ static int core_scsi3_emulate_pro_register_and_move(
3469 return -EINVAL; 3470 return -EINVAL;
3470 } 3471 }
3471 3472
3472 buf = transport_kmap_first_data_page(cmd); 3473 buf = transport_kmap_data_sg(cmd);
3473 proto_ident = (buf[24] & 0x0f); 3474 proto_ident = (buf[24] & 0x0f);
3474#if 0 3475#if 0
3475 pr_debug("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:" 3476 pr_debug("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:"
@@ -3503,7 +3504,7 @@ static int core_scsi3_emulate_pro_register_and_move(
3503 goto out; 3504 goto out;
3504 } 3505 }
3505 3506
3506 transport_kunmap_first_data_page(cmd); 3507 transport_kunmap_data_sg(cmd);
3507 buf = NULL; 3508 buf = NULL;
3508 3509
3509 pr_debug("SPC-3 PR [%s] Extracted initiator %s identifier: %s" 3510 pr_debug("SPC-3 PR [%s] Extracted initiator %s identifier: %s"
@@ -3768,13 +3769,13 @@ after_iport_check:
3768 " REGISTER_AND_MOVE\n"); 3769 " REGISTER_AND_MOVE\n");
3769 } 3770 }
3770 3771
3771 transport_kunmap_first_data_page(cmd); 3772 transport_kunmap_data_sg(cmd);
3772 3773
3773 core_scsi3_put_pr_reg(dest_pr_reg); 3774 core_scsi3_put_pr_reg(dest_pr_reg);
3774 return 0; 3775 return 0;
3775out: 3776out:
3776 if (buf) 3777 if (buf)
3777 transport_kunmap_first_data_page(cmd); 3778 transport_kunmap_data_sg(cmd);
3778 if (dest_se_deve) 3779 if (dest_se_deve)
3779 core_scsi3_lunacl_undepend_item(dest_se_deve); 3780 core_scsi3_lunacl_undepend_item(dest_se_deve);
3780 if (dest_node_acl) 3781 if (dest_node_acl)
@@ -3848,7 +3849,7 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
3848 scope = (cdb[2] & 0xf0); 3849 scope = (cdb[2] & 0xf0);
3849 type = (cdb[2] & 0x0f); 3850 type = (cdb[2] & 0x0f);
3850 3851
3851 buf = transport_kmap_first_data_page(cmd); 3852 buf = transport_kmap_data_sg(cmd);
3852 /* 3853 /*
3853 * From PERSISTENT_RESERVE_OUT parameter list (payload) 3854 * From PERSISTENT_RESERVE_OUT parameter list (payload)
3854 */ 3855 */
@@ -3866,7 +3867,7 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
3866 aptpl = (buf[17] & 0x01); 3867 aptpl = (buf[17] & 0x01);
3867 unreg = (buf[17] & 0x02); 3868 unreg = (buf[17] & 0x02);
3868 } 3869 }
3869 transport_kunmap_first_data_page(cmd); 3870 transport_kunmap_data_sg(cmd);
3870 buf = NULL; 3871 buf = NULL;
3871 3872
3872 /* 3873 /*
@@ -3966,7 +3967,7 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
3966 return -EINVAL; 3967 return -EINVAL;
3967 } 3968 }
3968 3969
3969 buf = transport_kmap_first_data_page(cmd); 3970 buf = transport_kmap_data_sg(cmd);
3970 buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff); 3971 buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
3971 buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff); 3972 buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
3972 buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff); 3973 buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
@@ -4000,7 +4001,7 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
4000 buf[6] = ((add_len >> 8) & 0xff); 4001 buf[6] = ((add_len >> 8) & 0xff);
4001 buf[7] = (add_len & 0xff); 4002 buf[7] = (add_len & 0xff);
4002 4003
4003 transport_kunmap_first_data_page(cmd); 4004 transport_kunmap_data_sg(cmd);
4004 4005
4005 return 0; 4006 return 0;
4006} 4007}
@@ -4026,7 +4027,7 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
4026 return -EINVAL; 4027 return -EINVAL;
4027 } 4028 }
4028 4029
4029 buf = transport_kmap_first_data_page(cmd); 4030 buf = transport_kmap_data_sg(cmd);
4030 buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff); 4031 buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
4031 buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff); 4032 buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
4032 buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff); 4033 buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
@@ -4085,7 +4086,7 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
4085 4086
4086err: 4087err:
4087 spin_unlock(&se_dev->dev_reservation_lock); 4088 spin_unlock(&se_dev->dev_reservation_lock);
4088 transport_kunmap_first_data_page(cmd); 4089 transport_kunmap_data_sg(cmd);
4089 4090
4090 return 0; 4091 return 0;
4091} 4092}
@@ -4109,7 +4110,7 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
4109 return -EINVAL; 4110 return -EINVAL;
4110 } 4111 }
4111 4112
4112 buf = transport_kmap_first_data_page(cmd); 4113 buf = transport_kmap_data_sg(cmd);
4113 4114
4114 buf[0] = ((add_len << 8) & 0xff); 4115 buf[0] = ((add_len << 8) & 0xff);
4115 buf[1] = (add_len & 0xff); 4116 buf[1] = (add_len & 0xff);
@@ -4141,7 +4142,7 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
4141 buf[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */ 4142 buf[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */
4142 buf[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */ 4143 buf[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
4143 4144
4144 transport_kunmap_first_data_page(cmd); 4145 transport_kunmap_data_sg(cmd);
4145 4146
4146 return 0; 4147 return 0;
4147} 4148}
@@ -4171,7 +4172,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
4171 return -EINVAL; 4172 return -EINVAL;
4172 } 4173 }
4173 4174
4174 buf = transport_kmap_first_data_page(cmd); 4175 buf = transport_kmap_data_sg(cmd);
4175 4176
4176 buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff); 4177 buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
4177 buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff); 4178 buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
@@ -4292,7 +4293,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
4292 buf[6] = ((add_len >> 8) & 0xff); 4293 buf[6] = ((add_len >> 8) & 0xff);
4293 buf[7] = (add_len & 0xff); 4294 buf[7] = (add_len & 0xff);
4294 4295
4295 transport_kunmap_first_data_page(cmd); 4296 transport_kunmap_data_sg(cmd);
4296 4297
4297 return 0; 4298 return 0;
4298} 4299}
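Editor's note: the REGISTER_AND_MOVE code above pulls a big-endian relative target port identifier (bytes 18-19) and TransportID length (bytes 20-23) out of the mapped parameter list, then checks the length against the allocated buffer. The open-coded shifts are equivalent to the helpers in this stand-alone sketch; the payload values are invented for the demo.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint16_t get_be16(const uint8_t *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

static uint32_t get_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  | (uint32_t)p[3];
}

int main(void)
{
	uint8_t buf[64];
	uint32_t data_length = 24 + 32;    /* header + TransportID payload */

	memset(buf, 0, sizeof(buf));
	buf[18] = 0x00; buf[19] = 0x07;    /* rtpi = 7 */
	buf[20] = 0x00; buf[21] = 0x00;
	buf[22] = 0x00; buf[23] = 0x20;    /* tid_len = 32 */

	uint16_t rtpi = get_be16(&buf[18]);
	uint32_t tid_len = get_be32(&buf[20]);

	printf("rtpi=%u tid_len=%u\n", rtpi, tid_len);
	if (tid_len + 24 != data_length)
		printf("Illegal tid_len, rejecting\n");
	else
		printf("tid_len matches allocated data_length\n");
	return 0;
}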
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index d35467d42e12..8d4def30e9e8 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -693,7 +693,7 @@ static int pscsi_transport_complete(struct se_task *task)
693 693
694 if (task->task_se_cmd->se_deve->lun_flags & 694 if (task->task_se_cmd->se_deve->lun_flags &
695 TRANSPORT_LUNFLAGS_READ_ONLY) { 695 TRANSPORT_LUNFLAGS_READ_ONLY) {
696 unsigned char *buf = transport_kmap_first_data_page(task->task_se_cmd); 696 unsigned char *buf = transport_kmap_data_sg(task->task_se_cmd);
697 697
698 if (cdb[0] == MODE_SENSE_10) { 698 if (cdb[0] == MODE_SENSE_10) {
699 if (!(buf[3] & 0x80)) 699 if (!(buf[3] & 0x80))
@@ -703,7 +703,7 @@ static int pscsi_transport_complete(struct se_task *task)
703 buf[2] |= 0x80; 703 buf[2] |= 0x80;
704 } 704 }
705 705
706 transport_kunmap_first_data_page(task->task_se_cmd); 706 transport_kunmap_data_sg(task->task_se_cmd);
707 } 707 }
708 } 708 }
709after_mode_sense: 709after_mode_sense:
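Editor's note: the pscsi hunk forces the write-protect bit in MODE SENSE responses for read-only LUN mappings. The device-specific parameter byte sits at offset 3 for MODE SENSE(10) and offset 2 for the 6-byte command, with WP in bit 7. A stand-alone version of that fixup:

#include <stdio.h>
#include <stdint.h>

#define MODE_SENSE_6	0x1a
#define MODE_SENSE_10	0x5a

static void force_write_protect(uint8_t opcode, uint8_t *buf)
{
	if (opcode == MODE_SENSE_10) {
		if (!(buf[3] & 0x80))
			buf[3] |= 0x80;
	} else {
		if (!(buf[2] & 0x80))
			buf[2] |= 0x80;
	}
}

int main(void)
{
	uint8_t hdr6[4] = { 0 }, hdr10[8] = { 0 };

	force_write_protect(MODE_SENSE_6, hdr6);
	force_write_protect(MODE_SENSE_10, hdr10);
	printf("MODE SENSE(6)  dev-specific byte: %#x\n", hdr6[2]);
	printf("MODE SENSE(10) dev-specific byte: %#x\n", hdr10[3]);
	return 0;
}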
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index b7668029bb31..06336ecd872d 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -807,8 +807,7 @@ static void core_tpg_shutdown_lun(
807 807
808struct se_lun *core_tpg_pre_dellun( 808struct se_lun *core_tpg_pre_dellun(
809 struct se_portal_group *tpg, 809 struct se_portal_group *tpg,
810 u32 unpacked_lun, 810 u32 unpacked_lun)
811 int *ret)
812{ 811{
813 struct se_lun *lun; 812 struct se_lun *lun;
814 813
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index d3ddd1361949..58cea07b12fb 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1255,32 +1255,34 @@ static void core_setup_task_attr_emulation(struct se_device *dev)
1255static void scsi_dump_inquiry(struct se_device *dev) 1255static void scsi_dump_inquiry(struct se_device *dev)
1256{ 1256{
1257 struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn; 1257 struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
1258 char buf[17];
1258 int i, device_type; 1259 int i, device_type;
1259 /* 1260 /*
1260 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer 1261 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
1261 */ 1262 */
1262 pr_debug(" Vendor: ");
1263 for (i = 0; i < 8; i++) 1263 for (i = 0; i < 8; i++)
1264 if (wwn->vendor[i] >= 0x20) 1264 if (wwn->vendor[i] >= 0x20)
1265 pr_debug("%c", wwn->vendor[i]); 1265 buf[i] = wwn->vendor[i];
1266 else 1266 else
1267 pr_debug(" "); 1267 buf[i] = ' ';
1268 buf[i] = '\0';
1269 pr_debug(" Vendor: %s\n", buf);
1268 1270
1269 pr_debug(" Model: ");
1270 for (i = 0; i < 16; i++) 1271 for (i = 0; i < 16; i++)
1271 if (wwn->model[i] >= 0x20) 1272 if (wwn->model[i] >= 0x20)
1272 pr_debug("%c", wwn->model[i]); 1273 buf[i] = wwn->model[i];
1273 else 1274 else
1274 pr_debug(" "); 1275 buf[i] = ' ';
1276 buf[i] = '\0';
1277 pr_debug(" Model: %s\n", buf);
1275 1278
1276 pr_debug(" Revision: ");
1277 for (i = 0; i < 4; i++) 1279 for (i = 0; i < 4; i++)
1278 if (wwn->revision[i] >= 0x20) 1280 if (wwn->revision[i] >= 0x20)
1279 pr_debug("%c", wwn->revision[i]); 1281 buf[i] = wwn->revision[i];
1280 else 1282 else
1281 pr_debug(" "); 1283 buf[i] = ' ';
1282 1284 buf[i] = '\0';
1283 pr_debug("\n"); 1285 pr_debug(" Revision: %s\n", buf);
1284 1286
1285 device_type = dev->transport->get_device_type(dev); 1287 device_type = dev->transport->get_device_type(dev);
1286 pr_debug(" Type: %s ", scsi_device_type(device_type)); 1288 pr_debug(" Type: %s ", scsi_device_type(device_type));
@@ -1655,7 +1657,7 @@ EXPORT_SYMBOL(transport_handle_cdb_direct);
1655 * This may only be called from process context, and also currently 1657 * This may only be called from process context, and also currently
1656 * assumes internal allocation of fabric payload buffer by target-core. 1658 * assumes internal allocation of fabric payload buffer by target-core.
1657 **/ 1659 **/
1658int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, 1660void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
1659 unsigned char *cdb, unsigned char *sense, u32 unpacked_lun, 1661 unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
1660 u32 data_length, int task_attr, int data_dir, int flags) 1662 u32 data_length, int task_attr, int data_dir, int flags)
1661{ 1663{
@@ -1688,15 +1690,21 @@ int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
1688 /* 1690 /*
1689 * Locate se_lun pointer and attach it to struct se_cmd 1691 * Locate se_lun pointer and attach it to struct se_cmd
1690 */ 1692 */
1691 if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) 1693 if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) {
1692 goto out_check_cond; 1694 transport_send_check_condition_and_sense(se_cmd,
1695 se_cmd->scsi_sense_reason, 0);
1696 target_put_sess_cmd(se_sess, se_cmd);
1697 return;
1698 }
1693 /* 1699 /*
1694 * Sanitize CDBs via transport_generic_cmd_sequencer() and 1700 * Sanitize CDBs via transport_generic_cmd_sequencer() and
1695 * allocate the necessary tasks to complete the received CDB+data 1701 * allocate the necessary tasks to complete the received CDB+data
1696 */ 1702 */
1697 rc = transport_generic_allocate_tasks(se_cmd, cdb); 1703 rc = transport_generic_allocate_tasks(se_cmd, cdb);
1698 if (rc != 0) 1704 if (rc != 0) {
1699 goto out_check_cond; 1705 transport_generic_request_failure(se_cmd);
1706 return;
1707 }
1700 /* 1708 /*
1701 * Dispatch se_cmd descriptor to se_lun->lun_se_dev backend 1709 * Dispatch se_cmd descriptor to se_lun->lun_se_dev backend
1702 * for immediate execution of READs, otherwise wait for 1710 * for immediate execution of READs, otherwise wait for
@@ -1704,12 +1712,7 @@ int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
1704 * when fabric has filled the incoming buffer. 1712 * when fabric has filled the incoming buffer.
1705 */ 1713 */
1706 transport_handle_cdb_direct(se_cmd); 1714 transport_handle_cdb_direct(se_cmd);
1707 return 0; 1715 return;
1708
1709out_check_cond:
1710 transport_send_check_condition_and_sense(se_cmd,
1711 se_cmd->scsi_sense_reason, 0);
1712 return 0;
1713} 1716}
1714EXPORT_SYMBOL(target_submit_cmd); 1717EXPORT_SYMBOL(target_submit_cmd);
1715 1718
@@ -2694,7 +2697,7 @@ static int transport_generic_cmd_sequencer(
2694 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2697 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2695 2698
2696 if (target_check_write_same_discard(&cdb[10], dev) < 0) 2699 if (target_check_write_same_discard(&cdb[10], dev) < 0)
2697 goto out_invalid_cdb_field; 2700 goto out_unsupported_cdb;
2698 if (!passthrough) 2701 if (!passthrough)
2699 cmd->execute_task = target_emulate_write_same; 2702 cmd->execute_task = target_emulate_write_same;
2700 break; 2703 break;
@@ -2977,7 +2980,7 @@ static int transport_generic_cmd_sequencer(
2977 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2980 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2978 2981
2979 if (target_check_write_same_discard(&cdb[1], dev) < 0) 2982 if (target_check_write_same_discard(&cdb[1], dev) < 0)
2980 goto out_invalid_cdb_field; 2983 goto out_unsupported_cdb;
2981 if (!passthrough) 2984 if (!passthrough)
2982 cmd->execute_task = target_emulate_write_same; 2985 cmd->execute_task = target_emulate_write_same;
2983 break; 2986 break;
@@ -3000,7 +3003,7 @@ static int transport_generic_cmd_sequencer(
3000 * of byte 1 bit 3 UNMAP instead of original reserved field 3003 * of byte 1 bit 3 UNMAP instead of original reserved field
3001 */ 3004 */
3002 if (target_check_write_same_discard(&cdb[1], dev) < 0) 3005 if (target_check_write_same_discard(&cdb[1], dev) < 0)
3003 goto out_invalid_cdb_field; 3006 goto out_unsupported_cdb;
3004 if (!passthrough) 3007 if (!passthrough)
3005 cmd->execute_task = target_emulate_write_same; 3008 cmd->execute_task = target_emulate_write_same;
3006 break; 3009 break;
@@ -3082,11 +3085,6 @@ static int transport_generic_cmd_sequencer(
3082 (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB))) 3085 (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
3083 goto out_unsupported_cdb; 3086 goto out_unsupported_cdb;
3084 3087
3085 /* Let's limit control cdbs to a page, for simplicity's sake. */
3086 if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
3087 size > PAGE_SIZE)
3088 goto out_invalid_cdb_field;
3089
3090 transport_set_supported_SAM_opcode(cmd); 3088 transport_set_supported_SAM_opcode(cmd);
3091 return ret; 3089 return ret;
3092 3090
@@ -3490,9 +3488,11 @@ int transport_generic_map_mem_to_cmd(
3490} 3488}
3491EXPORT_SYMBOL(transport_generic_map_mem_to_cmd); 3489EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
3492 3490
3493void *transport_kmap_first_data_page(struct se_cmd *cmd) 3491void *transport_kmap_data_sg(struct se_cmd *cmd)
3494{ 3492{
3495 struct scatterlist *sg = cmd->t_data_sg; 3493 struct scatterlist *sg = cmd->t_data_sg;
3494 struct page **pages;
3495 int i;
3496 3496
3497 BUG_ON(!sg); 3497 BUG_ON(!sg);
3498 /* 3498 /*
@@ -3500,15 +3500,41 @@ void *transport_kmap_first_data_page(struct se_cmd *cmd)
3500 * tcm_loop who may be using a contig buffer from the SCSI midlayer for 3500 * tcm_loop who may be using a contig buffer from the SCSI midlayer for
3501 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd() 3501 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
3502 */ 3502 */
3503 return kmap(sg_page(sg)) + sg->offset; 3503 if (!cmd->t_data_nents)
3504 return NULL;
3505 else if (cmd->t_data_nents == 1)
3506 return kmap(sg_page(sg)) + sg->offset;
3507
3508 /* >1 page. use vmap */
3509 pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
3510 if (!pages)
3511 return NULL;
3512
3513 /* convert sg[] to pages[] */
3514 for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
3515 pages[i] = sg_page(sg);
3516 }
3517
3518 cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
3519 kfree(pages);
3520 if (!cmd->t_data_vmap)
3521 return NULL;
3522
3523 return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
3504} 3524}
3505EXPORT_SYMBOL(transport_kmap_first_data_page); 3525EXPORT_SYMBOL(transport_kmap_data_sg);
3506 3526
3507void transport_kunmap_first_data_page(struct se_cmd *cmd) 3527void transport_kunmap_data_sg(struct se_cmd *cmd)
3508{ 3528{
3509 kunmap(sg_page(cmd->t_data_sg)); 3529 if (!cmd->t_data_nents)
3530 return;
3531 else if (cmd->t_data_nents == 1)
3532 kunmap(sg_page(cmd->t_data_sg));
3533
3534 vunmap(cmd->t_data_vmap);
3535 cmd->t_data_vmap = NULL;
3510} 3536}
3511EXPORT_SYMBOL(transport_kunmap_first_data_page); 3537EXPORT_SYMBOL(transport_kunmap_data_sg);
3512 3538
3513static int 3539static int
3514transport_generic_get_mem(struct se_cmd *cmd) 3540transport_generic_get_mem(struct se_cmd *cmd)
@@ -3516,6 +3542,7 @@ transport_generic_get_mem(struct se_cmd *cmd)
3516 u32 length = cmd->data_length; 3542 u32 length = cmd->data_length;
3517 unsigned int nents; 3543 unsigned int nents;
3518 struct page *page; 3544 struct page *page;
3545 gfp_t zero_flag;
3519 int i = 0; 3546 int i = 0;
3520 3547
3521 nents = DIV_ROUND_UP(length, PAGE_SIZE); 3548 nents = DIV_ROUND_UP(length, PAGE_SIZE);
@@ -3526,9 +3553,11 @@ transport_generic_get_mem(struct se_cmd *cmd)
3526 cmd->t_data_nents = nents; 3553 cmd->t_data_nents = nents;
3527 sg_init_table(cmd->t_data_sg, nents); 3554 sg_init_table(cmd->t_data_sg, nents);
3528 3555
3556 zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB ? 0 : __GFP_ZERO;
3557
3529 while (length) { 3558 while (length) {
3530 u32 page_len = min_t(u32, length, PAGE_SIZE); 3559 u32 page_len = min_t(u32, length, PAGE_SIZE);
3531 page = alloc_page(GFP_KERNEL | __GFP_ZERO); 3560 page = alloc_page(GFP_KERNEL | zero_flag);
3532 if (!page) 3561 if (!page)
3533 goto out; 3562 goto out;
3534 3563
@@ -3756,6 +3785,11 @@ transport_allocate_control_task(struct se_cmd *cmd)
3756 struct se_task *task; 3785 struct se_task *task;
3757 unsigned long flags; 3786 unsigned long flags;
3758 3787
3788 /* Workaround for handling zero-length control CDBs */
3789 if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
3790 !cmd->data_length)
3791 return 0;
3792
3759 task = transport_generic_get_task(cmd, cmd->data_direction); 3793 task = transport_generic_get_task(cmd, cmd->data_direction);
3760 if (!task) 3794 if (!task)
3761 return -ENOMEM; 3795 return -ENOMEM;
@@ -3827,6 +3861,14 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
3827 else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) { 3861 else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
3828 cmd->t_state = TRANSPORT_COMPLETE; 3862 cmd->t_state = TRANSPORT_COMPLETE;
3829 atomic_set(&cmd->t_transport_active, 1); 3863 atomic_set(&cmd->t_transport_active, 1);
3864
3865 if (cmd->t_task_cdb[0] == REQUEST_SENSE) {
3866 u8 ua_asc = 0, ua_ascq = 0;
3867
3868 core_scsi3_ua_clear_for_request_sense(cmd,
3869 &ua_asc, &ua_ascq);
3870 }
3871
3830 INIT_WORK(&cmd->work, target_complete_ok_work); 3872 INIT_WORK(&cmd->work, target_complete_ok_work);
3831 queue_work(target_completion_wq, &cmd->work); 3873 queue_work(target_completion_wq, &cmd->work);
3832 return 0; 3874 return 0;
@@ -4448,8 +4490,8 @@ int transport_send_check_condition_and_sense(
4448 /* CURRENT ERROR */ 4490 /* CURRENT ERROR */
4449 buffer[offset] = 0x70; 4491 buffer[offset] = 0x70;
4450 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 4492 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4451 /* ABORTED COMMAND */ 4493 /* ILLEGAL REQUEST */
4452 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 4494 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4453 /* INVALID FIELD IN CDB */ 4495 /* INVALID FIELD IN CDB */
4454 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24; 4496 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
4455 break; 4497 break;
@@ -4457,8 +4499,8 @@ int transport_send_check_condition_and_sense(
4457 /* CURRENT ERROR */ 4499 /* CURRENT ERROR */
4458 buffer[offset] = 0x70; 4500 buffer[offset] = 0x70;
4459 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 4501 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4460 /* ABORTED COMMAND */ 4502 /* ILLEGAL REQUEST */
4461 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 4503 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4462 /* INVALID FIELD IN PARAMETER LIST */ 4504 /* INVALID FIELD IN PARAMETER LIST */
4463 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26; 4505 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
4464 break; 4506 break;
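Editor's note: among the transport changes, scsi_dump_inquiry() now collects each fixed-width INQUIRY field into a temporary buffer and prints it with one pr_debug() call, instead of one call per character, replacing non-printable bytes with spaces along the way. A stand-alone version of that loop; the sample field contents are placeholders.

#include <stdio.h>

static void dump_field(const char *label, const unsigned char *field, int len)
{
	char buf[17];   /* big enough for the 16-byte model field plus NUL */
	int i;

	for (i = 0; i < len; i++)
		buf[i] = (field[i] >= 0x20) ? field[i] : ' ';
	buf[i] = '\0';
	printf("  %s: %s\n", label, buf);
}

int main(void)
{
	const unsigned char vendor[]   = "LIO-ORG ";
	const unsigned char model[]    = "IBLOCK          ";
	const unsigned char revision[] = "4.0 ";

	dump_field("Vendor", vendor, 8);
	dump_field("Model", model, 16);
	dump_field("Revision", revision, 4);
	return 0;
}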
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index addc18f727ea..9e7e26c74c79 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -540,7 +540,6 @@ static void ft_send_work(struct work_struct *work)
540 int data_dir = 0; 540 int data_dir = 0;
541 u32 data_len; 541 u32 data_len;
542 int task_attr; 542 int task_attr;
543 int ret;
544 543
545 fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp)); 544 fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
546 if (!fcp) 545 if (!fcp)
@@ -603,14 +602,10 @@ static void ft_send_work(struct work_struct *work)
603 * Use a single se_cmd->cmd_kref as we expect to release se_cmd 602 * Use a single se_cmd->cmd_kref as we expect to release se_cmd
604 * directly from ft_check_stop_free callback in response path. 603 * directly from ft_check_stop_free callback in response path.
605 */ 604 */
606 ret = target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, cmd->cdb, 605 target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, cmd->cdb,
607 &cmd->ft_sense_buffer[0], cmd->lun, data_len, 606 &cmd->ft_sense_buffer[0], cmd->lun, data_len,
608 task_attr, data_dir, 0); 607 task_attr, data_dir, 0);
609 pr_debug("r_ctl %x alloc target_submit_cmd %d\n", fh->fh_r_ctl, ret); 608 pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl);
610 if (ret < 0) {
611 ft_dump_cmd(cmd, __func__);
612 return;
613 }
614 return; 609 return;
615 610
616err: 611err:
diff --git a/drivers/tty/serial/8250/8250.c b/drivers/tty/serial/8250/8250.c
index 9f50c4e3c2be..9b7336fcfbb3 100644
--- a/drivers/tty/serial/8250/8250.c
+++ b/drivers/tty/serial/8250/8250.c
@@ -45,7 +45,7 @@
45#include "8250.h" 45#include "8250.h"
46 46
47#ifdef CONFIG_SPARC 47#ifdef CONFIG_SPARC
48#include "suncore.h" 48#include "../suncore.h"
49#endif 49#endif
50 50
51/* 51/*
diff --git a/drivers/tty/serial/8250/m32r_sio.c b/drivers/tty/serial/m32r_sio.c
index 94a6792bf97b..94a6792bf97b 100644
--- a/drivers/tty/serial/8250/m32r_sio.c
+++ b/drivers/tty/serial/m32r_sio.c
diff --git a/drivers/tty/serial/8250/m32r_sio.h b/drivers/tty/serial/m32r_sio.h
index e9b7e11793b1..e9b7e11793b1 100644
--- a/drivers/tty/serial/8250/m32r_sio.h
+++ b/drivers/tty/serial/m32r_sio.h
diff --git a/drivers/tty/serial/8250/m32r_sio_reg.h b/drivers/tty/serial/m32r_sio_reg.h
index 4671473793e3..4671473793e3 100644
--- a/drivers/tty/serial/8250/m32r_sio_reg.h
+++ b/drivers/tty/serial/m32r_sio_reg.h
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 1c2426931484..f80904145fd4 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -46,6 +46,13 @@
46 46
47#define DEFAULT_CLK_SPEED 48000000 /* 48Mhz*/ 47#define DEFAULT_CLK_SPEED 48000000 /* 48Mhz*/
48 48
49/* SCR register bitmasks */
50#define OMAP_UART_SCR_RX_TRIG_GRANU1_MASK (1 << 7)
51
52/* FCR register bitmasks */
53#define OMAP_UART_FCR_RX_FIFO_TRIG_SHIFT 6
54#define OMAP_UART_FCR_RX_FIFO_TRIG_MASK (0x3 << 6)
55
49static struct uart_omap_port *ui[OMAP_MAX_HSUART_PORTS]; 56static struct uart_omap_port *ui[OMAP_MAX_HSUART_PORTS];
50 57
51/* Forward declaration of functions */ 58/* Forward declaration of functions */
@@ -129,6 +136,7 @@ static void serial_omap_enable_ms(struct uart_port *port)
129static void serial_omap_stop_tx(struct uart_port *port) 136static void serial_omap_stop_tx(struct uart_port *port)
130{ 137{
131 struct uart_omap_port *up = (struct uart_omap_port *)port; 138 struct uart_omap_port *up = (struct uart_omap_port *)port;
139 struct omap_uart_port_info *pdata = up->pdev->dev.platform_data;
132 140
133 if (up->use_dma && 141 if (up->use_dma &&
134 up->uart_dma.tx_dma_channel != OMAP_UART_DMA_CH_FREE) { 142 up->uart_dma.tx_dma_channel != OMAP_UART_DMA_CH_FREE) {
@@ -151,6 +159,9 @@ static void serial_omap_stop_tx(struct uart_port *port)
151 serial_out(up, UART_IER, up->ier); 159 serial_out(up, UART_IER, up->ier);
152 } 160 }
153 161
162 if (!up->use_dma && pdata->set_forceidle)
163 pdata->set_forceidle(up->pdev);
164
154 pm_runtime_mark_last_busy(&up->pdev->dev); 165 pm_runtime_mark_last_busy(&up->pdev->dev);
155 pm_runtime_put_autosuspend(&up->pdev->dev); 166 pm_runtime_put_autosuspend(&up->pdev->dev);
156} 167}
@@ -279,6 +290,7 @@ static inline void serial_omap_enable_ier_thri(struct uart_omap_port *up)
279static void serial_omap_start_tx(struct uart_port *port) 290static void serial_omap_start_tx(struct uart_port *port)
280{ 291{
281 struct uart_omap_port *up = (struct uart_omap_port *)port; 292 struct uart_omap_port *up = (struct uart_omap_port *)port;
293 struct omap_uart_port_info *pdata = up->pdev->dev.platform_data;
282 struct circ_buf *xmit; 294 struct circ_buf *xmit;
283 unsigned int start; 295 unsigned int start;
284 int ret = 0; 296 int ret = 0;
@@ -286,6 +298,8 @@ static void serial_omap_start_tx(struct uart_port *port)
286 if (!up->use_dma) { 298 if (!up->use_dma) {
287 pm_runtime_get_sync(&up->pdev->dev); 299 pm_runtime_get_sync(&up->pdev->dev);
288 serial_omap_enable_ier_thri(up); 300 serial_omap_enable_ier_thri(up);
301 if (pdata->set_noidle)
302 pdata->set_noidle(up->pdev);
289 pm_runtime_mark_last_busy(&up->pdev->dev); 303 pm_runtime_mark_last_busy(&up->pdev->dev);
290 pm_runtime_put_autosuspend(&up->pdev->dev); 304 pm_runtime_put_autosuspend(&up->pdev->dev);
291 return; 305 return;
@@ -726,8 +740,7 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
726 quot = serial_omap_get_divisor(port, baud); 740 quot = serial_omap_get_divisor(port, baud);
727 741
728 /* calculate wakeup latency constraint */ 742 /* calculate wakeup latency constraint */
729 up->calc_latency = (1000000 * up->port.fifosize) / 743 up->calc_latency = (USEC_PER_SEC * up->port.fifosize) / (baud / 8);
730 (1000 * baud / 8);
731 up->latency = up->calc_latency; 744 up->latency = up->calc_latency;
732 schedule_work(&up->qos_work); 745 schedule_work(&up->qos_work);
733 746
@@ -811,14 +824,21 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
811 up->mcr = serial_in(up, UART_MCR); 824 up->mcr = serial_in(up, UART_MCR);
812 serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR); 825 serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);
813 /* FIFO ENABLE, DMA MODE */ 826 /* FIFO ENABLE, DMA MODE */
814 serial_out(up, UART_FCR, up->fcr); 827
815 serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); 828 up->scr |= OMAP_UART_SCR_RX_TRIG_GRANU1_MASK;
816 829
817 if (up->use_dma) { 830 if (up->use_dma) {
818 serial_out(up, UART_TI752_TLR, 0); 831 serial_out(up, UART_TI752_TLR, 0);
819 up->scr |= (UART_FCR_TRIGGER_4 | UART_FCR_TRIGGER_8); 832 up->scr |= UART_FCR_TRIGGER_4;
833 } else {
834 /* Set receive FIFO threshold to 1 byte */
835 up->fcr &= ~OMAP_UART_FCR_RX_FIFO_TRIG_MASK;
836 up->fcr |= (0x1 << OMAP_UART_FCR_RX_FIFO_TRIG_SHIFT);
820 } 837 }
821 838
839 serial_out(up, UART_FCR, up->fcr);
840 serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
841
822 serial_out(up, UART_OMAP_SCR, up->scr); 842 serial_out(up, UART_OMAP_SCR, up->scr);
823 843
824 serial_out(up, UART_EFR, up->efr); 844 serial_out(up, UART_EFR, up->efr);
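Editor's note: the omap-serial change programs the RX FIFO trigger level by clearing the two-bit field at FCR[7:6] and writing the new value, using the masks added at the top of the file. The same read-modify-write pattern in a stand-alone sketch:

#include <stdio.h>
#include <stdint.h>

#define OMAP_UART_FCR_RX_FIFO_TRIG_SHIFT	6
#define OMAP_UART_FCR_RX_FIFO_TRIG_MASK		(0x3 << 6)

static uint8_t set_rx_trigger(uint8_t fcr, uint8_t level)
{
	fcr &= ~OMAP_UART_FCR_RX_FIFO_TRIG_MASK;               /* clear old level */
	fcr |= (level << OMAP_UART_FCR_RX_FIFO_TRIG_SHIFT) &
	       OMAP_UART_FCR_RX_FIFO_TRIG_MASK;                /* set new level  */
	return fcr;
}

int main(void)
{
	uint8_t fcr = 0xc1;             /* FIFO enabled, trigger currently 0x3 */

	fcr = set_rx_trigger(fcr, 0x1); /* 1-byte threshold, as in the patch */
	printf("FCR = %#04x\n", fcr);   /* expect 0x41 */
	return 0;
}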
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index f96f37b5fec6..c55e5fb16fa3 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -1593,7 +1593,8 @@ static struct s3c24xx_serial_drv_data s5pv210_serial_drv_data = {
1593#define S5PV210_SERIAL_DRV_DATA (kernel_ulong_t)NULL 1593#define S5PV210_SERIAL_DRV_DATA (kernel_ulong_t)NULL
1594#endif 1594#endif
1595 1595
1596#ifdef CONFIG_CPU_EXYNOS4210 1596#if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS4212) || \
1597 defined(CONFIG_SOC_EXYNOS4412) || defined(CONFIG_SOC_EXYNOS5250)
1597static struct s3c24xx_serial_drv_data exynos4210_serial_drv_data = { 1598static struct s3c24xx_serial_drv_data exynos4210_serial_drv_data = {
1598 .info = &(struct s3c24xx_uart_info) { 1599 .info = &(struct s3c24xx_uart_info) {
1599 .name = "Samsung Exynos4 UART", 1600 .name = "Samsung Exynos4 UART",
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index 5e096f43bcea..65447c5f91d7 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -1463,7 +1463,6 @@ compat_kdfontop_ioctl(struct compat_console_font_op __user *fontop,
1463 if (!perm && op->op != KD_FONT_OP_GET) 1463 if (!perm && op->op != KD_FONT_OP_GET)
1464 return -EPERM; 1464 return -EPERM;
1465 op->data = compat_ptr(((struct compat_console_font_op *)op)->data); 1465 op->data = compat_ptr(((struct compat_console_font_op *)op)->data);
1466 op->flags |= KD_FONT_FLAG_OLD;
1467 i = con_font_op(vc, op); 1466 i = con_font_op(vc, op);
1468 if (i) 1467 if (i)
1469 return i; 1468 return i;
diff --git a/drivers/usb/gadget/f_loopback.c b/drivers/usb/gadget/f_loopback.c
index 6d87f288df4e..2c0cd824c667 100644
--- a/drivers/usb/gadget/f_loopback.c
+++ b/drivers/usb/gadget/f_loopback.c
@@ -418,7 +418,7 @@ int __init loopback_add(struct usb_composite_dev *cdev, bool autoresume)
418 418
419 /* support autoresume for remote wakeup testing */ 419 /* support autoresume for remote wakeup testing */
420 if (autoresume) 420 if (autoresume)
421 sourcesink_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP; 421 loopback_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
422 422
423 /* support OTG systems */ 423 /* support OTG systems */
424 if (gadget_is_otg(cdev->gadget)) { 424 if (gadget_is_otg(cdev->gadget)) {
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 91413cac97be..353cdd488b93 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -130,7 +130,7 @@ config USB_FSL_MPH_DR_OF
130 tristate 130 tristate
131 131
132config USB_EHCI_FSL 132config USB_EHCI_FSL
133 bool "Support for Freescale on-chip EHCI USB controller" 133 bool "Support for Freescale PPC on-chip EHCI USB controller"
134 depends on USB_EHCI_HCD && FSL_SOC 134 depends on USB_EHCI_HCD && FSL_SOC
135 select USB_EHCI_ROOT_HUB_TT 135 select USB_EHCI_ROOT_HUB_TT
136 select USB_FSL_MPH_DR_OF if OF 136 select USB_FSL_MPH_DR_OF if OF
@@ -138,7 +138,7 @@ config USB_EHCI_FSL
138 Variation of ARC USB block used in some Freescale chips. 138 Variation of ARC USB block used in some Freescale chips.
139 139
140config USB_EHCI_MXC 140config USB_EHCI_MXC
141 bool "Support for Freescale on-chip EHCI USB controller" 141 bool "Support for Freescale i.MX on-chip EHCI USB controller"
142 depends on USB_EHCI_HCD && ARCH_MXC 142 depends on USB_EHCI_HCD && ARCH_MXC
143 select USB_EHCI_ROOT_HUB_TT 143 select USB_EHCI_ROOT_HUB_TT
144 ---help--- 144 ---help---
@@ -546,7 +546,7 @@ config USB_RENESAS_USBHS_HCD
546config USB_WHCI_HCD 546config USB_WHCI_HCD
547 tristate "Wireless USB Host Controller Interface (WHCI) driver (EXPERIMENTAL)" 547 tristate "Wireless USB Host Controller Interface (WHCI) driver (EXPERIMENTAL)"
548 depends on EXPERIMENTAL 548 depends on EXPERIMENTAL
549 depends on PCI && USB 549 depends on PCI && USB && UWB
550 select USB_WUSB 550 select USB_WUSB
551 select UWB_WHCI 551 select UWB_WHCI
552 help 552 help
@@ -559,7 +559,7 @@ config USB_WHCI_HCD
559config USB_HWA_HCD 559config USB_HWA_HCD
560 tristate "Host Wire Adapter (HWA) driver (EXPERIMENTAL)" 560 tristate "Host Wire Adapter (HWA) driver (EXPERIMENTAL)"
561 depends on EXPERIMENTAL 561 depends on EXPERIMENTAL
562 depends on USB 562 depends on USB && UWB
563 select USB_WUSB 563 select USB_WUSB
564 select UWB_HWA 564 select UWB_HWA
565 help 565 help
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index b556a72264d1..c26a82e83f6e 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -239,7 +239,7 @@ static void ehci_fsl_setup_phy(struct ehci_hcd *ehci,
239 ehci_writel(ehci, portsc, &ehci->regs->port_status[port_offset]); 239 ehci_writel(ehci, portsc, &ehci->regs->port_status[port_offset]);
240} 240}
241 241
242static void ehci_fsl_usb_setup(struct ehci_hcd *ehci) 242static int ehci_fsl_usb_setup(struct ehci_hcd *ehci)
243{ 243{
244 struct usb_hcd *hcd = ehci_to_hcd(ehci); 244 struct usb_hcd *hcd = ehci_to_hcd(ehci);
245 struct fsl_usb2_platform_data *pdata; 245 struct fsl_usb2_platform_data *pdata;
@@ -299,12 +299,19 @@ static void ehci_fsl_usb_setup(struct ehci_hcd *ehci)
299#endif 299#endif
300 out_be32(non_ehci + FSL_SOC_USB_SICTRL, 0x00000001); 300 out_be32(non_ehci + FSL_SOC_USB_SICTRL, 0x00000001);
301 } 301 }
302
303 if (!(in_be32(non_ehci + FSL_SOC_USB_CTRL) & CTRL_PHY_CLK_VALID)) {
304 printk(KERN_WARNING "fsl-ehci: USB PHY clock invalid\n");
305 return -ENODEV;
306 }
307 return 0;
302} 308}
303 309
304/* called after powerup, by probe or system-pm "wakeup" */ 310/* called after powerup, by probe or system-pm "wakeup" */
305static int ehci_fsl_reinit(struct ehci_hcd *ehci) 311static int ehci_fsl_reinit(struct ehci_hcd *ehci)
306{ 312{
307 ehci_fsl_usb_setup(ehci); 313 if (ehci_fsl_usb_setup(ehci))
314 return -ENODEV;
308 ehci_port_power(ehci, 0); 315 ehci_port_power(ehci, 0);
309 316
310 return 0; 317 return 0;
diff --git a/drivers/usb/host/ehci-fsl.h b/drivers/usb/host/ehci-fsl.h
index 491806221165..bdf43e2adc51 100644
--- a/drivers/usb/host/ehci-fsl.h
+++ b/drivers/usb/host/ehci-fsl.h
@@ -45,5 +45,6 @@
45#define FSL_SOC_USB_PRICTRL 0x40c /* NOTE: big-endian */ 45#define FSL_SOC_USB_PRICTRL 0x40c /* NOTE: big-endian */
46#define FSL_SOC_USB_SICTRL 0x410 /* NOTE: big-endian */ 46#define FSL_SOC_USB_SICTRL 0x410 /* NOTE: big-endian */
47#define FSL_SOC_USB_CTRL 0x500 /* NOTE: big-endian */ 47#define FSL_SOC_USB_CTRL 0x500 /* NOTE: big-endian */
48#define CTRL_PHY_CLK_VALID (1 << 17)
48#define SNOOP_SIZE_2GB 0x1e 49#define SNOOP_SIZE_2GB 0x1e
49#endif /* _EHCI_FSL_H */ 50#endif /* _EHCI_FSL_H */
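Editor's note: taken together, the two ehci-fsl hunks turn the setup helper into a function that can fail: if the PHY clock-valid bit (bit 17 of the USB control register) is clear, probing aborts with -ENODEV instead of continuing with a dead PHY. A stand-alone sketch of that check against a fake register value:

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define CTRL_PHY_CLK_VALID	(1 << 17)

static int check_phy_clock(uint32_t usb_ctrl)
{
	if (!(usb_ctrl & CTRL_PHY_CLK_VALID)) {
		fprintf(stderr, "fsl-ehci: USB PHY clock invalid\n");
		return -ENODEV;
	}
	return 0;
}

int main(void)
{
	printf("clock valid:   %d\n", check_phy_clock(0x00020000));
	printf("clock missing: %d\n", check_phy_clock(0x00000000));
	return 0;
}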
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index caf87428ca43..ac53a662a6a3 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -867,6 +867,12 @@ hc_init:
867 867
868static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev) 868static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
869{ 869{
870 /* Skip Netlogic mips SoC's internal PCI USB controller.
871 * This device does not need/support EHCI/OHCI handoff
872 */
873 if (pdev->vendor == 0x184e) /* vendor Netlogic */
874 return;
875
870 if (pdev->class == PCI_CLASS_SERIAL_USB_UHCI) 876 if (pdev->class == PCI_CLASS_SERIAL_USB_UHCI)
871 quirk_usb_handoff_uhci(pdev); 877 quirk_usb_handoff_uhci(pdev);
872 else if (pdev->class == PCI_CLASS_SERIAL_USB_OHCI) 878 else if (pdev->class == PCI_CLASS_SERIAL_USB_OHCI)
diff --git a/drivers/usb/musb/musb_io.h b/drivers/usb/musb/musb_io.h
index e61aa95f2d2a..1d5eda26fbd1 100644
--- a/drivers/usb/musb/musb_io.h
+++ b/drivers/usb/musb/musb_io.h
@@ -39,7 +39,8 @@
 
 #if !defined(CONFIG_ARM) && !defined(CONFIG_SUPERH) \
 	&& !defined(CONFIG_AVR32) && !defined(CONFIG_PPC32) \
-	&& !defined(CONFIG_PPC64) && !defined(CONFIG_BLACKFIN)
+	&& !defined(CONFIG_PPC64) && !defined(CONFIG_BLACKFIN) \
+	&& !defined(CONFIG_MIPS)
 static inline void readsl(const void __iomem *addr, void *buf, int len)
 	{ insl((unsigned long)addr, buf, len); }
 static inline void readsw(const void __iomem *addr, void *buf, int len)
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index 76d629345418..735ef4c2339a 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -118,7 +118,7 @@ config FSL_USB2_OTG
 
 config USB_MV_OTG
 	tristate "Marvell USB OTG support"
-	depends on USB_MV_UDC && USB_SUSPEND
+	depends on USB_EHCI_MV && USB_MV_UDC && USB_SUSPEND
 	select USB_OTG
 	select USB_OTG_UTILS
 	help
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index ad654f8208ef..f770415305f8 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -839,6 +839,7 @@ static struct usb_device_id id_table_combined [] = {
839 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LOGBOOKML_PID) }, 839 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LOGBOOKML_PID) },
840 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) }, 840 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) },
841 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) }, 841 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) },
842 { USB_DEVICE(FTDI_VID, FTDI_CINTERION_MC55I_PID) },
842 { USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) }, 843 { USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) },
843 { USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID), 844 { USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID),
844 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 845 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index f994503df2dd..6f6058f0db1b 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1187,3 +1187,10 @@
1187 */ 1187 */
1188/* ZigBee controller */ 1188/* ZigBee controller */
1189#define FTDI_RF_R106 0x8A28 1189#define FTDI_RF_R106 0x8A28
1190
1191/*
1192 * Product: HCP HIT GPRS modem
1193 * Manufacturer: HCP d.o.o.
1194 * ATI command output: Cinterion MC55i
1195 */
1196#define FTDI_CINTERION_MC55I_PID 0xA951
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index ea126a4490cd..39ed1f46cec0 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -855,6 +855,18 @@ static const struct usb_device_id option_ids[] = {
855 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0083, 0xff, 0xff, 0xff) }, 855 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0083, 0xff, 0xff, 0xff) },
856 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0086, 0xff, 0xff, 0xff) }, 856 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0086, 0xff, 0xff, 0xff) },
857 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0087, 0xff, 0xff, 0xff) }, 857 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0087, 0xff, 0xff, 0xff) },
858 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0088, 0xff, 0xff, 0xff) },
859 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0089, 0xff, 0xff, 0xff) },
860 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0090, 0xff, 0xff, 0xff) },
861 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0091, 0xff, 0xff, 0xff) },
862 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0092, 0xff, 0xff, 0xff) },
863 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0093, 0xff, 0xff, 0xff) },
864 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) },
865 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0095, 0xff, 0xff, 0xff) },
866 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0096, 0xff, 0xff, 0xff) },
867 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0097, 0xff, 0xff, 0xff) },
868 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0098, 0xff, 0xff, 0xff) },
869 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0099, 0xff, 0xff, 0xff) },
858 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff), 870 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff),
859 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 871 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
860 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0105, 0xff, 0xff, 0xff) }, 872 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0105, 0xff, 0xff, 0xff) },
@@ -883,7 +895,6 @@ static const struct usb_device_id option_ids[] = {
883 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0151, 0xff, 0xff, 0xff) }, 895 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0151, 0xff, 0xff, 0xff) },
884 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) }, 896 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) },
885 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0153, 0xff, 0xff, 0xff) }, 897 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0153, 0xff, 0xff, 0xff) },
886 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0154, 0xff, 0xff, 0xff) },
887 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0155, 0xff, 0xff, 0xff) }, 898 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0155, 0xff, 0xff, 0xff) },
888 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) }, 899 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) },
889 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff) }, 900 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff) },
@@ -892,6 +903,12 @@ static const struct usb_device_id option_ids[] = {
892 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0160, 0xff, 0xff, 0xff) }, 903 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0160, 0xff, 0xff, 0xff) },
893 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) }, 904 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
894 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) }, 905 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) },
906 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0164, 0xff, 0xff, 0xff) },
907 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
908 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff) },
909 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0170, 0xff, 0xff, 0xff) },
910 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff) },
911 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff) },
895 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff) }, 912 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff) },
896 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff) }, 913 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff) },
897 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) }, 914 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) },
@@ -1066,6 +1083,116 @@ static const struct usb_device_id option_ids[] = {
1066 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) }, 1083 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) },
1067 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) }, 1084 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) },
1068 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) }, 1085 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) },
1086 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1401, 0xff, 0xff, 0xff) },
1087 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff) },
1088 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1403, 0xff, 0xff, 0xff) },
1089 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1404, 0xff, 0xff, 0xff) },
1090 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1405, 0xff, 0xff, 0xff) },
1091 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1406, 0xff, 0xff, 0xff) },
1092 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1407, 0xff, 0xff, 0xff) },
1093 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1408, 0xff, 0xff, 0xff) },
1094 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1409, 0xff, 0xff, 0xff) },
1095 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1410, 0xff, 0xff, 0xff) },
1096 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1411, 0xff, 0xff, 0xff) },
1097 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1412, 0xff, 0xff, 0xff) },
1098 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1413, 0xff, 0xff, 0xff) },
1099 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1414, 0xff, 0xff, 0xff) },
1100 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1415, 0xff, 0xff, 0xff) },
1101 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1416, 0xff, 0xff, 0xff) },
1102 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1417, 0xff, 0xff, 0xff) },
1103 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1418, 0xff, 0xff, 0xff) },
1104 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1419, 0xff, 0xff, 0xff) },
1105 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1420, 0xff, 0xff, 0xff) },
1106 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1421, 0xff, 0xff, 0xff) },
1107 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1422, 0xff, 0xff, 0xff) },
1108 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1423, 0xff, 0xff, 0xff) },
1109 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1424, 0xff, 0xff, 0xff) },
1110 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1425, 0xff, 0xff, 0xff) },
1111 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff) },
1112 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1427, 0xff, 0xff, 0xff) },
1113 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff) },
1114 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1429, 0xff, 0xff, 0xff) },
1115 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1430, 0xff, 0xff, 0xff) },
1116 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1431, 0xff, 0xff, 0xff) },
1117 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1432, 0xff, 0xff, 0xff) },
1118 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1433, 0xff, 0xff, 0xff) },
1119 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1434, 0xff, 0xff, 0xff) },
1120 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1435, 0xff, 0xff, 0xff) },
1121 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1436, 0xff, 0xff, 0xff) },
1122 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1437, 0xff, 0xff, 0xff) },
1123 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1438, 0xff, 0xff, 0xff) },
1124 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1439, 0xff, 0xff, 0xff) },
1125 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1440, 0xff, 0xff, 0xff) },
1126 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1441, 0xff, 0xff, 0xff) },
1127 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1442, 0xff, 0xff, 0xff) },
1128 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1443, 0xff, 0xff, 0xff) },
1129 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1444, 0xff, 0xff, 0xff) },
1130 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1445, 0xff, 0xff, 0xff) },
1131 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1446, 0xff, 0xff, 0xff) },
1132 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1447, 0xff, 0xff, 0xff) },
1133 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1448, 0xff, 0xff, 0xff) },
1134 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1449, 0xff, 0xff, 0xff) },
1135 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1450, 0xff, 0xff, 0xff) },
1136 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1451, 0xff, 0xff, 0xff) },
1137 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1452, 0xff, 0xff, 0xff) },
1138 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1453, 0xff, 0xff, 0xff) },
1139 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1454, 0xff, 0xff, 0xff) },
1140 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1455, 0xff, 0xff, 0xff) },
1141 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1456, 0xff, 0xff, 0xff) },
1142 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1457, 0xff, 0xff, 0xff) },
1143 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1458, 0xff, 0xff, 0xff) },
1144 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1459, 0xff, 0xff, 0xff) },
1145 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1460, 0xff, 0xff, 0xff) },
1146 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1461, 0xff, 0xff, 0xff) },
1147 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1462, 0xff, 0xff, 0xff) },
1148 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1463, 0xff, 0xff, 0xff) },
1149 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1464, 0xff, 0xff, 0xff) },
1150 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1465, 0xff, 0xff, 0xff) },
1151 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1466, 0xff, 0xff, 0xff) },
1152 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1467, 0xff, 0xff, 0xff) },
1153 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1468, 0xff, 0xff, 0xff) },
1154 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1469, 0xff, 0xff, 0xff) },
1155 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1470, 0xff, 0xff, 0xff) },
1156 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1471, 0xff, 0xff, 0xff) },
1157 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1472, 0xff, 0xff, 0xff) },
1158 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1473, 0xff, 0xff, 0xff) },
1159 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1474, 0xff, 0xff, 0xff) },
1160 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1475, 0xff, 0xff, 0xff) },
1161 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1476, 0xff, 0xff, 0xff) },
1162 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1477, 0xff, 0xff, 0xff) },
1163 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1478, 0xff, 0xff, 0xff) },
1164 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1479, 0xff, 0xff, 0xff) },
1165 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1480, 0xff, 0xff, 0xff) },
1166 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0xff, 0xff) },
1167 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1482, 0xff, 0xff, 0xff) },
1168 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1483, 0xff, 0xff, 0xff) },
1169 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1484, 0xff, 0xff, 0xff) },
1170 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1485, 0xff, 0xff, 0xff) },
1171 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1486, 0xff, 0xff, 0xff) },
1172 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1487, 0xff, 0xff, 0xff) },
1173 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1488, 0xff, 0xff, 0xff) },
1174 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1489, 0xff, 0xff, 0xff) },
1175 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1490, 0xff, 0xff, 0xff) },
1176 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1491, 0xff, 0xff, 0xff) },
1177 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1492, 0xff, 0xff, 0xff) },
1178 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1493, 0xff, 0xff, 0xff) },
1179 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1494, 0xff, 0xff, 0xff) },
1180 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1495, 0xff, 0xff, 0xff) },
1181 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1496, 0xff, 0xff, 0xff) },
1182 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1497, 0xff, 0xff, 0xff) },
1183 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1498, 0xff, 0xff, 0xff) },
1184 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1499, 0xff, 0xff, 0xff) },
1185 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1500, 0xff, 0xff, 0xff) },
1186 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1501, 0xff, 0xff, 0xff) },
1187 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1502, 0xff, 0xff, 0xff) },
1188 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1503, 0xff, 0xff, 0xff) },
1189 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1504, 0xff, 0xff, 0xff) },
1190 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1505, 0xff, 0xff, 0xff) },
1191 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1506, 0xff, 0xff, 0xff) },
1192 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1507, 0xff, 0xff, 0xff) },
1193 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1508, 0xff, 0xff, 0xff) },
1194 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1509, 0xff, 0xff, 0xff) },
1195 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1510, 0xff, 0xff, 0xff) },
1069 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */ 1196 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */
1070 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) }, 1197 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) },
1071 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) }, 1198 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) },
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 1d5deee3be52..f98800f2324c 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -36,6 +36,11 @@ static const struct usb_device_id id_table[] = {
36 {USB_DEVICE(0x413c, 0x8171)}, /* Dell Gobi QDL device */ 36 {USB_DEVICE(0x413c, 0x8171)}, /* Dell Gobi QDL device */
37 {USB_DEVICE(0x1410, 0xa001)}, /* Novatel Gobi Modem device */ 37 {USB_DEVICE(0x1410, 0xa001)}, /* Novatel Gobi Modem device */
38 {USB_DEVICE(0x1410, 0xa008)}, /* Novatel Gobi QDL device */ 38 {USB_DEVICE(0x1410, 0xa008)}, /* Novatel Gobi QDL device */
39 {USB_DEVICE(0x1410, 0xa010)}, /* Novatel Gobi QDL device */
40 {USB_DEVICE(0x1410, 0xa011)}, /* Novatel Gobi QDL device */
41 {USB_DEVICE(0x1410, 0xa012)}, /* Novatel Gobi QDL device */
42 {USB_DEVICE(0x1410, 0xa013)}, /* Novatel Gobi QDL device */
43 {USB_DEVICE(0x1410, 0xa014)}, /* Novatel Gobi QDL device */
39 {USB_DEVICE(0x0b05, 0x1776)}, /* Asus Gobi Modem device */ 44 {USB_DEVICE(0x0b05, 0x1776)}, /* Asus Gobi Modem device */
40 {USB_DEVICE(0x0b05, 0x1774)}, /* Asus Gobi QDL device */ 45 {USB_DEVICE(0x0b05, 0x1774)}, /* Asus Gobi QDL device */
41 {USB_DEVICE(0x19d2, 0xfff3)}, /* ONDA Gobi Modem device */ 46 {USB_DEVICE(0x19d2, 0xfff3)}, /* ONDA Gobi Modem device */
@@ -86,7 +91,16 @@ static const struct usb_device_id id_table[] = {
86 {USB_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */ 91 {USB_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */
87 {USB_DEVICE(0x05c6, 0x9204)}, /* Gobi 2000 QDL device */ 92 {USB_DEVICE(0x05c6, 0x9204)}, /* Gobi 2000 QDL device */
88 {USB_DEVICE(0x05c6, 0x9205)}, /* Gobi 2000 Modem device */ 93 {USB_DEVICE(0x05c6, 0x9205)}, /* Gobi 2000 Modem device */
94
95 {USB_DEVICE(0x05c6, 0x920c)}, /* Gobi 3000 QDL */
96 {USB_DEVICE(0x05c6, 0x920d)}, /* Gobi 3000 Composite */
97 {USB_DEVICE(0x1410, 0xa020)}, /* Novatel Gobi 3000 QDL */
98 {USB_DEVICE(0x1410, 0xa021)}, /* Novatel Gobi 3000 Composite */
99 {USB_DEVICE(0x413c, 0x8193)}, /* Dell Gobi 3000 QDL */
100 {USB_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */
89 {USB_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */ 101 {USB_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */
102 {USB_DEVICE(0x12D1, 0x14F0)}, /* Sony Gobi 3000 QDL */
103 {USB_DEVICE(0x12D1, 0x14F1)}, /* Sony Gobi 3000 Composite */
90 { } /* Terminating entry */ 104 { } /* Terminating entry */
91}; 105};
92MODULE_DEVICE_TABLE(usb, id_table); 106MODULE_DEVICE_TABLE(usb, id_table);
@@ -123,8 +137,6 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
123 137
124 spin_lock_init(&data->susp_lock); 138 spin_lock_init(&data->susp_lock);
125 139
126 usb_enable_autosuspend(serial->dev);
127
128 switch (nintf) { 140 switch (nintf) {
129 case 1: 141 case 1:
130 /* QDL mode */ 142 /* QDL mode */
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 0d7b20d4285d..e40c00f2c2ba 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -1108,7 +1108,7 @@ static int atmel_lcdfb_suspend(struct platform_device *pdev, pm_message_t mesg)
 	 */
 	lcdc_writel(sinfo, ATMEL_LCDC_IDR, ~0UL);
 
-	sinfo->saved_lcdcon = lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
+	sinfo->saved_lcdcon = lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_CTR);
 	lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR, 0);
 	if (sinfo->atmel_lcdfb_power_control)
 		sinfo->atmel_lcdfb_power_control(0);
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
index acf292bfba02..6af3f16754f0 100644
--- a/drivers/video/fsl-diu-fb.c
+++ b/drivers/video/fsl-diu-fb.c
@@ -1432,7 +1432,7 @@ static int fsl_diu_suspend(struct platform_device *ofdev, pm_message_t state)
 	struct fsl_diu_data *data;
 
 	data = dev_get_drvdata(&ofdev->dev);
-	disable_lcdc(data->fsl_diu_info[0]);
+	disable_lcdc(data->fsl_diu_info);
 
 	return 0;
 }
@@ -1442,7 +1442,7 @@ static int fsl_diu_resume(struct platform_device *ofdev)
 	struct fsl_diu_data *data;
 
 	data = dev_get_drvdata(&ofdev->dev);
-	enable_lcdc(data->fsl_diu_info[0]);
+	enable_lcdc(data->fsl_diu_info);
 
 	return 0;
 }
diff --git a/drivers/video/intelfb/intelfbdrv.c b/drivers/video/intelfb/intelfbdrv.c
index c6afa33a4532..02fd2263610c 100644
--- a/drivers/video/intelfb/intelfbdrv.c
+++ b/drivers/video/intelfb/intelfbdrv.c
@@ -529,7 +529,6 @@ static int __devinit intelfb_pci_register(struct pci_dev *pdev,
 	if (fb_alloc_cmap(&info->cmap, 256, 1) < 0) {
 		ERR_MSG("Could not allocate cmap for intelfb_info.\n");
 		goto err_out_cmap;
-		return -ENODEV;
 	}
 
 	dinfo = info->par;
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index a5ec7f37c185..e1626a1d5c45 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -401,7 +401,7 @@ void dispc_runtime_put(void)
 
 	DSSDBG("dispc_runtime_put\n");
 
-	r = pm_runtime_put(&dispc.pdev->dev);
+	r = pm_runtime_put_sync(&dispc.pdev->dev);
 	WARN_ON(r < 0);
 }
 
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
index 395d658a94fc..faaf305fda27 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/omap2/dss/dpi.c
@@ -180,6 +180,11 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
180{ 180{
181 int r; 181 int r;
182 182
183 if (cpu_is_omap34xx() && !dpi.vdds_dsi_reg) {
184 DSSERR("no VDSS_DSI regulator\n");
185 return -ENODEV;
186 }
187
183 if (dssdev->manager == NULL) { 188 if (dssdev->manager == NULL) {
184 DSSERR("failed to enable display: no manager\n"); 189 DSSERR("failed to enable display: no manager\n");
185 return -ENODEV; 190 return -ENODEV;
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index d4d676c82c12..52f36ec1c8bb 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -1079,7 +1079,7 @@ void dsi_runtime_put(struct platform_device *dsidev)
 
 	DSSDBG("dsi_runtime_put\n");
 
-	r = pm_runtime_put(&dsi->pdev->dev);
+	r = pm_runtime_put_sync(&dsi->pdev->dev);
 	WARN_ON(r < 0);
 }
 
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
index 17033457ee89..77c2b5a32b5d 100644
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/omap2/dss/dss.c
@@ -720,7 +720,7 @@ void dss_runtime_put(void)
 
 	DSSDBG("dss_runtime_put\n");
 
-	r = pm_runtime_put(&dss.pdev->dev);
+	r = pm_runtime_put_sync(&dss.pdev->dev);
 	WARN_ON(r < 0);
 }
 
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c
index b4c270edb915..d7aa3b056529 100644
--- a/drivers/video/omap2/dss/hdmi.c
+++ b/drivers/video/omap2/dss/hdmi.c
@@ -176,7 +176,7 @@ static void hdmi_runtime_put(void)
 
 	DSSDBG("hdmi_runtime_put\n");
 
-	r = pm_runtime_put(&hdmi.pdev->dev);
+	r = pm_runtime_put_sync(&hdmi.pdev->dev);
 	WARN_ON(r < 0);
 }
 
@@ -497,6 +497,7 @@ bool omapdss_hdmi_detect(void)
497 497
498int omapdss_hdmi_display_enable(struct omap_dss_device *dssdev) 498int omapdss_hdmi_display_enable(struct omap_dss_device *dssdev)
499{ 499{
500 struct omap_dss_hdmi_data *priv = dssdev->data;
500 int r = 0; 501 int r = 0;
501 502
502 DSSDBG("ENTER hdmi_display_enable\n"); 503 DSSDBG("ENTER hdmi_display_enable\n");
@@ -509,6 +510,8 @@ int omapdss_hdmi_display_enable(struct omap_dss_device *dssdev)
509 goto err0; 510 goto err0;
510 } 511 }
511 512
513 hdmi.ip_data.hpd_gpio = priv->hpd_gpio;
514
512 r = omap_dss_start_device(dssdev); 515 r = omap_dss_start_device(dssdev);
513 if (r) { 516 if (r) {
514 DSSERR("failed to start device\n"); 517 DSSERR("failed to start device\n");
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c
index 814bb9500dca..55f398014f33 100644
--- a/drivers/video/omap2/dss/rfbi.c
+++ b/drivers/video/omap2/dss/rfbi.c
@@ -140,7 +140,7 @@ static void rfbi_runtime_put(void)
 
 	DSSDBG("rfbi_runtime_put\n");
 
-	r = pm_runtime_put(&rfbi.pdev->dev);
+	r = pm_runtime_put_sync(&rfbi.pdev->dev);
 	WARN_ON(r < 0);
 }
 
diff --git a/drivers/video/omap2/dss/ti_hdmi.h b/drivers/video/omap2/dss/ti_hdmi.h
index 7503f7f619a7..50dadba5070a 100644
--- a/drivers/video/omap2/dss/ti_hdmi.h
+++ b/drivers/video/omap2/dss/ti_hdmi.h
@@ -126,6 +126,10 @@ struct hdmi_ip_data {
126 const struct ti_hdmi_ip_ops *ops; 126 const struct ti_hdmi_ip_ops *ops;
127 struct hdmi_config cfg; 127 struct hdmi_config cfg;
128 struct hdmi_pll_info pll_data; 128 struct hdmi_pll_info pll_data;
129
130 /* ti_hdmi_4xxx_ip private data. These should be in a separate struct */
131 int hpd_gpio;
132 bool phy_tx_enabled;
129}; 133};
130int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data); 134int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data);
131void ti_hdmi_4xxx_phy_disable(struct hdmi_ip_data *ip_data); 135void ti_hdmi_4xxx_phy_disable(struct hdmi_ip_data *ip_data);
diff --git a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
index 9af81f18f163..2d72334ca3da 100644
--- a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
+++ b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
@@ -28,6 +28,7 @@
28#include <linux/delay.h> 28#include <linux/delay.h>
29#include <linux/string.h> 29#include <linux/string.h>
30#include <linux/seq_file.h> 30#include <linux/seq_file.h>
31#include <linux/gpio.h>
31 32
32#include "ti_hdmi_4xxx_ip.h" 33#include "ti_hdmi_4xxx_ip.h"
33#include "dss.h" 34#include "dss.h"
@@ -223,6 +224,49 @@ void ti_hdmi_4xxx_pll_disable(struct hdmi_ip_data *ip_data)
223 hdmi_set_pll_pwr(ip_data, HDMI_PLLPWRCMD_ALLOFF); 224 hdmi_set_pll_pwr(ip_data, HDMI_PLLPWRCMD_ALLOFF);
224} 225}
225 226
227static int hdmi_check_hpd_state(struct hdmi_ip_data *ip_data)
228{
229 unsigned long flags;
230 bool hpd;
231 int r;
232 /* this should be in ti_hdmi_4xxx_ip private data */
233 static DEFINE_SPINLOCK(phy_tx_lock);
234
235 spin_lock_irqsave(&phy_tx_lock, flags);
236
237 hpd = gpio_get_value(ip_data->hpd_gpio);
238
239 if (hpd == ip_data->phy_tx_enabled) {
240 spin_unlock_irqrestore(&phy_tx_lock, flags);
241 return 0;
242 }
243
244 if (hpd)
245 r = hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_TXON);
246 else
247 r = hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_LDOON);
248
249 if (r) {
250 DSSERR("Failed to %s PHY TX power\n",
251 hpd ? "enable" : "disable");
252 goto err;
253 }
254
255 ip_data->phy_tx_enabled = hpd;
256err:
257 spin_unlock_irqrestore(&phy_tx_lock, flags);
258 return r;
259}
260
261static irqreturn_t hpd_irq_handler(int irq, void *data)
262{
263 struct hdmi_ip_data *ip_data = data;
264
265 hdmi_check_hpd_state(ip_data);
266
267 return IRQ_HANDLED;
268}
269
226int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data) 270int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data)
227{ 271{
228 u16 r = 0; 272 u16 r = 0;
@@ -232,10 +276,6 @@ int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data)
232 if (r) 276 if (r)
233 return r; 277 return r;
234 278
235 r = hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_TXON);
236 if (r)
237 return r;
238
239 /* 279 /*
240 * Read address 0 in order to get the SCP reset done completed 280 * Read address 0 in order to get the SCP reset done completed
241 * Dummy access performed to make sure reset is done 281 * Dummy access performed to make sure reset is done
@@ -257,12 +297,32 @@ int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data)
257 /* Write to phy address 3 to change the polarity control */ 297 /* Write to phy address 3 to change the polarity control */
258 REG_FLD_MOD(phy_base, HDMI_TXPHY_PAD_CFG_CTRL, 0x1, 27, 27); 298 REG_FLD_MOD(phy_base, HDMI_TXPHY_PAD_CFG_CTRL, 0x1, 27, 27);
259 299
300 r = request_threaded_irq(gpio_to_irq(ip_data->hpd_gpio),
301 NULL, hpd_irq_handler,
302 IRQF_DISABLED | IRQF_TRIGGER_RISING |
303 IRQF_TRIGGER_FALLING, "hpd", ip_data);
304 if (r) {
305 DSSERR("HPD IRQ request failed\n");
306 hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF);
307 return r;
308 }
309
310 r = hdmi_check_hpd_state(ip_data);
311 if (r) {
312 free_irq(gpio_to_irq(ip_data->hpd_gpio), ip_data);
313 hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF);
314 return r;
315 }
316
260 return 0; 317 return 0;
261} 318}
262 319
263void ti_hdmi_4xxx_phy_disable(struct hdmi_ip_data *ip_data) 320void ti_hdmi_4xxx_phy_disable(struct hdmi_ip_data *ip_data)
264{ 321{
322 free_irq(gpio_to_irq(ip_data->hpd_gpio), ip_data);
323
265 hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF); 324 hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF);
325 ip_data->phy_tx_enabled = false;
266} 326}
267 327
268static int hdmi_core_ddc_init(struct hdmi_ip_data *ip_data) 328static int hdmi_core_ddc_init(struct hdmi_ip_data *ip_data)
diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c
index b3e9f9091581..5c3d0f901510 100644
--- a/drivers/video/omap2/dss/venc.c
+++ b/drivers/video/omap2/dss/venc.c
@@ -401,7 +401,7 @@ static void venc_runtime_put(void)
 
 	DSSDBG("venc_runtime_put\n");
 
-	r = pm_runtime_put(&venc.pdev->dev);
+	r = pm_runtime_put_sync(&venc.pdev->dev);
 	WARN_ON(r < 0);
 }
 
diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c
index 14e2d995e958..4dcfced107f5 100644
--- a/drivers/xen/cpu_hotplug.c
+++ b/drivers/xen/cpu_hotplug.c
@@ -30,7 +30,8 @@ static int vcpu_online(unsigned int cpu)
 	sprintf(dir, "cpu/%u", cpu);
 	err = xenbus_scanf(XBT_NIL, dir, "availability", "%s", state);
 	if (err != 1) {
-		printk(KERN_ERR "XENBUS: Unable to read cpu state\n");
+		if (!xen_initial_domain())
+			printk(KERN_ERR "XENBUS: Unable to read cpu state\n");
 		return err;
 	}
 
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index 7944a17f5cbf..19834d1c7c36 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -884,7 +884,7 @@ static inline int str_to_quirk(const char *buf, int *domain, int *bus, int
 	int err;
 
 	err =
-	    sscanf(buf, " %04x:%02x:%02x.%1x-%08x:%1x:%08x", domain, bus, slot,
+	    sscanf(buf, " %04x:%02x:%02x.%d-%08x:%1x:%08x", domain, bus, slot,
 		   func, reg, size, mask);
 	if (err == 7)
 		return 0;
@@ -904,7 +904,7 @@ static int pcistub_device_id_add(int domain, int bus, int slot, int func)
 	pci_dev_id->bus = bus;
 	pci_dev_id->devfn = PCI_DEVFN(slot, func);
 
-	pr_debug(DRV_NAME ": wants to seize %04x:%02x:%02x.%01x\n",
+	pr_debug(DRV_NAME ": wants to seize %04x:%02x:%02x.%d\n",
 		 domain, bus, slot, func);
 
 	spin_lock_irqsave(&device_ids_lock, flags);
@@ -934,7 +934,7 @@ static int pcistub_device_id_remove(int domain, int bus, int slot, int func)
 
 			err = 0;
 
-			pr_debug(DRV_NAME ": removed %04x:%02x:%02x.%01x from "
+			pr_debug(DRV_NAME ": removed %04x:%02x:%02x.%d from "
 				 "seize list\n", domain, bus, slot, func);
 		}
 	}
@@ -1029,7 +1029,7 @@ static ssize_t pcistub_slot_show(struct device_driver *drv, char *buf)
 			break;
 
 		count += scnprintf(buf + count, PAGE_SIZE - count,
-				   "%04x:%02x:%02x.%01x\n",
+				   "%04x:%02x:%02x.%d\n",
 				   pci_dev_id->domain, pci_dev_id->bus,
 				   PCI_SLOT(pci_dev_id->devfn),
 				   PCI_FUNC(pci_dev_id->devfn));
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index d5dcf8d5d3d9..64b11f99eacc 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -206,6 +206,7 @@ static int xen_pcibk_publish_pci_dev(struct xen_pcibk_device *pdev,
206 goto out; 206 goto out;
207 } 207 }
208 208
209 /* Note: The PV protocol uses %02x, don't change it */
209 err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, str, 210 err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, str,
210 "%04x:%02x:%02x.%02x", domain, bus, 211 "%04x:%02x:%02x.%02x", domain, bus,
211 PCI_SLOT(devfn), PCI_FUNC(devfn)); 212 PCI_SLOT(devfn), PCI_FUNC(devfn));
@@ -229,7 +230,7 @@ static int xen_pcibk_export_device(struct xen_pcibk_device *pdev,
 		err = -EINVAL;
 		xenbus_dev_fatal(pdev->xdev, err,
 				 "Couldn't locate PCI device "
-				 "(%04x:%02x:%02x.%01x)! "
+				 "(%04x:%02x:%02x.%d)! "
 				 "perhaps already in-use?",
 				 domain, bus, slot, func);
 		goto out;
@@ -274,7 +275,7 @@ static int xen_pcibk_remove_device(struct xen_pcibk_device *pdev,
 	if (!dev) {
 		err = -EINVAL;
 		dev_dbg(&pdev->xdev->dev, "Couldn't locate PCI device "
-			"(%04x:%02x:%02x.%01x)! not owned by this domain\n",
+			"(%04x:%02x:%02x.%d)! not owned by this domain\n",
 			domain, bus, slot, func);
 		goto out;
 	}
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 527dc2a3b89f..89f76252a16f 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -369,6 +369,10 @@ static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
369 goto out; 369 goto out;
370 } 370 }
371 token++; 371 token++;
372 if (memchr(token, 0, u->u.msg.len - (token - path)) == NULL) {
373 rc = -EILSEQ;
374 goto out;
375 }
372 376
373 if (msg_type == XS_WATCH) { 377 if (msg_type == XS_WATCH) {
374 watch = alloc_watch_adapter(path, token); 378 watch = alloc_watch_adapter(path, token);
diff --git a/fs/bio.c b/fs/bio.c
index b1fe82cf88cf..b980ecde026a 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -505,13 +505,9 @@ EXPORT_SYMBOL(bio_clone);
 int bio_get_nr_vecs(struct block_device *bdev)
 {
 	struct request_queue *q = bdev_get_queue(bdev);
-	int nr_pages;
-
-	nr_pages = ((queue_max_sectors(q) << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	if (nr_pages > queue_max_segments(q))
-		nr_pages = queue_max_segments(q);
-
-	return nr_pages;
+	return min_t(unsigned,
+		     queue_max_segments(q),
+		     queue_max_sectors(q) / (PAGE_SIZE >> 9) + 1);
 }
 EXPORT_SYMBOL(bio_get_nr_vecs);
 
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index b60fc8bfb3e9..620daad201db 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -641,10 +641,10 @@ static int __cap_is_valid(struct ceph_cap *cap)
 	unsigned long ttl;
 	u32 gen;
 
-	spin_lock(&cap->session->s_cap_lock);
+	spin_lock(&cap->session->s_gen_ttl_lock);
 	gen = cap->session->s_cap_gen;
 	ttl = cap->session->s_cap_ttl;
-	spin_unlock(&cap->session->s_cap_lock);
+	spin_unlock(&cap->session->s_gen_ttl_lock);
 
 	if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) {
 		dout("__cap_is_valid %p cap %p issued %s "
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 618246bc2196..3e8094be4604 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -975,10 +975,10 @@ static int dentry_lease_is_valid(struct dentry *dentry)
975 di = ceph_dentry(dentry); 975 di = ceph_dentry(dentry);
976 if (di->lease_session) { 976 if (di->lease_session) {
977 s = di->lease_session; 977 s = di->lease_session;
978 spin_lock(&s->s_cap_lock); 978 spin_lock(&s->s_gen_ttl_lock);
979 gen = s->s_cap_gen; 979 gen = s->s_cap_gen;
980 ttl = s->s_cap_ttl; 980 ttl = s->s_cap_ttl;
981 spin_unlock(&s->s_cap_lock); 981 spin_unlock(&s->s_gen_ttl_lock);
982 982
983 if (di->lease_gen == gen && 983 if (di->lease_gen == gen &&
984 time_before(jiffies, dentry->d_time) && 984 time_before(jiffies, dentry->d_time) &&
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 23ab6a3f1825..866e8d7ca37d 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -262,6 +262,7 @@ static int parse_reply_info(struct ceph_msg *msg,
262 /* trace */ 262 /* trace */
263 ceph_decode_32_safe(&p, end, len, bad); 263 ceph_decode_32_safe(&p, end, len, bad);
264 if (len > 0) { 264 if (len > 0) {
265 ceph_decode_need(&p, end, len, bad);
265 err = parse_reply_info_trace(&p, p+len, info, features); 266 err = parse_reply_info_trace(&p, p+len, info, features);
266 if (err < 0) 267 if (err < 0)
267 goto out_bad; 268 goto out_bad;
@@ -270,6 +271,7 @@ static int parse_reply_info(struct ceph_msg *msg,
270 /* extra */ 271 /* extra */
271 ceph_decode_32_safe(&p, end, len, bad); 272 ceph_decode_32_safe(&p, end, len, bad);
272 if (len > 0) { 273 if (len > 0) {
274 ceph_decode_need(&p, end, len, bad);
273 err = parse_reply_info_extra(&p, p+len, info, features); 275 err = parse_reply_info_extra(&p, p+len, info, features);
274 if (err < 0) 276 if (err < 0)
275 goto out_bad; 277 goto out_bad;
@@ -398,9 +400,11 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
398 s->s_con.peer_name.type = CEPH_ENTITY_TYPE_MDS; 400 s->s_con.peer_name.type = CEPH_ENTITY_TYPE_MDS;
399 s->s_con.peer_name.num = cpu_to_le64(mds); 401 s->s_con.peer_name.num = cpu_to_le64(mds);
400 402
401 spin_lock_init(&s->s_cap_lock); 403 spin_lock_init(&s->s_gen_ttl_lock);
402 s->s_cap_gen = 0; 404 s->s_cap_gen = 0;
403 s->s_cap_ttl = 0; 405 s->s_cap_ttl = 0;
406
407 spin_lock_init(&s->s_cap_lock);
404 s->s_renew_requested = 0; 408 s->s_renew_requested = 0;
405 s->s_renew_seq = 0; 409 s->s_renew_seq = 0;
406 INIT_LIST_HEAD(&s->s_caps); 410 INIT_LIST_HEAD(&s->s_caps);
@@ -2326,10 +2330,10 @@ static void handle_session(struct ceph_mds_session *session,
2326 case CEPH_SESSION_STALE: 2330 case CEPH_SESSION_STALE:
2327 pr_info("mds%d caps went stale, renewing\n", 2331 pr_info("mds%d caps went stale, renewing\n",
2328 session->s_mds); 2332 session->s_mds);
2329 spin_lock(&session->s_cap_lock); 2333 spin_lock(&session->s_gen_ttl_lock);
2330 session->s_cap_gen++; 2334 session->s_cap_gen++;
2331 session->s_cap_ttl = 0; 2335 session->s_cap_ttl = 0;
2332 spin_unlock(&session->s_cap_lock); 2336 spin_unlock(&session->s_gen_ttl_lock);
2333 send_renew_caps(mdsc, session); 2337 send_renew_caps(mdsc, session);
2334 break; 2338 break;
2335 2339
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index a50ca0e39475..8c7c04ebb595 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -117,10 +117,13 @@ struct ceph_mds_session {
117 void *s_authorizer_buf, *s_authorizer_reply_buf; 117 void *s_authorizer_buf, *s_authorizer_reply_buf;
118 size_t s_authorizer_buf_len, s_authorizer_reply_buf_len; 118 size_t s_authorizer_buf_len, s_authorizer_reply_buf_len;
119 119
120 /* protected by s_cap_lock */ 120 /* protected by s_gen_ttl_lock */
121 spinlock_t s_cap_lock; 121 spinlock_t s_gen_ttl_lock;
122 u32 s_cap_gen; /* inc each time we get mds stale msg */ 122 u32 s_cap_gen; /* inc each time we get mds stale msg */
123 unsigned long s_cap_ttl; /* when session caps expire */ 123 unsigned long s_cap_ttl; /* when session caps expire */
124
125 /* protected by s_cap_lock */
126 spinlock_t s_cap_lock;
124 struct list_head s_caps; /* all caps issued by this session */ 127 struct list_head s_caps; /* all caps issued by this session */
125 int s_nr_caps, s_trim_caps; 128 int s_nr_caps, s_trim_caps;
126 int s_num_cap_releases; 129 int s_num_cap_releases;
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 857214ae8c08..a76f697303d9 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -111,8 +111,10 @@ static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
 }
 
 static struct ceph_vxattr_cb ceph_file_vxattrs[] = {
+	{ true, "ceph.file.layout", ceph_vxattrcb_layout},
+	/* The following extended attribute name is deprecated */
 	{ true, "ceph.layout", ceph_vxattrcb_layout},
-	{ NULL, NULL }
+	{ true, NULL, NULL }
 };
 
 static struct ceph_vxattr_cb *ceph_inode_vxattrs(struct inode *inode)
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index 0554b00a7b33..2b243af70aa3 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -139,7 +139,7 @@ config CIFS_DFS_UPCALL
 	  points. If unsure, say N.
 
 config CIFS_FSCACHE
-	bool "Provide CIFS client caching support (EXPERIMENTAL)"
+	bool "Provide CIFS client caching support"
 	depends on CIFS=m && FSCACHE || CIFS=y && FSCACHE=y
 	help
 	  Makes CIFS FS-Cache capable. Say Y here if you want your CIFS data
@@ -147,7 +147,7 @@ config CIFS_FSCACHE
 	  manager. If unsure, say N.
 
 config CIFS_ACL
-	bool "Provide CIFS ACL support (EXPERIMENTAL)"
+	bool "Provide CIFS ACL support"
 	depends on CIFS_XATTR && KEYS
 	help
 	  Allows to fetch CIFS/NTFS ACL from the server. The DACL blob
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 986709a8d903..602f77c304c9 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -773,10 +773,11 @@ standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
773 cifs_dump_mem("Bad SMB: ", buf, 773 cifs_dump_mem("Bad SMB: ", buf,
774 min_t(unsigned int, server->total_read, 48)); 774 min_t(unsigned int, server->total_read, 48));
775 775
776 if (mid) 776 if (!mid)
777 handle_mid(mid, server, smb_buffer, length); 777 return length;
778 778
779 return length; 779 handle_mid(mid, server, smb_buffer, length);
780 return 0;
780} 781}
781 782
782static int 783static int
@@ -2125,7 +2126,7 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
2125 down_read(&key->sem); 2126 down_read(&key->sem);
2126 upayload = key->payload.data; 2127 upayload = key->payload.data;
2127 if (IS_ERR_OR_NULL(upayload)) { 2128 if (IS_ERR_OR_NULL(upayload)) {
2128 rc = PTR_ERR(key); 2129 rc = upayload ? PTR_ERR(upayload) : -EINVAL;
2129 goto out_key_put; 2130 goto out_key_put;
2130 } 2131 }
2131 2132
@@ -2142,14 +2143,14 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
2142 2143
2143 len = delim - payload; 2144 len = delim - payload;
2144 if (len > MAX_USERNAME_SIZE || len <= 0) { 2145 if (len > MAX_USERNAME_SIZE || len <= 0) {
2145 cFYI(1, "Bad value from username search (len=%ld)", len); 2146 cFYI(1, "Bad value from username search (len=%zd)", len);
2146 rc = -EINVAL; 2147 rc = -EINVAL;
2147 goto out_key_put; 2148 goto out_key_put;
2148 } 2149 }
2149 2150
2150 vol->username = kstrndup(payload, len, GFP_KERNEL); 2151 vol->username = kstrndup(payload, len, GFP_KERNEL);
2151 if (!vol->username) { 2152 if (!vol->username) {
2152 cFYI(1, "Unable to allocate %ld bytes for username", len); 2153 cFYI(1, "Unable to allocate %zd bytes for username", len);
2153 rc = -ENOMEM; 2154 rc = -ENOMEM;
2154 goto out_key_put; 2155 goto out_key_put;
2155 } 2156 }
@@ -2157,7 +2158,7 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
2157 2158
2158 len = key->datalen - (len + 1); 2159 len = key->datalen - (len + 1);
2159 if (len > MAX_PASSWORD_SIZE || len <= 0) { 2160 if (len > MAX_PASSWORD_SIZE || len <= 0) {
2160 cFYI(1, "Bad len for password search (len=%ld)", len); 2161 cFYI(1, "Bad len for password search (len=%zd)", len);
2161 rc = -EINVAL; 2162 rc = -EINVAL;
2162 kfree(vol->username); 2163 kfree(vol->username);
2163 vol->username = NULL; 2164 vol->username = NULL;
@@ -2167,7 +2168,7 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
2167 ++delim; 2168 ++delim;
2168 vol->password = kstrndup(delim, len, GFP_KERNEL); 2169 vol->password = kstrndup(delim, len, GFP_KERNEL);
2169 if (!vol->password) { 2170 if (!vol->password) {
2170 cFYI(1, "Unable to allocate %ld bytes for password", len); 2171 cFYI(1, "Unable to allocate %zd bytes for password", len);
2171 rc = -ENOMEM; 2172 rc = -ENOMEM;
2172 kfree(vol->username); 2173 kfree(vol->username);
2173 vol->username = NULL; 2174 vol->username = NULL;
@@ -3857,10 +3858,8 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, uid_t fsuid)
3857 struct smb_vol *vol_info; 3858 struct smb_vol *vol_info;
3858 3859
3859 vol_info = kzalloc(sizeof(*vol_info), GFP_KERNEL); 3860 vol_info = kzalloc(sizeof(*vol_info), GFP_KERNEL);
3860 if (vol_info == NULL) { 3861 if (vol_info == NULL)
3861 tcon = ERR_PTR(-ENOMEM); 3862 return ERR_PTR(-ENOMEM);
3862 goto out;
3863 }
3864 3863
3865 vol_info->local_nls = cifs_sb->local_nls; 3864 vol_info->local_nls = cifs_sb->local_nls;
3866 vol_info->linux_uid = fsuid; 3865 vol_info->linux_uid = fsuid;
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index df8fecb5b993..63a196b97d50 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -492,7 +492,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
 {
 	int xid;
 	int rc = 0; /* to get around spurious gcc warning, set to zero here */
-	__u32 oplock = 0;
+	__u32 oplock = enable_oplocks ? REQ_OPLOCK : 0;
 	__u16 fileHandle = 0;
 	bool posix_open = false;
 	struct cifs_sb_info *cifs_sb;
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index d85efad5765f..551d0c2b9736 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -246,16 +246,15 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
246 /* copy user */ 246 /* copy user */
247 /* BB what about null user mounts - check that we do this BB */ 247 /* BB what about null user mounts - check that we do this BB */
248 /* copy user */ 248 /* copy user */
249 if (ses->user_name != NULL) 249 if (ses->user_name != NULL) {
250 strncpy(bcc_ptr, ses->user_name, MAX_USERNAME_SIZE); 250 strncpy(bcc_ptr, ses->user_name, MAX_USERNAME_SIZE);
251 bcc_ptr += strnlen(ses->user_name, MAX_USERNAME_SIZE);
252 }
251 /* else null user mount */ 253 /* else null user mount */
252
253 bcc_ptr += strnlen(ses->user_name, MAX_USERNAME_SIZE);
254 *bcc_ptr = 0; 254 *bcc_ptr = 0;
255 bcc_ptr++; /* account for null termination */ 255 bcc_ptr++; /* account for null termination */
256 256
257 /* copy domain */ 257 /* copy domain */
258
259 if (ses->domainName != NULL) { 258 if (ses->domainName != NULL) {
260 strncpy(bcc_ptr, ses->domainName, 256); 259 strncpy(bcc_ptr, ses->domainName, 256);
261 bcc_ptr += strnlen(ses->domainName, 256); 260 bcc_ptr += strnlen(ses->domainName, 256);
@@ -395,6 +394,10 @@ static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
395 ses->ntlmssp->server_flags = le32_to_cpu(pblob->NegotiateFlags); 394 ses->ntlmssp->server_flags = le32_to_cpu(pblob->NegotiateFlags);
396 tioffset = le32_to_cpu(pblob->TargetInfoArray.BufferOffset); 395 tioffset = le32_to_cpu(pblob->TargetInfoArray.BufferOffset);
397 tilen = le16_to_cpu(pblob->TargetInfoArray.Length); 396 tilen = le16_to_cpu(pblob->TargetInfoArray.Length);
397 if (tioffset > blob_len || tioffset + tilen > blob_len) {
398 cERROR(1, "tioffset + tilen too high %u + %u", tioffset, tilen);
399 return -EINVAL;
400 }
398 if (tilen) { 401 if (tilen) {
399 ses->auth_key.response = kmalloc(tilen, GFP_KERNEL); 402 ses->auth_key.response = kmalloc(tilen, GFP_KERNEL);
400 if (!ses->auth_key.response) { 403 if (!ses->auth_key.response) {
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index 63ab24510649..ea9931281557 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -1990,6 +1990,17 @@ out:
1990 return; 1990 return;
1991} 1991}
1992 1992
1993static size_t ecryptfs_max_decoded_size(size_t encoded_size)
1994{
1995 /* Not exact; conservatively long. Every block of 4
1996 * encoded characters decodes into a block of 3
1997 * decoded characters. This segment of code provides
1998 * the caller with the maximum amount of allocated
1999 * space that @dst will need to point to in a
2000 * subsequent call. */
2001 return ((encoded_size + 1) * 3) / 4;
2002}
2003
1993/** 2004/**
1994 * ecryptfs_decode_from_filename 2005 * ecryptfs_decode_from_filename
1995 * @dst: If NULL, this function only sets @dst_size and returns. If 2006 * @dst: If NULL, this function only sets @dst_size and returns. If
@@ -2008,13 +2019,7 @@ ecryptfs_decode_from_filename(unsigned char *dst, size_t *dst_size,
2008 size_t dst_byte_offset = 0; 2019 size_t dst_byte_offset = 0;
2009 2020
2010 if (dst == NULL) { 2021 if (dst == NULL) {
2011 /* Not exact; conservatively long. Every block of 4 2022 (*dst_size) = ecryptfs_max_decoded_size(src_size);
2012 * encoded characters decodes into a block of 3
2013 * decoded characters. This segment of code provides
2014 * the caller with the maximum amount of allocated
2015 * space that @dst will need to point to in a
2016 * subsequent call. */
2017 (*dst_size) = (((src_size + 1) * 3) / 4);
2018 goto out; 2023 goto out;
2019 } 2024 }
2020 while (src_byte_offset < src_size) { 2025 while (src_byte_offset < src_size) {
@@ -2239,3 +2244,52 @@ out_free:
2239out: 2244out:
2240 return rc; 2245 return rc;
2241} 2246}
2247
2248#define ENC_NAME_MAX_BLOCKLEN_8_OR_16 143
2249
2250int ecryptfs_set_f_namelen(long *namelen, long lower_namelen,
2251 struct ecryptfs_mount_crypt_stat *mount_crypt_stat)
2252{
2253 struct blkcipher_desc desc;
2254 struct mutex *tfm_mutex;
2255 size_t cipher_blocksize;
2256 int rc;
2257
2258 if (!(mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)) {
2259 (*namelen) = lower_namelen;
2260 return 0;
2261 }
2262
2263 rc = ecryptfs_get_tfm_and_mutex_for_cipher_name(&desc.tfm, &tfm_mutex,
2264 mount_crypt_stat->global_default_fn_cipher_name);
2265 if (unlikely(rc)) {
2266 (*namelen) = 0;
2267 return rc;
2268 }
2269
2270 mutex_lock(tfm_mutex);
2271 cipher_blocksize = crypto_blkcipher_blocksize(desc.tfm);
2272 mutex_unlock(tfm_mutex);
2273
2274 /* Return an exact amount for the common cases */
2275 if (lower_namelen == NAME_MAX
2276 && (cipher_blocksize == 8 || cipher_blocksize == 16)) {
2277 (*namelen) = ENC_NAME_MAX_BLOCKLEN_8_OR_16;
2278 return 0;
2279 }
2280
2281 /* Return a safe estimate for the uncommon cases */
2282 (*namelen) = lower_namelen;
2283 (*namelen) -= ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE;
2284 /* Since this is the max decoded size, subtract 1 "decoded block" len */
2285 (*namelen) = ecryptfs_max_decoded_size(*namelen) - 3;
2286 (*namelen) -= ECRYPTFS_TAG_70_MAX_METADATA_SIZE;
2287 (*namelen) -= ECRYPTFS_FILENAME_MIN_RANDOM_PREPEND_BYTES;
2288 /* Worst case is that the filename is padded nearly a full block size */
2289 (*namelen) -= cipher_blocksize - 1;
2290
2291 if ((*namelen) < 0)
2292 (*namelen) = 0;
2293
2294 return 0;
2295}
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index a2362df58ae8..867b64c5d84f 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -162,6 +162,10 @@ ecryptfs_get_key_payload_data(struct key *key)
162#define ECRYPTFS_NON_NULL 0x42 /* A reasonable substitute for NULL */ 162#define ECRYPTFS_NON_NULL 0x42 /* A reasonable substitute for NULL */
163#define MD5_DIGEST_SIZE 16 163#define MD5_DIGEST_SIZE 16
164#define ECRYPTFS_TAG_70_DIGEST_SIZE MD5_DIGEST_SIZE 164#define ECRYPTFS_TAG_70_DIGEST_SIZE MD5_DIGEST_SIZE
165#define ECRYPTFS_TAG_70_MIN_METADATA_SIZE (1 + ECRYPTFS_MIN_PKT_LEN_SIZE \
166 + ECRYPTFS_SIG_SIZE + 1 + 1)
167#define ECRYPTFS_TAG_70_MAX_METADATA_SIZE (1 + ECRYPTFS_MAX_PKT_LEN_SIZE \
168 + ECRYPTFS_SIG_SIZE + 1 + 1)
165#define ECRYPTFS_FEK_ENCRYPTED_FILENAME_PREFIX "ECRYPTFS_FEK_ENCRYPTED." 169#define ECRYPTFS_FEK_ENCRYPTED_FILENAME_PREFIX "ECRYPTFS_FEK_ENCRYPTED."
166#define ECRYPTFS_FEK_ENCRYPTED_FILENAME_PREFIX_SIZE 23 170#define ECRYPTFS_FEK_ENCRYPTED_FILENAME_PREFIX_SIZE 23
167#define ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX "ECRYPTFS_FNEK_ENCRYPTED." 171#define ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX "ECRYPTFS_FNEK_ENCRYPTED."
@@ -701,6 +705,8 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
701 size_t *packet_size, 705 size_t *packet_size,
702 struct ecryptfs_mount_crypt_stat *mount_crypt_stat, 706 struct ecryptfs_mount_crypt_stat *mount_crypt_stat,
703 char *data, size_t max_packet_size); 707 char *data, size_t max_packet_size);
708int ecryptfs_set_f_namelen(long *namelen, long lower_namelen,
709 struct ecryptfs_mount_crypt_stat *mount_crypt_stat);
704int ecryptfs_derive_iv(char *iv, struct ecryptfs_crypt_stat *crypt_stat, 710int ecryptfs_derive_iv(char *iv, struct ecryptfs_crypt_stat *crypt_stat,
705 loff_t offset); 711 loff_t offset);
706 712
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 19892d7d2ed1..ab35b113003b 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -1085,6 +1085,8 @@ ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
1085 } 1085 }
1086 1086
1087 rc = vfs_setxattr(lower_dentry, name, value, size, flags); 1087 rc = vfs_setxattr(lower_dentry, name, value, size, flags);
1088 if (!rc)
1089 fsstack_copy_attr_all(dentry->d_inode, lower_dentry->d_inode);
1088out: 1090out:
1089 return rc; 1091 return rc;
1090} 1092}
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 8e3b943e330f..2333203a120b 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -679,10 +679,7 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
679 * Octets N3-N4: Block-aligned encrypted filename 679 * Octets N3-N4: Block-aligned encrypted filename
680 * - Consists of a minimum number of random characters, a \0 680 * - Consists of a minimum number of random characters, a \0
681 * separator, and then the filename */ 681 * separator, and then the filename */
682	s->max_packet_size = (1                   /* Tag 70 identifier */
683	                      + 3                 /* Max Tag 70 packet size */
684	                      + ECRYPTFS_SIG_SIZE /* FNEK sig */
685	                      + 1                 /* Cipher identifier */
686	                      + s->block_aligned_filename_size);
682	s->max_packet_size = (ECRYPTFS_TAG_70_MAX_METADATA_SIZE
683	                      + s->block_aligned_filename_size);
687 if (dest == NULL) { 684 if (dest == NULL) {
688 (*packet_size) = s->max_packet_size; 685 (*packet_size) = s->max_packet_size;
@@ -934,10 +931,10 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
934 goto out; 931 goto out;
935 } 932 }
936 s->desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; 933 s->desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
937	if (max_packet_size < (1 + 1 + ECRYPTFS_SIG_SIZE + 1 + 1)) {
934	if (max_packet_size < ECRYPTFS_TAG_70_MIN_METADATA_SIZE) {
938 printk(KERN_WARNING "%s: max_packet_size is [%zd]; it must be " 935 printk(KERN_WARNING "%s: max_packet_size is [%zd]; it must be "
939 "at least [%d]\n", __func__, max_packet_size, 936 "at least [%d]\n", __func__, max_packet_size,
940	       (1 + 1 + ECRYPTFS_SIG_SIZE + 1 + 1));
937	       ECRYPTFS_TAG_70_MIN_METADATA_SIZE);
941 rc = -EINVAL; 938 rc = -EINVAL;
942 goto out; 939 goto out;
943 } 940 }
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index 10ec695ccd68..a46b3a8fee1e 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -150,7 +150,7 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
150 /* This is a header extent */ 150 /* This is a header extent */
151 char *page_virt; 151 char *page_virt;
152 152
153	page_virt = kmap_atomic(page, KM_USER0);
153	page_virt = kmap_atomic(page);
154 memset(page_virt, 0, PAGE_CACHE_SIZE); 154 memset(page_virt, 0, PAGE_CACHE_SIZE);
155 /* TODO: Support more than one header extent */ 155 /* TODO: Support more than one header extent */
156 if (view_extent_num == 0) { 156 if (view_extent_num == 0) {
@@ -163,7 +163,7 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
163 crypt_stat, 163 crypt_stat,
164 &written); 164 &written);
165 } 165 }
166	kunmap_atomic(page_virt, KM_USER0);
166	kunmap_atomic(page_virt);
167 flush_dcache_page(page); 167 flush_dcache_page(page);
168 if (rc) { 168 if (rc) {
169 printk(KERN_ERR "%s: Error reading xattr " 169 printk(KERN_ERR "%s: Error reading xattr "
diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
index 5c0106f75775..b2a34a192f4f 100644
--- a/fs/ecryptfs/read_write.c
+++ b/fs/ecryptfs/read_write.c
@@ -156,7 +156,7 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
156 ecryptfs_page_idx, rc); 156 ecryptfs_page_idx, rc);
157 goto out; 157 goto out;
158 } 158 }
159	ecryptfs_page_virt = kmap_atomic(ecryptfs_page, KM_USER0);
159	ecryptfs_page_virt = kmap_atomic(ecryptfs_page);
160 160
161 /* 161 /*
162 * pos: where we're now writing, offset: where the request was 162 * pos: where we're now writing, offset: where the request was
@@ -179,7 +179,7 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
179 (data + data_offset), num_bytes); 179 (data + data_offset), num_bytes);
180 data_offset += num_bytes; 180 data_offset += num_bytes;
181 } 181 }
182	kunmap_atomic(ecryptfs_page_virt, KM_USER0);
182	kunmap_atomic(ecryptfs_page_virt);
183 flush_dcache_page(ecryptfs_page); 183 flush_dcache_page(ecryptfs_page);
184 SetPageUptodate(ecryptfs_page); 184 SetPageUptodate(ecryptfs_page);
185 unlock_page(ecryptfs_page); 185 unlock_page(ecryptfs_page);
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c
index 9df7fd6e0c39..cf152823bbf4 100644
--- a/fs/ecryptfs/super.c
+++ b/fs/ecryptfs/super.c
@@ -30,6 +30,8 @@
30#include <linux/seq_file.h> 30#include <linux/seq_file.h>
31#include <linux/file.h> 31#include <linux/file.h>
32#include <linux/crypto.h> 32#include <linux/crypto.h>
33#include <linux/statfs.h>
34#include <linux/magic.h>
33#include "ecryptfs_kernel.h" 35#include "ecryptfs_kernel.h"
34 36
35struct kmem_cache *ecryptfs_inode_info_cache; 37struct kmem_cache *ecryptfs_inode_info_cache;
@@ -102,10 +104,20 @@ static void ecryptfs_destroy_inode(struct inode *inode)
102static int ecryptfs_statfs(struct dentry *dentry, struct kstatfs *buf) 104static int ecryptfs_statfs(struct dentry *dentry, struct kstatfs *buf)
103{ 105{
104 struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry); 106 struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
107 int rc;
105 108
106 if (!lower_dentry->d_sb->s_op->statfs) 109 if (!lower_dentry->d_sb->s_op->statfs)
107 return -ENOSYS; 110 return -ENOSYS;
108 return lower_dentry->d_sb->s_op->statfs(lower_dentry, buf); 111
112 rc = lower_dentry->d_sb->s_op->statfs(lower_dentry, buf);
113 if (rc)
114 return rc;
115
116 buf->f_type = ECRYPTFS_SUPER_MAGIC;
117 rc = ecryptfs_set_f_namelen(&buf->f_namelen, buf->f_namelen,
118 &ecryptfs_superblock_to_private(dentry->d_sb)->mount_crypt_stat);
119
120 return rc;
109} 121}
110 122
111/** 123/**
diff --git a/fs/exec.c b/fs/exec.c
index aeb135c7ff5c..92ce83a11e90 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1071,6 +1071,21 @@ void set_task_comm(struct task_struct *tsk, char *buf)
1071 perf_event_comm(tsk); 1071 perf_event_comm(tsk);
1072} 1072}
1073 1073
1074static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len)
1075{
1076 int i, ch;
1077
1078 /* Copies the binary name from after last slash */
1079 for (i = 0; (ch = *(fn++)) != '\0';) {
1080 if (ch == '/')
1081 i = 0; /* overwrite what we wrote */
1082 else
1083 if (i < len - 1)
1084 tcomm[i++] = ch;
1085 }
1086 tcomm[i] = '\0';
1087}
1088
1074int flush_old_exec(struct linux_binprm * bprm) 1089int flush_old_exec(struct linux_binprm * bprm)
1075{ 1090{
1076 int retval; 1091 int retval;
@@ -1085,6 +1100,7 @@ int flush_old_exec(struct linux_binprm * bprm)
1085 1100
1086 set_mm_exe_file(bprm->mm, bprm->file); 1101 set_mm_exe_file(bprm->mm, bprm->file);
1087 1102
1103 filename_to_taskname(bprm->tcomm, bprm->filename, sizeof(bprm->tcomm));
1088 /* 1104 /*
1089 * Release all of the old mmap stuff 1105 * Release all of the old mmap stuff
1090 */ 1106 */
@@ -1116,10 +1132,6 @@ EXPORT_SYMBOL(would_dump);
1116 1132
1117void setup_new_exec(struct linux_binprm * bprm) 1133void setup_new_exec(struct linux_binprm * bprm)
1118{ 1134{
1119 int i, ch;
1120 const char *name;
1121 char tcomm[sizeof(current->comm)];
1122
1123 arch_pick_mmap_layout(current->mm); 1135 arch_pick_mmap_layout(current->mm);
1124 1136
1125 /* This is the point of no return */ 1137 /* This is the point of no return */
@@ -1130,18 +1142,7 @@ void setup_new_exec(struct linux_binprm * bprm)
1130 else 1142 else
1131 set_dumpable(current->mm, suid_dumpable); 1143 set_dumpable(current->mm, suid_dumpable);
1132 1144
1133	name = bprm->filename;
1145	set_task_comm(current, bprm->tcomm);
1134
1135 /* Copies the binary name from after last slash */
1136 for (i=0; (ch = *(name++)) != '\0';) {
1137 if (ch == '/')
1138 i = 0; /* overwrite what we wrote */
1139 else
1140 if (i < (sizeof(tcomm) - 1))
1141 tcomm[i++] = ch;
1142 }
1143 tcomm[i] = '\0';
1144 set_task_comm(current, tcomm);
1145 1146
1146 /* Set the new mm task size. We have to do that late because it may 1147 /* Set the new mm task size. We have to do that late because it may
1147 * depend on TIF_32BIT which is only updated in flush_thread() on 1148 * depend on TIF_32BIT which is only updated in flush_thread() on
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index f855916657ba..5b4a9362d5aa 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -53,14 +53,6 @@ struct wb_writeback_work {
53}; 53};
54 54
55/* 55/*
56 * Include the creation of the trace points after defining the
57 * wb_writeback_work structure so that the definition remains local to this
58 * file.
59 */
60#define CREATE_TRACE_POINTS
61#include <trace/events/writeback.h>
62
63/*
64 * We don't actually have pdflush, but this one is exported though /proc... 56 * We don't actually have pdflush, but this one is exported though /proc...
65 */ 57 */
66int nr_pdflush_threads; 58int nr_pdflush_threads;
@@ -92,6 +84,14 @@ static inline struct inode *wb_inode(struct list_head *head)
92 return list_entry(head, struct inode, i_wb_list); 84 return list_entry(head, struct inode, i_wb_list);
93} 85}
94 86
87/*
88 * Include the creation of the trace points after defining the
89 * wb_writeback_work structure and inline functions so that the definition
90 * remains local to this file.
91 */
92#define CREATE_TRACE_POINTS
93#include <trace/events/writeback.h>
94
95/* Wakeup flusher thread or forker thread to fork it. Requires bdi->wb_lock. */ 95/* Wakeup flusher thread or forker thread to fork it. Requires bdi->wb_lock. */
96static void bdi_wakeup_flusher(struct backing_dev_info *bdi) 96static void bdi_wakeup_flusher(struct backing_dev_info *bdi)
97{ 97{
diff --git a/fs/ioprio.c b/fs/ioprio.c
index f84b380d65e5..0f1b9515213b 100644
--- a/fs/ioprio.c
+++ b/fs/ioprio.c
@@ -51,7 +51,7 @@ int set_task_ioprio(struct task_struct *task, int ioprio)
51 ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE); 51 ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
52 if (ioc) { 52 if (ioc) {
53 ioc_ioprio_changed(ioc, ioprio); 53 ioc_ioprio_changed(ioc, ioprio);
54	put_io_context(ioc, NULL);
54	put_io_context(ioc);
55 } 55 }
56 56
57 return err; 57 return err;
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
index a01cdad6aad1..eafb8d37a6fb 100644
--- a/fs/jffs2/erase.c
+++ b/fs/jffs2/erase.c
@@ -335,7 +335,7 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl
335 void *ebuf; 335 void *ebuf;
336 uint32_t ofs; 336 uint32_t ofs;
337 size_t retlen; 337 size_t retlen;
338	int ret = -EIO;
338	int ret;
339 unsigned long *wordebuf; 339 unsigned long *wordebuf;
340 340
341 ret = mtd_point(c->mtd, jeb->offset, c->sector_size, &retlen, 341 ret = mtd_point(c->mtd, jeb->offset, c->sector_size, &retlen,
diff --git a/fs/logfs/dev_mtd.c b/fs/logfs/dev_mtd.c
index e97404d611e0..9c501449450d 100644
--- a/fs/logfs/dev_mtd.c
+++ b/fs/logfs/dev_mtd.c
@@ -152,9 +152,6 @@ static struct page *logfs_mtd_find_first_sb(struct super_block *sb, u64 *ofs)
152 filler_t *filler = logfs_mtd_readpage; 152 filler_t *filler = logfs_mtd_readpage;
153 struct mtd_info *mtd = super->s_mtd; 153 struct mtd_info *mtd = super->s_mtd;
154 154
155 if (!mtd_can_have_bb(mtd))
156 return NULL;
157
158 *ofs = 0; 155 *ofs = 0;
159 while (mtd_block_isbad(mtd, *ofs)) { 156 while (mtd_block_isbad(mtd, *ofs)) {
160 *ofs += mtd->erasesize; 157 *ofs += mtd->erasesize;
@@ -172,9 +169,6 @@ static struct page *logfs_mtd_find_last_sb(struct super_block *sb, u64 *ofs)
172 filler_t *filler = logfs_mtd_readpage; 169 filler_t *filler = logfs_mtd_readpage;
173 struct mtd_info *mtd = super->s_mtd; 170 struct mtd_info *mtd = super->s_mtd;
174 171
175 if (!mtd_can_have_bb(mtd))
176 return NULL;
177
178 *ofs = mtd->size - mtd->erasesize; 172 *ofs = mtd->size - mtd->erasesize;
179 while (mtd_block_isbad(mtd, *ofs)) { 173 while (mtd_block_isbad(mtd, *ofs)) {
180 *ofs -= mtd->erasesize; 174 *ofs -= mtd->erasesize;
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index 886649627c3d..2a70fce70c65 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -603,6 +603,8 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
603 nsegs = argv[4].v_nmembs; 603 nsegs = argv[4].v_nmembs;
604 if (argv[4].v_size != argsz[4]) 604 if (argv[4].v_size != argsz[4])
605 goto out; 605 goto out;
606 if (nsegs > UINT_MAX / sizeof(__u64))
607 goto out;
606 608
607 /* 609 /*
608 * argv[4] points to segment numbers this ioctl cleans. We 610 * argv[4] points to segment numbers this ioctl cleans. We
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 9cde9edf9c4d..d4548dd49b02 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -198,26 +198,6 @@ static int proc_root_link(struct dentry *dentry, struct path *path)
198 return result; 198 return result;
199} 199}
200 200
201static struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
202{
203 struct mm_struct *mm;
204 int err;
205
206 err = mutex_lock_killable(&task->signal->cred_guard_mutex);
207 if (err)
208 return ERR_PTR(err);
209
210 mm = get_task_mm(task);
211 if (mm && mm != current->mm &&
212 !ptrace_may_access(task, mode)) {
213 mmput(mm);
214 mm = ERR_PTR(-EACCES);
215 }
216 mutex_unlock(&task->signal->cred_guard_mutex);
217
218 return mm;
219}
220
221struct mm_struct *mm_for_maps(struct task_struct *task) 201struct mm_struct *mm_for_maps(struct task_struct *task)
222{ 202{
223 return mm_access(task, PTRACE_MODE_READ); 203 return mm_access(task, PTRACE_MODE_READ);
@@ -711,6 +691,13 @@ static int mem_open(struct inode* inode, struct file* file)
711 if (IS_ERR(mm)) 691 if (IS_ERR(mm))
712 return PTR_ERR(mm); 692 return PTR_ERR(mm);
713 693
694 if (mm) {
695 /* ensure this mm_struct can't be freed */
696 atomic_inc(&mm->mm_count);
697 /* but do not pin its memory */
698 mmput(mm);
699 }
700
714 /* OK to pass negative loff_t, we can catch out-of-range */ 701 /* OK to pass negative loff_t, we can catch out-of-range */
715 file->f_mode |= FMODE_UNSIGNED_OFFSET; 702 file->f_mode |= FMODE_UNSIGNED_OFFSET;
716 file->private_data = mm; 703 file->private_data = mm;
@@ -718,57 +705,13 @@ static int mem_open(struct inode* inode, struct file* file)
718 return 0; 705 return 0;
719} 706}
720 707
721static ssize_t mem_read(struct file * file, char __user * buf, 708static ssize_t mem_rw(struct file *file, char __user *buf,
722 size_t count, loff_t *ppos) 709 size_t count, loff_t *ppos, int write)
723{ 710{
724 int ret;
725 char *page;
726 unsigned long src = *ppos;
727 struct mm_struct *mm = file->private_data; 711 struct mm_struct *mm = file->private_data;
728 712 unsigned long addr = *ppos;
729 if (!mm) 713 ssize_t copied;
730 return 0;
731
732 page = (char *)__get_free_page(GFP_TEMPORARY);
733 if (!page)
734 return -ENOMEM;
735
736 ret = 0;
737
738 while (count > 0) {
739 int this_len, retval;
740
741 this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
742 retval = access_remote_vm(mm, src, page, this_len, 0);
743 if (!retval) {
744 if (!ret)
745 ret = -EIO;
746 break;
747 }
748
749 if (copy_to_user(buf, page, retval)) {
750 ret = -EFAULT;
751 break;
752 }
753
754 ret += retval;
755 src += retval;
756 buf += retval;
757 count -= retval;
758 }
759 *ppos = src;
760
761 free_page((unsigned long) page);
762 return ret;
763}
764
765static ssize_t mem_write(struct file * file, const char __user *buf,
766 size_t count, loff_t *ppos)
767{
768 int copied;
769 char *page; 714 char *page;
770 unsigned long dst = *ppos;
771 struct mm_struct *mm = file->private_data;
772 715
773 if (!mm) 716 if (!mm)
774 return 0; 717 return 0;
@@ -778,31 +721,54 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
778 return -ENOMEM; 721 return -ENOMEM;
779 722
780 copied = 0; 723 copied = 0;
724 if (!atomic_inc_not_zero(&mm->mm_users))
725 goto free;
726
781 while (count > 0) { 727 while (count > 0) {
782 int this_len, retval; 728 int this_len = min_t(int, count, PAGE_SIZE);
783 729
784 this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count; 730 if (write && copy_from_user(page, buf, this_len)) {
785 if (copy_from_user(page, buf, this_len)) {
786 copied = -EFAULT; 731 copied = -EFAULT;
787 break; 732 break;
788 } 733 }
789 retval = access_remote_vm(mm, dst, page, this_len, 1); 734
790 if (!retval) { 735 this_len = access_remote_vm(mm, addr, page, this_len, write);
736 if (!this_len) {
791 if (!copied) 737 if (!copied)
792 copied = -EIO; 738 copied = -EIO;
793 break; 739 break;
794 } 740 }
795 copied += retval; 741
796 buf += retval; 742 if (!write && copy_to_user(buf, page, this_len)) {
797 dst += retval; 743 copied = -EFAULT;
798 count -= retval; 744 break;
745 }
746
747 buf += this_len;
748 addr += this_len;
749 copied += this_len;
750 count -= this_len;
799 } 751 }
800 *ppos = dst; 752 *ppos = addr;
801 753
754 mmput(mm);
755free:
802 free_page((unsigned long) page); 756 free_page((unsigned long) page);
803 return copied; 757 return copied;
804} 758}
805 759
760static ssize_t mem_read(struct file *file, char __user *buf,
761 size_t count, loff_t *ppos)
762{
763 return mem_rw(file, buf, count, ppos, 0);
764}
765
766static ssize_t mem_write(struct file *file, const char __user *buf,
767 size_t count, loff_t *ppos)
768{
769 return mem_rw(file, (char __user*)buf, count, ppos, 1);
770}
771
806loff_t mem_lseek(struct file *file, loff_t offset, int orig) 772loff_t mem_lseek(struct file *file, loff_t offset, int orig)
807{ 773{
808 switch (orig) { 774 switch (orig) {
@@ -822,8 +788,8 @@ loff_t mem_lseek(struct file *file, loff_t offset, int orig)
822static int mem_release(struct inode *inode, struct file *file) 788static int mem_release(struct inode *inode, struct file *file)
823{ 789{
824 struct mm_struct *mm = file->private_data; 790 struct mm_struct *mm = file->private_data;
825
826	mmput(mm);
791	if (mm)
792		mmdrop(mm);
827 return 0; 793 return 0;
828} 794}
829 795
diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h
index 292eff198030..ab7c53fe346e 100644
--- a/fs/xfs/kmem.h
+++ b/fs/xfs/kmem.h
@@ -110,10 +110,4 @@ kmem_zone_destroy(kmem_zone_t *zone)
110extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast); 110extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast);
111extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast); 111extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast);
112 112
113static inline int
114kmem_shake_allow(gfp_t gfp_mask)
115{
116 return ((gfp_mask & __GFP_WAIT) && (gfp_mask & __GFP_FS));
117}
118
119#endif /* __XFS_SUPPORT_KMEM_H__ */ 113#endif /* __XFS_SUPPORT_KMEM_H__ */
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index b4ff40b5f918..cbcb7bea38e2 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -63,82 +63,6 @@ int xfs_dqerror_mod = 33;
63static struct lock_class_key xfs_dquot_other_class; 63static struct lock_class_key xfs_dquot_other_class;
64 64
65/* 65/*
66 * Allocate and initialize a dquot. We don't always allocate fresh memory;
67 * we try to reclaim a free dquot if the number of incore dquots are above
68 * a threshold.
69 * The only field inside the core that gets initialized at this point
70 * is the d_id field. The idea is to fill in the entire q_core
71 * when we read in the on disk dquot.
72 */
73STATIC xfs_dquot_t *
74xfs_qm_dqinit(
75 xfs_mount_t *mp,
76 xfs_dqid_t id,
77 uint type)
78{
79 xfs_dquot_t *dqp;
80 boolean_t brandnewdquot;
81
82 brandnewdquot = xfs_qm_dqalloc_incore(&dqp);
83 dqp->dq_flags = type;
84 dqp->q_core.d_id = cpu_to_be32(id);
85 dqp->q_mount = mp;
86
87 /*
88 * No need to re-initialize these if this is a reclaimed dquot.
89 */
90 if (brandnewdquot) {
91 INIT_LIST_HEAD(&dqp->q_freelist);
92 mutex_init(&dqp->q_qlock);
93 init_waitqueue_head(&dqp->q_pinwait);
94
95 /*
96 * Because we want to use a counting completion, complete
97 * the flush completion once to allow a single access to
98 * the flush completion without blocking.
99 */
100 init_completion(&dqp->q_flush);
101 complete(&dqp->q_flush);
102
103 trace_xfs_dqinit(dqp);
104 } else {
105 /*
106 * Only the q_core portion was zeroed in dqreclaim_one().
107 * So, we need to reset others.
108 */
109 dqp->q_nrefs = 0;
110 dqp->q_blkno = 0;
111 INIT_LIST_HEAD(&dqp->q_mplist);
112 INIT_LIST_HEAD(&dqp->q_hashlist);
113 dqp->q_bufoffset = 0;
114 dqp->q_fileoffset = 0;
115 dqp->q_transp = NULL;
116 dqp->q_gdquot = NULL;
117 dqp->q_res_bcount = 0;
118 dqp->q_res_icount = 0;
119 dqp->q_res_rtbcount = 0;
120 atomic_set(&dqp->q_pincount, 0);
121 dqp->q_hash = NULL;
122 ASSERT(list_empty(&dqp->q_freelist));
123
124 trace_xfs_dqreuse(dqp);
125 }
126
127 /*
128 * In either case we need to make sure group quotas have a different
129 * lock class than user quotas, to make sure lockdep knows we can
130 * locks of one of each at the same time.
131 */
132 if (!(type & XFS_DQ_USER))
133 lockdep_set_class(&dqp->q_qlock, &xfs_dquot_other_class);
134
135 /*
136 * log item gets initialized later
137 */
138 return (dqp);
139}
140
141/*
142 * This is called to free all the memory associated with a dquot 66 * This is called to free all the memory associated with a dquot
143 */ 67 */
144void 68void
@@ -567,7 +491,32 @@ xfs_qm_dqread(
567 int error; 491 int error;
568 int cancelflags = 0; 492 int cancelflags = 0;
569 493
570 dqp = xfs_qm_dqinit(mp, id, type); 494
495 dqp = kmem_zone_zalloc(xfs_Gqm->qm_dqzone, KM_SLEEP);
496
497 dqp->dq_flags = type;
498 dqp->q_core.d_id = cpu_to_be32(id);
499 dqp->q_mount = mp;
500 INIT_LIST_HEAD(&dqp->q_freelist);
501 mutex_init(&dqp->q_qlock);
502 init_waitqueue_head(&dqp->q_pinwait);
503
504 /*
505 * Because we want to use a counting completion, complete
506 * the flush completion once to allow a single access to
507 * the flush completion without blocking.
508 */
509 init_completion(&dqp->q_flush);
510 complete(&dqp->q_flush);
511
512 /*
513 * Make sure group quotas have a different lock class than user
514 * quotas.
515 */
516 if (!(type & XFS_DQ_USER))
517 lockdep_set_class(&dqp->q_qlock, &xfs_dquot_other_class);
518
519 atomic_inc(&xfs_Gqm->qm_totaldquots);
571 520
572 trace_xfs_dqread(dqp); 521 trace_xfs_dqread(dqp);
573 522
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 541a508adea1..15ff5392fb65 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1489,7 +1489,7 @@ xlog_recover_add_to_cont_trans(
1489 old_ptr = item->ri_buf[item->ri_cnt-1].i_addr; 1489 old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
1490 old_len = item->ri_buf[item->ri_cnt-1].i_len; 1490 old_len = item->ri_buf[item->ri_cnt-1].i_len;
1491 1491
1492	ptr = kmem_realloc(old_ptr, len+old_len, old_len, 0u);
1492	ptr = kmem_realloc(old_ptr, len+old_len, old_len, KM_SLEEP);
1493 memcpy(&ptr[old_len], dp, len); /* d, s, l */ 1493 memcpy(&ptr[old_len], dp, len); /* d, s, l */
1494 item->ri_buf[item->ri_cnt-1].i_len += len; 1494 item->ri_buf[item->ri_cnt-1].i_len += len;
1495 item->ri_buf[item->ri_cnt-1].i_addr = ptr; 1495 item->ri_buf[item->ri_cnt-1].i_addr = ptr;
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 671f37eae1c7..c436def733bf 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -50,7 +50,6 @@
50 */ 50 */
51struct mutex xfs_Gqm_lock; 51struct mutex xfs_Gqm_lock;
52struct xfs_qm *xfs_Gqm; 52struct xfs_qm *xfs_Gqm;
53uint ndquot;
54 53
55kmem_zone_t *qm_dqzone; 54kmem_zone_t *qm_dqzone;
56kmem_zone_t *qm_dqtrxzone; 55kmem_zone_t *qm_dqtrxzone;
@@ -93,7 +92,6 @@ xfs_Gqm_init(void)
93 goto out_free_udqhash; 92 goto out_free_udqhash;
94 93
95 hsize /= sizeof(xfs_dqhash_t); 94 hsize /= sizeof(xfs_dqhash_t);
96 ndquot = hsize << 8;
97 95
98 xqm = kmem_zalloc(sizeof(xfs_qm_t), KM_SLEEP); 96 xqm = kmem_zalloc(sizeof(xfs_qm_t), KM_SLEEP);
99 xqm->qm_dqhashmask = hsize - 1; 97 xqm->qm_dqhashmask = hsize - 1;
@@ -137,7 +135,6 @@ xfs_Gqm_init(void)
137 xqm->qm_dqtrxzone = qm_dqtrxzone; 135 xqm->qm_dqtrxzone = qm_dqtrxzone;
138 136
139 atomic_set(&xqm->qm_totaldquots, 0); 137 atomic_set(&xqm->qm_totaldquots, 0);
140 xqm->qm_dqfree_ratio = XFS_QM_DQFREE_RATIO;
141 xqm->qm_nrefs = 0; 138 xqm->qm_nrefs = 0;
142 return xqm; 139 return xqm;
143 140
@@ -1600,216 +1597,150 @@ xfs_qm_init_quotainos(
1600 return 0; 1597 return 0;
1601} 1598}
1602 1599
1600STATIC void
1601xfs_qm_dqfree_one(
1602 struct xfs_dquot *dqp)
1603{
1604 struct xfs_mount *mp = dqp->q_mount;
1605 struct xfs_quotainfo *qi = mp->m_quotainfo;
1603 1606
1607 mutex_lock(&dqp->q_hash->qh_lock);
1608 list_del_init(&dqp->q_hashlist);
1609 dqp->q_hash->qh_version++;
1610 mutex_unlock(&dqp->q_hash->qh_lock);
1604 1611
1605/* 1612 mutex_lock(&qi->qi_dqlist_lock);
1606 * Pop the least recently used dquot off the freelist and recycle it. 1613 list_del_init(&dqp->q_mplist);
1607 */ 1614 qi->qi_dquots--;
1608STATIC struct xfs_dquot * 1615 qi->qi_dqreclaims++;
1609xfs_qm_dqreclaim_one(void) 1616 mutex_unlock(&qi->qi_dqlist_lock);
1617
1618 xfs_qm_dqdestroy(dqp);
1619}
1620
1621STATIC void
1622xfs_qm_dqreclaim_one(
1623 struct xfs_dquot *dqp,
1624 struct list_head *dispose_list)
1610{ 1625{
1611 struct xfs_dquot *dqp; 1626 struct xfs_mount *mp = dqp->q_mount;
1612 int restarts = 0; 1627 int error;
1613 1628
1614 mutex_lock(&xfs_Gqm->qm_dqfrlist_lock); 1629 if (!xfs_dqlock_nowait(dqp))
1615restart: 1630 goto out_busy;
1616 list_for_each_entry(dqp, &xfs_Gqm->qm_dqfrlist, q_freelist) {
1617 struct xfs_mount *mp = dqp->q_mount;
1618 1631
1619 if (!xfs_dqlock_nowait(dqp)) 1632 /*
1620 continue; 1633 * This dquot has acquired a reference in the meantime remove it from
1634 * the freelist and try again.
1635 */
1636 if (dqp->q_nrefs) {
1637 xfs_dqunlock(dqp);
1621 1638
1622 /* 1639 trace_xfs_dqreclaim_want(dqp);
1623 * This dquot has already been grabbed by dqlookup. 1640 XQM_STATS_INC(xqmstats.xs_qm_dqwants);
1624 * Remove it from the freelist and try again.
1625 */
1626 if (dqp->q_nrefs) {
1627 trace_xfs_dqreclaim_want(dqp);
1628 XQM_STATS_INC(xqmstats.xs_qm_dqwants);
1629
1630 list_del_init(&dqp->q_freelist);
1631 xfs_Gqm->qm_dqfrlist_cnt--;
1632 restarts++;
1633 goto dqunlock;
1634 }
1635 1641
1636 ASSERT(dqp->q_hash); 1642 list_del_init(&dqp->q_freelist);
1637 ASSERT(!list_empty(&dqp->q_mplist)); 1643 xfs_Gqm->qm_dqfrlist_cnt--;
1644 return;
1645 }
1638 1646
1639 /* 1647 ASSERT(dqp->q_hash);
1640 * Try to grab the flush lock. If this dquot is in the process 1648 ASSERT(!list_empty(&dqp->q_mplist));
1641 * of getting flushed to disk, we don't want to reclaim it.
1642 */
1643 if (!xfs_dqflock_nowait(dqp))
1644 goto dqunlock;
1645 1649
1646 /* 1650 /*
1647 * We have the flush lock so we know that this is not in the 1651 * Try to grab the flush lock. If this dquot is in the process of
1648 * process of being flushed. So, if this is dirty, flush it 1652 * getting flushed to disk, we don't want to reclaim it.
1649 * DELWRI so that we don't get a freelist infested with 1653 */
1650 * dirty dquots. 1654 if (!xfs_dqflock_nowait(dqp))
1651 */ 1655 goto out_busy;
1652 if (XFS_DQ_IS_DIRTY(dqp)) {
1653 int error;
1654 1656
1655 trace_xfs_dqreclaim_dirty(dqp); 1657 /*
1658 * We have the flush lock so we know that this is not in the
1659 * process of being flushed. So, if this is dirty, flush it
1660 * DELWRI so that we don't get a freelist infested with
1661 * dirty dquots.
1662 */
1663 if (XFS_DQ_IS_DIRTY(dqp)) {
1664 trace_xfs_dqreclaim_dirty(dqp);
1656 1665
1657 /* 1666 /*
1658 * We flush it delayed write, so don't bother 1667 * We flush it delayed write, so don't bother releasing the
1659 * releasing the freelist lock. 1668 * freelist lock.
1660 */ 1669 */
1661 error = xfs_qm_dqflush(dqp, SYNC_TRYLOCK); 1670 error = xfs_qm_dqflush(dqp, 0);
1662 if (error) { 1671 if (error) {
1663 xfs_warn(mp, "%s: dquot %p flush failed", 1672 xfs_warn(mp, "%s: dquot %p flush failed",
1664 __func__, dqp); 1673 __func__, dqp);
1665 }
1666 goto dqunlock;
1667 } 1674 }
1668 xfs_dqfunlock(dqp);
1669 1675
1670 /* 1676 /*
1671 * Prevent lookup now that we are going to reclaim the dquot. 1677 * Give the dquot another try on the freelist, as the
1672 * Once XFS_DQ_FREEING is set lookup won't touch the dquot, 1678 * flushing will take some time.
1673 * thus we can drop the lock now.
1674 */ 1679 */
1675 dqp->dq_flags |= XFS_DQ_FREEING; 1680 goto out_busy;
1676 xfs_dqunlock(dqp); 1681 }
1677 1682 xfs_dqfunlock(dqp);
1678 mutex_lock(&dqp->q_hash->qh_lock);
1679 list_del_init(&dqp->q_hashlist);
1680 dqp->q_hash->qh_version++;
1681 mutex_unlock(&dqp->q_hash->qh_lock);
1682
1683 mutex_lock(&mp->m_quotainfo->qi_dqlist_lock);
1684 list_del_init(&dqp->q_mplist);
1685 mp->m_quotainfo->qi_dquots--;
1686 mp->m_quotainfo->qi_dqreclaims++;
1687 mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock);
1688 1683
1689 ASSERT(dqp->q_nrefs == 0); 1684 /*
1690 list_del_init(&dqp->q_freelist); 1685 * Prevent lookups now that we are past the point of no return.
1691 xfs_Gqm->qm_dqfrlist_cnt--; 1686 */
1687 dqp->dq_flags |= XFS_DQ_FREEING;
1688 xfs_dqunlock(dqp);
1692 1689
1693 mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock); 1690 ASSERT(dqp->q_nrefs == 0);
1694 return dqp; 1691 list_move_tail(&dqp->q_freelist, dispose_list);
1695dqunlock: 1692 xfs_Gqm->qm_dqfrlist_cnt--;
1696 xfs_dqunlock(dqp);
1697 if (restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
1698 break;
1699 goto restart;
1700 }
1701 1693
1702 mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock); 1694 trace_xfs_dqreclaim_done(dqp);
1703 return NULL; 1695 XQM_STATS_INC(xqmstats.xs_qm_dqreclaims);
1704} 1696 return;
1705 1697
1706/* 1698out_busy:
1707 * Traverse the freelist of dquots and attempt to reclaim a maximum of 1699 xfs_dqunlock(dqp);
1708 * 'howmany' dquots. This operation races with dqlookup(), and attempts to
1709 * favor the lookup function ...
1710 */
1711STATIC int
1712xfs_qm_shake_freelist(
1713 int howmany)
1714{
1715 int nreclaimed = 0;
1716 xfs_dquot_t *dqp;
1717 1700
1718 if (howmany <= 0) 1701 /*
1719 return 0; 1702 * Move the dquot to the tail of the list so that we don't spin on it.
1703 */
1704 list_move_tail(&dqp->q_freelist, &xfs_Gqm->qm_dqfrlist);
1720 1705
1721 while (nreclaimed < howmany) { 1706 trace_xfs_dqreclaim_busy(dqp);
1722 dqp = xfs_qm_dqreclaim_one(); 1707 XQM_STATS_INC(xqmstats.xs_qm_dqreclaim_misses);
1723 if (!dqp)
1724 return nreclaimed;
1725 xfs_qm_dqdestroy(dqp);
1726 nreclaimed++;
1727 }
1728 return nreclaimed;
1729} 1708}
1730 1709
1731/*
1732 * The kmem_shake interface is invoked when memory is running low.
1733 */
1734/* ARGSUSED */
1735STATIC int 1710STATIC int
1736xfs_qm_shake( 1711xfs_qm_shake(
1737 struct shrinker *shrink, 1712 struct shrinker *shrink,
1738 struct shrink_control *sc) 1713 struct shrink_control *sc)
1739{ 1714{
1740 int ndqused, nfree, n; 1715 int nr_to_scan = sc->nr_to_scan;
1741 gfp_t gfp_mask = sc->gfp_mask; 1716 LIST_HEAD (dispose_list);
1742 1717 struct xfs_dquot *dqp;
1743 if (!kmem_shake_allow(gfp_mask))
1744 return 0;
1745 if (!xfs_Gqm)
1746 return 0;
1747
1748 nfree = xfs_Gqm->qm_dqfrlist_cnt; /* free dquots */
1749 /* incore dquots in all f/s's */
1750 ndqused = atomic_read(&xfs_Gqm->qm_totaldquots) - nfree;
1751
1752 ASSERT(ndqused >= 0);
1753 1718
1754 if (nfree <= ndqused && nfree < ndquot) 1719 if ((sc->gfp_mask & (__GFP_FS|__GFP_WAIT)) != (__GFP_FS|__GFP_WAIT))
1755 return 0; 1720 return 0;
1721 if (!nr_to_scan)
1722 goto out;
1756 1723
1757 ndqused *= xfs_Gqm->qm_dqfree_ratio; /* target # of free dquots */ 1724 mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
1758 n = nfree - ndqused - ndquot; /* # over target */ 1725 while (!list_empty(&xfs_Gqm->qm_dqfrlist)) {
1759 1726 if (nr_to_scan-- <= 0)
1760 return xfs_qm_shake_freelist(MAX(nfree, n)); 1727 break;
1761} 1728 dqp = list_first_entry(&xfs_Gqm->qm_dqfrlist, struct xfs_dquot,
1762 1729 q_freelist);
1763 1730 xfs_qm_dqreclaim_one(dqp, &dispose_list);
1764/*------------------------------------------------------------------*/
1765
1766/*
1767 * Return a new incore dquot. Depending on the number of
1768 * dquots in the system, we either allocate a new one on the kernel heap,
1769 * or reclaim a free one.
1770 * Return value is B_TRUE if we allocated a new dquot, B_FALSE if we managed
1771 * to reclaim an existing one from the freelist.
1772 */
1773boolean_t
1774xfs_qm_dqalloc_incore(
1775 xfs_dquot_t **O_dqpp)
1776{
1777 xfs_dquot_t *dqp;
1778
1779 /*
1780 * Check against high water mark to see if we want to pop
1781 * a nincompoop dquot off the freelist.
1782 */
1783 if (atomic_read(&xfs_Gqm->qm_totaldquots) >= ndquot) {
1784 /*
1785 * Try to recycle a dquot from the freelist.
1786 */
1787 if ((dqp = xfs_qm_dqreclaim_one())) {
1788 XQM_STATS_INC(xqmstats.xs_qm_dqreclaims);
1789 /*
1790 * Just zero the core here. The rest will get
1791 * reinitialized by caller. XXX we shouldn't even
1792 * do this zero ...
1793 */
1794 memset(&dqp->q_core, 0, sizeof(dqp->q_core));
1795 *O_dqpp = dqp;
1796 return B_FALSE;
1797 }
1798 XQM_STATS_INC(xqmstats.xs_qm_dqreclaim_misses);
1799 } 1731 }
1732 mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
1800 1733
1801 /* 1734 while (!list_empty(&dispose_list)) {
1802 * Allocate a brand new dquot on the kernel heap and return it 1735 dqp = list_first_entry(&dispose_list, struct xfs_dquot,
1803 * to the caller to initialize. 1736 q_freelist);
1804 */ 1737 list_del_init(&dqp->q_freelist);
1805 ASSERT(xfs_Gqm->qm_dqzone != NULL); 1738 xfs_qm_dqfree_one(dqp);
1806 *O_dqpp = kmem_zone_zalloc(xfs_Gqm->qm_dqzone, KM_SLEEP); 1739 }
1807 atomic_inc(&xfs_Gqm->qm_totaldquots); 1740out:
1808 1741 return (xfs_Gqm->qm_dqfrlist_cnt / 100) * sysctl_vfs_cache_pressure;
1809 return B_TRUE;
1810} 1742}
1811 1743
1812
1813/* 1744/*
1814 * Start a transaction and write the incore superblock changes to 1745 * Start a transaction and write the incore superblock changes to
1815 * disk. flags parameter indicates which fields have changed. 1746 * disk. flags parameter indicates which fields have changed.
diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h
index 9b4f3adefbc5..9a9b997e1a0a 100644
--- a/fs/xfs/xfs_qm.h
+++ b/fs/xfs/xfs_qm.h
@@ -26,24 +26,12 @@
26struct xfs_qm; 26struct xfs_qm;
27struct xfs_inode; 27struct xfs_inode;
28 28
29extern uint ndquot;
30extern struct mutex xfs_Gqm_lock; 29extern struct mutex xfs_Gqm_lock;
31extern struct xfs_qm *xfs_Gqm; 30extern struct xfs_qm *xfs_Gqm;
32extern kmem_zone_t *qm_dqzone; 31extern kmem_zone_t *qm_dqzone;
33extern kmem_zone_t *qm_dqtrxzone; 32extern kmem_zone_t *qm_dqtrxzone;
34 33
35/* 34/*
36 * Ditto, for xfs_qm_dqreclaim_one.
37 */
38#define XFS_QM_RECLAIM_MAX_RESTARTS 4
39
40/*
41 * Ideal ratio of free to in use dquots. Quota manager makes an attempt
42 * to keep this balance.
43 */
44#define XFS_QM_DQFREE_RATIO 2
45
46/*
47 * Dquot hashtable constants/threshold values. 35 * Dquot hashtable constants/threshold values.
48 */ 36 */
49#define XFS_QM_HASHSIZE_LOW (PAGE_SIZE / sizeof(xfs_dqhash_t)) 37#define XFS_QM_HASHSIZE_LOW (PAGE_SIZE / sizeof(xfs_dqhash_t))
@@ -74,7 +62,6 @@ typedef struct xfs_qm {
74 int qm_dqfrlist_cnt; 62 int qm_dqfrlist_cnt;
75 atomic_t qm_totaldquots; /* total incore dquots */ 63 atomic_t qm_totaldquots; /* total incore dquots */
76 uint qm_nrefs; /* file systems with quota on */ 64 uint qm_nrefs; /* file systems with quota on */
77 int qm_dqfree_ratio;/* ratio of free to inuse dquots */
78 kmem_zone_t *qm_dqzone; /* dquot mem-alloc zone */ 65 kmem_zone_t *qm_dqzone; /* dquot mem-alloc zone */
79 kmem_zone_t *qm_dqtrxzone; /* t_dqinfo of transactions */ 66 kmem_zone_t *qm_dqtrxzone; /* t_dqinfo of transactions */
80} xfs_qm_t; 67} xfs_qm_t;
@@ -143,7 +130,6 @@ extern int xfs_qm_quotacheck(xfs_mount_t *);
143extern int xfs_qm_write_sb_changes(xfs_mount_t *, __int64_t); 130extern int xfs_qm_write_sb_changes(xfs_mount_t *, __int64_t);
144 131
145/* dquot stuff */ 132/* dquot stuff */
146extern boolean_t xfs_qm_dqalloc_incore(xfs_dquot_t **);
147extern int xfs_qm_dqpurge_all(xfs_mount_t *, uint); 133extern int xfs_qm_dqpurge_all(xfs_mount_t *, uint);
148extern void xfs_qm_dqrele_all_inodes(xfs_mount_t *, uint); 134extern void xfs_qm_dqrele_all_inodes(xfs_mount_t *, uint);
149 135
diff --git a/fs/xfs/xfs_qm_stats.c b/fs/xfs/xfs_qm_stats.c
index 8671a0b32644..5729ba570877 100644
--- a/fs/xfs/xfs_qm_stats.c
+++ b/fs/xfs/xfs_qm_stats.c
@@ -42,9 +42,9 @@ static int xqm_proc_show(struct seq_file *m, void *v)
42{ 42{
43 /* maximum; incore; ratio free to inuse; freelist */ 43 /* maximum; incore; ratio free to inuse; freelist */
44 seq_printf(m, "%d\t%d\t%d\t%u\n", 44 seq_printf(m, "%d\t%d\t%d\t%u\n",
45			ndquot,
45			0,
46 xfs_Gqm? atomic_read(&xfs_Gqm->qm_totaldquots) : 0, 46 xfs_Gqm? atomic_read(&xfs_Gqm->qm_totaldquots) : 0,
47			xfs_Gqm? xfs_Gqm->qm_dqfree_ratio : 0,
47			0,
48 xfs_Gqm? xfs_Gqm->qm_dqfrlist_cnt : 0); 48 xfs_Gqm? xfs_Gqm->qm_dqfrlist_cnt : 0);
49 return 0; 49 return 0;
50} 50}
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 6b6df5802e95..bb134a819930 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -733,11 +733,10 @@ DEFINE_EVENT(xfs_dquot_class, name, \
733DEFINE_DQUOT_EVENT(xfs_dqadjust); 733DEFINE_DQUOT_EVENT(xfs_dqadjust);
734DEFINE_DQUOT_EVENT(xfs_dqreclaim_want); 734DEFINE_DQUOT_EVENT(xfs_dqreclaim_want);
735DEFINE_DQUOT_EVENT(xfs_dqreclaim_dirty); 735DEFINE_DQUOT_EVENT(xfs_dqreclaim_dirty);
736DEFINE_DQUOT_EVENT(xfs_dqreclaim_unlink);
736DEFINE_DQUOT_EVENT(xfs_dqreclaim_busy);
737DEFINE_DQUOT_EVENT(xfs_dqreclaim_done);
737DEFINE_DQUOT_EVENT(xfs_dqattach_found); 738DEFINE_DQUOT_EVENT(xfs_dqattach_found);
738DEFINE_DQUOT_EVENT(xfs_dqattach_get); 739DEFINE_DQUOT_EVENT(xfs_dqattach_get);
739DEFINE_DQUOT_EVENT(xfs_dqinit);
740DEFINE_DQUOT_EVENT(xfs_dqreuse);
741DEFINE_DQUOT_EVENT(xfs_dqalloc); 740DEFINE_DQUOT_EVENT(xfs_dqalloc);
742DEFINE_DQUOT_EVENT(xfs_dqtobp_read); 741DEFINE_DQUOT_EVENT(xfs_dqtobp_read);
743DEFINE_DQUOT_EVENT(xfs_dqread); 742DEFINE_DQUOT_EVENT(xfs_dqread);
diff --git a/include/asm-generic/pci_iomap.h b/include/asm-generic/pci_iomap.h
index 8de4b73e19e2..e58fcf891370 100644
--- a/include/asm-generic/pci_iomap.h
+++ b/include/asm-generic/pci_iomap.h
@@ -15,6 +15,16 @@ struct pci_dev;
15#ifdef CONFIG_PCI 15#ifdef CONFIG_PCI
16/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */ 16/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
17extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); 17extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
18/* Create a virtual mapping cookie for a port on a given PCI device.
19 * Do not call this directly, it exists to make it easier for architectures
20 * to override */
21#ifdef CONFIG_NO_GENERIC_PCI_IOPORT_MAP
22extern void __iomem *__pci_ioport_map(struct pci_dev *dev, unsigned long port,
23 unsigned int nr);
24#else
25#define __pci_ioport_map(dev, port, nr) ioport_map((port), (nr))
26#endif
27
18#else 28#else
19static inline void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max) 29static inline void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
20{ 30{
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index fd88a3945aa1..0092102db2de 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -18,7 +18,7 @@ struct pt_regs;
18#define BINPRM_BUF_SIZE 128 18#define BINPRM_BUF_SIZE 128
19 19
20#ifdef __KERNEL__ 20#ifdef __KERNEL__
21#include <linux/list.h>
21#include <linux/sched.h>
22 22
23#define CORENAME_MAX_SIZE 128 23#define CORENAME_MAX_SIZE 128
24 24
@@ -58,6 +58,7 @@ struct linux_binprm {
58 unsigned interp_flags; 58 unsigned interp_flags;
59 unsigned interp_data; 59 unsigned interp_data;
60 unsigned long loader, exec; 60 unsigned long loader, exec;
61 char tcomm[TASK_COMM_LEN];
61}; 62};
62 63
63#define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0 64#define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 3c1063acb2ab..94300fe46cce 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -56,6 +56,26 @@ static inline unsigned long hweight_long(unsigned long w)
56} 56}
57 57
58/** 58/**
59 * rol64 - rotate a 64-bit value left
60 * @word: value to rotate
61 * @shift: bits to roll
62 */
63static inline __u64 rol64(__u64 word, unsigned int shift)
64{
65 return (word << shift) | (word >> (64 - shift));
66}
67
68/**
69 * ror64 - rotate a 64-bit value right
70 * @word: value to rotate
71 * @shift: bits to roll
72 */
73static inline __u64 ror64(__u64 word, unsigned int shift)
74{
75 return (word >> shift) | (word << (64 - shift));
76}
77
78/**
59 * rol32 - rotate a 32-bit value left 79 * rol32 - rotate a 32-bit value left
60 * @word: value to rotate 80 * @word: value to rotate
61 * @shift: bits to roll 81 * @shift: bits to roll
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 6c6a1f008065..606cf339bb56 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -399,9 +399,6 @@ struct request_queue {
399 /* Throttle data */ 399 /* Throttle data */
400 struct throtl_data *td; 400 struct throtl_data *td;
401#endif 401#endif
402#ifdef CONFIG_LOCKDEP
403 int ioc_release_depth;
404#endif
405}; 402};
406 403
407#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ 404#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
index 35eae4b67503..7c48029dffe6 100644
--- a/include/linux/cdrom.h
+++ b/include/linux/cdrom.h
@@ -952,7 +952,8 @@ struct cdrom_device_info {
952 char name[20]; /* name of the device type */ 952 char name[20]; /* name of the device type */
953/* per-device flags */ 953/* per-device flags */
954 __u8 sanyo_slot : 2; /* Sanyo 3 CD changer support */ 954 __u8 sanyo_slot : 2; /* Sanyo 3 CD changer support */
955	__u8 reserved		: 6;	/* not used yet */
955	__u8 keeplocked		: 1;	/* CDROM_LOCKDOOR status */
956 __u8 reserved : 5; /* not used yet */
956 int cdda_method; /* see flags */ 957 int cdda_method; /* see flags */
957 __u8 last_sense; 958 __u8 last_sense;
958 __u8 media_written; /* dirty flag, DVD+RW bookkeeping */ 959 __u8 media_written; /* dirty flag, DVD+RW bookkeeping */
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index c24f3d7fbf1e..7d4e0356f329 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -42,12 +42,6 @@ struct elevator_ops
42 elevator_merged_fn *elevator_merged_fn; 42 elevator_merged_fn *elevator_merged_fn;
43 elevator_merge_req_fn *elevator_merge_req_fn; 43 elevator_merge_req_fn *elevator_merge_req_fn;
44 elevator_allow_merge_fn *elevator_allow_merge_fn; 44 elevator_allow_merge_fn *elevator_allow_merge_fn;
45
46 /*
47 * Used for both plugged list and elevator merging and in the
48 * former case called without queue_lock. Read comment on top of
49 * attempt_plug_merge() for details.
50 */
51 elevator_bio_merged_fn *elevator_bio_merged_fn; 45 elevator_bio_merged_fn *elevator_bio_merged_fn;
52 46
53 elevator_dispatch_fn *elevator_dispatch_fn; 47 elevator_dispatch_fn *elevator_dispatch_fn;
@@ -122,7 +116,6 @@ extern void elv_dispatch_add_tail(struct request_queue *, struct request *);
122extern void elv_add_request(struct request_queue *, struct request *, int); 116extern void elv_add_request(struct request_queue *, struct request *, int);
123extern void __elv_add_request(struct request_queue *, struct request *, int); 117extern void __elv_add_request(struct request_queue *, struct request *, int);
124extern int elv_merge(struct request_queue *, struct request **, struct bio *); 118extern int elv_merge(struct request_queue *, struct request **, struct bio *);
125extern int elv_try_merge(struct request *, struct bio *);
126extern void elv_merge_requests(struct request_queue *, struct request *, 119extern void elv_merge_requests(struct request_queue *, struct request *,
127 struct request *); 120 struct request *);
128extern void elv_merged_request(struct request_queue *, struct request *, int); 121extern void elv_merged_request(struct request_queue *, struct request *, int);
@@ -155,7 +148,7 @@ extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
155extern int elevator_init(struct request_queue *, char *); 148extern int elevator_init(struct request_queue *, char *);
156extern void elevator_exit(struct elevator_queue *); 149extern void elevator_exit(struct elevator_queue *);
157extern int elevator_change(struct request_queue *, const char *); 150extern int elevator_change(struct request_queue *, const char *);
158extern int elv_rq_merge_ok(struct request *, struct bio *);
151extern bool elv_rq_merge_ok(struct request *, struct bio *);
159 152
160/* 153/*
161 * Helper functions. 154 * Helper functions.
diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h
index b5ca4b2c08ec..004ff33ab38e 100644
--- a/include/linux/gpio_keys.h
+++ b/include/linux/gpio_keys.h
@@ -1,6 +1,8 @@
1#ifndef _GPIO_KEYS_H 1#ifndef _GPIO_KEYS_H
2#define _GPIO_KEYS_H 2#define _GPIO_KEYS_H
3 3
4struct device;
5
4struct gpio_keys_button { 6struct gpio_keys_button {
5 /* Configuration parameters */ 7 /* Configuration parameters */
6 unsigned int code; /* input event code (KEY_*, SW_*) */ 8 unsigned int code; /* input event code (KEY_*, SW_*) */
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 62b908e0e591..0ae065a5fcb2 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -35,7 +35,7 @@
35#include <linux/mod_devicetable.h> 35#include <linux/mod_devicetable.h>
36 36
37 37
38#define MAX_PAGE_BUFFER_COUNT				18
38#define MAX_PAGE_BUFFER_COUNT				19
39#define MAX_MULTIPAGE_BUFFER_COUNT 32 /* 128K */ 39#define MAX_MULTIPAGE_BUFFER_COUNT 32 /* 128K */
40 40
41#pragma pack(push, 1) 41#pragma pack(push, 1)
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 7e1371c4bccf..119773eebe31 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -133,7 +133,7 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc)
133 133
134struct task_struct; 134struct task_struct;
135#ifdef CONFIG_BLOCK 135#ifdef CONFIG_BLOCK
136void put_io_context(struct io_context *ioc, struct request_queue *locked_q);
136void put_io_context(struct io_context *ioc);
137void exit_io_context(struct task_struct *task); 137void exit_io_context(struct task_struct *task);
138struct io_context *get_task_io_context(struct task_struct *task, 138struct io_context *get_task_io_context(struct task_struct *task,
139 gfp_t gfp_flags, int node); 139 gfp_t gfp_flags, int node);
@@ -141,8 +141,7 @@ void ioc_ioprio_changed(struct io_context *ioc, int ioprio);
141void ioc_cgroup_changed(struct io_context *ioc); 141void ioc_cgroup_changed(struct io_context *ioc);
142#else 142#else
143struct io_context; 143struct io_context;
144static inline void put_io_context(struct io_context *ioc,
145				  struct request_queue *locked_q) { }
144static inline void put_io_context(struct io_context *ioc) { }
146static inline void exit_io_context(struct task_struct *task) { } 145static inline void exit_io_context(struct task_struct *task) { }
147#endif 146#endif
148 147
diff --git a/include/linux/lp8727.h b/include/linux/lp8727.h
index d21fa2865bf4..d21fa2865bf4 100755..100644
--- a/include/linux/lp8727.h
+++ b/include/linux/lp8727.h
diff --git a/include/linux/mfd/twl6040.h b/include/linux/mfd/twl6040.h
index 2463c2619596..9bc9ac651dad 100644
--- a/include/linux/mfd/twl6040.h
+++ b/include/linux/mfd/twl6040.h
@@ -187,8 +187,10 @@ struct twl6040 {
187 int rev; 187 int rev;
188 u8 vibra_ctrl_cache[2]; 188 u8 vibra_ctrl_cache[2];
189 189
190 /* PLL configuration */
190 int pll; 191 int pll;
191 unsigned int sysclk; 192 unsigned int sysclk;
193 unsigned int mclk;
192 194
193 unsigned int irq; 195 unsigned int irq;
194 unsigned int irq_base; 196 unsigned int irq_base;
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 9f22ba572de0..19a41d1737af 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -217,6 +217,7 @@ struct mmc_card {
217#define MMC_CARD_SDXC (1<<6) /* card is SDXC */ 217#define MMC_CARD_SDXC (1<<6) /* card is SDXC */
218#define MMC_CARD_REMOVED (1<<7) /* card has been removed */ 218#define MMC_CARD_REMOVED (1<<7) /* card has been removed */
219#define MMC_STATE_HIGHSPEED_200 (1<<8) /* card is in HS200 mode */ 219#define MMC_STATE_HIGHSPEED_200 (1<<8) /* card is in HS200 mode */
220#define MMC_STATE_SLEEP (1<<9) /* card is in sleep state */
220 unsigned int quirks; /* card quirks */ 221 unsigned int quirks; /* card quirks */
221#define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */ 222#define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */
222#define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1) /* use func->cur_blksize */ 223#define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1) /* use func->cur_blksize */
@@ -382,6 +383,7 @@ static inline void __maybe_unused remove_quirk(struct mmc_card *card, int data)
382#define mmc_sd_card_uhs(c) ((c)->state & MMC_STATE_ULTRAHIGHSPEED) 383#define mmc_sd_card_uhs(c) ((c)->state & MMC_STATE_ULTRAHIGHSPEED)
383#define mmc_card_ext_capacity(c) ((c)->state & MMC_CARD_SDXC) 384#define mmc_card_ext_capacity(c) ((c)->state & MMC_CARD_SDXC)
384#define mmc_card_removed(c) ((c) && ((c)->state & MMC_CARD_REMOVED)) 385#define mmc_card_removed(c) ((c) && ((c)->state & MMC_CARD_REMOVED))
386#define mmc_card_is_sleep(c) ((c)->state & MMC_STATE_SLEEP)
385 387
386#define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT) 388#define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT)
387#define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY) 389#define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY)
@@ -393,7 +395,9 @@ static inline void __maybe_unused remove_quirk(struct mmc_card *card, int data)
393#define mmc_sd_card_set_uhs(c) ((c)->state |= MMC_STATE_ULTRAHIGHSPEED) 395#define mmc_sd_card_set_uhs(c) ((c)->state |= MMC_STATE_ULTRAHIGHSPEED)
394#define mmc_card_set_ext_capacity(c) ((c)->state |= MMC_CARD_SDXC) 396#define mmc_card_set_ext_capacity(c) ((c)->state |= MMC_CARD_SDXC)
395#define mmc_card_set_removed(c) ((c)->state |= MMC_CARD_REMOVED) 397#define mmc_card_set_removed(c) ((c)->state |= MMC_CARD_REMOVED)
398#define mmc_card_set_sleep(c) ((c)->state |= MMC_STATE_SLEEP)
396 399
400#define mmc_card_clr_sleep(c) ((c)->state &= ~MMC_STATE_SLEEP)
397/* 401/*
398 * Quirk add/remove for MMC products. 402 * Quirk add/remove for MMC products.
399 */ 403 */
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index e8779c6d1759..aae5d1f1bb39 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -14,6 +14,8 @@
14#ifndef LINUX_MMC_DW_MMC_H 14#ifndef LINUX_MMC_DW_MMC_H
15#define LINUX_MMC_DW_MMC_H 15#define LINUX_MMC_DW_MMC_H
16 16
17#include <linux/scatterlist.h>
18
17#define MAX_MCI_SLOTS 2 19#define MAX_MCI_SLOTS 2
18 20
19enum dw_mci_state { 21enum dw_mci_state {
@@ -40,7 +42,7 @@ struct mmc_data;
40 * @lock: Spinlock protecting the queue and associated data. 42 * @lock: Spinlock protecting the queue and associated data.
41 * @regs: Pointer to MMIO registers. 43 * @regs: Pointer to MMIO registers.
42 * @sg: Scatterlist entry currently being processed by PIO code, if any. 44 * @sg: Scatterlist entry currently being processed by PIO code, if any.
43 * @pio_offset: Offset into the current scatterlist entry.
45 * @sg_miter: PIO mapping scatterlist iterator.
44 * @cur_slot: The slot which is currently using the controller. 46 * @cur_slot: The slot which is currently using the controller.
45 * @mrq: The request currently being processed on @cur_slot, 47 * @mrq: The request currently being processed on @cur_slot,
46 * or NULL if the controller is idle. 48 * or NULL if the controller is idle.
@@ -115,7 +117,7 @@ struct dw_mci {
115 void __iomem *regs; 117 void __iomem *regs;
116 118
117 struct scatterlist *sg; 119 struct scatterlist *sg;
118	unsigned int		pio_offset;
120	struct sg_mapping_iter	sg_miter;
119 121
120 struct dw_mci_slot *cur_slot; 122 struct dw_mci_slot *cur_slot;
121 struct mmc_request *mrq; 123 struct mmc_request *mrq;
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 0beba1e5e1ed..ee2b0363c040 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -257,6 +257,7 @@ struct mmc_host {
257#define MMC_CAP2_HS200_1_2V_SDR (1 << 6) /* can support */ 257#define MMC_CAP2_HS200_1_2V_SDR (1 << 6) /* can support */
258#define MMC_CAP2_HS200 (MMC_CAP2_HS200_1_8V_SDR | \ 258#define MMC_CAP2_HS200 (MMC_CAP2_HS200_1_8V_SDR | \
259 MMC_CAP2_HS200_1_2V_SDR) 259 MMC_CAP2_HS200_1_2V_SDR)
260#define MMC_CAP2_BROKEN_VOLTAGE (1 << 7) /* Use the broken voltage */
260 261
261 mmc_pm_flag_t pm_caps; /* supported pm features */ 262 mmc_pm_flag_t pm_caps; /* supported pm features */
262 unsigned int power_notify_type; 263 unsigned int power_notify_type;
@@ -444,4 +445,23 @@ static inline int mmc_boot_partition_access(struct mmc_host *host)
444 return !(host->caps2 & MMC_CAP2_BOOTPART_NOACC); 445 return !(host->caps2 & MMC_CAP2_BOOTPART_NOACC);
445} 446}
446 447
448#ifdef CONFIG_MMC_CLKGATE
449void mmc_host_clk_hold(struct mmc_host *host);
450void mmc_host_clk_release(struct mmc_host *host);
451unsigned int mmc_host_clk_rate(struct mmc_host *host);
452
453#else
454static inline void mmc_host_clk_hold(struct mmc_host *host)
455{
456}
457
458static inline void mmc_host_clk_release(struct mmc_host *host)
459{
460}
461
462static inline unsigned int mmc_host_clk_rate(struct mmc_host *host)
463{
464 return host->ios.clock;
465}
466#endif
447#endif /* LINUX_MMC_HOST_H */ 467#endif /* LINUX_MMC_HOST_H */
diff --git a/include/linux/mpi.h b/include/linux/mpi.h
index 06f88994ccaa..d02cca6cc8ce 100644
--- a/include/linux/mpi.h
+++ b/include/linux/mpi.h
@@ -57,8 +57,6 @@ struct gcry_mpi {
57 57
58typedef struct gcry_mpi *MPI; 58typedef struct gcry_mpi *MPI;
59 59
60#define MPI_NULL NULL
61
62#define mpi_get_nlimbs(a) ((a)->nlimbs) 60#define mpi_get_nlimbs(a) ((a)->nlimbs)
63#define mpi_is_neg(a) ((a)->sign) 61#define mpi_is_neg(a) ((a)->sign)
64 62
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 221295208fd0..d43dc25af82e 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -427,9 +427,7 @@ static inline int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
427 427
428static inline int mtd_suspend(struct mtd_info *mtd) 428static inline int mtd_suspend(struct mtd_info *mtd)
429{ 429{
430	if (!mtd->suspend)
431		return -EOPNOTSUPP;
432	return mtd->suspend(mtd);
430	return mtd->suspend ? mtd->suspend(mtd) : 0;
433} 431}
434 432
435static inline void mtd_resume(struct mtd_info *mtd) 433static inline void mtd_resume(struct mtd_info *mtd)
@@ -489,7 +487,7 @@ static inline int mtd_has_oob(const struct mtd_info *mtd)
489 487
490static inline int mtd_can_have_bb(const struct mtd_info *mtd) 488static inline int mtd_can_have_bb(const struct mtd_info *mtd)
491{ 489{
492	return 0;
490	return !!mtd->block_isbad;
493} 491}
494 492
495 /* Kernel-side ioctl definitions */ 493 /* Kernel-side ioctl definitions */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 08855613ceb3..abb2776be1ba 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -587,6 +587,7 @@ struct hw_perf_event {
587 u64 sample_period; 587 u64 sample_period;
588 u64 last_period; 588 u64 last_period;
589 local64_t period_left; 589 local64_t period_left;
590 u64 interrupts_seq;
590 u64 interrupts; 591 u64 interrupts;
591 592
592 u64 freq_time_stamp; 593 u64 freq_time_stamp;
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
index e5bbcbaa6f57..4d99e4e6ef83 100644
--- a/include/linux/pm_qos.h
+++ b/include/linux/pm_qos.h
@@ -110,7 +110,19 @@ static inline void pm_qos_remove_request(struct pm_qos_request *req)
110 { return; } 110 { return; }
111 111
112static inline int pm_qos_request(int pm_qos_class) 112static inline int pm_qos_request(int pm_qos_class)
113			{ return 0; }
113{
114 switch (pm_qos_class) {
115 case PM_QOS_CPU_DMA_LATENCY:
116 return PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
117 case PM_QOS_NETWORK_LATENCY:
118 return PM_QOS_NETWORK_LAT_DEFAULT_VALUE;
119 case PM_QOS_NETWORK_THROUGHPUT:
120 return PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE;
121 default:
122 return PM_QOS_DEFAULT_VALUE;
123 }
124}
125
114static inline int pm_qos_add_notifier(int pm_qos_class, 126static inline int pm_qos_add_notifier(int pm_qos_class,
115 struct notifier_block *notifier) 127 struct notifier_block *notifier)
116 { return 0; } 128 { return 0; }
diff --git a/include/linux/proportions.h b/include/linux/proportions.h
index ef35bb73f69b..26a8a4ed9b07 100644
--- a/include/linux/proportions.h
+++ b/include/linux/proportions.h
@@ -81,7 +81,11 @@ void prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl)
81 * Limit the time part in order to ensure there are some bits left for the 81 * Limit the time part in order to ensure there are some bits left for the
82 * cycle counter and fraction multiply. 82 * cycle counter and fraction multiply.
83 */ 83 */
84#if BITS_PER_LONG == 32
84#define PROP_MAX_SHIFT (3*BITS_PER_LONG/4) 85#define PROP_MAX_SHIFT (3*BITS_PER_LONG/4)
86#else
87#define PROP_MAX_SHIFT (BITS_PER_LONG/2)
88#endif
85 89
86#define PROP_FRAC_SHIFT (BITS_PER_LONG - PROP_MAX_SHIFT - 1) 90#define PROP_FRAC_SHIFT (BITS_PER_LONG - PROP_MAX_SHIFT - 1)
87#define PROP_FRAC_BASE (1UL << PROP_FRAC_SHIFT) 91#define PROP_FRAC_BASE (1UL << PROP_FRAC_SHIFT)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2234985a5e65..7d379a6bfd88 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2259,6 +2259,12 @@ static inline void mmdrop(struct mm_struct * mm)
2259extern void mmput(struct mm_struct *); 2259extern void mmput(struct mm_struct *);
2260/* Grab a reference to a task's mm, if it is not already going away */ 2260/* Grab a reference to a task's mm, if it is not already going away */
2261extern struct mm_struct *get_task_mm(struct task_struct *task); 2261extern struct mm_struct *get_task_mm(struct task_struct *task);
2262/*
2263 * Grab a reference to a task's mm, if it is not already going away
2264 * and ptrace_may_access with the mode parameter passed to it
2265 * succeeds.
2266 */
2267extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
2262/* Remove the current tasks stale references to the old mm_struct */ 2268/* Remove the current tasks stale references to the old mm_struct */
2263extern void mm_release(struct task_struct *, struct mm_struct *); 2269extern void mm_release(struct task_struct *, struct mm_struct *);
2264/* Allocate a new mm structure and copy contents from tsk->mm */ 2270/* Allocate a new mm structure and copy contents from tsk->mm */
diff --git a/include/linux/sh_dma.h b/include/linux/sh_dma.h
index 8cd7fe59cf1a..425450b980b8 100644
--- a/include/linux/sh_dma.h
+++ b/include/linux/sh_dma.h
@@ -70,6 +70,7 @@ struct sh_dmae_pdata {
70 unsigned int needs_tend_set:1; 70 unsigned int needs_tend_set:1;
71 unsigned int no_dmars:1; 71 unsigned int no_dmars:1;
72 unsigned int chclr_present:1; 72 unsigned int chclr_present:1;
73 unsigned int slave_only:1;
73}; 74};
74 75
75/* DMA register */ 76/* DMA register */
diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h
index 61b29057b054..3b6f628880f8 100644
--- a/include/linux/usb/ch9.h
+++ b/include/linux/usb/ch9.h
@@ -589,7 +589,7 @@ static inline int usb_endpoint_is_isoc_out(
589 */ 589 */
590static inline int usb_endpoint_maxp(const struct usb_endpoint_descriptor *epd) 590static inline int usb_endpoint_maxp(const struct usb_endpoint_descriptor *epd)
591{ 591{
592 return le16_to_cpu(epd->wMaxPacketSize); 592 return __le16_to_cpu(epd->wMaxPacketSize);
593} 593}
594 594
595/*-------------------------------------------------------------------------*/ 595/*-------------------------------------------------------------------------*/
diff --git a/include/net/flow.h b/include/net/flow.h
index 9b582437fbea..6c469dbdb917 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -93,6 +93,16 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
93 fl4->fl4_dport = dport; 93 fl4->fl4_dport = dport;
94 fl4->fl4_sport = sport; 94 fl4->fl4_sport = sport;
95} 95}
96
97/* Reset some input parameters after previous lookup */
98static inline void flowi4_update_output(struct flowi4 *fl4, int oif, __u8 tos,
99 __be32 daddr, __be32 saddr)
100{
101 fl4->flowi4_oif = oif;
102 fl4->flowi4_tos = tos;
103 fl4->daddr = daddr;
104 fl4->saddr = saddr;
105}
96 106
97 107
98struct flowi6 { 108struct flowi6 {
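flowi4_update_output() exists for the two callers patched in include/net/route.h further down: after a first route lookup has learned or changed the addresses, ports or bound device, the flow key is rewritten before the second ip_route_output_flow() call so both lookups are made with consistent parameters. A compact standalone sketch of that two-pass idea; every name below is a made-up stand-in, not a kernel API:

    #include <stdio.h>
    #include <stdint.h>

    struct flow_key {
        int oif;
        uint8_t tos;
        uint32_t daddr, saddr;
    };

    /* Stand-in "lookup" that picks a source address on the first pass. */
    static uint32_t fake_lookup(struct flow_key *key)
    {
        return key->saddr ? key->saddr : 0x0a000001; /* pretend 10.0.0.1 was chosen */
    }

    static void flow_update_output(struct flow_key *key, int oif, uint8_t tos,
                                   uint32_t daddr, uint32_t saddr)
    {
        key->oif = oif;
        key->tos = tos;
        key->daddr = daddr;
        key->saddr = saddr;
    }

    int main(void)
    {
        struct flow_key key = { .oif = 2, .tos = 0, .daddr = 0x0a000002, .saddr = 0 };
        uint32_t learned = fake_lookup(&key);

        /* Re-key before the second lookup, as ip_route_connect() now does. */
        flow_update_output(&key, key.oif, key.tos, key.daddr, learned);
        printf("second lookup keyed on saddr=%#lx\n", (unsigned long)fake_lookup(&key));
        return 0;
    }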
diff --git a/include/net/netprio_cgroup.h b/include/net/netprio_cgroup.h
index 7b2d43139c8e..d58fdec47597 100644
--- a/include/net/netprio_cgroup.h
+++ b/include/net/netprio_cgroup.h
@@ -37,19 +37,51 @@ extern int net_prio_subsys_id;
37 37
38extern void sock_update_netprioidx(struct sock *sk); 38extern void sock_update_netprioidx(struct sock *sk);
39 39
40static inline struct cgroup_netprio_state 40#if IS_BUILTIN(CONFIG_NETPRIO_CGROUP)
41 *task_netprio_state(struct task_struct *p) 41
42static inline u32 task_netprioidx(struct task_struct *p)
42{ 43{
43#if IS_ENABLED(CONFIG_NETPRIO_CGROUP) 44 struct cgroup_netprio_state *state;
44 return container_of(task_subsys_state(p, net_prio_subsys_id), 45 u32 idx;
45 struct cgroup_netprio_state, css); 46
46#else 47 rcu_read_lock();
47 return NULL; 48 state = container_of(task_subsys_state(p, net_prio_subsys_id),
48#endif 49 struct cgroup_netprio_state, css);
50 idx = state->prioidx;
51 rcu_read_unlock();
52 return idx;
53}
54
55#elif IS_MODULE(CONFIG_NETPRIO_CGROUP)
56
57static inline u32 task_netprioidx(struct task_struct *p)
58{
59 struct cgroup_netprio_state *state;
60 int subsys_id;
61 u32 idx = 0;
62
63 rcu_read_lock();
64 subsys_id = rcu_dereference_index_check(net_prio_subsys_id,
65 rcu_read_lock_held());
66 if (subsys_id >= 0) {
67 state = container_of(task_subsys_state(p, subsys_id),
68 struct cgroup_netprio_state, css);
69 idx = state->prioidx;
70 }
71 rcu_read_unlock();
72 return idx;
49} 73}
50 74
51#else 75#else
52 76
77static inline u32 task_netprioidx(struct task_struct *p)
78{
79 return 0;
80}
81
82#endif /* CONFIG_NETPRIO_CGROUP */
83
84#else
53#define sock_update_netprioidx(sk) 85#define sock_update_netprioidx(sk)
54#endif 86#endif
55 87
diff --git a/include/net/route.h b/include/net/route.h
index 91855d185b53..b1c0d5b564c2 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -270,6 +270,7 @@ static inline struct rtable *ip_route_connect(struct flowi4 *fl4,
270 if (IS_ERR(rt)) 270 if (IS_ERR(rt))
271 return rt; 271 return rt;
272 ip_rt_put(rt); 272 ip_rt_put(rt);
273 flowi4_update_output(fl4, oif, tos, fl4->daddr, fl4->saddr);
273 } 274 }
274 security_sk_classify_flow(sk, flowi4_to_flowi(fl4)); 275 security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
275 return ip_route_output_flow(net, fl4, sk); 276 return ip_route_output_flow(net, fl4, sk);
@@ -284,6 +285,9 @@ static inline struct rtable *ip_route_newports(struct flowi4 *fl4, struct rtable
284 fl4->fl4_dport = dport; 285 fl4->fl4_dport = dport;
285 fl4->fl4_sport = sport; 286 fl4->fl4_sport = sport;
286 ip_rt_put(rt); 287 ip_rt_put(rt);
288 flowi4_update_output(fl4, sk->sk_bound_dev_if,
289 RT_CONN_FLAGS(sk), fl4->daddr,
290 fl4->saddr);
287 security_sk_classify_flow(sk, flowi4_to_flowi(fl4)); 291 security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
288 return ip_route_output_flow(sock_net(sk), fl4, sk); 292 return ip_route_output_flow(sock_net(sk), fl4, sk);
289 } 293 }
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index f6bb08b73ca4..55ce96b53b09 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -220,9 +220,16 @@ struct tcf_proto {
220 220
221struct qdisc_skb_cb { 221struct qdisc_skb_cb {
222 unsigned int pkt_len; 222 unsigned int pkt_len;
223 long data[]; 223 unsigned char data[24];
224}; 224};
225 225
226static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
227{
228 struct qdisc_skb_cb *qcb;
229 BUILD_BUG_ON(sizeof(skb->cb) < sizeof(unsigned int) + sz);
230 BUILD_BUG_ON(sizeof(qcb->data) < sz);
231}
232
226static inline int qdisc_qlen(const struct Qdisc *q) 233static inline int qdisc_qlen(const struct Qdisc *q)
227{ 234{
228 return q->q.qlen; 235 return q->q.qlen;
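qdisc_skb_cb now reserves a fixed 24-byte private area instead of an open-ended long array, and qdisc_cb_private_validate() lets each qdisc assert at compile time that its private cb structure fits, together with pkt_len, inside skb->cb and inside that 24-byte area; BUILD_BUG_ON() turns a sizing mistake into a build failure rather than silent corruption. The same compile-time guard can be written in plain C11 with _Static_assert:

    #include <stdio.h>
    #include <string.h>

    #define CB_BYTES 24                 /* mirrors qdisc_skb_cb::data[24] */

    struct my_qdisc_cb {                /* per-qdisc private state, illustrative */
        unsigned int enqueue_time;
        unsigned int flags;
    };

    /* Fail the build, not the run, if the private struct outgrows the cb area. */
    _Static_assert(sizeof(struct my_qdisc_cb) <= CB_BYTES,
                   "qdisc cb struct too large");

    int main(void)
    {
        unsigned char cb[CB_BYTES];
        struct my_qdisc_cb priv = { .enqueue_time = 1, .flags = 2 };

        memcpy(cb, &priv, sizeof(priv));        /* safe thanks to the assert */
        printf("private cb uses %zu of %d bytes\n", sizeof(priv), CB_BYTES);
        return 0;
    }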
diff --git a/include/net/tcp.h b/include/net/tcp.h
index d49db0113a06..42c29bfbcee3 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -273,6 +273,14 @@ static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
273 return seq3 - seq2 >= seq1 - seq2; 273 return seq3 - seq2 >= seq1 - seq2;
274} 274}
275 275
276static inline bool tcp_out_of_memory(struct sock *sk)
277{
278 if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
279 sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
280 return true;
281 return false;
282}
283
276static inline bool tcp_too_many_orphans(struct sock *sk, int shift) 284static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
277{ 285{
278 struct percpu_counter *ocp = sk->sk_prot->orphan_count; 286 struct percpu_counter *ocp = sk->sk_prot->orphan_count;
@@ -283,13 +291,11 @@ static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
283 if (orphans << shift > sysctl_tcp_max_orphans) 291 if (orphans << shift > sysctl_tcp_max_orphans)
284 return true; 292 return true;
285 } 293 }
286
287 if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
288 sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
289 return true;
290 return false; 294 return false;
291} 295}
292 296
297extern bool tcp_check_oom(struct sock *sk, int shift);
298
293/* syncookies: remember time of last synqueue overflow */ 299/* syncookies: remember time of last synqueue overflow */
294static inline void tcp_synq_overflow(struct sock *sk) 300static inline void tcp_synq_overflow(struct sock *sk)
295{ 301{
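The write-queue/memory-pressure test that used to sit inline in tcp_too_many_orphans() is split out as tcp_out_of_memory(), and a new tcp_check_oom() helper is declared so callers can act on that state independently of the orphan-count check. A trimmed-down standalone illustration of the split, with the sysctl limits replaced by illustrative constants:

    #include <stdio.h>
    #include <stdbool.h>

    /* Stand-ins for sk_wmem_queued / sk_memory_allocated and their limits. */
    static bool out_of_memory(long queued, long allocated)
    {
        return queued > 4096 && allocated > 65536;   /* illustrative limits */
    }

    static bool too_many_orphans(long orphans, int shift)
    {
        return (orphans << shift) > 256;             /* illustrative limit */
    }

    int main(void)
    {
        long queued = 8192, allocated = 100000, orphans = 10;

        /* Callers can now ask the two questions independently,
         * as tcp_check_oom() does for the memory half. */
        printf("oom=%d orphans=%d\n",
               out_of_memory(queued, allocated), too_many_orphans(orphans, 0));
        return 0;
    }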
diff --git a/include/sound/core.h b/include/sound/core.h
index 5ab255f196cc..cea1b5426dfa 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -417,6 +417,7 @@ static inline int __snd_bug_on(int cond)
417#define gameport_get_port_data(gp) (gp)->port_data 417#define gameport_get_port_data(gp) (gp)->port_data
418#endif 418#endif
419 419
420#ifdef CONFIG_PCI
420/* PCI quirk list helper */ 421/* PCI quirk list helper */
421struct snd_pci_quirk { 422struct snd_pci_quirk {
422 unsigned short subvendor; /* PCI subvendor ID */ 423 unsigned short subvendor; /* PCI subvendor ID */
@@ -456,5 +457,6 @@ snd_pci_quirk_lookup(struct pci_dev *pci, const struct snd_pci_quirk *list);
456const struct snd_pci_quirk * 457const struct snd_pci_quirk *
457snd_pci_quirk_lookup_id(u16 vendor, u16 device, 458snd_pci_quirk_lookup_id(u16 vendor, u16 device,
458 const struct snd_pci_quirk *list); 459 const struct snd_pci_quirk *list);
460#endif
459 461
460#endif /* __SOUND_CORE_H */ 462#endif /* __SOUND_CORE_H */
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 4866499bdeeb..e5e6ff98f0fa 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -59,7 +59,7 @@ int transport_set_vpd_ident_type(struct t10_vpd *, unsigned char *);
59int transport_set_vpd_ident(struct t10_vpd *, unsigned char *); 59int transport_set_vpd_ident(struct t10_vpd *, unsigned char *);
60 60
61/* core helpers also used by command snooping in pscsi */ 61/* core helpers also used by command snooping in pscsi */
62void *transport_kmap_first_data_page(struct se_cmd *); 62void *transport_kmap_data_sg(struct se_cmd *);
63void transport_kunmap_first_data_page(struct se_cmd *); 63void transport_kunmap_data_sg(struct se_cmd *);
64 64
65#endif /* TARGET_CORE_BACKEND_H */ 65#endif /* TARGET_CORE_BACKEND_H */
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index daf532bc721a..dc4e345a0163 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -582,6 +582,7 @@ struct se_cmd {
582 582
583 struct scatterlist *t_data_sg; 583 struct scatterlist *t_data_sg;
584 unsigned int t_data_nents; 584 unsigned int t_data_nents;
585 void *t_data_vmap;
585 struct scatterlist *t_bidi_data_sg; 586 struct scatterlist *t_bidi_data_sg;
586 unsigned int t_bidi_data_nents; 587 unsigned int t_bidi_data_nents;
587 588
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 523e8bc104d4..d36fad317e78 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -114,7 +114,7 @@ void transport_init_se_cmd(struct se_cmd *, struct target_core_fabric_ops *,
114 struct se_session *, u32, int, int, unsigned char *); 114 struct se_session *, u32, int, int, unsigned char *);
115int transport_lookup_cmd_lun(struct se_cmd *, u32); 115int transport_lookup_cmd_lun(struct se_cmd *, u32);
116int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *); 116int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *);
117int target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *, 117void target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *,
118 unsigned char *, u32, u32, int, int, int); 118 unsigned char *, u32, u32, int, int, int);
119int transport_handle_cdb_direct(struct se_cmd *); 119int transport_handle_cdb_direct(struct se_cmd *);
120int transport_generic_handle_cdb_map(struct se_cmd *); 120int transport_generic_handle_cdb_map(struct se_cmd *);
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 8588a8918023..5973410e8f8c 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -47,7 +47,10 @@ DECLARE_EVENT_CLASS(writeback_work_class,
47 __field(int, reason) 47 __field(int, reason)
48 ), 48 ),
49 TP_fast_assign( 49 TP_fast_assign(
50 strncpy(__entry->name, dev_name(bdi->dev), 32); 50 struct device *dev = bdi->dev;
51 if (!dev)
52 dev = default_backing_dev_info.dev;
53 strncpy(__entry->name, dev_name(dev), 32);
51 __entry->nr_pages = work->nr_pages; 54 __entry->nr_pages = work->nr_pages;
52 __entry->sb_dev = work->sb ? work->sb->s_dev : 0; 55 __entry->sb_dev = work->sb ? work->sb->s_dev : 0;
53 __entry->sync_mode = work->sync_mode; 56 __entry->sync_mode = work->sync_mode;
@@ -426,7 +429,7 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
426 429
427 TP_fast_assign( 430 TP_fast_assign(
428 strncpy(__entry->name, 431 strncpy(__entry->name,
429 dev_name(inode->i_mapping->backing_dev_info->dev), 32); 432 dev_name(inode_to_bdi(inode)->dev), 32);
430 __entry->ino = inode->i_ino; 433 __entry->ino = inode->i_ino;
431 __entry->state = inode->i_state; 434 __entry->state = inode->i_state;
432 __entry->dirtied_when = inode->dirtied_when; 435 __entry->dirtied_when = inode->dirtied_when;
diff --git a/include/video/omapdss.h b/include/video/omapdss.h
index 062b3b24ff10..483f67caa7ad 100644
--- a/include/video/omapdss.h
+++ b/include/video/omapdss.h
@@ -590,6 +590,11 @@ struct omap_dss_device {
590 int (*get_backlight)(struct omap_dss_device *dssdev); 590 int (*get_backlight)(struct omap_dss_device *dssdev);
591}; 591};
592 592
593struct omap_dss_hdmi_data
594{
595 int hpd_gpio;
596};
597
593struct omap_dss_driver { 598struct omap_dss_driver {
594 struct device_driver driver; 599 struct device_driver driver;
595 600
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 32b48c889711..1b5c081d8b9f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2300,7 +2300,10 @@ do { \
2300 return div64_u64(dividend, divisor); 2300 return div64_u64(dividend, divisor);
2301} 2301}
2302 2302
2303static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count) 2303static DEFINE_PER_CPU(int, perf_throttled_count);
2304static DEFINE_PER_CPU(u64, perf_throttled_seq);
2305
2306static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
2304{ 2307{
2305 struct hw_perf_event *hwc = &event->hw; 2308 struct hw_perf_event *hwc = &event->hw;
2306 s64 period, sample_period; 2309 s64 period, sample_period;
@@ -2319,22 +2322,40 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
2319 hwc->sample_period = sample_period; 2322 hwc->sample_period = sample_period;
2320 2323
2321 if (local64_read(&hwc->period_left) > 8*sample_period) { 2324 if (local64_read(&hwc->period_left) > 8*sample_period) {
2322 event->pmu->stop(event, PERF_EF_UPDATE); 2325 if (disable)
2326 event->pmu->stop(event, PERF_EF_UPDATE);
2327
2323 local64_set(&hwc->period_left, 0); 2328 local64_set(&hwc->period_left, 0);
2324 event->pmu->start(event, PERF_EF_RELOAD); 2329
2330 if (disable)
2331 event->pmu->start(event, PERF_EF_RELOAD);
2325 } 2332 }
2326} 2333}
2327 2334
2328static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period) 2335/*
2336 * combine freq adjustment with unthrottling to avoid two passes over the
2337 * events. At the same time, make sure, having freq events does not change
2338 * the rate of unthrottling as that would introduce bias.
2339 */
2340static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
2341 int needs_unthr)
2329{ 2342{
2330 struct perf_event *event; 2343 struct perf_event *event;
2331 struct hw_perf_event *hwc; 2344 struct hw_perf_event *hwc;
2332 u64 interrupts, now; 2345 u64 now, period = TICK_NSEC;
2333 s64 delta; 2346 s64 delta;
2334 2347
2335 if (!ctx->nr_freq) 2348 /*
2349 * only need to iterate over all events iff:
2350 * - context have events in frequency mode (needs freq adjust)
2351 * - there are events to unthrottle on this cpu
2352 */
2353 if (!(ctx->nr_freq || needs_unthr))
2336 return; 2354 return;
2337 2355
2356 raw_spin_lock(&ctx->lock);
2357 perf_pmu_disable(ctx->pmu);
2358
2338 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 2359 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
2339 if (event->state != PERF_EVENT_STATE_ACTIVE) 2360 if (event->state != PERF_EVENT_STATE_ACTIVE)
2340 continue; 2361 continue;
@@ -2344,13 +2365,8 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
2344 2365
2345 hwc = &event->hw; 2366 hwc = &event->hw;
2346 2367
2347 interrupts = hwc->interrupts; 2368 if (needs_unthr && hwc->interrupts == MAX_INTERRUPTS) {
2348 hwc->interrupts = 0; 2369 hwc->interrupts = 0;
2349
2350 /*
2351 * unthrottle events on the tick
2352 */
2353 if (interrupts == MAX_INTERRUPTS) {
2354 perf_log_throttle(event, 1); 2370 perf_log_throttle(event, 1);
2355 event->pmu->start(event, 0); 2371 event->pmu->start(event, 0);
2356 } 2372 }
@@ -2358,14 +2374,30 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
2358 if (!event->attr.freq || !event->attr.sample_freq) 2374 if (!event->attr.freq || !event->attr.sample_freq)
2359 continue; 2375 continue;
2360 2376
2361 event->pmu->read(event); 2377 /*
2378 * stop the event and update event->count
2379 */
2380 event->pmu->stop(event, PERF_EF_UPDATE);
2381
2362 now = local64_read(&event->count); 2382 now = local64_read(&event->count);
2363 delta = now - hwc->freq_count_stamp; 2383 delta = now - hwc->freq_count_stamp;
2364 hwc->freq_count_stamp = now; 2384 hwc->freq_count_stamp = now;
2365 2385
2386 /*
2387 * restart the event
2388 * reload only if value has changed
2389 * we have stopped the event so tell that
2390 * to perf_adjust_period() to avoid stopping it
2391 * twice.
2392 */
2366 if (delta > 0) 2393 if (delta > 0)
2367 perf_adjust_period(event, period, delta); 2394 perf_adjust_period(event, period, delta, false);
2395
2396 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
2368 } 2397 }
2398
2399 perf_pmu_enable(ctx->pmu);
2400 raw_spin_unlock(&ctx->lock);
2369} 2401}
2370 2402
2371/* 2403/*
@@ -2388,16 +2420,13 @@ static void rotate_ctx(struct perf_event_context *ctx)
2388 */ 2420 */
2389static void perf_rotate_context(struct perf_cpu_context *cpuctx) 2421static void perf_rotate_context(struct perf_cpu_context *cpuctx)
2390{ 2422{
2391 u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC;
2392 struct perf_event_context *ctx = NULL; 2423 struct perf_event_context *ctx = NULL;
2393 int rotate = 0, remove = 1, freq = 0; 2424 int rotate = 0, remove = 1;
2394 2425
2395 if (cpuctx->ctx.nr_events) { 2426 if (cpuctx->ctx.nr_events) {
2396 remove = 0; 2427 remove = 0;
2397 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active) 2428 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
2398 rotate = 1; 2429 rotate = 1;
2399 if (cpuctx->ctx.nr_freq)
2400 freq = 1;
2401 } 2430 }
2402 2431
2403 ctx = cpuctx->task_ctx; 2432 ctx = cpuctx->task_ctx;
@@ -2405,37 +2434,26 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx)
2405 remove = 0; 2434 remove = 0;
2406 if (ctx->nr_events != ctx->nr_active) 2435 if (ctx->nr_events != ctx->nr_active)
2407 rotate = 1; 2436 rotate = 1;
2408 if (ctx->nr_freq)
2409 freq = 1;
2410 } 2437 }
2411 2438
2412 if (!rotate && !freq) 2439 if (!rotate)
2413 goto done; 2440 goto done;
2414 2441
2415 perf_ctx_lock(cpuctx, cpuctx->task_ctx); 2442 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
2416 perf_pmu_disable(cpuctx->ctx.pmu); 2443 perf_pmu_disable(cpuctx->ctx.pmu);
2417 2444
2418 if (freq) { 2445 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2419 perf_ctx_adjust_freq(&cpuctx->ctx, interval); 2446 if (ctx)
2420 if (ctx) 2447 ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
2421 perf_ctx_adjust_freq(ctx, interval);
2422 }
2423
2424 if (rotate) {
2425 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2426 if (ctx)
2427 ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
2428 2448
2429 rotate_ctx(&cpuctx->ctx); 2449 rotate_ctx(&cpuctx->ctx);
2430 if (ctx) 2450 if (ctx)
2431 rotate_ctx(ctx); 2451 rotate_ctx(ctx);
2432 2452
2433 perf_event_sched_in(cpuctx, ctx, current); 2453 perf_event_sched_in(cpuctx, ctx, current);
2434 }
2435 2454
2436 perf_pmu_enable(cpuctx->ctx.pmu); 2455 perf_pmu_enable(cpuctx->ctx.pmu);
2437 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); 2456 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
2438
2439done: 2457done:
2440 if (remove) 2458 if (remove)
2441 list_del_init(&cpuctx->rotation_list); 2459 list_del_init(&cpuctx->rotation_list);
@@ -2445,10 +2463,22 @@ void perf_event_task_tick(void)
2445{ 2463{
2446 struct list_head *head = &__get_cpu_var(rotation_list); 2464 struct list_head *head = &__get_cpu_var(rotation_list);
2447 struct perf_cpu_context *cpuctx, *tmp; 2465 struct perf_cpu_context *cpuctx, *tmp;
2466 struct perf_event_context *ctx;
2467 int throttled;
2448 2468
2449 WARN_ON(!irqs_disabled()); 2469 WARN_ON(!irqs_disabled());
2450 2470
2471 __this_cpu_inc(perf_throttled_seq);
2472 throttled = __this_cpu_xchg(perf_throttled_count, 0);
2473
2451 list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) { 2474 list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
2475 ctx = &cpuctx->ctx;
2476 perf_adjust_freq_unthr_context(ctx, throttled);
2477
2478 ctx = cpuctx->task_ctx;
2479 if (ctx)
2480 perf_adjust_freq_unthr_context(ctx, throttled);
2481
2452 if (cpuctx->jiffies_interval == 1 || 2482 if (cpuctx->jiffies_interval == 1 ||
2453 !(jiffies % cpuctx->jiffies_interval)) 2483 !(jiffies % cpuctx->jiffies_interval))
2454 perf_rotate_context(cpuctx); 2484 perf_rotate_context(cpuctx);
@@ -4509,6 +4539,7 @@ static int __perf_event_overflow(struct perf_event *event,
4509{ 4539{
4510 int events = atomic_read(&event->event_limit); 4540 int events = atomic_read(&event->event_limit);
4511 struct hw_perf_event *hwc = &event->hw; 4541 struct hw_perf_event *hwc = &event->hw;
4542 u64 seq;
4512 int ret = 0; 4543 int ret = 0;
4513 4544
4514 /* 4545 /*
@@ -4518,14 +4549,20 @@ static int __perf_event_overflow(struct perf_event *event,
4518 if (unlikely(!is_sampling_event(event))) 4549 if (unlikely(!is_sampling_event(event)))
4519 return 0; 4550 return 0;
4520 4551
4521 if (unlikely(hwc->interrupts >= max_samples_per_tick)) { 4552 seq = __this_cpu_read(perf_throttled_seq);
4522 if (throttle) { 4553 if (seq != hwc->interrupts_seq) {
4554 hwc->interrupts_seq = seq;
4555 hwc->interrupts = 1;
4556 } else {
4557 hwc->interrupts++;
4558 if (unlikely(throttle
4559 && hwc->interrupts >= max_samples_per_tick)) {
4560 __this_cpu_inc(perf_throttled_count);
4523 hwc->interrupts = MAX_INTERRUPTS; 4561 hwc->interrupts = MAX_INTERRUPTS;
4524 perf_log_throttle(event, 0); 4562 perf_log_throttle(event, 0);
4525 ret = 1; 4563 ret = 1;
4526 } 4564 }
4527 } else 4565 }
4528 hwc->interrupts++;
4529 4566
4530 if (event->attr.freq) { 4567 if (event->attr.freq) {
4531 u64 now = perf_clock(); 4568 u64 now = perf_clock();
@@ -4534,7 +4571,7 @@ static int __perf_event_overflow(struct perf_event *event,
4534 hwc->freq_time_stamp = now; 4571 hwc->freq_time_stamp = now;
4535 4572
4536 if (delta > 0 && delta < 2*TICK_NSEC) 4573 if (delta > 0 && delta < 2*TICK_NSEC)
4537 perf_adjust_period(event, delta, hwc->last_period); 4574 perf_adjust_period(event, delta, hwc->last_period, true);
4538 } 4575 }
4539 4576
4540 /* 4577 /*
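The perf changes above replace the old per-event "count interrupts, unthrottle on every tick" scheme with a per-CPU epoch: perf_event_task_tick() bumps perf_throttled_seq and collects perf_throttled_count, and __perf_event_overflow() restarts its interrupt counter the first time it fires in a new epoch, only bumping the throttle count once hwc->interrupts reaches max_samples_per_tick. Frequency adjustment and unthrottling then happen in a single pass through perf_adjust_freq_unthr_context() instead of the separate perf_ctx_adjust_freq() walk that perf_rotate_context() used to drive. The epoch-reset idea in isolation looks like this:

    #include <stdio.h>

    static unsigned long throttled_seq = 1;     /* bumped once per "tick" */

    struct counter {
        unsigned long seen_seq;                 /* epoch this counter last fired in */
        unsigned int hits;
    };

    /* Called on every event: lazily reset when a new epoch is observed. */
    static int counter_hit(struct counter *c, unsigned int max_per_tick)
    {
        if (c->seen_seq != throttled_seq) {
            c->seen_seq = throttled_seq;
            c->hits = 1;
            return 0;
        }
        return ++c->hits >= max_per_tick;       /* 1 => throttle this source */
    }

    int main(void)
    {
        struct counter c = { 0 };
        int i, throttled = 0;

        for (i = 0; i < 5; i++)
            throttled |= counter_hit(&c, 4);
        printf("after 5 hits in one tick: throttled=%d\n", throttled);

        throttled_seq++;                        /* the tick handler's job */
        printf("first hit of next tick: throttled=%d\n", counter_hit(&c, 4));
        return 0;
    }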
diff --git a/kernel/exit.c b/kernel/exit.c
index 294b1709170d..4b4042f9bc6a 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1038,6 +1038,22 @@ void do_exit(long code)
1038 if (tsk->nr_dirtied) 1038 if (tsk->nr_dirtied)
1039 __this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied); 1039 __this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
1040 exit_rcu(); 1040 exit_rcu();
1041
1042 /*
1043 * The setting of TASK_RUNNING by try_to_wake_up() may be delayed
1044 * when the following two conditions become true.
1045 * - There is race condition of mmap_sem (It is acquired by
1046 * exit_mm()), and
 1047 * - SMI occurs before setting TASK_RUNNING.
1048 * (or hypervisor of virtual machine switches to other guest)
1049 * As a result, we may become TASK_RUNNING after becoming TASK_DEAD
1050 *
1051 * To avoid it, we have to wait for releasing tsk->pi_lock which
1052 * is held by try_to_wake_up()
1053 */
1054 smp_mb();
1055 raw_spin_unlock_wait(&tsk->pi_lock);
1056
1041 /* causes final put_task_struct in finish_task_switch(). */ 1057 /* causes final put_task_struct in finish_task_switch(). */
1042 tsk->state = TASK_DEAD; 1058 tsk->state = TASK_DEAD;
1043 tsk->flags |= PF_NOFREEZE; /* tell freezer to ignore us */ 1059 tsk->flags |= PF_NOFREEZE; /* tell freezer to ignore us */
diff --git a/kernel/fork.c b/kernel/fork.c
index 051f090d40c1..b77fd559c78e 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -647,6 +647,26 @@ struct mm_struct *get_task_mm(struct task_struct *task)
647} 647}
648EXPORT_SYMBOL_GPL(get_task_mm); 648EXPORT_SYMBOL_GPL(get_task_mm);
649 649
650struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
651{
652 struct mm_struct *mm;
653 int err;
654
655 err = mutex_lock_killable(&task->signal->cred_guard_mutex);
656 if (err)
657 return ERR_PTR(err);
658
659 mm = get_task_mm(task);
660 if (mm && mm != current->mm &&
661 !ptrace_may_access(task, mode)) {
662 mmput(mm);
663 mm = ERR_PTR(-EACCES);
664 }
665 mutex_unlock(&task->signal->cred_guard_mutex);
666
667 return mm;
668}
669
650/* Please note the differences between mmput and mm_release. 670/* Please note the differences between mmput and mm_release.
651 * mmput is called whenever we stop holding onto a mm_struct, 671 * mmput is called whenever we stop holding onto a mm_struct,
652 * error success whatever. 672 * error success whatever.
@@ -890,7 +910,7 @@ static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
890 return -ENOMEM; 910 return -ENOMEM;
891 911
892 new_ioc->ioprio = ioc->ioprio; 912 new_ioc->ioprio = ioc->ioprio;
893 put_io_context(new_ioc, NULL); 913 put_io_context(new_ioc);
894 } 914 }
895#endif 915#endif
896 return 0; 916 return 0;
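mm_access() gives ptrace-style readers of another task's address space one helper that takes cred_guard_mutex, grabs the target's mm with get_task_mm(), and hands back ERR_PTR(-EACCES) unless the mm is the caller's own or ptrace_may_access() allows the requested mode; the prototype added to include/linux/sched.h above matches this definition. A hedged caller sketch, assuming kernel context (linux/sched.h, linux/ptrace.h, linux/err.h); this particular caller is invented for illustration:

    /* Illustrative only: PTRACE_MODE_READ is the usual mode for read-only
     * inspection; the surrounding function is made up.
     */
    static struct mm_struct *inspect_task_mm(struct task_struct *task)
    {
        struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);

        if (IS_ERR_OR_NULL(mm))
            return NULL;        /* no mm (kernel thread) or access denied */

        return mm;              /* caller must mmput(mm) when finished */
    }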
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 29f5b65bee29..9788c0ec6f43 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1673,8 +1673,12 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
1673 ri->rp = rp; 1673 ri->rp = rp;
1674 ri->task = current; 1674 ri->task = current;
1675 1675
1676 if (rp->entry_handler && rp->entry_handler(ri, regs)) 1676 if (rp->entry_handler && rp->entry_handler(ri, regs)) {
1677 raw_spin_lock_irqsave(&rp->lock, flags);
1678 hlist_add_head(&ri->hlist, &rp->free_instances);
1679 raw_spin_unlock_irqrestore(&rp->lock, flags);
1677 return 0; 1680 return 0;
1681 }
1678 1682
1679 arch_prepare_kretprobe(ri, regs); 1683 arch_prepare_kretprobe(ri, regs);
1680 1684
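The kretprobe fix plugs a leak: when a registered entry_handler returns nonzero to say "do not track this call", the preallocated kretprobe_instance used to be dropped on the floor; it is now put back on rp->free_instances under rp->lock, so repeated rejections no longer exhaust the instance pool. A hedged sketch of an entry_handler that exercises that path, assuming kernel context (linux/kprobes.h, linux/hardirq.h); the filter condition is arbitrary:

    /* Returning nonzero asks the core not to arm the return probe for this
     * call; with the fix above the instance is recycled instead of leaking.
     */
    static int my_entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
    {
        if (in_interrupt())
            return 1;       /* skip calls made from interrupt context */
        return 0;           /* track this call normally */
    }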
diff --git a/kernel/params.c b/kernel/params.c
index 32ee04308285..4bc965d8a1fe 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -97,7 +97,8 @@ static int parse_one(char *param,
97 for (i = 0; i < num_params; i++) { 97 for (i = 0; i < num_params; i++) {
98 if (parameq(param, params[i].name)) { 98 if (parameq(param, params[i].name)) {
99 /* No one handled NULL, so do it here. */ 99 /* No one handled NULL, so do it here. */
100 if (!val && params[i].ops->set != param_set_bool) 100 if (!val && params[i].ops->set != param_set_bool
101 && params[i].ops->set != param_set_bint)
101 return -EINVAL; 102 return -EINVAL;
102 pr_debug("They are equal! Calling %p\n", 103 pr_debug("They are equal! Calling %p\n",
103 params[i].ops->set); 104 params[i].ops->set);
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 0c4defe6d3b8..21724eee5206 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -231,8 +231,28 @@ extern int pm_test_level;
231#ifdef CONFIG_SUSPEND_FREEZER 231#ifdef CONFIG_SUSPEND_FREEZER
232static inline int suspend_freeze_processes(void) 232static inline int suspend_freeze_processes(void)
233{ 233{
234 int error = freeze_processes(); 234 int error;
235 return error ? : freeze_kernel_threads(); 235
236 error = freeze_processes();
237
238 /*
239 * freeze_processes() automatically thaws every task if freezing
240 * fails. So we need not do anything extra upon error.
241 */
242 if (error)
243 goto Finish;
244
245 error = freeze_kernel_threads();
246
247 /*
248 * freeze_kernel_threads() thaws only kernel threads upon freezing
249 * failure. So we have to thaw the userspace tasks ourselves.
250 */
251 if (error)
252 thaw_processes();
253
254 Finish:
255 return error;
236} 256}
237 257
238static inline void suspend_thaw_processes(void) 258static inline void suspend_thaw_processes(void)
diff --git a/kernel/power/process.c b/kernel/power/process.c
index eeca00311f39..7e426459e60a 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -143,7 +143,10 @@ int freeze_processes(void)
143/** 143/**
144 * freeze_kernel_threads - Make freezable kernel threads go to the refrigerator. 144 * freeze_kernel_threads - Make freezable kernel threads go to the refrigerator.
145 * 145 *
146 * On success, returns 0. On failure, -errno and system is fully thawed. 146 * On success, returns 0. On failure, -errno and only the kernel threads are
147 * thawed, so as to give a chance to the caller to do additional cleanups
148 * (if any) before thawing the userspace tasks. So, it is the responsibility
149 * of the caller to thaw the userspace tasks, when the time is right.
147 */ 150 */
148int freeze_kernel_threads(void) 151int freeze_kernel_threads(void)
149{ 152{
@@ -159,7 +162,7 @@ int freeze_kernel_threads(void)
159 BUG_ON(in_atomic()); 162 BUG_ON(in_atomic());
160 163
161 if (error) 164 if (error)
162 thaw_processes(); 165 thaw_kernel_threads();
163 return error; 166 return error;
164} 167}
165 168
diff --git a/kernel/power/user.c b/kernel/power/user.c
index e5a21a857302..3e100075b13c 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -249,13 +249,15 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
249 } 249 }
250 pm_restore_gfp_mask(); 250 pm_restore_gfp_mask();
251 error = hibernation_snapshot(data->platform_support); 251 error = hibernation_snapshot(data->platform_support);
252 if (!error) { 252 if (error) {
253 thaw_kernel_threads();
254 } else {
253 error = put_user(in_suspend, (int __user *)arg); 255 error = put_user(in_suspend, (int __user *)arg);
254 if (!error && !freezer_test_done) 256 if (!error && !freezer_test_done)
255 data->ready = 1; 257 data->ready = 1;
256 if (freezer_test_done) { 258 if (freezer_test_done) {
257 freezer_test_done = false; 259 freezer_test_done = false;
258 thaw_processes(); 260 thaw_kernel_threads();
259 } 261 }
260 } 262 }
261 break; 263 break;
diff --git a/kernel/relay.c b/kernel/relay.c
index 4335e1d7ee2d..ab56a1764d4d 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -164,10 +164,14 @@ depopulate:
164 */ 164 */
165static struct rchan_buf *relay_create_buf(struct rchan *chan) 165static struct rchan_buf *relay_create_buf(struct rchan *chan)
166{ 166{
167 struct rchan_buf *buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL); 167 struct rchan_buf *buf;
168 if (!buf) 168
169 if (chan->n_subbufs > UINT_MAX / sizeof(size_t *))
169 return NULL; 170 return NULL;
170 171
172 buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
173 if (!buf)
174 return NULL;
171 buf->padding = kmalloc(chan->n_subbufs * sizeof(size_t *), GFP_KERNEL); 175 buf->padding = kmalloc(chan->n_subbufs * sizeof(size_t *), GFP_KERNEL);
172 if (!buf->padding) 176 if (!buf->padding)
173 goto free_buf; 177 goto free_buf;
@@ -574,6 +578,8 @@ struct rchan *relay_open(const char *base_filename,
574 578
575 if (!(subbuf_size && n_subbufs)) 579 if (!(subbuf_size && n_subbufs))
576 return NULL; 580 return NULL;
581 if (subbuf_size > UINT_MAX / n_subbufs)
582 return NULL;
577 583
578 chan = kzalloc(sizeof(struct rchan), GFP_KERNEL); 584 chan = kzalloc(sizeof(struct rchan), GFP_KERNEL);
579 if (!chan) 585 if (!chan)
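Both relay changes guard the same class of multiplication: relay_create_buf() refuses n_subbufs values that would overflow the kmalloc size of the padding array, and relay_open() refuses subbuf_size * n_subbufs combinations that would overflow the unsigned buffer size. The standard idiom is to compare against MAX / other_factor before multiplying, since checking the product afterwards is too late:

    #include <stdio.h>
    #include <limits.h>

    /* Returns 1 and writes the product only when a * b fits in unsigned int. */
    static int mul_fits_uint(unsigned int a, unsigned int b, unsigned int *out)
    {
        if (b && a > UINT_MAX / b)
            return 0;               /* a * b would wrap */
        *out = a * b;
        return 1;
    }

    int main(void)
    {
        unsigned int total;

        printf("%d\n", mul_fits_uint(4096, 8, &total));          /* 1: fits */
        printf("%d\n", mul_fits_uint(1 << 20, 1 << 13, &total)); /* 0: would wrap */
        return 0;
    }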
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index df00cb09263e..5255c9d2e053 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -74,6 +74,7 @@
74 74
75#include <asm/tlb.h> 75#include <asm/tlb.h>
76#include <asm/irq_regs.h> 76#include <asm/irq_regs.h>
77#include <asm/mutex.h>
77#ifdef CONFIG_PARAVIRT 78#ifdef CONFIG_PARAVIRT
78#include <asm/paravirt.h> 79#include <asm/paravirt.h>
79#endif 80#endif
@@ -723,9 +724,6 @@ static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
723 p->sched_class->dequeue_task(rq, p, flags); 724 p->sched_class->dequeue_task(rq, p, flags);
724} 725}
725 726
726/*
727 * activate_task - move a task to the runqueue.
728 */
729void activate_task(struct rq *rq, struct task_struct *p, int flags) 727void activate_task(struct rq *rq, struct task_struct *p, int flags)
730{ 728{
731 if (task_contributes_to_load(p)) 729 if (task_contributes_to_load(p))
@@ -734,9 +732,6 @@ void activate_task(struct rq *rq, struct task_struct *p, int flags)
734 enqueue_task(rq, p, flags); 732 enqueue_task(rq, p, flags);
735} 733}
736 734
737/*
738 * deactivate_task - remove a task from the runqueue.
739 */
740void deactivate_task(struct rq *rq, struct task_struct *p, int flags) 735void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
741{ 736{
742 if (task_contributes_to_load(p)) 737 if (task_contributes_to_load(p))
@@ -4134,7 +4129,7 @@ recheck:
4134 on_rq = p->on_rq; 4129 on_rq = p->on_rq;
4135 running = task_current(rq, p); 4130 running = task_current(rq, p);
4136 if (on_rq) 4131 if (on_rq)
4137 deactivate_task(rq, p, 0); 4132 dequeue_task(rq, p, 0);
4138 if (running) 4133 if (running)
4139 p->sched_class->put_prev_task(rq, p); 4134 p->sched_class->put_prev_task(rq, p);
4140 4135
@@ -4147,7 +4142,7 @@ recheck:
4147 if (running) 4142 if (running)
4148 p->sched_class->set_curr_task(rq); 4143 p->sched_class->set_curr_task(rq);
4149 if (on_rq) 4144 if (on_rq)
4150 activate_task(rq, p, 0); 4145 enqueue_task(rq, p, 0);
4151 4146
4152 check_class_changed(rq, p, prev_class, oldprio); 4147 check_class_changed(rq, p, prev_class, oldprio);
4153 task_rq_unlock(rq, p, &flags); 4148 task_rq_unlock(rq, p, &flags);
@@ -4998,9 +4993,9 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
4998 * placed properly. 4993 * placed properly.
4999 */ 4994 */
5000 if (p->on_rq) { 4995 if (p->on_rq) {
5001 deactivate_task(rq_src, p, 0); 4996 dequeue_task(rq_src, p, 0);
5002 set_task_cpu(p, dest_cpu); 4997 set_task_cpu(p, dest_cpu);
5003 activate_task(rq_dest, p, 0); 4998 enqueue_task(rq_dest, p, 0);
5004 check_preempt_curr(rq_dest, p, 0); 4999 check_preempt_curr(rq_dest, p, 0);
5005 } 5000 }
5006done: 5001done:
@@ -7032,10 +7027,10 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
7032 7027
7033 on_rq = p->on_rq; 7028 on_rq = p->on_rq;
7034 if (on_rq) 7029 if (on_rq)
7035 deactivate_task(rq, p, 0); 7030 dequeue_task(rq, p, 0);
7036 __setscheduler(rq, p, SCHED_NORMAL, 0); 7031 __setscheduler(rq, p, SCHED_NORMAL, 0);
7037 if (on_rq) { 7032 if (on_rq) {
7038 activate_task(rq, p, 0); 7033 enqueue_task(rq, p, 0);
7039 resched_task(rq->curr); 7034 resched_task(rq->curr);
7040 } 7035 }
7041 7036
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 84adb2d66cbd..7c6414fc669d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4866,6 +4866,15 @@ static void nohz_balancer_kick(int cpu)
4866 return; 4866 return;
4867} 4867}
4868 4868
4869static inline void clear_nohz_tick_stopped(int cpu)
4870{
4871 if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
4872 cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
4873 atomic_dec(&nohz.nr_cpus);
4874 clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
4875 }
4876}
4877
4869static inline void set_cpu_sd_state_busy(void) 4878static inline void set_cpu_sd_state_busy(void)
4870{ 4879{
4871 struct sched_domain *sd; 4880 struct sched_domain *sd;
@@ -4904,6 +4913,12 @@ void select_nohz_load_balancer(int stop_tick)
4904{ 4913{
4905 int cpu = smp_processor_id(); 4914 int cpu = smp_processor_id();
4906 4915
4916 /*
4917 * If this cpu is going down, then nothing needs to be done.
4918 */
4919 if (!cpu_active(cpu))
4920 return;
4921
4907 if (stop_tick) { 4922 if (stop_tick) {
4908 if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu))) 4923 if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
4909 return; 4924 return;
@@ -4914,6 +4929,18 @@ void select_nohz_load_balancer(int stop_tick)
4914 } 4929 }
4915 return; 4930 return;
4916} 4931}
4932
4933static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb,
4934 unsigned long action, void *hcpu)
4935{
4936 switch (action & ~CPU_TASKS_FROZEN) {
4937 case CPU_DYING:
4938 clear_nohz_tick_stopped(smp_processor_id());
4939 return NOTIFY_OK;
4940 default:
4941 return NOTIFY_DONE;
4942 }
4943}
4917#endif 4944#endif
4918 4945
4919static DEFINE_SPINLOCK(balancing); 4946static DEFINE_SPINLOCK(balancing);
@@ -5070,11 +5097,7 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu)
5070 * busy tick after returning from idle, we will update the busy stats. 5097 * busy tick after returning from idle, we will update the busy stats.
5071 */ 5098 */
5072 set_cpu_sd_state_busy(); 5099 set_cpu_sd_state_busy();
5073 if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) { 5100 clear_nohz_tick_stopped(cpu);
5074 clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
5075 cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
5076 atomic_dec(&nohz.nr_cpus);
5077 }
5078 5101
5079 /* 5102 /*
5080 * None are in tickless mode and hence no need for NOHZ idle load 5103 * None are in tickless mode and hence no need for NOHZ idle load
@@ -5590,6 +5613,7 @@ __init void init_sched_fair_class(void)
5590 5613
5591#ifdef CONFIG_NO_HZ 5614#ifdef CONFIG_NO_HZ
5592 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT); 5615 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
5616 cpu_notifier(sched_ilb_notifier, 0);
5593#endif 5617#endif
5594#endif /* SMP */ 5618#endif /* SMP */
5595 5619
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 3640ebbb466b..f42ae7fb5ec5 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1587,6 +1587,11 @@ static int push_rt_task(struct rq *rq)
1587 if (!next_task) 1587 if (!next_task)
1588 return 0; 1588 return 0;
1589 1589
1590#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
1591 if (unlikely(task_running(rq, next_task)))
1592 return 0;
1593#endif
1594
1590retry: 1595retry:
1591 if (unlikely(next_task == rq->curr)) { 1596 if (unlikely(next_task == rq->curr)) {
1592 WARN_ON(1); 1597 WARN_ON(1);
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 1d7bca7f4f52..d117262deba3 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -296,7 +296,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
296 if (__this_cpu_read(soft_watchdog_warn) == true) 296 if (__this_cpu_read(soft_watchdog_warn) == true)
297 return HRTIMER_RESTART; 297 return HRTIMER_RESTART;
298 298
299 printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n", 299 printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
300 smp_processor_id(), duration, 300 smp_processor_id(), duration,
301 current->comm, task_pid_nr(current)); 301 current->comm, task_pid_nr(current));
302 print_modules(); 302 print_modules();
diff --git a/lib/Kconfig b/lib/Kconfig
index 169eb7c598e5..028aba9e72af 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -19,6 +19,9 @@ config RATIONAL
19config GENERIC_FIND_FIRST_BIT 19config GENERIC_FIND_FIRST_BIT
20 bool 20 bool
21 21
22config NO_GENERIC_PCI_IOPORT_MAP
23 bool
24
22config GENERIC_PCI_IOMAP 25config GENERIC_PCI_IOMAP
23 bool 26 bool
24 27
@@ -279,6 +282,9 @@ config AVERAGE
279 282
280 If unsure, say N. 283 If unsure, say N.
281 284
285config CLZ_TAB
286 bool
287
282config CORDIC 288config CORDIC
283 tristate "CORDIC algorithm" 289 tristate "CORDIC algorithm"
284 help 290 help
@@ -287,6 +293,7 @@ config CORDIC
287 293
288config MPILIB 294config MPILIB
289 tristate 295 tristate
296 select CLZ_TAB
290 help 297 help
291 Multiprecision maths library from GnuPG. 298 Multiprecision maths library from GnuPG.
292 It is used to implement RSA digital signature verification, 299 It is used to implement RSA digital signature verification,
diff --git a/lib/Makefile b/lib/Makefile
index d71aae1b01b3..18515f0267c4 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -121,6 +121,8 @@ obj-$(CONFIG_DQL) += dynamic_queue_limits.o
121obj-$(CONFIG_MPILIB) += mpi/ 121obj-$(CONFIG_MPILIB) += mpi/
122obj-$(CONFIG_SIGNATURE) += digsig.o 122obj-$(CONFIG_SIGNATURE) += digsig.o
123 123
124obj-$(CONFIG_CLZ_TAB) += clz_tab.o
125
124hostprogs-y := gen_crc32table 126hostprogs-y := gen_crc32table
125clean-files := crc32table.h 127clean-files := crc32table.h
126 128
diff --git a/lib/bug.c b/lib/bug.c
index 19552096d16b..a28c1415357c 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -169,7 +169,7 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
169 return BUG_TRAP_TYPE_WARN; 169 return BUG_TRAP_TYPE_WARN;
170 } 170 }
171 171
172 printk(KERN_EMERG "------------[ cut here ]------------\n"); 172 printk(KERN_DEFAULT "------------[ cut here ]------------\n");
173 173
174 if (file) 174 if (file)
175 printk(KERN_CRIT "kernel BUG at %s:%u!\n", 175 printk(KERN_CRIT "kernel BUG at %s:%u!\n",
diff --git a/lib/clz_tab.c b/lib/clz_tab.c
new file mode 100644
index 000000000000..7287b4a991a7
--- /dev/null
+++ b/lib/clz_tab.c
@@ -0,0 +1,18 @@
1const unsigned char __clz_tab[] = {
2 0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5,
3 5, 5, 5, 5, 5, 5, 5, 5,
4 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
5 6, 6, 6, 6, 6, 6, 6, 6,
6 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7 7, 7, 7, 7, 7, 7, 7, 7,
8 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
9 7, 7, 7, 7, 7, 7, 7, 7,
10 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
11 8, 8, 8, 8, 8, 8, 8, 8,
12 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
13 8, 8, 8, 8, 8, 8, 8, 8,
14 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
15 8, 8, 8, 8, 8, 8, 8, 8,
16 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
17 8, 8, 8, 8, 8, 8, 8, 8,
18};
diff --git a/lib/digsig.c b/lib/digsig.c
index fd2402f67f89..286d558033e2 100644
--- a/lib/digsig.c
+++ b/lib/digsig.c
@@ -34,14 +34,9 @@ static int pkcs_1_v1_5_decode_emsa(const unsigned char *msg,
34 unsigned long msglen, 34 unsigned long msglen,
35 unsigned long modulus_bitlen, 35 unsigned long modulus_bitlen,
36 unsigned char *out, 36 unsigned char *out,
37 unsigned long *outlen, 37 unsigned long *outlen)
38 int *is_valid)
39{ 38{
40 unsigned long modulus_len, ps_len, i; 39 unsigned long modulus_len, ps_len, i;
41 int result;
42
43 /* default to invalid packet */
44 *is_valid = 0;
45 40
46 modulus_len = (modulus_bitlen >> 3) + (modulus_bitlen & 7 ? 1 : 0); 41 modulus_len = (modulus_bitlen >> 3) + (modulus_bitlen & 7 ? 1 : 0);
47 42
@@ -50,39 +45,30 @@ static int pkcs_1_v1_5_decode_emsa(const unsigned char *msg,
50 return -EINVAL; 45 return -EINVAL;
51 46
52 /* separate encoded message */ 47 /* separate encoded message */
53 if ((msg[0] != 0x00) || (msg[1] != (unsigned char)1)) { 48 if ((msg[0] != 0x00) || (msg[1] != (unsigned char)1))
54 result = -EINVAL; 49 return -EINVAL;
55 goto bail;
56 }
57 50
58 for (i = 2; i < modulus_len - 1; i++) 51 for (i = 2; i < modulus_len - 1; i++)
59 if (msg[i] != 0xFF) 52 if (msg[i] != 0xFF)
60 break; 53 break;
61 54
62 /* separator check */ 55 /* separator check */
63 if (msg[i] != 0) { 56 if (msg[i] != 0)
64 /* There was no octet with hexadecimal value 0x00 57 /* There was no octet with hexadecimal value 0x00
65 to separate ps from m. */ 58 to separate ps from m. */
66 result = -EINVAL; 59 return -EINVAL;
67 goto bail;
68 }
69 60
70 ps_len = i - 2; 61 ps_len = i - 2;
71 62
72 if (*outlen < (msglen - (2 + ps_len + 1))) { 63 if (*outlen < (msglen - (2 + ps_len + 1))) {
73 *outlen = msglen - (2 + ps_len + 1); 64 *outlen = msglen - (2 + ps_len + 1);
74 result = -EOVERFLOW; 65 return -EOVERFLOW;
75 goto bail;
76 } 66 }
77 67
78 *outlen = (msglen - (2 + ps_len + 1)); 68 *outlen = (msglen - (2 + ps_len + 1));
79 memcpy(out, &msg[2 + ps_len + 1], *outlen); 69 memcpy(out, &msg[2 + ps_len + 1], *outlen);
80 70
81 /* valid packet */ 71 return 0;
82 *is_valid = 1;
83 result = 0;
84bail:
85 return result;
86} 72}
87 73
88/* 74/*
@@ -96,7 +82,7 @@ static int digsig_verify_rsa(struct key *key,
96 unsigned long len; 82 unsigned long len;
97 unsigned long mlen, mblen; 83 unsigned long mlen, mblen;
98 unsigned nret, l; 84 unsigned nret, l;
99 int valid, head, i; 85 int head, i;
100 unsigned char *out1 = NULL, *out2 = NULL; 86 unsigned char *out1 = NULL, *out2 = NULL;
101 MPI in = NULL, res = NULL, pkey[2]; 87 MPI in = NULL, res = NULL, pkey[2];
102 uint8_t *p, *datap, *endp; 88 uint8_t *p, *datap, *endp;
@@ -105,6 +91,10 @@ static int digsig_verify_rsa(struct key *key,
105 91
106 down_read(&key->sem); 92 down_read(&key->sem);
107 ukp = key->payload.data; 93 ukp = key->payload.data;
94
95 if (ukp->datalen < sizeof(*pkh))
96 goto err1;
97
108 pkh = (struct pubkey_hdr *)ukp->data; 98 pkh = (struct pubkey_hdr *)ukp->data;
109 99
110 if (pkh->version != 1) 100 if (pkh->version != 1)
@@ -117,18 +107,23 @@ static int digsig_verify_rsa(struct key *key,
117 goto err1; 107 goto err1;
118 108
119 datap = pkh->mpi; 109 datap = pkh->mpi;
120 endp = datap + ukp->datalen; 110 endp = ukp->data + ukp->datalen;
111
112 err = -ENOMEM;
121 113
122 for (i = 0; i < pkh->nmpi; i++) { 114 for (i = 0; i < pkh->nmpi; i++) {
123 unsigned int remaining = endp - datap; 115 unsigned int remaining = endp - datap;
124 pkey[i] = mpi_read_from_buffer(datap, &remaining); 116 pkey[i] = mpi_read_from_buffer(datap, &remaining);
117 if (!pkey[i])
118 goto err;
125 datap += remaining; 119 datap += remaining;
126 } 120 }
127 121
128 mblen = mpi_get_nbits(pkey[0]); 122 mblen = mpi_get_nbits(pkey[0]);
129 mlen = (mblen + 7)/8; 123 mlen = (mblen + 7)/8;
130 124
131 err = -ENOMEM; 125 if (mlen == 0)
126 goto err;
132 127
133 out1 = kzalloc(mlen, GFP_KERNEL); 128 out1 = kzalloc(mlen, GFP_KERNEL);
134 if (!out1) 129 if (!out1)
@@ -167,10 +162,9 @@ static int digsig_verify_rsa(struct key *key,
167 memset(out1, 0, head); 162 memset(out1, 0, head);
168 memcpy(out1 + head, p, l); 163 memcpy(out1 + head, p, l);
169 164
170 err = -EINVAL; 165 err = pkcs_1_v1_5_decode_emsa(out1, len, mblen, out2, &len);
171 pkcs_1_v1_5_decode_emsa(out1, len, mblen, out2, &len, &valid);
172 166
173 if (valid && len == hlen) 167 if (!err && len == hlen)
174 err = memcmp(out2, h, hlen); 168 err = memcmp(out2, h, hlen);
175 169
176err: 170err:
@@ -178,8 +172,8 @@ err:
178 mpi_free(res); 172 mpi_free(res);
179 kfree(out1); 173 kfree(out1);
180 kfree(out2); 174 kfree(out2);
181 mpi_free(pkey[0]); 175 while (--i >= 0)
182 mpi_free(pkey[1]); 176 mpi_free(pkey[i]);
183err1: 177err1:
184 up_read(&key->sem); 178 up_read(&key->sem);
185 179
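Taken together, the digsig changes make the RSA verify path fail cleanly instead of trusting its inputs: pkcs_1_v1_5_decode_emsa() reports -EINVAL/-EOVERFLOW directly rather than through an is_valid out-parameter, the key payload length and each mpi_read_from_buffer() result are checked before use, and the exit path frees only the MPIs that were actually parsed by counting i back down. That last pattern, unwinding exactly as far as the loop got, works the same way in a standalone program:

    #include <stdio.h>
    #include <stdlib.h>

    #define NITEMS 2

    int main(void)
    {
        void *items[NITEMS];
        int i;

        for (i = 0; i < NITEMS; i++) {
            items[i] = malloc(16);
            if (!items[i])
                break;          /* allocation/parsing failed part-way */
        }

        /* Free only what the loop actually produced: i holds the count. */
        while (--i >= 0)
            free(items[i]);

        printf("cleaned up without touching uninitialised slots\n");
        return 0;
    }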
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
index 7a94c8f14e29..b1dd3e7d88cb 100644
--- a/lib/kstrtox.c
+++ b/lib/kstrtox.c
@@ -44,12 +44,13 @@ const char *_parse_integer_fixup_radix(const char *s, unsigned int *base)
44 * 44 *
45 * Don't you dare use this function. 45 * Don't you dare use this function.
46 */ 46 */
47unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long *res) 47unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long *p)
48{ 48{
49 unsigned long long res;
49 unsigned int rv; 50 unsigned int rv;
50 int overflow; 51 int overflow;
51 52
52 *res = 0; 53 res = 0;
53 rv = 0; 54 rv = 0;
54 overflow = 0; 55 overflow = 0;
55 while (*s) { 56 while (*s) {
@@ -64,12 +65,19 @@ unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long
64 65
65 if (val >= base) 66 if (val >= base)
66 break; 67 break;
67 if (*res > div_u64(ULLONG_MAX - val, base)) 68 /*
68 overflow = 1; 69 * Check for overflow only if we are within range of
69 *res = *res * base + val; 70 * it in the max base we support (16)
71 */
72 if (unlikely(res & (~0ull << 60))) {
73 if (res > div_u64(ULLONG_MAX - val, base))
74 overflow = 1;
75 }
76 res = res * base + val;
70 rv++; 77 rv++;
71 s++; 78 s++;
72 } 79 }
80 *p = res;
73 if (overflow) 81 if (overflow)
74 rv |= KSTRTOX_OVERFLOW; 82 rv |= KSTRTOX_OVERFLOW;
75 return rv; 83 return rv;
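The _parse_integer() rewrite accumulates into a local res and only performs the relatively expensive div_u64() overflow test once res already has a bit set in its top four bits (res & (~0ull << 60)). That is safe because the largest supported base is 16, so one step multiplies by at most 16, i.e. shifts left by at most 4 bits: a value with bits 63..60 all clear cannot overflow 64 bits in a single step. A quick check of the boundary:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long below = (1ULL << 60) - 1;    /* top 4 bits clear */
        unsigned long long max_base = 16;

        /* (2^60 - 1) * 16 + 15 == 2^64 - 1: still representable, no wrap. */
        printf("%llu\n", below * max_base + 15);        /* 18446744073709551615 */

        /* Once bit 60 or higher is set, one more *16 step could wrap,
         * so the code falls back to the div_u64()-based check. */
        printf("%d\n", !!((1ULL << 60) & (~0ULL << 60)));   /* prints 1 */
        return 0;
    }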
diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h
index b87487b40a8b..29f98624ef93 100644
--- a/lib/mpi/longlong.h
+++ b/lib/mpi/longlong.h
@@ -1200,18 +1200,40 @@ do { \
1200 "r" ((USItype)(v)) \ 1200 "r" ((USItype)(v)) \
1201 : "%g1", "%g2" __AND_CLOBBER_CC) 1201 : "%g1", "%g2" __AND_CLOBBER_CC)
1202#define UMUL_TIME 39 /* 39 instructions */ 1202#define UMUL_TIME 39 /* 39 instructions */
1203#endif 1203/* It's quite necessary to add this much assembler for the sparc.
1204#ifndef udiv_qrnnd 1204 The default udiv_qrnnd (in C) is more than 10 times slower! */
1205#ifndef LONGLONG_STANDALONE
1206#define udiv_qrnnd(q, r, n1, n0, d) \ 1205#define udiv_qrnnd(q, r, n1, n0, d) \
1207do { USItype __r; \ 1206 __asm__ ("! Inlined udiv_qrnnd\n\t" \
1208 (q) = __udiv_qrnnd(&__r, (n1), (n0), (d)); \ 1207 "mov 32,%%g1\n\t" \
1209 (r) = __r; \ 1208 "subcc %1,%2,%%g0\n\t" \
1210} while (0) 1209 "1: bcs 5f\n\t" \
1211 extern USItype __udiv_qrnnd(); 1210 "addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb\n\t" \
1212#define UDIV_TIME 140 1211 "sub %1,%2,%1 ! this kills msb of n\n\t" \
1213#endif /* LONGLONG_STANDALONE */ 1212 "addx %1,%1,%1 ! so this can't give carry\n\t" \
1214#endif /* udiv_qrnnd */ 1213 "subcc %%g1,1,%%g1\n\t" \
1214 "2: bne 1b\n\t" \
1215 "subcc %1,%2,%%g0\n\t" \
1216 "bcs 3f\n\t" \
1217 "addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb\n\t" \
1218 "b 3f\n\t" \
1219 "sub %1,%2,%1 ! this kills msb of n\n\t" \
1220 "4: sub %1,%2,%1\n\t" \
1221 "5: addxcc %1,%1,%1\n\t" \
1222 "bcc 2b\n\t" \
1223 "subcc %%g1,1,%%g1\n\t" \
1224 "! Got carry from n. Subtract next step to cancel this carry.\n\t" \
1225 "bne 4b\n\t" \
1226 "addcc %0,%0,%0 ! shift n1n0 and a 0-bit in lsb\n\t" \
1227 "sub %1,%2,%1\n\t" \
1228 "3: xnor %0,0,%0\n\t" \
1229 "! End of inline udiv_qrnnd\n" \
1230 : "=&r" ((USItype)(q)), \
1231 "=&r" ((USItype)(r)) \
1232 : "r" ((USItype)(d)), \
1233 "1" ((USItype)(n1)), \
1234 "0" ((USItype)(n0)) : "%g1", "cc")
1235#define UDIV_TIME (3+7*32) /* 7 instructions/iteration. 32 iterations. */
1236#endif
1215#endif /* __sparc__ */ 1237#endif /* __sparc__ */
1216 1238
1217/*************************************** 1239/***************************************
diff --git a/lib/mpi/mpi-bit.c b/lib/mpi/mpi-bit.c
index 854c9c6da025..2f526627e4f5 100644
--- a/lib/mpi/mpi-bit.c
+++ b/lib/mpi/mpi-bit.c
@@ -21,25 +21,6 @@
21#include "mpi-internal.h" 21#include "mpi-internal.h"
22#include "longlong.h" 22#include "longlong.h"
23 23
24const unsigned char __clz_tab[] = {
25 0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5,
26 5, 5, 5, 5, 5, 5, 5, 5,
27 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
28 6, 6, 6, 6, 6, 6, 6, 6,
29 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
30 7, 7, 7, 7, 7, 7, 7, 7,
31 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
32 7, 7, 7, 7, 7, 7, 7, 7,
33 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
34 8, 8, 8, 8, 8, 8, 8, 8,
35 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
36 8, 8, 8, 8, 8, 8, 8, 8,
37 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
38 8, 8, 8, 8, 8, 8, 8, 8,
39 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
40 8, 8, 8, 8, 8, 8, 8, 8,
41};
42
43#define A_LIMB_1 ((mpi_limb_t) 1) 24#define A_LIMB_1 ((mpi_limb_t) 1)
44 25
45/**************** 26/****************
diff --git a/lib/mpi/mpi-div.c b/lib/mpi/mpi-div.c
index c3087d1390ce..f68cbbb4d4a4 100644
--- a/lib/mpi/mpi-div.c
+++ b/lib/mpi/mpi-div.c
@@ -149,6 +149,9 @@ int mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den)
149 mpi_ptr_t marker[5]; 149 mpi_ptr_t marker[5];
150 int markidx = 0; 150 int markidx = 0;
151 151
152 if (!dsize)
153 return -EINVAL;
154
152 memset(marker, 0, sizeof(marker)); 155 memset(marker, 0, sizeof(marker));
153 156
154 /* Ensure space is enough for quotient and remainder. 157 /* Ensure space is enough for quotient and remainder.
@@ -207,6 +210,8 @@ int mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den)
207 * numerator would be gradually overwritten by the quotient limbs. */ 210 * numerator would be gradually overwritten by the quotient limbs. */
208 if (qp == np) { /* Copy NP object to temporary space. */ 211 if (qp == np) { /* Copy NP object to temporary space. */
209 np = marker[markidx++] = mpi_alloc_limb_space(nsize); 212 np = marker[markidx++] = mpi_alloc_limb_space(nsize);
213 if (!np)
214 goto nomem;
210 MPN_COPY(np, qp, nsize); 215 MPN_COPY(np, qp, nsize);
211 } 216 }
212 } else /* Put quotient at top of remainder. */ 217 } else /* Put quotient at top of remainder. */
diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c
index b04a3cf80080..67f3e79af914 100644
--- a/lib/mpi/mpi-pow.c
+++ b/lib/mpi/mpi-pow.c
@@ -59,7 +59,7 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
59 ep = exp->d; 59 ep = exp->d;
60 60
61 if (!msize) 61 if (!msize)
62 msize = 1 / msize; /* provoke a signal */ 62 return -EINVAL;
63 63
64 if (!esize) { 64 if (!esize) {
65 /* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0 65 /* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index 716802b774ea..f26b41fcb48c 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -20,78 +20,15 @@
20 20
21#include "mpi-internal.h" 21#include "mpi-internal.h"
22 22
23#define DIM(v) (sizeof(v)/sizeof((v)[0]))
24#define MAX_EXTERN_MPI_BITS 16384 23#define MAX_EXTERN_MPI_BITS 16384
25 24
26static uint8_t asn[15] = /* Object ID is 1.3.14.3.2.26 */
27{ 0x30, 0x21, 0x30, 0x09, 0x06, 0x05, 0x2b, 0x0e, 0x03,
28 0x02, 0x1a, 0x05, 0x00, 0x04, 0x14
29};
30
31MPI do_encode_md(const void *sha_buffer, unsigned nbits)
32{
33 int nframe = (nbits + 7) / 8;
34 uint8_t *frame, *fr_pt;
35 int i = 0, n;
36 size_t asnlen = DIM(asn);
37 MPI a = MPI_NULL;
38
39 if (SHA1_DIGEST_LENGTH + asnlen + 4 > nframe)
40 pr_info("MPI: can't encode a %d bit MD into a %d bits frame\n",
41 (int)(SHA1_DIGEST_LENGTH * 8), (int)nbits);
42
43 /* We encode the MD in this way:
44 *
45 * 0 A PAD(n bytes) 0 ASN(asnlen bytes) MD(len bytes)
46 *
47 * PAD consists of FF bytes.
48 */
49 frame = kmalloc(nframe, GFP_KERNEL);
50 if (!frame)
51 return MPI_NULL;
52 n = 0;
53 frame[n++] = 0;
54 frame[n++] = 1; /* block type */
55 i = nframe - SHA1_DIGEST_LENGTH - asnlen - 3;
56
57 if (i <= 1) {
58 pr_info("MPI: message digest encoding failed\n");
59 kfree(frame);
60 return a;
61 }
62
63 memset(frame + n, 0xff, i);
64 n += i;
65 frame[n++] = 0;
66 memcpy(frame + n, &asn, asnlen);
67 n += asnlen;
68 memcpy(frame + n, sha_buffer, SHA1_DIGEST_LENGTH);
69 n += SHA1_DIGEST_LENGTH;
70
71 i = nframe;
72 fr_pt = frame;
73
74 if (n != nframe) {
75 printk
76 ("MPI: message digest encoding failed, frame length is wrong\n");
77 kfree(frame);
78 return a;
79 }
80
81 a = mpi_alloc((nframe + BYTES_PER_MPI_LIMB - 1) / BYTES_PER_MPI_LIMB);
82 mpi_set_buffer(a, frame, nframe, 0);
83 kfree(frame);
84
85 return a;
86}
87
88MPI mpi_read_from_buffer(const void *xbuffer, unsigned *ret_nread) 25MPI mpi_read_from_buffer(const void *xbuffer, unsigned *ret_nread)
89{ 26{
90 const uint8_t *buffer = xbuffer; 27 const uint8_t *buffer = xbuffer;
91 int i, j; 28 int i, j;
92 unsigned nbits, nbytes, nlimbs, nread = 0; 29 unsigned nbits, nbytes, nlimbs, nread = 0;
93 mpi_limb_t a; 30 mpi_limb_t a;
94 MPI val = MPI_NULL; 31 MPI val = NULL;
95 32
96 if (*ret_nread < 2) 33 if (*ret_nread < 2)
97 goto leave; 34 goto leave;
@@ -108,7 +45,7 @@ MPI mpi_read_from_buffer(const void *xbuffer, unsigned *ret_nread)
108 nlimbs = (nbytes + BYTES_PER_MPI_LIMB - 1) / BYTES_PER_MPI_LIMB; 45 nlimbs = (nbytes + BYTES_PER_MPI_LIMB - 1) / BYTES_PER_MPI_LIMB;
109 val = mpi_alloc(nlimbs); 46 val = mpi_alloc(nlimbs);
110 if (!val) 47 if (!val)
111 return MPI_NULL; 48 return NULL;
112 i = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB; 49 i = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB;
113 i %= BYTES_PER_MPI_LIMB; 50 i %= BYTES_PER_MPI_LIMB;
114 val->nbits = nbits; 51 val->nbits = nbits;
@@ -212,30 +149,6 @@ int mpi_fromstr(MPI val, const char *str)
212EXPORT_SYMBOL_GPL(mpi_fromstr); 149EXPORT_SYMBOL_GPL(mpi_fromstr);
213 150
214/**************** 151/****************
215 * Special function to get the low 8 bytes from an mpi.
216 * This can be used as a keyid; KEYID is an 2 element array.
217 * Return the low 4 bytes.
218 */
219u32 mpi_get_keyid(const MPI a, u32 *keyid)
220{
221#if BYTES_PER_MPI_LIMB == 4
222 if (keyid) {
223 keyid[0] = a->nlimbs >= 2 ? a->d[1] : 0;
224 keyid[1] = a->nlimbs >= 1 ? a->d[0] : 0;
225 }
226 return a->nlimbs >= 1 ? a->d[0] : 0;
227#elif BYTES_PER_MPI_LIMB == 8
228 if (keyid) {
229 keyid[0] = a->nlimbs ? (u32) (a->d[0] >> 32) : 0;
230 keyid[1] = a->nlimbs ? (u32) (a->d[0] & 0xffffffff) : 0;
231 }
232 return a->nlimbs ? (u32) (a->d[0] & 0xffffffff) : 0;
233#else
234#error Make this function work with other LIMB sizes
235#endif
236}
237
238/****************
239 * Return an allocated buffer with the MPI (msb first). 152 * Return an allocated buffer with the MPI (msb first).
240 * NBYTES receives the length of this buffer. Caller must free the 153 * NBYTES receives the length of this buffer. Caller must free the
241 * return string (This function does return a 0 byte buffer with NBYTES 154 * return string (This function does return a 0 byte buffer with NBYTES
diff --git a/lib/mpi/mpih-div.c b/lib/mpi/mpih-div.c
index 87ede162dfab..cde1aaec18da 100644
--- a/lib/mpi/mpih-div.c
+++ b/lib/mpi/mpih-div.c
@@ -217,6 +217,10 @@ mpihelp_divrem(mpi_ptr_t qp, mpi_size_t qextra_limbs,
217 case 0: 217 case 0:
218 /* We are asked to divide by zero, so go ahead and do it! (To make 218 /* We are asked to divide by zero, so go ahead and do it! (To make
219 the compiler not remove this statement, return the value.) */ 219 the compiler not remove this statement, return the value.) */
220 /*
221 * existing clients of this function have been modified
222 * not to call it with dsize == 0, so this should not happen
223 */
220 return 1 / dsize; 224 return 1 / dsize;
221 225
222 case 1: 226 case 1:
diff --git a/lib/mpi/mpiutil.c b/lib/mpi/mpiutil.c
index eefc55d6b7f5..26e4ed31e256 100644
--- a/lib/mpi/mpiutil.c
+++ b/lib/mpi/mpiutil.c
@@ -58,6 +58,9 @@ mpi_ptr_t mpi_alloc_limb_space(unsigned nlimbs)
58{ 58{
59 size_t len = nlimbs * sizeof(mpi_limb_t); 59 size_t len = nlimbs * sizeof(mpi_limb_t);
60 60
61 if (!len)
62 return NULL;
63
61 return kmalloc(len, GFP_KERNEL); 64 return kmalloc(len, GFP_KERNEL);
62} 65}
63 66
@@ -135,7 +138,7 @@ int mpi_copy(MPI *copied, const MPI a)
135 size_t i; 138 size_t i;
136 MPI b; 139 MPI b;
137 140
138 *copied = MPI_NULL; 141 *copied = NULL;
139 142
140 if (a) { 143 if (a) {
141 b = mpi_alloc(a->nlimbs); 144 b = mpi_alloc(a->nlimbs);
diff --git a/lib/pci_iomap.c b/lib/pci_iomap.c
index 4b0fdc22e688..0d83ea8a9605 100644
--- a/lib/pci_iomap.c
+++ b/lib/pci_iomap.c
@@ -34,7 +34,7 @@ void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
34 if (maxlen && len > maxlen) 34 if (maxlen && len > maxlen)
35 len = maxlen; 35 len = maxlen;
36 if (flags & IORESOURCE_IO) 36 if (flags & IORESOURCE_IO)
37 return ioport_map(start, len); 37 return __pci_ioport_map(dev, start, len);
38 if (flags & IORESOURCE_MEM) { 38 if (flags & IORESOURCE_MEM) {
39 if (flags & IORESOURCE_CACHEABLE) 39 if (flags & IORESOURCE_CACHEABLE)
40 return ioremap(start, len); 40 return ioremap(start, len);
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 7ba8feae11b8..dd8e2aafb07e 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -318,7 +318,7 @@ static void wakeup_timer_fn(unsigned long data)
318 if (bdi->wb.task) { 318 if (bdi->wb.task) {
319 trace_writeback_wake_thread(bdi); 319 trace_writeback_wake_thread(bdi);
320 wake_up_process(bdi->wb.task); 320 wake_up_process(bdi->wb.task);
321 } else { 321 } else if (bdi->dev) {
322 /* 322 /*
323 * When bdi tasks are inactive for long time, they are killed. 323 * When bdi tasks are inactive for long time, they are killed.
324 * In this case we have to wake-up the forker thread which 324 * In this case we have to wake-up the forker thread which
@@ -584,6 +584,8 @@ EXPORT_SYMBOL(bdi_register_dev);
584 */ 584 */
585static void bdi_wb_shutdown(struct backing_dev_info *bdi) 585static void bdi_wb_shutdown(struct backing_dev_info *bdi)
586{ 586{
587 struct task_struct *task;
588
587 if (!bdi_cap_writeback_dirty(bdi)) 589 if (!bdi_cap_writeback_dirty(bdi))
588 return; 590 return;
589 591
@@ -602,8 +604,13 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
602 * Finally, kill the kernel thread. We don't need to be RCU 604 * Finally, kill the kernel thread. We don't need to be RCU
603 * safe anymore, since the bdi is gone from visibility. 605 * safe anymore, since the bdi is gone from visibility.
604 */ 606 */
605 if (bdi->wb.task) 607 spin_lock_bh(&bdi->wb_lock);
606 kthread_stop(bdi->wb.task); 608 task = bdi->wb.task;
609 bdi->wb.task = NULL;
610 spin_unlock_bh(&bdi->wb_lock);
611
612 if (task)
613 kthread_stop(task);
607} 614}
608 615
609/* 616/*
@@ -623,7 +630,9 @@ static void bdi_prune_sb(struct backing_dev_info *bdi)
623 630
624void bdi_unregister(struct backing_dev_info *bdi) 631void bdi_unregister(struct backing_dev_info *bdi)
625{ 632{
626 if (bdi->dev) { 633 struct device *dev = bdi->dev;
634
635 if (dev) {
627 bdi_set_min_ratio(bdi, 0); 636 bdi_set_min_ratio(bdi, 0);
628 trace_writeback_bdi_unregister(bdi); 637 trace_writeback_bdi_unregister(bdi);
629 bdi_prune_sb(bdi); 638 bdi_prune_sb(bdi);
@@ -632,8 +641,12 @@ void bdi_unregister(struct backing_dev_info *bdi)
632 if (!bdi_cap_flush_forker(bdi)) 641 if (!bdi_cap_flush_forker(bdi))
633 bdi_wb_shutdown(bdi); 642 bdi_wb_shutdown(bdi);
634 bdi_debug_unregister(bdi); 643 bdi_debug_unregister(bdi);
635 device_unregister(bdi->dev); 644
645 spin_lock_bh(&bdi->wb_lock);
636 bdi->dev = NULL; 646 bdi->dev = NULL;
647 spin_unlock_bh(&bdi->wb_lock);
648
649 device_unregister(dev);
637 } 650 }
638} 651}
639EXPORT_SYMBOL(bdi_unregister); 652EXPORT_SYMBOL(bdi_unregister);
diff --git a/mm/compaction.c b/mm/compaction.c
index 71a58f67f481..d9ebebe1a2aa 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -313,12 +313,34 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
313 } else if (!locked) 313 } else if (!locked)
314 spin_lock_irq(&zone->lru_lock); 314 spin_lock_irq(&zone->lru_lock);
315 315
316 /*
317 * migrate_pfn does not necessarily start aligned to a
318 * pageblock. Ensure that pfn_valid is called when moving
319 * into a new MAX_ORDER_NR_PAGES range in case of large
320 * memory holes within the zone
321 */
322 if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
323 if (!pfn_valid(low_pfn)) {
324 low_pfn += MAX_ORDER_NR_PAGES - 1;
325 continue;
326 }
327 }
328
316 if (!pfn_valid_within(low_pfn)) 329 if (!pfn_valid_within(low_pfn))
317 continue; 330 continue;
318 nr_scanned++; 331 nr_scanned++;
319 332
320 /* Get the page and skip if free */ 333 /*
334 * Get the page and ensure the page is within the same zone.
335 * See the comment in isolate_freepages about overlapping
336 * nodes. It is deliberate that the new zone lock is not taken
337 * as memory compaction should not move pages between nodes.
338 */
321 page = pfn_to_page(low_pfn); 339 page = pfn_to_page(low_pfn);
340 if (page_zone(page) != zone)
341 continue;
342
343 /* Skip if free */
322 if (PageBuddy(page)) 344 if (PageBuddy(page))
323 continue; 345 continue;
324 346
diff --git a/mm/filemap.c b/mm/filemap.c
index 97f49ed35bd2..b66275757c28 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1400,15 +1400,12 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
1400 unsigned long seg = 0; 1400 unsigned long seg = 0;
1401 size_t count; 1401 size_t count;
1402 loff_t *ppos = &iocb->ki_pos; 1402 loff_t *ppos = &iocb->ki_pos;
1403 struct blk_plug plug;
1404 1403
1405 count = 0; 1404 count = 0;
1406 retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE); 1405 retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
1407 if (retval) 1406 if (retval)
1408 return retval; 1407 return retval;
1409 1408
1410 blk_start_plug(&plug);
1411
1412 /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */ 1409 /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
1413 if (filp->f_flags & O_DIRECT) { 1410 if (filp->f_flags & O_DIRECT) {
1414 loff_t size; 1411 loff_t size;
@@ -1424,8 +1421,12 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
1424 retval = filemap_write_and_wait_range(mapping, pos, 1421 retval = filemap_write_and_wait_range(mapping, pos,
1425 pos + iov_length(iov, nr_segs) - 1); 1422 pos + iov_length(iov, nr_segs) - 1);
1426 if (!retval) { 1423 if (!retval) {
1424 struct blk_plug plug;
1425
1426 blk_start_plug(&plug);
1427 retval = mapping->a_ops->direct_IO(READ, iocb, 1427 retval = mapping->a_ops->direct_IO(READ, iocb,
1428 iov, pos, nr_segs); 1428 iov, pos, nr_segs);
1429 blk_finish_plug(&plug);
1429 } 1430 }
1430 if (retval > 0) { 1431 if (retval > 0) {
1431 *ppos = pos + retval; 1432 *ppos = pos + retval;
@@ -1481,7 +1482,6 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
1481 break; 1482 break;
1482 } 1483 }
1483out: 1484out:
1484 blk_finish_plug(&plug);
1485 return retval; 1485 return retval;
1486} 1486}
1487EXPORT_SYMBOL(generic_file_aio_read); 1487EXPORT_SYMBOL(generic_file_aio_read);
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index f91b2f687343..a4eb31132229 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -263,7 +263,12 @@ found:
263 xip_pfn); 263 xip_pfn);
264 if (err == -ENOMEM) 264 if (err == -ENOMEM)
265 return VM_FAULT_OOM; 265 return VM_FAULT_OOM;
266 BUG_ON(err); 266 /*
267 * err == -EBUSY is fine, we've raced against another thread
268 * that faulted-in the same page
269 */
270 if (err != -EBUSY)
271 BUG_ON(err);
267 return VM_FAULT_NOPAGE; 272 return VM_FAULT_NOPAGE;
268 } else { 273 } else {
269 int err, ret = VM_FAULT_OOM; 274 int err, ret = VM_FAULT_OOM;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index b3ffc21ce801..91d3efb25d15 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2083,7 +2083,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
2083{ 2083{
2084 struct mm_struct *mm = mm_slot->mm; 2084 struct mm_struct *mm = mm_slot->mm;
2085 2085
2086 VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock)); 2086 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
2087 2087
2088 if (khugepaged_test_exit(mm)) { 2088 if (khugepaged_test_exit(mm)) {
2089 /* free mm_slot */ 2089 /* free mm_slot */
@@ -2113,7 +2113,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2113 int progress = 0; 2113 int progress = 0;
2114 2114
2115 VM_BUG_ON(!pages); 2115 VM_BUG_ON(!pages);
2116 VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock)); 2116 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
2117 2117
2118 if (khugepaged_scan.mm_slot) 2118 if (khugepaged_scan.mm_slot)
2119 mm_slot = khugepaged_scan.mm_slot; 2119 mm_slot = khugepaged_scan.mm_slot;
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index c833addd94d7..45eb6217bf38 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1036,7 +1036,7 @@ void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
1036{ 1036{
1037 pr_debug("%s(0x%p)\n", __func__, ptr); 1037 pr_debug("%s(0x%p)\n", __func__, ptr);
1038 1038
1039 if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) 1039 if (atomic_read(&kmemleak_enabled) && ptr && size && !IS_ERR(ptr))
1040 add_scan_area((unsigned long)ptr, size, gfp); 1040 add_scan_area((unsigned long)ptr, size, gfp);
1041 else if (atomic_read(&kmemleak_early_log)) 1041 else if (atomic_read(&kmemleak_early_log))
1042 log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0); 1042 log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
@@ -1757,6 +1757,7 @@ void __init kmemleak_init(void)
1757 1757
1758#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF 1758#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
1759 if (!kmemleak_skip_disable) { 1759 if (!kmemleak_skip_disable) {
1760 atomic_set(&kmemleak_early_log, 0);
1760 kmemleak_disable(); 1761 kmemleak_disable();
1761 return; 1762 return;
1762 } 1763 }
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 556859fec4ef..6728a7ae6f2d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -776,7 +776,8 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
776 /* threshold event is triggered in finer grain than soft limit */ 776 /* threshold event is triggered in finer grain than soft limit */
777 if (unlikely(mem_cgroup_event_ratelimit(memcg, 777 if (unlikely(mem_cgroup_event_ratelimit(memcg,
778 MEM_CGROUP_TARGET_THRESH))) { 778 MEM_CGROUP_TARGET_THRESH))) {
779 bool do_softlimit, do_numainfo; 779 bool do_softlimit;
780 bool do_numainfo __maybe_unused;
780 781
781 do_softlimit = mem_cgroup_event_ratelimit(memcg, 782 do_softlimit = mem_cgroup_event_ratelimit(memcg,
782 MEM_CGROUP_TARGET_SOFTLIMIT); 783 MEM_CGROUP_TARGET_SOFTLIMIT);
diff --git a/mm/migrate.c b/mm/migrate.c
index 9871a56d82c3..df141f60289e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -445,7 +445,6 @@ void migrate_page_copy(struct page *newpage, struct page *page)
445 ClearPageSwapCache(page); 445 ClearPageSwapCache(page);
446 ClearPagePrivate(page); 446 ClearPagePrivate(page);
447 set_page_private(page, 0); 447 set_page_private(page, 0);
448 page->mapping = NULL;
449 448
450 /* 449 /*
451 * If any waiters have accumulated on the new page then 450 * If any waiters have accumulated on the new page then
@@ -667,6 +666,7 @@ static int move_to_new_page(struct page *newpage, struct page *page,
667 } else { 666 } else {
668 if (remap_swapcache) 667 if (remap_swapcache)
669 remove_migration_ptes(page, newpage); 668 remove_migration_ptes(page, newpage);
669 page->mapping = NULL;
670 } 670 }
671 671
672 unlock_page(newpage); 672 unlock_page(newpage);
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index e920aa3ce104..c20ff48994c2 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -298,23 +298,18 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
298 goto free_proc_pages; 298 goto free_proc_pages;
299 } 299 }
300 300
301 task_lock(task); 301 mm = mm_access(task, PTRACE_MODE_ATTACH);
302 if (__ptrace_may_access(task, PTRACE_MODE_ATTACH)) { 302 if (!mm || IS_ERR(mm)) {
303 task_unlock(task); 303 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
304 rc = -EPERM; 304 /*
305 goto put_task_struct; 305 * Explicitly map EACCES to EPERM as EPERM is a more a
306 } 306 * appropriate error code for process_vw_readv/writev
307 mm = task->mm; 307 */
308 308 if (rc == -EACCES)
309 if (!mm || (task->flags & PF_KTHREAD)) { 309 rc = -EPERM;
310 task_unlock(task);
311 rc = -EINVAL;
312 goto put_task_struct; 310 goto put_task_struct;
313 } 311 }
314 312
315 atomic_inc(&mm->mm_users);
316 task_unlock(task);
317
318 for (i = 0; i < riovcnt && iov_l_curr_idx < liovcnt; i++) { 313 for (i = 0; i < riovcnt && iov_l_curr_idx < liovcnt; i++) {
319 rc = process_vm_rw_single_vec( 314 rc = process_vm_rw_single_vec(
320 (unsigned long)rvec[i].iov_base, rvec[i].iov_len, 315 (unsigned long)rvec[i].iov_base, rvec[i].iov_len,
diff --git a/mm/swap.c b/mm/swap.c
index b0f529b38979..fff1ff7fb9ad 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -659,7 +659,7 @@ void lru_add_page_tail(struct zone* zone,
659 VM_BUG_ON(!PageHead(page)); 659 VM_BUG_ON(!PageHead(page));
660 VM_BUG_ON(PageCompound(page_tail)); 660 VM_BUG_ON(PageCompound(page_tail));
661 VM_BUG_ON(PageLRU(page_tail)); 661 VM_BUG_ON(PageLRU(page_tail));
662 VM_BUG_ON(!spin_is_locked(&zone->lru_lock)); 662 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&zone->lru_lock));
663 663
664 SetPageLRU(page_tail); 664 SetPageLRU(page_tail);
665 665
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index a98628086452..a97d97a3a512 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -539,8 +539,10 @@ static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
539 pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb); 539 pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb);
540 memset(skb->cb, 0, sizeof(struct caif_payload_info)); 540 memset(skb->cb, 0, sizeof(struct caif_payload_info));
541 541
542 if (cf_sk->layer.dn == NULL) 542 if (cf_sk->layer.dn == NULL) {
543 kfree_skb(skb);
543 return -EINVAL; 544 return -EINVAL;
545 }
544 546
545 return cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt); 547 return cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt);
546} 548}
@@ -683,10 +685,10 @@ static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
683 } 685 }
684 err = transmit_skb(skb, cf_sk, 686 err = transmit_skb(skb, cf_sk,
685 msg->msg_flags&MSG_DONTWAIT, timeo); 687 msg->msg_flags&MSG_DONTWAIT, timeo);
686 if (err < 0) { 688 if (err < 0)
687 kfree_skb(skb); 689 /* skb is already freed */
688 goto pipe_err; 690 goto pipe_err;
689 } 691
690 sent += size; 692 sent += size;
691 } 693 }
692 694
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index b36f24a4c8e7..94b08612a4d8 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -248,7 +248,6 @@ static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
248{ 248{
249 struct cfmuxl *muxl = container_obj(layr); 249 struct cfmuxl *muxl = container_obj(layr);
250 struct cflayer *layer; 250 struct cflayer *layer;
251 int idx;
252 251
253 rcu_read_lock(); 252 rcu_read_lock();
254 list_for_each_entry_rcu(layer, &muxl->srvl_list, node) { 253 list_for_each_entry_rcu(layer, &muxl->srvl_list, node) {
@@ -257,14 +256,9 @@ static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
257 256
258 if ((ctrl == _CAIF_CTRLCMD_PHYIF_DOWN_IND || 257 if ((ctrl == _CAIF_CTRLCMD_PHYIF_DOWN_IND ||
259 ctrl == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND) && 258 ctrl == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND) &&
260 layer->id != 0) { 259 layer->id != 0)
261 260 cfmuxl_remove_uplayer(layr, layer->id);
262 idx = layer->id % UP_CACHE_SIZE; 261
263 spin_lock_bh(&muxl->receive_lock);
264 RCU_INIT_POINTER(muxl->up_cache[idx], NULL);
265 list_del_rcu(&layer->node);
266 spin_unlock_bh(&muxl->receive_lock);
267 }
268 /* NOTE: ctrlcmd is not allowed to block */ 262 /* NOTE: ctrlcmd is not allowed to block */
269 layer->ctrlcmd(layer, ctrl, phyid); 263 layer->ctrlcmd(layer, ctrl, phyid);
270 } 264 }
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 97f70e50ad3b..761ad9d6cc3b 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -85,8 +85,6 @@ int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid)
85 } else { 85 } else {
86 pr_info("client%lld fsid %pU\n", ceph_client_id(client), fsid); 86 pr_info("client%lld fsid %pU\n", ceph_client_id(client), fsid);
87 memcpy(&client->fsid, fsid, sizeof(*fsid)); 87 memcpy(&client->fsid, fsid, sizeof(*fsid));
88 ceph_debugfs_client_init(client);
89 client->have_fsid = true;
90 } 88 }
91 return 0; 89 return 0;
92} 90}
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 0b62deae42bd..1845cde26227 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -8,8 +8,8 @@
8 8
9#include <linux/ceph/mon_client.h> 9#include <linux/ceph/mon_client.h>
10#include <linux/ceph/libceph.h> 10#include <linux/ceph/libceph.h>
11#include <linux/ceph/debugfs.h>
11#include <linux/ceph/decode.h> 12#include <linux/ceph/decode.h>
12
13#include <linux/ceph/auth.h> 13#include <linux/ceph/auth.h>
14 14
15/* 15/*
@@ -340,8 +340,19 @@ static void ceph_monc_handle_map(struct ceph_mon_client *monc,
340 client->monc.monmap = monmap; 340 client->monc.monmap = monmap;
341 kfree(old); 341 kfree(old);
342 342
343 if (!client->have_fsid) {
344 client->have_fsid = true;
345 mutex_unlock(&monc->mutex);
346 /*
347 * do debugfs initialization without mutex to avoid
348 * creating a locking dependency
349 */
350 ceph_debugfs_client_init(client);
351 goto out_unlocked;
352 }
343out: 353out:
344 mutex_unlock(&monc->mutex); 354 mutex_unlock(&monc->mutex);
355out_unlocked:
345 wake_up_all(&client->auth_wq); 356 wake_up_all(&client->auth_wq);
346} 357}
347 358
diff --git a/net/core/dev.c b/net/core/dev.c
index 115dee1d985d..6ca32f6b3105 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3500,14 +3500,20 @@ static inline gro_result_t
3500__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 3500__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3501{ 3501{
3502 struct sk_buff *p; 3502 struct sk_buff *p;
3503 unsigned int maclen = skb->dev->hard_header_len;
3503 3504
3504 for (p = napi->gro_list; p; p = p->next) { 3505 for (p = napi->gro_list; p; p = p->next) {
3505 unsigned long diffs; 3506 unsigned long diffs;
3506 3507
3507 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev; 3508 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3508 diffs |= p->vlan_tci ^ skb->vlan_tci; 3509 diffs |= p->vlan_tci ^ skb->vlan_tci;
3509 diffs |= compare_ether_header(skb_mac_header(p), 3510 if (maclen == ETH_HLEN)
3510 skb_gro_mac_header(skb)); 3511 diffs |= compare_ether_header(skb_mac_header(p),
3512 skb_gro_mac_header(skb));
3513 else if (!diffs)
3514 diffs = memcmp(skb_mac_header(p),
3515 skb_gro_mac_header(skb),
3516 maclen);
3511 NAPI_GRO_CB(p)->same_flow = !diffs; 3517 NAPI_GRO_CB(p)->same_flow = !diffs;
3512 NAPI_GRO_CB(p)->flush = 0; 3518 NAPI_GRO_CB(p)->flush = 0;
3513 } 3519 }
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 369b41894527..3f79db1b612a 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1190,6 +1190,8 @@ static noinline_for_stack int ethtool_flash_device(struct net_device *dev,
1190 if (!dev->ethtool_ops->flash_device) 1190 if (!dev->ethtool_ops->flash_device)
1191 return -EOPNOTSUPP; 1191 return -EOPNOTSUPP;
1192 1192
1193 efl.data[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0;
1194
1193 return dev->ethtool_ops->flash_device(dev, &efl); 1195 return dev->ethtool_ops->flash_device(dev, &efl);
1194} 1196}
1195 1197
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 556b08298669..ddefc513b44a 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -194,7 +194,7 @@ static void netpoll_poll_dev(struct net_device *dev)
194 194
195 poll_napi(dev); 195 poll_napi(dev);
196 196
197 if (dev->priv_flags & IFF_SLAVE) { 197 if (dev->flags & IFF_SLAVE) {
198 if (dev->npinfo) { 198 if (dev->npinfo) {
199 struct net_device *bond_dev = dev->master; 199 struct net_device *bond_dev = dev->master;
200 struct sk_buff *skb; 200 struct sk_buff *skb;
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 3a9fd4826b75..4dacc44637ef 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -58,11 +58,12 @@ static int get_prioidx(u32 *prio)
58 58
59 spin_lock_irqsave(&prioidx_map_lock, flags); 59 spin_lock_irqsave(&prioidx_map_lock, flags);
60 prioidx = find_first_zero_bit(prioidx_map, sizeof(unsigned long) * PRIOIDX_SZ); 60 prioidx = find_first_zero_bit(prioidx_map, sizeof(unsigned long) * PRIOIDX_SZ);
61 if (prioidx == sizeof(unsigned long) * PRIOIDX_SZ) {
62 spin_unlock_irqrestore(&prioidx_map_lock, flags);
63 return -ENOSPC;
64 }
61 set_bit(prioidx, prioidx_map); 65 set_bit(prioidx, prioidx_map);
62 spin_unlock_irqrestore(&prioidx_map_lock, flags); 66 spin_unlock_irqrestore(&prioidx_map_lock, flags);
63 if (prioidx == sizeof(unsigned long) * PRIOIDX_SZ)
64 return -ENOSPC;
65
66 atomic_set(&max_prioidx, prioidx); 67 atomic_set(&max_prioidx, prioidx);
67 *prio = prioidx; 68 *prio = prioidx;
68 return 0; 69 return 0;
@@ -107,7 +108,7 @@ static void extend_netdev_table(struct net_device *dev, u32 new_len)
107static void update_netdev_tables(void) 108static void update_netdev_tables(void)
108{ 109{
109 struct net_device *dev; 110 struct net_device *dev;
110 u32 max_len = atomic_read(&max_prioidx); 111 u32 max_len = atomic_read(&max_prioidx) + 1;
111 struct netprio_map *map; 112 struct netprio_map *map;
112 113
113 rtnl_lock(); 114 rtnl_lock();
@@ -270,7 +271,6 @@ static int netprio_device_event(struct notifier_block *unused,
270{ 271{
271 struct net_device *dev = ptr; 272 struct net_device *dev = ptr;
272 struct netprio_map *old; 273 struct netprio_map *old;
273 u32 max_len = atomic_read(&max_prioidx);
274 274
275 /* 275 /*
276 * Note this is called with rtnl_lock held so we have update side 276 * Note this is called with rtnl_lock held so we have update side
@@ -278,11 +278,6 @@ static int netprio_device_event(struct notifier_block *unused,
278 */ 278 */
279 279
280 switch (event) { 280 switch (event) {
281
282 case NETDEV_REGISTER:
283 if (max_len)
284 extend_netdev_table(dev, max_len);
285 break;
286 case NETDEV_UNREGISTER: 281 case NETDEV_UNREGISTER:
287 old = rtnl_dereference(dev->priomap); 282 old = rtnl_dereference(dev->priomap);
288 RCU_INIT_POINTER(dev->priomap, NULL); 283 RCU_INIT_POINTER(dev->priomap, NULL);
diff --git a/net/core/sock.c b/net/core/sock.c
index 3e81fd2e3c75..02f8dfe320b7 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1171,13 +1171,10 @@ EXPORT_SYMBOL(sock_update_classid);
1171 1171
1172void sock_update_netprioidx(struct sock *sk) 1172void sock_update_netprioidx(struct sock *sk)
1173{ 1173{
1174 struct cgroup_netprio_state *state;
1175 if (in_interrupt()) 1174 if (in_interrupt())
1176 return; 1175 return;
1177 rcu_read_lock(); 1176
1178 state = task_netprio_state(current); 1177 sk->sk_cgrp_prioidx = task_netprioidx(current);
1179 sk->sk_cgrp_prioidx = state ? state->prioidx : 0;
1180 rcu_read_unlock();
1181} 1178}
1182EXPORT_SYMBOL_GPL(sock_update_netprioidx); 1179EXPORT_SYMBOL_GPL(sock_update_netprioidx);
1183#endif 1180#endif
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index aa2a2c79776f..d183262943d9 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -409,7 +409,7 @@ config INET_TCP_DIAG
409 409
410config INET_UDP_DIAG 410config INET_UDP_DIAG
411 tristate "UDP: socket monitoring interface" 411 tristate "UDP: socket monitoring interface"
412 depends on INET_DIAG 412 depends on INET_DIAG && (IPV6 || IPV6=n)
413 default n 413 default n
414 ---help--- 414 ---help---
415 Support for UDP socket monitoring interface used by the ss tool. 415 Support for UDP socket monitoring interface used by the ss tool.
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 59402be133f0..63e49890ad31 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -863,7 +863,8 @@ static int arp_process(struct sk_buff *skb)
863 if (addr_type == RTN_UNICAST && 863 if (addr_type == RTN_UNICAST &&
864 (arp_fwd_proxy(in_dev, dev, rt) || 864 (arp_fwd_proxy(in_dev, dev, rt) ||
865 arp_fwd_pvlan(in_dev, dev, rt, sip, tip) || 865 arp_fwd_pvlan(in_dev, dev, rt, sip, tip) ||
866 pneigh_lookup(&arp_tbl, net, &tip, dev, 0))) { 866 (rt->dst.dev != dev &&
867 pneigh_lookup(&arp_tbl, net, &tip, dev, 0)))) {
867 n = neigh_event_ns(&arp_tbl, sha, &sip, dev); 868 n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
868 if (n) 869 if (n)
869 neigh_release(n); 870 neigh_release(n);
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 1e60f7679075..42dd1a90edea 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -573,8 +573,8 @@ void ip_forward_options(struct sk_buff *skb)
573 } 573 }
574 if (srrptr + 3 <= srrspace) { 574 if (srrptr + 3 <= srrspace) {
575 opt->is_changed = 1; 575 opt->is_changed = 1;
576 ip_rt_get_source(&optptr[srrptr-1], skb, rt);
577 ip_hdr(skb)->daddr = opt->nexthop; 576 ip_hdr(skb)->daddr = opt->nexthop;
577 ip_rt_get_source(&optptr[srrptr-1], skb, rt);
578 optptr[2] = srrptr+4; 578 optptr[2] = srrptr+4;
579 } else if (net_ratelimit()) 579 } else if (net_ratelimit())
580 printk(KERN_CRIT "ip_forward(): Argh! Destination lost!\n"); 580 printk(KERN_CRIT "ip_forward(): Argh! Destination lost!\n");
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 4cb9cd2f2c39..7a7724da9bff 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -778,7 +778,6 @@ EXPORT_SYMBOL_GPL(net_ipv4_ctl_path);
778static __net_init int ipv4_sysctl_init_net(struct net *net) 778static __net_init int ipv4_sysctl_init_net(struct net *net)
779{ 779{
780 struct ctl_table *table; 780 struct ctl_table *table;
781 unsigned long limit;
782 781
783 table = ipv4_net_table; 782 table = ipv4_net_table;
784 if (!net_eq(net, &init_net)) { 783 if (!net_eq(net, &init_net)) {
@@ -815,11 +814,6 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
815 net->ipv4.sysctl_rt_cache_rebuild_count = 4; 814 net->ipv4.sysctl_rt_cache_rebuild_count = 4;
816 815
817 tcp_init_mem(net); 816 tcp_init_mem(net);
818 limit = nr_free_buffer_pages() / 8;
819 limit = max(limit, 128UL);
820 net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3;
821 net->ipv4.sysctl_tcp_mem[1] = limit;
822 net->ipv4.sysctl_tcp_mem[2] = net->ipv4.sysctl_tcp_mem[0] * 2;
823 817
824 net->ipv4.ipv4_hdr = register_net_sysctl_table(net, 818 net->ipv4.ipv4_hdr = register_net_sysctl_table(net,
825 net_ipv4_ctl_path, table); 819 net_ipv4_ctl_path, table);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 06373b4a449a..37755ccc0e96 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1876,6 +1876,20 @@ void tcp_shutdown(struct sock *sk, int how)
1876} 1876}
1877EXPORT_SYMBOL(tcp_shutdown); 1877EXPORT_SYMBOL(tcp_shutdown);
1878 1878
1879bool tcp_check_oom(struct sock *sk, int shift)
1880{
1881 bool too_many_orphans, out_of_socket_memory;
1882
1883 too_many_orphans = tcp_too_many_orphans(sk, shift);
1884 out_of_socket_memory = tcp_out_of_memory(sk);
1885
1886 if (too_many_orphans && net_ratelimit())
1887 pr_info("TCP: too many orphaned sockets\n");
1888 if (out_of_socket_memory && net_ratelimit())
1889 pr_info("TCP: out of memory -- consider tuning tcp_mem\n");
1890 return too_many_orphans || out_of_socket_memory;
1891}
1892
1879void tcp_close(struct sock *sk, long timeout) 1893void tcp_close(struct sock *sk, long timeout)
1880{ 1894{
1881 struct sk_buff *skb; 1895 struct sk_buff *skb;
@@ -2015,10 +2029,7 @@ adjudge_to_death:
2015 } 2029 }
2016 if (sk->sk_state != TCP_CLOSE) { 2030 if (sk->sk_state != TCP_CLOSE) {
2017 sk_mem_reclaim(sk); 2031 sk_mem_reclaim(sk);
2018 if (tcp_too_many_orphans(sk, 0)) { 2032 if (tcp_check_oom(sk, 0)) {
2019 if (net_ratelimit())
2020 printk(KERN_INFO "TCP: too many of orphaned "
2021 "sockets\n");
2022 tcp_set_state(sk, TCP_CLOSE); 2033 tcp_set_state(sk, TCP_CLOSE);
2023 tcp_send_active_reset(sk, GFP_ATOMIC); 2034 tcp_send_active_reset(sk, GFP_ATOMIC);
2024 NET_INC_STATS_BH(sock_net(sk), 2035 NET_INC_STATS_BH(sock_net(sk),
@@ -3218,7 +3229,6 @@ __setup("thash_entries=", set_thash_entries);
3218 3229
3219void tcp_init_mem(struct net *net) 3230void tcp_init_mem(struct net *net)
3220{ 3231{
3221 /* Set per-socket limits to no more than 1/128 the pressure threshold */
3222 unsigned long limit = nr_free_buffer_pages() / 8; 3232 unsigned long limit = nr_free_buffer_pages() / 8;
3223 limit = max(limit, 128UL); 3233 limit = max(limit, 128UL);
3224 net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3; 3234 net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3;
@@ -3287,7 +3297,8 @@ void __init tcp_init(void)
3287 sysctl_max_syn_backlog = max(128, cnt / 256); 3297 sysctl_max_syn_backlog = max(128, cnt / 256);
3288 3298
3289 tcp_init_mem(&init_net); 3299 tcp_init_mem(&init_net);
3290 limit = nr_free_buffer_pages() / 8; 3300 /* Set per-socket limits to no more than 1/128 the pressure threshold */
3301 limit = nr_free_buffer_pages() << (PAGE_SHIFT - 10);
3291 limit = max(limit, 128UL); 3302 limit = max(limit, 128UL);
3292 max_share = min(4UL*1024*1024, limit); 3303 max_share = min(4UL*1024*1024, limit);
3293 3304
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 976034f82320..53c8ce4046b2 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1307,25 +1307,26 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
1307 return in_sack; 1307 return in_sack;
1308} 1308}
1309 1309
1310static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk, 1310/* Mark the given newly-SACKed range as such, adjusting counters and hints. */
1311 struct tcp_sacktag_state *state, 1311static u8 tcp_sacktag_one(struct sock *sk,
1312 struct tcp_sacktag_state *state, u8 sacked,
1313 u32 start_seq, u32 end_seq,
1312 int dup_sack, int pcount) 1314 int dup_sack, int pcount)
1313{ 1315{
1314 struct tcp_sock *tp = tcp_sk(sk); 1316 struct tcp_sock *tp = tcp_sk(sk);
1315 u8 sacked = TCP_SKB_CB(skb)->sacked;
1316 int fack_count = state->fack_count; 1317 int fack_count = state->fack_count;
1317 1318
1318 /* Account D-SACK for retransmitted packet. */ 1319 /* Account D-SACK for retransmitted packet. */
1319 if (dup_sack && (sacked & TCPCB_RETRANS)) { 1320 if (dup_sack && (sacked & TCPCB_RETRANS)) {
1320 if (tp->undo_marker && tp->undo_retrans && 1321 if (tp->undo_marker && tp->undo_retrans &&
1321 after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker)) 1322 after(end_seq, tp->undo_marker))
1322 tp->undo_retrans--; 1323 tp->undo_retrans--;
1323 if (sacked & TCPCB_SACKED_ACKED) 1324 if (sacked & TCPCB_SACKED_ACKED)
1324 state->reord = min(fack_count, state->reord); 1325 state->reord = min(fack_count, state->reord);
1325 } 1326 }
1326 1327
1327 /* Nothing to do; acked frame is about to be dropped (was ACKed). */ 1328 /* Nothing to do; acked frame is about to be dropped (was ACKed). */
1328 if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) 1329 if (!after(end_seq, tp->snd_una))
1329 return sacked; 1330 return sacked;
1330 1331
1331 if (!(sacked & TCPCB_SACKED_ACKED)) { 1332 if (!(sacked & TCPCB_SACKED_ACKED)) {
@@ -1344,13 +1345,13 @@ static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
1344 /* New sack for not retransmitted frame, 1345 /* New sack for not retransmitted frame,
1345 * which was in hole. It is reordering. 1346 * which was in hole. It is reordering.
1346 */ 1347 */
1347 if (before(TCP_SKB_CB(skb)->seq, 1348 if (before(start_seq,
1348 tcp_highest_sack_seq(tp))) 1349 tcp_highest_sack_seq(tp)))
1349 state->reord = min(fack_count, 1350 state->reord = min(fack_count,
1350 state->reord); 1351 state->reord);
1351 1352
1352 /* SACK enhanced F-RTO (RFC4138; Appendix B) */ 1353 /* SACK enhanced F-RTO (RFC4138; Appendix B) */
1353 if (!after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark)) 1354 if (!after(end_seq, tp->frto_highmark))
1354 state->flag |= FLAG_ONLY_ORIG_SACKED; 1355 state->flag |= FLAG_ONLY_ORIG_SACKED;
1355 } 1356 }
1356 1357
@@ -1368,8 +1369,7 @@ static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
1368 1369
1369 /* Lost marker hint past SACKed? Tweak RFC3517 cnt */ 1370 /* Lost marker hint past SACKed? Tweak RFC3517 cnt */
1370 if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) && 1371 if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) &&
1371 before(TCP_SKB_CB(skb)->seq, 1372 before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq))
1372 TCP_SKB_CB(tp->lost_skb_hint)->seq))
1373 tp->lost_cnt_hint += pcount; 1373 tp->lost_cnt_hint += pcount;
1374 1374
1375 if (fack_count > tp->fackets_out) 1375 if (fack_count > tp->fackets_out)
@@ -1388,6 +1388,9 @@ static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
1388 return sacked; 1388 return sacked;
1389} 1389}
1390 1390
1391/* Shift newly-SACKed bytes from this skb to the immediately previous
1392 * already-SACKed sk_buff. Mark the newly-SACKed bytes as such.
1393 */
1391static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, 1394static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
1392 struct tcp_sacktag_state *state, 1395 struct tcp_sacktag_state *state,
1393 unsigned int pcount, int shifted, int mss, 1396 unsigned int pcount, int shifted, int mss,
@@ -1395,10 +1398,13 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
1395{ 1398{
1396 struct tcp_sock *tp = tcp_sk(sk); 1399 struct tcp_sock *tp = tcp_sk(sk);
1397 struct sk_buff *prev = tcp_write_queue_prev(sk, skb); 1400 struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
1401 u32 start_seq = TCP_SKB_CB(skb)->seq; /* start of newly-SACKed */
1402 u32 end_seq = start_seq + shifted; /* end of newly-SACKed */
1398 1403
1399 BUG_ON(!pcount); 1404 BUG_ON(!pcount);
1400 1405
1401 if (skb == tp->lost_skb_hint) 1406 /* Adjust hint for FACK. Non-FACK is handled in tcp_sacktag_one(). */
1407 if (tcp_is_fack(tp) && (skb == tp->lost_skb_hint))
1402 tp->lost_cnt_hint += pcount; 1408 tp->lost_cnt_hint += pcount;
1403 1409
1404 TCP_SKB_CB(prev)->end_seq += shifted; 1410 TCP_SKB_CB(prev)->end_seq += shifted;
@@ -1424,8 +1430,11 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
1424 skb_shinfo(skb)->gso_type = 0; 1430 skb_shinfo(skb)->gso_type = 0;
1425 } 1431 }
1426 1432
1427 /* We discard results */ 1433 /* Adjust counters and hints for the newly sacked sequence range but
1428 tcp_sacktag_one(skb, sk, state, dup_sack, pcount); 1434 * discard the return value since prev is already marked.
1435 */
1436 tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
1437 start_seq, end_seq, dup_sack, pcount);
1429 1438
1430 /* Difference in this won't matter, both ACKed by the same cumul. ACK */ 1439 /* Difference in this won't matter, both ACKed by the same cumul. ACK */
1431 TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS); 1440 TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
@@ -1664,10 +1673,14 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
1664 break; 1673 break;
1665 1674
1666 if (in_sack) { 1675 if (in_sack) {
1667 TCP_SKB_CB(skb)->sacked = tcp_sacktag_one(skb, sk, 1676 TCP_SKB_CB(skb)->sacked =
1668 state, 1677 tcp_sacktag_one(sk,
1669 dup_sack, 1678 state,
1670 tcp_skb_pcount(skb)); 1679 TCP_SKB_CB(skb)->sacked,
1680 TCP_SKB_CB(skb)->seq,
1681 TCP_SKB_CB(skb)->end_seq,
1682 dup_sack,
1683 tcp_skb_pcount(skb));
1671 1684
1672 if (!before(TCP_SKB_CB(skb)->seq, 1685 if (!before(TCP_SKB_CB(skb)->seq,
1673 tcp_highest_sack_seq(tp))) 1686 tcp_highest_sack_seq(tp)))
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 337ba4cca052..94d683a61cba 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -651,6 +651,11 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
651 arg.iov[0].iov_len, IPPROTO_TCP, 0); 651 arg.iov[0].iov_len, IPPROTO_TCP, 0);
652 arg.csumoffset = offsetof(struct tcphdr, check) / 2; 652 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
653 arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0; 653 arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
654 /* When socket is gone, all binding information is lost.
655 * routing might fail in this case. using iif for oif to
656 * make sure we can deliver it
657 */
658 arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb);
654 659
655 net = dev_net(skb_dst(skb)->dev); 660 net = dev_net(skb_dst(skb)->dev);
656 arg.tos = ip_hdr(skb)->tos; 661 arg.tos = ip_hdr(skb)->tos;
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index a516d1e399df..cd2e0723266d 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -77,10 +77,7 @@ static int tcp_out_of_resources(struct sock *sk, int do_reset)
77 if (sk->sk_err_soft) 77 if (sk->sk_err_soft)
78 shift++; 78 shift++;
79 79
80 if (tcp_too_many_orphans(sk, shift)) { 80 if (tcp_check_oom(sk, shift)) {
81 if (net_ratelimit())
82 printk(KERN_INFO "Out of socket memory\n");
83
84 /* Catch exceptional cases, when connection requires reset. 81 /* Catch exceptional cases, when connection requires reset.
85 * 1. Last segment was sent recently. */ 82 * 1. Last segment was sent recently. */
86 if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN || 83 if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 0a0d94ad9b08..b142bd4c2390 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -910,6 +910,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
910 wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n", 910 wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n",
911 result); 911 result);
912 912
913 ieee80211_led_init(local);
914
913 rtnl_lock(); 915 rtnl_lock();
914 916
915 result = ieee80211_init_rate_ctrl_alg(local, 917 result = ieee80211_init_rate_ctrl_alg(local,
@@ -931,8 +933,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
931 933
932 rtnl_unlock(); 934 rtnl_unlock();
933 935
934 ieee80211_led_init(local);
935
936 local->network_latency_notifier.notifier_call = 936 local->network_latency_notifier.notifier_call =
937 ieee80211_max_network_latency; 937 ieee80211_max_network_latency;
938 result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY, 938 result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY,
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 751409120769..5a5e504a8ffb 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -611,7 +611,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
611 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) % 611 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
612 tid_agg_rx->buf_size; 612 tid_agg_rx->buf_size;
613 if (!tid_agg_rx->reorder_buf[index] && 613 if (!tid_agg_rx->reorder_buf[index] &&
614 tid_agg_rx->stored_mpdu_num > 1) { 614 tid_agg_rx->stored_mpdu_num) {
615 /* 615 /*
616 * No buffers ready to be released, but check whether any 616 * No buffers ready to be released, but check whether any
617 * frames in the reorder buffer have timed out. 617 * frames in the reorder buffer have timed out.
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
index 4cba13e46ffd..ae3a035f5390 100644
--- a/net/rxrpc/ar-key.c
+++ b/net/rxrpc/ar-key.c
@@ -232,7 +232,7 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
232 if (toklen <= (n_parts + 1) * 4) 232 if (toklen <= (n_parts + 1) * 4)
233 return -EINVAL; 233 return -EINVAL;
234 234
235 princ->name_parts = kcalloc(sizeof(char *), n_parts, GFP_KERNEL); 235 princ->name_parts = kcalloc(n_parts, sizeof(char *), GFP_KERNEL);
236 if (!princ->name_parts) 236 if (!princ->name_parts)
237 return -ENOMEM; 237 return -ENOMEM;
238 238
@@ -355,7 +355,7 @@ static int rxrpc_krb5_decode_tagged_array(struct krb5_tagged_data **_td,
355 355
356 _debug("n_elem %d", n_elem); 356 _debug("n_elem %d", n_elem);
357 357
358 td = kcalloc(sizeof(struct krb5_tagged_data), n_elem, 358 td = kcalloc(n_elem, sizeof(struct krb5_tagged_data),
359 GFP_KERNEL); 359 GFP_KERNEL);
360 if (!td) 360 if (!td)
361 return -ENOMEM; 361 return -ENOMEM;
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index e465064d39a3..7e267d7b9c75 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -148,8 +148,7 @@ struct choke_skb_cb {
148 148
149static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb) 149static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
150{ 150{
151 BUILD_BUG_ON(sizeof(skb->cb) < 151 qdisc_cb_private_validate(skb, sizeof(struct choke_skb_cb));
152 sizeof(struct qdisc_skb_cb) + sizeof(struct choke_skb_cb));
153 return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data; 152 return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data;
154} 153}
155 154
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 2776012132ea..e83d61ca78ca 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -130,8 +130,7 @@ struct netem_skb_cb {
130 130
131static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb) 131static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
132{ 132{
133 BUILD_BUG_ON(sizeof(skb->cb) < 133 qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
134 sizeof(struct qdisc_skb_cb) + sizeof(struct netem_skb_cb));
135 return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data; 134 return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
136} 135}
137 136
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 96e42cae4c7a..d7eea99333e9 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -94,8 +94,7 @@ struct sfb_skb_cb {
94 94
95static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb) 95static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb)
96{ 96{
97 BUILD_BUG_ON(sizeof(skb->cb) < 97 qdisc_cb_private_validate(skb, sizeof(struct sfb_skb_cb));
98 sizeof(struct qdisc_skb_cb) + sizeof(struct sfb_skb_cb));
99 return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data; 98 return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data;
100} 99}
101 100
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 67494aef9acf..60d47180f043 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -166,9 +166,8 @@ struct sfq_skb_cb {
166 166
167static inline struct sfq_skb_cb *sfq_skb_cb(const struct sk_buff *skb) 167static inline struct sfq_skb_cb *sfq_skb_cb(const struct sk_buff *skb)
168{ 168{
169 BUILD_BUG_ON(sizeof(skb->cb) < 169 qdisc_cb_private_validate(skb, sizeof(struct sfq_skb_cb));
170 sizeof(struct qdisc_skb_cb) + sizeof(struct sfq_skb_cb)); 170 return (struct sfq_skb_cb *)qdisc_skb_cb(skb)->data;
171 return (struct sfq_skb_cb *)qdisc_skb_cb(skb)->data;
172} 171}
173 172
174static unsigned int sfq_hash(const struct sfq_sched_data *q, 173static unsigned int sfq_hash(const struct sfq_sched_data *q,
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index e3bfcbe8a520..a3b9782441f9 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -1924,6 +1924,12 @@ sub process {
1924 my $pre_ctx = "$1$2"; 1924 my $pre_ctx = "$1$2";
1925 1925
1926 my ($level, @ctx) = ctx_statement_level($linenr, $realcnt, 0); 1926 my ($level, @ctx) = ctx_statement_level($linenr, $realcnt, 0);
1927
1928 if ($line =~ /^\+\t{6,}/) {
1929 WARN("DEEP_INDENTATION",
1930 "Too many leading tabs - consider code refactoring\n" . $herecurr);
1931 }
1932
1927 my $ctx_cnt = $realcnt - $#ctx - 1; 1933 my $ctx_cnt = $realcnt - $#ctx - 1;
1928 my $ctx = join("\n", @ctx); 1934 my $ctx = join("\n", @ctx);
1929 1935
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index e8c969577768..d0de2a2c3a2d 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -932,7 +932,7 @@ static int do_isapnp_entry(const char *filename,
932 (id->function >> 12) & 0x0f, (id->function >> 8) & 0x0f); 932 (id->function >> 12) & 0x0f, (id->function >> 8) & 0x0f);
933 return 1; 933 return 1;
934} 934}
935ADD_TO_DEVTABLE("isa", struct isapnp_device_id, do_isapnp_entry); 935ADD_TO_DEVTABLE("isapnp", struct isapnp_device_id, do_isapnp_entry);
936 936
937/* 937/*
938 * Append a match expression for a single masked hex digit. 938 * Append a match expression for a single masked hex digit.
diff --git a/sound/isa/sb/emu8000_patch.c b/sound/isa/sb/emu8000_patch.c
index e09f144177f5..c99c6078be33 100644
--- a/sound/isa/sb/emu8000_patch.c
+++ b/sound/isa/sb/emu8000_patch.c
@@ -22,7 +22,6 @@
22#include "emu8000_local.h" 22#include "emu8000_local.h"
23#include <asm/uaccess.h> 23#include <asm/uaccess.h>
24#include <linux/moduleparam.h> 24#include <linux/moduleparam.h>
25#include <linux/moduleparam.h>
26 25
27static int emu8000_reset_addr; 26static int emu8000_reset_addr;
28module_param(emu8000_reset_addr, int, 0444); 27module_param(emu8000_reset_addr, int, 0444);
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 4df72c0e8c37..c2c65f63bf06 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -1447,7 +1447,7 @@ void snd_hda_codec_setup_stream(struct hda_codec *codec, hda_nid_t nid,
1447 for (i = 0; i < c->cvt_setups.used; i++) { 1447 for (i = 0; i < c->cvt_setups.used; i++) {
1448 p = snd_array_elem(&c->cvt_setups, i); 1448 p = snd_array_elem(&c->cvt_setups, i);
1449 if (!p->active && p->stream_tag == stream_tag && 1449 if (!p->active && p->stream_tag == stream_tag &&
1450 get_wcaps_type(get_wcaps(codec, p->nid)) == type) 1450 get_wcaps_type(get_wcaps(c, p->nid)) == type)
1451 p->dirty = 1; 1451 p->dirty = 1;
1452 } 1452 }
1453 } 1453 }
diff --git a/sound/pci/hda/hda_jack.c b/sound/pci/hda/hda_jack.c
index d8a35da0803f..9d819c4b4923 100644
--- a/sound/pci/hda/hda_jack.c
+++ b/sound/pci/hda/hda_jack.c
@@ -282,7 +282,8 @@ int snd_hda_jack_add_kctl(struct hda_codec *codec, hda_nid_t nid,
282EXPORT_SYMBOL_HDA(snd_hda_jack_add_kctl); 282EXPORT_SYMBOL_HDA(snd_hda_jack_add_kctl);
283 283
284static int add_jack_kctl(struct hda_codec *codec, hda_nid_t nid, 284static int add_jack_kctl(struct hda_codec *codec, hda_nid_t nid,
285 const struct auto_pin_cfg *cfg) 285 const struct auto_pin_cfg *cfg,
286 char *lastname, int *lastidx)
286{ 287{
287 unsigned int def_conf, conn; 288 unsigned int def_conf, conn;
288 char name[44]; 289 char name[44];
@@ -298,6 +299,10 @@ static int add_jack_kctl(struct hda_codec *codec, hda_nid_t nid,
298 return 0; 299 return 0;
299 300
300 snd_hda_get_pin_label(codec, nid, cfg, name, sizeof(name), &idx); 301 snd_hda_get_pin_label(codec, nid, cfg, name, sizeof(name), &idx);
302 if (!strcmp(name, lastname) && idx == *lastidx)
303 idx++;
304 strncpy(lastname, name, 44);
305 *lastidx = idx;
301 err = snd_hda_jack_add_kctl(codec, nid, name, idx); 306 err = snd_hda_jack_add_kctl(codec, nid, name, idx);
302 if (err < 0) 307 if (err < 0)
303 return err; 308 return err;
@@ -311,41 +316,42 @@ int snd_hda_jack_add_kctls(struct hda_codec *codec,
311 const struct auto_pin_cfg *cfg) 316 const struct auto_pin_cfg *cfg)
312{ 317{
313 const hda_nid_t *p; 318 const hda_nid_t *p;
314 int i, err; 319 int i, err, lastidx = 0;
320 char lastname[44] = "";
315 321
316 for (i = 0, p = cfg->line_out_pins; i < cfg->line_outs; i++, p++) { 322 for (i = 0, p = cfg->line_out_pins; i < cfg->line_outs; i++, p++) {
317 err = add_jack_kctl(codec, *p, cfg); 323 err = add_jack_kctl(codec, *p, cfg, lastname, &lastidx);
318 if (err < 0) 324 if (err < 0)
319 return err; 325 return err;
320 } 326 }
321 for (i = 0, p = cfg->hp_pins; i < cfg->hp_outs; i++, p++) { 327 for (i = 0, p = cfg->hp_pins; i < cfg->hp_outs; i++, p++) {
322 if (*p == *cfg->line_out_pins) /* might be duplicated */ 328 if (*p == *cfg->line_out_pins) /* might be duplicated */
323 break; 329 break;
324 err = add_jack_kctl(codec, *p, cfg); 330 err = add_jack_kctl(codec, *p, cfg, lastname, &lastidx);
325 if (err < 0) 331 if (err < 0)
326 return err; 332 return err;
327 } 333 }
328 for (i = 0, p = cfg->speaker_pins; i < cfg->speaker_outs; i++, p++) { 334 for (i = 0, p = cfg->speaker_pins; i < cfg->speaker_outs; i++, p++) {
329 if (*p == *cfg->line_out_pins) /* might be duplicated */ 335 if (*p == *cfg->line_out_pins) /* might be duplicated */
330 break; 336 break;
331 err = add_jack_kctl(codec, *p, cfg); 337 err = add_jack_kctl(codec, *p, cfg, lastname, &lastidx);
332 if (err < 0) 338 if (err < 0)
333 return err; 339 return err;
334 } 340 }
335 for (i = 0; i < cfg->num_inputs; i++) { 341 for (i = 0; i < cfg->num_inputs; i++) {
336 err = add_jack_kctl(codec, cfg->inputs[i].pin, cfg); 342 err = add_jack_kctl(codec, cfg->inputs[i].pin, cfg, lastname, &lastidx);
337 if (err < 0) 343 if (err < 0)
338 return err; 344 return err;
339 } 345 }
340 for (i = 0, p = cfg->dig_out_pins; i < cfg->dig_outs; i++, p++) { 346 for (i = 0, p = cfg->dig_out_pins; i < cfg->dig_outs; i++, p++) {
341 err = add_jack_kctl(codec, *p, cfg); 347 err = add_jack_kctl(codec, *p, cfg, lastname, &lastidx);
342 if (err < 0) 348 if (err < 0)
343 return err; 349 return err;
344 } 350 }
345 err = add_jack_kctl(codec, cfg->dig_in_pin, cfg); 351 err = add_jack_kctl(codec, cfg->dig_in_pin, cfg, lastname, &lastidx);
346 if (err < 0) 352 if (err < 0)
347 return err; 353 return err;
348 err = add_jack_kctl(codec, cfg->mono_out_pin, cfg); 354 err = add_jack_kctl(codec, cfg->mono_out_pin, cfg, lastname, &lastidx);
349 if (err < 0) 355 if (err < 0)
350 return err; 356 return err;
351 return 0; 357 return 0;
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 35abe3c62908..21d91d580da8 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -728,18 +728,19 @@ static int ca0132_hp_switch_put(struct snd_kcontrol *kcontrol,
728 728
729 err = chipio_read(codec, REG_CODEC_MUTE, &data); 729 err = chipio_read(codec, REG_CODEC_MUTE, &data);
730 if (err < 0) 730 if (err < 0)
731 return err; 731 goto exit;
732 732
733 /* *valp 0 is mute, 1 is unmute */ 733 /* *valp 0 is mute, 1 is unmute */
734 data = (data & 0x7f) | (*valp ? 0 : 0x80); 734 data = (data & 0x7f) | (*valp ? 0 : 0x80);
735 chipio_write(codec, REG_CODEC_MUTE, data); 735 err = chipio_write(codec, REG_CODEC_MUTE, data);
736 if (err < 0) 736 if (err < 0)
737 return err; 737 goto exit;
738 738
739 spec->curr_hp_switch = *valp; 739 spec->curr_hp_switch = *valp;
740 740
741 exit:
741 snd_hda_power_down(codec); 742 snd_hda_power_down(codec);
742 return 1; 743 return err < 0 ? err : 1;
743} 744}
744 745
745static int ca0132_speaker_switch_get(struct snd_kcontrol *kcontrol, 746static int ca0132_speaker_switch_get(struct snd_kcontrol *kcontrol,
@@ -770,18 +771,19 @@ static int ca0132_speaker_switch_put(struct snd_kcontrol *kcontrol,
770 771
771 err = chipio_read(codec, REG_CODEC_MUTE, &data); 772 err = chipio_read(codec, REG_CODEC_MUTE, &data);
772 if (err < 0) 773 if (err < 0)
773 return err; 774 goto exit;
774 775
775 /* *valp 0 is mute, 1 is unmute */ 776 /* *valp 0 is mute, 1 is unmute */
776 data = (data & 0xef) | (*valp ? 0 : 0x10); 777 data = (data & 0xef) | (*valp ? 0 : 0x10);
777 chipio_write(codec, REG_CODEC_MUTE, data); 778 err = chipio_write(codec, REG_CODEC_MUTE, data);
778 if (err < 0) 779 if (err < 0)
779 return err; 780 goto exit;
780 781
781 spec->curr_speaker_switch = *valp; 782 spec->curr_speaker_switch = *valp;
782 783
784 exit:
783 snd_hda_power_down(codec); 785 snd_hda_power_down(codec);
784 return 1; 786 return err < 0 ? err : 1;
785} 787}
786 788
787static int ca0132_hp_volume_get(struct snd_kcontrol *kcontrol, 789static int ca0132_hp_volume_get(struct snd_kcontrol *kcontrol,
@@ -819,25 +821,26 @@ static int ca0132_hp_volume_put(struct snd_kcontrol *kcontrol,
819 821
820 err = chipio_read(codec, REG_CODEC_HP_VOL_L, &data); 822 err = chipio_read(codec, REG_CODEC_HP_VOL_L, &data);
821 if (err < 0) 823 if (err < 0)
822 return err; 824 goto exit;
823 825
824 val = 31 - left_vol; 826 val = 31 - left_vol;
825 data = (data & 0xe0) | val; 827 data = (data & 0xe0) | val;
826 chipio_write(codec, REG_CODEC_HP_VOL_L, data); 828 err = chipio_write(codec, REG_CODEC_HP_VOL_L, data);
827 if (err < 0) 829 if (err < 0)
828 return err; 830 goto exit;
829 831
830 val = 31 - right_vol; 832 val = 31 - right_vol;
831 data = (data & 0xe0) | val; 833 data = (data & 0xe0) | val;
832 chipio_write(codec, REG_CODEC_HP_VOL_R, data); 834 err = chipio_write(codec, REG_CODEC_HP_VOL_R, data);
833 if (err < 0) 835 if (err < 0)
834 return err; 836 goto exit;
835 837
836 spec->curr_hp_volume[0] = left_vol; 838 spec->curr_hp_volume[0] = left_vol;
837 spec->curr_hp_volume[1] = right_vol; 839 spec->curr_hp_volume[1] = right_vol;
838 840
841 exit:
839 snd_hda_power_down(codec); 842 snd_hda_power_down(codec);
840 return 1; 843 return err < 0 ? err : 1;
841} 844}
842 845
843static int add_hp_switch(struct hda_codec *codec, hda_nid_t nid) 846static int add_hp_switch(struct hda_codec *codec, hda_nid_t nid)
@@ -936,6 +939,8 @@ static int ca0132_build_controls(struct hda_codec *codec)
936 if (err < 0) 939 if (err < 0)
937 return err; 940 return err;
938 err = add_in_volume(codec, spec->dig_in, "IEC958"); 941 err = add_in_volume(codec, spec->dig_in, "IEC958");
942 if (err < 0)
943 return err;
939 } 944 }
940 return 0; 945 return 0;
941} 946}
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 0e99357e822c..bc5a993d1146 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -988,8 +988,10 @@ static void cs_automic(struct hda_codec *codec)
988 change_cur_input(codec, !spec->automic_idx, 0); 988 change_cur_input(codec, !spec->automic_idx, 0);
989 } else { 989 } else {
990 if (present) { 990 if (present) {
991 spec->last_input = spec->cur_input; 991 if (spec->cur_input != spec->automic_idx) {
992 spec->cur_input = spec->automic_idx; 992 spec->last_input = spec->cur_input;
993 spec->cur_input = spec->automic_idx;
994 }
993 } else { 995 } else {
994 spec->cur_input = spec->last_input; 996 spec->cur_input = spec->last_input;
995 } 997 }
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 0db1dc49382b..1358987c49d8 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -177,6 +177,7 @@ struct alc_spec {
177 unsigned int detect_lo:1; /* Line-out detection enabled */ 177 unsigned int detect_lo:1; /* Line-out detection enabled */
178 unsigned int automute_speaker_possible:1; /* there are speakers and either LO or HP */ 178 unsigned int automute_speaker_possible:1; /* there are speakers and either LO or HP */
179 unsigned int automute_lo_possible:1; /* there are line outs and HP */ 179 unsigned int automute_lo_possible:1; /* there are line outs and HP */
180 unsigned int keep_vref_in_automute:1; /* Don't clear VREF in automute */
180 181
181 /* other flags */ 182 /* other flags */
182 unsigned int no_analog :1; /* digital I/O only */ 183 unsigned int no_analog :1; /* digital I/O only */
@@ -495,13 +496,24 @@ static void do_automute(struct hda_codec *codec, int num_pins, hda_nid_t *pins,
495 496
496 for (i = 0; i < num_pins; i++) { 497 for (i = 0; i < num_pins; i++) {
497 hda_nid_t nid = pins[i]; 498 hda_nid_t nid = pins[i];
499 unsigned int val;
498 if (!nid) 500 if (!nid)
499 break; 501 break;
500 switch (spec->automute_mode) { 502 switch (spec->automute_mode) {
501 case ALC_AUTOMUTE_PIN: 503 case ALC_AUTOMUTE_PIN:
504 /* don't reset VREF value in case it's controlling
505 * the amp (see alc861_fixup_asus_amp_vref_0f())
506 */
507 if (spec->keep_vref_in_automute) {
508 val = snd_hda_codec_read(codec, nid, 0,
509 AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
510 val &= ~PIN_HP;
511 } else
512 val = 0;
513 val |= pin_bits;
502 snd_hda_codec_write(codec, nid, 0, 514 snd_hda_codec_write(codec, nid, 0,
503 AC_VERB_SET_PIN_WIDGET_CONTROL, 515 AC_VERB_SET_PIN_WIDGET_CONTROL,
504 pin_bits); 516 val);
505 break; 517 break;
506 case ALC_AUTOMUTE_AMP: 518 case ALC_AUTOMUTE_AMP:
507 snd_hda_codec_amp_stereo(codec, nid, HDA_OUTPUT, 0, 519 snd_hda_codec_amp_stereo(codec, nid, HDA_OUTPUT, 0,
@@ -1843,6 +1855,8 @@ static const char * const alc_slave_vols[] = {
1843 "Speaker Playback Volume", 1855 "Speaker Playback Volume",
1844 "Mono Playback Volume", 1856 "Mono Playback Volume",
1845 "Line-Out Playback Volume", 1857 "Line-Out Playback Volume",
1858 "CLFE Playback Volume",
1859 "Bass Speaker Playback Volume",
1846 "PCM Playback Volume", 1860 "PCM Playback Volume",
1847 NULL, 1861 NULL,
1848}; 1862};
@@ -1858,6 +1872,8 @@ static const char * const alc_slave_sws[] = {
1858 "Mono Playback Switch", 1872 "Mono Playback Switch",
1859 "IEC958 Playback Switch", 1873 "IEC958 Playback Switch",
1860 "Line-Out Playback Switch", 1874 "Line-Out Playback Switch",
1875 "CLFE Playback Switch",
1876 "Bass Speaker Playback Switch",
1861 "PCM Playback Switch", 1877 "PCM Playback Switch",
1862 NULL, 1878 NULL,
1863}; 1879};
@@ -2306,7 +2322,7 @@ static int alc_build_pcms(struct hda_codec *codec)
2306 "%s Analog", codec->chip_name); 2322 "%s Analog", codec->chip_name);
2307 info->name = spec->stream_name_analog; 2323 info->name = spec->stream_name_analog;
2308 2324
2309 if (spec->multiout.dac_nids > 0) { 2325 if (spec->multiout.num_dacs > 0) {
2310 p = spec->stream_analog_playback; 2326 p = spec->stream_analog_playback;
2311 if (!p) 2327 if (!p)
2312 p = &alc_pcm_analog_playback; 2328 p = &alc_pcm_analog_playback;
@@ -4358,6 +4374,7 @@ enum {
4358 ALC882_FIXUP_ACER_ASPIRE_8930G, 4374 ALC882_FIXUP_ACER_ASPIRE_8930G,
4359 ALC882_FIXUP_ASPIRE_8930G_VERBS, 4375 ALC882_FIXUP_ASPIRE_8930G_VERBS,
4360 ALC885_FIXUP_MACPRO_GPIO, 4376 ALC885_FIXUP_MACPRO_GPIO,
4377 ALC889_FIXUP_DAC_ROUTE,
4361}; 4378};
4362 4379
4363static void alc889_fixup_coef(struct hda_codec *codec, 4380static void alc889_fixup_coef(struct hda_codec *codec,
@@ -4411,6 +4428,23 @@ static void alc885_fixup_macpro_gpio(struct hda_codec *codec,
4411 alc882_gpio_mute(codec, 1, 0); 4428 alc882_gpio_mute(codec, 1, 0);
4412} 4429}
4413 4430
4431/* Fix the connection of some pins for ALC889:
4432 * At least, Acer Aspire 5935 shows the connections to DAC3/4 don't
4433 * work correctly (bko#42740)
4434 */
4435static void alc889_fixup_dac_route(struct hda_codec *codec,
4436 const struct alc_fixup *fix, int action)
4437{
4438 if (action == ALC_FIXUP_ACT_PRE_PROBE) {
4439 hda_nid_t conn1[2] = { 0x0c, 0x0d };
4440 hda_nid_t conn2[2] = { 0x0e, 0x0f };
4441 snd_hda_override_conn_list(codec, 0x14, 2, conn1);
4442 snd_hda_override_conn_list(codec, 0x15, 2, conn1);
4443 snd_hda_override_conn_list(codec, 0x18, 2, conn2);
4444 snd_hda_override_conn_list(codec, 0x1a, 2, conn2);
4445 }
4446}
4447
4414static const struct alc_fixup alc882_fixups[] = { 4448static const struct alc_fixup alc882_fixups[] = {
4415 [ALC882_FIXUP_ABIT_AW9D_MAX] = { 4449 [ALC882_FIXUP_ABIT_AW9D_MAX] = {
4416 .type = ALC_FIXUP_PINS, 4450 .type = ALC_FIXUP_PINS,
@@ -4558,6 +4592,10 @@ static const struct alc_fixup alc882_fixups[] = {
4558 .type = ALC_FIXUP_FUNC, 4592 .type = ALC_FIXUP_FUNC,
4559 .v.func = alc885_fixup_macpro_gpio, 4593 .v.func = alc885_fixup_macpro_gpio,
4560 }, 4594 },
4595 [ALC889_FIXUP_DAC_ROUTE] = {
4596 .type = ALC_FIXUP_FUNC,
4597 .v.func = alc889_fixup_dac_route,
4598 },
4561}; 4599};
4562 4600
4563static const struct snd_pci_quirk alc882_fixup_tbl[] = { 4601static const struct snd_pci_quirk alc882_fixup_tbl[] = {
@@ -4582,6 +4620,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
4582 SND_PCI_QUIRK(0x1025, 0x0142, "Acer Aspire 7730G", 4620 SND_PCI_QUIRK(0x1025, 0x0142, "Acer Aspire 7730G",
4583 ALC882_FIXUP_ACER_ASPIRE_4930G), 4621 ALC882_FIXUP_ACER_ASPIRE_4930G),
4584 SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", ALC882_FIXUP_PB_M5210), 4622 SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", ALC882_FIXUP_PB_M5210),
4623 SND_PCI_QUIRK(0x1025, 0x0259, "Acer Aspire 5935", ALC889_FIXUP_DAC_ROUTE),
4585 SND_PCI_QUIRK(0x1025, 0x0296, "Acer Aspire 7736z", ALC882_FIXUP_ACER_ASPIRE_7736), 4624 SND_PCI_QUIRK(0x1025, 0x0296, "Acer Aspire 7736z", ALC882_FIXUP_ACER_ASPIRE_7736),
4586 SND_PCI_QUIRK(0x1043, 0x13c2, "Asus A7M", ALC882_FIXUP_EAPD), 4625 SND_PCI_QUIRK(0x1043, 0x13c2, "Asus A7M", ALC882_FIXUP_EAPD),
4587 SND_PCI_QUIRK(0x1043, 0x1873, "ASUS W90V", ALC882_FIXUP_ASUS_W90V), 4626 SND_PCI_QUIRK(0x1043, 0x1873, "ASUS W90V", ALC882_FIXUP_ASUS_W90V),
@@ -4735,7 +4774,6 @@ enum {
4735 ALC262_FIXUP_FSC_H270, 4774 ALC262_FIXUP_FSC_H270,
4736 ALC262_FIXUP_HP_Z200, 4775 ALC262_FIXUP_HP_Z200,
4737 ALC262_FIXUP_TYAN, 4776 ALC262_FIXUP_TYAN,
4738 ALC262_FIXUP_TOSHIBA_RX1,
4739 ALC262_FIXUP_LENOVO_3000, 4777 ALC262_FIXUP_LENOVO_3000,
4740 ALC262_FIXUP_BENQ, 4778 ALC262_FIXUP_BENQ,
4741 ALC262_FIXUP_BENQ_T31, 4779 ALC262_FIXUP_BENQ_T31,
@@ -4765,16 +4803,6 @@ static const struct alc_fixup alc262_fixups[] = {
4765 { } 4803 { }
4766 } 4804 }
4767 }, 4805 },
4768 [ALC262_FIXUP_TOSHIBA_RX1] = {
4769 .type = ALC_FIXUP_PINS,
4770 .v.pins = (const struct alc_pincfg[]) {
4771 { 0x14, 0x90170110 }, /* speaker */
4772 { 0x15, 0x0421101f }, /* HP */
4773 { 0x1a, 0x40f000f0 }, /* N/A */
4774 { 0x1b, 0x40f000f0 }, /* N/A */
4775 { 0x1e, 0x40f000f0 }, /* N/A */
4776 }
4777 },
4778 [ALC262_FIXUP_LENOVO_3000] = { 4806 [ALC262_FIXUP_LENOVO_3000] = {
4779 .type = ALC_FIXUP_VERBS, 4807 .type = ALC_FIXUP_VERBS,
4780 .v.verbs = (const struct hda_verb[]) { 4808 .v.verbs = (const struct hda_verb[]) {
@@ -4807,8 +4835,6 @@ static const struct snd_pci_quirk alc262_fixup_tbl[] = {
4807 SND_PCI_QUIRK(0x10cf, 0x1397, "Fujitsu", ALC262_FIXUP_BENQ), 4835 SND_PCI_QUIRK(0x10cf, 0x1397, "Fujitsu", ALC262_FIXUP_BENQ),
4808 SND_PCI_QUIRK(0x10cf, 0x142d, "Fujitsu Lifebook E8410", ALC262_FIXUP_BENQ), 4836 SND_PCI_QUIRK(0x10cf, 0x142d, "Fujitsu Lifebook E8410", ALC262_FIXUP_BENQ),
4809 SND_PCI_QUIRK(0x10f1, 0x2915, "Tyan Thunder n6650W", ALC262_FIXUP_TYAN), 4837 SND_PCI_QUIRK(0x10f1, 0x2915, "Tyan Thunder n6650W", ALC262_FIXUP_TYAN),
4810 SND_PCI_QUIRK(0x1179, 0x0001, "Toshiba dynabook SS RX1",
4811 ALC262_FIXUP_TOSHIBA_RX1),
4812 SND_PCI_QUIRK(0x1734, 0x1147, "FSC Celsius H270", ALC262_FIXUP_FSC_H270), 4838 SND_PCI_QUIRK(0x1734, 0x1147, "FSC Celsius H270", ALC262_FIXUP_FSC_H270),
4813 SND_PCI_QUIRK(0x17aa, 0x384e, "Lenovo 3000", ALC262_FIXUP_LENOVO_3000), 4839 SND_PCI_QUIRK(0x17aa, 0x384e, "Lenovo 3000", ALC262_FIXUP_LENOVO_3000),
4814 SND_PCI_QUIRK(0x17ff, 0x0560, "Benq ED8", ALC262_FIXUP_BENQ), 4840 SND_PCI_QUIRK(0x17ff, 0x0560, "Benq ED8", ALC262_FIXUP_BENQ),
@@ -5377,7 +5403,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5377 SND_PCI_QUIRK(0x1043, 0x8330, "ASUS Eeepc P703 P900A", 5403 SND_PCI_QUIRK(0x1043, 0x8330, "ASUS Eeepc P703 P900A",
5378 ALC269_FIXUP_AMIC), 5404 ALC269_FIXUP_AMIC),
5379 SND_PCI_QUIRK(0x1043, 0x1013, "ASUS N61Da", ALC269_FIXUP_AMIC), 5405 SND_PCI_QUIRK(0x1043, 0x1013, "ASUS N61Da", ALC269_FIXUP_AMIC),
5380 SND_PCI_QUIRK(0x1043, 0x1113, "ASUS N63Jn", ALC269_FIXUP_AMIC),
5381 SND_PCI_QUIRK(0x1043, 0x1143, "ASUS B53f", ALC269_FIXUP_AMIC), 5406 SND_PCI_QUIRK(0x1043, 0x1143, "ASUS B53f", ALC269_FIXUP_AMIC),
5382 SND_PCI_QUIRK(0x1043, 0x1133, "ASUS UJ20ft", ALC269_FIXUP_AMIC), 5407 SND_PCI_QUIRK(0x1043, 0x1133, "ASUS UJ20ft", ALC269_FIXUP_AMIC),
5383 SND_PCI_QUIRK(0x1043, 0x1183, "ASUS K72DR", ALC269_FIXUP_AMIC), 5408 SND_PCI_QUIRK(0x1043, 0x1183, "ASUS K72DR", ALC269_FIXUP_AMIC),
@@ -5589,6 +5614,25 @@ enum {
5589 PINFIX_ASUS_A6RP, 5614 PINFIX_ASUS_A6RP,
5590}; 5615};
5591 5616
5617/* On some laptops, VREF of pin 0x0f is abused for controlling the main amp */
5618static void alc861_fixup_asus_amp_vref_0f(struct hda_codec *codec,
5619 const struct alc_fixup *fix, int action)
5620{
5621 struct alc_spec *spec = codec->spec;
5622 unsigned int val;
5623
5624 if (action != ALC_FIXUP_ACT_INIT)
5625 return;
5626 val = snd_hda_codec_read(codec, 0x0f, 0,
5627 AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
5628 if (!(val & (AC_PINCTL_IN_EN | AC_PINCTL_OUT_EN)))
5629 val |= AC_PINCTL_IN_EN;
5630 val |= AC_PINCTL_VREF_50;
5631 snd_hda_codec_write(codec, 0x0f, 0,
5632 AC_VERB_SET_PIN_WIDGET_CONTROL, val);
5633 spec->keep_vref_in_automute = 1;
5634}
5635
5592static const struct alc_fixup alc861_fixups[] = { 5636static const struct alc_fixup alc861_fixups[] = {
5593 [PINFIX_FSC_AMILO_PI1505] = { 5637 [PINFIX_FSC_AMILO_PI1505] = {
5594 .type = ALC_FIXUP_PINS, 5638 .type = ALC_FIXUP_PINS,
@@ -5599,17 +5643,14 @@ static const struct alc_fixup alc861_fixups[] = {
5599 } 5643 }
5600 }, 5644 },
5601 [PINFIX_ASUS_A6RP] = { 5645 [PINFIX_ASUS_A6RP] = {
5602 .type = ALC_FIXUP_VERBS, 5646 .type = ALC_FIXUP_FUNC,
5603 .v.verbs = (const struct hda_verb[]) { 5647 .v.func = alc861_fixup_asus_amp_vref_0f,
5604 /* node 0x0f VREF seems controlling the master output */
5605 { 0x0f, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF50 },
5606 { }
5607 },
5608 }, 5648 },
5609}; 5649};
5610 5650
5611static const struct snd_pci_quirk alc861_fixup_tbl[] = { 5651static const struct snd_pci_quirk alc861_fixup_tbl[] = {
5612 SND_PCI_QUIRK(0x1043, 0x1393, "ASUS A6Rp", PINFIX_ASUS_A6RP), 5652 SND_PCI_QUIRK_VENDOR(0x1043, "ASUS laptop", PINFIX_ASUS_A6RP),
5653 SND_PCI_QUIRK(0x1584, 0x0000, "Uniwill ECS M31EI", PINFIX_ASUS_A6RP),
5613 SND_PCI_QUIRK(0x1584, 0x2b01, "Haier W18", PINFIX_ASUS_A6RP), 5654 SND_PCI_QUIRK(0x1584, 0x2b01, "Haier W18", PINFIX_ASUS_A6RP),
5614 SND_PCI_QUIRK(0x1734, 0x10c7, "FSC Amilo Pi1505", PINFIX_FSC_AMILO_PI1505), 5655 SND_PCI_QUIRK(0x1734, 0x10c7, "FSC Amilo Pi1505", PINFIX_FSC_AMILO_PI1505),
5615 {} 5656 {}
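
Among the patch_realtek.c hunks above, the do_automute() change is the one tied to the new keep_vref_in_automute flag: instead of writing the pin control from scratch it reads the current value, clears only the headphone-enable bit and ORs in the new state, so the VREF level set by alc861_fixup_asus_amp_vref_0f() survives muting. A standalone read-modify-write sketch; the bit values are illustrative, not the HDA pin-control constants:

#include <stdio.h>

#define HP_EN    0x80	/* illustrative: headphone-amp enable bit   */
#define OUT_EN   0x40	/* illustrative: output enable bit          */
#define VREF_50  0x01	/* illustrative: one of the VREF level bits */

static unsigned int pinctl = OUT_EN | VREF_50;	/* VREF programmed by a fixup */

/* Update only the HP bit; keep whatever VREF level is already set. */
static void automute_pin(unsigned int hp_bits, int keep_vref)
{
	unsigned int val;

	if (keep_vref)
		val = pinctl & ~HP_EN;	/* read back, drop only the HP bit   */
	else
		val = 0;		/* old behaviour: rewrite from zero  */
	val |= hp_bits;
	pinctl = val;
}

int main(void)
{
	automute_pin(HP_EN, 1);		/* unmute: HP on, VREF untouched */
	printf("pinctl=0x%02x\n", pinctl);
	automute_pin(0, 1);		/* mute: HP off, VREF still set  */
	printf("pinctl=0x%02x\n", pinctl);
	return 0;
}
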
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 948f0be2f4f3..6345df131a00 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -5078,9 +5078,9 @@ static int stac92xx_update_led_status(struct hda_codec *codec)
5078 spec->gpio_dir, spec->gpio_data); 5078 spec->gpio_dir, spec->gpio_data);
5079 } else { 5079 } else {
5080 notmtd_lvl = spec->gpio_led_polarity ? 5080 notmtd_lvl = spec->gpio_led_polarity ?
5081 AC_PINCTL_VREF_HIZ : AC_PINCTL_VREF_GRD; 5081 AC_PINCTL_VREF_50 : AC_PINCTL_VREF_GRD;
5082 muted_lvl = spec->gpio_led_polarity ? 5082 muted_lvl = spec->gpio_led_polarity ?
5083 AC_PINCTL_VREF_GRD : AC_PINCTL_VREF_HIZ; 5083 AC_PINCTL_VREF_GRD : AC_PINCTL_VREF_50;
5084 spec->vref_led = muted ? muted_lvl : notmtd_lvl; 5084 spec->vref_led = muted ? muted_lvl : notmtd_lvl;
5085 stac_vrefout_set(codec, spec->vref_mute_led_nid, 5085 stac_vrefout_set(codec, spec->vref_mute_led_nid,
5086 spec->vref_led); 5086 spec->vref_led);
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index 03e63fed9caf..dff9a00ee8fb 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -199,6 +199,9 @@ struct via_spec {
199 unsigned int no_pin_power_ctl; 199 unsigned int no_pin_power_ctl;
200 enum VIA_HDA_CODEC codec_type; 200 enum VIA_HDA_CODEC codec_type;
201 201
202 /* analog low-power control */
203 bool alc_mode;
204
202 /* smart51 setup */ 205 /* smart51 setup */
203 unsigned int smart51_nums; 206 unsigned int smart51_nums;
204 hda_nid_t smart51_pins[2]; 207 hda_nid_t smart51_pins[2];
@@ -663,6 +666,9 @@ static void via_auto_init_analog_input(struct hda_codec *codec)
663 /* init input-src */ 666 /* init input-src */
664 for (i = 0; i < spec->num_adc_nids; i++) { 667 for (i = 0; i < spec->num_adc_nids; i++) {
665 int adc_idx = spec->inputs[spec->cur_mux[i]].adc_idx; 668 int adc_idx = spec->inputs[spec->cur_mux[i]].adc_idx;
669 /* secondary ADCs must have the unique MUX */
670 if (i > 0 && !spec->mux_nids[i])
671 break;
666 if (spec->mux_nids[adc_idx]) { 672 if (spec->mux_nids[adc_idx]) {
667 int mux_idx = spec->inputs[spec->cur_mux[i]].mux_idx; 673 int mux_idx = spec->inputs[spec->cur_mux[i]].mux_idx;
668 snd_hda_codec_write(codec, spec->mux_nids[adc_idx], 0, 674 snd_hda_codec_write(codec, spec->mux_nids[adc_idx], 0,
@@ -687,6 +693,15 @@ static void via_auto_init_analog_input(struct hda_codec *codec)
687 } 693 }
688} 694}
689 695
696static void update_power_state(struct hda_codec *codec, hda_nid_t nid,
697 unsigned int parm)
698{
699 if (snd_hda_codec_read(codec, nid, 0,
700 AC_VERB_GET_POWER_STATE, 0) == parm)
701 return;
702 snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_POWER_STATE, parm);
703}
704
690static void set_pin_power_state(struct hda_codec *codec, hda_nid_t nid, 705static void set_pin_power_state(struct hda_codec *codec, hda_nid_t nid,
691 unsigned int *affected_parm) 706 unsigned int *affected_parm)
692{ 707{
@@ -709,7 +724,7 @@ static void set_pin_power_state(struct hda_codec *codec, hda_nid_t nid,
709 } else 724 } else
710 parm = AC_PWRST_D3; 725 parm = AC_PWRST_D3;
711 726
712 snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_POWER_STATE, parm); 727 update_power_state(codec, nid, parm);
713} 728}
714 729
715static int via_pin_power_ctl_info(struct snd_kcontrol *kcontrol, 730static int via_pin_power_ctl_info(struct snd_kcontrol *kcontrol,
@@ -749,6 +764,7 @@ static int via_pin_power_ctl_put(struct snd_kcontrol *kcontrol,
749 return 0; 764 return 0;
750 spec->no_pin_power_ctl = val; 765 spec->no_pin_power_ctl = val;
751 set_widgets_power_state(codec); 766 set_widgets_power_state(codec);
767 analog_low_current_mode(codec);
752 return 1; 768 return 1;
753} 769}
754 770
@@ -1036,13 +1052,19 @@ static bool is_aa_path_mute(struct hda_codec *codec)
1036} 1052}
1037 1053
1038/* enter/exit analog low-current mode */ 1054/* enter/exit analog low-current mode */
1039static void analog_low_current_mode(struct hda_codec *codec) 1055static void __analog_low_current_mode(struct hda_codec *codec, bool force)
1040{ 1056{
1041 struct via_spec *spec = codec->spec; 1057 struct via_spec *spec = codec->spec;
1042 bool enable; 1058 bool enable;
1043 unsigned int verb, parm; 1059 unsigned int verb, parm;
1044 1060
1045 enable = is_aa_path_mute(codec) && (spec->opened_streams != 0); 1061 if (spec->no_pin_power_ctl)
1062 enable = false;
1063 else
1064 enable = is_aa_path_mute(codec) && !spec->opened_streams;
1065 if (enable == spec->alc_mode && !force)
1066 return;
1067 spec->alc_mode = enable;
1046 1068
1047 /* decide low current mode's verb & parameter */ 1069 /* decide low current mode's verb & parameter */
1048 switch (spec->codec_type) { 1070 switch (spec->codec_type) {
@@ -1074,6 +1096,11 @@ static void analog_low_current_mode(struct hda_codec *codec)
1074 snd_hda_codec_write(codec, codec->afg, 0, verb, parm); 1096 snd_hda_codec_write(codec, codec->afg, 0, verb, parm);
1075} 1097}
1076 1098
1099static void analog_low_current_mode(struct hda_codec *codec)
1100{
1101 return __analog_low_current_mode(codec, false);
1102}
1103
1077/* 1104/*
1078 * generic initialization of ADC, input mixers and output mixers 1105 * generic initialization of ADC, input mixers and output mixers
1079 */ 1106 */
@@ -1446,6 +1473,7 @@ static int via_build_controls(struct hda_codec *codec)
1446 struct snd_kcontrol *kctl; 1473 struct snd_kcontrol *kctl;
1447 int err, i; 1474 int err, i;
1448 1475
1476 spec->no_pin_power_ctl = 1;
1449 if (spec->set_widgets_power_state) 1477 if (spec->set_widgets_power_state)
1450 if (!via_clone_control(spec, &via_pin_power_ctl_enum)) 1478 if (!via_clone_control(spec, &via_pin_power_ctl_enum))
1451 return -ENOMEM; 1479 return -ENOMEM;
@@ -1499,10 +1527,6 @@ static int via_build_controls(struct hda_codec *codec)
1499 return err; 1527 return err;
1500 } 1528 }
1501 1529
1502 /* init power states */
1503 set_widgets_power_state(codec);
1504 analog_low_current_mode(codec);
1505
1506 via_free_kctls(codec); /* no longer needed */ 1530 via_free_kctls(codec); /* no longer needed */
1507 1531
1508 err = snd_hda_jack_add_kctls(codec, &spec->autocfg); 1532 err = snd_hda_jack_add_kctls(codec, &spec->autocfg);
@@ -2295,10 +2319,7 @@ static int via_mux_enum_put(struct snd_kcontrol *kcontrol,
2295 2319
2296 if (mux) { 2320 if (mux) {
2297 /* switch to D0 beofre change index */ 2321 /* switch to D0 beofre change index */
2298 if (snd_hda_codec_read(codec, mux, 0, 2322 update_power_state(codec, mux, AC_PWRST_D0);
2299 AC_VERB_GET_POWER_STATE, 0x00) != AC_PWRST_D0)
2300 snd_hda_codec_write(codec, mux, 0,
2301 AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
2302 snd_hda_codec_write(codec, mux, 0, 2323 snd_hda_codec_write(codec, mux, 0,
2303 AC_VERB_SET_CONNECT_SEL, 2324 AC_VERB_SET_CONNECT_SEL,
2304 spec->inputs[cur].mux_idx); 2325 spec->inputs[cur].mux_idx);
@@ -2776,6 +2797,10 @@ static int via_init(struct hda_codec *codec)
2776 for (i = 0; i < spec->num_iverbs; i++) 2797 for (i = 0; i < spec->num_iverbs; i++)
2777 snd_hda_sequence_write(codec, spec->init_verbs[i]); 2798 snd_hda_sequence_write(codec, spec->init_verbs[i]);
2778 2799
2800 /* init power states */
2801 set_widgets_power_state(codec);
2802 __analog_low_current_mode(codec, true);
2803
2779 via_auto_init_multi_out(codec); 2804 via_auto_init_multi_out(codec);
2780 via_auto_init_hp_out(codec); 2805 via_auto_init_hp_out(codec);
2781 via_auto_init_speaker_out(codec); 2806 via_auto_init_speaker_out(codec);
@@ -2922,9 +2947,9 @@ static void set_widgets_power_state_vt1708B(struct hda_codec *codec)
2922 if (imux_is_smixer) 2947 if (imux_is_smixer)
2923 parm = AC_PWRST_D0; 2948 parm = AC_PWRST_D0;
2924 /* SW0 (17h), AIW 0/1 (13h/14h) */ 2949 /* SW0 (17h), AIW 0/1 (13h/14h) */
2925 snd_hda_codec_write(codec, 0x17, 0, AC_VERB_SET_POWER_STATE, parm); 2950 update_power_state(codec, 0x17, parm);
2926 snd_hda_codec_write(codec, 0x13, 0, AC_VERB_SET_POWER_STATE, parm); 2951 update_power_state(codec, 0x13, parm);
2927 snd_hda_codec_write(codec, 0x14, 0, AC_VERB_SET_POWER_STATE, parm); 2952 update_power_state(codec, 0x14, parm);
2928 2953
2929 /* outputs */ 2954 /* outputs */
2930 /* PW0 (19h), SW1 (18h), AOW1 (11h) */ 2955 /* PW0 (19h), SW1 (18h), AOW1 (11h) */
@@ -2932,8 +2957,8 @@ static void set_widgets_power_state_vt1708B(struct hda_codec *codec)
2932 set_pin_power_state(codec, 0x19, &parm); 2957 set_pin_power_state(codec, 0x19, &parm);
2933 if (spec->smart51_enabled) 2958 if (spec->smart51_enabled)
2934 set_pin_power_state(codec, 0x1b, &parm); 2959 set_pin_power_state(codec, 0x1b, &parm);
2935 snd_hda_codec_write(codec, 0x18, 0, AC_VERB_SET_POWER_STATE, parm); 2960 update_power_state(codec, 0x18, parm);
2936 snd_hda_codec_write(codec, 0x11, 0, AC_VERB_SET_POWER_STATE, parm); 2961 update_power_state(codec, 0x11, parm);
2937 2962
2938 /* PW6 (22h), SW2 (26h), AOW2 (24h) */ 2963 /* PW6 (22h), SW2 (26h), AOW2 (24h) */
2939 if (is_8ch) { 2964 if (is_8ch) {
@@ -2941,20 +2966,16 @@ static void set_widgets_power_state_vt1708B(struct hda_codec *codec)
2941 set_pin_power_state(codec, 0x22, &parm); 2966 set_pin_power_state(codec, 0x22, &parm);
2942 if (spec->smart51_enabled) 2967 if (spec->smart51_enabled)
2943 set_pin_power_state(codec, 0x1a, &parm); 2968 set_pin_power_state(codec, 0x1a, &parm);
2944 snd_hda_codec_write(codec, 0x26, 0, 2969 update_power_state(codec, 0x26, parm);
2945 AC_VERB_SET_POWER_STATE, parm); 2970 update_power_state(codec, 0x24, parm);
2946 snd_hda_codec_write(codec, 0x24, 0,
2947 AC_VERB_SET_POWER_STATE, parm);
2948 } else if (codec->vendor_id == 0x11064397) { 2971 } else if (codec->vendor_id == 0x11064397) {
2949 /* PW7(23h), SW2(27h), AOW2(25h) */ 2972 /* PW7(23h), SW2(27h), AOW2(25h) */
2950 parm = AC_PWRST_D3; 2973 parm = AC_PWRST_D3;
2951 set_pin_power_state(codec, 0x23, &parm); 2974 set_pin_power_state(codec, 0x23, &parm);
2952 if (spec->smart51_enabled) 2975 if (spec->smart51_enabled)
2953 set_pin_power_state(codec, 0x1a, &parm); 2976 set_pin_power_state(codec, 0x1a, &parm);
2954 snd_hda_codec_write(codec, 0x27, 0, 2977 update_power_state(codec, 0x27, parm);
2955 AC_VERB_SET_POWER_STATE, parm); 2978 update_power_state(codec, 0x25, parm);
2956 snd_hda_codec_write(codec, 0x25, 0,
2957 AC_VERB_SET_POWER_STATE, parm);
2958 } 2979 }
2959 2980
2960 /* PW 3/4/7 (1ch/1dh/23h) */ 2981 /* PW 3/4/7 (1ch/1dh/23h) */
@@ -2966,17 +2987,13 @@ static void set_widgets_power_state_vt1708B(struct hda_codec *codec)
2966 set_pin_power_state(codec, 0x23, &parm); 2987 set_pin_power_state(codec, 0x23, &parm);
2967 2988
2968 /* MW0 (16h), Sw3 (27h), AOW 0/3 (10h/25h) */ 2989 /* MW0 (16h), Sw3 (27h), AOW 0/3 (10h/25h) */
2969 snd_hda_codec_write(codec, 0x16, 0, AC_VERB_SET_POWER_STATE, 2990 update_power_state(codec, 0x16, imux_is_smixer ? AC_PWRST_D0 : parm);
2970 imux_is_smixer ? AC_PWRST_D0 : parm); 2991 update_power_state(codec, 0x10, parm);
2971 snd_hda_codec_write(codec, 0x10, 0, AC_VERB_SET_POWER_STATE, parm);
2972 if (is_8ch) { 2992 if (is_8ch) {
2973 snd_hda_codec_write(codec, 0x25, 0, 2993 update_power_state(codec, 0x25, parm);
2974 AC_VERB_SET_POWER_STATE, parm); 2994 update_power_state(codec, 0x27, parm);
2975 snd_hda_codec_write(codec, 0x27, 0,
2976 AC_VERB_SET_POWER_STATE, parm);
2977 } else if (codec->vendor_id == 0x11064397 && spec->hp_independent_mode) 2995 } else if (codec->vendor_id == 0x11064397 && spec->hp_independent_mode)
2978 snd_hda_codec_write(codec, 0x25, 0, 2996 update_power_state(codec, 0x25, parm);
2979 AC_VERB_SET_POWER_STATE, parm);
2980} 2997}
2981 2998
2982static int patch_vt1708S(struct hda_codec *codec); 2999static int patch_vt1708S(struct hda_codec *codec);
@@ -3149,10 +3166,10 @@ static void set_widgets_power_state_vt1702(struct hda_codec *codec)
3149 if (imux_is_smixer) 3166 if (imux_is_smixer)
3150 parm = AC_PWRST_D0; /* SW0 (13h) = stereo mixer (idx 3) */ 3167 parm = AC_PWRST_D0; /* SW0 (13h) = stereo mixer (idx 3) */
3151 /* SW0 (13h), AIW 0/1/2 (12h/1fh/20h) */ 3168 /* SW0 (13h), AIW 0/1/2 (12h/1fh/20h) */
3152 snd_hda_codec_write(codec, 0x13, 0, AC_VERB_SET_POWER_STATE, parm); 3169 update_power_state(codec, 0x13, parm);
3153 snd_hda_codec_write(codec, 0x12, 0, AC_VERB_SET_POWER_STATE, parm); 3170 update_power_state(codec, 0x12, parm);
3154 snd_hda_codec_write(codec, 0x1f, 0, AC_VERB_SET_POWER_STATE, parm); 3171 update_power_state(codec, 0x1f, parm);
3155 snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_POWER_STATE, parm); 3172 update_power_state(codec, 0x20, parm);
3156 3173
3157 /* outputs */ 3174 /* outputs */
3158 /* PW 3/4 (16h/17h) */ 3175 /* PW 3/4 (16h/17h) */
@@ -3160,10 +3177,9 @@ static void set_widgets_power_state_vt1702(struct hda_codec *codec)
3160 set_pin_power_state(codec, 0x17, &parm); 3177 set_pin_power_state(codec, 0x17, &parm);
3161 set_pin_power_state(codec, 0x16, &parm); 3178 set_pin_power_state(codec, 0x16, &parm);
3162 /* MW0 (1ah), AOW 0/1 (10h/1dh) */ 3179 /* MW0 (1ah), AOW 0/1 (10h/1dh) */
3163 snd_hda_codec_write(codec, 0x1a, 0, AC_VERB_SET_POWER_STATE, 3180 update_power_state(codec, 0x1a, imux_is_smixer ? AC_PWRST_D0 : parm);
3164 imux_is_smixer ? AC_PWRST_D0 : parm); 3181 update_power_state(codec, 0x10, parm);
3165 snd_hda_codec_write(codec, 0x10, 0, AC_VERB_SET_POWER_STATE, parm); 3182 update_power_state(codec, 0x1d, parm);
3166 snd_hda_codec_write(codec, 0x1d, 0, AC_VERB_SET_POWER_STATE, parm);
3167} 3183}
3168 3184
3169static int patch_vt1702(struct hda_codec *codec) 3185static int patch_vt1702(struct hda_codec *codec)
@@ -3228,52 +3244,48 @@ static void set_widgets_power_state_vt1718S(struct hda_codec *codec)
3228 if (imux_is_smixer) 3244 if (imux_is_smixer)
3229 parm = AC_PWRST_D0; 3245 parm = AC_PWRST_D0;
3230 /* MUX6/7 (1eh/1fh), AIW 0/1 (10h/11h) */ 3246 /* MUX6/7 (1eh/1fh), AIW 0/1 (10h/11h) */
3231 snd_hda_codec_write(codec, 0x1e, 0, AC_VERB_SET_POWER_STATE, parm); 3247 update_power_state(codec, 0x1e, parm);
3232 snd_hda_codec_write(codec, 0x1f, 0, AC_VERB_SET_POWER_STATE, parm); 3248 update_power_state(codec, 0x1f, parm);
3233 snd_hda_codec_write(codec, 0x10, 0, AC_VERB_SET_POWER_STATE, parm); 3249 update_power_state(codec, 0x10, parm);
3234 snd_hda_codec_write(codec, 0x11, 0, AC_VERB_SET_POWER_STATE, parm); 3250 update_power_state(codec, 0x11, parm);
3235 3251
3236 /* outputs */ 3252 /* outputs */
3237 /* PW3 (27h), MW2 (1ah), AOW3 (bh) */ 3253 /* PW3 (27h), MW2 (1ah), AOW3 (bh) */
3238 parm = AC_PWRST_D3; 3254 parm = AC_PWRST_D3;
3239 set_pin_power_state(codec, 0x27, &parm); 3255 set_pin_power_state(codec, 0x27, &parm);
3240 snd_hda_codec_write(codec, 0x1a, 0, AC_VERB_SET_POWER_STATE, parm); 3256 update_power_state(codec, 0x1a, parm);
3241 snd_hda_codec_write(codec, 0xb, 0, AC_VERB_SET_POWER_STATE, parm); 3257 update_power_state(codec, 0xb, parm);
3242 3258
3243 /* PW2 (26h), AOW2 (ah) */ 3259 /* PW2 (26h), AOW2 (ah) */
3244 parm = AC_PWRST_D3; 3260 parm = AC_PWRST_D3;
3245 set_pin_power_state(codec, 0x26, &parm); 3261 set_pin_power_state(codec, 0x26, &parm);
3246 if (spec->smart51_enabled) 3262 if (spec->smart51_enabled)
3247 set_pin_power_state(codec, 0x2b, &parm); 3263 set_pin_power_state(codec, 0x2b, &parm);
3248 snd_hda_codec_write(codec, 0xa, 0, AC_VERB_SET_POWER_STATE, parm); 3264 update_power_state(codec, 0xa, parm);
3249 3265
3250 /* PW0 (24h), AOW0 (8h) */ 3266 /* PW0 (24h), AOW0 (8h) */
3251 parm = AC_PWRST_D3; 3267 parm = AC_PWRST_D3;
3252 set_pin_power_state(codec, 0x24, &parm); 3268 set_pin_power_state(codec, 0x24, &parm);
3253 if (!spec->hp_independent_mode) /* check for redirected HP */ 3269 if (!spec->hp_independent_mode) /* check for redirected HP */
3254 set_pin_power_state(codec, 0x28, &parm); 3270 set_pin_power_state(codec, 0x28, &parm);
3255 snd_hda_codec_write(codec, 0x8, 0, AC_VERB_SET_POWER_STATE, parm); 3271 update_power_state(codec, 0x8, parm);
3256 /* MW9 (21h), Mw2 (1ah), AOW0 (8h) */ 3272 /* MW9 (21h), Mw2 (1ah), AOW0 (8h) */
3257 snd_hda_codec_write(codec, 0x21, 0, AC_VERB_SET_POWER_STATE, 3273 update_power_state(codec, 0x21, imux_is_smixer ? AC_PWRST_D0 : parm);
3258 imux_is_smixer ? AC_PWRST_D0 : parm);
3259 3274
3260 /* PW1 (25h), AOW1 (9h) */ 3275 /* PW1 (25h), AOW1 (9h) */
3261 parm = AC_PWRST_D3; 3276 parm = AC_PWRST_D3;
3262 set_pin_power_state(codec, 0x25, &parm); 3277 set_pin_power_state(codec, 0x25, &parm);
3263 if (spec->smart51_enabled) 3278 if (spec->smart51_enabled)
3264 set_pin_power_state(codec, 0x2a, &parm); 3279 set_pin_power_state(codec, 0x2a, &parm);
3265 snd_hda_codec_write(codec, 0x9, 0, AC_VERB_SET_POWER_STATE, parm); 3280 update_power_state(codec, 0x9, parm);
3266 3281
3267 if (spec->hp_independent_mode) { 3282 if (spec->hp_independent_mode) {
3268 /* PW4 (28h), MW3 (1bh), MUX1(34h), AOW4 (ch) */ 3283 /* PW4 (28h), MW3 (1bh), MUX1(34h), AOW4 (ch) */
3269 parm = AC_PWRST_D3; 3284 parm = AC_PWRST_D3;
3270 set_pin_power_state(codec, 0x28, &parm); 3285 set_pin_power_state(codec, 0x28, &parm);
3271 snd_hda_codec_write(codec, 0x1b, 0, 3286 update_power_state(codec, 0x1b, parm);
3272 AC_VERB_SET_POWER_STATE, parm); 3287 update_power_state(codec, 0x34, parm);
3273 snd_hda_codec_write(codec, 0x34, 0, 3288 update_power_state(codec, 0xc, parm);
3274 AC_VERB_SET_POWER_STATE, parm);
3275 snd_hda_codec_write(codec, 0xc, 0,
3276 AC_VERB_SET_POWER_STATE, parm);
3277 } 3289 }
3278} 3290}
3279 3291
@@ -3433,8 +3445,8 @@ static void set_widgets_power_state_vt1716S(struct hda_codec *codec)
3433 if (imux_is_smixer) 3445 if (imux_is_smixer)
3434 parm = AC_PWRST_D0; 3446 parm = AC_PWRST_D0;
3435 /* SW0 (17h), AIW0(13h) */ 3447 /* SW0 (17h), AIW0(13h) */
3436 snd_hda_codec_write(codec, 0x17, 0, AC_VERB_SET_POWER_STATE, parm); 3448 update_power_state(codec, 0x17, parm);
3437 snd_hda_codec_write(codec, 0x13, 0, AC_VERB_SET_POWER_STATE, parm); 3449 update_power_state(codec, 0x13, parm);
3438 3450
3439 parm = AC_PWRST_D3; 3451 parm = AC_PWRST_D3;
3440 set_pin_power_state(codec, 0x1e, &parm); 3452 set_pin_power_state(codec, 0x1e, &parm);
@@ -3442,12 +3454,11 @@ static void set_widgets_power_state_vt1716S(struct hda_codec *codec)
3442 if (spec->dmic_enabled) 3454 if (spec->dmic_enabled)
3443 set_pin_power_state(codec, 0x22, &parm); 3455 set_pin_power_state(codec, 0x22, &parm);
3444 else 3456 else
3445 snd_hda_codec_write(codec, 0x22, 0, 3457 update_power_state(codec, 0x22, AC_PWRST_D3);
3446 AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
3447 3458
3448 /* SW2(26h), AIW1(14h) */ 3459 /* SW2(26h), AIW1(14h) */
3449 snd_hda_codec_write(codec, 0x26, 0, AC_VERB_SET_POWER_STATE, parm); 3460 update_power_state(codec, 0x26, parm);
3450 snd_hda_codec_write(codec, 0x14, 0, AC_VERB_SET_POWER_STATE, parm); 3461 update_power_state(codec, 0x14, parm);
3451 3462
3452 /* outputs */ 3463 /* outputs */
3453 /* PW0 (19h), SW1 (18h), AOW1 (11h) */ 3464 /* PW0 (19h), SW1 (18h), AOW1 (11h) */
@@ -3456,8 +3467,8 @@ static void set_widgets_power_state_vt1716S(struct hda_codec *codec)
3456 /* Smart 5.1 PW2(1bh) */ 3467 /* Smart 5.1 PW2(1bh) */
3457 if (spec->smart51_enabled) 3468 if (spec->smart51_enabled)
3458 set_pin_power_state(codec, 0x1b, &parm); 3469 set_pin_power_state(codec, 0x1b, &parm);
3459 snd_hda_codec_write(codec, 0x18, 0, AC_VERB_SET_POWER_STATE, parm); 3470 update_power_state(codec, 0x18, parm);
3460 snd_hda_codec_write(codec, 0x11, 0, AC_VERB_SET_POWER_STATE, parm); 3471 update_power_state(codec, 0x11, parm);
3461 3472
3462 /* PW7 (23h), SW3 (27h), AOW3 (25h) */ 3473 /* PW7 (23h), SW3 (27h), AOW3 (25h) */
3463 parm = AC_PWRST_D3; 3474 parm = AC_PWRST_D3;
@@ -3465,12 +3476,12 @@ static void set_widgets_power_state_vt1716S(struct hda_codec *codec)
3465 /* Smart 5.1 PW1(1ah) */ 3476 /* Smart 5.1 PW1(1ah) */
3466 if (spec->smart51_enabled) 3477 if (spec->smart51_enabled)
3467 set_pin_power_state(codec, 0x1a, &parm); 3478 set_pin_power_state(codec, 0x1a, &parm);
3468 snd_hda_codec_write(codec, 0x27, 0, AC_VERB_SET_POWER_STATE, parm); 3479 update_power_state(codec, 0x27, parm);
3469 3480
3470 /* Smart 5.1 PW5(1eh) */ 3481 /* Smart 5.1 PW5(1eh) */
3471 if (spec->smart51_enabled) 3482 if (spec->smart51_enabled)
3472 set_pin_power_state(codec, 0x1e, &parm); 3483 set_pin_power_state(codec, 0x1e, &parm);
3473 snd_hda_codec_write(codec, 0x25, 0, AC_VERB_SET_POWER_STATE, parm); 3484 update_power_state(codec, 0x25, parm);
3474 3485
3475 /* Mono out */ 3486 /* Mono out */
3476 /* SW4(28h)->MW1(29h)-> PW12 (2ah)*/ 3487 /* SW4(28h)->MW1(29h)-> PW12 (2ah)*/
@@ -3486,9 +3497,9 @@ static void set_widgets_power_state_vt1716S(struct hda_codec *codec)
3486 mono_out = 1; 3497 mono_out = 1;
3487 } 3498 }
3488 parm = mono_out ? AC_PWRST_D0 : AC_PWRST_D3; 3499 parm = mono_out ? AC_PWRST_D0 : AC_PWRST_D3;
3489 snd_hda_codec_write(codec, 0x28, 0, AC_VERB_SET_POWER_STATE, parm); 3500 update_power_state(codec, 0x28, parm);
3490 snd_hda_codec_write(codec, 0x29, 0, AC_VERB_SET_POWER_STATE, parm); 3501 update_power_state(codec, 0x29, parm);
3491 snd_hda_codec_write(codec, 0x2a, 0, AC_VERB_SET_POWER_STATE, parm); 3502 update_power_state(codec, 0x2a, parm);
3492 3503
3493 /* PW 3/4 (1ch/1dh) */ 3504 /* PW 3/4 (1ch/1dh) */
3494 parm = AC_PWRST_D3; 3505 parm = AC_PWRST_D3;
@@ -3496,15 +3507,12 @@ static void set_widgets_power_state_vt1716S(struct hda_codec *codec)
3496 set_pin_power_state(codec, 0x1d, &parm); 3507 set_pin_power_state(codec, 0x1d, &parm);
3497 /* HP Independent Mode, power on AOW3 */ 3508 /* HP Independent Mode, power on AOW3 */
3498 if (spec->hp_independent_mode) 3509 if (spec->hp_independent_mode)
3499 snd_hda_codec_write(codec, 0x25, 0, 3510 update_power_state(codec, 0x25, parm);
3500 AC_VERB_SET_POWER_STATE, parm);
3501 3511
3502 /* force to D0 for internal Speaker */ 3512 /* force to D0 for internal Speaker */
3503 /* MW0 (16h), AOW0 (10h) */ 3513 /* MW0 (16h), AOW0 (10h) */
3504 snd_hda_codec_write(codec, 0x16, 0, AC_VERB_SET_POWER_STATE, 3514 update_power_state(codec, 0x16, imux_is_smixer ? AC_PWRST_D0 : parm);
3505 imux_is_smixer ? AC_PWRST_D0 : parm); 3515 update_power_state(codec, 0x10, mono_out ? AC_PWRST_D0 : parm);
3506 snd_hda_codec_write(codec, 0x10, 0, AC_VERB_SET_POWER_STATE,
3507 mono_out ? AC_PWRST_D0 : parm);
3508} 3516}
3509 3517
3510static int patch_vt1716S(struct hda_codec *codec) 3518static int patch_vt1716S(struct hda_codec *codec)
@@ -3580,54 +3588,45 @@ static void set_widgets_power_state_vt2002P(struct hda_codec *codec)
3580 set_pin_power_state(codec, 0x2b, &parm); 3588 set_pin_power_state(codec, 0x2b, &parm);
3581 parm = AC_PWRST_D0; 3589 parm = AC_PWRST_D0;
3582 /* MUX9/10 (1eh/1fh), AIW 0/1 (10h/11h) */ 3590 /* MUX9/10 (1eh/1fh), AIW 0/1 (10h/11h) */
3583 snd_hda_codec_write(codec, 0x1e, 0, AC_VERB_SET_POWER_STATE, parm); 3591 update_power_state(codec, 0x1e, parm);
3584 snd_hda_codec_write(codec, 0x1f, 0, AC_VERB_SET_POWER_STATE, parm); 3592 update_power_state(codec, 0x1f, parm);
3585 snd_hda_codec_write(codec, 0x10, 0, AC_VERB_SET_POWER_STATE, parm); 3593 update_power_state(codec, 0x10, parm);
3586 snd_hda_codec_write(codec, 0x11, 0, AC_VERB_SET_POWER_STATE, parm); 3594 update_power_state(codec, 0x11, parm);
3587 3595
3588 /* outputs */ 3596 /* outputs */
3589 /* AOW0 (8h)*/ 3597 /* AOW0 (8h)*/
3590 snd_hda_codec_write(codec, 0x8, 0, AC_VERB_SET_POWER_STATE, parm); 3598 update_power_state(codec, 0x8, parm);
3591 3599
3592 if (spec->codec_type == VT1802) { 3600 if (spec->codec_type == VT1802) {
3593 /* PW4 (28h), MW4 (18h), MUX4(38h) */ 3601 /* PW4 (28h), MW4 (18h), MUX4(38h) */
3594 parm = AC_PWRST_D3; 3602 parm = AC_PWRST_D3;
3595 set_pin_power_state(codec, 0x28, &parm); 3603 set_pin_power_state(codec, 0x28, &parm);
3596 snd_hda_codec_write(codec, 0x18, 0, 3604 update_power_state(codec, 0x18, parm);
3597 AC_VERB_SET_POWER_STATE, parm); 3605 update_power_state(codec, 0x38, parm);
3598 snd_hda_codec_write(codec, 0x38, 0,
3599 AC_VERB_SET_POWER_STATE, parm);
3600 } else { 3606 } else {
3601 /* PW4 (26h), MW4 (1ch), MUX4(37h) */ 3607 /* PW4 (26h), MW4 (1ch), MUX4(37h) */
3602 parm = AC_PWRST_D3; 3608 parm = AC_PWRST_D3;
3603 set_pin_power_state(codec, 0x26, &parm); 3609 set_pin_power_state(codec, 0x26, &parm);
3604 snd_hda_codec_write(codec, 0x1c, 0, 3610 update_power_state(codec, 0x1c, parm);
3605 AC_VERB_SET_POWER_STATE, parm); 3611 update_power_state(codec, 0x37, parm);
3606 snd_hda_codec_write(codec, 0x37, 0,
3607 AC_VERB_SET_POWER_STATE, parm);
3608 } 3612 }
3609 3613
3610 if (spec->codec_type == VT1802) { 3614 if (spec->codec_type == VT1802) {
3611 /* PW1 (25h), MW1 (15h), MUX1(35h), AOW1 (9h) */ 3615 /* PW1 (25h), MW1 (15h), MUX1(35h), AOW1 (9h) */
3612 parm = AC_PWRST_D3; 3616 parm = AC_PWRST_D3;
3613 set_pin_power_state(codec, 0x25, &parm); 3617 set_pin_power_state(codec, 0x25, &parm);
3614 snd_hda_codec_write(codec, 0x15, 0, 3618 update_power_state(codec, 0x15, parm);
3615 AC_VERB_SET_POWER_STATE, parm); 3619 update_power_state(codec, 0x35, parm);
3616 snd_hda_codec_write(codec, 0x35, 0,
3617 AC_VERB_SET_POWER_STATE, parm);
3618 } else { 3620 } else {
3619 /* PW1 (25h), MW1 (19h), MUX1(35h), AOW1 (9h) */ 3621 /* PW1 (25h), MW1 (19h), MUX1(35h), AOW1 (9h) */
3620 parm = AC_PWRST_D3; 3622 parm = AC_PWRST_D3;
3621 set_pin_power_state(codec, 0x25, &parm); 3623 set_pin_power_state(codec, 0x25, &parm);
3622 snd_hda_codec_write(codec, 0x19, 0, 3624 update_power_state(codec, 0x19, parm);
3623 AC_VERB_SET_POWER_STATE, parm); 3625 update_power_state(codec, 0x35, parm);
3624 snd_hda_codec_write(codec, 0x35, 0,
3625 AC_VERB_SET_POWER_STATE, parm);
3626 } 3626 }
3627 3627
3628 if (spec->hp_independent_mode) 3628 if (spec->hp_independent_mode)
3629 snd_hda_codec_write(codec, 0x9, 0, 3629 update_power_state(codec, 0x9, AC_PWRST_D0);
3630 AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
3631 3630
3632 /* Class-D */ 3631 /* Class-D */
3633 /* PW0 (24h), MW0(18h/14h), MUX0(34h) */ 3632 /* PW0 (24h), MW0(18h/14h), MUX0(34h) */
@@ -3637,12 +3636,10 @@ static void set_widgets_power_state_vt2002P(struct hda_codec *codec)
3637 set_pin_power_state(codec, 0x24, &parm); 3636 set_pin_power_state(codec, 0x24, &parm);
3638 parm = present ? AC_PWRST_D3 : AC_PWRST_D0; 3637 parm = present ? AC_PWRST_D3 : AC_PWRST_D0;
3639 if (spec->codec_type == VT1802) 3638 if (spec->codec_type == VT1802)
3640 snd_hda_codec_write(codec, 0x14, 0, 3639 update_power_state(codec, 0x14, parm);
3641 AC_VERB_SET_POWER_STATE, parm);
3642 else 3640 else
3643 snd_hda_codec_write(codec, 0x18, 0, 3641 update_power_state(codec, 0x18, parm);
3644 AC_VERB_SET_POWER_STATE, parm); 3642 update_power_state(codec, 0x34, parm);
3645 snd_hda_codec_write(codec, 0x34, 0, AC_VERB_SET_POWER_STATE, parm);
3646 3643
3647 /* Mono Out */ 3644 /* Mono Out */
3648 present = snd_hda_jack_detect(codec, 0x26); 3645 present = snd_hda_jack_detect(codec, 0x26);
@@ -3650,28 +3647,20 @@ static void set_widgets_power_state_vt2002P(struct hda_codec *codec)
3650 parm = present ? AC_PWRST_D3 : AC_PWRST_D0; 3647 parm = present ? AC_PWRST_D3 : AC_PWRST_D0;
3651 if (spec->codec_type == VT1802) { 3648 if (spec->codec_type == VT1802) {
3652 /* PW15 (33h), MW8(1ch), MUX8(3ch) */ 3649 /* PW15 (33h), MW8(1ch), MUX8(3ch) */
3653 snd_hda_codec_write(codec, 0x33, 0, 3650 update_power_state(codec, 0x33, parm);
3654 AC_VERB_SET_POWER_STATE, parm); 3651 update_power_state(codec, 0x1c, parm);
3655 snd_hda_codec_write(codec, 0x1c, 0, 3652 update_power_state(codec, 0x3c, parm);
3656 AC_VERB_SET_POWER_STATE, parm);
3657 snd_hda_codec_write(codec, 0x3c, 0,
3658 AC_VERB_SET_POWER_STATE, parm);
3659 } else { 3653 } else {
3660 /* PW15 (31h), MW8(17h), MUX8(3bh) */ 3654 /* PW15 (31h), MW8(17h), MUX8(3bh) */
3661 snd_hda_codec_write(codec, 0x31, 0, 3655 update_power_state(codec, 0x31, parm);
3662 AC_VERB_SET_POWER_STATE, parm); 3656 update_power_state(codec, 0x17, parm);
3663 snd_hda_codec_write(codec, 0x17, 0, 3657 update_power_state(codec, 0x3b, parm);
3664 AC_VERB_SET_POWER_STATE, parm);
3665 snd_hda_codec_write(codec, 0x3b, 0,
3666 AC_VERB_SET_POWER_STATE, parm);
3667 } 3658 }
3668 /* MW9 (21h) */ 3659 /* MW9 (21h) */
3669 if (imux_is_smixer || !is_aa_path_mute(codec)) 3660 if (imux_is_smixer || !is_aa_path_mute(codec))
3670 snd_hda_codec_write(codec, 0x21, 0, 3661 update_power_state(codec, 0x21, AC_PWRST_D0);
3671 AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
3672 else 3662 else
3673 snd_hda_codec_write(codec, 0x21, 0, 3663 update_power_state(codec, 0x21, AC_PWRST_D3);
3674 AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
3675} 3664}
3676 3665
3677/* patch for vt2002P */ 3666/* patch for vt2002P */
@@ -3731,30 +3720,28 @@ static void set_widgets_power_state_vt1812(struct hda_codec *codec)
3731 set_pin_power_state(codec, 0x2b, &parm); 3720 set_pin_power_state(codec, 0x2b, &parm);
3732 parm = AC_PWRST_D0; 3721 parm = AC_PWRST_D0;
3733 /* MUX10/11 (1eh/1fh), AIW 0/1 (10h/11h) */ 3722 /* MUX10/11 (1eh/1fh), AIW 0/1 (10h/11h) */
3734 snd_hda_codec_write(codec, 0x1e, 0, AC_VERB_SET_POWER_STATE, parm); 3723 update_power_state(codec, 0x1e, parm);
3735 snd_hda_codec_write(codec, 0x1f, 0, AC_VERB_SET_POWER_STATE, parm); 3724 update_power_state(codec, 0x1f, parm);
3736 snd_hda_codec_write(codec, 0x10, 0, AC_VERB_SET_POWER_STATE, parm); 3725 update_power_state(codec, 0x10, parm);
3737 snd_hda_codec_write(codec, 0x11, 0, AC_VERB_SET_POWER_STATE, parm); 3726 update_power_state(codec, 0x11, parm);
3738 3727
3739 /* outputs */ 3728 /* outputs */
3740 /* AOW0 (8h)*/ 3729 /* AOW0 (8h)*/
3741 snd_hda_codec_write(codec, 0x8, 0, 3730 update_power_state(codec, 0x8, AC_PWRST_D0);
3742 AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
3743 3731
3744 /* PW4 (28h), MW4 (18h), MUX4(38h) */ 3732 /* PW4 (28h), MW4 (18h), MUX4(38h) */
3745 parm = AC_PWRST_D3; 3733 parm = AC_PWRST_D3;
3746 set_pin_power_state(codec, 0x28, &parm); 3734 set_pin_power_state(codec, 0x28, &parm);
3747 snd_hda_codec_write(codec, 0x18, 0, AC_VERB_SET_POWER_STATE, parm); 3735 update_power_state(codec, 0x18, parm);
3748 snd_hda_codec_write(codec, 0x38, 0, AC_VERB_SET_POWER_STATE, parm); 3736 update_power_state(codec, 0x38, parm);
3749 3737
3750 /* PW1 (25h), MW1 (15h), MUX1(35h), AOW1 (9h) */ 3738 /* PW1 (25h), MW1 (15h), MUX1(35h), AOW1 (9h) */
3751 parm = AC_PWRST_D3; 3739 parm = AC_PWRST_D3;
3752 set_pin_power_state(codec, 0x25, &parm); 3740 set_pin_power_state(codec, 0x25, &parm);
3753 snd_hda_codec_write(codec, 0x15, 0, AC_VERB_SET_POWER_STATE, parm); 3741 update_power_state(codec, 0x15, parm);
3754 snd_hda_codec_write(codec, 0x35, 0, AC_VERB_SET_POWER_STATE, parm); 3742 update_power_state(codec, 0x35, parm);
3755 if (spec->hp_independent_mode) 3743 if (spec->hp_independent_mode)
3756 snd_hda_codec_write(codec, 0x9, 0, 3744 update_power_state(codec, 0x9, AC_PWRST_D0);
3757 AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
3758 3745
3759 /* Internal Speaker */ 3746 /* Internal Speaker */
3760 /* PW0 (24h), MW0(14h), MUX0(34h) */ 3747 /* PW0 (24h), MW0(14h), MUX0(34h) */
@@ -3763,15 +3750,11 @@ static void set_widgets_power_state_vt1812(struct hda_codec *codec)
3763 parm = AC_PWRST_D3; 3750 parm = AC_PWRST_D3;
3764 set_pin_power_state(codec, 0x24, &parm); 3751 set_pin_power_state(codec, 0x24, &parm);
3765 if (present) { 3752 if (present) {
3766 snd_hda_codec_write(codec, 0x14, 0, 3753 update_power_state(codec, 0x14, AC_PWRST_D3);
3767 AC_VERB_SET_POWER_STATE, AC_PWRST_D3); 3754 update_power_state(codec, 0x34, AC_PWRST_D3);
3768 snd_hda_codec_write(codec, 0x34, 0,
3769 AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
3770 } else { 3755 } else {
3771 snd_hda_codec_write(codec, 0x14, 0, 3756 update_power_state(codec, 0x14, AC_PWRST_D0);
3772 AC_VERB_SET_POWER_STATE, AC_PWRST_D0); 3757 update_power_state(codec, 0x34, AC_PWRST_D0);
3773 snd_hda_codec_write(codec, 0x34, 0,
3774 AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
3775 } 3758 }
3776 3759
3777 3760
@@ -3782,26 +3765,20 @@ static void set_widgets_power_state_vt1812(struct hda_codec *codec)
3782 parm = AC_PWRST_D3; 3765 parm = AC_PWRST_D3;
3783 set_pin_power_state(codec, 0x31, &parm); 3766 set_pin_power_state(codec, 0x31, &parm);
3784 if (present) { 3767 if (present) {
3785 snd_hda_codec_write(codec, 0x1c, 0, 3768 update_power_state(codec, 0x1c, AC_PWRST_D3);
3786 AC_VERB_SET_POWER_STATE, AC_PWRST_D3); 3769 update_power_state(codec, 0x3c, AC_PWRST_D3);
3787 snd_hda_codec_write(codec, 0x3c, 0, 3770 update_power_state(codec, 0x3e, AC_PWRST_D3);
3788 AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
3789 snd_hda_codec_write(codec, 0x3e, 0,
3790 AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
3791 } else { 3771 } else {
3792 snd_hda_codec_write(codec, 0x1c, 0, 3772 update_power_state(codec, 0x1c, AC_PWRST_D0);
3793 AC_VERB_SET_POWER_STATE, AC_PWRST_D0); 3773 update_power_state(codec, 0x3c, AC_PWRST_D0);
3794 snd_hda_codec_write(codec, 0x3c, 0, 3774 update_power_state(codec, 0x3e, AC_PWRST_D0);
3795 AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
3796 snd_hda_codec_write(codec, 0x3e, 0,
3797 AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
3798 } 3775 }
3799 3776
3800 /* PW15 (33h), MW15 (1dh), MUX15(3dh) */ 3777 /* PW15 (33h), MW15 (1dh), MUX15(3dh) */
3801 parm = AC_PWRST_D3; 3778 parm = AC_PWRST_D3;
3802 set_pin_power_state(codec, 0x33, &parm); 3779 set_pin_power_state(codec, 0x33, &parm);
3803 snd_hda_codec_write(codec, 0x1d, 0, AC_VERB_SET_POWER_STATE, parm); 3780 update_power_state(codec, 0x1d, parm);
3804 snd_hda_codec_write(codec, 0x3d, 0, AC_VERB_SET_POWER_STATE, parm); 3781 update_power_state(codec, 0x3d, parm);
3805 3782
3806} 3783}
3807 3784
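
Most of the patch_via.c churn above funnels power-state changes through the new update_power_state() helper, which reads the widget's current state and skips the verb when it already matches. A standalone sketch of that read-before-write idea; the array below stands in for the codec's widgets, not for any real HDA call:

#include <stdio.h>

#define NUM_NIDS 4

static int power_state[NUM_NIDS] = { 3, 0, 3, 0 };	/* pretend widget states */
static int verbs_sent;

static int read_state(int nid)           { return power_state[nid]; }
static void write_state(int nid, int st) { power_state[nid] = st; verbs_sent++; }

/* Only send the (slow) verb when the widget is not already in that state. */
static void update_power_state(int nid, int state)
{
	if (read_state(nid) == state)
		return;
	write_state(nid, state);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {		/* repeated re-init passes */
		update_power_state(0, 0);	/* D0 */
		update_power_state(1, 3);	/* D3 */
	}
	printf("verbs sent: %d (instead of 6)\n", verbs_sent);
	return 0;
}
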
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index 9f3b01bb72c8..e0a4263baa20 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -2102,6 +2102,12 @@ static struct ac97_quirk ac97_quirks[] __devinitdata = {
2102 }, 2102 },
2103 { 2103 {
2104 .subvendor = 0x161f, 2104 .subvendor = 0x161f,
2105 .subdevice = 0x202f,
2106 .name = "Gateway M520",
2107 .type = AC97_TUNE_INV_EAPD
2108 },
2109 {
2110 .subvendor = 0x161f,
2105 .subdevice = 0x203a, 2111 .subdevice = 0x203a,
2106 .name = "Gateway 4525GZ", /* AD1981B */ 2112 .name = "Gateway 4525GZ", /* AD1981B */
2107 .type = AC97_TUNE_INV_EAPD 2113 .type = AC97_TUNE_INV_EAPD
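
The intel8x0 hunk adds one more entry to the ac97_quirks[] table, which the driver matches against the card's PCI subsystem IDs to pick a tuning such as AC97_TUNE_INV_EAPD. A standalone sketch of that table-driven lookup; the two entries mirror the Gateway IDs from the hunk and the struct layout is simplified:

#include <stdio.h>
#include <stddef.h>

struct quirk {
	unsigned short subvendor, subdevice;
	const char *name;
	int type;
};

#define TUNE_INV_EAPD 1

static const struct quirk quirks[] = {
	{ 0x161f, 0x202f, "Gateway M520",   TUNE_INV_EAPD },
	{ 0x161f, 0x203a, "Gateway 4525GZ", TUNE_INV_EAPD },
	{ 0 }
};

static const struct quirk *find_quirk(unsigned short sv, unsigned short sd)
{
	for (const struct quirk *q = quirks; q->subvendor; q++)
		if (q->subvendor == sv && q->subdevice == sd)
			return q;
	return NULL;
}

int main(void)
{
	const struct quirk *q = find_quirk(0x161f, 0x202f);

	printf("%s\n", q ? q->name : "no quirk");
	return 0;
}
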
diff --git a/sound/pci/oxygen/oxygen_mixer.c b/sound/pci/oxygen/oxygen_mixer.c
index 26c7e8bcb229..c0dbb52d45be 100644
--- a/sound/pci/oxygen/oxygen_mixer.c
+++ b/sound/pci/oxygen/oxygen_mixer.c
@@ -618,9 +618,12 @@ static int ac97_volume_get(struct snd_kcontrol *ctl,
618 mutex_lock(&chip->mutex); 618 mutex_lock(&chip->mutex);
619 reg = oxygen_read_ac97(chip, codec, index); 619 reg = oxygen_read_ac97(chip, codec, index);
620 mutex_unlock(&chip->mutex); 620 mutex_unlock(&chip->mutex);
621 value->value.integer.value[0] = 31 - (reg & 0x1f); 621 if (!stereo) {
622 if (stereo) 622 value->value.integer.value[0] = 31 - (reg & 0x1f);
623 value->value.integer.value[1] = 31 - ((reg >> 8) & 0x1f); 623 } else {
624 value->value.integer.value[0] = 31 - ((reg >> 8) & 0x1f);
625 value->value.integer.value[1] = 31 - (reg & 0x1f);
626 }
624 return 0; 627 return 0;
625} 628}
626 629
@@ -636,14 +639,14 @@ static int ac97_volume_put(struct snd_kcontrol *ctl,
636 639
637 mutex_lock(&chip->mutex); 640 mutex_lock(&chip->mutex);
638 oldreg = oxygen_read_ac97(chip, codec, index); 641 oldreg = oxygen_read_ac97(chip, codec, index);
639 newreg = oldreg; 642 if (!stereo) {
640 newreg = (newreg & ~0x1f) | 643 newreg = oldreg & ~0x1f;
641 (31 - (value->value.integer.value[0] & 0x1f)); 644 newreg |= 31 - (value->value.integer.value[0] & 0x1f);
642 if (stereo) 645 } else {
643 newreg = (newreg & ~0x1f00) | 646 newreg = oldreg & ~0x1f1f;
644 ((31 - (value->value.integer.value[1] & 0x1f)) << 8); 647 newreg |= (31 - (value->value.integer.value[0] & 0x1f)) << 8;
645 else 648 newreg |= 31 - (value->value.integer.value[1] & 0x1f);
646 newreg = (newreg & ~0x1f00) | ((newreg & 0x1f) << 8); 649 }
647 change = newreg != oldreg; 650 change = newreg != oldreg;
648 if (change) 651 if (change)
649 oxygen_write_ac97(chip, codec, index, newreg); 652 oxygen_write_ac97(chip, codec, index, newreg);
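
The oxygen_mixer.c hunk swaps which half of the AC'97 volume register feeds each channel of the stereo controls: after the fix, value[0] comes from bits 8-12 and value[1] from bits 0-4, each stored as 31 minus the raw field because the register counts attenuation. A standalone pack/unpack sketch of that layout:

#include <stdio.h>

/*
 * Left volume lives in bits 8-12, right volume in bits 0-4.
 * The register stores attenuation, so the control value is 31 - field.
 */
static void unpack(unsigned int reg, long vol[2])
{
	vol[0] = 31 - ((reg >> 8) & 0x1f);	/* left  */
	vol[1] = 31 - (reg & 0x1f);		/* right */
}

static unsigned int pack(unsigned int reg, const long vol[2])
{
	reg &= ~0x1f1f;				/* clear both fields     */
	reg |= (31 - (vol[0] & 0x1f)) << 8;	/* left                  */
	reg |= 31 - (vol[1] & 0x1f);		/* right                 */
	return reg;				/* other bits kept as-is */
}

int main(void)
{
	long vol[2] = { 31, 20 };		/* left full, right lower */
	unsigned int reg = pack(0x8000, vol);	/* e.g. keep a mute bit   */
	long back[2];

	unpack(reg, back);
	printf("reg=0x%04x left=%ld right=%ld\n", reg, back[0], back[1]);
	return 0;
}
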
diff --git a/sound/soc/codecs/cs42l73.c b/sound/soc/codecs/cs42l73.c
index 9d38db8f1919..78979b3e0e95 100644
--- a/sound/soc/codecs/cs42l73.c
+++ b/sound/soc/codecs/cs42l73.c
@@ -1113,7 +1113,7 @@ static int cs42l73_pcm_hw_params(struct snd_pcm_substream *substream,
1113 priv->config[id].mmcc &= 0xC0; 1113 priv->config[id].mmcc &= 0xC0;
1114 priv->config[id].mmcc |= cs42l73_mclk_coeffs[mclk_coeff].mmcc; 1114 priv->config[id].mmcc |= cs42l73_mclk_coeffs[mclk_coeff].mmcc;
1115 priv->config[id].spc &= 0xFC; 1115 priv->config[id].spc &= 0xFC;
1116 priv->config[id].spc &= MCK_SCLK_64FS; 1116 priv->config[id].spc |= MCK_SCLK_MCLK;
1117 } else { 1117 } else {
1118 /* CS42L73 Slave */ 1118 /* CS42L73 Slave */
1119 priv->config[id].spc &= 0xFC; 1119 priv->config[id].spc &= 0xFC;
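
The cs42l73 hunk fixes a masking slip: the code clears the clock-mode field with spc &= 0xFC but then ANDed in MCK_SCLK_64FS, which can only clear more bits, where it should OR in MCK_SCLK_MCLK. A minimal before/after sketch with illustrative constants:

#include <stdio.h>

#define MODE_MASK  0x03
#define MODE_MCLK  0x03	/* illustrative value for the wanted mode */

int main(void)
{
	unsigned int spc = 0xA1;

	unsigned int buggy = spc & ~MODE_MASK;
	buggy &= MODE_MCLK;			/* wrong: field ends up 0    */

	unsigned int fixed = spc & ~MODE_MASK;
	fixed |= MODE_MCLK;			/* right: clear, then set    */

	printf("buggy=0x%02x fixed=0x%02x\n", buggy, fixed);
	return 0;
}
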
diff --git a/sound/soc/codecs/wm5100.c b/sound/soc/codecs/wm5100.c
index 66f0611e68b6..89f2af77b1c3 100644
--- a/sound/soc/codecs/wm5100.c
+++ b/sound/soc/codecs/wm5100.c
@@ -1405,6 +1405,7 @@ static int wm5100_set_bias_level(struct snd_soc_codec *codec,
1405 1405
1406 case SND_SOC_BIAS_OFF: 1406 case SND_SOC_BIAS_OFF:
1407 regcache_cache_only(wm5100->regmap, true); 1407 regcache_cache_only(wm5100->regmap, true);
1408 regcache_mark_dirty(wm5100->regmap);
1408 if (wm5100->pdata.ldo_ena) 1409 if (wm5100->pdata.ldo_ena)
1409 gpio_set_value_cansleep(wm5100->pdata.ldo_ena, 0); 1410 gpio_set_value_cansleep(wm5100->pdata.ldo_ena, 0);
1410 regulator_bulk_disable(ARRAY_SIZE(wm5100->core_supplies), 1411 regulator_bulk_disable(ARRAY_SIZE(wm5100->core_supplies),
@@ -2183,6 +2184,7 @@ static void wm5100_micd_irq(struct snd_soc_codec *codec)
2183 if (wm5100->jack_detecting) { 2184 if (wm5100->jack_detecting) {
2184 dev_dbg(codec->dev, "Microphone detected\n"); 2185 dev_dbg(codec->dev, "Microphone detected\n");
2185 wm5100->jack_mic = true; 2186 wm5100->jack_mic = true;
2187 wm5100->jack_detecting = false;
2186 snd_soc_jack_report(wm5100->jack, 2188 snd_soc_jack_report(wm5100->jack,
2187 SND_JACK_HEADSET, 2189 SND_JACK_HEADSET,
2188 SND_JACK_HEADSET | SND_JACK_BTN_0); 2190 SND_JACK_HEADSET | SND_JACK_BTN_0);
@@ -2221,6 +2223,7 @@ static void wm5100_micd_irq(struct snd_soc_codec *codec)
2221 SND_JACK_BTN_0); 2223 SND_JACK_BTN_0);
2222 } else if (wm5100->jack_detecting) { 2224 } else if (wm5100->jack_detecting) {
2223 dev_dbg(codec->dev, "Headphone detected\n"); 2225 dev_dbg(codec->dev, "Headphone detected\n");
2226 wm5100->jack_detecting = false;
2224 snd_soc_jack_report(wm5100->jack, SND_JACK_HEADPHONE, 2227 snd_soc_jack_report(wm5100->jack, SND_JACK_HEADPHONE,
2225 SND_JACK_HEADPHONE); 2228 SND_JACK_HEADPHONE);
2226 2229
@@ -2610,6 +2613,13 @@ static const struct regmap_config wm5100_regmap = {
2610 .cache_type = REGCACHE_RBTREE, 2613 .cache_type = REGCACHE_RBTREE,
2611}; 2614};
2612 2615
2616static const unsigned int wm5100_mic_ctrl_reg[] = {
2617 WM5100_IN1L_CONTROL,
2618 WM5100_IN2L_CONTROL,
2619 WM5100_IN3L_CONTROL,
2620 WM5100_IN4L_CONTROL,
2621};
2622
2613static __devinit int wm5100_i2c_probe(struct i2c_client *i2c, 2623static __devinit int wm5100_i2c_probe(struct i2c_client *i2c,
2614 const struct i2c_device_id *id) 2624 const struct i2c_device_id *id)
2615{ 2625{
@@ -2742,7 +2752,7 @@ static __devinit int wm5100_i2c_probe(struct i2c_client *i2c,
2742 } 2752 }
2743 2753
2744 for (i = 0; i < ARRAY_SIZE(wm5100->pdata.in_mode); i++) { 2754 for (i = 0; i < ARRAY_SIZE(wm5100->pdata.in_mode); i++) {
2745 regmap_update_bits(wm5100->regmap, WM5100_IN1L_CONTROL, 2755 regmap_update_bits(wm5100->regmap, wm5100_mic_ctrl_reg[i],
2746 WM5100_IN1_MODE_MASK | 2756 WM5100_IN1_MODE_MASK |
2747 WM5100_IN1_DMIC_SUP_MASK, 2757 WM5100_IN1_DMIC_SUP_MASK,
2748 (wm5100->pdata.in_mode[i] << 2758 (wm5100->pdata.in_mode[i] <<
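
The wm5100_i2c_probe() hunk fixes a loop that walked pdata.in_mode[] but always wrote WM5100_IN1L_CONTROL; the new wm5100_mic_ctrl_reg[] table lets the loop index select the matching register. A standalone sketch of that parallel-table pattern; the register addresses below are invented for the example:

#include <stdio.h>

#define NUM_INPUTS 4

/* One control-register address per input (values invented for the sketch). */
static const unsigned int mic_ctrl_reg[NUM_INPUTS] = { 0x28, 0x2c, 0x30, 0x34 };

static void write_reg(unsigned int reg, unsigned int val)
{
	printf("reg 0x%02x <- 0x%x\n", reg, val);
}

int main(void)
{
	unsigned int in_mode[NUM_INPUTS] = { 0, 1, 1, 2 };

	/* Index the register table with the same counter as the data table. */
	for (int i = 0; i < NUM_INPUTS; i++)
		write_reg(mic_ctrl_reg[i], in_mode[i]);
	return 0;
}
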
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 296de4e30d26..29c4b02c4790 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -96,7 +96,7 @@ static int wm8962_regulator_event_##n(struct notifier_block *nb, \
96 struct wm8962_priv *wm8962 = container_of(nb, struct wm8962_priv, \ 96 struct wm8962_priv *wm8962 = container_of(nb, struct wm8962_priv, \
97 disable_nb[n]); \ 97 disable_nb[n]); \
98 if (event & REGULATOR_EVENT_DISABLE) { \ 98 if (event & REGULATOR_EVENT_DISABLE) { \
99 regcache_cache_only(wm8962->regmap, true); \ 99 regcache_mark_dirty(wm8962->regmap); \
100 } \ 100 } \
101 return 0; \ 101 return 0; \
102} 102}
@@ -3159,13 +3159,13 @@ static int wm8962_hw_params(struct snd_pcm_substream *substream,
3159 case SNDRV_PCM_FORMAT_S16_LE: 3159 case SNDRV_PCM_FORMAT_S16_LE:
3160 break; 3160 break;
3161 case SNDRV_PCM_FORMAT_S20_3LE: 3161 case SNDRV_PCM_FORMAT_S20_3LE:
3162 aif0 |= 0x40; 3162 aif0 |= 0x4;
3163 break; 3163 break;
3164 case SNDRV_PCM_FORMAT_S24_LE: 3164 case SNDRV_PCM_FORMAT_S24_LE:
3165 aif0 |= 0x80; 3165 aif0 |= 0x8;
3166 break; 3166 break;
3167 case SNDRV_PCM_FORMAT_S32_LE: 3167 case SNDRV_PCM_FORMAT_S32_LE:
3168 aif0 |= 0xc0; 3168 aif0 |= 0xc;
3169 break; 3169 break;
3170 default: 3170 default:
3171 return -EINVAL; 3171 return -EINVAL;
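
The wm8962_hw_params() hunk moves the sample-width code into the right field: the values 0x4, 0x8 and 0xc place the two-bit word-length code at bits 3:2 of the aif0 value instead of bits 7:6. A small sketch of mapping formats onto that field; the enum is a local stand-in, not the ALSA format constants:

#include <stdio.h>

/* Local stand-ins for the handful of formats handled by the hunk. */
enum fmt { FMT_S16, FMT_S20_3, FMT_S24, FMT_S32 };

#define WL_SHIFT 2		/* word-length field sits at bits 3:2 */

static int format_to_aif0(enum fmt f, unsigned int *aif0)
{
	switch (f) {
	case FMT_S16:
		break;			/* 00: 16 bit is the default */
	case FMT_S20_3:
		*aif0 |= 1 << WL_SHIFT;	/* 0x4 */
		break;
	case FMT_S24:
		*aif0 |= 2 << WL_SHIFT;	/* 0x8 */
		break;
	case FMT_S32:
		*aif0 |= 3 << WL_SHIFT;	/* 0xc */
		break;
	default:
		return -1;
	}
	return 0;
}

int main(void)
{
	unsigned int aif0 = 0;

	format_to_aif0(FMT_S24, &aif0);
	printf("aif0=0x%x\n", aif0);	/* prints 0x8 */
	return 0;
}
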
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 93d27b660257..ec69a6c152fe 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -770,6 +770,8 @@ static void vmid_reference(struct snd_soc_codec *codec)
770{ 770{
771 struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); 771 struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
772 772
773 pm_runtime_get_sync(codec->dev);
774
773 wm8994->vmid_refcount++; 775 wm8994->vmid_refcount++;
774 776
775 dev_dbg(codec->dev, "Referencing VMID, refcount is now %d\n", 777 dev_dbg(codec->dev, "Referencing VMID, refcount is now %d\n",
@@ -783,7 +785,12 @@ static void vmid_reference(struct snd_soc_codec *codec)
783 WM8994_VMID_RAMP_MASK, 785 WM8994_VMID_RAMP_MASK,
784 WM8994_STARTUP_BIAS_ENA | 786 WM8994_STARTUP_BIAS_ENA |
785 WM8994_VMID_BUF_ENA | 787 WM8994_VMID_BUF_ENA |
786 (0x11 << WM8994_VMID_RAMP_SHIFT)); 788 (0x3 << WM8994_VMID_RAMP_SHIFT));
789
790 /* Remove discharge for line out */
791 snd_soc_update_bits(codec, WM8994_ANTIPOP_1,
792 WM8994_LINEOUT1_DISCH |
793 WM8994_LINEOUT2_DISCH, 0);
787 794
788 /* Main bias enable, VMID=2x40k */ 795 /* Main bias enable, VMID=2x40k */
789 snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_1, 796 snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_1,
@@ -837,6 +844,8 @@ static void vmid_dereference(struct snd_soc_codec *codec)
837 WM8994_VMID_BUF_ENA | 844 WM8994_VMID_BUF_ENA |
838 WM8994_VMID_RAMP_MASK, 0); 845 WM8994_VMID_RAMP_MASK, 0);
839 } 846 }
847
848 pm_runtime_put(codec->dev);
840} 849}
841 850
842static int vmid_event(struct snd_soc_dapm_widget *w, 851static int vmid_event(struct snd_soc_dapm_widget *w,
@@ -2753,11 +2762,6 @@ static int wm8994_resume(struct snd_soc_codec *codec)
2753 codec->cache_only = 0; 2762 codec->cache_only = 0;
2754 } 2763 }
2755 2764
2756 /* Restore the registers */
2757 ret = snd_soc_cache_sync(codec);
2758 if (ret != 0)
2759 dev_err(codec->dev, "Failed to sync cache: %d\n", ret);
2760
2761 wm8994_set_bias_level(codec, SND_SOC_BIAS_STANDBY); 2765 wm8994_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
2762 2766
2763 for (i = 0; i < ARRAY_SIZE(wm8994->fll); i++) { 2767 for (i = 0; i < ARRAY_SIZE(wm8994->fll); i++) {
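
The vmid_reference()/vmid_dereference() hunks in the wm8994 diff take a pm_runtime reference for as long as VMID is held, pairing pm_runtime_get_sync() on reference with pm_runtime_put() on dereference. A standalone sketch of tying a use count to a get/put power reference; the helpers are placeholders, not the kernel API:

#include <stdio.h>

static int power_refs;			/* stands in for the runtime-PM usage count */
static int vmid_refcount;

static void power_get(void) { power_refs++; }
static void power_put(void) { power_refs--; }

static void vmid_reference(void)
{
	power_get();			/* hold power for the whole VMID lifetime */
	if (++vmid_refcount == 1)
		puts("ramp VMID up");
}

static void vmid_dereference(void)
{
	if (--vmid_refcount == 0)
		puts("discharge VMID");
	power_put();			/* matching put once this user is gone */
}

int main(void)
{
	vmid_reference();
	vmid_reference();
	vmid_dereference();
	vmid_dereference();
	printf("power refs now %d\n", power_refs);
	return 0;
}
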
diff --git a/sound/soc/codecs/wm8996.c b/sound/soc/codecs/wm8996.c
index 13aa2bdaa7d7..61f7daa4d0e6 100644
--- a/sound/soc/codecs/wm8996.c
+++ b/sound/soc/codecs/wm8996.c
@@ -108,7 +108,7 @@ static int wm8996_regulator_event_##n(struct notifier_block *nb, \
108 struct wm8996_priv *wm8996 = container_of(nb, struct wm8996_priv, \ 108 struct wm8996_priv *wm8996 = container_of(nb, struct wm8996_priv, \
109 disable_nb[n]); \ 109 disable_nb[n]); \
110 if (event & REGULATOR_EVENT_DISABLE) { \ 110 if (event & REGULATOR_EVENT_DISABLE) { \
111 regcache_cache_only(wm8996->regmap, true); \ 111 regcache_mark_dirty(wm8996->regmap); \
112 } \ 112 } \
113 return 0; \ 113 return 0; \
114} 114}
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index 2a61094075f8..8a68cea4a3ee 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -586,14 +586,14 @@ SOC_DAPM_SINGLE("Left Output Switch", WM8993_LINE_MIXER1, 0, 1, 0),
586}; 586};
587 587
588static const struct snd_kcontrol_new line2_mix[] = { 588static const struct snd_kcontrol_new line2_mix[] = {
589SOC_DAPM_SINGLE("IN2R Switch", WM8993_LINE_MIXER2, 2, 1, 0), 589SOC_DAPM_SINGLE("IN1L Switch", WM8993_LINE_MIXER2, 2, 1, 0),
590SOC_DAPM_SINGLE("IN2L Switch", WM8993_LINE_MIXER2, 1, 1, 0), 590SOC_DAPM_SINGLE("IN1R Switch", WM8993_LINE_MIXER2, 1, 1, 0),
591SOC_DAPM_SINGLE("Output Switch", WM8993_LINE_MIXER2, 0, 1, 0), 591SOC_DAPM_SINGLE("Output Switch", WM8993_LINE_MIXER2, 0, 1, 0),
592}; 592};
593 593
594static const struct snd_kcontrol_new line2n_mix[] = { 594static const struct snd_kcontrol_new line2n_mix[] = {
595SOC_DAPM_SINGLE("Left Output Switch", WM8993_LINE_MIXER2, 6, 1, 0), 595SOC_DAPM_SINGLE("Left Output Switch", WM8993_LINE_MIXER2, 5, 1, 0),
596SOC_DAPM_SINGLE("Right Output Switch", WM8993_LINE_MIXER2, 5, 1, 0), 596SOC_DAPM_SINGLE("Right Output Switch", WM8993_LINE_MIXER2, 6, 1, 0),
597}; 597};
598 598
599static const struct snd_kcontrol_new line2p_mix[] = { 599static const struct snd_kcontrol_new line2p_mix[] = {
@@ -613,6 +613,8 @@ SND_SOC_DAPM_INPUT("IN2RP:VXRP"),
613SND_SOC_DAPM_SUPPLY("MICBIAS2", WM8993_POWER_MANAGEMENT_1, 5, 0, NULL, 0), 613SND_SOC_DAPM_SUPPLY("MICBIAS2", WM8993_POWER_MANAGEMENT_1, 5, 0, NULL, 0),
614SND_SOC_DAPM_SUPPLY("MICBIAS1", WM8993_POWER_MANAGEMENT_1, 4, 0, NULL, 0), 614SND_SOC_DAPM_SUPPLY("MICBIAS1", WM8993_POWER_MANAGEMENT_1, 4, 0, NULL, 0),
615 615
616SND_SOC_DAPM_SUPPLY("LINEOUT_VMID_BUF", WM8993_ANTIPOP1, 7, 0, NULL, 0),
617
616SND_SOC_DAPM_MIXER("IN1L PGA", WM8993_POWER_MANAGEMENT_2, 6, 0, 618SND_SOC_DAPM_MIXER("IN1L PGA", WM8993_POWER_MANAGEMENT_2, 6, 0,
617 in1l_pga, ARRAY_SIZE(in1l_pga)), 619 in1l_pga, ARRAY_SIZE(in1l_pga)),
618SND_SOC_DAPM_MIXER("IN1R PGA", WM8993_POWER_MANAGEMENT_2, 4, 0, 620SND_SOC_DAPM_MIXER("IN1R PGA", WM8993_POWER_MANAGEMENT_2, 4, 0,
@@ -834,9 +836,11 @@ static const struct snd_soc_dapm_route lineout1_diff_routes[] = {
834}; 836};
835 837
836static const struct snd_soc_dapm_route lineout1_se_routes[] = { 838static const struct snd_soc_dapm_route lineout1_se_routes[] = {
839 { "LINEOUT1N Mixer", NULL, "LINEOUT_VMID_BUF" },
837 { "LINEOUT1N Mixer", "Left Output Switch", "Left Output PGA" }, 840 { "LINEOUT1N Mixer", "Left Output Switch", "Left Output PGA" },
838 { "LINEOUT1N Mixer", "Right Output Switch", "Right Output PGA" }, 841 { "LINEOUT1N Mixer", "Right Output Switch", "Right Output PGA" },
839 842
843 { "LINEOUT1P Mixer", NULL, "LINEOUT_VMID_BUF" },
840 { "LINEOUT1P Mixer", "Left Output Switch", "Left Output PGA" }, 844 { "LINEOUT1P Mixer", "Left Output Switch", "Left Output PGA" },
841 845
842 { "LINEOUT1N Driver", NULL, "LINEOUT1N Mixer" }, 846 { "LINEOUT1N Driver", NULL, "LINEOUT1N Mixer" },
@@ -844,8 +848,8 @@ static const struct snd_soc_dapm_route lineout1_se_routes[] = {
844}; 848};
845 849
846static const struct snd_soc_dapm_route lineout2_diff_routes[] = { 850static const struct snd_soc_dapm_route lineout2_diff_routes[] = {
847 { "LINEOUT2 Mixer", "IN2L Switch", "IN2L PGA" }, 851 { "LINEOUT2 Mixer", "IN1L Switch", "IN1L PGA" },
848 { "LINEOUT2 Mixer", "IN2R Switch", "IN2R PGA" }, 852 { "LINEOUT2 Mixer", "IN1R Switch", "IN1R PGA" },
849 { "LINEOUT2 Mixer", "Output Switch", "Right Output PGA" }, 853 { "LINEOUT2 Mixer", "Output Switch", "Right Output PGA" },
850 854
851 { "LINEOUT2N Driver", NULL, "LINEOUT2 Mixer" }, 855 { "LINEOUT2N Driver", NULL, "LINEOUT2 Mixer" },
@@ -853,9 +857,11 @@ static const struct snd_soc_dapm_route lineout2_diff_routes[] = {
853}; 857};
854 858
855static const struct snd_soc_dapm_route lineout2_se_routes[] = { 859static const struct snd_soc_dapm_route lineout2_se_routes[] = {
860 { "LINEOUT2N Mixer", NULL, "LINEOUT_VMID_BUF" },
856 { "LINEOUT2N Mixer", "Left Output Switch", "Left Output PGA" }, 861 { "LINEOUT2N Mixer", "Left Output Switch", "Left Output PGA" },
857 { "LINEOUT2N Mixer", "Right Output Switch", "Right Output PGA" }, 862 { "LINEOUT2N Mixer", "Right Output Switch", "Right Output PGA" },
858 863
864 { "LINEOUT2P Mixer", NULL, "LINEOUT_VMID_BUF" },
859 { "LINEOUT2P Mixer", "Right Output Switch", "Right Output PGA" }, 865 { "LINEOUT2P Mixer", "Right Output Switch", "Right Output PGA" },
860 866
861 { "LINEOUT2N Driver", NULL, "LINEOUT2N Mixer" }, 867 { "LINEOUT2N Driver", NULL, "LINEOUT2N Mixer" },
diff --git a/sound/soc/samsung/neo1973_wm8753.c b/sound/soc/samsung/neo1973_wm8753.c
index 7ac0ba2025c3..c6012ff5bd3e 100644
--- a/sound/soc/samsung/neo1973_wm8753.c
+++ b/sound/soc/samsung/neo1973_wm8753.c
@@ -230,8 +230,6 @@ static const struct snd_kcontrol_new neo1973_wm8753_controls[] = {
230 230
231/* GTA02 specific routes and controls */ 231/* GTA02 specific routes and controls */
232 232
233#ifdef CONFIG_MACH_NEO1973_GTA02
234
235static int gta02_speaker_enabled; 233static int gta02_speaker_enabled;
236 234
237static int lm4853_set_spk(struct snd_kcontrol *kcontrol, 235static int lm4853_set_spk(struct snd_kcontrol *kcontrol,
@@ -311,10 +309,6 @@ static int neo1973_gta02_wm8753_init(struct snd_soc_codec *codec)
311 return 0; 309 return 0;
312} 310}
313 311
314#else
315static int neo1973_gta02_wm8753_init(struct snd_soc_code *codec) { return 0; }
316#endif
317
318static int neo1973_wm8753_init(struct snd_soc_pcm_runtime *rtd) 312static int neo1973_wm8753_init(struct snd_soc_pcm_runtime *rtd)
319{ 313{
320 struct snd_soc_codec *codec = rtd->codec; 314 struct snd_soc_codec *codec = rtd->codec;
@@ -322,10 +316,6 @@ static int neo1973_wm8753_init(struct snd_soc_pcm_runtime *rtd)
322 int ret; 316 int ret;
323 317
324 /* set up NC codec pins */ 318 /* set up NC codec pins */
325 if (machine_is_neo1973_gta01()) {
326 snd_soc_dapm_nc_pin(dapm, "LOUT2");
327 snd_soc_dapm_nc_pin(dapm, "ROUT2");
328 }
329 snd_soc_dapm_nc_pin(dapm, "OUT3"); 319 snd_soc_dapm_nc_pin(dapm, "OUT3");
330 snd_soc_dapm_nc_pin(dapm, "OUT4"); 320 snd_soc_dapm_nc_pin(dapm, "OUT4");
331 snd_soc_dapm_nc_pin(dapm, "LINE1"); 321 snd_soc_dapm_nc_pin(dapm, "LINE1");
@@ -370,50 +360,6 @@ static int neo1973_wm8753_init(struct snd_soc_pcm_runtime *rtd)
370 return 0; 360 return 0;
371} 361}
372 362
373/* GTA01 specific controls */
374
375#ifdef CONFIG_MACH_NEO1973_GTA01
376
377static const struct snd_soc_dapm_route neo1973_lm4857_routes[] = {
378 {"Amp IN", NULL, "ROUT1"},
379 {"Amp IN", NULL, "LOUT1"},
380
381 {"Handset Spk", NULL, "Amp EP"},
382 {"Stereo Out", NULL, "Amp LS"},
383 {"Headphone", NULL, "Amp HP"},
384};
385
386static const struct snd_soc_dapm_widget neo1973_lm4857_dapm_widgets[] = {
387 SND_SOC_DAPM_SPK("Handset Spk", NULL),
388 SND_SOC_DAPM_SPK("Stereo Out", NULL),
389 SND_SOC_DAPM_HP("Headphone", NULL),
390};
391
392static int neo1973_lm4857_init(struct snd_soc_dapm_context *dapm)
393{
394 int ret;
395
396 ret = snd_soc_dapm_new_controls(dapm, neo1973_lm4857_dapm_widgets,
397 ARRAY_SIZE(neo1973_lm4857_dapm_widgets));
398 if (ret)
399 return ret;
400
401 ret = snd_soc_dapm_add_routes(dapm, neo1973_lm4857_routes,
402 ARRAY_SIZE(neo1973_lm4857_routes));
403 if (ret)
404 return ret;
405
406 snd_soc_dapm_ignore_suspend(dapm, "Stereo Out");
407 snd_soc_dapm_ignore_suspend(dapm, "Handset Spk");
408 snd_soc_dapm_ignore_suspend(dapm, "Headphone");
409
410 return 0;
411}
412
413#else
414static int neo1973_lm4857_init(struct snd_soc_dapm_context *dapm) { return 0; };
415#endif
416
417static struct snd_soc_dai_link neo1973_dai[] = { 363static struct snd_soc_dai_link neo1973_dai[] = {
418{ /* Hifi Playback - for similatious use with voice below */ 364{ /* Hifi Playback - for similatious use with voice below */
419 .name = "WM8753", 365 .name = "WM8753",
@@ -440,11 +386,6 @@ static struct snd_soc_aux_dev neo1973_aux_devs[] = {
440 .name = "dfbmcs320", 386 .name = "dfbmcs320",
441 .codec_name = "dfbmcs320.0", 387 .codec_name = "dfbmcs320.0",
442 }, 388 },
443 {
444 .name = "lm4857",
445 .codec_name = "lm4857.0-007c",
446 .init = neo1973_lm4857_init,
447 },
448}; 389};
449 390
450static struct snd_soc_codec_conf neo1973_codec_conf[] = { 391static struct snd_soc_codec_conf neo1973_codec_conf[] = {
@@ -454,14 +395,10 @@ static struct snd_soc_codec_conf neo1973_codec_conf[] = {
454 }, 395 },
455}; 396};
456 397
457#ifdef CONFIG_MACH_NEO1973_GTA02
458static const struct gpio neo1973_gta02_gpios[] = { 398static const struct gpio neo1973_gta02_gpios[] = {
459 { GTA02_GPIO_HP_IN, GPIOF_OUT_INIT_HIGH, "GTA02_HP_IN" }, 399 { GTA02_GPIO_HP_IN, GPIOF_OUT_INIT_HIGH, "GTA02_HP_IN" },
460 { GTA02_GPIO_AMP_SHUT, GPIOF_OUT_INIT_HIGH, "GTA02_AMP_SHUT" }, 400 { GTA02_GPIO_AMP_SHUT, GPIOF_OUT_INIT_HIGH, "GTA02_AMP_SHUT" },
461}; 401};
462#else
463static const struct gpio neo1973_gta02_gpios[] = {};
464#endif
465 402
466static struct snd_soc_card neo1973 = { 403static struct snd_soc_card neo1973 = {
467 .name = "neo1973", 404 .name = "neo1973",
@@ -480,7 +417,7 @@ static int __init neo1973_init(void)
480{ 417{
481 int ret; 418 int ret;
482 419
483 if (!machine_is_neo1973_gta01() && !machine_is_neo1973_gta02()) 420 if (!machine_is_neo1973_gta02())
484 return -ENODEV; 421 return -ENODEV;
485 422
486 if (machine_is_neo1973_gta02()) { 423 if (machine_is_neo1973_gta02()) {
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
index db6c89a28bda..ea4a82d01160 100644
--- a/sound/soc/sh/fsi.c
+++ b/sound/soc/sh/fsi.c
@@ -1152,12 +1152,8 @@ static snd_pcm_uframes_t fsi_pointer(struct snd_pcm_substream *substream)
1152{ 1152{
1153 struct fsi_priv *fsi = fsi_get_priv(substream); 1153 struct fsi_priv *fsi = fsi_get_priv(substream);
1154 struct fsi_stream *io = fsi_get_stream(fsi, fsi_is_play(substream)); 1154 struct fsi_stream *io = fsi_get_stream(fsi, fsi_is_play(substream));
1155 int samples_pos = io->buff_sample_pos - 1;
1156 1155
1157 if (samples_pos < 0) 1156 return fsi_sample2frame(fsi, io->buff_sample_pos);
1158 samples_pos = 0;
1159
1160 return fsi_sample2frame(fsi, samples_pos);
1161} 1157}
1162 1158
1163static struct snd_pcm_ops fsi_pcm_ops = { 1159static struct snd_pcm_ops fsi_pcm_ops = {
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index b5ecf6d23214..92cee24ed2dc 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -567,6 +567,17 @@ int snd_soc_suspend(struct device *dev)
567 if (!codec->suspended && codec->driver->suspend) { 567 if (!codec->suspended && codec->driver->suspend) {
568 switch (codec->dapm.bias_level) { 568 switch (codec->dapm.bias_level) {
569 case SND_SOC_BIAS_STANDBY: 569 case SND_SOC_BIAS_STANDBY:
570 /*
571 * If the CODEC is capable of idle
572 * bias off then being in STANDBY
573 * means it's doing something,
574 * otherwise fall through.
575 */
576 if (codec->dapm.idle_bias_off) {
577 dev_dbg(codec->dev,
578 "idle_bias_off CODEC on over suspend\n");
579 break;
580 }
570 case SND_SOC_BIAS_OFF: 581 case SND_SOC_BIAS_OFF:
571 codec->driver->suspend(codec); 582 codec->driver->suspend(codec);
572 codec->suspended = 1; 583 codec->suspended = 1;
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 8edc5035fc8f..d89ab4c7d44b 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -1618,6 +1618,14 @@ YAMAHA_DEVICE(0x7010, "UB99"),
1618 } 1618 }
1619}, 1619},
1620{ 1620{
1621 /* Edirol UM-3G */
1622 USB_DEVICE_VENDOR_SPEC(0x0582, 0x0108),
1623 .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
1624 .ifnum = 0,
1625 .type = QUIRK_MIDI_STANDARD_INTERFACE
1626 }
1627},
1628{
1621 /* Boss JS-8 Jam Station */ 1629 /* Boss JS-8 Jam Station */
1622 USB_DEVICE(0x0582, 0x0109), 1630 USB_DEVICE(0x0582, 0x0109),
1623 .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { 1631 .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index ac86d67b636e..7c12650165ae 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -104,7 +104,7 @@ endif
104 104
105CFLAGS = -fno-omit-frame-pointer -ggdb3 -Wall -Wextra -std=gnu99 $(CFLAGS_WERROR) $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) 105CFLAGS = -fno-omit-frame-pointer -ggdb3 -Wall -Wextra -std=gnu99 $(CFLAGS_WERROR) $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
106EXTLIBS = -lpthread -lrt -lelf -lm 106EXTLIBS = -lpthread -lrt -lelf -lm
107ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 107ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
108ALL_LDFLAGS = $(LDFLAGS) 108ALL_LDFLAGS = $(LDFLAGS)
109STRIP ?= strip 109STRIP ?= strip
110 110
@@ -168,10 +168,7 @@ endif
168 168
169### --- END CONFIGURATION SECTION --- 169### --- END CONFIGURATION SECTION ---
170 170
171# Those must not be GNU-specific; they are shared with perl/ which may 171BASIC_CFLAGS = -Iutil/include -Iarch/$(ARCH)/include -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
172# be built by a different compiler. (Note that this is an artifact now
173# but it still might be nice to keep that distinction.)
174BASIC_CFLAGS = -Iutil/include -Iarch/$(ARCH)/include
175BASIC_LDFLAGS = 172BASIC_LDFLAGS =
176 173
177# Guard against environment variables 174# Guard against environment variables
diff --git a/tools/perf/bench/mem-memcpy-x86-64-asm.S b/tools/perf/bench/mem-memcpy-x86-64-asm.S
index a57b66e853c2..185a96d66dd1 100644
--- a/tools/perf/bench/mem-memcpy-x86-64-asm.S
+++ b/tools/perf/bench/mem-memcpy-x86-64-asm.S
@@ -1,2 +1,8 @@
1 1
2#include "../../../arch/x86/lib/memcpy_64.S" 2#include "../../../arch/x86/lib/memcpy_64.S"
3/*
4 * We need to provide note.GNU-stack section, saying that we want
5 * NOT executable stack. Otherwise the final linking will assume that
6 * the ELF stack should not be restricted at all and set it RWX.
7 */
8.section .note.GNU-stack,"",@progbits
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index 59d43abfbfec..fb8566181f27 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -20,7 +20,6 @@
20 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 20 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 * 21 *
22 */ 22 */
23#define _GNU_SOURCE
24#include <sys/utsname.h> 23#include <sys/utsname.h>
25#include <sys/types.h> 24#include <sys/types.h>
26#include <sys/stat.h> 25#include <sys/stat.h>
@@ -31,7 +30,6 @@
31#include <stdlib.h> 30#include <stdlib.h>
32#include <string.h> 31#include <string.h>
33 32
34#undef _GNU_SOURCE
35#include "perf.h" 33#include "perf.h"
36#include "builtin.h" 34#include "builtin.h"
37#include "util/util.h" 35#include "util/util.h"
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 8f80df896038..dd162aa24baa 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -89,8 +89,6 @@ void get_term_dimensions(struct winsize *ws)
89 89
90static void perf_top__update_print_entries(struct perf_top *top) 90static void perf_top__update_print_entries(struct perf_top *top)
91{ 91{
92 top->print_entries = top->winsize.ws_row;
93
94 if (top->print_entries > 9) 92 if (top->print_entries > 9)
95 top->print_entries -= 9; 93 top->print_entries -= 9;
96} 94}
@@ -100,6 +98,13 @@ static void perf_top__sig_winch(int sig __used, siginfo_t *info __used, void *ar
100 struct perf_top *top = arg; 98 struct perf_top *top = arg;
101 99
102 get_term_dimensions(&top->winsize); 100 get_term_dimensions(&top->winsize);
101 if (!top->print_entries
102 || (top->print_entries+4) > top->winsize.ws_row) {
103 top->print_entries = top->winsize.ws_row;
104 } else {
105 top->print_entries += 4;
106 top->winsize.ws_row = top->print_entries;
107 }
103 perf_top__update_print_entries(top); 108 perf_top__update_print_entries(top);
104} 109}
105 110
@@ -453,8 +458,10 @@ static void perf_top__handle_keypress(struct perf_top *top, int c)
453 }; 458 };
454 perf_top__sig_winch(SIGWINCH, NULL, top); 459 perf_top__sig_winch(SIGWINCH, NULL, top);
455 sigaction(SIGWINCH, &act, NULL); 460 sigaction(SIGWINCH, &act, NULL);
456 } else 461 } else {
462 perf_top__sig_winch(SIGWINCH, NULL, top);
457 signal(SIGWINCH, SIG_DFL); 463 signal(SIGWINCH, SIG_DFL);
464 }
458 break; 465 break;
459 case 'E': 466 case 'E':
460 if (top->evlist->nr_entries > 1) { 467 if (top->evlist->nr_entries > 1) {
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 73ddaf06b8e7..2044324b755a 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -554,7 +554,7 @@ static int perf_event__process_kernel_mmap(struct perf_tool *tool __used,
554 554
555 is_kernel_mmap = memcmp(event->mmap.filename, 555 is_kernel_mmap = memcmp(event->mmap.filename,
556 kmmap_prefix, 556 kmmap_prefix,
557 strlen(kmmap_prefix)) == 0; 557 strlen(kmmap_prefix) - 1) == 0;
558 if (event->mmap.filename[0] == '/' || 558 if (event->mmap.filename[0] == '/' ||
559 (!is_kernel_mmap && event->mmap.filename[0] == '[')) { 559 (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
560 560
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 667f3b78bb2c..7132ee834e0e 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -463,6 +463,7 @@ int perf_event__parse_sample(const union perf_event *event, u64 type,
463 memset(data, 0, sizeof(*data)); 463 memset(data, 0, sizeof(*data));
464 data->cpu = data->pid = data->tid = -1; 464 data->cpu = data->pid = data->tid = -1;
465 data->stream_id = data->id = data->time = -1ULL; 465 data->stream_id = data->id = data->time = -1ULL;
466 data->period = 1;
466 467
467 if (event->header.type != PERF_RECORD_SAMPLE) { 468 if (event->header.type != PERF_RECORD_SAMPLE) {
468 if (!sample_id_all) 469 if (!sample_id_all)
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 3e7e0b09c12c..ecd7f4dd7eea 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -2105,7 +2105,7 @@ int perf_event__synthesize_event_type(struct perf_tool *tool,
2105 strncpy(ev.event_type.event_type.name, name, MAX_EVENT_NAME - 1); 2105 strncpy(ev.event_type.event_type.name, name, MAX_EVENT_NAME - 1);
2106 2106
2107 ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE; 2107 ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE;
2108 size = strlen(name); 2108 size = strlen(ev.event_type.event_type.name);
2109 size = ALIGN(size, sizeof(u64)); 2109 size = ALIGN(size, sizeof(u64));
2110 ev.event_type.header.size = sizeof(ev.event_type) - 2110 ev.event_type.header.size = sizeof(ev.event_type) -
2111 (sizeof(ev.event_type.event_type.name) - size); 2111 (sizeof(ev.event_type.event_type.name) - size);
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index eb25900e2211..29cb65459811 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -19,7 +19,6 @@
19 * 19 *
20 */ 20 */
21 21
22#define _GNU_SOURCE
23#include <sys/utsname.h> 22#include <sys/utsname.h>
24#include <sys/types.h> 23#include <sys/types.h>
25#include <sys/stat.h> 24#include <sys/stat.h>
@@ -33,7 +32,6 @@
33#include <limits.h> 32#include <limits.h>
34#include <elf.h> 33#include <elf.h>
35 34
36#undef _GNU_SOURCE
37#include "util.h" 35#include "util.h"
38#include "event.h" 36#include "event.h"
39#include "string.h" 37#include "string.h"
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 215d50f2042e..0975438c3e72 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1,4 +1,3 @@
1#define _GNU_SOURCE
2#include <ctype.h> 1#include <ctype.h>
3#include <dirent.h> 2#include <dirent.h>
4#include <errno.h> 3#include <errno.h>
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index 6c164dc9ee95..1a8d4dc4f386 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -21,14 +21,13 @@
21 * The parts for function graph printing was taken and modified from the 21 * The parts for function graph printing was taken and modified from the
22 * Linux Kernel that were written by Frederic Weisbecker. 22 * Linux Kernel that were written by Frederic Weisbecker.
23 */ 23 */
24#define _GNU_SOURCE 24
25#include <stdio.h> 25#include <stdio.h>
26#include <stdlib.h> 26#include <stdlib.h>
27#include <string.h> 27#include <string.h>
28#include <ctype.h> 28#include <ctype.h>
29#include <errno.h> 29#include <errno.h>
30 30
31#undef _GNU_SOURCE
32#include "../perf.h" 31#include "../perf.h"
33#include "util.h" 32#include "util.h"
34#include "trace-event.h" 33#include "trace-event.h"
diff --git a/tools/perf/util/ui/browsers/hists.c b/tools/perf/util/ui/browsers/hists.c
index 1212a386a033..e81aef1f2569 100644
--- a/tools/perf/util/ui/browsers/hists.c
+++ b/tools/perf/util/ui/browsers/hists.c
@@ -1,6 +1,4 @@
1#define _GNU_SOURCE
2#include <stdio.h> 1#include <stdio.h>
3#undef _GNU_SOURCE
4#include "../libslang.h" 2#include "../libslang.h"
5#include <stdlib.h> 3#include <stdlib.h>
6#include <string.h> 4#include <string.h>
diff --git a/tools/perf/util/ui/helpline.c b/tools/perf/util/ui/helpline.c
index 6ef3c5691762..4f48f5901b30 100644
--- a/tools/perf/util/ui/helpline.c
+++ b/tools/perf/util/ui/helpline.c
@@ -1,4 +1,3 @@
1#define _GNU_SOURCE
2#include <stdio.h> 1#include <stdio.h>
3#include <stdlib.h> 2#include <stdlib.h>
4#include <string.h> 3#include <string.h>
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index b9c530cce79a..ecf9898169c8 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -40,7 +40,6 @@
40#define decimal_length(x) ((int)(sizeof(x) * 2.56 + 0.5) + 1) 40#define decimal_length(x) ((int)(sizeof(x) * 2.56 + 0.5) + 1)
41 41
42#define _ALL_SOURCE 1 42#define _ALL_SOURCE 1
43#define _GNU_SOURCE 1
44#define _BSD_SOURCE 1 43#define _BSD_SOURCE 1
45#define HAS_BOOL 44#define HAS_BOOL
46 45
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 7287bf5d1c9e..a91f980077d8 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1543,7 +1543,7 @@ void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
1543 if (memslot && memslot->dirty_bitmap) { 1543 if (memslot && memslot->dirty_bitmap) {
1544 unsigned long rel_gfn = gfn - memslot->base_gfn; 1544 unsigned long rel_gfn = gfn - memslot->base_gfn;
1545 1545
1546 if (!__test_and_set_bit_le(rel_gfn, memslot->dirty_bitmap)) 1546 if (!test_and_set_bit_le(rel_gfn, memslot->dirty_bitmap))
1547 memslot->nr_dirty_pages++; 1547 memslot->nr_dirty_pages++;
1548 } 1548 }
1549} 1549}